Update Linux to v5.4.2

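This pulls in the upstream v5.4 SCSI updates. For the drivers/scsi
portion shown here, the notable changes are:

 - Convert 3w-9xxx, 3w-sas, 3w-xxxx, BusLogic and a100u2w from the
   legacy PCI DMA API to the generic DMA API:

     pci_alloc_consistent()   -> dma_alloc_coherent(..., GFP_KERNEL)
     pci_zalloc_consistent()  -> dma_alloc_coherent(..., GFP_KERNEL)
     pci_free_consistent()    -> dma_free_coherent()
     pci_(un)map_single()     -> dma_(un)map_single()
     pci_set_dma_mask()       -> dma_set_mask()
     pci_set_dma_mask() + pci_set_consistent_dma_mask()
                              -> dma_set_mask_and_coherent()

   (dma_alloc_coherent() returns zeroed memory, so the former
   pci_zalloc_consistent() callers need no extra memset.)

 - 53c700: pass the probe-time device instead of hostdata->dev to
   dma_alloc_attrs(); hostdata->dev is not yet initialized at that
   point.
 - Drop the removed .use_clustering/ENABLE_CLUSTERING host template
   field; a2091 sets .dma_boundary = PAGE_SIZE - 1 to keep the old
   behaviour.
 - Replace GPL license boilerplate with SPDX-License-Identifier tags.
 - Annotate implicit switch fall-throughs with /* fall through */.
 - NCR5380: track scatterlist progress with advance_sg_buffer() and
   sg_is_last() instead of SCp.buffers_residual, make NCR5380_select()
   return bool, split bus_reset_cleanup() out of NCR5380_host_reset()
   and call it from the bus reset interrupt path, and fix busy[]
   bookkeeping on command completion and abort.
 - aacraid: use struct completion instead of semaphores for fib event
   waits, drop unused locals, and make the ioctl cmd unsigned int.
 - Kconfig/Makefile: drop SCSI_MQ_DEFAULT, CHR_DEV_OSST and the osd/
   directory, drop stale MCA dependencies, and add SCSI_MYRB,
   SCSI_MYRS, SCSI_ESP_PIO and SCSI_FDOMAIN{,_PCI,_ISA}.
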
Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
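---
The DMA mask setup in the probe and resume paths now follows the
standard 64-bit mask with 32-bit fallback pattern, e.g. from
twa_probe():

	retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (retval)
		retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (retval) {
		TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
		retval = -ENODEV;
		goto out_disable_device;
	}
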
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 27521fc..3337b1e 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -143,7 +143,9 @@
 static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal);
 static int twa_reset_device_extension(TW_Device_Extension *tw_dev);
 static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
-static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg);
+static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
+				   unsigned char *cdb, int use_sg,
+				   TW_SG_Entry *sglistarg);
 static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
 static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);
 
@@ -278,7 +280,7 @@
 static int twa_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
 {
 	int request_id = 0;
-	char cdb[TW_MAX_CDB_LEN];
+	unsigned char cdb[TW_MAX_CDB_LEN];
 	TW_SG_Entry sglist[1];
 	int finished = 0, count = 0;
 	TW_Command_Full *full_command_packet;
@@ -423,7 +425,7 @@
 /* This function will read the aen queue from the isr */
 static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
 {
-	char cdb[TW_MAX_CDB_LEN];
+	unsigned char cdb[TW_MAX_CDB_LEN];
 	TW_SG_Entry sglist[1];
 	TW_Command_Full *full_command_packet;
 	int retval = 1;
@@ -518,7 +520,8 @@
 	unsigned long *cpu_addr;
 	int retval = 1;
 
-	cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
+	cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev,
+			size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL);
 	if (!cpu_addr) {
 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
 		goto out;
@@ -526,7 +529,8 @@
 
 	if ((unsigned long)cpu_addr % (TW_ALIGNMENT_9000)) {
 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x6, "Failed to allocate correctly aligned memory");
-		pci_free_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, cpu_addr, dma_handle);
+		dma_free_coherent(&tw_dev->tw_pci_dev->dev, size * TW_Q_LENGTH,
+				cpu_addr, dma_handle);
 		goto out;
 	}
 
@@ -1027,16 +1031,16 @@
 static void twa_free_device_extension(TW_Device_Extension *tw_dev)
 {
 	if (tw_dev->command_packet_virt[0])
-		pci_free_consistent(tw_dev->tw_pci_dev,
-				    sizeof(TW_Command_Full)*TW_Q_LENGTH,
-				    tw_dev->command_packet_virt[0],
-				    tw_dev->command_packet_phys[0]);
+		dma_free_coherent(&tw_dev->tw_pci_dev->dev,
+				sizeof(TW_Command_Full) * TW_Q_LENGTH,
+				tw_dev->command_packet_virt[0],
+				tw_dev->command_packet_phys[0]);
 
 	if (tw_dev->generic_buffer_virt[0])
-		pci_free_consistent(tw_dev->tw_pci_dev,
-				    TW_SECTOR_SIZE*TW_Q_LENGTH,
-				    tw_dev->generic_buffer_virt[0],
-				    tw_dev->generic_buffer_phys[0]);
+		dma_free_coherent(&tw_dev->tw_pci_dev->dev,
+				TW_SECTOR_SIZE * TW_Q_LENGTH,
+				tw_dev->generic_buffer_virt[0],
+				tw_dev->generic_buffer_phys[0]);
 
 	kfree(tw_dev->event_queue[0]);
 } /* End twa_free_device_extension() */
@@ -1796,7 +1800,9 @@
 static DEF_SCSI_QCMD(twa_scsi_queue)
 
 /* This function hands scsi cdb's to the firmware */
-static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg)
+static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
+				   unsigned char *cdb, int use_sg,
+				   TW_SG_Entry *sglistarg)
 {
 	TW_Command_Full *full_command_packet;
 	TW_Command_Apache *command_packet;
@@ -1992,7 +1998,6 @@
 	.sg_tablesize		= TW_APACHE_MAX_SGL_LENGTH,
 	.max_sectors		= TW_MAX_SECTORS,
 	.cmd_per_lun		= TW_MAX_CMDS_PER_LUN,
-	.use_clustering		= ENABLE_CLUSTERING,
 	.shost_attrs		= twa_host_attrs,
 	.emulated		= 1,
 	.no_write_same		= 1,
@@ -2004,7 +2009,7 @@
 	struct Scsi_Host *host = NULL;
 	TW_Device_Extension *tw_dev;
 	unsigned long mem_addr, mem_len;
-	int retval = -ENODEV;
+	int retval;
 
 	retval = pci_enable_device(pdev);
 	if (retval) {
@@ -2015,14 +2020,14 @@
 	pci_set_master(pdev);
 	pci_try_set_mwi(pdev);
 
-	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
-	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
-		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
-		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
-			TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
-			retval = -ENODEV;
-			goto out_disable_device;
-		}
+	retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (retval)
+		retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (retval) {
+		TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
+		retval = -ENODEV;
+		goto out_disable_device;
+	}
 
 	host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
 	if (!host) {
@@ -2237,14 +2242,14 @@
 	pci_set_master(pdev);
 	pci_try_set_mwi(pdev);
 
-	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
-	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
-		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
-		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
-			TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
-			retval = -ENODEV;
-			goto out_disable_device;
-		}
+	retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (retval)
+		retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (retval) {
+		TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
+		retval = -ENODEV;
+		goto out_disable_device;
+	}
 
 	/* Initialize the card */
 	if (twa_reset_sequence(tw_dev, 0)) {
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index 40c1e6e..dda6fa8 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -287,7 +287,9 @@
 } /* End twl_post_command_packet() */
 
 /* This function hands scsi cdb's to the firmware */
-static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry_ISO *sglistarg)
+static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
+				   unsigned char *cdb, int use_sg,
+				   TW_SG_Entry_ISO *sglistarg)
 {
 	TW_Command_Full *full_command_packet;
 	TW_Command_Apache *command_packet;
@@ -372,7 +374,7 @@
 /* This function will read the aen queue from the isr */
 static int twl_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
 {
-	char cdb[TW_MAX_CDB_LEN];
+	unsigned char cdb[TW_MAX_CDB_LEN];
 	TW_SG_Entry_ISO sglist[1];
 	TW_Command_Full *full_command_packet;
 	int retval = 1;
@@ -554,7 +556,7 @@
 static int twl_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
 {
 	int request_id = 0;
-	char cdb[TW_MAX_CDB_LEN];
+	unsigned char cdb[TW_MAX_CDB_LEN];
 	TW_SG_Entry_ISO sglist[1];
 	int finished = 0, count = 0;
 	TW_Command_Full *full_command_packet;
@@ -644,8 +646,9 @@
 	unsigned long *cpu_addr;
 	int retval = 1;
 
-	cpu_addr = pci_zalloc_consistent(tw_dev->tw_pci_dev, size * TW_Q_LENGTH,
-					 &dma_handle);
+	cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev,
+				      size * TW_Q_LENGTH, &dma_handle,
+				      GFP_KERNEL);
 	if (!cpu_addr) {
 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
 		goto out;
@@ -899,19 +902,19 @@
 static void twl_free_device_extension(TW_Device_Extension *tw_dev)
 {
 	if (tw_dev->command_packet_virt[0])
-		pci_free_consistent(tw_dev->tw_pci_dev,
+		dma_free_coherent(&tw_dev->tw_pci_dev->dev,
 				    sizeof(TW_Command_Full)*TW_Q_LENGTH,
 				    tw_dev->command_packet_virt[0],
 				    tw_dev->command_packet_phys[0]);
 
 	if (tw_dev->generic_buffer_virt[0])
-		pci_free_consistent(tw_dev->tw_pci_dev,
+		dma_free_coherent(&tw_dev->tw_pci_dev->dev,
 				    TW_SECTOR_SIZE*TW_Q_LENGTH,
 				    tw_dev->generic_buffer_virt[0],
 				    tw_dev->generic_buffer_phys[0]);
 
 	if (tw_dev->sense_buffer_virt[0])
-		pci_free_consistent(tw_dev->tw_pci_dev,
+		dma_free_coherent(&tw_dev->tw_pci_dev->dev,
 				    sizeof(TW_Command_Apache_Header)*
 				    TW_Q_LENGTH,
 				    tw_dev->sense_buffer_virt[0],
@@ -1548,7 +1551,6 @@
 	.sg_tablesize		= TW_LIBERATOR_MAX_SGL_LENGTH,
 	.max_sectors		= TW_MAX_SECTORS,
 	.cmd_per_lun		= TW_MAX_CMDS_PER_LUN,
-	.use_clustering		= ENABLE_CLUSTERING,
 	.shost_attrs		= twl_host_attrs,
 	.emulated		= 1,
 	.no_write_same		= 1,
@@ -1571,14 +1573,14 @@
 	pci_set_master(pdev);
 	pci_try_set_mwi(pdev);
 
-	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
-	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
-		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
-		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
-			TW_PRINTK(host, TW_DRIVER, 0x18, "Failed to set dma mask");
-			retval = -ENODEV;
-			goto out_disable_device;
-		}
+	retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (retval)
+		retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (retval) {
+		TW_PRINTK(host, TW_DRIVER, 0x18, "Failed to set dma mask");
+		retval = -ENODEV;
+		goto out_disable_device;
+	}
 
 	host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
 	if (!host) {
@@ -1805,14 +1807,14 @@
 	pci_set_master(pdev);
 	pci_try_set_mwi(pdev);
 
-	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
-	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
-		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
-		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
-			TW_PRINTK(host, TW_DRIVER, 0x25, "Failed to set dma mask during resume");
-			retval = -ENODEV;
-			goto out_disable_device;
-		}
+	retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (retval)
+		retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (retval) {
+		TW_PRINTK(host, TW_DRIVER, 0x25, "Failed to set dma mask during resume");
+		retval = -ENODEV;
+		goto out_disable_device;
+	}
 
 	/* Initialize the card */
 	if (twl_reset_sequence(tw_dev, 0)) {
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 4713669..2b1e0d5 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -834,15 +834,17 @@
 
 	dprintk(KERN_NOTICE "3w-xxxx: tw_allocate_memory()\n");
 
-	cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
+	cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev,
+			size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL);
 	if (cpu_addr == NULL) {
-		printk(KERN_WARNING "3w-xxxx: pci_alloc_consistent() failed.\n");
+		printk(KERN_WARNING "3w-xxxx: dma_alloc_coherent() failed.\n");
 		return 1;
 	}
 
 	if ((unsigned long)cpu_addr % (tw_dev->tw_pci_dev->device == TW_DEVICE_ID ? TW_ALIGNMENT_6000 : TW_ALIGNMENT_7000)) {
 		printk(KERN_WARNING "3w-xxxx: Couldn't allocate correctly aligned memory.\n");
-		pci_free_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, cpu_addr, dma_handle);
+		dma_free_coherent(&tw_dev->tw_pci_dev->dev, size * TW_Q_LENGTH,
+				cpu_addr, dma_handle);
 		return 1;
 	}
 
@@ -1062,10 +1064,16 @@
 
 	/* Free command packet and generic buffer memory */
 	if (tw_dev->command_packet_virtual_address[0])
-		pci_free_consistent(tw_dev->tw_pci_dev, sizeof(TW_Command)*TW_Q_LENGTH, tw_dev->command_packet_virtual_address[0], tw_dev->command_packet_physical_address[0]);
+		dma_free_coherent(&tw_dev->tw_pci_dev->dev,
+				sizeof(TW_Command) * TW_Q_LENGTH,
+				tw_dev->command_packet_virtual_address[0],
+				tw_dev->command_packet_physical_address[0]);
 
 	if (tw_dev->alignment_virtual_address[0])
-		pci_free_consistent(tw_dev->tw_pci_dev, sizeof(TW_Sector)*TW_Q_LENGTH, tw_dev->alignment_virtual_address[0], tw_dev->alignment_physical_address[0]);
+		dma_free_coherent(&tw_dev->tw_pci_dev->dev,
+				sizeof(TW_Sector) * TW_Q_LENGTH,
+				tw_dev->alignment_virtual_address[0],
+				tw_dev->alignment_physical_address[0]);
 } /* End tw_free_device_extension() */
 
 /* This function will send an initconnection command to controller */
@@ -1166,7 +1174,7 @@
   	command_que_value = tw_dev->command_packet_physical_address[request_id];
 	if (command_que_value == 0) {
 		printk(KERN_WARNING "3w-xxxx: tw_setfeature(): Bad command packet physical address.\n");
-	return 1;
+		return 1;
 	}
 
 	/* Send command packet to the board */
@@ -2239,7 +2247,6 @@
 	.sg_tablesize		= TW_MAX_SGL_LENGTH,
 	.max_sectors		= TW_MAX_SECTORS,
 	.cmd_per_lun		= TW_MAX_CMDS_PER_LUN,	
-	.use_clustering		= ENABLE_CLUSTERING,
 	.shost_attrs		= tw_host_attrs,
 	.emulated		= 1,
 	.no_write_same		= 1,
@@ -2260,7 +2267,7 @@
 
 	pci_set_master(pdev);
 
-	retval = pci_set_dma_mask(pdev, TW_DMA_MASK);
+	retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 	if (retval) {
 		printk(KERN_WARNING "3w-xxxx: Failed to set dma mask.");
 		goto out_disable_device;
diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h
index 69e80c1..bd87fba 100644
--- a/drivers/scsi/3w-xxxx.h
+++ b/drivers/scsi/3w-xxxx.h
@@ -230,7 +230,6 @@
 #define TW_IOCTL_TIMEOUT                      25 /* 25 seconds */
 #define TW_IOCTL_CHRDEV_TIMEOUT               60 /* 60 seconds */
 #define TW_IOCTL_CHRDEV_FREE                  -1
-#define TW_DMA_MASK			      DMA_BIT_MASK(32)
 #define TW_MAX_CDB_LEN			      16
 
 /* Bitmask macros to eliminate bitfields */
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 6be77b3..0068963 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /* -*- mode: c; c-basic-offset: 8 -*- */
 
 /* NCR (or Symbios) 53c700 and 53c700-66 Driver
@@ -5,19 +6,6 @@
  * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com
 **-----------------------------------------------------------------------------
 **  
-**  This program is free software; you can redistribute it and/or modify
-**  it under the terms of the GNU General Public License as published by
-**  the Free Software Foundation; either version 2 of the License, or
-**  (at your option) any later version.
-**
-**  This program is distributed in the hope that it will be useful,
-**  but WITHOUT ANY WARRANTY; without even the implied warranty of
-**  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-**  GNU General Public License for more details.
-**
-**  You should have received a copy of the GNU General Public License
-**  along with this program; if not, write to the Free Software
-**  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 **
 **-----------------------------------------------------------------------------
  */
@@ -295,7 +283,7 @@
 	if(tpnt->sdev_attrs == NULL)
 		tpnt->sdev_attrs = NCR_700_dev_attrs;
 
-	memory = dma_alloc_attrs(hostdata->dev, TOTAL_MEM_SIZE, &pScript,
+	memory = dma_alloc_attrs(dev, TOTAL_MEM_SIZE, &pScript,
 				 GFP_KERNEL, DMA_ATTR_NON_CONSISTENT);
 	if(memory == NULL) {
 		printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n");
@@ -318,7 +306,6 @@
 	tpnt->can_queue = NCR_700_COMMAND_SLOTS_PER_HOST;
 	tpnt->sg_tablesize = NCR_700_SG_SEGMENTS;
 	tpnt->cmd_per_lun = NCR_700_CMD_PER_LUN;
-	tpnt->use_clustering = ENABLE_CLUSTERING;
 	tpnt->slave_configure = NCR_700_slave_configure;
 	tpnt->slave_destroy = NCR_700_slave_destroy;
 	tpnt->slave_alloc = NCR_700_slave_alloc;
diff --git a/drivers/scsi/53c700.h b/drivers/scsi/53c700.h
index 0c9a100..05fe439 100644
--- a/drivers/scsi/53c700.h
+++ b/drivers/scsi/53c700.h
@@ -90,7 +90,7 @@
 /* The SYNC negotiation sequence looks like:
  * 
  * If DEV_NEGOTIATED_SYNC not set, tack and SDTR message on to the
- * initial identify for the device and set DEV_BEGIN_SYNC_NEGOTATION
+ * initial identify for the device and set DEV_BEGIN_SYNC_NEGOTIATION
  * If we get an SDTR reply, work out the SXFER parameters, squirrel
  * them away here, clear DEV_BEGIN_SYNC_NEGOTIATION and set
  * DEV_NEGOTIATED_SYNC.  If we get a REJECT msg, squirrel
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index 0d4ffe0..c25e8a5 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 
 /*
 
@@ -5,14 +6,6 @@
 
   Copyright 1995-1998 by Leonard N. Zubkoff <lnz@dandelion.com>
 
-  This program is free software; you may redistribute and/or modify it under
-  the terms of the GNU General Public License Version 2 as published by the
-  Free Software Foundation.
-
-  This program is distributed in the hope that it will be useful, but
-  WITHOUT ANY WARRANTY, without even the implied warranty of MERCHANTABILITY
-  or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-  for complete details.
 
   The author respectfully requests that any modifications to this software be
   sent directly to him for evaluation and testing.
@@ -201,8 +194,8 @@
 	dma_addr_t blkp;
 
 	while (adapter->alloc_ccbs < adapter->initccbs) {
-		blk_pointer = pci_alloc_consistent(adapter->pci_device,
-							blk_size, &blkp);
+		blk_pointer = dma_alloc_coherent(&adapter->pci_device->dev,
+				blk_size, &blkp, GFP_KERNEL);
 		if (blk_pointer == NULL) {
 			blogic_err("UNABLE TO ALLOCATE CCB GROUP - DETACHING\n",
 					adapter);
@@ -227,15 +220,16 @@
 		next_ccb = ccb->next_all;
 		if (ccb->allocgrp_head) {
 			if (lastccb)
-				pci_free_consistent(adapter->pci_device,
+				dma_free_coherent(&adapter->pci_device->dev,
 						lastccb->allocgrp_size, lastccb,
 						lastccb->allocgrp_head);
 			lastccb = ccb;
 		}
 	}
 	if (lastccb)
-		pci_free_consistent(adapter->pci_device, lastccb->allocgrp_size,
-					lastccb, lastccb->allocgrp_head);
+		dma_free_coherent(&adapter->pci_device->dev,
+				lastccb->allocgrp_size, lastccb,
+				lastccb->allocgrp_head);
 }
 
 
@@ -256,8 +250,8 @@
 	if (addl_ccbs <= 0)
 		return;
 	while (adapter->alloc_ccbs - prev_alloc < addl_ccbs) {
-		blk_pointer = pci_alloc_consistent(adapter->pci_device,
-							blk_size, &blkp);
+		blk_pointer = dma_alloc_coherent(&adapter->pci_device->dev,
+				blk_size, &blkp, GFP_KERNEL);
 		if (blk_pointer == NULL)
 			break;
 		blogic_init_ccbs(adapter, blk_pointer, blk_size, blkp);
@@ -318,8 +312,8 @@
 	if (ccb->command != NULL)
 		scsi_dma_unmap(ccb->command);
 	if (dma_unmap)
-		pci_unmap_single(adapter->pci_device, ccb->sensedata,
-			 ccb->sense_datalen, PCI_DMA_FROMDEVICE);
+		dma_unmap_single(&adapter->pci_device->dev, ccb->sensedata,
+			 ccb->sense_datalen, DMA_FROM_DEVICE);
 
 	ccb->command = NULL;
 	ccb->status = BLOGIC_CCB_FREE;
@@ -712,7 +706,7 @@
 		if (pci_enable_device(pci_device))
 			continue;
 
-		if (pci_set_dma_mask(pci_device, DMA_BIT_MASK(32)))
+		if (dma_set_mask(&pci_device->dev, DMA_BIT_MASK(32)))
 			continue;
 
 		bus = pci_device->bus->number;
@@ -895,7 +889,7 @@
 		if (pci_enable_device(pci_device))
 			continue;
 
-		if (pci_set_dma_mask(pci_device, DMA_BIT_MASK(32)))
+		if (dma_set_mask(&pci_device->dev, DMA_BIT_MASK(32)))
 			continue;
 
 		bus = pci_device->bus->number;
@@ -952,7 +946,7 @@
 		if (pci_enable_device(pci_device))
 			continue;
 
-		if (pci_set_dma_mask(pci_device, DMA_BIT_MASK(32)))
+		if (dma_set_mask(&pci_device->dev, DMA_BIT_MASK(32)))
 			continue;
 
 		bus = pci_device->bus->number;
@@ -2040,7 +2034,7 @@
 	   Release any allocated memory structs not released elsewhere
 	 */
 	if (adapter->mbox_space)
-		pci_free_consistent(adapter->pci_device, adapter->mbox_sz,
+		dma_free_coherent(&adapter->pci_device->dev, adapter->mbox_sz,
 			adapter->mbox_space, adapter->mbox_space_handle);
 	pci_dev_put(adapter->pci_device);
 	adapter->mbox_space = NULL;
@@ -2092,8 +2086,9 @@
 	   Initialize the Outgoing and Incoming Mailbox pointers.
 	 */
 	adapter->mbox_sz = adapter->mbox_count * (sizeof(struct blogic_outbox) + sizeof(struct blogic_inbox));
-	adapter->mbox_space = pci_alloc_consistent(adapter->pci_device,
-				adapter->mbox_sz, &adapter->mbox_space_handle);
+	adapter->mbox_space = dma_alloc_coherent(&adapter->pci_device->dev,
+				adapter->mbox_sz, &adapter->mbox_space_handle,
+				GFP_KERNEL);
 	if (adapter->mbox_space == NULL)
 		return blogic_failure(adapter, "MAILBOX ALLOCATION");
 	adapter->first_outbox = (struct blogic_outbox *) adapter->mbox_space;
@@ -2639,6 +2634,7 @@
 	case BLOGIC_BAD_CMD_PARAM:
 		blogic_warn("BusLogic Driver Protocol Error 0x%02X\n",
 				adapter, adapter_status);
+		/* fall through */
 	case BLOGIC_DATA_UNDERRUN:
 	case BLOGIC_DATA_OVERRUN:
 	case BLOGIC_NOEXPECT_BUSFREE:
@@ -3183,9 +3179,9 @@
 	memcpy(ccb->cdb, cdb, cdblen);
 	ccb->sense_datalen = SCSI_SENSE_BUFFERSIZE;
 	ccb->command = command;
-	sense_buf = pci_map_single(adapter->pci_device,
+	sense_buf = dma_map_single(&adapter->pci_device->dev,
 				command->sense_buffer, ccb->sense_datalen,
-				PCI_DMA_FROMDEVICE);
+				DMA_FROM_DEVICE);
 	if (dma_mapping_error(&adapter->pci_device->dev, sense_buf)) {
 		blogic_err("DMA mapping for sense data buffer failed\n",
 				adapter);
@@ -3855,7 +3851,6 @@
 #endif
 	.unchecked_isa_dma = 1,
 	.max_sectors = 128,
-	.use_clustering = ENABLE_CLUSTERING,
 };
 
 /*
diff --git a/drivers/scsi/BusLogic.h b/drivers/scsi/BusLogic.h
index 8d47e2c..6182cc8 100644
--- a/drivers/scsi/BusLogic.h
+++ b/drivers/scsi/BusLogic.h
@@ -1,17 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
 
   Linux Driver for BusLogic MultiMaster and FlashPoint SCSI Host Adapters
 
   Copyright 1995-1998 by Leonard N. Zubkoff <lnz@dandelion.com>
 
-  This program is free software; you may redistribute and/or modify it under
-  the terms of the GNU General Public License Version 2 as published by the
-  Free Software Foundation.
-
-  This program is distributed in the hope that it will be useful, but
-  WITHOUT ANY WARRANTY, without even the implied warranty of MERCHANTABILITY
-  or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-  for complete details.
 
   The author respectfully requests that any modifications to this software be
   sent directly to him for evaluation and testing.
diff --git a/drivers/scsi/FlashPoint.c b/drivers/scsi/FlashPoint.c
index 867b864..0f17bd5 100644
--- a/drivers/scsi/FlashPoint.c
+++ b/drivers/scsi/FlashPoint.c
@@ -2944,7 +2944,7 @@
 			}
 
 			if (currSCCB->Lun == 0x00) {
-				if ((currSCCB->Sccb_scsistat == SELECT_SN_ST)) {
+				if (currSCCB->Sccb_scsistat == SELECT_SN_ST) {
 
 					currTar_Info->TarStatus |=
 					    (unsigned char)SYNC_SUPPORTED;
@@ -2953,8 +2953,8 @@
 					    ~EE_SYNC_MASK;
 				}
 
-				else if ((currSCCB->Sccb_scsistat ==
-					  SELECT_WN_ST)) {
+				else if (currSCCB->Sccb_scsistat ==
+					  SELECT_WN_ST) {
 
 					currTar_Info->TarStatus =
 					    (currTar_Info->
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 7c09700..90cf469 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 menu "SCSI device support"
 
 config SCSI_MOD
@@ -42,23 +43,14 @@
 	bool
 	default n
 
+config SCSI_ESP_PIO
+	bool
+
 config SCSI_NETLINK
 	bool
 	default	n
 	depends on NET
 
-config SCSI_MQ_DEFAULT
-	bool "SCSI: use blk-mq I/O path by default"
-	default y
-	depends on SCSI
-	---help---
-	  This option enables the blk-mq based I/O path for SCSI devices by
-	  default.  With this option the scsi_mod.use_blk_mq module/boot
-	  option defaults to Y, without it to N, but it can still be
-	  overridden either way.
-
-	  If unsure say Y.
-
 config SCSI_PROC_FS
 	bool "legacy /proc/scsi/ support"
 	depends on SCSI && PROC_FS
@@ -107,28 +99,6 @@
 	  To compile this driver as a module, choose M here and read
 	  <file:Documentation/scsi/scsi.txt>. The module will be called st.
 
-config CHR_DEV_OSST
-	tristate "SCSI OnStream SC-x0 tape support"
-	depends on SCSI
-	---help---
-	  The OnStream SC-x0 SCSI tape drives cannot be driven by the
-	  standard st driver, but instead need this special osst driver and
-	  use the  /dev/osstX char device nodes (major 206).  Via usb-storage,
-	  you may be able to drive the USB-x0 and DI-x0 drives as well.
-	  Note that there is also a second generation of OnStream
-	  tape drives (ADR-x0) that supports the standard SCSI-2 commands for
-	  tapes (QIC-157) and can be driven by the standard driver st.
-	  For more information, you may have a look at the SCSI-HOWTO
-	  <http://www.tldp.org/docs.html#howto>  and
-	  <file:Documentation/scsi/osst.txt>  in the kernel source.
-	  More info on the OnStream driver may be found on
-	  <http://sourceforge.net/projects/osst/>
-	  Please also have a look at the standard st docu, as most of it
-	  applies to osst as well.
-
-	  To compile this driver as a module, choose M here and read
-	  <file:Documentation/scsi/scsi.txt>. The module will be called osst.
-
 config BLK_DEV_SR
 	tristate "SCSI CDROM support"
 	depends on SCSI && BLK_DEV
@@ -191,7 +161,7 @@
 	
 	  If you want to compile this as a module ( = code which can be
 	  inserted in and removed from the running kernel whenever you want),
-	  say M here and read <file:Documentation/kbuild/modules.txt> and
+	  say M here and read <file:Documentation/kbuild/modules.rst> and
 	  <file:Documentation/scsi/scsi.txt>. The module will be called ch.o.
 	  If unsure, say N.
 
@@ -535,7 +505,7 @@
 
 config SCSI_BUSLOGIC
 	tristate "BusLogic SCSI support"
-	depends on (PCI || ISA || MCA) && SCSI && ISA_DMA_API && VIRT_TO_BUS
+	depends on (PCI || ISA) && SCSI && ISA_DMA_API && VIRT_TO_BUS
 	---help---
 	  This is support for BusLogic MultiMaster and FlashPoint SCSI Host
 	  Adapters. Consult the SCSI-HOWTO, available from
@@ -557,6 +527,37 @@
 	  substantial, so users of MultiMaster Host Adapters may not
 	  wish to include it.
 
+config SCSI_MYRB
+	tristate "Mylex DAC960/DAC1100 PCI RAID Controller (Block Interface)"
+	depends on PCI
+	select RAID_ATTRS
+	help
+	  This driver adds support for the Mylex DAC960, AcceleRAID, and
+	  eXtremeRAID PCI RAID controllers. This driver supports the
+	  older, block based interface.
+	  This driver is a reimplementation of the original DAC960
+	  driver. If you have used the DAC960 driver you should enable
+	  this module.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called myrb.
+
+config SCSI_MYRS
+	tristate "Mylex DAC960/DAC1100 PCI RAID Controller (SCSI Interface)"
+	depends on PCI
+	depends on !CPU_BIG_ENDIAN || COMPILE_TEST
+	select RAID_ATTRS
+	help
+	  This driver adds support for the Mylex DAC960, AcceleRAID, and
+	  eXtremeRAID PCI RAID controllers.  This driver supports the
+	  newer, SCSI-based interface only.
+	  This driver is a reimplementation of the original DAC960
+	  driver. If you have used the DAC960 driver you should enable
+	  this module.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called myrs.
+
 config VMWARE_PVSCSI
 	tristate "VMware PVSCSI driver support"
 	depends on PCI && SCSI && X86
@@ -641,9 +642,44 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called dmx3191d.
 
+config SCSI_FDOMAIN
+	tristate
+	depends on SCSI
+
+config SCSI_FDOMAIN_PCI
+	tristate "Future Domain TMC-3260/AHA-2920A PCI SCSI support"
+	depends on PCI && SCSI
+	select SCSI_FDOMAIN
+	help
+	  This is support for Future Domain's PCI SCSI host adapters (TMC-3260)
+	  and other adapters with PCI bus based on the Future Domain chipsets
+	  (Adaptec AHA-2920A).
+
+	  NOTE: Newer Adaptec AHA-2920C boards use the Adaptec AIC-7850 chip
+	  and should use the aic7xxx driver ("Adaptec AIC7xxx chipset SCSI
+	  controller support"). This Future Domain driver works with the older
+	  Adaptec AHA-2920A boards with a Future Domain chip on them.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called fdomain_pci.
+
+config SCSI_FDOMAIN_ISA
+	tristate "Future Domain 16xx ISA SCSI support"
+	depends on ISA && SCSI
+	select CHECK_SIGNATURE
+	select SCSI_FDOMAIN
+	help
+	  This is support for Future Domain's 16-bit SCSI host adapters
+	  (TMC-1660/1680, TMC-1650/1670, TMC-1610M/MER/MEX) and other adapters
+	  with ISA bus based on the Future Domain chipsets (Quantum ISA-200S,
+	  ISA-250MG; and at least one IBM board).
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called fdomain_isa.
+
 config SCSI_GDTH
 	tristate "Intel/ICP (former GDT SCSI Disk Array) RAID Controller support"
-	depends on (ISA || EISA || PCI) && SCSI && ISA_DMA_API
+	depends on PCI && SCSI
 	---help---
 	  Formerly called GDT SCSI Disk Array Controller Support.
 
@@ -862,7 +898,7 @@
 
 config 53C700_LE_ON_BE
 	bool
-	depends on SCSI_LASI700
+	depends on SCSI_LASI700 || SCSI_SNI_53C710
 	default y
 
 config SCSI_STEX
@@ -1142,12 +1178,12 @@
 
 config SCSI_SIM710
 	tristate "Simple 53c710 SCSI support (Compaq, NCR machines)"
-	depends on (EISA || MCA) && SCSI
+	depends on EISA && SCSI
 	select SCSI_SPI_ATTRS
 	---help---
 	  This driver is for NCR53c710 based SCSI host adapters.
 
-	  It currently supports Compaq EISA cards and NCR MCA cards
+	  It currently supports Compaq EISA cards.
 
 config SCSI_DC395x
 	tristate "Tekram DC395(U/UW/F) and DC315(U) SCSI support"
@@ -1174,8 +1210,6 @@
 	  PCscsi/PCnet (Am53/79C974) solutions.
 	  This is a new implementation base on the generic esp_scsi driver.
 
-	  Documentation can be found in <file:Documentation/scsi/tmscsim.txt>.
-
 	  Note that this driver does NOT support Tekram DC390W/U/F, which are
 	  based on NCR/Symbios chips. Use "NCR53C8XX SCSI support" for those.
 
@@ -1332,6 +1366,7 @@
 	tristate "Zorro ESP SCSI support"
 	depends on ZORRO && SCSI
 	select SCSI_SPI_ATTRS
+	select SCSI_ESP_PIO
 	help
 	  Support for various NCR53C9x (ESP) based SCSI controllers on Zorro
 	  expansion boards for the Amiga.
@@ -1346,14 +1381,14 @@
 	tristate "Atari native SCSI support"
 	depends on ATARI && SCSI
 	select SCSI_SPI_ATTRS
-	select NVRAM
 	---help---
 	  If you have an Atari with built-in NCR5380 SCSI controller (TT,
 	  Falcon, ...) say Y to get it supported. Of course also, if you have
 	  a compatible SCSI controller (e.g. for Medusa).
 
-	  To compile this driver as a module, choose M here: the
-	  module will be called atari_scsi.
+	  To compile this driver as a module, choose M here: the module will
+	  be called atari_scsi. If you also enable NVRAM support, the SCSI
+	  host's ID is taken from the setting in TT RTC NVRAM.
 
 	  This driver supports both styles of NCR integration into the
 	  system: the TT style (separate DMA), and the Falcon style (via
@@ -1374,6 +1409,7 @@
 	tristate "Macintosh NCR53c9[46] SCSI"
 	depends on MAC && SCSI
 	select SCSI_SPI_ATTRS
+	select SCSI_ESP_PIO
 	help
 	  This is the NCR 53c9x SCSI controller found on most of the 68040
 	  based Macintoshes.
@@ -1451,7 +1487,7 @@
 
           This driver is also available as a module. This module will be
           called zfcp. If you want to compile it as a module, say M here
-          and read <file:Documentation/kbuild/modules.txt>.
+          and read <file:Documentation/kbuild/modules.rst>.
 
 config SCSI_PMCRAID
 	tristate "PMC SIERRA Linux MaxRAID adapter support"
@@ -1487,12 +1523,10 @@
 
 source "drivers/scsi/csiostor/Kconfig"
 
-endif # SCSI_LOWLEVEL
-
 source "drivers/scsi/pcmcia/Kconfig"
 
-source "drivers/scsi/device_handler/Kconfig"
+endif # SCSI_LOWLEVEL
 
-source "drivers/scsi/osd/Kconfig"
+source "drivers/scsi/device_handler/Kconfig"
 
 endmenu
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 6d71b2a..c00e3dd 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -76,6 +76,9 @@
 obj-$(CONFIG_SCSI_PM8001)	+= pm8001/
 obj-$(CONFIG_SCSI_ISCI)		+= isci/
 obj-$(CONFIG_SCSI_IPS)		+= ips.o
+obj-$(CONFIG_SCSI_FDOMAIN)	+= fdomain.o
+obj-$(CONFIG_SCSI_FDOMAIN_PCI)	+= fdomain_pci.o
+obj-$(CONFIG_SCSI_FDOMAIN_ISA)	+= fdomain_isa.o
 obj-$(CONFIG_SCSI_GENERIC_NCR5380) += g_NCR5380.o
 obj-$(CONFIG_SCSI_QLOGIC_FAS)	+= qlogicfas408.o	qlogicfas.o
 obj-$(CONFIG_PCMCIA_QLOGIC)	+= qlogicfas408.o
@@ -106,6 +109,8 @@
 obj-$(CONFIG_SCSI_QLOGICPTI)	+= qlogicpti.o
 obj-$(CONFIG_SCSI_MESH)		+= mesh.o
 obj-$(CONFIG_SCSI_MAC53C94)	+= mac53c94.o
+obj-$(CONFIG_SCSI_MYRB)		+= myrb.o
+obj-$(CONFIG_SCSI_MYRS)		+= myrs.o
 obj-$(CONFIG_BLK_DEV_3W_XXXX_RAID) += 3w-xxxx.o
 obj-$(CONFIG_SCSI_3W_9XXX)	+= 3w-9xxx.o
 obj-$(CONFIG_SCSI_3W_SAS)	+= 3w-sas.o
@@ -141,14 +146,12 @@
 obj-$(CONFIG_ARM)		+= arm/
 
 obj-$(CONFIG_CHR_DEV_ST)	+= st.o
-obj-$(CONFIG_CHR_DEV_OSST)	+= osst.o
 obj-$(CONFIG_BLK_DEV_SD)	+= sd_mod.o
 obj-$(CONFIG_BLK_DEV_SR)	+= sr_mod.o
 obj-$(CONFIG_CHR_DEV_SG)	+= sg.o
 obj-$(CONFIG_CHR_DEV_SCH)	+= ch.o
 obj-$(CONFIG_SCSI_ENCLOSURE)	+= ses.o
 
-obj-$(CONFIG_SCSI_OSD_INITIATOR) += osd/
 obj-$(CONFIG_SCSI_HISI_SAS) += hisi_sas/
 
 # This goes last, so that "real" scsi devices probe earlier
@@ -182,7 +185,7 @@
 # Files generated that shall be removed upon make clean
 clean-files :=	53c700_d.h 53c700_u.h scsi_devinfo_tbl.c
 
-$(obj)/53c700.o $(MODVERDIR)/$(obj)/53c700.ver: $(obj)/53c700_d.h
+$(obj)/53c700.o: $(obj)/53c700_d.h
 
 $(obj)/scsi_sysfs.o: $(obj)/scsi_devinfo_tbl.c
 
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 90ea0f5..536426f 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -131,6 +131,7 @@
 
 static int do_abort(struct Scsi_Host *);
 static void do_reset(struct Scsi_Host *);
+static void bus_reset_cleanup(struct Scsi_Host *);
 
 /**
  * initialize_SCp - init the scsi pointer field
@@ -148,12 +149,10 @@
 
 	if (scsi_bufflen(cmd)) {
 		cmd->SCp.buffer = scsi_sglist(cmd);
-		cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
 		cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
 		cmd->SCp.this_residual = cmd->SCp.buffer->length;
 	} else {
 		cmd->SCp.buffer = NULL;
-		cmd->SCp.buffers_residual = 0;
 		cmd->SCp.ptr = NULL;
 		cmd->SCp.this_residual = 0;
 	}
@@ -162,6 +161,17 @@
 	cmd->SCp.Message = 0;
 }
 
+static inline void advance_sg_buffer(struct scsi_cmnd *cmd)
+{
+	struct scatterlist *s = cmd->SCp.buffer;
+
+	if (!cmd->SCp.this_residual && s && !sg_is_last(s)) {
+		cmd->SCp.buffer = sg_next(s);
+		cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
+		cmd->SCp.this_residual = cmd->SCp.buffer->length;
+	}
+}
+
 /**
  * NCR5380_poll_politely2 - wait for two chip register values
  * @hostdata: host private data
@@ -271,9 +281,8 @@
 static void NCR5380_print(struct Scsi_Host *instance)
 {
 	struct NCR5380_hostdata *hostdata = shost_priv(instance);
-	unsigned char status, data, basr, mr, icr, i;
+	unsigned char status, basr, mr, icr, i;
 
-	data = NCR5380_read(CURRENT_SCSI_DATA_REG);
 	status = NCR5380_read(STATUS_REG);
 	mr = NCR5380_read(MODE_REG);
 	icr = NCR5380_read(INITIATOR_COMMAND_REG);
@@ -513,16 +522,15 @@
 
 	if (hostdata->sensing == cmd) {
 		/* Autosense processing ends here */
-		if ((cmd->result & 0xff) != SAM_STAT_GOOD) {
+		if (status_byte(cmd->result) != GOOD) {
 			scsi_eh_restore_cmnd(cmd, &hostdata->ses);
-			set_host_byte(cmd, DID_ERROR);
-		} else
+		} else {
 			scsi_eh_restore_cmnd(cmd, &hostdata->ses);
+			set_driver_byte(cmd, DRIVER_SENSE);
+		}
 		hostdata->sensing = NULL;
 	}
 
-	hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
-
 	cmd->scsi_done(cmd);
 }
 
@@ -710,6 +718,8 @@
 			NCR5380_information_transfer(instance);
 			done = 0;
 		}
+		if (!hostdata->connected)
+			NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
 		spin_unlock_irq(&hostdata->lock);
 		if (!done)
 			cond_resched();
@@ -884,7 +894,14 @@
 			/* Probably Bus Reset */
 			NCR5380_read(RESET_PARITY_INTERRUPT_REG);
 
-			dsprintk(NDEBUG_INTR, instance, "unknown interrupt\n");
+			if (sr & SR_RST) {
+				/* Certainly Bus Reset */
+				shost_printk(KERN_WARNING, instance,
+					     "bus reset interrupt\n");
+				bus_reset_cleanup(instance);
+			} else {
+				dsprintk(NDEBUG_INTR, instance, "unknown interrupt\n");
+			}
 #ifdef SUN3_SCSI_VME
 			dregs->csr |= CSR_DMA_ENABLE;
 #endif
@@ -902,20 +919,16 @@
 	return IRQ_RETVAL(handled);
 }
 
-/*
- * Function : int NCR5380_select(struct Scsi_Host *instance,
- * struct scsi_cmnd *cmd)
+/**
+ * NCR5380_select - attempt arbitration and selection for a given command
+ * @instance: the Scsi_Host instance
+ * @cmd: the scsi_cmnd to execute
  *
- * Purpose : establishes I_T_L or I_T_L_Q nexus for new or existing command,
- * including ARBITRATION, SELECTION, and initial message out for
- * IDENTIFY and queue messages.
+ * This routine establishes an I_T_L nexus for a SCSI command. This involves
+ * ARBITRATION, SELECTION and MESSAGE OUT phases and an IDENTIFY message.
  *
- * Inputs : instance - instantiation of the 5380 driver on which this
- * target lives, cmd - SCSI command to execute.
- *
- * Returns cmd if selection failed but should be retried,
- * NULL if selection failed and should not be retried, or
- * NULL if selection succeeded (hostdata->connected == cmd).
+ * Returns true if the operation should be retried.
+ * Returns false if it should not be retried.
  *
  * Side effects :
  * If bus busy, arbitration failed, etc, NCR5380_select() will exit
@@ -923,16 +936,15 @@
  * SELECT_ENABLE will be set appropriately, the NCR5380
  * will cease to drive any SCSI bus signals.
  *
- * If successful : I_T_L or I_T_L_Q nexus will be established,
- * instance->connected will be set to cmd.
+ * If successful : the I_T_L nexus will be established, and
+ * hostdata->connected will be set to cmd.
  * SELECT interrupt will be disabled.
  *
  * If failed (no target) : cmd->scsi_done() will be called, and the
  * cmd->result host byte set to DID_BAD_TARGET.
  */
 
-static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
-                                        struct scsi_cmnd *cmd)
+static bool NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
 	__releases(&hostdata->lock) __acquires(&hostdata->lock)
 {
 	struct NCR5380_hostdata *hostdata = shost_priv(instance);
@@ -940,6 +952,9 @@
 	unsigned char *data;
 	int len;
 	int err;
+	bool ret = true;
+	bool can_disconnect = instance->irq != NO_IRQ &&
+			      cmd->cmnd[0] != REQUEST_SENSE;
 
 	NCR5380_dprint(NDEBUG_ARBITRATION, instance);
 	dsprintk(NDEBUG_ARBITRATION, instance, "starting arbitration, id = %d\n",
@@ -948,7 +963,7 @@
 	/*
 	 * Arbitration and selection phases are slow and involve dropping the
 	 * lock, so we have to watch out for EH. An exception handler may
-	 * change 'selecting' to NULL. This function will then return NULL
+	 * change 'selecting' to NULL. This function will then return false
 	 * so that the caller will forget about 'cmd'. (During information
 	 * transfer phases, EH may change 'connected' to NULL.)
 	 */
@@ -984,7 +999,7 @@
 	if (!hostdata->selecting) {
 		/* Command was aborted */
 		NCR5380_write(MODE_REG, MR_BASE);
-		goto out;
+		return false;
 	}
 	if (err < 0) {
 		NCR5380_write(MODE_REG, MR_BASE);
@@ -1033,7 +1048,7 @@
 	if (!hostdata->selecting) {
 		NCR5380_write(MODE_REG, MR_BASE);
 		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-		goto out;
+		return false;
 	}
 
 	dsprintk(NDEBUG_ARBITRATION, instance, "won arbitration\n");
@@ -1106,8 +1121,6 @@
 		spin_lock_irq(&hostdata->lock);
 		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
 		NCR5380_reselect(instance);
-		if (!hostdata->connected)
-			NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
 		shost_printk(KERN_ERR, instance, "reselection after won arbitration?\n");
 		goto out;
 	}
@@ -1115,14 +1128,16 @@
 	if (err < 0) {
 		spin_lock_irq(&hostdata->lock);
 		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-		NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+
 		/* Can't touch cmd if it has been reclaimed by the scsi ML */
-		if (hostdata->selecting) {
-			cmd->result = DID_BAD_TARGET << 16;
-			complete_cmd(instance, cmd);
-			dsprintk(NDEBUG_SELECTION, instance, "target did not respond within 250ms\n");
-			cmd = NULL;
-		}
+		if (!hostdata->selecting)
+			return false;
+
+		cmd->result = DID_BAD_TARGET << 16;
+		complete_cmd(instance, cmd);
+		dsprintk(NDEBUG_SELECTION, instance,
+			"target did not respond within 250ms\n");
+		ret = false;
 		goto out;
 	}
 
@@ -1150,17 +1165,16 @@
 	if (err < 0) {
 		shost_printk(KERN_ERR, instance, "select: REQ timeout\n");
 		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-		NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
 		goto out;
 	}
 	if (!hostdata->selecting) {
 		do_abort(instance);
-		goto out;
+		return false;
 	}
 
 	dsprintk(NDEBUG_SELECTION, instance, "target %d selected, going into MESSAGE OUT phase.\n",
 	         scmd_id(cmd));
-	tmp[0] = IDENTIFY(((instance->irq == NO_IRQ) ? 0 : 1), cmd->device->lun);
+	tmp[0] = IDENTIFY(can_disconnect, cmd->device->lun);
 
 	len = 1;
 	data = tmp;
@@ -1171,7 +1185,7 @@
 		cmd->result = DID_ERROR << 16;
 		complete_cmd(instance, cmd);
 		dsprintk(NDEBUG_SELECTION, instance, "IDENTIFY message transfer failed\n");
-		cmd = NULL;
+		ret = false;
 		goto out;
 	}
 
@@ -1186,13 +1200,13 @@
 
 	initialize_SCp(cmd);
 
-	cmd = NULL;
+	ret = false;
 
 out:
 	if (!hostdata->selecting)
-		return NULL;
+		return false;
 	hostdata->selecting = NULL;
-	return cmd;
+	return ret;
 }
 
 /*
@@ -1665,12 +1679,7 @@
 			    sun3_dma_setup_done != cmd) {
 				int count;
 
-				if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
-					++cmd->SCp.buffer;
-					--cmd->SCp.buffers_residual;
-					cmd->SCp.this_residual = cmd->SCp.buffer->length;
-					cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
-				}
+				advance_sg_buffer(cmd);
 
 				count = sun3scsi_dma_xfer_len(hostdata, cmd);
 
@@ -1711,6 +1720,7 @@
 				cmd->result = DID_ERROR << 16;
 				complete_cmd(instance, cmd);
 				hostdata->connected = NULL;
+				hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
 				return;
 #endif
 			case PHASE_DATAIN:
@@ -1719,15 +1729,11 @@
 				 * scatter-gather list, move onto the next one.
 				 */
 
-				if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
-					++cmd->SCp.buffer;
-					--cmd->SCp.buffers_residual;
-					cmd->SCp.this_residual = cmd->SCp.buffer->length;
-					cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
-					dsprintk(NDEBUG_INFORMATION, instance, "%d bytes and %d buffers left\n",
-					         cmd->SCp.this_residual,
-					         cmd->SCp.buffers_residual);
-				}
+				advance_sg_buffer(cmd);
+				dsprintk(NDEBUG_INFORMATION, instance,
+					"this residual %d, sg ents %d\n",
+					cmd->SCp.this_residual,
+					sg_nents(cmd->SCp.buffer));
 
 				/*
 				 * The preferred transfer method is going to be
@@ -1755,10 +1761,8 @@
 						scmd_printk(KERN_INFO, cmd,
 							"switching to slow handshake\n");
 						cmd->device->borken = 1;
-						sink = 1;
-						do_abort(instance);
-						cmd->result = DID_ERROR << 16;
-						/* XXX - need to source or sink data here, as appropriate */
+						do_reset(instance);
+						bus_reset_cleanup(instance);
 					}
 				} else {
 					/* Transfer a small chunk so that the
@@ -1793,6 +1797,7 @@
 					         cmd, scmd_id(cmd), cmd->device->lun);
 
 					hostdata->connected = NULL;
+					hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
 
 					cmd->result &= ~0xffff;
 					cmd->result |= cmd->SCp.Status;
@@ -1817,9 +1822,6 @@
 					 */
 					NCR5380_write(TARGET_COMMAND_REG, 0);
 
-					/* Enable reselect interrupts */
-					NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
-
 					maybe_release_dma_irq(instance);
 					return;
 				case MESSAGE_REJECT:
@@ -1851,8 +1853,6 @@
 					 */
 					NCR5380_write(TARGET_COMMAND_REG, 0);
 
-					/* Enable reselect interrupts */
-					NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
 #ifdef SUN3_SCSI_VME
 					dregs->csr |= CSR_DMA_ENABLE;
 #endif
@@ -1923,13 +1923,13 @@
 					if (!hostdata->connected)
 						return;
 
-					/* Fall through to reject message */
-
+					/* Reject message */
+					/* Fall through */
+				default:
 					/*
 					 * If we get something weird that we aren't expecting,
-					 * reject it.
+					 * log it.
 					 */
-				default:
 					if (tmp == EXTENDED_MESSAGE)
 						scmd_printk(KERN_INFO, cmd,
 						            "rejecting unknown extended message code %02x, length %d\n",
@@ -1951,10 +1951,10 @@
 				NCR5380_transfer_pio(instance, &phase, &len, &data);
 				if (msgout == ABORT) {
 					hostdata->connected = NULL;
+					hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
 					cmd->result = DID_ERROR << 16;
 					complete_cmd(instance, cmd);
 					maybe_release_dma_irq(instance);
-					NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
 					return;
 				}
 				msgout = NOP;
@@ -2014,8 +2014,11 @@
 	NCR5380_write(MODE_REG, MR_BASE);
 
 	target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask);
-
-	dsprintk(NDEBUG_RESELECTION, instance, "reselect\n");
+	if (!target_mask || target_mask & (target_mask - 1)) {
+		shost_printk(KERN_WARNING, instance,
+			     "reselect: bad target_mask 0x%02x\n", target_mask);
+		return;
+	}
 
 	/*
 	 * At this point, we have detected that our SCSI ID is on the bus,
@@ -2029,6 +2032,7 @@
 	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY);
 	if (NCR5380_poll_politely(hostdata,
 	                          STATUS_REG, SR_SEL, 0, 2 * HZ) < 0) {
+		shost_printk(KERN_ERR, instance, "reselect: !SEL timeout\n");
 		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
 		return;
 	}
@@ -2040,6 +2044,10 @@
 
 	if (NCR5380_poll_politely(hostdata,
 	                          STATUS_REG, SR_REQ, SR_REQ, 2 * HZ) < 0) {
+		if ((NCR5380_read(STATUS_REG) & (SR_BSY | SR_SEL)) == 0)
+			/* BUS FREE phase */
+			return;
+		shost_printk(KERN_ERR, instance, "reselect: REQ timeout\n");
 		do_abort(instance);
 		return;
 	}
@@ -2101,13 +2109,16 @@
 		dsprintk(NDEBUG_RESELECTION | NDEBUG_QUEUES, instance,
 		         "reselect: removed %p from disconnected queue\n", tmp);
 	} else {
+		int target = ffs(target_mask) - 1;
+
 		shost_printk(KERN_ERR, instance, "target bitmask 0x%02x lun %d not in disconnected queue.\n",
 		             target_mask, lun);
 		/*
 		 * Since we have an established nexus that we can't do anything
 		 * with, we must abort it.
 		 */
-		do_abort(instance);
+		if (do_abort(instance) == 0)
+			hostdata->busy[target] &= ~(1 << lun);
 		return;
 	}
 
@@ -2115,12 +2126,7 @@
 	if (sun3_dma_setup_done != tmp) {
 		int count;
 
-		if (!tmp->SCp.this_residual && tmp->SCp.buffers_residual) {
-			++tmp->SCp.buffer;
-			--tmp->SCp.buffers_residual;
-			tmp->SCp.this_residual = tmp->SCp.buffer->length;
-			tmp->SCp.ptr = sg_virt(tmp->SCp.buffer);
-		}
+		advance_sg_buffer(tmp);
 
 		count = sun3scsi_dma_xfer_len(hostdata, tmp);
 
@@ -2272,15 +2278,16 @@
 	if (list_del_cmd(&hostdata->autosense, cmd)) {
 		dsprintk(NDEBUG_ABORT, instance,
 		         "abort: removed %p from sense queue\n", cmd);
-		set_host_byte(cmd, DID_ERROR);
 		complete_cmd(instance, cmd);
 	}
 
 out:
 	if (result == FAILED)
 		dsprintk(NDEBUG_ABORT, instance, "abort: failed to abort %p\n", cmd);
-	else
+	else {
+		hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
 		dsprintk(NDEBUG_ABORT, instance, "abort: successfully aborted %p\n", cmd);
+	}
 
 	queue_work(hostdata->work_q, &hostdata->main_task);
 	maybe_release_dma_irq(instance);
@@ -2290,31 +2297,12 @@
 }
 
 
-/**
- * NCR5380_host_reset - reset the SCSI host
- * @cmd: SCSI command undergoing EH
- *
- * Returns SUCCESS
- */
-
-static int NCR5380_host_reset(struct scsi_cmnd *cmd)
+static void bus_reset_cleanup(struct Scsi_Host *instance)
 {
-	struct Scsi_Host *instance = cmd->device->host;
 	struct NCR5380_hostdata *hostdata = shost_priv(instance);
 	int i;
-	unsigned long flags;
 	struct NCR5380_cmd *ncmd;
 
-	spin_lock_irqsave(&hostdata->lock, flags);
-
-#if (NDEBUG & NDEBUG_ANY)
-	scmd_printk(KERN_INFO, cmd, __func__);
-#endif
-	NCR5380_dprint(NDEBUG_ANY, instance);
-	NCR5380_dprint_phase(NDEBUG_ANY, instance);
-
-	do_reset(instance);
-
 	/* reset NCR registers */
 	NCR5380_write(MODE_REG, MR_BASE);
 	NCR5380_write(TARGET_COMMAND_REG, 0);
@@ -2326,11 +2314,6 @@
 	 * commands!
 	 */
 
-	if (list_del_cmd(&hostdata->unissued, cmd)) {
-		cmd->result = DID_RESET << 16;
-		cmd->scsi_done(cmd);
-	}
-
 	if (hostdata->selecting) {
 		hostdata->selecting->result = DID_RESET << 16;
 		complete_cmd(instance, hostdata->selecting);
@@ -2348,7 +2331,6 @@
 	list_for_each_entry(ncmd, &hostdata->autosense, list) {
 		struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
 
-		set_host_byte(cmd, DID_RESET);
 		cmd->scsi_done(cmd);
 	}
 	INIT_LIST_HEAD(&hostdata->autosense);
@@ -2365,6 +2347,41 @@
 
 	queue_work(hostdata->work_q, &hostdata->main_task);
 	maybe_release_dma_irq(instance);
+}
+
+/**
+ * NCR5380_host_reset - reset the SCSI host
+ * @cmd: SCSI command undergoing EH
+ *
+ * Returns SUCCESS
+ */
+
+static int NCR5380_host_reset(struct scsi_cmnd *cmd)
+{
+	struct Scsi_Host *instance = cmd->device->host;
+	struct NCR5380_hostdata *hostdata = shost_priv(instance);
+	unsigned long flags;
+	struct NCR5380_cmd *ncmd;
+
+	spin_lock_irqsave(&hostdata->lock, flags);
+
+#if (NDEBUG & NDEBUG_ANY)
+	shost_printk(KERN_INFO, instance, __func__);
+#endif
+	NCR5380_dprint(NDEBUG_ANY, instance);
+	NCR5380_dprint_phase(NDEBUG_ANY, instance);
+
+	list_for_each_entry(ncmd, &hostdata->unissued, list) {
+		struct scsi_cmnd *scmd = NCR5380_to_scmd(ncmd);
+
+		scmd->result = DID_RESET << 16;
+		scmd->scsi_done(scmd);
+	}
+	INIT_LIST_HEAD(&hostdata->unissued);
+
+	do_reset(instance);
+	bus_reset_cleanup(instance);
+
 	spin_unlock_irqrestore(&hostdata->lock, flags);
 
 	return SUCCESS;
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index 31096a0..5935fd6 100644
--- a/drivers/scsi/NCR5380.h
+++ b/drivers/scsi/NCR5380.h
@@ -235,7 +235,7 @@
 #define NCR5380_PIO_CHUNK_SIZE		256
 
 /* Time limit (ms) to poll registers when IRQs are disabled, e.g. during PDMA */
-#define NCR5380_REG_POLL_TIME		15
+#define NCR5380_REG_POLL_TIME		10
 
 static inline struct scsi_cmnd *NCR5380_to_scmd(struct NCR5380_cmd *ncmd_ptr)
 {
@@ -275,7 +275,7 @@
 static void NCR5380_main(struct work_struct *work);
 static const char *NCR5380_info(struct Scsi_Host *instance);
 static void NCR5380_reselect(struct Scsi_Host *instance);
-static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *, struct scsi_cmnd *);
+static bool NCR5380_select(struct Scsi_Host *, struct scsi_cmnd *);
 static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
 static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
 static int NCR5380_poll_politely2(struct NCR5380_hostdata *,
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index 23b1762..66c5143 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -1078,7 +1078,6 @@
 	.can_queue		= 1,
 	.this_id		= 1,
 	.sg_tablesize		= SG_ALL,
-	.use_clustering		= ENABLE_CLUSTERING,
 };
 
 static int inia100_probe_one(struct pci_dev *pdev,
@@ -1094,7 +1093,7 @@
 
 	if (pci_enable_device(pdev))
 		goto out;
-	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
 		printk(KERN_WARNING "Unable to set 32bit DMA "
 				    "on inia100 adapter, ignoring.\n");
 		goto out_disable_device;
@@ -1124,7 +1123,8 @@
 
 	/* Get total memory needed for SCB */
 	sz = ORC_MAXQUEUE * sizeof(struct orc_scb);
-	host->scb_virt = pci_zalloc_consistent(pdev, sz, &host->scb_phys);
+	host->scb_virt = dma_alloc_coherent(&pdev->dev, sz, &host->scb_phys,
+					    GFP_KERNEL);
 	if (!host->scb_virt) {
 		printk("inia100: SCB memory allocation error\n");
 		goto out_host_put;
@@ -1132,7 +1132,8 @@
 
 	/* Get total memory needed for ESCB */
 	sz = ORC_MAXQUEUE * sizeof(struct orc_extended_scb);
-	host->escb_virt = pci_zalloc_consistent(pdev, sz, &host->escb_phys);
+	host->escb_virt = dma_alloc_coherent(&pdev->dev, sz, &host->escb_phys,
+					     GFP_KERNEL);
 	if (!host->escb_virt) {
 		printk("inia100: ESCB memory allocation error\n");
 		goto out_free_scb_array;
@@ -1177,10 +1178,12 @@
 out_free_irq:
         free_irq(shost->irq, shost);
 out_free_escb_array:
-	pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_extended_scb),
+	dma_free_coherent(&pdev->dev,
+			ORC_MAXQUEUE * sizeof(struct orc_extended_scb),
 			host->escb_virt, host->escb_phys);
 out_free_scb_array:
-	pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_scb),
+	dma_free_coherent(&pdev->dev,
+			ORC_MAXQUEUE * sizeof(struct orc_scb),
 			host->scb_virt, host->scb_phys);
 out_host_put:
 	scsi_host_put(shost);
@@ -1200,9 +1203,11 @@
 	scsi_remove_host(shost);
 
         free_irq(shost->irq, shost);
-	pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_extended_scb),
+	dma_free_coherent(&pdev->dev,
+			ORC_MAXQUEUE * sizeof(struct orc_extended_scb),
 			host->escb_virt, host->escb_phys);
-	pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_scb),
+	dma_free_coherent(&pdev->dev,
+			ORC_MAXQUEUE * sizeof(struct orc_scb),
 			host->scb_virt, host->scb_phys);
         release_region(shost->io_port, 256);
 
diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c
index 61aadc7..564b354 100644
--- a/drivers/scsi/a2091.c
+++ b/drivers/scsi/a2091.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 #include <linux/types.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
@@ -160,7 +161,7 @@
 	.this_id		= 7,
 	.sg_tablesize		= SG_ALL,
 	.cmd_per_lun		= CMD_PER_LUN,
-	.use_clustering		= DISABLE_CLUSTERING
+	.dma_boundary		= PAGE_SIZE - 1,
 };
 
 static int a2091_probe(struct zorro_dev *z, const struct zorro_device_id *ent)
diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c
index 2427a85..222c77c 100644
--- a/drivers/scsi/a3000.c
+++ b/drivers/scsi/a3000.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 #include <linux/types.h>
 #include <linux/mm.h>
 #include <linux/ioport.h>
@@ -175,7 +176,6 @@
 	.this_id		= 7,
 	.sg_tablesize		= SG_ALL,
 	.cmd_per_lun		= CMD_PER_LUN,
-	.use_clustering		= ENABLE_CLUSTERING
 };
 
 static int __init amiga_a3000_scsi_probe(struct platform_device *pdev)
diff --git a/drivers/scsi/a4000t.c b/drivers/scsi/a4000t.c
index 66c5730..5e575af 100644
--- a/drivers/scsi/a4000t.c
+++ b/drivers/scsi/a4000t.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Detection routine for the NCR53c710 based Amiga SCSI Controllers for Linux.
  *		Amiga Technologies A4000T SCSI controller.
diff --git a/drivers/scsi/aacraid/Makefile b/drivers/scsi/aacraid/Makefile
index 1bd9fd1..8f0eec6 100644
--- a/drivers/scsi/aacraid/Makefile
+++ b/drivers/scsi/aacraid/Makefile
@@ -1,8 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
 # Adaptec aacraid
 
 obj-$(CONFIG_SCSI_AACRAID) := aacraid.o
 
 aacraid-objs	:= linit.o aachba.o commctrl.o comminit.o commsup.o \
 		   dpcsup.o rx.o sa.o rkt.o nark.o src.o
-
-ccflags-y	:= -Idrivers/scsi
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 6e35632..0ed3f80 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  *	Adaptec AAC series RAID controller driver
  *	(c) Copyright 2001 Red Hat Inc.
@@ -9,25 +10,10 @@
  *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  *		 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING.  If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
- *
  * Module Name:
  *  aachba.c
  *
  * Abstract: Contains Interfaces to manage IOs.
- *
  */
 
 #include <linux/kernel.h>
@@ -2892,6 +2878,7 @@
 					    !(dev->raw_io_64) ||
 					    ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
 						break;
+					/* fall through */
 				case INQUIRY:
 				case READ_CAPACITY:
 				case TEST_UNIT_READY:
@@ -2966,6 +2953,7 @@
 		/* Issue FIB to tell Firmware to flush it's cache */
 		if ((aac_cache & 6) != 2)
 			return aac_synchronize(scsicmd);
+		/* fall through */
 	case INQUIRY:
 	{
 		struct inquiry_data inq_data;
@@ -3319,8 +3307,9 @@
 			       min_t(size_t,
 				     sizeof(dev->fsa_dev[cid].sense_data),
 				     SCSI_SENSE_BUFFERSIZE));
-		break;
+			break;
 		}
+		/* fall through */
 	case RESERVE:
 	case RELEASE:
 	case REZERO_UNIT:
@@ -3452,7 +3441,7 @@
 	}
 }
 
-int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg)
+int aac_dev_ioctl(struct aac_dev *dev, unsigned int cmd, void __user *arg)
 {
 	switch (cmd) {
 	case FSACTL_QUERY_DISK:
@@ -3480,7 +3469,6 @@
 
 static void aac_srb_callback(void *context, struct fib * fibptr)
 {
-	struct aac_dev *dev;
 	struct aac_srb_reply *srbreply;
 	struct scsi_cmnd *scsicmd;
 
@@ -3491,8 +3479,6 @@
 
 	BUG_ON(fibptr == NULL);
 
-	dev = fibptr->dev;
-
 	srbreply = (struct aac_srb_reply *) fib_data(fibptr);
 
 	scsicmd->sense_buffer[0] = '\0';  /* Initialize sense valid flag to false */
@@ -3921,13 +3907,11 @@
 
 static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *psg)
 {
-	struct aac_dev *dev;
 	unsigned long byte_count = 0;
 	int nseg;
 	struct scatterlist *sg;
 	int i;
 
-	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
 	// Get rid of old data
 	psg->count = 0;
 	psg->sg[0].addr = 0;
@@ -3963,14 +3947,12 @@
 
 static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg)
 {
-	struct aac_dev *dev;
 	unsigned long byte_count = 0;
 	u64 addr;
 	int nseg;
 	struct scatterlist *sg;
 	int i;
 
-	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
 	// Get rid of old data
 	psg->count = 0;
 	psg->sg[0].addr[0] = 0;
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 39eb415..3fa0323 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  *	Adaptec AAC series RAID controller driver
  *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
@@ -9,25 +10,10 @@
  *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  *		 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING.  If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
- *
  * Module Name:
  *  aacraid.h
  *
  * Abstract: Contains all routines for control of the aacraid driver
- *
  */
 
 #ifndef _AACRAID_H_
@@ -40,6 +26,7 @@
 #define nblank(x) _nblank(x)[0]
 
 #include <linux/interrupt.h>
+#include <linux/completion.h>
 #include <linux/pci.h>
 #include <scsi/scsi_host.h>
 
@@ -1241,7 +1228,7 @@
 	u32			unique;		// unique value representing this context
 	ulong			jiffies;	// used for cleanup - dmb changed to ulong
 	struct list_head	next;		// used to link context's into a linked list
-	struct semaphore	wait_sem;	// this is used to wait for the next fib to arrive.
+	struct completion	completion;	// this is used to wait for the next fib to arrive.
 	int			wait;		// Set to true when thread is in WaitForSingleObject
 	unsigned long		count;		// total number of FIBs on FibList
 	struct list_head	fib_list;	// this holds fibs and their attached hw_fibs
@@ -1313,7 +1300,7 @@
 	 *	This is the event the sendfib routine will wait on if the
 	 *	caller did not pass one and this is synch io.
 	 */
-	struct semaphore	event_wait;
+	struct completion	event_wait;
 	spinlock_t		event_lock;
 
 	u32			done;	/* gets set to 1 when fib is complete */
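
Note: both semaphores replaced above were initialized to zero and used as one-shot events — one side up()s, the other down()s. struct completion expresses exactly that, so the conversion is mechanical: sema_init(&s, 0) becomes init_completion(), up() becomes complete(), down_interruptible() becomes wait_for_completion_interruptible(). A minimal sketch of the pattern:

	#include <linux/completion.h>

	static struct completion event;		/* set up once with init_completion(&event) */

	static int wait_side(void)		/* was: down_interruptible(&sem) */
	{
		if (wait_for_completion_interruptible(&event))
			return -ERESTARTSYS;
		return 0;
	}

	static void signal_side(void)		/* e.g. from the ISR; was: up(&sem) */
	{
		complete(&event);
	}
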
@@ -2639,9 +2626,14 @@
 	return capacity;
 }
 
+static inline int aac_pci_offline(struct aac_dev *dev)
+{
+	return pci_channel_offline(dev->pdev) || dev->handle_pci_error;
+}
+
 static inline int aac_adapter_check_health(struct aac_dev *dev)
 {
-	if (unlikely(pci_channel_offline(dev->pdev)))
+	if (unlikely(aac_pci_offline(dev)))
 		return -1;
 
 	return (dev)->a_ops.adapter_check_health(dev);
@@ -2705,12 +2697,12 @@
 int aac_get_config_status(struct aac_dev *dev, int commit_flag);
 int aac_get_containers(struct aac_dev *dev);
 int aac_scsi_cmd(struct scsi_cmnd *cmd);
-int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg);
+int aac_dev_ioctl(struct aac_dev *dev, unsigned int cmd, void __user *arg);
 #ifndef shost_to_class
 #define shost_to_class(shost) &shost->shost_dev
 #endif
 ssize_t aac_get_serial_number(struct device *dev, char *buf);
-int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg);
+int aac_do_ioctl(struct aac_dev *dev, unsigned int cmd, void __user *arg);
 int aac_rx_init(struct aac_dev *dev);
 int aac_rkt_init(struct aac_dev *dev);
 int aac_nark_init(struct aac_dev *dev);
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 25f6600..ffe41bc 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  *	Adaptec AAC series RAID controller driver
  *	(c) Copyright 2001 Red Hat Inc.
@@ -9,25 +10,10 @@
  *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  *		 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING.  If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
- *
  * Module Name:
  *  commctrl.c
  *
  * Abstract: Contains all routines for control of the AFA comm layer
- *
  */
 
 #include <linux/kernel.h>
@@ -41,7 +27,6 @@
 #include <linux/blkdev.h>
 #include <linux/delay.h> /* ssleep prototype */
 #include <linux/kthread.h>
-#include <linux/semaphore.h>
 #include <linux/uaccess.h>
 #include <scsi/scsi_host.h>
 
@@ -203,7 +188,7 @@
 		/*
 		 *	Initialize the mutex used to wait for the next AIF.
 		 */
-		sema_init(&fibctx->wait_sem, 0);
+		init_completion(&fibctx->completion);
 		fibctx->wait = 0;
 		/*
 		 *	Initialize the fibs and set the count of fibs on
@@ -335,7 +320,7 @@
 			ssleep(1);
 		}
 		if (f.wait) {
-			if(down_interruptible(&fibctx->wait_sem) < 0) {
+			if (wait_for_completion_interruptible(&fibctx->completion) < 0) {
 				status = -ERESTARTSYS;
 			} else {
 				/* Lock again and retry */
@@ -1061,7 +1046,7 @@
 	return retval;
 }
 
-int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
+int aac_do_ioctl(struct aac_dev *dev, unsigned int cmd, void __user *arg)
 {
 	int status;
 
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 0dc7b5a..d4fcfa1 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  *	Adaptec AAC series RAID controller driver
  *	(c) Copyright 2001 Red Hat Inc.
@@ -9,26 +10,11 @@
  *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  *               2016-2017 Microsemi Corp. (aacraid@microsemi.com)
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING.  If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
- *
  * Module Name:
  *  comminit.c
  *
  * Abstract: This supports the initialization of the host adapter communication interface.
  *    This is a platform dependent module for the pci cyclone board.
- *
  */
 
 #include <linux/kernel.h>
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 6e1b022..2142a64 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  *	Adaptec AAC series RAID controller driver
  *	(c) Copyright 2001 Red Hat Inc.
@@ -9,26 +10,11 @@
  *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  *		 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING.  If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
- *
  * Module Name:
  *  commsup.c
  *
  * Abstract: Contains all routines that are required for FSA host/adapter
  *    communication.
- *
  */
 
 #include <linux/kernel.h>
@@ -44,7 +30,6 @@
 #include <linux/delay.h>
 #include <linux/kthread.h>
 #include <linux/interrupt.h>
-#include <linux/semaphore.h>
 #include <linux/bcd.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
@@ -189,7 +174,7 @@
 		fibptr->hw_fib_va = hw_fib;
 		fibptr->data = (void *) fibptr->hw_fib_va->data;
 		fibptr->next = fibptr+1;	/* Forward chain the fibs */
-		sema_init(&fibptr->event_wait, 0);
+		init_completion(&fibptr->event_wait);
 		spin_lock_init(&fibptr->event_lock);
 		hw_fib->header.XferState = cpu_to_le32(0xffffffff);
 		hw_fib->header.SenderSize =
@@ -623,7 +608,7 @@
 		}
 		if (wait) {
 			fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
-			if (down_interruptible(&fibptr->event_wait)) {
+			if (wait_for_completion_interruptible(&fibptr->event_wait)) {
 				fibptr->flags &= ~FIB_CONTEXT_FLAG_WAIT;
 				return -EFAULT;
 			}
@@ -659,7 +644,7 @@
 			 * hardware failure has occurred.
 			 */
 			unsigned long timeout = jiffies + (180 * HZ); /* 3 minutes */
-			while (down_trylock(&fibptr->event_wait)) {
+			while (!try_wait_for_completion(&fibptr->event_wait)) {
 				int blink;
 				if (time_is_before_eq_jiffies(timeout)) {
 					struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue];
@@ -673,7 +658,7 @@
 					return -ETIMEDOUT;
 				}
 
-				if (unlikely(pci_channel_offline(dev->pdev)))
+				if (unlikely(aac_pci_offline(dev)))
 					return -EFAULT;
 
 				if ((blink = aac_adapter_check_health(dev)) > 0) {
@@ -689,9 +674,9 @@
 				 */
 				schedule();
 			}
-		} else if (down_interruptible(&fibptr->event_wait)) {
+		} else if (wait_for_completion_interruptible(&fibptr->event_wait)) {
 			/* Do nothing ... satisfy
-			 * down_interruptible must_check */
+			 * wait_for_completion_interruptible must_check */
 		}
 
 		spin_lock_irqsave(&fibptr->event_lock, flags);
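
Note: on the non-interruptible management path the driver cannot sleep unconditionally — it must keep checking adapter health — so it polls. try_wait_for_completion() is the non-blocking counterpart of down_trylock(): it returns true once complete() has run, false otherwise, without sleeping. Roughly:

	unsigned long timeout = jiffies + 180 * HZ;	/* 3 minutes */

	while (!try_wait_for_completion(&fibptr->event_wait)) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		if (unlikely(aac_pci_offline(dev)))	/* recovery in progress */
			return -EFAULT;
		schedule();				/* yield between polls */
	}
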
@@ -773,11 +758,11 @@
 
 		spin_unlock_irqrestore(&fibptr->event_lock, flags);
 
-		if (unlikely(pci_channel_offline(dev->pdev)))
+		if (unlikely(aac_pci_offline(dev)))
 			return -EFAULT;
 
 		fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
-		if (down_interruptible(&fibptr->event_wait))
+		if (wait_for_completion_interruptible(&fibptr->event_wait))
 			fibptr->done = 2;
 		fibptr->flags &= ~(FIB_CONTEXT_FLAG_WAIT);
 
@@ -1304,8 +1289,9 @@
 				  ADD : DELETE;
 				break;
 			}
-			case AifBuManagerEvent:
-				aac_handle_aif_bu(dev, aifcmd);
+			break;
+		case AifBuManagerEvent:
+			aac_handle_aif_bu(dev, aifcmd);
 			break;
 		}
 
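
Note: the hunk above is a bug fix, not just re-indentation. The case label used to sit right after the closing brace of the inner switch with no break in between, so every AIF event handled by that inner switch fell straight through into aac_handle_aif_bu(). The fix adds the missing break and moves the label to the outer switch's indentation level. Schematically, with hypothetical names:

	switch (cmd) {				/* outer switch */
	case CMD_EVENT_NOTIFY:
		switch (sub_event) {		/* inner switch */
		case SUB_ADD_JBOD:
			mark_jbod_added(dev);
			break;			/* leaves the inner switch only */
		}
		break;				/* the previously missing break */
	case CMD_BU_MANAGER_EVENT:
		aac_handle_aif_bu(dev, aifcmd);
		break;
	}
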
@@ -1377,18 +1363,19 @@
 
 	container = 0;
 retry_next:
-	if (device_config_needed == NOTHING)
-	for (; container < dev->maximum_num_containers; ++container) {
-		if ((dev->fsa_dev[container].config_waiting_on == 0) &&
-			(dev->fsa_dev[container].config_needed != NOTHING) &&
-			time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) {
-			device_config_needed =
-				dev->fsa_dev[container].config_needed;
-			dev->fsa_dev[container].config_needed = NOTHING;
-			channel = CONTAINER_TO_CHANNEL(container);
-			id = CONTAINER_TO_ID(container);
-			lun = CONTAINER_TO_LUN(container);
-			break;
+	if (device_config_needed == NOTHING) {
+		for (; container < dev->maximum_num_containers; ++container) {
+			if ((dev->fsa_dev[container].config_waiting_on == 0) &&
+			    (dev->fsa_dev[container].config_needed != NOTHING) &&
+			    time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) {
+				device_config_needed =
+					dev->fsa_dev[container].config_needed;
+				dev->fsa_dev[container].config_needed = NOTHING;
+				channel = CONTAINER_TO_CHANNEL(container);
+				id = CONTAINER_TO_ID(container);
+				lun = CONTAINER_TO_LUN(container);
+				break;
+			}
 		}
 	}
 	if (device_config_needed == NOTHING)
@@ -1538,7 +1525,7 @@
 		  || fib->flags & FIB_CONTEXT_FLAG_WAIT) {
 			unsigned long flagv;
 			spin_lock_irqsave(&fib->event_lock, flagv);
-			up(&fib->event_wait);
+			complete(&fib->event_wait);
 			spin_unlock_irqrestore(&fib->event_lock, flagv);
 			schedule();
 			retval = 0;
@@ -1828,7 +1815,7 @@
 			 * Set the event to wake up the
 			 * thread that will waiting.
 			 */
-			up(&fibctx->wait_sem);
+			complete(&fibctx->completion);
 		} else {
 			printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
 			kfree(fib);
@@ -2165,7 +2152,7 @@
 		 * Set the event to wake up the
 		 * thread that is waiting.
 		 */
-		up(&fibctx->wait_sem);
+		complete(&fibctx->completion);
 
 		entry = entry->next;
 	}
@@ -2586,9 +2573,7 @@
 void aac_free_irq(struct aac_dev *dev)
 {
 	int i;
-	int cpu;
 
-	cpu = cpumask_first(cpu_online_mask);
 	if (aac_is_src(dev)) {
 		if (dev->max_msix > 1) {
 			for (i = 0; i < dev->max_msix; i++)
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index ddc6973..a557aa6 100644
--- a/drivers/scsi/aacraid/dpcsup.c
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  *	Adaptec AAC series RAID controller driver
  *	(c) Copyright 2001 Red Hat Inc.
@@ -9,26 +10,10 @@
  *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  *		 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING.  If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
- *
  * Module Name:
  *  dpcsup.c
  *
  * Abstract: All DPC processing routines for the cyclone board occur here.
- *
- *
  */
 
 #include <linux/kernel.h>
@@ -38,7 +23,6 @@
 #include <linux/slab.h>
 #include <linux/completion.h>
 #include <linux/blkdev.h>
-#include <linux/semaphore.h>
 
 #include "aacraid.h"
 
@@ -129,7 +113,7 @@
 			spin_lock_irqsave(&fib->event_lock, flagv);
 			if (!fib->done) {
 				fib->done = 1;
-				up(&fib->event_wait);
+				complete(&fib->event_wait);
 			}
 			spin_unlock_irqrestore(&fib->event_lock, flagv);
 
@@ -376,16 +360,16 @@
 				start_callback = 1;
 			} else {
 				unsigned long flagv;
-				int complete = 0;
+				int completed = 0;
 
 				dprintk((KERN_INFO "event_wait up\n"));
 				spin_lock_irqsave(&fib->event_lock, flagv);
 				if (fib->done == 2) {
 					fib->done = 1;
-					complete = 1;
+					completed = 1;
 				} else {
 					fib->done = 1;
-					up(&fib->event_wait);
+					complete(&fib->event_wait);
 				}
 				spin_unlock_irqrestore(&fib->event_lock, flagv);
 
@@ -395,7 +379,7 @@
 					mflags);
 
 				FIB_COUNTER_INCREMENT(aac_config.NativeRecved);
-				if (complete)
+				if (completed)
 					aac_fib_complete(fib);
 			}
 		} else {
@@ -428,16 +412,16 @@
 				start_callback = 1;
 			} else {
 				unsigned long flagv;
-				int complete = 0;
+				int completed = 0;
 
 				dprintk((KERN_INFO "event_wait up\n"));
 				spin_lock_irqsave(&fib->event_lock, flagv);
 				if (fib->done == 2) {
 					fib->done = 1;
-					complete = 1;
+					completed = 1;
 				} else {
 					fib->done = 1;
-					up(&fib->event_wait);
+					complete(&fib->event_wait);
 				}
 				spin_unlock_irqrestore(&fib->event_lock, flagv);
 
@@ -447,7 +431,7 @@
 					mflags);
 
 				FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
-				if (complete)
+				if (completed)
 					aac_fib_complete(fib);
 			}
 		}
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 0444357..4a85878 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  *	Adaptec AAC series RAID controller driver
  *	(c) Copyright 2001 Red Hat Inc.
@@ -9,20 +10,6 @@
  *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  *		 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING.  If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
- *
  * Module Name:
  *   linit.c
  *
@@ -40,7 +27,6 @@
 #include <linux/moduleparam.h>
 #include <linux/pci.h>
 #include <linux/aer.h>
-#include <linux/pci-aspm.h>
 #include <linux/slab.h>
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
@@ -413,13 +399,16 @@
 	if (chn < AAC_MAX_BUSES && tid < AAC_MAX_TARGETS && aac->sa_firmware) {
 		devtype = aac->hba_map[chn][tid].devtype;
 
-		if (devtype == AAC_DEVTYPE_NATIVE_RAW)
+		if (devtype == AAC_DEVTYPE_NATIVE_RAW) {
 			depth = aac->hba_map[chn][tid].qd_limit;
-		else if (devtype == AAC_DEVTYPE_ARC_RAW)
+			set_timeout = 1;
+			goto common_config;
+		}
+		if (devtype == AAC_DEVTYPE_ARC_RAW) {
 			set_qd_dev_type = true;
-
-		set_timeout = 1;
-		goto common_config;
+			set_timeout = 1;
+			goto common_config;
+		}
 	}
 
 	if (aac->jbod && (sdev->type == TYPE_DISK))
@@ -616,7 +605,8 @@
 	NULL,
 };
 
-static int aac_ioctl(struct scsi_device *sdev, int cmd, void __user * arg)
+static int aac_ioctl(struct scsi_device *sdev, unsigned int cmd,
+		     void __user *arg)
 {
 	struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
 	if (!capable(CAP_SYS_RAWIO))
@@ -759,6 +749,7 @@
 			    !(aac->raw_io_64) ||
 			    ((cmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
 				break;
+			/* fall through */
 		case INQUIRY:
 		case READ_CAPACITY:
 			/*
@@ -851,8 +842,7 @@
 
 	address = (u64)fib->hw_error_pa;
 	rst->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
-	rst->error_ptr_lo = cpu_to_le32
-		((u32)(address & 0xffffffff));
+	rst->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff));
 	rst->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
 	fib->hbacmd_size = sizeof(*rst);
 
@@ -1205,7 +1195,8 @@
 	return ret;
 }
 
-static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
+static int aac_compat_ioctl(struct scsi_device *sdev, unsigned int cmd,
+			    void __user *arg)
 {
 	struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
 	if (!capable(CAP_SYS_RAWIO))
@@ -1539,7 +1530,6 @@
 #else
 	.cmd_per_lun			= AAC_NUM_IO_FIB,
 #endif
-	.use_clustering			= ENABLE_CLUSTERING,
 	.emulated			= 1,
 	.no_write_same			= 1,
 };
@@ -1559,7 +1549,7 @@
 			struct fib *fib = &aac->fibs[i];
 			if (!(fib->hw_fib_va->header.XferState & cpu_to_le32(NoResponseExpected | Async)) &&
 			    (fib->hw_fib_va->header.XferState & cpu_to_le32(ResponseExpected)))
-				up(&fib->event_wait);
+				complete(&fib->event_wait);
 		}
 		kthread_stop(aac->thread);
 		aac->thread = NULL;
@@ -1747,11 +1737,10 @@
 		shost->max_sectors = (shost->sg_tablesize * 8) + 112;
 	}
 
-	error = pci_set_dma_max_seg_size(pdev,
-		(aac->adapter_info.options & AAC_OPT_NEW_COMM) ?
-			(shost->max_sectors << 9) : 65536);
-	if (error)
-		goto out_deinit;
+	if (aac->adapter_info.options & AAC_OPT_NEW_COMM)
+		shost->max_segment_size = shost->max_sectors << 9;
+	else
+		shost->max_segment_size = 65536;
 
 	/*
 	 * Firmware printf works only with older firmware.
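
Note: instead of calling pci_set_dma_max_seg_size() and handling its return value, the driver now just records the limit in the Scsi_Host; the midlayer is expected to propagate shost->max_segment_size to the DMA layer and block queue when the host's queues are set up, which is why the error path disappears. The resulting pattern is simply:

	/* sketch: per-host DMA segment cap, applied by the midlayer at queue setup */
	if (new_comm_supported)				/* hypothetical condition */
		shost->max_segment_size = shost->max_sectors << 9;
	else
		shost->max_segment_size = 65536;	/* legacy firmware limit */
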
@@ -2055,8 +2044,6 @@
 	struct scsi_device *sdev = NULL;
 	struct aac_dev *aac = (struct aac_dev *)shost_priv(shost);
 
-	pci_cleanup_aer_uncorrect_error_status(pdev);
-
 	if (aac_adapter_ioremap(aac, aac->base_size)) {
 
 		dev_err(&pdev->dev, "aacraid: ioremap failed\n");
diff --git a/drivers/scsi/aacraid/nark.c b/drivers/scsi/aacraid/nark.c
index c59074e..b5d6b24 100644
--- a/drivers/scsi/aacraid/nark.c
+++ b/drivers/scsi/aacraid/nark.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  *	Adaptec AAC series RAID controller driver
  *
@@ -8,25 +9,10 @@
  *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  *		 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING.  If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
- *
  * Module Name:
  *  nark.c
  *
  * Abstract: Hardware Device Interface for NEMER/ARK
- *
  */
 
 #include <linux/pci.h>
diff --git a/drivers/scsi/aacraid/rkt.c b/drivers/scsi/aacraid/rkt.c
index a1bc5bb..5f2cede 100644
--- a/drivers/scsi/aacraid/rkt.c
+++ b/drivers/scsi/aacraid/rkt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  *	Adaptec AAC series RAID controller driver
  *	(c) Copyright 2001 Red Hat Inc.
@@ -9,25 +10,10 @@
  *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  *		 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING.  If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
- *
  * Module Name:
  *  rkt.c
  *
  * Abstract: Hardware miniport for Drawbridge specific hardware functions.
- *
  */
 
 #include <linux/blkdev.h>
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index 576cdf9..3dea348 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  *	Adaptec AAC series RAID controller driver
  *	(c) Copyright 2001 Red Hat Inc.
@@ -9,25 +10,10 @@
  *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  *		 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING.  If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
- *
  * Module Name:
  *  rx.c
  *
  * Abstract: Hardware miniport for Drawbridge specific hardware functions.
- *
  */
 
 #include <linux/kernel.h>
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index efa96c1..aa5d763 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  *	Adaptec AAC series RAID controller driver
  *	(c) Copyright 2001 Red Hat Inc.
@@ -9,25 +10,10 @@
  *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  *		 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING.  If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
- *
  * Module Name:
  *  sa.c
  *
  * Abstract: Drawbridge specific support functions
- *
  */
 
 #include <linux/kernel.h>
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 7a51ccf..3b66e06 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  *	Adaptec AAC series RAID controller driver
  *	(c) Copyright 2001 Red Hat Inc.
@@ -9,25 +10,10 @@
  *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  *		 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING.  If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
- *
  * Module Name:
  *  src.c
  *
  * Abstract: Hardware Device Interface for PMC SRC based controllers
- *
  */
 
 #include <linux/kernel.h>
@@ -106,7 +92,7 @@
 			spin_lock_irqsave(&dev->sync_fib->event_lock, sflags);
 			if (dev->sync_fib->flags & FIB_CONTEXT_FLAG_WAIT) {
 				dev->management_fib_count--;
-				up(&dev->sync_fib->event_wait);
+				complete(&dev->sync_fib->event_wait);
 			}
 			spin_unlock_irqrestore(&dev->sync_fib->event_lock,
 						sflags);
@@ -1157,7 +1143,7 @@
 		dev_err(&dev->pdev->dev, "%s: %s status = %d", __func__,
 			state_str[state], rc);
 
-return rc;
+	return rc;
 }
 /**
  *  aac_srcv_init	-	initialize an SRCv card
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 713f690..a242a62 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
  *
@@ -6,11 +7,6 @@
  * Copyright (c) 2007 Matthew Wilcox <matthew@wil.cx>
  * Copyright (c) 2014 Hannes Reinecke <hare@suse.de>
  * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
  */
 
 /*
@@ -3192,8 +3188,8 @@
 		   shost->sg_tablesize, shost->cmd_per_lun);
 
 	seq_printf(m,
-		   " unchecked_isa_dma %d, use_clustering %d\n",
-		   shost->unchecked_isa_dma, shost->use_clustering);
+		   " unchecked_isa_dma %d\n",
+		   shost->unchecked_isa_dma);
 
 	seq_printf(m,
 		   " flags 0x%x, last_reset 0x%lx, jiffies 0x%lx, asc_n_io_port 0x%x\n",
@@ -5949,7 +5945,6 @@
 static void adv_isr_callback(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp)
 {
 	struct asc_board *boardp = adv_dvc_varp->drv_ptr;
-	u32 srb_tag;
 	adv_req_t *reqp;
 	adv_sgblk_t *sgblkp;
 	struct scsi_cmnd *scp;
@@ -5965,7 +5960,6 @@
 	 * completed. The adv_req_t structure actually contains the
 	 * completed ADV_SCSI_REQ_Q structure.
 	 */
-	srb_tag = le32_to_cpu(scsiqp->srb_tag);
 	scp = scsi_host_find_tag(boardp->shost, scsiqp->srb_tag);
 
 	ASC_DBG(1, "scp 0x%p\n", scp);
@@ -6448,7 +6442,7 @@
 				sdtr_data =
 				    AscCalSDTRData(asc_dvc, ext_msg.xfer_period,
 						   ext_msg.req_ack_offset);
-				if ((sdtr_data == 0xFF)) {
+				if (sdtr_data == 0xFF) {
 
 					q_cntl |= QC_MSG_OUT;
 					asc_dvc->init_sdtr &= ~target_id;
@@ -7716,7 +7710,7 @@
 				sg_block->sg_ptr = 0L; /* Last ADV_SG_BLOCK in list. */
 				return ADV_SUCCESS;
 			}
-			slp++;
+			slp = sg_next(slp);
 		}
 		sg_block->sg_cnt = NO_OF_SG_PER_BLOCK;
 		prev_sg_block = sg_block;
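
Note: plain pointer arithmetic ("slp++") assumes the scatterlist is one flat array, which stopped being guaranteed once chained scatterlists arrived: the last entry of a chunk may be a chain link to another chunk. sg_next() follows the link transparently, and for_each_sg() wraps the whole walk. Roughly, once the list has been DMA-mapped:

	struct scatterlist *sg;
	int i;

	/* walk a possibly-chained scatterlist; never advance with sg++ */
	for_each_sg(scsi_sglist(cmd), sg, scsi_sg_count(cmd), i)
		fill_sg_entry(sg_dma_address(sg), sg_dma_len(sg));	/* hypothetical helper */
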
@@ -10810,14 +10804,6 @@
 	 * for non-ISA adapters.
 	 */
 	.unchecked_isa_dma = true,
-	/*
-	 * All adapters controlled by this driver are capable of large
-	 * scatter-gather lists. According to the mid-level SCSI documentation
-	 * this obviates any performance gain provided by setting
-	 * 'use_clustering'. But empirically while CPU utilization is increased
-	 * by enabling clustering, I/O throughput increases as well.
-	 */
-	.use_clustering = ENABLE_CLUSTERING,
 };
 
 static int advansys_wide_init_chip(struct Scsi_Host *shost)
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 4d7b0e0..eb466c2 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -1,18 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /* aha152x.c -- Adaptec AHA-152x driver
  * Author: Jürgen E. Fischer, fischer@norbit.de
  * Copyright 1993-2004 Jürgen E. Fischer
  *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- *
  * $Id: aha152x.c,v 2.7 2004/01/24 11:42:59 fischer Exp $
  *
  * $Log: aha152x.c,v $
@@ -228,7 +218,6 @@
  * Revision 0.0  1993/08/14  19:54:25  root
  * empty function bodies; detect() works.
  *
- *
  **************************************************************************
 
  see Documentation/scsi/aha152x.txt for configuration details
@@ -269,7 +258,7 @@
 /* DEFINES */
 
 /* For PCMCIA cards, always use AUTOCONF */
-#if defined(PCMCIA) || defined(MODULE)
+#if defined(AHA152X_PCMCIA) || defined(MODULE)
 #if !defined(AUTOCONF)
 #define AUTOCONF
 #endif
@@ -297,7 +286,7 @@
 
 #define DELAY_DEFAULT 1000
 
-#if defined(PCMCIA)
+#if defined(AHA152X_PCMCIA)
 #define IRQ_MIN 0
 #define IRQ_MAX 16
 #else
@@ -328,7 +317,7 @@
 MODULE_DESCRIPTION(AHA152X_REVID);
 MODULE_LICENSE("GPL");
 
-#if !defined(PCMCIA)
+#if !defined(AHA152X_PCMCIA)
 #if defined(MODULE)
 static int io[] = {0, 0};
 module_param_hw_array(io, int, ioport, NULL, 0);
@@ -391,7 +380,7 @@
 MODULE_DEVICE_TABLE(isapnp, id_table);
 #endif /* ISAPNP */
 
-#endif /* !PCMCIA */
+#endif /* !AHA152X_PCMCIA */
 
 static struct scsi_host_template aha152x_driver_template;
 
@@ -863,7 +852,7 @@
 	if (shpnt->irq)
 		free_irq(shpnt->irq, shpnt);
 
-#if !defined(PCMCIA)
+#if !defined(AHA152X_PCMCIA)
 	if (shpnt->io_port)
 		release_region(shpnt->io_port, IO_RANGE);
 #endif
@@ -948,7 +937,6 @@
 	   SCp.ptr              : buffer pointer
 	   SCp.this_residual    : buffer length
 	   SCp.buffer           : next buffer
-	   SCp.buffers_residual : left buffers in list
 	   SCp.phase            : current state of the command */
 
 	if ((phase & resetting) || !scsi_sglist(SCpnt)) {
@@ -956,13 +944,11 @@
 		SCpnt->SCp.this_residual = 0;
 		scsi_set_resid(SCpnt, 0);
 		SCpnt->SCp.buffer           = NULL;
-		SCpnt->SCp.buffers_residual = 0;
 	} else {
 		scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
 		SCpnt->SCp.buffer           = scsi_sglist(SCpnt);
 		SCpnt->SCp.ptr              = SG_ADDRESS(SCpnt->SCp.buffer);
 		SCpnt->SCp.this_residual    = SCpnt->SCp.buffer->length;
-		SCpnt->SCp.buffers_residual = scsi_sg_count(SCpnt) - 1;
 	}
 
 	DO_LOCK(flags);
@@ -2030,10 +2016,9 @@
 				}
 
 				if (CURRENT_SC->SCp.this_residual == 0 &&
-				    CURRENT_SC->SCp.buffers_residual > 0) {
+				    !sg_is_last(CURRENT_SC->SCp.buffer)) {
 					/* advance to next buffer */
-					CURRENT_SC->SCp.buffers_residual--;
-					CURRENT_SC->SCp.buffer++;
+					CURRENT_SC->SCp.buffer = sg_next(CURRENT_SC->SCp.buffer);
 					CURRENT_SC->SCp.ptr           = SG_ADDRESS(CURRENT_SC->SCp.buffer);
 					CURRENT_SC->SCp.this_residual = CURRENT_SC->SCp.buffer->length;
 				}
@@ -2136,10 +2121,10 @@
 			CMD_INC_RESID(CURRENT_SC, -2 * data_count);
 		}
 
-		if(CURRENT_SC->SCp.this_residual==0 && CURRENT_SC->SCp.buffers_residual>0) {
+		if (CURRENT_SC->SCp.this_residual == 0 &&
+		    !sg_is_last(CURRENT_SC->SCp.buffer)) {
 			/* advance to next buffer */
-			CURRENT_SC->SCp.buffers_residual--;
-			CURRENT_SC->SCp.buffer++;
+			CURRENT_SC->SCp.buffer = sg_next(CURRENT_SC->SCp.buffer);
 			CURRENT_SC->SCp.ptr           = SG_ADDRESS(CURRENT_SC->SCp.buffer);
 			CURRENT_SC->SCp.this_residual = CURRENT_SC->SCp.buffer->length;
 		}
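
Note: the same idea removes SCp.buffers_residual — rather than counting down the remaining buffers, the driver asks the scatterlist directly: sg_is_last() says whether another entry follows, and sg_next() advances to it. The advance step in generic form (sg_virt() is the generic equivalent of this driver's SG_ADDRESS macro):

	if (scp->SCp.this_residual == 0 && !sg_is_last(scp->SCp.buffer)) {
		scp->SCp.buffer        = sg_next(scp->SCp.buffer);
		scp->SCp.ptr           = sg_virt(scp->SCp.buffer);
		scp->SCp.this_residual = scp->SCp.buffer->length;
	}
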
@@ -2158,22 +2143,26 @@
 static void datao_end(struct Scsi_Host *shpnt)
 {
 	if(TESTLO(DMASTAT, DFIFOEMP)) {
-		int data_count = (DATA_LEN - scsi_get_resid(CURRENT_SC)) -
-			GETSTCNT();
+		u32 datao_cnt = GETSTCNT();
+		int datao_out = DATA_LEN - scsi_get_resid(CURRENT_SC);
+		int done;
+		struct scatterlist *sg = scsi_sglist(CURRENT_SC);
 
-		CMD_INC_RESID(CURRENT_SC, data_count);
+		CMD_INC_RESID(CURRENT_SC, datao_out - datao_cnt);
 
-		data_count -= CURRENT_SC->SCp.ptr -
-			SG_ADDRESS(CURRENT_SC->SCp.buffer);
-		while(data_count>0) {
-			CURRENT_SC->SCp.buffer--;
-			CURRENT_SC->SCp.buffers_residual++;
-			data_count -= CURRENT_SC->SCp.buffer->length;
+		done = scsi_bufflen(CURRENT_SC) - scsi_get_resid(CURRENT_SC);
+		/* Locate the first SG entry not yet sent */
+		while (done > 0 && !sg_is_last(sg)) {
+			if (done < sg->length)
+				break;
+			done -= sg->length;
+			sg = sg_next(sg);
 		}
-		CURRENT_SC->SCp.ptr = SG_ADDRESS(CURRENT_SC->SCp.buffer) -
-			data_count;
-		CURRENT_SC->SCp.this_residual = CURRENT_SC->SCp.buffer->length +
-			data_count;
+
+		CURRENT_SC->SCp.buffer = sg;
+		CURRENT_SC->SCp.ptr = SG_ADDRESS(CURRENT_SC->SCp.buffer) + done;
+		CURRENT_SC->SCp.this_residual = CURRENT_SC->SCp.buffer->length -
+			done;
 	}
 
 	SETPORT(SXFRCTL0, CH1|CLRCH1|CLRSTCNT);
@@ -2501,7 +2490,7 @@
 
 	seq_printf(m, "); resid=%d; residual=%d; buffers=%d; phase |",
 		scsi_get_resid(ptr), ptr->SCp.this_residual,
-		ptr->SCp.buffers_residual);
+		sg_nents(ptr->SCp.buffer) - 1);
 
 	if (ptr->SCp.phase & not_issued)
 		seq_puts(m, "not issued|");
@@ -2920,11 +2909,11 @@
 	.can_queue			= 1,
 	.this_id			= 7,
 	.sg_tablesize			= SG_ALL,
-	.use_clustering			= DISABLE_CLUSTERING,
+	.dma_boundary			= PAGE_SIZE - 1,
 	.slave_alloc			= aha152x_adjust_queue,
 };
 
-#if !defined(PCMCIA)
+#if !defined(AHA152X_PCMCIA)
 static int setup_count;
 static struct aha152x_setup setup[2];
 
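
Note: use_clustering is gone from scsi_host_template in this kernel — segment merging is now on by default, and a driver that cannot handle merged segments expresses the restriction as a DMA boundary mask instead. ".dma_boundary = PAGE_SIZE - 1" (as set above) keeps every segment inside one page, equivalent to the old DISABLE_CLUSTERING; hosts that had ENABLE_CLUSTERING simply drop the line, as the other templates in this patch do. Sketch:

	static struct scsi_host_template hyp_template = {	/* hypothetical host */
		.name		= "hyp",
		.sg_tablesize	= SG_ALL,
		/* was: .use_clustering = DISABLE_CLUSTERING */
		.dma_boundary	= PAGE_SIZE - 1,	/* no segment crosses a page */
	};
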
@@ -3392,4 +3381,4 @@
 __setup("aha152x=", aha152x_setup);
 #endif
 
-#endif /* !PCMCIA */
+#endif /* !AHA152X_PCMCIA */
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index 41add33..dc5667a 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  *  Driver for Adaptec AHA-1542 SCSI host adapters
  *
@@ -58,8 +59,15 @@
 	int aha1542_last_mbi_used;
 	int aha1542_last_mbo_used;
 	struct scsi_cmnd *int_cmds[AHA1542_MAILBOXES];
-	struct mailbox mb[2 * AHA1542_MAILBOXES];
-	struct ccb ccb[AHA1542_MAILBOXES];
+	struct mailbox *mb;
+	dma_addr_t mb_handle;
+	struct ccb *ccb;
+	dma_addr_t ccb_handle;
+};
+
+struct aha1542_cmd {
+	struct chain *chain;
+	dma_addr_t chain_handle;
 };
 
 static inline void aha1542_intr_reset(u16 base)
@@ -233,6 +241,21 @@
 	return 1;
 }
 
+static void aha1542_free_cmd(struct scsi_cmnd *cmd)
+{
+	struct aha1542_cmd *acmd = scsi_cmd_priv(cmd);
+	struct device *dev = cmd->device->host->dma_dev;
+	size_t len = scsi_sg_count(cmd) * sizeof(struct chain);
+
+	if (acmd->chain) {
+		dma_unmap_single(dev, acmd->chain_handle, len, DMA_TO_DEVICE);
+		kfree(acmd->chain);
+	}
+
+	acmd->chain = NULL;
+	scsi_dma_unmap(cmd);
+}
+
 static irqreturn_t aha1542_interrupt(int irq, void *dev_id)
 {
 	struct Scsi_Host *sh = dev_id;
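
Note: struct aha1542_cmd is per-command driver data. Setting ".cmd_size = sizeof(struct aha1542_cmd)" in the host template (see the template hunk further down) makes the midlayer allocate it right behind every scsi_cmnd, and scsi_cmd_priv() returns it — no more stashing a kmalloc'ed pointer in host_scribble. The access pattern:

	struct aha1542_cmd *acmd = scsi_cmd_priv(cmd);	/* memory owned by the midlayer */

	acmd->chain_handle = dma_map_single(dev, acmd->chain, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, acmd->chain_handle))	/* always check the mapping */
		return SCSI_MLQUEUE_HOST_BUSY;
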
@@ -303,7 +326,7 @@
 			return IRQ_HANDLED;
 		};
 
-		mbo = (scsi2int(mb[mbi].ccbptr) - (isa_virt_to_bus(&ccb[0]))) / sizeof(struct ccb);
+		mbo = (scsi2int(mb[mbi].ccbptr) - (unsigned long)aha1542->ccb_handle) / sizeof(struct ccb);
 		mbistatus = mb[mbi].status;
 		mb[mbi].status = 0;
 		aha1542->aha1542_last_mbi_used = mbi;
@@ -331,8 +354,7 @@
 			return IRQ_HANDLED;
 		}
 		my_done = tmp_cmd->scsi_done;
-		kfree(tmp_cmd->host_scribble);
-		tmp_cmd->host_scribble = NULL;
+		aha1542_free_cmd(tmp_cmd);
 		/* Fetch the sense data, and tuck it away, in the required slot.  The
 		   Adaptec automatically fetches it, and there is no guarantee that
 		   we will still have it in the cdb when we come back */
@@ -369,6 +391,7 @@
 
 static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
 {
+	struct aha1542_cmd *acmd = scsi_cmd_priv(cmd);
 	struct aha1542_hostdata *aha1542 = shost_priv(sh);
 	u8 direction;
 	u8 target = cmd->device->id;
@@ -378,7 +401,6 @@
 	int mbo, sg_count;
 	struct mailbox *mb = aha1542->mb;
 	struct ccb *ccb = aha1542->ccb;
-	struct chain *cptr;
 
 	if (*cmd->cmnd == REQUEST_SENSE) {
 		/* Don't do the command - we have the sense data already */
@@ -398,15 +420,17 @@
 		print_hex_dump_bytes("command: ", DUMP_PREFIX_NONE, cmd->cmnd, cmd->cmd_len);
 	}
 #endif
-	if (bufflen) {	/* allocate memory before taking host_lock */
-		sg_count = scsi_sg_count(cmd);
-		cptr = kmalloc_array(sg_count, sizeof(*cptr),
-				     GFP_KERNEL | GFP_DMA);
-		if (!cptr)
-			return SCSI_MLQUEUE_HOST_BUSY;
-	} else {
-		sg_count = 0;
-		cptr = NULL;
+	sg_count = scsi_dma_map(cmd);
+	if (sg_count) {
+		size_t len = sg_count * sizeof(struct chain);
+
+		acmd->chain = kmalloc(len, GFP_DMA);
+		if (!acmd->chain)
+			goto out_unmap;
+		acmd->chain_handle = dma_map_single(sh->dma_dev, acmd->chain,
+				len, DMA_TO_DEVICE);
+		if (dma_mapping_error(sh->dma_dev, acmd->chain_handle))
+			goto out_free_chain;
 	}
 
 	/* Use the outgoing mailboxes in a round-robin fashion, because this
@@ -437,7 +461,8 @@
 	shost_printk(KERN_DEBUG, sh, "Sending command (%d %p)...", mbo, cmd->scsi_done);
 #endif
 
-	any2scsi(mb[mbo].ccbptr, isa_virt_to_bus(&ccb[mbo]));	/* This gets trashed for some reason */
+	/* This gets trashed for some reason */
+	any2scsi(mb[mbo].ccbptr, aha1542->ccb_handle + mbo * sizeof(*ccb));
 
 	memset(&ccb[mbo], 0, sizeof(struct ccb));
 
@@ -456,21 +481,18 @@
 		int i;
 
 		ccb[mbo].op = 2;	/* SCSI Initiator Command  w/scatter-gather */
-		cmd->host_scribble = (void *)cptr;
 		scsi_for_each_sg(cmd, sg, sg_count, i) {
-			any2scsi(cptr[i].dataptr, isa_page_to_bus(sg_page(sg))
-								+ sg->offset);
-			any2scsi(cptr[i].datalen, sg->length);
+			any2scsi(acmd->chain[i].dataptr, sg_dma_address(sg));
+			any2scsi(acmd->chain[i].datalen, sg_dma_len(sg));
 		};
 		any2scsi(ccb[mbo].datalen, sg_count * sizeof(struct chain));
-		any2scsi(ccb[mbo].dataptr, isa_virt_to_bus(cptr));
+		any2scsi(ccb[mbo].dataptr, acmd->chain_handle);
 #ifdef DEBUG
-		shost_printk(KERN_DEBUG, sh, "cptr %p: ", cptr);
-		print_hex_dump_bytes("cptr: ", DUMP_PREFIX_NONE, cptr, 18);
+		shost_printk(KERN_DEBUG, sh, "cptr %p: ", acmd->chain);
+		print_hex_dump_bytes("cptr: ", DUMP_PREFIX_NONE, acmd->chain, 18);
 #endif
 	} else {
 		ccb[mbo].op = 0;	/* SCSI Initiator Command */
-		cmd->host_scribble = NULL;
 		any2scsi(ccb[mbo].datalen, 0);
 		any2scsi(ccb[mbo].dataptr, 0);
 	};
@@ -488,24 +510,29 @@
 	spin_unlock_irqrestore(sh->host_lock, flags);
 
 	return 0;
+out_free_chain:
+	kfree(acmd->chain);
+	acmd->chain = NULL;
+out_unmap:
+	scsi_dma_unmap(cmd);
+	return SCSI_MLQUEUE_HOST_BUSY;
 }
 
 /* Initialize mailboxes */
 static void setup_mailboxes(struct Scsi_Host *sh)
 {
 	struct aha1542_hostdata *aha1542 = shost_priv(sh);
-	int i;
-	struct mailbox *mb = aha1542->mb;
-	struct ccb *ccb = aha1542->ccb;
-
 	u8 mb_cmd[5] = { CMD_MBINIT, AHA1542_MAILBOXES, 0, 0, 0};
+	int i;
 
 	for (i = 0; i < AHA1542_MAILBOXES; i++) {
-		mb[i].status = mb[AHA1542_MAILBOXES + i].status = 0;
-		any2scsi(mb[i].ccbptr, isa_virt_to_bus(&ccb[i]));
+		aha1542->mb[i].status = 0;
+		any2scsi(aha1542->mb[i].ccbptr,
+			 aha1542->ccb_handle + i * sizeof(struct ccb));
+		aha1542->mb[AHA1542_MAILBOXES + i].status = 0;
 	};
 	aha1542_intr_reset(sh->io_port);	/* reset interrupts, so they don't block */
-	any2scsi((mb_cmd + 2), isa_virt_to_bus(mb));
+	any2scsi(mb_cmd + 2, aha1542->mb_handle);
 	if (aha1542_out(sh->io_port, mb_cmd, 5))
 		shost_printk(KERN_ERR, sh, "failed setting up mailboxes\n");
 	aha1542_intr_reset(sh->io_port);
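
Note: scsi_dma_map() replaces the open-coded isa_*_to_bus() translations above — it maps the command's scatterlist through the DMA API and returns the number of mapped segments (0 for a dataless command, negative on failure). The addresses programmed into the CCB must then come from sg_dma_address()/sg_dma_len(), and scsi_dma_unmap() undoes the mapping. The shape of the conversion, roughly:

	struct scatterlist *sg;
	int i, nseg;

	nseg = scsi_dma_map(cmd);
	if (nseg < 0)
		return SCSI_MLQUEUE_HOST_BUSY;

	scsi_for_each_sg(cmd, sg, nseg, i) {
		any2scsi(chain[i].dataptr, sg_dma_address(sg));	/* bus address */
		any2scsi(chain[i].datalen, sg_dma_len(sg));	/* mapped length */
	}
	/* ...and scsi_dma_unmap(cmd) in the completion and error paths */
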
@@ -739,11 +766,26 @@
 	if (aha1542->bios_translation == BIOS_TRANSLATION_25563)
 		shost_printk(KERN_INFO, sh, "Using extended bios translation\n");
 
+	if (dma_set_mask_and_coherent(pdev, DMA_BIT_MASK(24)) < 0)
+		goto unregister;
+
+	aha1542->mb = dma_alloc_coherent(pdev,
+			AHA1542_MAILBOXES * 2 * sizeof(struct mailbox),
+			&aha1542->mb_handle, GFP_KERNEL);
+	if (!aha1542->mb)
+		goto unregister;
+
+	aha1542->ccb = dma_alloc_coherent(pdev,
+			AHA1542_MAILBOXES * sizeof(struct ccb),
+			&aha1542->ccb_handle, GFP_KERNEL);
+	if (!aha1542->ccb)
+		goto free_mb;
+
 	setup_mailboxes(sh);
 
 	if (request_irq(sh->irq, aha1542_interrupt, 0, "aha1542", sh)) {
 		shost_printk(KERN_ERR, sh, "Unable to allocate IRQ.\n");
-		goto unregister;
+		goto free_ccb;
 	}
 	if (sh->dma_channel != 0xFF) {
 		if (request_dma(sh->dma_channel, "aha1542")) {
@@ -762,11 +804,18 @@
 	scsi_scan_host(sh);
 
 	return sh;
+
 free_dma:
 	if (sh->dma_channel != 0xff)
 		free_dma(sh->dma_channel);
 free_irq:
 	free_irq(sh->irq, sh);
+free_ccb:
+	dma_free_coherent(pdev, AHA1542_MAILBOXES * sizeof(struct ccb),
+			  aha1542->ccb, aha1542->ccb_handle);
+free_mb:
+	dma_free_coherent(pdev, AHA1542_MAILBOXES * 2 * sizeof(struct mailbox),
+			  aha1542->mb, aha1542->mb_handle);
 unregister:
 	scsi_host_put(sh);
 release:
@@ -777,9 +826,16 @@
 
 static int aha1542_release(struct Scsi_Host *sh)
 {
+	struct aha1542_hostdata *aha1542 = shost_priv(sh);
+	struct device *dev = sh->dma_dev;
+
 	scsi_remove_host(sh);
 	if (sh->dma_channel != 0xff)
 		free_dma(sh->dma_channel);
+	dma_free_coherent(dev, AHA1542_MAILBOXES * sizeof(struct ccb),
+			  aha1542->ccb, aha1542->ccb_handle);
+	dma_free_coherent(dev, AHA1542_MAILBOXES * 2 * sizeof(struct mailbox),
+			  aha1542->mb, aha1542->mb_handle);
 	if (sh->irq)
 		free_irq(sh->irq, sh);
 	if (sh->io_port && sh->n_io_port)
@@ -826,7 +882,8 @@
 
 	aha1542->aha1542_last_mbo_used = mbo;
 
-	any2scsi(mb[mbo].ccbptr, isa_virt_to_bus(&ccb[mbo]));	/* This gets trashed for some reason */
+	/* This gets trashed for some reason */
+	any2scsi(mb[mbo].ccbptr, aha1542->ccb_handle + mbo * sizeof(*ccb));
 
 	memset(&ccb[mbo], 0, sizeof(struct ccb));
 
@@ -901,8 +958,7 @@
 				 */
 				continue;
 			}
-			kfree(tmp_cmd->host_scribble);
-			tmp_cmd->host_scribble = NULL;
+			aha1542_free_cmd(tmp_cmd);
 			aha1542->int_cmds[i] = NULL;
 			aha1542->mb[i].status = 0;
 		}
@@ -946,6 +1002,7 @@
 	.module			= THIS_MODULE,
 	.proc_name		= "aha1542",
 	.name			= "Adaptec 1542",
+	.cmd_size		= sizeof(struct aha1542_cmd),
 	.queuecommand		= aha1542_queuecommand,
 	.eh_device_reset_handler= aha1542_dev_reset,
 	.eh_bus_reset_handler	= aha1542_bus_reset,
@@ -955,7 +1012,6 @@
 	.this_id		= 7,
 	.sg_tablesize		= 16,
 	.unchecked_isa_dma	= 1, 
-	.use_clustering		= ENABLE_CLUSTERING,
 };
 
 static int aha1542_isa_match(struct device *pdev, unsigned int ndev)
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index 786bf7f..da4150c 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -545,7 +545,6 @@
 	.can_queue        = AHA1740_ECBS,
 	.this_id          = 7,
 	.sg_tablesize     = AHA1740_SCATTER,
-	.use_clustering   = ENABLE_CLUSTERING,
 	.eh_abort_handler = aha1740_eh_abort_handler,
 };
 
diff --git a/drivers/scsi/aic7xxx/Kconfig.aic79xx b/drivers/scsi/aic7xxx/Kconfig.aic79xx
index 3b3d599..16743fb 100644
--- a/drivers/scsi/aic7xxx/Kconfig.aic79xx
+++ b/drivers/scsi/aic7xxx/Kconfig.aic79xx
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 #
 # AIC79XX 2.5.X Kernel configuration File.
 # $Id: //depot/linux-aic79xx-2.5.0/drivers/scsi/aic7xxx/Kconfig.aic79xx#4 $
diff --git a/drivers/scsi/aic7xxx/Kconfig.aic7xxx b/drivers/scsi/aic7xxx/Kconfig.aic7xxx
index 55ac55e..3546b8c 100644
--- a/drivers/scsi/aic7xxx/Kconfig.aic7xxx
+++ b/drivers/scsi/aic7xxx/Kconfig.aic7xxx
@@ -1,9 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
 #
 # AIC7XXX and AIC79XX 2.5.X Kernel configuration File.
 # $Id: //depot/linux-aic79xx-2.5.0/drivers/scsi/aic7xxx/Kconfig.aic7xxx#7 $
 #
 config SCSI_AIC7XXX
-	tristate "Adaptec AIC7xxx Fast -> U160 support (New Driver)"
+	tristate "Adaptec AIC7xxx Fast -> U160 support"
 	depends on (PCI || EISA) && SCSI
 	select SCSI_SPI_ATTRS
 	---help---
diff --git a/drivers/scsi/aic7xxx/Makefile b/drivers/scsi/aic7xxx/Makefile
index c15be25..e0188ec 100644
--- a/drivers/scsi/aic7xxx/Makefile
+++ b/drivers/scsi/aic7xxx/Makefile
@@ -34,7 +34,6 @@
 						   aic79xx_proc.o	\
 						   aic79xx_osm_pci.o
 
-ccflags-y += -Idrivers/scsi
 ifdef WARNINGS_BECOME_ERRORS
 ccflags-y += -Werror
 endif
diff --git a/drivers/scsi/aic7xxx/aic7770.c b/drivers/scsi/aic7xxx/aic7770.c
index 5000bd6..176704b 100644
--- a/drivers/scsi/aic7xxx/aic7770.c
+++ b/drivers/scsi/aic7xxx/aic7770.c
@@ -42,15 +42,9 @@
  * $FreeBSD$
  */
 
-#ifdef __linux__
 #include "aic7xxx_osm.h"
 #include "aic7xxx_inline.h"
 #include "aic7xxx_93cx6.h"
-#else
-#include <dev/aic7xxx/aic7xxx_osm.h>
-#include <dev/aic7xxx/aic7xxx_inline.h>
-#include <dev/aic7xxx/aic7xxx_93cx6.h>
-#endif
 
 #define ID_AIC7770	0x04907770
 #define ID_AHA_274x	0x04907771
diff --git a/drivers/scsi/aic7xxx/aic7770_osm.c b/drivers/scsi/aic7xxx/aic7770_osm.c
index 3d401d0..bdd177e 100644
--- a/drivers/scsi/aic7xxx/aic7770_osm.c
+++ b/drivers/scsi/aic7xxx/aic7770_osm.c
@@ -91,6 +91,7 @@
 	ahc = ahc_alloc(&aic7xxx_driver_template, name);
 	if (ahc == NULL)
 		return (ENOMEM);
+	ahc->dev = dev;
 	error = aic7770_config(ahc, aic7770_ident_table + edev->id.driver_data,
 			       eisaBase);
 	if (error != 0) {
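
Note: recording the generic struct device here (see the aic7xxx.h hunk below adding "struct device *dev") gives the EISA and PCI front ends one common handle for the DMA API, so shared code can allocate through dma_alloc_coherent(ahc->dev, ...) rather than bus-specific wrappers — presumably groundwork for dropping the FreeBSD-style dmamap glue removed throughout these files. Sketch:

	/* shared code needs no bus-specific wrapper once ahc->dev is set */
	vaddr = dma_alloc_coherent(ahc->dev, size, &baddr, GFP_ATOMIC);
	if (vaddr == NULL)
		return (ENOMEM);	/* matches this driver's FreeBSD-style returns */
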
diff --git a/drivers/scsi/aic7xxx/aic79xx.h b/drivers/scsi/aic7xxx/aic79xx.h
index 31f2bb9..9a51555 100644
--- a/drivers/scsi/aic7xxx/aic79xx.h
+++ b/drivers/scsi/aic7xxx/aic79xx.h
@@ -607,9 +607,6 @@
 	ahd_io_ctx_t		  io_ctx;
 	struct ahd_softc	 *ahd_softc;
 	scb_flag		  flags;
-#ifndef __linux__
-	bus_dmamap_t		  dmamap;
-#endif
 	struct scb_platform_data *platform_data;
 	struct map_node	 	 *hscb_map;
 	struct map_node	 	 *sg_map;
@@ -1056,9 +1053,6 @@
 struct ahd_softc {
 	bus_space_tag_t           tags[2];
 	bus_space_handle_t        bshs[2];
-#ifndef __linux__
-	bus_dma_tag_t		  buffer_dmat;   /* dmat for buffer I/O */
-#endif
 	struct scb_data		  scb_data;
 
 	struct hardware_scb	 *next_queued_hscb;
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index 2d82ec8..7e5044b 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -40,16 +40,9 @@
  * $Id: //depot/aic7xxx/aic7xxx/aic79xx.c#250 $
  */
 
-#ifdef __linux__
 #include "aic79xx_osm.h"
 #include "aic79xx_inline.h"
 #include "aicasm/aicasm_insformat.h"
-#else
-#include <dev/aic7xxx/aic79xx_osm.h>
-#include <dev/aic7xxx/aic79xx_inline.h>
-#include <dev/aic7xxx/aicasm/aicasm_insformat.h>
-#endif
-
 
 /***************************** Lookup Tables **********************************/
 static const char *const ahd_chip_names[] =
@@ -59,7 +52,6 @@
 	"aic7902",
 	"aic7901A"
 };
-static const u_int num_chip_names = ARRAY_SIZE(ahd_chip_names);
 
 /*
  * Hardware error codes.
@@ -2293,6 +2285,7 @@
 			switch (scb->hscb->task_management) {
 			case SIU_TASKMGMT_ABORT_TASK:
 				tag = SCB_GET_TAG(scb);
+				/* fall through */
 			case SIU_TASKMGMT_ABORT_TASK_SET:
 			case SIU_TASKMGMT_CLEAR_TASK_SET:
 				lun = scb->hscb->lun;
@@ -2303,6 +2296,7 @@
 				break;
 			case SIU_TASKMGMT_LUN_RESET:
 				lun = scb->hscb->lun;
+				/* fall through */
 			case SIU_TASKMGMT_TARGET_RESET:
 			{
 				struct ahd_devinfo devinfo;
@@ -6172,17 +6166,11 @@
 	case 2:
 		ahd_dma_tag_destroy(ahd, ahd->shared_data_dmat);
 	case 1:
-#ifndef __linux__
-		ahd_dma_tag_destroy(ahd, ahd->buffer_dmat);
-#endif
 		break;
 	case 0:
 		break;
 	}
 
-#ifndef __linux__
-	ahd_dma_tag_destroy(ahd, ahd->parent_dmat);
-#endif
 	ahd_platform_free(ahd);
 	ahd_fini_scbdata(ahd);
 	for (i = 0; i < AHD_NUM_TARGETS; i++) {
@@ -6564,8 +6552,8 @@
 			kfree(sns_map);
 		}
 		ahd_dma_tag_destroy(ahd, scb_data->sense_dmat);
-		/* FALLTHROUGH */
 	}
+		/* fall through */
 	case 6:
 	{
 		struct map_node *sg_map;
@@ -6579,8 +6567,8 @@
 			kfree(sg_map);
 		}
 		ahd_dma_tag_destroy(ahd, scb_data->sg_dmat);
-		/* FALLTHROUGH */
 	}
+		/* fall through */
 	case 5:
 	{
 		struct map_node *hscb_map;
@@ -6934,9 +6922,6 @@
 	for (i = 0; i < newcount; i++) {
 		struct scb_platform_data *pdata;
 		u_int col_tag;
-#ifndef __linux__
-		int error;
-#endif
 
 		next_scb = kmalloc(sizeof(*next_scb), GFP_ATOMIC);
 		if (next_scb == NULL)
@@ -6970,15 +6955,6 @@
 			next_scb->sg_list_busaddr += sizeof(struct ahd_dma_seg);
 		next_scb->ahd_softc = ahd;
 		next_scb->flags = SCB_FLAG_NONE;
-#ifndef __linux__
-		error = ahd_dmamap_create(ahd, ahd->buffer_dmat, /*flags*/0,
-					  &next_scb->dmamap);
-		if (error != 0) {
-			kfree(next_scb);
-			kfree(pdata);
-			break;
-		}
-#endif
 		next_scb->hscb->tag = ahd_htole16(scb_data->numscbs);
 		col_tag = scb_data->numscbs ^ 0x100;
 		next_scb->col_scb = ahd_find_scb_by_tag(ahd, col_tag);
@@ -7091,24 +7067,6 @@
 	if ((AHD_TMODE_ENABLE & (0x1 << ahd->unit)) == 0)
 		ahd->features &= ~AHD_TARGETMODE;
 
-#ifndef __linux__
-	/* DMA tag for mapping buffers into device visible space. */
-	if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1,
-			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
-			       /*lowaddr*/ahd->flags & AHD_39BIT_ADDRESSING
-					? (dma_addr_t)0x7FFFFFFFFFULL
-					: BUS_SPACE_MAXADDR_32BIT,
-			       /*highaddr*/BUS_SPACE_MAXADDR,
-			       /*filter*/NULL, /*filterarg*/NULL,
-			       /*maxsize*/(AHD_NSEG - 1) * PAGE_SIZE,
-			       /*nsegments*/AHD_NSEG,
-			       /*maxsegsz*/AHD_MAXTRANSFER_SIZE,
-			       /*flags*/BUS_DMA_ALLOCNOW,
-			       &ahd->buffer_dmat) != 0) {
-		return (ENOMEM);
-	}
-#endif
-
 	ahd->init_level++;
 
 	/*
@@ -7253,6 +7211,7 @@
 		case FLX_CSTAT_OVER:
 		case FLX_CSTAT_UNDER:
 			warn_user++;
+			/* fall through */
 		case FLX_CSTAT_INVALID:
 		case FLX_CSTAT_OKAY:
 			if (warn_user == 0 && bootverbose == 0)
@@ -8457,7 +8416,7 @@
 			if ((scb->flags & SCB_ACTIVE) == 0)
 				printk("Inactive SCB in Waiting List\n");
 			ahd_done_with_status(ahd, scb, status);
-			/* FALLTHROUGH */
+			/* fall through */
 		case SEARCH_REMOVE:
 			ahd_rem_wscb(ahd, scbid, prev, next, tid);
 			*list_tail = prev;
@@ -8466,6 +8425,7 @@
 			break;
 		case SEARCH_PRINT:
 			printk("0x%x ", scbid);
+			/* fall through */
 		case SEARCH_COUNT:
 			prev = scbid;
 			break;
@@ -9591,8 +9551,8 @@
 	{
 		fmt3_ins = &instr.format3;
 		fmt3_ins->address = ahd_resolve_seqaddr(ahd, fmt3_ins->address);
-		/* FALLTHROUGH */
 	}
+		/* fall through */
 	case AIC_OP_OR:
 	case AIC_OP_AND:
 	case AIC_OP_XOR:
@@ -9603,7 +9563,7 @@
 			fmt1_ins->immediate = dconsts[fmt1_ins->immediate];
 		}
 		fmt1_ins->parity = 0;
-		/* FALLTHROUGH */
+		/* fall through */
 	case AIC_OP_ROL:
 	{
 		int i, count;
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 2588b8f..5799251 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -920,7 +920,6 @@
 	.this_id		= -1,
 	.max_sectors		= 8192,
 	.cmd_per_lun		= 2,
-	.use_clustering		= ENABLE_CLUSTERING,
 	.slave_alloc		= ahd_linux_slave_alloc,
 	.slave_configure	= ahd_linux_slave_configure,
 	.target_alloc		= ahd_linux_target_alloc,
diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c
index cc9bd26..8397ae9 100644
--- a/drivers/scsi/aic7xxx/aic79xx_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_pci.c
@@ -41,14 +41,8 @@
  * $Id: //depot/aic7xxx/aic7xxx/aic79xx_pci.c#92 $
  */
 
-#ifdef __linux__
 #include "aic79xx_osm.h"
 #include "aic79xx_inline.h"
-#else
-#include <dev/aic7xxx/aic79xx_osm.h>
-#include <dev/aic7xxx/aic79xx_inline.h>
-#endif
-
 #include "aic79xx_pci.h"
 
 static inline uint64_t
@@ -294,13 +288,11 @@
 int
 ahd_pci_config(struct ahd_softc *ahd, const struct ahd_pci_identity *entry)
 {
-	struct scb_data *shared_scb_data;
 	u_int		 command;
 	uint32_t	 devconfig;
 	uint16_t	 subvendor; 
 	int		 error;
 
-	shared_scb_data = NULL;
 	ahd->description = entry->name;
 	/*
 	 * Record if this is an HP board.
diff --git a/drivers/scsi/aic7xxx/aic7xxx.h b/drivers/scsi/aic7xxx/aic7xxx.h
index 4ce4e90..88b90f9 100644
--- a/drivers/scsi/aic7xxx/aic7xxx.h
+++ b/drivers/scsi/aic7xxx/aic7xxx.h
@@ -568,9 +568,6 @@
 	ahc_io_ctx_t		  io_ctx;
 	struct ahc_softc	 *ahc_softc;
 	scb_flag		  flags;
-#ifndef __linux__
-	bus_dmamap_t		  dmamap;
-#endif
 	struct scb_platform_data *platform_data;
 	struct sg_map_node	 *sg_map;
 	struct ahc_dma_seg 	 *sg_list;
@@ -906,9 +903,6 @@
 struct ahc_softc {
 	bus_space_tag_t           tag;
 	bus_space_handle_t        bsh;
-#ifndef __linux__
-	bus_dma_tag_t		  buffer_dmat;   /* dmat for buffer I/O */
-#endif
 	struct scb_data		 *scb_data;
 
 	struct scb		 *next_queued_scb;
@@ -949,6 +943,7 @@
 	 * Platform specific device information.
 	 */
 	ahc_dev_softc_t		  dev_softc;
+	struct device		  *dev;
 
 	/*
 	 * Bus specific device information.
diff --git a/drivers/scsi/aic7xxx/aic7xxx.reg b/drivers/scsi/aic7xxx/aic7xxx.reg
index ba0b411..00fde22 100644
--- a/drivers/scsi/aic7xxx/aic7xxx.reg
+++ b/drivers/scsi/aic7xxx/aic7xxx.reg
@@ -1666,7 +1666,7 @@
 	size		6
 	/*
 	 * These are reserved registers in the card's scratch ram on the 2742.
-	 * The EISA configuraiton chip is mapped here.  On Rev E. of the
+	 * The EISA configuration chip is mapped here.  On Rev E. of the
 	 * aic7770, the sequencer can use this area for scratch, but the
 	 * host cannot directly access these registers.  On later chips, this
 	 * area can be read and written by both the host and the sequencer.
diff --git a/drivers/scsi/aic7xxx/aic7xxx_93cx6.c b/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
index 9e85a7e..cc9e419 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
@@ -64,15 +64,9 @@
  *   bit to be sent from the chip.
  */
 
-#ifdef __linux__
 #include "aic7xxx_osm.h"
 #include "aic7xxx_inline.h"
 #include "aic7xxx_93cx6.h"
-#else
-#include <dev/aic7xxx/aic7xxx_osm.h>
-#include <dev/aic7xxx/aic7xxx_inline.h>
-#include <dev/aic7xxx/aic7xxx_93cx6.h>
-#endif
 
 /*
  * Right now, we only have to read the SEEPROM.  But we make it easier to
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index 915a34f..a9d40d3 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -40,15 +40,9 @@
  * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.c#155 $
  */
 
-#ifdef __linux__
 #include "aic7xxx_osm.h"
 #include "aic7xxx_inline.h"
 #include "aicasm/aicasm_insformat.h"
-#else
-#include <dev/aic7xxx/aic7xxx_osm.h>
-#include <dev/aic7xxx/aic7xxx_inline.h>
-#include <dev/aic7xxx/aicasm/aicasm_insformat.h>
-#endif
 
 /***************************** Lookup Tables **********************************/
 static const char *const ahc_chip_names[] = {
@@ -67,7 +61,6 @@
 	"aic7892",
 	"aic7899"
 };
-static const u_int num_chip_names = ARRAY_SIZE(ahc_chip_names);
 
 /*
  * Hardware error codes.
@@ -1673,7 +1666,7 @@
 					printk("\tCRC Value Mismatch\n");
 				if ((sstat2 & CRCENDERR) != 0)
 					printk("\tNo terminal CRC packet "
-					       "recevied\n");
+					       "received\n");
 				if ((sstat2 & CRCREQERR) != 0)
 					printk("\tIllegal CRC packet "
 					       "request\n");
@@ -4509,17 +4502,11 @@
 	case 2:
 		ahc_dma_tag_destroy(ahc, ahc->shared_data_dmat);
 	case 1:
-#ifndef __linux__
-		ahc_dma_tag_destroy(ahc, ahc->buffer_dmat);
-#endif
 		break;
 	case 0:
 		break;
 	}
 
-#ifndef __linux__
-	ahc_dma_tag_destroy(ahc, ahc->parent_dmat);
-#endif
 	ahc_platform_free(ahc);
 	ahc_fini_scbdata(ahc);
 	for (i = 0; i < AHC_NUM_TARGETS; i++) {
@@ -4933,24 +4920,30 @@
 		}
 		ahc_dma_tag_destroy(ahc, scb_data->sg_dmat);
 	}
+		/* fall through */
 	case 6:
 		ahc_dmamap_unload(ahc, scb_data->sense_dmat,
 				  scb_data->sense_dmamap);
+		/* fall through */
 	case 5:
 		ahc_dmamem_free(ahc, scb_data->sense_dmat, scb_data->sense,
 				scb_data->sense_dmamap);
 		ahc_dmamap_destroy(ahc, scb_data->sense_dmat,
 				   scb_data->sense_dmamap);
+		/* fall through */
 	case 4:
 		ahc_dma_tag_destroy(ahc, scb_data->sense_dmat);
+		/* fall through */
 	case 3:
 		ahc_dmamap_unload(ahc, scb_data->hscb_dmat,
 				  scb_data->hscb_dmamap);
+		/* fall through */
 	case 2:
 		ahc_dmamem_free(ahc, scb_data->hscb_dmat, scb_data->hscbs,
 				scb_data->hscb_dmamap);
 		ahc_dmamap_destroy(ahc, scb_data->hscb_dmat,
 				   scb_data->hscb_dmamap);
+		/* fall through */
 	case 1:
 		ahc_dma_tag_destroy(ahc, scb_data->hscb_dmat);
 		break;
@@ -5005,9 +4998,7 @@
 	newcount = min(newcount, (AHC_SCB_MAX_ALLOC - scb_data->numscbs));
 	for (i = 0; i < newcount; i++) {
 		struct scb_platform_data *pdata;
-#ifndef __linux__
-		int error;
-#endif
+
 		pdata = kmalloc(sizeof(*pdata), GFP_ATOMIC);
 		if (pdata == NULL)
 			break;
@@ -5021,12 +5012,6 @@
 		next_scb->sg_list_phys = physaddr + sizeof(struct ahc_dma_seg);
 		next_scb->ahc_softc = ahc;
 		next_scb->flags = SCB_FREE;
-#ifndef __linux__
-		error = ahc_dmamap_create(ahc, ahc->buffer_dmat, /*flags*/0,
-					  &next_scb->dmamap);
-		if (error != 0)
-			break;
-#endif
 		next_scb->hscb = &scb_data->hscbs[scb_data->numscbs];
 		next_scb->hscb->tag = ahc->scb_data->numscbs;
 		SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs,
@@ -5325,24 +5310,6 @@
 	if ((AHC_TMODE_ENABLE & (0x1 << ahc->unit)) == 0)
 		ahc->features &= ~AHC_TARGETMODE;
 
-#ifndef __linux__
-	/* DMA tag for mapping buffers into device visible space. */
-	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
-			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
-			       /*lowaddr*/ahc->flags & AHC_39BIT_ADDRESSING
-					? (dma_addr_t)0x7FFFFFFFFFULL
-					: BUS_SPACE_MAXADDR_32BIT,
-			       /*highaddr*/BUS_SPACE_MAXADDR,
-			       /*filter*/NULL, /*filterarg*/NULL,
-			       /*maxsize*/(AHC_NSEG - 1) * PAGE_SIZE,
-			       /*nsegments*/AHC_NSEG,
-			       /*maxsegsz*/AHC_MAXTRANSFER_SIZE,
-			       /*flags*/BUS_DMA_ALLOCNOW,
-			       &ahc->buffer_dmat) != 0) {
-		return (ENOMEM);
-	}
-#endif
-
 	ahc->init_level++;
 
 	/*
@@ -6041,8 +6008,8 @@
 				if ((scb->flags & SCB_ACTIVE) == 0)
 					printk("Inactive SCB in Waiting List\n");
 				ahc_done(ahc, scb);
-				/* FALLTHROUGH */
 			}
+				/* fall through */
 			case SEARCH_REMOVE:
 				next = ahc_rem_wscb(ahc, next, prev);
 				break;
@@ -7047,8 +7014,8 @@
 		}
 		address -= address_offset;
 		fmt3_ins->address = address;
-		/* FALLTHROUGH */
 	}
+		/* fall through */
 	case AIC_OP_OR:
 	case AIC_OP_AND:
 	case AIC_OP_XOR:
@@ -7074,7 +7041,7 @@
 			fmt1_ins->opcode = AIC_OP_AND;
 			fmt1_ins->immediate = 0xff;
 		}
-		/* FALLTHROUGH */
+		/* fall through */
 	case AIC_OP_ROL:
 		if ((ahc->features & AHC_ULTRA2) != 0) {
 			int i, count;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index c6be3ae..d5c4a0d 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -807,7 +807,6 @@
 	.this_id		= -1,
 	.max_sectors		= 8192,
 	.cmd_per_lun		= 2,
-	.use_clustering		= ENABLE_CLUSTERING,
 	.slave_alloc		= ahc_linux_slave_alloc,
 	.slave_configure	= ahc_linux_slave_configure,
 	.target_alloc		= ahc_linux_target_alloc,
@@ -861,8 +860,8 @@
 ahc_dmamem_alloc(struct ahc_softc *ahc, bus_dma_tag_t dmat, void** vaddr,
 		 int flags, bus_dmamap_t *mapp)
 {
-	*vaddr = pci_alloc_consistent(ahc->dev_softc,
-				      dmat->maxsize, mapp);
+	/* XXX: check if we really need the GFP_ATOMIC and unwind this mess! */
+	*vaddr = dma_alloc_coherent(ahc->dev, dmat->maxsize, mapp, GFP_ATOMIC);
 	if (*vaddr == NULL)
 		return ENOMEM;
 	return 0;
@@ -872,8 +871,7 @@
 ahc_dmamem_free(struct ahc_softc *ahc, bus_dma_tag_t dmat,
 		void* vaddr, bus_dmamap_t map)
 {
-	pci_free_consistent(ahc->dev_softc, dmat->maxsize,
-			    vaddr, map);
+	dma_free_coherent(ahc->dev, dmat->maxsize, vaddr, map);
 }
 
 int
@@ -1124,8 +1122,7 @@
 
 	host->transportt = ahc_linux_transport_template;
 
-	retval = scsi_add_host(host,
-			(ahc->dev_softc ? &ahc->dev_softc->dev : NULL));
+	retval = scsi_add_host(host, ahc->dev);
 	if (retval) {
 		printk(KERN_WARNING "aic7xxx: scsi_add_host failed\n");
 		scsi_host_put(host);
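
The conversion in ahc_dmamem_alloc() keeps GFP_ATOMIC deliberately: pci_alloc_consistent() was defined as dma_alloc_coherent(&pdev->dev, ..., GFP_ATOMIC), so passing anything weaker would change behaviour, and the in-line XXX comment records the follow-up task of proving GFP_KERNEL is safe at each caller. Equivalence sketch:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Behaviour-identical replacement for the old wrapper; a call site
 * known to run in process context could pass GFP_KERNEL instead. */
static void *alloc_coherent_compat(struct pci_dev *pdev, size_t size,
				   dma_addr_t *handle)
{
	return dma_alloc_coherent(&pdev->dev, size, handle, GFP_ATOMIC);
}
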
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
index 0fc14da..717d8d1 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
@@ -250,6 +250,7 @@
 		}
 	}
 	ahc->dev_softc = pci;
+	ahc->dev = &pci->dev;
 	error = ahc_pci_config(ahc, entry);
 	if (error != 0) {
 		ahc_free(ahc);
diff --git a/drivers/scsi/aic7xxx/aic7xxx_pci.c b/drivers/scsi/aic7xxx/aic7xxx_pci.c
index 673e826..656f680 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_pci.c
@@ -42,16 +42,9 @@
  * $Id: //depot/aic7xxx/aic7xxx/aic7xxx_pci.c#79 $
  */
 
-#ifdef __linux__
 #include "aic7xxx_osm.h"
 #include "aic7xxx_inline.h"
 #include "aic7xxx_93cx6.h"
-#else
-#include <dev/aic7xxx/aic7xxx_osm.h>
-#include <dev/aic7xxx/aic7xxx_inline.h>
-#include <dev/aic7xxx/aic7xxx_93cx6.h>
-#endif
-
 #include "aic7xxx_pci.h"
 
 static inline uint64_t
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm.h b/drivers/scsi/aic7xxx/aicasm/aicasm.h
index 51678dd..716a2ae 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm.h
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm.h
@@ -42,11 +42,7 @@
  * $FreeBSD$
  */
 
-#ifdef __linux__
 #include "../queue.h"
-#else
-#include <sys/queue.h>
-#endif
 
 #ifndef TRUE
 #define TRUE 1
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
index f1586a4..924d55a 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
@@ -52,11 +52,7 @@
 #include <string.h>
 #include <sysexits.h>
 
-#ifdef __linux__
 #include "../queue.h"
-#else
-#include <sys/queue.h>
-#endif
 
 #include "aicasm.h"
 #include "aicasm_symbol.h"
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y b/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y
index 708326d..8c04798 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y
@@ -52,11 +52,7 @@
 #include <string.h>
 #include <sysexits.h>
 
-#ifdef __linux__
 #include "../queue.h"
-#else
-#include <sys/queue.h>
-#endif
 
 #include "aicasm.h"
 #include "aicasm_symbol.h"
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_macro_scan.l b/drivers/scsi/aic7xxx/aicasm/aicasm_macro_scan.l
index c0457b8..98e9959 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_macro_scan.l
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_macro_scan.l
@@ -51,11 +51,7 @@
 #include <stdio.h>
 #include <string.h>
 #include <sysexits.h>
-#ifdef __linux__
 #include "../queue.h"
-#else
-#include <sys/queue.h>
-#endif
 
 #include "aicasm.h"
 #include "aicasm_symbol.h"
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l b/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
index 93c8667..c78d4f6 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
@@ -51,11 +51,7 @@
 #include <stdio.h>
 #include <string.h>
 #include <sysexits.h>
-#ifdef __linux__
 #include "../queue.h"
-#else
-#include <sys/queue.h>
-#endif
 
 #include "aicasm.h"
 #include "aicasm_symbol.h"
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
index 232aff1..975fcfc 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
@@ -44,11 +44,7 @@
 
 #include <sys/types.h>
 
-#ifdef __linux__
 #include "aicdb.h"
-#else
-#include <db.h>
-#endif
 #include <fcntl.h>
 #include <inttypes.h>
 #include <regex.h>
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
index 34bbcad..7bf7fd5 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
@@ -42,11 +42,7 @@
  * $FreeBSD$
  */
 
-#ifdef __linux__
 #include "../queue.h"
-#else
-#include <sys/queue.h>
-#endif
 
 typedef enum {
 	UNINITIALIZED,
diff --git a/drivers/scsi/aic94xx/Kconfig b/drivers/scsi/aic94xx/Kconfig
index c83fe75..71931c3 100644
--- a/drivers/scsi/aic94xx/Kconfig
+++ b/drivers/scsi/aic94xx/Kconfig
@@ -1,28 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
 #
 # Kernel configuration file for aic94xx SAS/SATA driver.
 #
 # Copyright (c) 2005 Adaptec, Inc.  All rights reserved.
 # Copyright (c) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
 #
-# This file is licensed under GPLv2.
-#
-# This file is part of the aic94xx driver.
-#
-# The aic94xx driver is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License as
-# published by the Free Software Foundation; version 2 of the
-# License.
-#
-# The aic94xx driver is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-# General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Aic94xx Driver; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
-#
-#
 
 config SCSI_AIC94XX
 	tristate "Adaptec AIC94xx SAS/SATA support"
diff --git a/drivers/scsi/aic94xx/Makefile b/drivers/scsi/aic94xx/Makefile
index c0a15c7..db9fbe3 100644
--- a/drivers/scsi/aic94xx/Makefile
+++ b/drivers/scsi/aic94xx/Makefile
@@ -1,26 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
 #
 # Makefile for Adaptec aic94xx SAS/SATA driver.
 #
 # Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
 # Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
 #
-# This file is licensed under GPLv2.
-#
-# This file is part of the aic94xx driver.
-#
-# The aic94xx driver is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License as
-# published by the Free Software Foundation; version 2 of the
-# License.
-#
-# The aic94xx driver is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-# General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with the aic94xx driver; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 
 ccflags-$(CONFIG_AIC94XX_DEBUG) := -DASD_DEBUG -DASD_ENTER_EXIT
 
diff --git a/drivers/scsi/aic94xx/aic94xx.h b/drivers/scsi/aic94xx/aic94xx.h
index 26d4ad9..c23bbb6 100644
--- a/drivers/scsi/aic94xx/aic94xx.h
+++ b/drivers/scsi/aic94xx/aic94xx.h
@@ -1,27 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Aic94xx SAS/SATA driver header file.
  *
  * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
  * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
  *
- * This file is licensed under GPLv2.
- *
- * This file is part of the aic94xx driver.
- *
- * The aic94xx driver is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; version 2 of the
- * License.
- *
- * The aic94xx driver is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with the aic94xx driver; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
  * $Id: //depot/aic94xx/aic94xx.h#31 $
  */
 
diff --git a/drivers/scsi/aic94xx/aic94xx_dev.c b/drivers/scsi/aic94xx/aic94xx_dev.c
index 3307238..604a533 100644
--- a/drivers/scsi/aic94xx/aic94xx_dev.c
+++ b/drivers/scsi/aic94xx/aic94xx_dev.c
@@ -1,27 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Aic94xx SAS/SATA DDB management
  *
  * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
  * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
  *
- * This file is licensed under GPLv2.
- *
- * This file is part of the aic94xx driver.
- *
- * The aic94xx driver is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; version 2 of the
- * License.
- *
- * The aic94xx driver is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with the aic94xx driver; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
  * $Id: //depot/aic94xx/aic94xx_dev.c#21 $
  */
 
@@ -187,9 +170,7 @@
 			}
 		} else {
 			flags |= CONCURRENT_CONN_SUPP;
-			if (!dev->parent &&
-			    (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
-			     dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE))
+			if (!dev->parent && dev_is_expander(dev->dev_type))
 				asd_ddbsite_write_byte(asd_ha, ddb, MAX_CCONN,
 						       4);
 			else
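
dev_is_expander() is the libsas helper adopted here; it folds the edge- and fanout-expander checks that drivers used to open-code into a single predicate. A paraphrase (the authoritative definition lives in include/scsi/libsas.h):

#include <scsi/libsas.h>

/* Renamed to avoid clashing with the real helper. */
static inline bool dev_is_expander_sketch(enum sas_device_type type)
{
	return type == SAS_EDGE_EXPANDER_DEVICE ||
	       type == SAS_FANOUT_EXPANDER_DEVICE;
}
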
diff --git a/drivers/scsi/aic94xx/aic94xx_dump.c b/drivers/scsi/aic94xx/aic94xx_dump.c
index a16a77c..7c4c53a 100644
--- a/drivers/scsi/aic94xx/aic94xx_dump.c
+++ b/drivers/scsi/aic94xx/aic94xx_dump.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Aic94xx SAS/SATA driver dump interface.
  *
@@ -5,28 +6,9 @@
  * Copyright (C) 2004 David Chaw <david_chaw@adaptec.com>
  * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
  *
- * This file is licensed under GPLv2.
- *
- * This file is part of the aic94xx driver.
- *
- * The aic94xx driver is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; version 2 of the
- * License.
- *
- * The aic94xx driver is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with the aic94xx driver; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
  * 2005/07/14/LT  Complete overhaul of this file.  Update pages, register
  * locations, names, etc.  Make use of macros.  Print more information.
  * Print all cseq and lseq mip and mdp.
- *
  */
 
 #include <linux/pci.h>
diff --git a/drivers/scsi/aic94xx/aic94xx_dump.h b/drivers/scsi/aic94xx/aic94xx_dump.h
index 191a753..d8faa5d 100644
--- a/drivers/scsi/aic94xx/aic94xx_dump.h
+++ b/drivers/scsi/aic94xx/aic94xx_dump.h
@@ -1,27 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Aic94xx SAS/SATA driver dump header file.
  *
  * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
  * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
- *
- * This file is licensed under GPLv2.
- *
- * This file is part of the aic94xx driver.
- *
- * The aic94xx driver is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; version 2 of the
- * License.
- *
- * The aic94xx driver is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with the aic94xx driver; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
  */
 
 #ifndef _AIC94XX_DUMP_H_
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
index 3b8ad55..c5a46c5 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.c
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.c
@@ -1,27 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Aic94xx SAS/SATA driver hardware interface.
  *
  * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
  * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
- *
- * This file is licensed under GPLv2.
- *
- * This file is part of the aic94xx driver.
- *
- * The aic94xx driver is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; version 2 of the
- * License.
- *
- * The aic94xx driver is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with the aic94xx driver; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
  */
 
 #include <linux/pci.h>
@@ -1057,14 +1039,13 @@
 
 	if (ascb) {
 		ascb->dma_scb.size = sizeof(struct scb);
-		ascb->dma_scb.vaddr = dma_pool_alloc(asd_ha->scb_pool,
+		ascb->dma_scb.vaddr = dma_pool_zalloc(asd_ha->scb_pool,
 						     gfp_flags,
 						    &ascb->dma_scb.dma_handle);
 		if (!ascb->dma_scb.vaddr) {
 			kmem_cache_free(asd_ascb_cache, ascb);
 			return NULL;
 		}
-		memset(ascb->dma_scb.vaddr, 0, sizeof(struct scb));
 		asd_init_ascb(asd_ha, ascb);
 
 		spin_lock_irqsave(&seq->tc_index_lock, flags);
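
dma_pool_zalloc() is dma_pool_alloc() with __GFP_ZERO folded into the flags, so the buffer arrives zeroed and the separate memset() of the SCB can go. Sketch:

#include <linux/dmapool.h>

/* One call replaces the former alloc + memset pair. */
static void *alloc_zeroed_scb(struct dma_pool *pool, gfp_t flags,
			      dma_addr_t *handle)
{
	return dma_pool_zalloc(pool, flags, handle);
}
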
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.h b/drivers/scsi/aic94xx/aic94xx_hwi.h
index 8f147e7..930e192 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.h
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.h
@@ -1,27 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Aic94xx SAS/SATA driver hardware interface header file.
  *
  * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
  * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
- *
- * This file is licensed under GPLv2.
- *
- * This file is part of the aic94xx driver.
- *
- * The aic94xx driver is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; version 2 of the
- * License.
- *
- * The aic94xx driver is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with the aic94xx driver; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
  */
 
 #ifndef _AIC94XX_HWI_H_
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 1391e5f..f5781e3 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -1,27 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Aic94xx SAS/SATA driver initialization.
  *
  * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
  * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
- *
- * This file is licensed under GPLv2.
- *
- * This file is part of the aic94xx driver.
- *
- * The aic94xx driver is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; version 2 of the
- * License.
- *
- * The aic94xx driver is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with the aic94xx driver; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
  */
 
 #include <linux/module.h>
@@ -68,7 +50,6 @@
 	.this_id		= -1,
 	.sg_tablesize		= SG_ALL,
 	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
-	.use_clustering		= ENABLE_CLUSTERING,
 	.eh_device_reset_handler	= sas_eh_device_reset_handler,
 	.eh_target_reset_handler	= sas_eh_target_reset_handler,
 	.target_destroy		= sas_target_destroy,
@@ -281,7 +262,7 @@
 	return snprintf(buf, PAGE_SIZE, "%s\n",
 			asd_dev_rev[asd_ha->revision_id]);
 }
-static DEVICE_ATTR(revision, S_IRUGO, asd_show_dev_rev, NULL);
+static DEVICE_ATTR(aic_revision, S_IRUGO, asd_show_dev_rev, NULL);
 
 static ssize_t asd_show_dev_bios_build(struct device *dev,
 				       struct device_attribute *attr,char *buf)
@@ -478,7 +459,7 @@
 {
 	int err;
 
-	err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_revision);
+	err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision);
 	if (err)
 		return err;
 
@@ -500,13 +481,13 @@
 err_biosb:
 	device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build);
 err_rev:
-	device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision);
+	device_remove_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision);
 	return err;
 }
 
 static void asd_remove_dev_attrs(struct asd_ha_struct *asd_ha)
 {
-	device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision);
+	device_remove_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision);
 	device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build);
 	device_remove_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn);
 	device_remove_file(&asd_ha->pcidev->dev, &dev_attr_update_bios);
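
The revision -> aic_revision rename is not cosmetic: the PCI core creates a revision sysfs attribute for every PCI device, so the driver's own file of the same name on the same device collided and broke probing with a duplicate-filename error. Because DEVICE_ATTR(name, ...) expands to a struct device_attribute called dev_attr_<name>, the rename also propagates to each create/remove call site. Sketch of the macro's naming:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/stat.h>

static ssize_t rev_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return snprintf(buf, PAGE_SIZE, "B1\n");	/* illustrative */
}
/* Defines a variable named dev_attr_aic_revision: */
static DEVICE_ATTR(aic_revision, S_IRUGO, rev_show, NULL);
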
@@ -584,8 +565,7 @@
 	if (asd_ha->hw_prof.scb_ext)
 		asd_free_coherent(asd_ha, asd_ha->hw_prof.scb_ext);
 
-	if (asd_ha->hw_prof.ddb_bitmap)
-		kfree(asd_ha->hw_prof.ddb_bitmap);
+	kfree(asd_ha->hw_prof.ddb_bitmap);
 	asd_ha->hw_prof.ddb_bitmap = NULL;
 
 	for (i = 0; i < ASD_MAX_PHYS; i++) {
@@ -660,12 +640,10 @@
 
 static void asd_destroy_global_caches(void)
 {
-	if (asd_dma_token_cache)
-		kmem_cache_destroy(asd_dma_token_cache);
+	kmem_cache_destroy(asd_dma_token_cache);
 	asd_dma_token_cache = NULL;
 
-	if (asd_ascb_cache)
-		kmem_cache_destroy(asd_ascb_cache);
+	kmem_cache_destroy(asd_ascb_cache);
 	asd_ascb_cache = NULL;
 }
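
kfree(NULL) and kmem_cache_destroy(NULL) are defined no-ops, so the if-guards removed in these two hunks were redundant; clearing the pointer afterwards is still what prevents a dangling reference. Sketch:

#include <linux/slab.h>

static void destroy_cache(struct kmem_cache **cachep)
{
	kmem_cache_destroy(*cachep);	/* safe on NULL */
	*cachep = NULL;
}
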
 
@@ -770,14 +748,11 @@
 	if (err)
 		goto Err_remove;
 
-	err = -ENODEV;
-	if (!pci_set_dma_mask(dev, DMA_BIT_MASK(64))
-	    && !pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(64)))
-		;
-	else if (!pci_set_dma_mask(dev, DMA_BIT_MASK(32))
-		 && !pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(32)))
-		;
-	else {
+	err = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64));
+	if (err)
+		err = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
+	if (err) {
+		err = -ENODEV;
 		asd_printk("no suitable DMA mask for %s\n", pci_name(dev));
 		goto Err_remove;
 	}
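
dma_set_mask_and_coherent() sets the streaming and coherent DMA masks in one call, replacing the paired pci_set_dma_mask()/pci_set_consistent_dma_mask() dance; the surrounding logic keeps the usual probe idiom of trying 64-bit first and falling back to 32-bit. Sketch:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Prefer 64-bit DMA, fall back to 32-bit, fail the probe otherwise. */
static int set_dma_masks(struct pci_dev *pdev)
{
	int err;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err)
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	return err;
}
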
diff --git a/drivers/scsi/aic94xx/aic94xx_reg.c b/drivers/scsi/aic94xx/aic94xx_reg.c
index 56b17c2..392499e 100644
--- a/drivers/scsi/aic94xx/aic94xx_reg.c
+++ b/drivers/scsi/aic94xx/aic94xx_reg.c
@@ -1,27 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Aic94xx SAS/SATA driver register access.
  *
  * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
  * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
- *
- * This file is licensed under GPLv2.
- *
- * This file is part of the aic94xx driver.
- *
- * The aic94xx driver is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; version 2 of the
- * License.
- *
- * The aic94xx driver is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with the aic94xx driver; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
  */
 
 #include <linux/pci.h>
diff --git a/drivers/scsi/aic94xx/aic94xx_reg.h b/drivers/scsi/aic94xx/aic94xx_reg.h
index 2279307..d1c0975 100644
--- a/drivers/scsi/aic94xx/aic94xx_reg.h
+++ b/drivers/scsi/aic94xx/aic94xx_reg.h
@@ -1,27 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Aic94xx SAS/SATA driver hardware registers definitions.
  *
  * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
  * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
- *
- * This file is licensed under GPLv2.
- *
- * This file is part of the aic94xx driver.
- *
- * The aic94xx driver is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; version 2 of the
- * License.
- *
- * The aic94xx driver is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with the aic94xx driver; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
  */
 
 #ifndef _AIC94XX_REG_H_
diff --git a/drivers/scsi/aic94xx/aic94xx_reg_def.h b/drivers/scsi/aic94xx/aic94xx_reg_def.h
index dd6cc80..b96cfc3 100644
--- a/drivers/scsi/aic94xx/aic94xx_reg_def.h
+++ b/drivers/scsi/aic94xx/aic94xx_reg_def.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Aic94xx SAS/SATA driver hardware registers definitions.
  *
@@ -9,26 +10,7 @@
  * agnostic register r/w functions.  Some register corrections, sizes,
  * etc.
  *
- * This file is licensed under GPLv2.
- *
- * This file is part of the aic94xx driver.
- *
- * The aic94xx driver is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; version 2 of the
- * License.
- *
- * The aic94xx driver is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with the aic94xx driver; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
  * $Id: //depot/aic94xx/aic94xx_reg_def.h#27 $
- *
  */
 
 #ifndef _ADP94XX_REG_DEF_H_
diff --git a/drivers/scsi/aic94xx/aic94xx_sas.h b/drivers/scsi/aic94xx/aic94xx_sas.h
index 101072c..3fe34cb 100644
--- a/drivers/scsi/aic94xx/aic94xx_sas.h
+++ b/drivers/scsi/aic94xx/aic94xx_sas.h
@@ -1,27 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Aic94xx SAS/SATA driver SAS definitions and hardware interface header file.
  *
  * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
  * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
- *
- * This file is licensed under GPLv2.
- *
- * This file is part of the aic94xx driver.
- *
- * The aic94xx driver is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; version 2 of the
- * License.
- *
- * The aic94xx driver is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with the aic94xx driver; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
  */
 
 #ifndef _AIC94XX_SAS_H_
diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c
index 22873ce..4a80ec0 100644
--- a/drivers/scsi/aic94xx/aic94xx_scb.c
+++ b/drivers/scsi/aic94xx/aic94xx_scb.c
@@ -1,27 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Aic94xx SAS/SATA driver SCB management.
  *
  * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
  * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
- *
- * This file is licensed under GPLv2.
- *
- * This file is part of the aic94xx driver.
- *
- * The aic94xx driver is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; version 2 of the
- * License.
- *
- * The aic94xx driver is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with the aic94xx driver; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
  */
 
 #include <linux/gfp.h>
@@ -724,9 +706,11 @@
 	switch (pd->max_sas_lrate) {
 	case SAS_LINK_RATE_6_0_GBPS:
 		*speed_mask &= ~SAS_SPEED_60_DIS;
+		/* fall through */
 	default:
 	case SAS_LINK_RATE_3_0_GBPS:
 		*speed_mask &= ~SAS_SPEED_30_DIS;
+		/* fall through */
 	case SAS_LINK_RATE_1_5_GBPS:
 		*speed_mask &= ~SAS_SPEED_15_DIS;
 	}
@@ -734,6 +718,7 @@
 	switch (pd->min_sas_lrate) {
 	case SAS_LINK_RATE_6_0_GBPS:
 		*speed_mask |= SAS_SPEED_30_DIS;
+		/* fall through */
 	case SAS_LINK_RATE_3_0_GBPS:
 		*speed_mask |= SAS_SPEED_15_DIS;
 	default:
@@ -745,6 +730,7 @@
 	switch (pd->max_sata_lrate) {
 	case SAS_LINK_RATE_3_0_GBPS:
 		*speed_mask &= ~SATA_SPEED_30_DIS;
+		/* fall through */
 	default:
 	case SAS_LINK_RATE_1_5_GBPS:
 		*speed_mask &= ~SATA_SPEED_15_DIS;
@@ -803,6 +789,7 @@
 
 		/* link reset retries, this should be nominal */
 		control_phy->link_reset_retries = 10;
+		/* fall through */
 
 	case RELEASE_SPINUP_HOLD: /* 0x02 */
 		/* decide the func_mask */
diff --git a/drivers/scsi/aic94xx/aic94xx_sds.c b/drivers/scsi/aic94xx/aic94xx_sds.c
index c831e30..3ddc885 100644
--- a/drivers/scsi/aic94xx/aic94xx_sds.c
+++ b/drivers/scsi/aic94xx/aic94xx_sds.c
@@ -1,28 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Aic94xx SAS/SATA driver access to shared data structures and memory
  * maps.
  *
  * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
  * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
- *
- * This file is licensed under GPLv2.
- *
- * This file is part of the aic94xx driver.
- *
- * The aic94xx driver is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; version 2 of the
- * License.
- *
- * The aic94xx driver is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with the aic94xx driver; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
  */
 
 #include <linux/pci.h>
diff --git a/drivers/scsi/aic94xx/aic94xx_sds.h b/drivers/scsi/aic94xx/aic94xx_sds.h
index a06dc01..80f3c47 100644
--- a/drivers/scsi/aic94xx/aic94xx_sds.h
+++ b/drivers/scsi/aic94xx/aic94xx_sds.h
@@ -1,27 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Aic94xx SAS/SATA driver hardware interface header file.
  *
  * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
  * Copyright (C) 2005 Gilbert Wu <gilbert_wu@adaptec.com>
- *
- * This file is licensed under GPLv2.
- *
- * This file is part of the aic94xx driver.
- *
- * The aic94xx driver is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; version 2 of the
- * License.
- *
- * The aic94xx driver is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with the aic94xx driver; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
  */
 #ifndef _AIC94XX_SDS_H_
 #define _AIC94XX_SDS_H_
diff --git a/drivers/scsi/aic94xx/aic94xx_seq.c b/drivers/scsi/aic94xx/aic94xx_seq.c
index da1e056..11853ec 100644
--- a/drivers/scsi/aic94xx/aic94xx_seq.c
+++ b/drivers/scsi/aic94xx/aic94xx_seq.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Aic94xx SAS/SATA driver sequencer interface.
  *
@@ -5,25 +6,6 @@
  * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
  *
  * Parts of this code adapted from David Chaw's adp94xx_seq.c.
- *
- * This file is licensed under GPLv2.
- *
- * This file is part of the aic94xx driver.
- *
- * The aic94xx driver is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; version 2 of the
- * License.
- *
- * The aic94xx driver is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with the aic94xx driver; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
  */
 
 #include <linux/delay.h>
diff --git a/drivers/scsi/aic94xx/aic94xx_seq.h b/drivers/scsi/aic94xx/aic94xx_seq.h
index ad787c5..5bf9b8a 100644
--- a/drivers/scsi/aic94xx/aic94xx_seq.h
+++ b/drivers/scsi/aic94xx/aic94xx_seq.h
@@ -1,27 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Aic94xx SAS/SATA driver sequencer interface header file.
  *
  * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
  * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
- *
- * This file is licensed under GPLv2.
- *
- * This file is part of the aic94xx driver.
- *
- * The aic94xx driver is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; version 2 of the
- * License.
- *
- * The aic94xx driver is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with the aic94xx driver; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
  */
 
 #ifndef _AIC94XX_SEQ_H_
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
index cdd4ab6..f923ed0 100644
--- a/drivers/scsi/aic94xx/aic94xx_task.c
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -1,27 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Aic94xx SAS/SATA Tasks
  *
  * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
  * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
- *
- * This file is licensed under GPLv2.
- *
- * This file is part of the aic94xx driver.
- *
- * The aic94xx driver is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; version 2 of the
- * License.
- *
- * The aic94xx driver is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with the aic94xx driver; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
  */
 
 #include <linux/spinlock.h>
@@ -42,13 +24,13 @@
 	spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
 }
 
-/* PCI_DMA_... to our direction translation.
+/* DMA_... to our direction translation.
  */
 static const u8 data_dir_flags[] = {
-	[PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT,	/* UNSPECIFIED */
-	[PCI_DMA_TODEVICE]      = DATA_DIR_OUT, /* OUTBOUND */
-	[PCI_DMA_FROMDEVICE]    = DATA_DIR_IN, /* INBOUND */
-	[PCI_DMA_NONE]          = DATA_DIR_NONE, /* NO TRANSFER */
+	[DMA_BIDIRECTIONAL]	= DATA_DIR_BYRECIPIENT,	/* UNSPECIFIED */
+	[DMA_TO_DEVICE]		= DATA_DIR_OUT,		/* OUTBOUND */
+	[DMA_FROM_DEVICE]	= DATA_DIR_IN,		/* INBOUND */
+	[DMA_NONE]		= DATA_DIR_NONE,	/* NO TRANSFER */
 };
 
 static int asd_map_scatterlist(struct sas_task *task,
@@ -60,12 +42,12 @@
 	struct scatterlist *sc;
 	int num_sg, res;
 
-	if (task->data_dir == PCI_DMA_NONE)
+	if (task->data_dir == DMA_NONE)
 		return 0;
 
 	if (task->num_scatter == 0) {
 		void *p = task->scatter;
-		dma_addr_t dma = pci_map_single(asd_ha->pcidev, p,
+		dma_addr_t dma = dma_map_single(&asd_ha->pcidev->dev, p,
 						task->total_xfer_len,
 						task->data_dir);
 		sg_arr[0].bus_addr = cpu_to_le64((u64)dma);
@@ -79,7 +61,7 @@
 	if (sas_protocol_ata(task->task_proto))
 		num_sg = task->num_scatter;
 	else
-		num_sg = pci_map_sg(asd_ha->pcidev, task->scatter,
+		num_sg = dma_map_sg(&asd_ha->pcidev->dev, task->scatter,
 				    task->num_scatter, task->data_dir);
 	if (num_sg == 0)
 		return -ENOMEM;
@@ -126,8 +108,8 @@
 	return 0;
 err_unmap:
 	if (sas_protocol_ata(task->task_proto))
-		pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter,
-			     task->data_dir);
+		dma_unmap_sg(&asd_ha->pcidev->dev, task->scatter,
+			     task->num_scatter, task->data_dir);
 	return res;
 }
 
@@ -136,21 +118,21 @@
 	struct asd_ha_struct *asd_ha = ascb->ha;
 	struct sas_task *task = ascb->uldd_task;
 
-	if (task->data_dir == PCI_DMA_NONE)
+	if (task->data_dir == DMA_NONE)
 		return;
 
 	if (task->num_scatter == 0) {
 		dma_addr_t dma = (dma_addr_t)
 		       le64_to_cpu(ascb->scb->ssp_task.sg_element[0].bus_addr);
-		pci_unmap_single(ascb->ha->pcidev, dma, task->total_xfer_len,
-				 task->data_dir);
+		dma_unmap_single(&ascb->ha->pcidev->dev, dma,
+				 task->total_xfer_len, task->data_dir);
 		return;
 	}
 
 	asd_free_coherent(asd_ha, ascb->sg_arr);
 	if (task->task_proto != SAS_PROTOCOL_STP)
-		pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter,
-			     task->data_dir);
+		dma_unmap_sg(&asd_ha->pcidev->dev, task->scatter,
+			     task->num_scatter, task->data_dir);
 }
 
 /* ---------- Task complete tasklet ---------- */
@@ -436,10 +418,10 @@
 	struct domain_device *dev = task->dev;
 	struct scb *scb;
 
-	pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_req, 1,
-		   PCI_DMA_TODEVICE);
-	pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_resp, 1,
-		   PCI_DMA_FROMDEVICE);
+	dma_map_sg(&asd_ha->pcidev->dev, &task->smp_task.smp_req, 1,
+		   DMA_TO_DEVICE);
+	dma_map_sg(&asd_ha->pcidev->dev, &task->smp_task.smp_resp, 1,
+		   DMA_FROM_DEVICE);
 
 	scb = ascb->scb;
 
@@ -471,10 +453,10 @@
 	struct sas_task *task = a->uldd_task;
 
 	BUG_ON(!task);
-	pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_req, 1,
-		     PCI_DMA_TODEVICE);
-	pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_resp, 1,
-		     PCI_DMA_FROMDEVICE);
+	dma_unmap_sg(&a->ha->pcidev->dev, &task->smp_task.smp_req, 1,
+		     DMA_TO_DEVICE);
+	dma_unmap_sg(&a->ha->pcidev->dev, &task->smp_task.smp_resp, 1,
+		     DMA_FROM_DEVICE);
 }
 
 /* ---------- SSP ---------- */
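
The aic94xx_task.c conversion works because libsas stores task->data_dir as a generic enum dma_data_direction and the legacy PCI_DMA_* constants were numerically identical aliases (0-3), so reindexing data_dir_flags[] changes no table contents; only the mapping calls themselves move from pci_map_*/pci_unmap_* to the dma_* API on &pcidev->dev. Sketch of the converted pattern, with the error handling the generic API expects:

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>

/* Map an SMP request/response pair; dma_map_sg() returns 0 on failure. */
static int map_smp_frames(struct pci_dev *pdev, struct scatterlist *req,
			  struct scatterlist *resp)
{
	if (!dma_map_sg(&pdev->dev, req, 1, DMA_TO_DEVICE))
		return -ENOMEM;
	if (!dma_map_sg(&pdev->dev, resp, 1, DMA_FROM_DEVICE)) {
		dma_unmap_sg(&pdev->dev, req, 1, DMA_TO_DEVICE);
		return -ENOMEM;
	}
	return 0;
}
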
diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c
index 2a01702..f814026 100644
--- a/drivers/scsi/aic94xx/aic94xx_tmf.c
+++ b/drivers/scsi/aic94xx/aic94xx_tmf.c
@@ -1,27 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Aic94xx Task Management Functions
  *
  * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
  * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
- *
- * This file is licensed under GPLv2.
- *
- * This file is part of the aic94xx driver.
- *
- * The aic94xx driver is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; version 2 of the
- * License.
- *
- * The aic94xx driver is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with the aic94xx driver; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
  */
 
 #include <linux/spinlock.h>
diff --git a/drivers/scsi/am53c974.c b/drivers/scsi/am53c974.c
index d81ca66..b69edb4 100644
--- a/drivers/scsi/am53c974.c
+++ b/drivers/scsi/am53c974.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * AMD am53c974 driver.
  * Copyright (c) 2014 Hannes Reinecke, SUSE Linux GmbH
@@ -96,9 +97,7 @@
 
 static inline struct pci_esp_priv *pci_esp_get_priv(struct esp *esp)
 {
-	struct pci_dev *pdev = esp->dev;
-
-	return pci_get_drvdata(pdev);
+	return dev_get_drvdata(esp->dev);
 }
 
 static void pci_esp_write8(struct esp *esp, u8 val, unsigned long reg)
@@ -116,30 +115,6 @@
 	return iowrite32(val, esp->regs + (reg * 4UL));
 }
 
-static dma_addr_t pci_esp_map_single(struct esp *esp, void *buf,
-				     size_t sz, int dir)
-{
-	return pci_map_single(esp->dev, buf, sz, dir);
-}
-
-static int pci_esp_map_sg(struct esp *esp, struct scatterlist *sg,
-			  int num_sg, int dir)
-{
-	return pci_map_sg(esp->dev, sg, num_sg, dir);
-}
-
-static void pci_esp_unmap_single(struct esp *esp, dma_addr_t addr,
-				 size_t sz, int dir)
-{
-	pci_unmap_single(esp->dev, addr, sz, dir);
-}
-
-static void pci_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
-			     int num_sg, int dir)
-{
-	pci_unmap_sg(esp->dev, sg, num_sg, dir);
-}
-
 static int pci_esp_irq_pending(struct esp *esp)
 {
 	struct pci_esp_priv *pep = pci_esp_get_priv(esp);
@@ -295,10 +270,6 @@
 static const struct esp_driver_ops pci_esp_ops = {
 	.esp_write8	=	pci_esp_write8,
 	.esp_read8	=	pci_esp_read8,
-	.map_single	=	pci_esp_map_single,
-	.map_sg		=	pci_esp_map_sg,
-	.unmap_single	=	pci_esp_unmap_single,
-	.unmap_sg	=	pci_esp_unmap_sg,
 	.irq_pending	=	pci_esp_irq_pending,
 	.reset_dma	=	pci_esp_reset_dma,
 	.dma_drain	=	pci_esp_dma_drain,
@@ -375,18 +346,18 @@
 
 static void dc390_check_eeprom(struct esp *esp)
 {
+	struct pci_dev *pdev = to_pci_dev(esp->dev);
 	u8 EEbuf[128];
 	u16 *ptr = (u16 *)EEbuf, wval = 0;
 	int i;
 
-	dc390_read_eeprom((struct pci_dev *)esp->dev, ptr);
+	dc390_read_eeprom(pdev, ptr);
 
 	for (i = 0; i < DC390_EEPROM_LEN; i++, ptr++)
 		wval += *ptr;
 
 	/* no Tekram EEprom found */
 	if (wval != 0x1234) {
-		struct pci_dev *pdev = esp->dev;
 		dev_printk(KERN_INFO, &pdev->dev,
 			   "No valid Tekram EEprom found\n");
 		return;
@@ -411,7 +382,7 @@
 		return -ENODEV;
 	}
 
-	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
 		dev_printk(KERN_INFO, &pdev->dev,
 			   "failed to set 32bit DMA mask\n");
 		goto fail_disable_device;
@@ -435,7 +406,7 @@
 
 	esp = shost_priv(shost);
 	esp->host = shost;
-	esp->dev = pdev;
+	esp->dev = &pdev->dev;
 	esp->ops = &pci_esp_ops;
 	/*
 	 * The am53c974 HBA has a design flaw of generating
@@ -467,8 +438,8 @@
 
 	pci_set_master(pdev);
 
-	esp->command_block = pci_alloc_consistent(pdev, 16,
-						  &esp->command_block_dma);
+	esp->command_block = dma_alloc_coherent(&pdev->dev, 16,
+			&esp->command_block_dma, GFP_KERNEL);
 	if (!esp->command_block) {
 		dev_printk(KERN_ERR, &pdev->dev,
 			   "failed to allocate command block\n");
@@ -498,7 +469,7 @@
 	/* Assume 40MHz clock */
 	esp->cfreq = 40000000;
 
-	err = scsi_esp_register(esp, &pdev->dev);
+	err = scsi_esp_register(esp);
 	if (err)
 		goto fail_free_irq;
 
@@ -508,8 +479,8 @@
 	free_irq(pdev->irq, esp);
 fail_unmap_command_block:
 	pci_set_drvdata(pdev, NULL);
-	pci_free_consistent(pdev, 16, esp->command_block,
-			    esp->command_block_dma);
+	dma_free_coherent(&pdev->dev, 16, esp->command_block,
+			  esp->command_block_dma);
 fail_unmap_regs:
 	pci_iounmap(pdev, esp->regs);
 fail_release_regions:
@@ -532,8 +503,8 @@
 	scsi_esp_unregister(esp);
 	free_irq(pdev->irq, esp);
 	pci_set_drvdata(pdev, NULL);
-	pci_free_consistent(pdev, 16, esp->command_block,
-			    esp->command_block_dma);
+	dma_free_coherent(&pdev->dev, 16, esp->command_block,
+			  esp->command_block_dma);
 	pci_iounmap(pdev, esp->regs);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
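
Turning esp->dev into a generic struct device * is what lets the esp_scsi core call the dma_* API itself: the four PCI mapping wrappers and their .map_single/.map_sg/.unmap_* ops disappear, scsi_esp_register() no longer takes the device as a separate argument, and pci_get_drvdata() collapses into dev_get_drvdata() because it was only ever a wrapper around it. Where PCI-specific work remains (the Tekram EEPROM read), to_pci_dev() recovers the pci_dev. Sketch:

#include <linux/device.h>
#include <linux/pci.h>

/* Generic private-data lookup; bus-specific paths use to_pci_dev(). */
static void *esp_priv_sketch(struct device *dev)
{
	return dev_get_drvdata(dev);
}

static struct pci_dev *esp_pci_sketch(struct device *dev)
{
	return to_pci_dev(dev);
}
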
diff --git a/drivers/scsi/arcmsr/Makefile b/drivers/scsi/arcmsr/Makefile
index 721aced..9051f66 100644
--- a/drivers/scsi/arcmsr/Makefile
+++ b/drivers/scsi/arcmsr/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 # File: drivers/arcmsr/Makefile
 # Makefile for the ARECA PCI-X PCI-EXPRESS SATA RAID controllers SCSI driver.
 
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index 9c397a2..9220bcf 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -49,7 +49,7 @@
 #define ARCMSR_MAX_OUTSTANDING_CMD	1024
 #define ARCMSR_DEFAULT_OUTSTANDING_CMD	128
 #define ARCMSR_MIN_OUTSTANDING_CMD	32
-#define ARCMSR_DRIVER_VERSION		"v1.40.00.09-20180709"
+#define ARCMSR_DRIVER_VERSION		"v1.40.00.10-20190116"
 #define ARCMSR_SCSI_INITIATOR_ID	255
 #define ARCMSR_MAX_XFER_SECTORS		512
 #define ARCMSR_MAX_XFER_SECTORS_B	4096
@@ -739,7 +739,7 @@
 #define ACB_ADAPTER_TYPE_C		0x00000002	/* hbc L IOP */
 #define ACB_ADAPTER_TYPE_D		0x00000003	/* hbd M IOP */
 #define ACB_ADAPTER_TYPE_E		0x00000004	/* hba L IOP */
-	u32			roundup_ccbsize;
+	u32			ioqueue_size;
 	struct pci_dev *	pdev;
 	struct Scsi_Host *	host;
 	unsigned long		vir2phy_offset;
@@ -747,6 +747,7 @@
 	uint32_t		outbound_int_enable;
 	uint32_t		cdb_phyaddr_hi32;
 	uint32_t		reg_mu_acc_handle0;
+	uint64_t		cdb_phyadd_hipart;
 	spinlock_t		eh_lock;
 	spinlock_t		ccblist_lock;
 	spinlock_t		postq_lock;
@@ -855,11 +856,11 @@
 *******************************************************************************
 */
 struct CommandControlBlock{
-	/*x32:sizeof struct_CCB=(32+60)byte, x64:sizeof struct_CCB=(64+60)byte*/
+	/*x32:sizeof struct_CCB=(64+60)byte, x64:sizeof struct_CCB=(64+60)byte*/
 	struct list_head		list;		/*x32: 8byte, x64: 16byte*/
 	struct scsi_cmnd		*pcmd;		/*8 bytes pointer of linux scsi command */
 	struct AdapterControlBlock	*acb;		/*x32: 4byte, x64: 8byte*/
-	uint32_t			cdb_phyaddr;	/*x32: 4byte, x64: 4byte*/
+	unsigned long			cdb_phyaddr;	/*x32: 4byte, x64: 8byte*/
 	uint32_t			arc_cdb_size;	/*x32:4byte,x64:4byte*/
 	uint16_t			ccb_flags;	/*x32: 2byte, x64: 2byte*/
 #define	CCB_FLAG_READ		0x0000
@@ -875,10 +876,10 @@
 	uint32_t			smid;
 #if BITS_PER_LONG == 64
 	/*  ======================512+64 bytes========================  */
-		uint32_t		reserved[4];	/*16 byte*/
+		uint32_t		reserved[3];	/*12 byte*/
 #else
 	/*  ======================512+32 bytes========================  */
-	//	uint32_t		reserved;	/*4  byte*/
+		uint32_t		reserved[8];	/*32  byte*/
 #endif
 	/*  =======================================================   */
 	struct ARCMSR_CDB		arcmsr_cdb;
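
The arcmsr header changes prepare for 64-bit CCB addressing: cdb_phyaddr widens from uint32_t to unsigned long so the bus address keeps its upper half on 64-bit kernels, the new cdb_phyadd_hipart field caches the upper 32 bits so that 32-bit values exchanged with the IOP can be recombined into full addresses, and the reserved[] padding is rebalanced so sizeof(struct CommandControlBlock) stays the same on both word sizes (hence the updated 64+60 size comment). roundup_ccbsize is renamed ioqueue_size to describe what it actually holds. Address-split sketch (helper name illustrative):

#include <linux/kernel.h>
#include <linux/types.h>

static void split_cdb_address(dma_addr_t addr, u32 *lo, u32 *hi)
{
	*lo = lower_32_bits(addr);
	*hi = upper_32_bits(addr);	/* the cdb_phyadd_hipart value */
}
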
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 12316ef..88053b1 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -91,6 +91,10 @@
 module_param(cmd_per_lun, int, S_IRUGO);
 MODULE_PARM_DESC(cmd_per_lun, " device queue depth(1 ~ 128), default is 32");
 
+static int dma_mask_64 = 0;
+module_param(dma_mask_64, int, S_IRUGO);
+MODULE_PARM_DESC(dma_mask_64, " set DMA mask to 64 bits(0 ~ 1), dma_mask_64=1(64 bits), =0(32 bits)");
+
 static int set_date_time = 0;
 module_param(set_date_time, int, S_IRUGO);
 MODULE_PARM_DESC(set_date_time, " send date, time to iop(0 ~ 1), set_date_time=1(enable), default(=0) is disable");
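
dma_mask_64 follows the driver's existing read-only module-parameter pattern; judging by its description it selects the 64-bit DMA mask at probe time, though that logic lies outside these hunks. The pattern itself:

#include <linux/module.h>
#include <linux/moduleparam.h>

/* Illustrative parameter, mirroring the style used above. */
static int example_flag;
module_param(example_flag, int, S_IRUGO);
MODULE_PARM_DESC(example_flag, " enable feature (0 ~ 1), default is 0");
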
@@ -156,7 +160,6 @@
 	.sg_tablesize	        = ARCMSR_DEFAULT_SG_ENTRIES,
 	.max_sectors		= ARCMSR_MAX_XFER_SECTORS_C,
 	.cmd_per_lun		= ARCMSR_DEFAULT_CMD_PERLUN,
-	.use_clustering		= ENABLE_CLUSTERING,
 	.shost_attrs		= arcmsr_host_attrs,
 	.no_write_same		= 1,
 };
@@ -224,13 +227,13 @@
 ****************************************************************************
 */
 
-static void arcmsr_free_mu(struct AdapterControlBlock *acb)
+static void arcmsr_free_io_queue(struct AdapterControlBlock *acb)
 {
 	switch (acb->adapter_type) {
 	case ACB_ADAPTER_TYPE_B:
 	case ACB_ADAPTER_TYPE_D:
 	case ACB_ADAPTER_TYPE_E: {
-		dma_free_coherent(&acb->pdev->dev, acb->roundup_ccbsize,
+		dma_free_coherent(&acb->pdev->dev, acb->ioqueue_size,
 			acb->dma_coherent2, acb->dma_coherent_handle2);
 		break;
 	}
@@ -577,6 +580,58 @@
 	}
 }
 
+static void arcmsr_hbaB_assign_regAddr(struct AdapterControlBlock *acb)
+{
+	struct MessageUnit_B *reg = acb->pmuB;
+
+	if (acb->pdev->device == PCI_DEVICE_ID_ARECA_1203) {
+		reg->drv2iop_doorbell = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_1203);
+		reg->drv2iop_doorbell_mask = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK_1203);
+		reg->iop2drv_doorbell = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_1203);
+		reg->iop2drv_doorbell_mask = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK_1203);
+	} else {
+		reg->drv2iop_doorbell= MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL);
+		reg->drv2iop_doorbell_mask = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK);
+		reg->iop2drv_doorbell = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL);
+		reg->iop2drv_doorbell_mask = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK);
+	}
+	reg->message_wbuffer = MEM_BASE1(ARCMSR_MESSAGE_WBUFFER);
+	reg->message_rbuffer =  MEM_BASE1(ARCMSR_MESSAGE_RBUFFER);
+	reg->message_rwbuffer = MEM_BASE1(ARCMSR_MESSAGE_RWBUFFER);
+}
+
+static void arcmsr_hbaD_assign_regAddr(struct AdapterControlBlock *acb)
+{
+	struct MessageUnit_D *reg = acb->pmuD;
+
+	reg->chip_id = MEM_BASE0(ARCMSR_ARC1214_CHIP_ID);
+	reg->cpu_mem_config = MEM_BASE0(ARCMSR_ARC1214_CPU_MEMORY_CONFIGURATION);
+	reg->i2o_host_interrupt_mask = MEM_BASE0(ARCMSR_ARC1214_I2_HOST_INTERRUPT_MASK);
+	reg->sample_at_reset = MEM_BASE0(ARCMSR_ARC1214_SAMPLE_RESET);
+	reg->reset_request = MEM_BASE0(ARCMSR_ARC1214_RESET_REQUEST);
+	reg->host_int_status = MEM_BASE0(ARCMSR_ARC1214_MAIN_INTERRUPT_STATUS);
+	reg->pcief0_int_enable = MEM_BASE0(ARCMSR_ARC1214_PCIE_F0_INTERRUPT_ENABLE);
+	reg->inbound_msgaddr0 = MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE0);
+	reg->inbound_msgaddr1 = MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE1);
+	reg->outbound_msgaddr0 = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE0);
+	reg->outbound_msgaddr1 = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE1);
+	reg->inbound_doorbell = MEM_BASE0(ARCMSR_ARC1214_INBOUND_DOORBELL);
+	reg->outbound_doorbell = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL);
+	reg->outbound_doorbell_enable = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL_ENABLE);
+	reg->inboundlist_base_low = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_LOW);
+	reg->inboundlist_base_high = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_HIGH);
+	reg->inboundlist_write_pointer = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_WRITE_POINTER);
+	reg->outboundlist_base_low = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_LOW);
+	reg->outboundlist_base_high = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_HIGH);
+	reg->outboundlist_copy_pointer = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_COPY_POINTER);
+	reg->outboundlist_read_pointer = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_READ_POINTER);
+	reg->outboundlist_interrupt_cause = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_CAUSE);
+	reg->outboundlist_interrupt_enable = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_ENABLE);
+	reg->message_wbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_WBUFFER);
+	reg->message_rbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RBUFFER);
+	reg->msgcode_rwbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RWBUFFER);
+}
+
 static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb)
 {
 	bool rtn = true;
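
The allocation hunks below also replace dma_zalloc_coherent() with plain dma_alloc_coherent(): since v5.0, dma_alloc_coherent() zeroes the returned buffer unconditionally, so the zalloc wrapper was removed tree-wide and this is a straight rename. Sketch:

#include <linux/dma-mapping.h>

static void *alloc_ioqueue(struct device *dev, size_t size,
			   dma_addr_t *handle)
{
	return dma_alloc_coherent(dev, size, handle, GFP_KERNEL); /* zeroed */
}
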
@@ -586,9 +641,8 @@
 
 	switch (acb->adapter_type) {
 	case ACB_ADAPTER_TYPE_B: {
-		struct MessageUnit_B *reg;
-		acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_B), 32);
-		dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize,
+		acb->ioqueue_size = roundup(sizeof(struct MessageUnit_B), 32);
+		dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size,
 			&dma_coherent_handle, GFP_KERNEL);
 		if (!dma_coherent) {
 			pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
@@ -596,29 +650,13 @@
 		}
 		acb->dma_coherent_handle2 = dma_coherent_handle;
 		acb->dma_coherent2 = dma_coherent;
-		reg = (struct MessageUnit_B *)dma_coherent;
-		acb->pmuB = reg;
-		if (acb->pdev->device == PCI_DEVICE_ID_ARECA_1203) {
-			reg->drv2iop_doorbell = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_1203);
-			reg->drv2iop_doorbell_mask = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK_1203);
-			reg->iop2drv_doorbell = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_1203);
-			reg->iop2drv_doorbell_mask = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK_1203);
-		} else {
-			reg->drv2iop_doorbell = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL);
-			reg->drv2iop_doorbell_mask = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK);
-			reg->iop2drv_doorbell = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL);
-			reg->iop2drv_doorbell_mask = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK);
-		}
-		reg->message_wbuffer = MEM_BASE1(ARCMSR_MESSAGE_WBUFFER);
-		reg->message_rbuffer = MEM_BASE1(ARCMSR_MESSAGE_RBUFFER);
-		reg->message_rwbuffer = MEM_BASE1(ARCMSR_MESSAGE_RWBUFFER);
+		acb->pmuB = (struct MessageUnit_B *)dma_coherent;
+		arcmsr_hbaB_assign_regAddr(acb);
 		}
 		break;
 	case ACB_ADAPTER_TYPE_D: {
-		struct MessageUnit_D *reg;
-
-		acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_D), 32);
-		dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize,
+		acb->ioqueue_size = roundup(sizeof(struct MessageUnit_D), 32);
+		dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size,
 			&dma_coherent_handle, GFP_KERNEL);
 		if (!dma_coherent) {
 			pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
@@ -626,41 +664,15 @@
 		}
 		acb->dma_coherent_handle2 = dma_coherent_handle;
 		acb->dma_coherent2 = dma_coherent;
-		reg = (struct MessageUnit_D *)dma_coherent;
-		acb->pmuD = reg;
-		reg->chip_id = MEM_BASE0(ARCMSR_ARC1214_CHIP_ID);
-		reg->cpu_mem_config = MEM_BASE0(ARCMSR_ARC1214_CPU_MEMORY_CONFIGURATION);
-		reg->i2o_host_interrupt_mask = MEM_BASE0(ARCMSR_ARC1214_I2_HOST_INTERRUPT_MASK);
-		reg->sample_at_reset = MEM_BASE0(ARCMSR_ARC1214_SAMPLE_RESET);
-		reg->reset_request = MEM_BASE0(ARCMSR_ARC1214_RESET_REQUEST);
-		reg->host_int_status = MEM_BASE0(ARCMSR_ARC1214_MAIN_INTERRUPT_STATUS);
-		reg->pcief0_int_enable = MEM_BASE0(ARCMSR_ARC1214_PCIE_F0_INTERRUPT_ENABLE);
-		reg->inbound_msgaddr0 = MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE0);
-		reg->inbound_msgaddr1 = MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE1);
-		reg->outbound_msgaddr0 = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE0);
-		reg->outbound_msgaddr1 = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE1);
-		reg->inbound_doorbell = MEM_BASE0(ARCMSR_ARC1214_INBOUND_DOORBELL);
-		reg->outbound_doorbell = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL);
-		reg->outbound_doorbell_enable = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL_ENABLE);
-		reg->inboundlist_base_low = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_LOW);
-		reg->inboundlist_base_high = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_HIGH);
-		reg->inboundlist_write_pointer = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_WRITE_POINTER);
-		reg->outboundlist_base_low = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_LOW);
-		reg->outboundlist_base_high = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_HIGH);
-		reg->outboundlist_copy_pointer = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_COPY_POINTER);
-		reg->outboundlist_read_pointer = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_READ_POINTER);
-		reg->outboundlist_interrupt_cause = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_CAUSE);
-		reg->outboundlist_interrupt_enable = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_ENABLE);
-		reg->message_wbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_WBUFFER);
-		reg->message_rbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RBUFFER);
-		reg->msgcode_rwbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RWBUFFER);
+		acb->pmuD = (struct MessageUnit_D *)dma_coherent;
+		arcmsr_hbaD_assign_regAddr(acb);
 		}
 		break;
 	case ACB_ADAPTER_TYPE_E: {
 		uint32_t completeQ_size;
 		completeQ_size = sizeof(struct deliver_completeQ) * ARCMSR_MAX_HBE_DONEQUEUE + 128;
-		acb->roundup_ccbsize = roundup(completeQ_size, 32);
-		dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize,
+		acb->ioqueue_size = roundup(completeQ_size, 32);
+		dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size,
 			&dma_coherent_handle, GFP_KERNEL);
 		if (!dma_coherent){
 			pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
@@ -669,7 +681,7 @@
 		acb->dma_coherent_handle2 = dma_coherent_handle;
 		acb->dma_coherent2 = dma_coherent;
 		acb->pCompletionQ = dma_coherent;
-		acb->completionQ_entry = acb->roundup_ccbsize / sizeof(struct deliver_completeQ);
+		acb->completionQ_entry = acb->ioqueue_size / sizeof(struct deliver_completeQ);
 		acb->doneq_index = 0;
 		}
 		break;
@@ -686,11 +698,11 @@
 	dma_addr_t dma_coherent_handle;
 	struct CommandControlBlock *ccb_tmp;
 	int i = 0, j = 0;
-	dma_addr_t cdb_phyaddr;
+	unsigned long cdb_phyaddr, next_ccb_phy;
 	unsigned long roundup_ccbsize;
 	unsigned long max_xfer_len;
 	unsigned long max_sg_entrys;
-	uint32_t  firm_config_version;
+	uint32_t  firm_config_version, curr_phy_upper32;
 
 	for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
 		for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
@@ -707,6 +719,7 @@
 	acb->host->sg_tablesize = max_sg_entrys;
 	roundup_ccbsize = roundup(sizeof(struct CommandControlBlock) + (max_sg_entrys - 1) * sizeof(struct SG64ENTRY), 32);
 	acb->uncache_size = roundup_ccbsize * acb->maxFreeCCB;
+	acb->uncache_size += acb->ioqueue_size;
 	dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size, &dma_coherent_handle, GFP_KERNEL);
 	if(!dma_coherent){
 		printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error\n", acb->host->host_no);
@@ -717,9 +730,10 @@
 	memset(dma_coherent, 0, acb->uncache_size);
 	acb->ccbsize = roundup_ccbsize;
 	ccb_tmp = dma_coherent;
+	curr_phy_upper32 = upper_32_bits(dma_coherent_handle);
 	acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle;
 	for(i = 0; i < acb->maxFreeCCB; i++){
-		cdb_phyaddr = dma_coherent_handle + offsetof(struct CommandControlBlock, arcmsr_cdb);
+		cdb_phyaddr = (unsigned long)dma_coherent_handle + offsetof(struct CommandControlBlock, arcmsr_cdb);
 		switch (acb->adapter_type) {
 		case ACB_ADAPTER_TYPE_A:
 		case ACB_ADAPTER_TYPE_B:
@@ -735,10 +749,34 @@
 		ccb_tmp->acb = acb;
 		ccb_tmp->smid = (u32)i << 16;
 		INIT_LIST_HEAD(&ccb_tmp->list);
-		list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
+		next_ccb_phy = dma_coherent_handle + roundup_ccbsize;
+		if (upper_32_bits(next_ccb_phy) != curr_phy_upper32) {
+			acb->maxFreeCCB = i;
+			acb->host->can_queue = i;
+			break;
+		} else {
+			list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
+		}
 		ccb_tmp = (struct CommandControlBlock *)((unsigned long)ccb_tmp + roundup_ccbsize);
-		dma_coherent_handle = dma_coherent_handle + roundup_ccbsize;
+		dma_coherent_handle = next_ccb_phy;
 	}
+	acb->dma_coherent_handle2 = dma_coherent_handle;
+	acb->dma_coherent2 = ccb_tmp;
+	switch (acb->adapter_type) {
+	case ACB_ADAPTER_TYPE_B:
+		acb->pmuB = (struct MessageUnit_B *)acb->dma_coherent2;
+		arcmsr_hbaB_assign_regAddr(acb);
+		break;
+	case ACB_ADAPTER_TYPE_D:
+		acb->pmuD = (struct MessageUnit_D *)acb->dma_coherent2;
+		arcmsr_hbaD_assign_regAddr(acb);
+		break;
+	case ACB_ADAPTER_TYPE_E:
+		acb->pCompletionQ = acb->dma_coherent2;
+		acb->completionQ_entry = acb->ioqueue_size / sizeof(struct deliver_completeQ);
+		acb->doneq_index = 0;
+		break;
+	}
 	return 0;
 }
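The reworked pool loop above advances the CCB virtual and DMA addresses in lockstep and stops handing out frames as soon as the next one would cross a 4 GiB boundary: the completion queues return only the low 32 bits of a frame address, so every frame must share the pool's upper-32-bit prefix. A condensed sketch of that invariant, using the variables from the loop:

	uint32_t curr_phy_upper32 = upper_32_bits(dma_coherent_handle);
	unsigned long next_ccb_phy;
	int i;

	for (i = 0; i < acb->maxFreeCCB; i++) {
		next_ccb_phy = dma_coherent_handle + roundup_ccbsize;
		if (upper_32_bits(next_ccb_phy) != curr_phy_upper32) {
			/* truncate the pool rather than mix prefixes */
			acb->maxFreeCCB = i;
			acb->host->can_queue = i;
			break;
		}
		dma_coherent_handle = next_ccb_phy;
	}

The tail of the same coherent allocation (dma_coherent2/dma_coherent_handle2) is then handed to the type-B/D message unit or the type-E completion queue, which is why ioqueue_size was folded into uncache_size earlier in the function.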
 
@@ -889,6 +927,31 @@
 	add_timer(&pacb->refresh_timer);
 }
 
+static int arcmsr_set_dma_mask(struct AdapterControlBlock *acb)
+{
+	struct pci_dev *pcidev = acb->pdev;
+
+	if (IS_DMA64) {
+		if (((acb->adapter_type == ACB_ADAPTER_TYPE_A) && !dma_mask_64) ||
+		    dma_set_mask(&pcidev->dev, DMA_BIT_MASK(64)))
+			goto dma32;
+		if (dma_set_coherent_mask(&pcidev->dev, DMA_BIT_MASK(64)) ||
+		    dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64))) {
+			printk("arcmsr: set DMA 64-bit mask failed\n");
+			return -ENXIO;
+		}
+	} else {
+dma32:
+		if (dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32)) ||
+		    dma_set_coherent_mask(&pcidev->dev, DMA_BIT_MASK(32)) ||
+		    dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32))) {
+			printk("arcmsr: set DMA 32-bit mask failed\n");
+			return -ENXIO;
+		}
+	}
+	return 0;
+}
+
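arcmsr_set_dma_mask() layers the driver's own policy (the dma_mask_64 knob, a 32-bit-only rule for type-A boards) on top of the standard 64-then-32 fallback. Stripped of those knobs, the canonical idiom is just two calls; a minimal sketch with a hypothetical helper name:

static int example_set_dma_mask(struct pci_dev *pdev)
{
	/* Prefer 64-bit streaming and coherent masks, else fall back. */
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
		return -ENXIO;
	return 0;
}

Moving the acb->adapter_type assignment ahead of this call in arcmsr_probe() below is what lets the mask decision depend on the adapter type.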
 static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	struct Scsi_Host *host;
@@ -903,22 +966,15 @@
 	if(!host){
     		goto pci_disable_dev;
 	}
-	error = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
-	if(error){
-		error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-		if(error){
-			printk(KERN_WARNING
-			       "scsi%d: No suitable DMA mask available\n",
-			       host->host_no);
-			goto scsi_host_release;
-		}
-	}
 	init_waitqueue_head(&wait_q);
 	bus = pdev->bus->number;
 	dev_fun = pdev->devfn;
 	acb = (struct AdapterControlBlock *) host->hostdata;
 	memset(acb,0,sizeof(struct AdapterControlBlock));
 	acb->pdev = pdev;
+	acb->adapter_type = id->driver_data;
+	if (arcmsr_set_dma_mask(acb))
+		goto scsi_host_release;
 	acb->host = host;
 	host->max_lun = ARCMSR_MAX_TARGETLUN;
 	host->max_id = ARCMSR_MAX_TARGETID;		/*16:8*/
@@ -948,7 +1004,6 @@
 			ACB_F_MESSAGE_WQBUFFER_READED);
 	acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
 	INIT_LIST_HEAD(&acb->ccb_free_list);
-	acb->adapter_type = id->driver_data;
 	error = arcmsr_remap_pciregion(acb);
 	if(!error){
 		goto pci_release_regs;
@@ -960,9 +1015,10 @@
 	if(!error){
 		goto free_hbb_mu;
 	}
+	arcmsr_free_io_queue(acb);
 	error = arcmsr_alloc_ccb_pool(acb);
 	if(error){
-		goto free_hbb_mu;
+		goto unmap_pci_region;
 	}
 	error = scsi_add_host(host, &pdev->dev);
 	if(error){
@@ -990,8 +1046,9 @@
 	scsi_remove_host(host);
 free_ccb_pool:
 	arcmsr_free_ccb_pool(acb);
+	goto unmap_pci_region;
 free_hbb_mu:
-	arcmsr_free_mu(acb);
+	arcmsr_free_io_queue(acb);
 unmap_pci_region:
 	arcmsr_unmap_pciregion(acb);
 pci_release_regs:
@@ -1037,7 +1094,6 @@
 
 static int arcmsr_resume(struct pci_dev *pdev)
 {
-	int error;
 	struct Scsi_Host *host = pci_get_drvdata(pdev);
 	struct AdapterControlBlock *acb =
 		(struct AdapterControlBlock *)host->hostdata;
@@ -1049,24 +1105,30 @@
 		pr_warn("%s: pci_enable_device error\n", __func__);
 		return -ENODEV;
 	}
-	error = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
-	if (error) {
-		error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-		if (error) {
-			pr_warn("scsi%d: No suitable DMA mask available\n",
-			       host->host_no);
-			goto controller_unregister;
-		}
-	}
+	if (arcmsr_set_dma_mask(acb))
+		goto controller_unregister;
 	pci_set_master(pdev);
 	if (arcmsr_request_irq(pdev, acb) == FAILED)
 		goto controller_stop;
-	if (acb->adapter_type == ACB_ADAPTER_TYPE_E) {
+	switch (acb->adapter_type) {
+	case ACB_ADAPTER_TYPE_B: {
+		struct MessageUnit_B *reg = acb->pmuB;
+		uint32_t i;
+		for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
+			reg->post_qbuffer[i] = 0;
+			reg->done_qbuffer[i] = 0;
+		}
+		reg->postq_index = 0;
+		reg->doneq_index = 0;
+		break;
+		}
+	case ACB_ADAPTER_TYPE_E:
 		writel(0, &acb->pmuE->host_int_status);
 		writel(ARCMSR_HBEMU_DOORBELL_SYNC, &acb->pmuE->iobound_doorbell);
 		acb->in_doorbell = 0;
 		acb->out_doorbell = 0;
 		acb->doneq_index = 0;
+		break;
 	}
 	arcmsr_iop_init(acb);
 	arcmsr_init_get_devmap_timer(acb);
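For type-B boards the message unit now lives in driver-allocated coherent memory rather than a BAR, so after suspend its ring state is stale with respect to the freshly reset IOP; the loop above zeroes it before arcmsr_iop_init() re-synchronises the two sides. An equivalent compact sketch, assuming the array layout of struct MessageUnit_B:

	struct MessageUnit_B *reg = acb->pmuB;

	memset(reg->post_qbuffer, 0, sizeof(reg->post_qbuffer));
	memset(reg->done_qbuffer, 0, sizeof(reg->done_qbuffer));
	reg->postq_index = 0;
	reg->doneq_index = 0;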
@@ -1317,13 +1379,10 @@
 
 static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct CommandControlBlock *pCCB, bool error)
 {
-	int id, lun;
 	if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
 		if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
 			struct scsi_cmnd *abortcmd = pCCB->pcmd;
 			if (abortcmd) {
-				id = abortcmd->device->id;
-				lun = abortcmd->device->lun;				
 				abortcmd->result |= DID_ABORT << 16;
 				arcmsr_ccb_complete(pCCB);
 				printk(KERN_NOTICE "arcmsr%d: pCCB ='0x%p' isr got aborted command \n",
@@ -1349,10 +1408,12 @@
 static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
 {
 	int i = 0;
-	uint32_t flag_ccb, ccb_cdb_phy;
+	uint32_t flag_ccb;
 	struct ARCMSR_CDB *pARCMSR_CDB;
 	bool error;
 	struct CommandControlBlock *pCCB;
+	unsigned long ccb_cdb_phy, cdb_phy_hipart;
+
 	switch (acb->adapter_type) {
 
 	case ACB_ADAPTER_TYPE_A: {
@@ -1364,7 +1425,10 @@
 		writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
 		while(((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF)
 				&& (i++ < acb->maxOutstanding)) {
-			pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/
+			ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
+			if (acb->cdb_phyadd_hipart)
+				ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
+			pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
 			pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
 			error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
 			arcmsr_drain_donequeue(acb, pCCB, error);
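The same reconstruction recurs at every completion site touched in this patch: the hardware hands back a 32-bit token, the driver recovers the low address word (types A/B shift flag_ccb left by 5, since frames are 32-byte aligned; types C/D mask with 0xFFFFFFF0), then ORs in acb->cdb_phyadd_hipart, the upper 32 bits recorded when the pool was allocated. A hypothetical helper showing the pattern the patch open-codes:

static struct ARCMSR_CDB *flag_to_cdb(struct AdapterControlBlock *acb,
				      uint32_t flag_ccb)
{
	unsigned long cdb_phy = ((unsigned long)flag_ccb << 5) & 0xffffffff;

	if (acb->cdb_phyadd_hipart)
		cdb_phy |= acb->cdb_phyadd_hipart;	/* upper 32 bits */
	return (struct ARCMSR_CDB *)(acb->vir2phy_offset + cdb_phy);
}

vir2phy_offset converts the reconstructed bus address back into a kernel virtual pointer, from which container_of() yields the owning CCB.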
@@ -1380,7 +1444,10 @@
 			flag_ccb = reg->done_qbuffer[i];
 			if (flag_ccb != 0) {
 				reg->done_qbuffer[i] = 0;
-				pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+(flag_ccb << 5));/*frame must be 32 bytes aligned*/
+				ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
+				if (acb->cdb_phyadd_hipart)
+					ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
+				pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
 				pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
 				error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
 				arcmsr_drain_donequeue(acb, pCCB, error);
@@ -1397,7 +1464,9 @@
 			/*need to do*/
 			flag_ccb = readl(&reg->outbound_queueport_low);
 			ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
-			pARCMSR_CDB = (struct  ARCMSR_CDB *)(acb->vir2phy_offset+ccb_cdb_phy);/*frame must be 32 bytes aligned*/
+			if (acb->cdb_phyadd_hipart)
+				ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
+			pARCMSR_CDB = (struct  ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
 			pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
 			error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
 			arcmsr_drain_donequeue(acb, pCCB, error);
@@ -1425,9 +1494,13 @@
 					((toggle ^ 0x4000) + 1);
 				doneq_index = pmu->doneq_index;
 				spin_unlock_irqrestore(&acb->doneq_lock, flags);
+				cdb_phy_hipart = pmu->done_qbuffer[doneq_index &
+					0xFFF].addressHigh;
 				addressLow = pmu->done_qbuffer[doneq_index &
 					0xFFF].addressLow;
 				ccb_cdb_phy = (addressLow & 0xFFFFFFF0);
+				if (acb->cdb_phyadd_hipart)
+					ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
 				pARCMSR_CDB = (struct  ARCMSR_CDB *)
 					(acb->vir2phy_offset + ccb_cdb_phy);
 				pCCB = container_of(pARCMSR_CDB,
@@ -1504,7 +1577,6 @@
 	pdev = acb->pdev;
 	arcmsr_free_irq(pdev, acb);
 	arcmsr_free_ccb_pool(acb);
-	arcmsr_free_mu(acb);
 	arcmsr_unmap_pciregion(acb);
 	pci_release_regions(pdev);
 	scsi_host_put(host);
@@ -1562,7 +1634,6 @@
 	}
 	arcmsr_free_irq(pdev, acb);
 	arcmsr_free_ccb_pool(acb);
-	arcmsr_free_mu(acb);
 	arcmsr_unmap_pciregion(acb);
 	pci_release_regions(pdev);
 	scsi_host_put(host);
@@ -1747,12 +1818,8 @@
 
 		arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size;
 		ccb_post_stamp = (cdb_phyaddr | ((arc_cdb_size - 1) >> 6) | 1);
-		if (acb->cdb_phyaddr_hi32) {
-			writel(acb->cdb_phyaddr_hi32, &phbcmu->inbound_queueport_high);
-			writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
-		} else {
-			writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
-		}
+		writel(upper_32_bits(ccb->cdb_phyaddr), &phbcmu->inbound_queueport_high);
+		writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
 		}
 		break;
 	case ACB_ADAPTER_TYPE_D: {
@@ -1765,8 +1832,8 @@
 		spin_lock_irqsave(&acb->postq_lock, flags);
 		postq_index = pmu->postq_index;
 		pinbound_srb = (struct InBound_SRB *)&(pmu->post_qbuffer[postq_index & 0xFF]);
-		pinbound_srb->addressHigh = dma_addr_hi32(cdb_phyaddr);
-		pinbound_srb->addressLow = dma_addr_lo32(cdb_phyaddr);
+		pinbound_srb->addressHigh = upper_32_bits(ccb->cdb_phyaddr);
+		pinbound_srb->addressLow = cdb_phyaddr;
 		pinbound_srb->length = ccb->arc_cdb_size >> 2;
 		arcmsr_cdb->msgContext = dma_addr_lo32(cdb_phyaddr);
 		toggle = postq_index & 0x4000;
@@ -1798,7 +1865,7 @@
 	writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
 	if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
 		printk(KERN_NOTICE
-			"arcmsr%d: wait 'stop adapter background rebulid' timeout\n"
+			"arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
 			, acb->host->host_no);
 	}
 }
@@ -1811,7 +1878,7 @@
 
 	if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
 		printk(KERN_NOTICE
-			"arcmsr%d: wait 'stop adapter background rebulid' timeout\n"
+			"arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
 			, acb->host->host_no);
 	}
 }
@@ -1824,7 +1891,7 @@
 	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
 	if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
 		printk(KERN_NOTICE
-			"arcmsr%d: wait 'stop adapter background rebulid' timeout\n"
+			"arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
 			, pACB->host->host_no);
 	}
 	return;
@@ -1837,7 +1904,7 @@
 	pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
 	writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, reg->inbound_msgaddr0);
 	if (!arcmsr_hbaD_wait_msgint_ready(pACB))
-		pr_notice("arcmsr%d: wait 'stop adapter background rebulid' "
+		pr_notice("arcmsr%d: wait 'stop adapter background rebuild' "
 			"timeout\n", pACB->host->host_no);
 }
 
@@ -1850,7 +1917,7 @@
 	pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
 	writel(pACB->out_doorbell, &reg->iobound_doorbell);
 	if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
-		pr_notice("arcmsr%d: wait 'stop adapter background rebulid' "
+		pr_notice("arcmsr%d: wait 'stop adapter background rebuild' "
 			"timeout\n", pACB->host->host_no);
 	}
 }
@@ -2302,8 +2369,13 @@
 	struct ARCMSR_CDB *pARCMSR_CDB;
 	struct CommandControlBlock *pCCB;
 	bool error;
+	unsigned long cdb_phy_addr;
+
 	while ((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) {
-		pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/
+		cdb_phy_addr = (flag_ccb << 5) & 0xffffffff;
+		if (acb->cdb_phyadd_hipart)
+			cdb_phy_addr = cdb_phy_addr | acb->cdb_phyadd_hipart;
+		pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + cdb_phy_addr);
 		pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
 		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
 		arcmsr_drain_donequeue(acb, pCCB, error);
@@ -2317,13 +2389,18 @@
 	struct ARCMSR_CDB *pARCMSR_CDB;
 	struct CommandControlBlock *pCCB;
 	bool error;
+	unsigned long cdb_phy_addr;
+
 	index = reg->doneq_index;
 	while ((flag_ccb = reg->done_qbuffer[index]) != 0) {
-		reg->done_qbuffer[index] = 0;
-		pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+(flag_ccb << 5));/*frame must be 32 bytes aligned*/
+		cdb_phy_addr = (flag_ccb << 5) & 0xffffffff;
+		if (acb->cdb_phyadd_hipart)
+			cdb_phy_addr = cdb_phy_addr | acb->cdb_phyadd_hipart;
+		pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + cdb_phy_addr);
 		pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
 		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
 		arcmsr_drain_donequeue(acb, pCCB, error);
+		reg->done_qbuffer[index] = 0;
 		index++;
 		index %= ARCMSR_MAX_HBB_POSTQUEUE;
 		reg->doneq_index = index;
@@ -2335,7 +2412,8 @@
 	struct MessageUnit_C __iomem *phbcmu;
 	struct ARCMSR_CDB *arcmsr_cdb;
 	struct CommandControlBlock *ccb;
-	uint32_t flag_ccb, ccb_cdb_phy, throttling = 0;
+	uint32_t flag_ccb, throttling = 0;
+	unsigned long ccb_cdb_phy;
 	int error;
 
 	phbcmu = acb->pmuC;
@@ -2345,6 +2423,8 @@
 	while ((flag_ccb = readl(&phbcmu->outbound_queueport_low)) !=
 			0xFFFFFFFF) {
 		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
+		if (acb->cdb_phyadd_hipart)
+			ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
 		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset
 			+ ccb_cdb_phy);
 		ccb = container_of(arcmsr_cdb, struct CommandControlBlock,
@@ -2365,12 +2445,12 @@
 static void arcmsr_hbaD_postqueue_isr(struct AdapterControlBlock *acb)
 {
 	u32 outbound_write_pointer, doneq_index, index_stripped, toggle;
-	uint32_t addressLow, ccb_cdb_phy;
+	uint32_t addressLow;
 	int error;
 	struct MessageUnit_D  *pmu;
 	struct ARCMSR_CDB *arcmsr_cdb;
 	struct CommandControlBlock *ccb;
-	unsigned long flags;
+	unsigned long flags, ccb_cdb_phy, cdb_phy_hipart;
 
 	spin_lock_irqsave(&acb->doneq_lock, flags);
 	pmu = acb->pmuD;
@@ -2384,9 +2464,13 @@
 			pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
 				((toggle ^ 0x4000) + 1);
 			doneq_index = pmu->doneq_index;
+			cdb_phy_hipart = pmu->done_qbuffer[doneq_index &
+				0xFFF].addressHigh;
 			addressLow = pmu->done_qbuffer[doneq_index &
 				0xFFF].addressLow;
 			ccb_cdb_phy = (addressLow & 0xFFFFFFF0);
+			if (acb->cdb_phyadd_hipart)
+				ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
 			arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset
 				+ ccb_cdb_phy);
 			ccb = container_of(arcmsr_cdb,
@@ -3227,7 +3311,9 @@
 	uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0;
 	int rtn;
 	bool error;
-	polling_hba_ccb_retry:
+	unsigned long ccb_cdb_phy;
+
+polling_hba_ccb_retry:
 	poll_count++;
 	outbound_intstatus = readl(&reg->outbound_intstatus) & acb->outbound_int_enable;
 	writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
@@ -3245,7 +3331,10 @@
 				goto polling_hba_ccb_retry;
 			}
 		}
-		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));
+		ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
+		if (acb->cdb_phyadd_hipart)
+			ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
+		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
 		ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
 		poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0;
 		if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
@@ -3283,8 +3372,9 @@
 	uint32_t flag_ccb, poll_ccb_done = 0, poll_count = 0;
 	int index, rtn;
 	bool error;
-	polling_hbb_ccb_retry:
+	unsigned long ccb_cdb_phy;
 
+polling_hbb_ccb_retry:
 	poll_count++;
 	/* clear doorbell interrupt */
 	writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
@@ -3310,7 +3400,10 @@
 		index %= ARCMSR_MAX_HBB_POSTQUEUE;
 		reg->doneq_index = index;
 		/* check if command done with no error */
-		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));
+		ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
+		if (acb->cdb_phyadd_hipart)
+			ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
+		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
 		ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
 		poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0;
 		if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
@@ -3343,12 +3436,14 @@
 		struct CommandControlBlock *poll_ccb)
 {
 	struct MessageUnit_C __iomem *reg = acb->pmuC;
-	uint32_t flag_ccb, ccb_cdb_phy;
+	uint32_t flag_ccb;
 	struct ARCMSR_CDB *arcmsr_cdb;
 	bool error;
 	struct CommandControlBlock *pCCB;
 	uint32_t poll_ccb_done = 0, poll_count = 0;
 	int rtn;
+	unsigned long ccb_cdb_phy;
+
 polling_hbc_ccb_retry:
 	poll_count++;
 	while (1) {
@@ -3367,7 +3462,9 @@
 		}
 		flag_ccb = readl(&reg->outbound_queueport_low);
 		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
-		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);/*frame must be 32 bytes aligned*/
+		if (acb->cdb_phyadd_hipart)
+			ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
+		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
 		pCCB = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
 		poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
 		/* check if command done with no error */
@@ -3401,9 +3498,9 @@
 				struct CommandControlBlock *poll_ccb)
 {
 	bool error;
-	uint32_t poll_ccb_done = 0, poll_count = 0, flag_ccb, ccb_cdb_phy;
+	uint32_t poll_ccb_done = 0, poll_count = 0, flag_ccb;
 	int rtn, doneq_index, index_stripped, outbound_write_pointer, toggle;
-	unsigned long flags;
+	unsigned long flags, ccb_cdb_phy, cdb_phy_hipart;
 	struct ARCMSR_CDB *arcmsr_cdb;
 	struct CommandControlBlock *pCCB;
 	struct MessageUnit_D *pmu = acb->pmuD;
@@ -3435,8 +3532,12 @@
 				((toggle ^ 0x4000) + 1);
 		doneq_index = pmu->doneq_index;
 		spin_unlock_irqrestore(&acb->doneq_lock, flags);
+		cdb_phy_hipart = pmu->done_qbuffer[doneq_index &
+				0xFFF].addressHigh;
 		flag_ccb = pmu->done_qbuffer[doneq_index & 0xFFF].addressLow;
 		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
+		if (acb->cdb_phyadd_hipart)
+			ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
 		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset +
 			ccb_cdb_phy);
 		pCCB = container_of(arcmsr_cdb, struct CommandControlBlock,
@@ -3678,6 +3779,7 @@
 	cdb_phyaddr = lower_32_bits(dma_coherent_handle);
 	cdb_phyaddr_hi32 = upper_32_bits(dma_coherent_handle);
 	acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32;
+	acb->cdb_phyadd_hipart = ((uint64_t)cdb_phyaddr_hi32) << 32;
 	/*
 	***********************************************************************
 	**    if adapter type B, set window of "post command Q"
@@ -3742,7 +3844,6 @@
 		}
 		break;
 	case ACB_ADAPTER_TYPE_C: {
-		if (cdb_phyaddr_hi32 != 0) {
 			struct MessageUnit_C __iomem *reg = acb->pmuC;
 
 			printk(KERN_NOTICE "arcmsr%d: cdb_phyaddr_hi32=0x%x\n",
@@ -3757,7 +3858,6 @@
 				return 1;
 			}
 		}
-		}
 		break;
 	case ACB_ADAPTER_TYPE_D: {
 		uint32_t __iomem *rwbuffer;
@@ -3791,7 +3891,7 @@
 		cdb_phyaddr_hi32 = (uint32_t)((dma_coherent_handle >> 16) >> 16);
 		writel(cdb_phyaddr, &reg->msgcode_rwbuffer[5]);
 		writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[6]);
-		writel(acb->roundup_ccbsize, &reg->msgcode_rwbuffer[7]);
+		writel(acb->ioqueue_size, &reg->msgcode_rwbuffer[7]);
 		writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
 		acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
 		writel(acb->out_doorbell, &reg->iobound_doorbell);
@@ -3927,7 +4027,7 @@
 	writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0);
 	if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
 		printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
-				rebulid' timeout \n", acb->host->host_no);
+				rebuild' timeout \n", acb->host->host_no);
 	}
 }
 
@@ -3938,7 +4038,7 @@
 	writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell);
 	if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
 		printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
-				rebulid' timeout \n",acb->host->host_no);
+				rebuild' timeout \n",acb->host->host_no);
 	}
 }
 
@@ -3950,7 +4050,7 @@
 	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &phbcmu->inbound_doorbell);
 	if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
 		printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
-				rebulid' timeout \n", pACB->host->host_no);
+				rebuild' timeout \n", pACB->host->host_no);
 	}
 	return;
 }
@@ -3963,7 +4063,7 @@
 	writel(ARCMSR_INBOUND_MESG0_START_BGRB, pmu->inbound_msgaddr0);
 	if (!arcmsr_hbaD_wait_msgint_ready(pACB)) {
 		pr_notice("arcmsr%d: wait 'start adapter "
-			"background rebulid' timeout\n", pACB->host->host_no);
+			"background rebuild' timeout\n", pACB->host->host_no);
 	}
 }
 
@@ -3977,7 +4077,7 @@
 	writel(pACB->out_doorbell, &pmu->iobound_doorbell);
 	if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
 		pr_notice("arcmsr%d: wait 'start adapter "
-			"background rebulid' timeout \n", pACB->host->host_no);
+			"background rebuild' timeout \n", pACB->host->host_no);
 	}
 }
 
@@ -4135,9 +4235,9 @@
 		pci_read_config_byte(acb->pdev, i, &value[i]);
 	}
 	/* hardware reset signal */
-	if ((acb->dev_id == 0x1680)) {
+	if (acb->dev_id == 0x1680) {
 		writel(ARCMSR_ARC1680_BUS_RESET, &pmuA->reserved1[0]);
-	} else if ((acb->dev_id == 0x1880)) {
+	} else if (acb->dev_id == 0x1880) {
 		do {
 			count++;
 			writel(0xF, &pmuC->write_sequence);
@@ -4161,7 +4261,7 @@
 		} while (((readl(&pmuE->host_diagnostic_3xxx) &
 			ARCMSR_ARC1884_DiagWrite_ENABLE) == 0) && (count < 5));
 		writel(ARCMSR_ARC188X_RESET_ADAPTER, &pmuE->host_diagnostic_3xxx);
-	} else if ((acb->dev_id == 0x1214)) {
+	} else if (acb->dev_id == 0x1214) {
 		writel(0x20, pmuD->reset_request);
 	} else {
 		pci_write_config_byte(acb->pdev, 0x84, 0x20);
diff --git a/drivers/scsi/arm/Kconfig b/drivers/scsi/arm/Kconfig
index cfd172a..f34badc 100644
--- a/drivers/scsi/arm/Kconfig
+++ b/drivers/scsi/arm/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 #
 # SCSI driver configuration for Acorn
 #
diff --git a/drivers/scsi/arm/acornscsi-io.S b/drivers/scsi/arm/acornscsi-io.S
index 22171b2..fdd7237 100644
--- a/drivers/scsi/arm/acornscsi-io.S
+++ b/drivers/scsi/arm/acornscsi-io.S
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  *  linux/drivers/acorn/scsi/acornscsi-io.S: Acorn SCSI card IO
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 #include <linux/linkage.h>
 
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index 421fe86..d12dd89 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  *  linux/drivers/acorn/scsi/acornscsi.c
  *
  *  Acorn SCSI 3 driver
  *  By R.M.King.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
  * Abandoned using the Select and Transfer command since there were
  * some nasty races between our software and the target devices that
  * were not easy to solve, and the device errata had a lot of entries
@@ -2890,7 +2887,7 @@
 	.this_id		= 7,
 	.sg_tablesize		= SG_ALL,
 	.cmd_per_lun		= 2,
-	.use_clustering		= DISABLE_CLUSTERING,
+	.dma_boundary		= PAGE_SIZE - 1,
 	.proc_name		= "acornscsi",
 };
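The .use_clustering field disappeared from struct scsi_host_template during the v5.0 cycle. Drivers that previously set DISABLE_CLUSTERING get the same behaviour by capping segment merging at a page boundary, which is exactly the substitution these hunks make; drivers that used ENABLE_CLUSTERING (powertec and atp870u below) simply drop the line, since merging is now the default. A sketch of the converted template fragment for a hypothetical driver:

static struct scsi_host_template example_template = {
	.name		= "example",
	.sg_tablesize	= SG_ALL,
	/* was: .use_clustering = DISABLE_CLUSTERING */
	.dma_boundary	= PAGE_SIZE - 1,	/* never merge across a page */
};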
 
diff --git a/drivers/scsi/arm/acornscsi.h b/drivers/scsi/arm/acornscsi.h
index 01bc715..376c76b 100644
--- a/drivers/scsi/arm/acornscsi.h
+++ b/drivers/scsi/arm/acornscsi.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  *  linux/drivers/acorn/scsi/acornscsi.h
  *
  *  Copyright (C) 1997 Russell King
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
  *  Acorn SCSI driver
  */
 #ifndef ACORNSCSI_H
diff --git a/drivers/scsi/arm/arxescsi.c b/drivers/scsi/arm/arxescsi.c
index 3110736..5914141 100644
--- a/drivers/scsi/arm/arxescsi.c
+++ b/drivers/scsi/arm/arxescsi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * linux/drivers/scsi/arm/arxescsi.c
  *
@@ -245,7 +246,7 @@
 	.can_queue			= 0,
 	.this_id			= 7,
 	.sg_tablesize			= SG_ALL,
-	.use_clustering			= DISABLE_CLUSTERING,
+	.dma_boundary			= PAGE_SIZE - 1,
 	.proc_name			= "arxescsi",
 };
 
diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c
index ae1d809..3fd9443 100644
--- a/drivers/scsi/arm/cumana_1.c
+++ b/drivers/scsi/arm/cumana_1.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Generic NCR5380 driver
  *
@@ -221,10 +222,10 @@
 	.this_id		= 7,
 	.sg_tablesize		= SG_ALL,
 	.cmd_per_lun		= 2,
-	.use_clustering		= DISABLE_CLUSTERING,
 	.proc_name		= "CumanaSCSI-1",
 	.cmd_size		= NCR5380_CMD_SIZE,
 	.max_sectors		= 128,
+	.dma_boundary		= PAGE_SIZE - 1,
 };
 
 static int cumanascsi1_probe(struct expansion_card *ec,
diff --git a/drivers/scsi/arm/cumana_2.c b/drivers/scsi/arm/cumana_2.c
index edce5f3..a1f3e9e 100644
--- a/drivers/scsi/arm/cumana_2.c
+++ b/drivers/scsi/arm/cumana_2.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  *  linux/drivers/acorn/scsi/cumana_2.c
  *
  *  Copyright (C) 1997-2005 Russell King
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
  *  Changelog:
  *   30-08-1997	RMK	0.0.0	Created, READONLY version.
  *   22-01-1998	RMK	0.0.1	Updated to 2.1.80.
@@ -367,7 +364,6 @@
 	.this_id			= 7,
 	.sg_tablesize			= SG_MAX_SEGMENTS,
 	.dma_boundary			= IOMD_DMA_BOUNDARY,
-	.use_clustering			= DISABLE_CLUSTERING,
 	.proc_name			= "cumanascsi2",
 };
 
diff --git a/drivers/scsi/arm/eesox.c b/drivers/scsi/arm/eesox.c
index e93e047..134f040 100644
--- a/drivers/scsi/arm/eesox.c
+++ b/drivers/scsi/arm/eesox.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  *  linux/drivers/acorn/scsi/eesox.c
  *
  *  Copyright (C) 1997-2005 Russell King
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
  *  This driver is based on experimentation.  Hence, it may have made
  *  assumptions about the particular card that I have available, and
  *  may not be reliable!
@@ -486,7 +483,6 @@
 	.this_id			= 7,
 	.sg_tablesize			= SG_MAX_SEGMENTS,
 	.dma_boundary			= IOMD_DMA_BOUNDARY,
-	.use_clustering			= DISABLE_CLUSTERING,
 	.proc_name			= "eesox",
 };
 
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index 27bda2b..6c68c23 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  *  linux/drivers/acorn/scsi/fas216.c
  *
  *  Copyright (C) 1997-2003 Russell King
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
  * Based on information in qlogicfas.c by Tom Zerucha, Michael Griffith, and
  * other sources, including:
  *   the AMD Am53CF94 data sheet
@@ -606,6 +603,7 @@
 		msgqueue_flush(&info->scsi.msgs);
 		msgqueue_addmsg(&info->scsi.msgs, 1, MESSAGE_REJECT);
 		info->scsi.phase = PHASE_MSGOUT_EXPECT;
+		/* fall through */
 
 	case async:
 		dev->period = info->ifcfg.asyncperiod / 4;
@@ -918,6 +916,7 @@
 			fas216_done(info, DID_ABORT);
 			break;
 		}
+		/* else, fall through */
 
 	default:				/* huh?					*/
 		printk(KERN_ERR "scsi%d.%c: unexpected disconnect in phase %s\n",
@@ -1414,6 +1413,8 @@
 	case STATE(STAT_STATUS, PHASE_DATAOUT): /* Data Out     -> Status       */
 	case STATE(STAT_STATUS, PHASE_DATAIN):  /* Data In      -> Status       */
 		fas216_stoptransfer(info);
+		/* fall through */
+
 	case STATE(STAT_STATUS, PHASE_SELSTEPS):/* Sel w/ steps -> Status       */
 	case STATE(STAT_STATUS, PHASE_MSGOUT):  /* Message Out  -> Status       */
 	case STATE(STAT_STATUS, PHASE_COMMAND): /* Command      -> Status       */
@@ -1425,6 +1426,8 @@
 	case STATE(STAT_MESGIN, PHASE_DATAOUT): /* Data Out     -> Message In   */
 	case STATE(STAT_MESGIN, PHASE_DATAIN):  /* Data In      -> Message In   */
 		fas216_stoptransfer(info);
+		/* fall through */
+
 	case STATE(STAT_MESGIN, PHASE_COMMAND):	/* Command	-> Message In	*/
 	case STATE(STAT_MESGIN, PHASE_SELSTEPS):/* Sel w/ steps -> Message In   */
 	case STATE(STAT_MESGIN, PHASE_MSGOUT):  /* Message Out  -> Message In   */
@@ -1578,6 +1581,7 @@
 			fas216_message(info);
 			break;
 		}
+		/* else, fall through */
 
 	default:
 		fas216_log(info, 0, "internal phase %s for function done?"
@@ -1960,6 +1964,7 @@
 	switch (where_from) {
 	case TYPE_QUEUE:
 		fas216_allocate_tag(info, SCpnt);
+		/* fall through */
 	case TYPE_OTHER:
 		fas216_start_command(info, SCpnt);
 		break;
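The /* fall through */ comments threaded through fas216.c are for GCC's -Wimplicit-fallthrough, which this kernel series enables globally; a comment matching the expected pattern both silences the warning and documents that the missing break is deliberate. A minimal sketch with hypothetical helpers:

	switch (phase) {
	case PHASE_SELSTEPS:
		prepare_transfer();	/* hypothetical helper */
		/* fall through */
	case PHASE_COMMAND:
		send_command();		/* hypothetical helper */
		break;
	}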
diff --git a/drivers/scsi/arm/fas216.h b/drivers/scsi/arm/fas216.h
index c57c16e..847413c 100644
--- a/drivers/scsi/arm/fas216.h
+++ b/drivers/scsi/arm/fas216.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  *  linux/drivers/acorn/scsi/fas216.h
  *
  *  Copyright (C) 1997-2000 Russell King
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
  *  FAS216 generic driver
  */
 #ifndef FAS216_H
diff --git a/drivers/scsi/arm/msgqueue.c b/drivers/scsi/arm/msgqueue.c
index 7c95c75..5811583 100644
--- a/drivers/scsi/arm/msgqueue.c
+++ b/drivers/scsi/arm/msgqueue.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  *  linux/drivers/acorn/scsi/msgqueue.c
  *
  *  Copyright (C) 1997-1998 Russell King
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
  *  message queue handling
  */
 #include <linux/module.h>
diff --git a/drivers/scsi/arm/msgqueue.h b/drivers/scsi/arm/msgqueue.h
index 41c7333..4bcc400 100644
--- a/drivers/scsi/arm/msgqueue.h
+++ b/drivers/scsi/arm/msgqueue.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  *  linux/drivers/acorn/scsi/msgqueue.h
  *
  *  Copyright (C) 1997 Russell King
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
  *  message queue handling
  */
 #ifndef MSGQUEUE_H
diff --git a/drivers/scsi/arm/oak.c b/drivers/scsi/arm/oak.c
index 05b7f75..7c9d361 100644
--- a/drivers/scsi/arm/oak.c
+++ b/drivers/scsi/arm/oak.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Oak Generic NCR5380 driver
  *
@@ -110,7 +111,7 @@
 	.this_id		= 7,
 	.sg_tablesize		= SG_ALL,
 	.cmd_per_lun		= 2,
-	.use_clustering		= DISABLE_CLUSTERING,
+	.dma_boundary		= PAGE_SIZE - 1,
 	.proc_name		= "oakscsi",
 	.cmd_size		= NCR5380_CMD_SIZE,
 	.max_sectors		= 128,
diff --git a/drivers/scsi/arm/powertec.c b/drivers/scsi/arm/powertec.c
index 79aa889..c795537 100644
--- a/drivers/scsi/arm/powertec.c
+++ b/drivers/scsi/arm/powertec.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  *  linux/drivers/acorn/scsi/powertec.c
  *
  *  Copyright (C) 1997-2005 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 #include <linux/module.h>
 #include <linux/blkdev.h>
@@ -294,7 +291,6 @@
 	.sg_tablesize			= SG_MAX_SEGMENTS,
 	.dma_boundary			= IOMD_DMA_BOUNDARY,
 	.cmd_per_lun			= 2,
-	.use_clustering			= ENABLE_CLUSTERING,
 	.proc_name			= "powertec",
 };
 
diff --git a/drivers/scsi/arm/queue.c b/drivers/scsi/arm/queue.c
index 996dfe9..e5559f2 100644
--- a/drivers/scsi/arm/queue.c
+++ b/drivers/scsi/arm/queue.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  *  linux/drivers/acorn/scsi/queue.c: queue handling primitives
  *
  *  Copyright (C) 1997-2000 Russell King
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
  *  Changelog:
  *   15-Sep-1997 RMK	Created.
  *   11-Oct-1997 RMK	Corrected problem with queue_remove_exclude
diff --git a/drivers/scsi/arm/queue.h b/drivers/scsi/arm/queue.h
index 3c519c9..cb51379 100644
--- a/drivers/scsi/arm/queue.h
+++ b/drivers/scsi/arm/queue.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  *  linux/drivers/acorn/scsi/queue.h: queue handling
  *
  *  Copyright (C) 1997 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 #ifndef QUEUE_H
 #define QUEUE_H
diff --git a/drivers/scsi/arm/scsi.h b/drivers/scsi/arm/scsi.h
index 138a521..4d5ff7b 100644
--- a/drivers/scsi/arm/scsi.h
+++ b/drivers/scsi/arm/scsi.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  *  linux/drivers/acorn/scsi/scsi.h
  *
  *  Copyright (C) 2002 Russell King
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
  *  Commonly used scsi driver functions.
  */
 
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index 89f5154..e809493 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -714,7 +714,7 @@
 	.eh_host_reset_handler	= atari_scsi_host_reset,
 	.this_id		= 7,
 	.cmd_per_lun		= 2,
-	.use_clustering		= DISABLE_CLUSTERING,
+	.dma_boundary		= PAGE_SIZE - 1,
 	.cmd_size		= NCR5380_CMD_SIZE,
 };
 
@@ -757,15 +757,17 @@
 
 	if (setup_hostid >= 0) {
 		atari_scsi_template.this_id = setup_hostid & 7;
-	} else {
+	} else if (IS_REACHABLE(CONFIG_NVRAM)) {
 		/* Test if a host id is set in the NVRam */
-		if (ATARIHW_PRESENT(TT_CLK) && nvram_check_checksum()) {
-			unsigned char b = nvram_read_byte(16);
+		if (ATARIHW_PRESENT(TT_CLK)) {
+			unsigned char b;
+			loff_t offset = 16;
+			ssize_t count = nvram_read(&b, 1, &offset);
 
 			/* Arbitration enabled? (for TOS)
 			 * If yes, use configured host ID
 			 */
-			if (b & 0x80)
+			if ((count == 1) && (b & 0x80))
 				atari_scsi_template.this_id = b & 7;
 		}
 	}
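nvram_check_checksum() and nvram_read_byte() are gone from the generic NVRAM API; reads now go through nvram_read(buf, count, ppos), with checksum validation handled inside the NVRAM driver, so callers only need to check the returned byte count, as above. Usage in isolation, assuming CONFIG_NVRAM is reachable:

	unsigned char b;
	loff_t offset = 16;	/* NVRAM byte 16 holds the SCSI ID bits */

	if (nvram_read(&b, 1, &offset) == 1 && (b & 0x80))
		atari_scsi_template.this_id = b & 7;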
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index 8996d23..e41f0bb 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /* 
  *  Copyright (C) 1997	Wu Ching Chen
  *  2.1.x update (C) 1998  Krzysztof G. Baranowski
@@ -194,12 +195,11 @@
 				((unsigned char *) &adrcnt)[2] = atp_readb_io(dev, c, 0x12);
 				((unsigned char *) &adrcnt)[1] = atp_readb_io(dev, c, 0x13);
 				((unsigned char *) &adrcnt)[0] = atp_readb_io(dev, c, 0x14);
-				if (dev->id[c][target_id].last_len != adrcnt)
-				{
-			   		k = dev->id[c][target_id].last_len;
+				if (dev->id[c][target_id].last_len != adrcnt) {
+					k = dev->id[c][target_id].last_len;
 			   		k -= adrcnt;
 			   		dev->id[c][target_id].tran_len = k;			   
-			   	dev->id[c][target_id].last_len = adrcnt;			   
+					dev->id[c][target_id].last_len = adrcnt;
 				}
 #ifdef ED_DBGP
 				printk("dev->id[c][target_id].last_len = %d dev->id[c][target_id].tran_len = %d\n",dev->id[c][target_id].last_len,dev->id[c][target_id].tran_len);
@@ -1193,7 +1193,7 @@
 		for (k = 0; k < 16; k++) {
 			if (!atp_dev->id[j][k].prd_table)
 				continue;
-			pci_free_consistent(atp_dev->pdev, 1024, atp_dev->id[j][k].prd_table, atp_dev->id[j][k].prd_bus);
+			dma_free_coherent(&atp_dev->pdev->dev, 1024, atp_dev->id[j][k].prd_table, atp_dev->id[j][k].prd_bus);
 			atp_dev->id[j][k].prd_table = NULL;
 		}
 	}
@@ -1205,7 +1205,7 @@
 	int c,k;
 	for(c=0;c < 2;c++) {
 	   	for(k=0;k<16;k++) {
-	   			atp_dev->id[c][k].prd_table = pci_alloc_consistent(atp_dev->pdev, 1024, &(atp_dev->id[c][k].prd_bus));
+				atp_dev->id[c][k].prd_table = dma_alloc_coherent(&atp_dev->pdev->dev, 1024, &(atp_dev->id[c][k].prd_bus), GFP_KERNEL);
 	   			if (!atp_dev->id[c][k].prd_table) {
 	   				printk("atp870u_init_tables fail\n");
 				atp870u_free_tables(host);
@@ -1509,7 +1509,7 @@
 	if (err)
 		goto fail;
 
-	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
                 printk(KERN_ERR "atp870u: DMA mask required but not available.\n");
                 err = -EIO;
                 goto disable_device;
@@ -1681,7 +1681,6 @@
      .can_queue         	= qcnt			/* can_queue */,
      .this_id           	= 7			/* SCSI ID */,
      .sg_tablesize      	= ATP870U_SCATTER	/*SG_ALL*/ /*SG_NONE*/,
-     .use_clustering    	= ENABLE_CLUSTERING,
      .max_sectors		= ATP870U_MAX_SECTORS,
 };
 
diff --git a/drivers/scsi/be2iscsi/Kconfig b/drivers/scsi/be2iscsi/Kconfig
index bad5f32..958c9b4 100644
--- a/drivers/scsi/be2iscsi/Kconfig
+++ b/drivers/scsi/be2iscsi/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 config BE2ISCSI
 	tristate "Emulex 10Gbps iSCSI - BladeEngine 2"
 	depends on PCI && SCSI && NET
diff --git a/drivers/scsi/be2iscsi/Makefile b/drivers/scsi/be2iscsi/Makefile
index d0488ea..9108853 100644
--- a/drivers/scsi/be2iscsi/Makefile
+++ b/drivers/scsi/be2iscsi/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 #
 # Makefile to build the iSCSI driver for Emulex OneConnect.
 #
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index e035acf..4c58a02 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -1,15 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright 2017 Broadcom. All Rights Reserved.
  * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
  *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation. The full GNU General
- * Public License is included in this distribution in the file called COPYING.
- *
  * Contact Information:
  * linux-drivers@broadcom.com
- *
  */
 
 #ifndef BEISCSI_H
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index c10aac4..69b1a80 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -1,15 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright 2017 Broadcom. All Rights Reserved.
  * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
  *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation. The full GNU General
- * Public License is included in this distribution in the file called COPYING.
- *
  * Contact Information:
  * linux-drivers@broadcom.com
- *
  */
 
 #include <scsi/iscsi_proto.h>
@@ -520,7 +515,7 @@
 		 **/
 		tag_mem = &ctrl->ptag_state[tag].tag_mem_state;
 		if (tag_mem->size) {
-			pci_free_consistent(ctrl->pdev, tag_mem->size,
+			dma_free_coherent(&ctrl->pdev->dev, tag_mem->size,
 					tag_mem->va, tag_mem->dma);
 			tag_mem->size = 0;
 		}
@@ -963,7 +958,7 @@
  * @ctrl: ptr to ctrl_info
  * @cq: Completion Queue
  * @dq: Default Queue
- * @lenght: ring size
+ * @length: ring size
  * @entry_size: size of each entry in DEFQ
  * @is_header: Header or Data DEFQ
  * @ulp_num: Bind to which ULP
@@ -1269,12 +1264,12 @@
 	struct be_sge *sge = nonembedded_sgl(wrb);
 	int status = 0;
 
-	nonemb_cmd.va = pci_alloc_consistent(ctrl->pdev,
+	nonemb_cmd.va = dma_alloc_coherent(&ctrl->pdev->dev,
 				sizeof(struct be_mgmt_controller_attributes),
-				&nonemb_cmd.dma);
+				&nonemb_cmd.dma, GFP_KERNEL);
 	if (nonemb_cmd.va == NULL) {
 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-			    "BG_%d : pci_alloc_consistent failed in %s\n",
+			    "BG_%d : dma_alloc_coherent failed in %s\n",
 			    __func__);
 		return -ENOMEM;
 	}
@@ -1314,7 +1309,7 @@
 			    "BG_%d :  Failed in beiscsi_check_supported_fw\n");
 	mutex_unlock(&ctrl->mbox_lock);
 	if (nonemb_cmd.va)
-		pci_free_consistent(ctrl->pdev, nonemb_cmd.size,
+		dma_free_coherent(&ctrl->pdev->dev, nonemb_cmd.size,
 				    nonemb_cmd.va, nonemb_cmd.dma);
 
 	return status;
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index 6f05d1d..063dccc 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -1,15 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright 2017 Broadcom. All Rights Reserved.
  * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
  *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation. The full GNU General
- * Public License is included in this distribution in the file called COPYING.
- *
  * Contact Information:
  * linux-drivers@broadcom.com
- *
  */
 
 #ifndef BEISCSI_CMDS_H
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index c8f0a21..2058d50 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * This file is part of the Emulex Linux Device Driver for Enterprise iSCSI
  * Host Bus Adapters. Refer to the README file included with this package
@@ -6,13 +7,8 @@
  * Copyright (c) 2018 Broadcom. All Rights Reserved.
  * The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
  *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as published
- * by the Free Software Foundation.
- *
  * Contact Information:
  * linux-drivers@broadcom.com
- *
  */
 
 #include <scsi/libiscsi.h>
@@ -679,6 +675,7 @@
 	case ISCSI_PARAM_MAX_XMIT_DLENGTH:
 		if (conn->max_xmit_dlength > 65536)
 			conn->max_xmit_dlength = 65536;
+		/* fall through */
 	default:
 		return 0;
 	}
@@ -771,7 +768,7 @@
 			status = beiscsi_get_initiator_name(phba, buf, false);
 			if (status < 0) {
 				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
-					    "BS_%d : Retreiving Initiator Name Failed\n");
+					    "BS_%d : Retrieving Initiator Name Failed\n");
 				status = 0;
 			}
 		}
@@ -1071,9 +1068,9 @@
 	else
 		req_memsize = sizeof(struct tcp_connect_and_offload_in_v1);
 
-	nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
+	nonemb_cmd.va = dma_alloc_coherent(&phba->ctrl.pdev->dev,
 				req_memsize,
-				&nonemb_cmd.dma);
+				&nonemb_cmd.dma, GFP_KERNEL);
 	if (nonemb_cmd.va == NULL) {
 
 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
@@ -1091,7 +1088,7 @@
 			    "BS_%d : mgmt_open_connection Failed for cid=%d\n",
 			    beiscsi_ep->ep_cid);
 
-		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+		dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
 				    nonemb_cmd.va, nonemb_cmd.dma);
 		beiscsi_free_ep(beiscsi_ep);
 		return -EAGAIN;
@@ -1104,8 +1101,9 @@
 			    "BS_%d : mgmt_open_connection Failed");
 
 		if (ret != -EBUSY)
-			pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
-					    nonemb_cmd.va, nonemb_cmd.dma);
+			dma_free_coherent(&phba->ctrl.pdev->dev,
+					nonemb_cmd.size, nonemb_cmd.va,
+					nonemb_cmd.dma);
 
 		beiscsi_free_ep(beiscsi_ep);
 		return ret;
@@ -1118,7 +1116,7 @@
 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
 		    "BS_%d : mgmt_open_connection Success\n");
 
-	pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+	dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
 			    nonemb_cmd.va, nonemb_cmd.dma);
 	return 0;
 }
diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h
index f41dfda..8c88657 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.h
+++ b/drivers/scsi/be2iscsi/be_iscsi.h
@@ -1,15 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright 2017 Broadcom. All Rights Reserved.
  * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
  *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation. The full GNU General
- * Public License is included in this distribution in the file called COPYING.
- *
  * Contact Information:
  * linux-drivers@broadcom.com
- *
  */
 
 #ifndef _BE_ISCSI_
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 3660059..0760d0b 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -214,12 +214,6 @@
 	"CXN_KILLED_IMM_DATA_RCVD"
 };
 
-static int beiscsi_slave_configure(struct scsi_device *sdev)
-{
-	blk_queue_max_segment_size(sdev->request_queue, 65536);
-	return 0;
-}
-
 static int beiscsi_eh_abort(struct scsi_cmnd *sc)
 {
 	struct iscsi_task *abrt_task = (struct iscsi_task *)sc->SCp.ptr;
@@ -393,7 +387,6 @@
 	.proc_name = DRV_NAME,
 	.queuecommand = iscsi_queuecommand,
 	.change_queue_depth = scsi_change_queue_depth,
-	.slave_configure = beiscsi_slave_configure,
 	.target_alloc = iscsi_target_alloc,
 	.eh_timed_out = iscsi_eh_cmd_timed_out,
 	.eh_abort_handler = beiscsi_eh_abort,
@@ -404,8 +397,8 @@
 	.can_queue = BE2_IO_DEPTH,
 	.this_id = -1,
 	.max_sectors = BEISCSI_MAX_SECTORS,
+	.max_segment_size = 65536,
 	.cmd_per_lun = BEISCSI_CMD_PER_LUN,
-	.use_clustering = ENABLE_CLUSTERING,
 	.vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID,
 	.track_queue_depth = 1,
 };
@@ -511,18 +504,9 @@
 	}
 
 	pci_set_master(pcidev);
-	ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
+	ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64));
 	if (ret) {
-		ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
-		if (ret) {
-			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
-			goto pci_region_release;
-		} else {
-			ret = pci_set_consistent_dma_mask(pcidev,
-							  DMA_BIT_MASK(32));
-		}
-	} else {
-		ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64));
+		ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32));
 		if (ret) {
 			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
 			goto pci_region_release;
@@ -550,9 +534,8 @@
 	if (status)
 		return status;
 	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
-	mbox_mem_alloc->va = pci_alloc_consistent(pdev,
-						  mbox_mem_alloc->size,
-						  &mbox_mem_alloc->dma);
+	mbox_mem_alloc->va = dma_alloc_coherent(&pdev->dev,
+			mbox_mem_alloc->size, &mbox_mem_alloc->dma, GFP_KERNEL);
 	if (!mbox_mem_alloc->va) {
 		beiscsi_unmap_pci_function(phba);
 		return -ENOMEM;
@@ -1549,6 +1532,7 @@
 		break;
 	case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
 		error = 1;
+		/* fall through */
 	case UNSOL_DATA_NOTIFY:
 		pasync_handle = pasync_ctx->async_entry[ci].data;
 		break;
@@ -1866,7 +1850,6 @@
 {
 	struct be_queue_info *cq;
 	struct sol_cqe *sol;
-	struct dmsg_cqe *dmsg;
 	unsigned int total = 0;
 	unsigned int num_processed = 0;
 	unsigned short code = 0, cid = 0;
@@ -1939,7 +1922,6 @@
 				    "BM_%d : Received %s[%d] on CID : %d\n",
 				    cqe_desc[code], code, cid);
 
-			dmsg = (struct dmsg_cqe *)sol;
 			hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
 			break;
 		case UNSOL_HDR_NOTIFY:
@@ -2304,11 +2286,11 @@
 
 		/* Map addr only if there is data_count */
 		if (dsp_value) {
-			io_task->mtask_addr = pci_map_single(phba->pcidev,
+			io_task->mtask_addr = dma_map_single(&phba->pcidev->dev,
 							     task->data,
 							     task->data_count,
-							     PCI_DMA_TODEVICE);
-			if (pci_dma_mapping_error(phba->pcidev,
+							     DMA_TO_DEVICE);
+			if (dma_mapping_error(&phba->pcidev->dev,
 						  io_task->mtask_addr))
 				return -ENOMEM;
 			io_task->mtask_data_count = task->data_count;
@@ -2519,10 +2501,9 @@
 		       BEISCSI_MAX_FRAGS_INIT);
 		curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
 		do {
-			mem_arr->virtual_address = pci_alloc_consistent(
-							phba->pcidev,
-							curr_alloc_size,
-							&bus_add);
+			mem_arr->virtual_address =
+				dma_alloc_coherent(&phba->pcidev->dev,
+					curr_alloc_size, &bus_add, GFP_KERNEL);
 			if (!mem_arr->virtual_address) {
 				if (curr_alloc_size <= BE_MIN_MEM_SIZE)
 					goto free_mem;
@@ -2560,7 +2541,7 @@
 	mem_descr->num_elements = j;
 	while ((i) || (j)) {
 		for (j = mem_descr->num_elements; j > 0; j--) {
-			pci_free_consistent(phba->pcidev,
+			dma_free_coherent(&phba->pcidev->dev,
 					    mem_descr->mem_array[j - 1].size,
 					    mem_descr->mem_array[j - 1].
 					    virtual_address,
@@ -3031,9 +3012,9 @@
 		eq = &phwi_context->be_eq[i].q;
 		mem = &eq->dma_mem;
 		phwi_context->be_eq[i].phba = phba;
-		eq_vaddress = pci_alloc_consistent(phba->pcidev,
+		eq_vaddress = dma_alloc_coherent(&phba->pcidev->dev,
 						   num_eq_pages * PAGE_SIZE,
-						   &paddr);
+						   &paddr, GFP_KERNEL);
 		if (!eq_vaddress) {
 			ret = -ENOMEM;
 			goto create_eq_error;
@@ -3069,7 +3050,7 @@
 		eq = &phwi_context->be_eq[i].q;
 		mem = &eq->dma_mem;
 		if (mem->va)
-			pci_free_consistent(phba->pcidev, num_eq_pages
+			dma_free_coherent(&phba->pcidev->dev, num_eq_pages
 					    * PAGE_SIZE,
 					    mem->va, mem->dma);
 	}
@@ -3097,9 +3078,9 @@
 		pbe_eq->cq = cq;
 		pbe_eq->phba = phba;
 		mem = &cq->dma_mem;
-		cq_vaddress = pci_alloc_consistent(phba->pcidev,
+		cq_vaddress = dma_alloc_coherent(&phba->pcidev->dev,
 						   num_cq_pages * PAGE_SIZE,
-						   &paddr);
+						   &paddr, GFP_KERNEL);
 		if (!cq_vaddress) {
 			ret = -ENOMEM;
 			goto create_cq_error;
@@ -3134,7 +3115,7 @@
 		cq = &phwi_context->be_cq[i];
 		mem = &cq->dma_mem;
 		if (mem->va)
-			pci_free_consistent(phba->pcidev, num_cq_pages
+			dma_free_coherent(&phba->pcidev->dev, num_cq_pages
 					    * PAGE_SIZE,
 					    mem->va, mem->dma);
 	}
@@ -3326,7 +3307,7 @@
 {
 	struct be_dma_mem *mem = &q->dma_mem;
 	if (mem->va) {
-		pci_free_consistent(phba->pcidev, mem->size,
+		dma_free_coherent(&phba->pcidev->dev, mem->size,
 			mem->va, mem->dma);
 		mem->va = NULL;
 	}
@@ -3341,7 +3322,8 @@
 	q->len = len;
 	q->entry_size = entry_size;
 	mem->size = len * entry_size;
-	mem->va = pci_zalloc_consistent(phba->pcidev, mem->size, &mem->dma);
+	mem->va = dma_alloc_coherent(&phba->pcidev->dev, mem->size, &mem->dma,
+				     GFP_KERNEL);
 	if (!mem->va)
 		return -ENOMEM;
 	return 0;
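pci_zalloc_consistent() goes away with the rest of the legacy PCI DMA wrappers, and no memset() replaces it: as of v5.0, dma_alloc_coherent() is specified to return zeroed memory, which is also why the dma_zalloc_coherent() calls earlier in this patch become plain dma_alloc_coherent(). The converted pair, sketched with the fields used above:

	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&phba->pcidev->dev, mem->size,
				     &mem->dma, GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;	/* buffer arrives already zeroed */

	/* and the matching teardown elsewhere: */
	dma_free_coherent(&phba->pcidev->dev, mem->size, mem->va, mem->dma);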
@@ -3479,7 +3461,7 @@
 			     &ctrl->ptag_state[tag].tag_state)) {
 			ptag_mem = &ctrl->ptag_state[tag].tag_mem_state;
 			if (ptag_mem->size) {
-				pci_free_consistent(ctrl->pdev,
+				dma_free_coherent(&ctrl->pdev->dev,
 						    ptag_mem->size,
 						    ptag_mem->va,
 						    ptag_mem->dma);
@@ -3585,7 +3567,7 @@
 
 	/* if eqid_count == 1 fall back to INTX */
 	if (enable_msix && nvec > 1) {
-		const struct irq_affinity desc = { .post_vectors = 1 };
+		struct irq_affinity desc = { .post_vectors = 1 };
 
 		if (pci_alloc_irq_vectors_affinity(phba->pcidev, 2, nvec,
 				PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc) < 0) {
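
Dropping const from the irq_affinity descriptor is forced by the reworked affinity API: since the interrupt-set rework, the core may write results (such as computed set sizes) back into the descriptor, so pci_alloc_irq_vectors_affinity() takes a non-const pointer. Sketch, assuming hypothetical pdev and nvec:

	struct irq_affinity desc = { .post_vectors = 1 };	/* must stay mutable */
	int got = pci_alloc_irq_vectors_affinity(pdev, 2, nvec,
			PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc);
	if (got < 0)
		return got;	/* caller may then fall back to INTx */
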
@@ -3880,7 +3862,7 @@
 	j = 0;
 	for (i = 0; i < SE_MEM_MAX; i++) {
 		for (j = mem_descr->num_elements; j > 0; j--) {
-			pci_free_consistent(phba->pcidev,
+			dma_free_coherent(&phba->pcidev->dev,
 			  mem_descr->mem_array[j - 1].size,
 			  mem_descr->mem_array[j - 1].virtual_address,
 			  (unsigned long)mem_descr->mem_array[j - 1].
@@ -4255,10 +4237,10 @@
 	}
 
 	if (io_task->mtask_addr) {
-		pci_unmap_single(phba->pcidev,
+		dma_unmap_single(&phba->pcidev->dev,
 				 io_task->mtask_addr,
 				 io_task->mtask_data_count,
-				 PCI_DMA_TODEVICE);
+				 DMA_TO_DEVICE);
 		io_task->mtask_addr = 0;
 	}
 }
@@ -4852,9 +4834,9 @@
 
 	switch (bsg_req->msgcode) {
 	case ISCSI_BSG_HST_VENDOR:
-		nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
+		nonemb_cmd.va = dma_alloc_coherent(&phba->ctrl.pdev->dev,
 					job->request_payload.payload_len,
-					&nonemb_cmd.dma);
+					&nonemb_cmd.dma, GFP_KERNEL);
 		if (nonemb_cmd.va == NULL) {
 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
 				    "BM_%d : Failed to allocate memory for "
@@ -4867,7 +4849,7 @@
 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
 				    "BM_%d : MBX Tag Allocation Failed\n");
 
-			pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+			dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
 					    nonemb_cmd.va, nonemb_cmd.dma);
 			return -EAGAIN;
 		}
@@ -4881,7 +4863,7 @@
 		if (!test_bit(BEISCSI_HBA_ONLINE, &phba->state)) {
 			clear_bit(MCC_TAG_STATE_RUNNING,
 				  &phba->ctrl.ptag_state[tag].tag_state);
-			pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+			dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
 					    nonemb_cmd.va, nonemb_cmd.dma);
 			return -EIO;
 		}
@@ -4898,7 +4880,7 @@
 		bsg_reply->result = status;
 		bsg_job_done(job, bsg_reply->result,
 			     bsg_reply->reply_payload_rcv_len);
-		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+		dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
 				    nonemb_cmd.va, nonemb_cmd.dma);
 		if (status || extd_status) {
 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
@@ -5529,7 +5511,6 @@
 		return PCI_ERS_RESULT_DISCONNECT;
 	}
 
-	pci_cleanup_aer_uncorrect_error_status(pdev);
 	return PCI_ERS_RESULT_RECOVERED;
 }
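
The pci_cleanup_aer_uncorrect_error_status() call goes away because, as of this kernel, the PCI core clears the AER uncorrectable status registers itself during error recovery; a ->slot_reset() handler now only reports PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT.
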
 
@@ -5755,7 +5736,7 @@
 	beiscsi_cleanup_port(phba);
 	beiscsi_free_mem(phba);
 free_port:
-	pci_free_consistent(phba->pcidev,
+	dma_free_coherent(&phba->pcidev->dev,
 			    phba->ctrl.mbox_mem_alloced.size,
 			    phba->ctrl.mbox_mem_alloced.va,
 			    phba->ctrl.mbox_mem_alloced.dma);
@@ -5799,7 +5780,7 @@
 
 	/* ctrl uninit */
 	beiscsi_unmap_pci_function(phba);
-	pci_free_consistent(phba->pcidev,
+	dma_free_coherent(&phba->pcidev->dev,
 			    phba->ctrl.mbox_mem_alloced.size,
 			    phba->ctrl.mbox_mem_alloced.va,
 			    phba->ctrl.mbox_mem_alloced.dma);
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 42bb6bd..98977c0 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -1,15 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright 2017 Broadcom. All Rights Reserved.
  * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
  *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation. The full GNU General
- * Public License is included in this distribution in the file called COPYING.
- *
  * Contact Information:
  * linux-drivers@broadcom.com
- *
  */
 
 #ifndef _BEISCSI_MAIN_
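
Every file in this series gets the same license treatment: the multi-paragraph GPL boilerplate is replaced by a one-line SPDX identifier. Per the kernel's license-rules documentation the comment style is significant; a sketch of the three forms used below:

	/* SPDX-License-Identifier: GPL-2.0-only */	<- .h files (block comment, safe for assembly includes)
	// SPDX-License-Identifier: GPL-2.0-only	<- .c files (C99 line comment, first line of the file)
	# SPDX-License-Identifier: GPL-2.0-only		<- Makefile/Kconfig fragments
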
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 8fdc07b..d4febaa 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -284,7 +284,7 @@
 		return rc;
 
 free_cmd:
-	pci_free_consistent(ctrl->pdev, nonemb_cmd->size,
+	dma_free_coherent(&ctrl->pdev->dev, nonemb_cmd->size,
 			    nonemb_cmd->va, nonemb_cmd->dma);
 	return rc;
 }
@@ -293,7 +293,8 @@
 				 struct be_dma_mem *cmd,
 				 u8 subsystem, u8 opcode, u32 size)
 {
-	cmd->va = pci_zalloc_consistent(phba->ctrl.pdev, size, &cmd->dma);
+	cmd->va = dma_alloc_coherent(&phba->ctrl.pdev->dev, size, &cmd->dma,
+				     GFP_KERNEL);
 	if (!cmd->va) {
 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
 			    "BG_%d : Failed to allocate memory for if info\n");
@@ -315,7 +316,7 @@
 	__beiscsi_mcc_compl_status(phba, tag, NULL, NULL);
 	tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state;
 	if (tag_mem->size) {
-		pci_free_consistent(phba->pcidev, tag_mem->size,
+		dma_free_coherent(&phba->pcidev->dev, tag_mem->size,
 				    tag_mem->va, tag_mem->dma);
 		tag_mem->size = 0;
 	}
@@ -761,7 +762,7 @@
 				    "BG_%d : Memory Allocation Failure\n");
 
 				/* Free the DMA memory for the IOCTL issuing */
-				pci_free_consistent(phba->ctrl.pdev,
+				dma_free_coherent(&phba->ctrl.pdev->dev,
 						    nonemb_cmd.size,
 						    nonemb_cmd.va,
 						    nonemb_cmd.dma);
@@ -780,7 +781,7 @@
 			ioctl_size += sizeof(struct be_cmd_req_hdr);
 
 			/* Free the previous allocated DMA memory */
-			pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+			dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
 					    nonemb_cmd.va,
 					    nonemb_cmd.dma);
 
@@ -869,7 +870,7 @@
 				      status);
 			boot_work = 0;
 		}
-		pci_free_consistent(phba->ctrl.pdev, bs->nonemb_cmd.size,
+		dma_free_coherent(&phba->ctrl.pdev->dev, bs->nonemb_cmd.size,
 				    bs->nonemb_cmd.va, bs->nonemb_cmd.dma);
 		bs->nonemb_cmd.va = NULL;
 		break;
@@ -1012,9 +1013,10 @@
 
 	nonemb_cmd = &phba->boot_struct.nonemb_cmd;
 	nonemb_cmd->size = sizeof(struct be_cmd_get_session_resp);
-	nonemb_cmd->va = pci_alloc_consistent(phba->ctrl.pdev,
+	nonemb_cmd->va = dma_alloc_coherent(&phba->ctrl.pdev->dev,
 					      nonemb_cmd->size,
-					      &nonemb_cmd->dma);
+					      &nonemb_cmd->dma,
+					      GFP_KERNEL);
 	if (!nonemb_cmd->va) {
 		mutex_unlock(&ctrl->mbox_lock);
 		return 0;
@@ -1508,9 +1510,9 @@
 		return -EINVAL;
 
 	nonemb_cmd.size = sizeof(union be_invldt_cmds_params);
-	nonemb_cmd.va = pci_zalloc_consistent(phba->ctrl.pdev,
-					      nonemb_cmd.size,
-					      &nonemb_cmd.dma);
+	nonemb_cmd.va = dma_alloc_coherent(&phba->ctrl.pdev->dev,
+					   nonemb_cmd.size, &nonemb_cmd.dma,
+					   GFP_KERNEL);
 	if (!nonemb_cmd.va) {
 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
 			    "BM_%d : invldt_cmds_params alloc failed\n");
@@ -1521,7 +1523,7 @@
 	wrb = alloc_mcc_wrb(phba, &tag);
 	if (!wrb) {
 		mutex_unlock(&ctrl->mbox_lock);
-		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+		dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
 				    nonemb_cmd.va, nonemb_cmd.dma);
 		return -ENOMEM;
 	}
@@ -1548,7 +1550,7 @@
 
 	rc = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd);
 	if (rc != -EBUSY)
-		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+		dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
 				    nonemb_cmd.va, nonemb_cmd.dma);
 	return rc;
 }
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
index 0b22c99..d108588 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.h
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -1,15 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright 2017 Broadcom. All Rights Reserved.
  * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
  *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation. The full GNU General
- * Public License is included in this distribution in the file called COPYING.
- *
  * Contact Information:
  * linux-drivers@broadcom.com
- *
  */
 
 #ifndef _BEISCSI_MGMT_
diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
index 0e119d8..7bd2ba1 100644
--- a/drivers/scsi/bfa/bfa.h
+++ b/drivers/scsi/bfa/bfa.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  * Copyright (c) 2014- QLogic Corporation.
@@ -5,15 +6,6 @@
  * www.qlogic.com
  *
  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
  */
 #ifndef __BFA_H__
 #define __BFA_H__
@@ -62,8 +54,7 @@
 			((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1); \
 		writel((__bfa)->iocfc.req_cq_pi[__reqq],		\
 			(__bfa)->iocfc.bfa_regs.cpe_q_pi[__reqq]);	\
-		mmiowb();      \
-	} while (0)
+		} while (0)
 
 #define bfa_rspq_pi(__bfa, __rspq)					\
 	(*(u32 *)((__bfa)->iocfc.rsp_cq_shadow_pi[__rspq].kva))
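
The mmiowb() calls after each doorbell writel() are deleted because, since v5.1, the ordering they provided is folded into spin_unlock() on the handful of architectures that need it, after which the driver-visible API was removed. The surviving pattern, assuming a hypothetical queue with a lock and doorbell register:

	spin_lock_irqsave(&q->lock, flags);
	writel(ci, q->doorbell);			/* posted MMIO write */
	spin_unlock_irqrestore(&q->lock, flags);	/* now implies mmiowb ordering */
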
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index 10a63be..0f554eb 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  * Copyright (c) 2014- QLogic Corporation.
@@ -5,15 +6,6 @@
  * www.qlogic.com
  *
  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
  */
 
 #include "bfad_drv.h"
diff --git a/drivers/scsi/bfa/bfa_cs.h b/drivers/scsi/bfa/bfa_cs.h
index 9685efc..6b606bf 100644
--- a/drivers/scsi/bfa/bfa_cs.h
+++ b/drivers/scsi/bfa/bfa_cs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  * Copyright (c) 2014- QLogic Corporation.
@@ -5,15 +6,6 @@
  * www.qlogic.com
  *
  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
  */
 
 /*
diff --git a/drivers/scsi/bfa/bfa_defs.h b/drivers/scsi/bfa/bfa_defs.h
index 5dc3782..6abd9f4 100644
--- a/drivers/scsi/bfa/bfa_defs.h
+++ b/drivers/scsi/bfa/bfa_defs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  * Copyright (c) 2014- QLogic Corporation.
@@ -5,15 +6,6 @@
  * www.qlogic.com
  *
  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
  */
 
 #ifndef __BFA_DEFS_H__
diff --git a/drivers/scsi/bfa/bfa_defs_fcs.h b/drivers/scsi/bfa/bfa_defs_fcs.h
index 5815a90..5e36620 100644
--- a/drivers/scsi/bfa/bfa_defs_fcs.h
+++ b/drivers/scsi/bfa/bfa_defs_fcs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  * Copyright (c) 2014- QLogic Corporation.
@@ -5,15 +6,6 @@
  * www.qlogic.com
  *
  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
  */
 
 #ifndef __BFA_DEFS_FCS_H__
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
index 3d0c96a..8439951 100644
--- a/drivers/scsi/bfa/bfa_defs_svc.h
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  * Copyright (c) 2014- QLogic Corporation.
@@ -5,15 +6,6 @@
  * www.qlogic.com
  *
  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
  */
 
 #ifndef __BFA_DEFS_SVC_H__
@@ -1453,7 +1445,7 @@
 struct bfa_aen_entry_s {
 	struct list_head	qe;
 	enum bfa_aen_category   aen_category;
-	u32                     aen_type;
+	int                     aen_type;
 	union bfa_aen_data_u    aen_data;
 	u64			aen_tv_sec;
 	u64			aen_tv_usec;
diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h
index 18b7304..b00fb24 100644
--- a/drivers/scsi/bfa/bfa_fc.h
+++ b/drivers/scsi/bfa/bfa_fc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  * Copyright (c) 2014- QLogic Corporation.
@@ -5,15 +6,6 @@
  * www.qlogic.com
  *
  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
  */
 
 #ifndef __BFA_FC_H__
diff --git a/drivers/scsi/bfa/bfa_fcbuild.c b/drivers/scsi/bfa/bfa_fcbuild.c
index d3b00a4..df18d9d 100644
--- a/drivers/scsi/bfa/bfa_fcbuild.c
+++ b/drivers/scsi/bfa/bfa_fcbuild.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  * Copyright (c) 2014- QLogic Corporation.
@@ -5,15 +6,6 @@
  * www.qlogic.com
  *
  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
  */
 /*
  * fcbuild.c - FC link service frame building and parsing routines
@@ -190,27 +182,6 @@
 	fchs->ox_id = ox_id;
 }
 
-enum fc_parse_status
-fc_els_rsp_parse(struct fchs_s *fchs, int len)
-{
-	struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
-	struct fc_ls_rjt_s *ls_rjt = (struct fc_ls_rjt_s *) els_cmd;
-
-	len = len;
-
-	switch (els_cmd->els_code) {
-	case FC_ELS_LS_RJT:
-		if (ls_rjt->reason_code == FC_LS_RJT_RSN_LOGICAL_BUSY)
-			return FC_PARSE_BUSY;
-		else
-			return FC_PARSE_FAILURE;
-
-	case FC_ELS_ACC:
-		return FC_PARSE_OK;
-	}
-	return FC_PARSE_OK;
-}
-
 static void
 fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id)
 {
@@ -831,18 +802,6 @@
 }
 
 u16
-fc_logo_rsp_parse(struct fchs_s *fchs, int len)
-{
-	struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
-
-	len = len;
-	if (els_cmd->els_code != FC_ELS_ACC)
-		return FC_PARSE_FAILURE;
-
-	return FC_PARSE_OK;
-}
-
-u16
 fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
 	       wwn_t port_name, wwn_t node_name, u16 pdu_size)
 {
@@ -908,40 +867,6 @@
 }
 
 u16
-fc_prlo_rsp_parse(struct fchs_s *fchs, int len)
-{
-	struct fc_prlo_acc_s *prlo = (struct fc_prlo_acc_s *) (fchs + 1);
-	int             num_pages = 0;
-	int             page = 0;
-
-	len = len;
-
-	if (prlo->command != FC_ELS_ACC)
-		return FC_PARSE_FAILURE;
-
-	num_pages = ((be16_to_cpu(prlo->payload_len)) - 4) / 16;
-
-	for (page = 0; page < num_pages; page++) {
-		if (prlo->prlo_acc_params[page].type != FC_TYPE_FCP)
-			return FC_PARSE_FAILURE;
-
-		if (prlo->prlo_acc_params[page].opa_valid != 0)
-			return FC_PARSE_FAILURE;
-
-		if (prlo->prlo_acc_params[page].rpa_valid != 0)
-			return FC_PARSE_FAILURE;
-
-		if (prlo->prlo_acc_params[page].orig_process_assc != 0)
-			return FC_PARSE_FAILURE;
-
-		if (prlo->prlo_acc_params[page].resp_process_assc != 0)
-			return FC_PARSE_FAILURE;
-	}
-	return FC_PARSE_OK;
-
-}
-
-u16
 fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
 	       int num_pages, enum fc_tprlo_type tprlo_type, u32 tpr_id)
 {
@@ -972,47 +897,6 @@
 }
 
 u16
-fc_tprlo_rsp_parse(struct fchs_s *fchs, int len)
-{
-	struct fc_tprlo_acc_s *tprlo = (struct fc_tprlo_acc_s *) (fchs + 1);
-	int             num_pages = 0;
-	int             page = 0;
-
-	len = len;
-
-	if (tprlo->command != FC_ELS_ACC)
-		return FC_PARSE_ACC_INVAL;
-
-	num_pages = (be16_to_cpu(tprlo->payload_len) - 4) / 16;
-
-	for (page = 0; page < num_pages; page++) {
-		if (tprlo->tprlo_acc_params[page].type != FC_TYPE_FCP)
-			return FC_PARSE_NOT_FCP;
-		if (tprlo->tprlo_acc_params[page].opa_valid != 0)
-			return FC_PARSE_OPAFLAG_INVAL;
-		if (tprlo->tprlo_acc_params[page].rpa_valid != 0)
-			return FC_PARSE_RPAFLAG_INVAL;
-		if (tprlo->tprlo_acc_params[page].orig_process_assc != 0)
-			return FC_PARSE_OPA_INVAL;
-		if (tprlo->tprlo_acc_params[page].resp_process_assc != 0)
-			return FC_PARSE_RPA_INVAL;
-	}
-	return FC_PARSE_OK;
-}
-
-enum fc_parse_status
-fc_rrq_rsp_parse(struct fchs_s *fchs, int len)
-{
-	struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
-
-	len = len;
-	if (els_cmd->els_code != FC_ELS_ACC)
-		return FC_PARSE_FAILURE;
-
-	return FC_PARSE_OK;
-}
-
-u16
 fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id,
 		u32 reason_code, u32 reason_expl)
 {
diff --git a/drivers/scsi/bfa/bfa_fcbuild.h b/drivers/scsi/bfa/bfa_fcbuild.h
index b109a88..49e0ee4 100644
--- a/drivers/scsi/bfa/bfa_fcbuild.h
+++ b/drivers/scsi/bfa/bfa_fcbuild.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  * Copyright (c) 2014- QLogic Corporation.
@@ -5,15 +6,6 @@
  * www.qlogic.com
  *
  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
  */
 /*
  * fcbuild.h - FC link service frame building and parsing routines
@@ -163,7 +155,6 @@
 
 u16        fc_rrq_build(struct fchs_s *buf, struct fc_rrq_s *rrq, u32 d_id,
 			     u32 s_id, u16 ox_id, u16 rrq_oxid);
-enum fc_parse_status fc_rrq_rsp_parse(struct fchs_s *buf, int len);
 
 u16        fc_rspnid_build(struct fchs_s *fchs, void *pld, u32 s_id,
 				u16 ox_id, u8 *name);
@@ -276,8 +267,6 @@
 void		fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
 					 __be16 ox_id);
 
-enum fc_parse_status	fc_els_rsp_parse(struct fchs_s *fchs, int len);
-
 enum fc_parse_status	fc_plogi_rsp_parse(struct fchs_s *fchs, int len,
 					wwn_t port_name);
 
@@ -297,8 +286,6 @@
 u16 fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc,
 		u32 d_id, u32 s_id, __be16 ox_id, int num_pages);
 
-u16 fc_logo_rsp_parse(struct fchs_s *fchs, int len);
-
 u16 fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
 		u16 ox_id, wwn_t port_name, wwn_t node_name,
 		u16 pdu_size);
@@ -308,14 +295,10 @@
 u16 fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
 		u16 ox_id, int num_pages);
 
-u16 fc_prlo_rsp_parse(struct fchs_s *fchs, int len);
-
 u16 fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
 		u16 ox_id, int num_pages, enum fc_tprlo_type tprlo_type,
 		u32 tpr_id);
 
-u16 fc_tprlo_rsp_parse(struct fchs_s *fchs, int len);
-
 u16 fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
 		__be16 ox_id, u32 reason_code, u32 reason_expl);
 
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index 2c85f5b..284baa3 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  * Copyright (c) 2014- QLogic Corporation.
@@ -5,15 +6,6 @@
  * www.qlogic.com
  *
  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
  */
 
 #include "bfad_drv.h"
@@ -2586,6 +2578,7 @@
 	case FCP_IODIR_RW:
 		bfa_stats(itnim, input_reqs);
 		bfa_stats(itnim, output_reqs);
+		/* fall through */
 	default:
 		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));
 	}
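
All the "!!! fall through !!!" comment variants are normalized to the exact form GCC's -Wimplicit-fallthrough=3 matches (this series predates the later fallthrough pseudo-keyword). Minimal sketch with hypothetical events and handlers:

	switch (event) {
	case EV_TIMEOUT:
		stop_timer();
		/* fall through */	/* deliberately also runs the EV_FAIL arm */
	case EV_FAIL:
		enter_failed_state();
		break;
	}
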
@@ -2820,6 +2813,7 @@
 
 	case BFI_IOIM_STS_TIMEDOUT:
 		bfa_stats(ioim->itnim, iocomp_timedout);
+		/* fall through */
 	case BFI_IOIM_STS_ABORTED:
 		rsp->io_status = BFI_IOIM_STS_ABORTED;
 		bfa_stats(ioim->itnim, iocomp_aborted);
@@ -3215,9 +3209,7 @@
 	switch (event) {
 	case BFA_TSKIM_SM_DONE:
 		bfa_reqq_wcancel(&tskim->reqq_wait);
-		/*
-		 * Fall through !!!
-		 */
+		/* fall through */
 	case BFA_TSKIM_SM_QRESUME:
 		bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
 		bfa_tskim_send_abort(tskim);
diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
index ec8f863..8bf0943 100644
--- a/drivers/scsi/bfa/bfa_fcpim.h
+++ b/drivers/scsi/bfa/bfa_fcpim.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  * Copyright (c) 2014- QLogic Corporation.
@@ -5,15 +6,6 @@
  * www.qlogic.com
  *
  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
  */
 
 #ifndef __BFA_FCPIM_H__
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
index 932feb0..d2d396c 100644
--- a/drivers/scsi/bfa/bfa_fcs.c
+++ b/drivers/scsi/bfa/bfa_fcs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  * Copyright (c) 2014- QLogic Corporation.
@@ -5,15 +6,6 @@
  * www.qlogic.com
  *
  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
  */
 
 /*
diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h
index e60f72b..3e117fe 100644
--- a/drivers/scsi/bfa/bfa_fcs.h
+++ b/drivers/scsi/bfa/bfa_fcs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  * Copyright (c) 2014- QLogic Corporation.
@@ -5,15 +6,6 @@
  * www.qlogic.com
  *
  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
  */
 
 #ifndef __BFA_FCS_H__
diff --git a/drivers/scsi/bfa/bfa_fcs_fcpim.c b/drivers/scsi/bfa/bfa_fcs_fcpim.c
index 2e3b19e..c7de62b 100644
--- a/drivers/scsi/bfa/bfa_fcs_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcs_fcpim.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  * Copyright (c) 2014- QLogic Corporation.
@@ -5,15 +6,6 @@
  * www.qlogic.com
  *
  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
  */
 
 /*
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index b4f2c1d..7c3eadc 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  * Copyright (c) 2014- QLogic Corporation.
@@ -5,15 +6,6 @@
  * www.qlogic.com
  *
  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
  */
 
 #include "bfad_drv.h"
@@ -6430,9 +6422,7 @@
 	switch (event) {
 	case BFA_FCS_VPORT_SM_OFFLINE:
 		bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
-		/*
-		 * !!! fall through !!!
-		 */
+		/* fall through */
 
 	case BFA_FCS_VPORT_SM_RSP_OK:
 	case BFA_FCS_VPORT_SM_RSP_ERROR:
@@ -6458,9 +6448,7 @@
 	switch (event) {
 	case BFA_FCS_VPORT_SM_OFFLINE:
 		bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
-		/*
-		 * !!! fall through !!!
-		 */
+		/* fall through */
 
 	case BFA_FCS_VPORT_SM_RSP_OK:
 	case BFA_FCS_VPORT_SM_RSP_ERROR:
diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c
index de50349..82801b3 100644
--- a/drivers/scsi/bfa/bfa_fcs_rport.c
+++ b/drivers/scsi/bfa/bfa_fcs_rport.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  * Copyright (c) 2014- QLogic Corporation.
@@ -5,15 +6,6 @@
  * www.qlogic.com
  *
  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
  */
 
 /*
@@ -427,17 +419,13 @@
 
 	case RPSM_EVENT_LOGO_RCVD:
 		bfa_fcs_rport_send_logo_acc(rport);
-		/*
-		 * !! fall through !!
-		 */
+		/* fall through */
 	case RPSM_EVENT_PRLO_RCVD:
 		if (rport->prlo == BFA_TRUE)
 			bfa_fcs_rport_send_prlo_acc(rport);
 
 		bfa_fcxp_discard(rport->fcxp);
-		/*
-		 * !! fall through !!
-		 */
+		/* fall through */
 	case RPSM_EVENT_FAILED:
 		if (rport->plogi_retries < BFA_FCS_RPORT_MAX_RETRIES) {
 			rport->plogi_retries++;
@@ -868,9 +856,7 @@
 		 * At least go offline when a PLOGI is received.
 		 */
 		bfa_fcxp_discard(rport->fcxp);
-		/*
-		 * !!! fall through !!!
-		 */
+		/* fall through */
 
 	case RPSM_EVENT_FAILED:
 	case RPSM_EVENT_ADDRESS_CHANGE:
@@ -1056,6 +1042,7 @@
 
 	case RPSM_EVENT_LOGO_RCVD:
 		bfa_fcs_rport_send_logo_acc(rport);
+		/* fall through */
 	case RPSM_EVENT_PRLO_RCVD:
 		if (rport->prlo == BFA_TRUE)
 			bfa_fcs_rport_send_prlo_acc(rport);
@@ -1144,9 +1131,7 @@
 			bfa_fcs_rport_send_plogiacc(rport, NULL);
 			break;
 		}
-		/*
-		 * !! fall through !!
-		 */
+		/* fall through */
 
 	case RPSM_EVENT_ADDRESS_CHANGE:
 		if (!bfa_fcs_lport_is_online(rport->port)) {
@@ -1303,6 +1288,7 @@
 
 	case RPSM_EVENT_LOGO_RCVD:
 		bfa_fcs_rport_send_logo_acc(rport);
+		/* fall through */
 	case RPSM_EVENT_PRLO_RCVD:
 		if (rport->prlo == BFA_TRUE)
 			bfa_fcs_rport_send_prlo_acc(rport);
@@ -1346,6 +1332,7 @@
 
 	case RPSM_EVENT_LOGO_RCVD:
 		bfa_fcs_rport_send_logo_acc(rport);
+		/* fall through */
 	case RPSM_EVENT_PRLO_RCVD:
 		if (rport->prlo == BFA_TRUE)
 			bfa_fcs_rport_send_prlo_acc(rport);
diff --git a/drivers/scsi/bfa/bfa_hw_cb.c b/drivers/scsi/bfa/bfa_hw_cb.c
index c4a0c0e..6cc2f72 100644
--- a/drivers/scsi/bfa/bfa_hw_cb.c
+++ b/drivers/scsi/bfa/bfa_hw_cb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  * Copyright (c) 2014- QLogic Corporation.
@@ -5,15 +6,6 @@
  * www.qlogic.com
  *
  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
  */
 
 #include "bfad_drv.h"
@@ -61,7 +53,6 @@
 
 	bfa_rspq_ci(bfa, rspq) = ci;
 	writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
-	mmiowb();
 }
 
 void
@@ -72,7 +63,6 @@
 
 	bfa_rspq_ci(bfa, rspq) = ci;
 	writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
-	mmiowb();
 }
 
 void
diff --git a/drivers/scsi/bfa/bfa_hw_ct.c b/drivers/scsi/bfa/bfa_hw_ct.c
index b0ff378..4b1c0a5 100644
--- a/drivers/scsi/bfa/bfa_hw_ct.c
+++ b/drivers/scsi/bfa/bfa_hw_ct.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  * Copyright (c) 2014- QLogic Corporation.
@@ -5,15 +6,6 @@
  * www.qlogic.com
  *
  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
  */
 
 #include "bfad_drv.h"
@@ -81,7 +73,6 @@
 
 	bfa_rspq_ci(bfa, rspq) = ci;
 	writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
-	mmiowb();
 }
 
 /*
@@ -94,7 +85,6 @@
 {
 	bfa_rspq_ci(bfa, rspq) = ci;
 	writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
-	mmiowb();
 }
 
 void
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 16d3aeb..93471d7 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  * Copyright (c) 2014- QLogic Corporation.
@@ -5,15 +6,6 @@
  * www.qlogic.com
  *
  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
  */
 
 #include "bfad_drv.h"
@@ -978,9 +970,7 @@
 
 	case IOCPF_E_INITFAIL:
 		bfa_iocpf_timer_stop(ioc);
-		/*
-		 * !!! fall through !!!
-		 */
+		/* fall through */
 
 	case IOCPF_E_TIMEOUT:
 		writel(1, ioc->ioc_regs.ioc_sem_reg);
@@ -1056,9 +1046,7 @@
 
 	case IOCPF_E_FAIL:
 		bfa_iocpf_timer_stop(ioc);
-		/*
-		 * !!! fall through !!!
-		 */
+		/* fall through */
 
 	case IOCPF_E_TIMEOUT:
 		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
@@ -3819,7 +3807,7 @@
 		sfp->state = BFA_SFP_STATE_REMOVED;
 		sfp->data_valid = 0;
 		bfa_sfp_scn_aen_post(sfp, rsp);
-		 break;
+		break;
 	case BFA_SFP_SCN_FAILED:
 		sfp->state = BFA_SFP_STATE_FAILED;
 		sfp->data_valid = 0;
@@ -5763,7 +5751,7 @@
 				(struct bfa_phy_stats_s *) phy->ubuf;
 			bfa_phy_ntoh32((u32 *)stats, (u32 *)phy->dbuf_kva,
 				sizeof(struct bfa_phy_stats_s));
-				bfa_trc(phy, stats->status);
+			bfa_trc(phy, stats->status);
 		}
 
 		phy->status = status;
@@ -6007,6 +5995,7 @@
 	case BFA_DCONF_SM_IOCDISABLE:
 	case BFA_DCONF_SM_FLASH_COMP:
 		bfa_timer_stop(&dconf->timer);
+		/* fall through */
 	case BFA_DCONF_SM_TIMEOUT:
 		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
 		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index 0f9fab7..933a1c3 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  * Copyright (c) 2014- QLogic Corporation.
@@ -5,15 +6,6 @@
  * www.qlogic.com
  *
  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
  */
 
 #ifndef __BFA_IOC_H__
diff --git a/drivers/scsi/bfa/bfa_ioc_cb.c b/drivers/scsi/bfa/bfa_ioc_cb.c
index f1b80da..2fc6215 100644
--- a/drivers/scsi/bfa/bfa_ioc_cb.c
+++ b/drivers/scsi/bfa/bfa_ioc_cb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  * Copyright (c) 2014- QLogic Corporation.
@@ -5,15 +6,6 @@
  * www.qlogic.com
  *
  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
  */
 
 #include "bfad_drv.h"
diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c
index 651a8fb..18b58b2 100644
--- a/drivers/scsi/bfa/bfa_ioc_ct.c
+++ b/drivers/scsi/bfa/bfa_ioc_ct.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  * Copyright (c) 2014- QLogic Corporation.
@@ -5,15 +6,6 @@
  * www.qlogic.com
  *
  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
  */
 
 #include "bfad_drv.h"
diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
index 1c2ab39..578e767 100644
--- a/drivers/scsi/bfa/bfa_modules.h
+++ b/drivers/scsi/bfa/bfa_modules.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  * Copyright (c) 2014- QLogic Corporation.
@@ -5,15 +6,6 @@
  * www.qlogic.com
  *
  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
  */
 
 /*
diff --git a/drivers/scsi/bfa/bfa_plog.h b/drivers/scsi/bfa/bfa_plog.h
index da570c0..0ed6733 100644
--- a/drivers/scsi/bfa/bfa_plog.h
+++ b/drivers/scsi/bfa/bfa_plog.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  * Copyright (c) 2014- QLogic Corporation.
@@ -5,15 +6,6 @@
  * www.qlogic.com
  *
  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
  */
 #ifndef __BFA_PORTLOG_H__
 #define __BFA_PORTLOG_H__
diff --git a/drivers/scsi/bfa/bfa_port.c b/drivers/scsi/bfa/bfa_port.c
index 079bc77..4511ec8 100644
--- a/drivers/scsi/bfa/bfa_port.c
+++ b/drivers/scsi/bfa/bfa_port.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  * Copyright (c) 2014- QLogic Corporation.
@@ -5,15 +6,6 @@
  * www.qlogic.com
  *
  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
  */
 
 #include "bfad_drv.h"
diff --git a/drivers/scsi/bfa/bfa_port.h b/drivers/scsi/bfa/bfa_port.h
index 0c3b200..7e569d4 100644
--- a/drivers/scsi/bfa/bfa_port.h
+++ b/drivers/scsi/bfa/bfa_port.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  * Copyright (c) 2014- QLogic Corporation.
@@ -5,15 +6,6 @@
  * www.qlogic.com
  *
  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
  */
 
 #ifndef __BFA_PORT_H__
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
index 6fc34fb..6d21314 100644
--- a/drivers/scsi/bfa/bfa_svc.c
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  * Copyright (c) 2014- QLogic Corporation.
@@ -5,15 +6,6 @@
  * www.qlogic.com
  *
  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
  */
 
 #include "bfad_drv.h"
diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h
index 7e8fb62..9c83109 100644
--- a/drivers/scsi/bfa/bfa_svc.h
+++ b/drivers/scsi/bfa/bfa_svc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  * Copyright (c) 2014- QLogic Corporation.
@@ -5,15 +6,6 @@
  * www.qlogic.com
  *
  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
  */
 
 #ifndef __BFA_SVC_H__
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index bd7e6a6..2f9213b 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  * Copyright (c) 2014- QLogic Corporation.
@@ -5,15 +6,6 @@
  * www.qlogic.com
  *
  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
  */
 
 /*
@@ -727,7 +719,7 @@
 int
 bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
 {
-	int		rc = -ENODEV;
+	int rc = -ENODEV;
 
 	if (pci_enable_device(pdev)) {
 		printk(KERN_ERR "pci_enable_device fail %p\n", pdev);
@@ -739,14 +731,14 @@
 
 	pci_set_master(pdev);
 
+	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (rc)
+		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 
-	if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) ||
-	    (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)) {
-		if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) ||
-		   (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) {
-			printk(KERN_ERR "pci_set_dma_mask fail %p\n", pdev);
-			goto out_release_region;
-		}
+	if (rc) {
+		rc = -ENODEV;
+		printk(KERN_ERR "dma_set_mask_and_coherent fail %p\n", pdev);
+		goto out_release_region;
 	}
 
 	/* Enable PCIE Advanced Error Recovery (AER) if kernel supports */
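
The nested pci_set_dma_mask()/pci_set_consistent_dma_mask() calls collapse into dma_set_mask_and_coherent(), which sets the streaming and coherent masks together; the 64-bit-then-32-bit fallback is preserved. The same probe-time idiom in isolation:

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc)		/* host without 64-bit DMA addressing */
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc)
		return rc;	/* no usable DMA mask at all */
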
@@ -1538,6 +1530,7 @@
 {
 	struct bfad_s *bfad = pci_get_drvdata(pdev);
 	u8 byte;
+	int rc;
 
 	dev_printk(KERN_ERR, &pdev->dev,
 		   "bfad_pci_slot_reset flags: 0x%x\n", bfad->bfad_flags);
@@ -1565,11 +1558,12 @@
 	pci_save_state(pdev);
 	pci_set_master(pdev);
 
-	if (pci_set_dma_mask(bfad->pcidev, DMA_BIT_MASK(64)) != 0)
-		if (pci_set_dma_mask(bfad->pcidev, DMA_BIT_MASK(32)) != 0)
-			goto out_disable_device;
-
-	pci_cleanup_aer_uncorrect_error_status(pdev);
+	rc = dma_set_mask_and_coherent(&bfad->pcidev->dev, DMA_BIT_MASK(64));
+	if (rc)
+		rc = dma_set_mask_and_coherent(&bfad->pcidev->dev,
+					       DMA_BIT_MASK(32));
+	if (rc)
+		goto out_disable_device;
 
 	if (restart_bfa(bfad) == -1)
 		goto out_disable_device;
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 26b0fa4..29ab81d 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  * Copyright (c) 2014- QLogic Corporation.
@@ -5,15 +6,6 @@
  * www.qlogic.com
  *
  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
  */
 
 /*
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 5d163ca..a76c968 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  * Copyright (c) 2014- QLogic Corporation.
@@ -5,15 +6,6 @@
  * www.qlogic.com
  *
  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
  */
 
 #include <linux/uaccess.h>
@@ -3264,9 +3256,9 @@
 	/* Allocate dma coherent memory */
 	buf_info = buf_base;
 	buf_info->size = payload_len;
-	buf_info->virt = dma_zalloc_coherent(&bfad->pcidev->dev,
-					     buf_info->size, &buf_info->phys,
-					     GFP_KERNEL);
+	buf_info->virt = dma_alloc_coherent(&bfad->pcidev->dev,
+					    buf_info->size, &buf_info->phys,
+					    GFP_KERNEL);
 	if (!buf_info->virt)
 		goto out_free_mem;
 
diff --git a/drivers/scsi/bfa/bfad_bsg.h b/drivers/scsi/bfa/bfad_bsg.h
index 917e140..e525339 100644
--- a/drivers/scsi/bfa/bfad_bsg.h
+++ b/drivers/scsi/bfa/bfad_bsg.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  * Copyright (c) 2014- QLogic Corporation.
@@ -5,15 +6,6 @@
  * www.qlogic.com
  *
  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
  */
 #ifndef BFAD_BSG_H
 #define BFAD_BSG_H
diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
index 349cfe7..fd1b378 100644
--- a/drivers/scsi/bfa/bfad_debugfs.c
+++ b/drivers/scsi/bfa/bfad_debugfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  * Copyright (c) 2014- QLogic Corporation.
@@ -5,15 +6,6 @@
  * www.qlogic.com
  *
  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
  */
 
 #include <linux/debugfs.h>
@@ -460,11 +452,6 @@
 	if (!bfa_debugfs_root) {
 		bfa_debugfs_root = debugfs_create_dir("bfa", NULL);
 		atomic_set(&bfa_debugfs_port_count, 0);
-		if (!bfa_debugfs_root) {
-			printk(KERN_WARNING
-				"BFA debugfs root dir creation failed\n");
-			goto err;
-		}
 	}
 
 	/* Setup the pci_dev debugfs directory for the port */
@@ -472,12 +459,6 @@
 	if (!port->port_debugfs_root) {
 		port->port_debugfs_root =
 			debugfs_create_dir(name, bfa_debugfs_root);
-		if (!port->port_debugfs_root) {
-			printk(KERN_WARNING
-				"bfa %s: debugfs root creation failed\n",
-				bfad->pci_name);
-			goto err;
-		}
 
 		atomic_inc(&bfa_debugfs_port_count);
 
@@ -489,16 +470,9 @@
 							port->port_debugfs_root,
 							port,
 							file->fops);
-			if (!bfad->bfad_dentry_files[i]) {
-				printk(KERN_WARNING
-					"bfa %s: debugfs %s creation failed\n",
-					bfad->pci_name, file->name);
-				goto err;
-			}
 		}
 	}
 
-err:
 	return;
 }
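
The error checks after debugfs_create_dir()/debugfs_create_file() are dropped as part of a tree-wide cleanup: debugfs is designed so callers never inspect the return value, since every debugfs function accepts (and ignores) error pointers and the driver must keep working whether or not debugfs is available. Sketch with hypothetical names:

	struct dentry *root;

	root = debugfs_create_dir("bfa", NULL);
	/* no check: even if root is an ERR_PTR, the next call is safe */
	debugfs_create_file("drvtrc", 0444, root, port, &drvtrc_fops);
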
 
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index 4fe980a..eaee7c8 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  * Copyright (c) 2014- QLogic Corporation.
@@ -5,15 +6,6 @@
  * www.qlogic.com
  *
  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
  */
 
 /*
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index c4a3331..22f06be 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  * Copyright (c) 2014- QLogic Corporation.
@@ -5,15 +6,6 @@
  * www.qlogic.com
  *
  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
  */
 
 /*
@@ -544,7 +536,7 @@
 			struct device *dev)
 {
 	struct bfad_im_port_pointer *im_portp;
-	int error = 1;
+	int error;
 
 	mutex_lock(&bfad_mutex);
 	error = idr_alloc(&bfad_im_port_index, im_port, 0, 0, GFP_KERNEL);
@@ -817,7 +809,6 @@
 	.this_id = -1,
 	.sg_tablesize = BFAD_IO_MAX_SGE,
 	.cmd_per_lun = 3,
-	.use_clustering = ENABLE_CLUSTERING,
 	.shost_attrs = bfad_im_host_attrs,
 	.max_sectors = BFAD_MAX_SECTORS,
 	.vendor_id = BFA_PCI_VENDOR_ID_BROCADE,
@@ -840,7 +831,6 @@
 	.this_id = -1,
 	.sg_tablesize = BFAD_IO_MAX_SGE,
 	.cmd_per_lun = 3,
-	.use_clustering = ENABLE_CLUSTERING,
 	.shost_attrs = bfad_im_vport_attrs,
 	.max_sectors = BFAD_MAX_SECTORS,
 };
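
The .use_clustering = ENABLE_CLUSTERING initializers disappear because v5.0 removed the field from struct scsi_host_template: merging of adjacent scatter-gather segments is now unconditional, and the few hosts that cannot tolerate merged segments express that restriction through boundary settings (e.g. .dma_boundary) instead. A fragment of a template under that convention, hypothetical values:

	static struct scsi_host_template example_tmpl = {
		.name		= "example",
		.sg_tablesize	= 64,
		.cmd_per_lun	= 3,
		/* no .use_clustering: segment merging is the default */
	};
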
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index e61ed8d..f16d4b2 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  * Copyright (c) 2014- QLogic Corporation.
@@ -5,15 +6,6 @@
  * www.qlogic.com
  *
  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
  */
 
 #ifndef __BFAD_IM_H__
@@ -143,7 +135,7 @@
 static inline void bfad_im_post_vendor_event(struct bfa_aen_entry_s *entry,
 					     struct bfad_s *drv, int cnt,
 					     enum bfa_aen_category cat,
-					     enum bfa_ioc_aen_event evt)
+					     int evt)
 {
 	struct timespec64 ts;
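
Relaxing aen_type and the evt parameter from specific enum types to int is a warning fix: the AEN plumbing funnels events from several unrelated enum families (IOC, port, audit, and so on) through one field, so any single enum type triggered implicit enum-conversion warnings in newer compilers.
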
 
diff --git a/drivers/scsi/bfa/bfi.h b/drivers/scsi/bfa/bfi.h
index 5f698d0..41e6b4d 100644
--- a/drivers/scsi/bfa/bfi.h
+++ b/drivers/scsi/bfa/bfi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  * Copyright (c) 2014- QLogic Corporation.
@@ -5,15 +6,6 @@
  * www.qlogic.com
  *
  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
  */
 
 #ifndef __BFI_H__
diff --git a/drivers/scsi/bfa/bfi_ms.h b/drivers/scsi/bfa/bfi_ms.h
index ccbd9e3..b9dc0b9 100644
--- a/drivers/scsi/bfa/bfi_ms.h
+++ b/drivers/scsi/bfa/bfi_ms.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  * Copyright (c) 2014- QLogic Corporation.
@@ -5,15 +6,6 @@
  * www.qlogic.com
  *
  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
  */
 
 #ifndef __BFI_MS_H__
diff --git a/drivers/scsi/bfa/bfi_reg.h b/drivers/scsi/bfa/bfi_reg.h
index fd5b876..0803b71 100644
--- a/drivers/scsi/bfa/bfi_reg.h
+++ b/drivers/scsi/bfa/bfi_reg.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  * Copyright (c) 2014- QLogic Corporation.
@@ -5,15 +6,6 @@
  * www.qlogic.com
  *
  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
  */
 
 /*
diff --git a/drivers/scsi/bnx2fc/Kconfig b/drivers/scsi/bnx2fc/Kconfig
index d401a09..e0ccb48 100644
--- a/drivers/scsi/bnx2fc/Kconfig
+++ b/drivers/scsi/bnx2fc/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 config SCSI_BNX2X_FCOE
 	tristate "QLogic FCoE offload support"
 	depends on PCI
diff --git a/drivers/scsi/bnx2fc/Makefile b/drivers/scsi/bnx2fc/Makefile
index 141149e..1d72e27 100644
--- a/drivers/scsi/bnx2fc/Makefile
+++ b/drivers/scsi/bnx2fc/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 obj-$(CONFIG_SCSI_BNX2X_FCOE) += bnx2fc.o
 
 bnx2fc-y := bnx2fc_els.o bnx2fc_fcoe.o bnx2fc_hwi.o bnx2fc_io.o bnx2fc_tgt.o \
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 901a316..3b84db8 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -66,7 +66,7 @@
 #include "bnx2fc_constants.h"
 
 #define BNX2FC_NAME		"bnx2fc"
-#define BNX2FC_VERSION		"2.11.8"
+#define BNX2FC_VERSION		"2.12.10"
 
 #define PFX			"bnx2fc: "
 
@@ -75,8 +75,9 @@
 #define BNX2X_DOORBELL_PCI_BAR		2
 
 #define BNX2FC_MAX_BD_LEN		0xffff
-#define BNX2FC_BD_SPLIT_SZ		0x8000
-#define BNX2FC_MAX_BDS_PER_CMD		256
+#define BNX2FC_BD_SPLIT_SZ		0xffff
+#define BNX2FC_MAX_BDS_PER_CMD		255
+#define BNX2FC_FW_MAX_BDS_PER_CMD	255
 
 #define BNX2FC_SQ_WQES_MAX	256
 
@@ -433,8 +434,10 @@
 	void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg);
 	struct bnx2fc_els_cb_arg *cb_arg;
 	struct delayed_work timeout_work; /* timer for ULP timeouts */
-	struct completion tm_done;
-	int wait_for_comp;
+	struct completion abts_done;
+	struct completion cleanup_done;
+	int wait_for_abts_comp;
+	int wait_for_cleanup_comp;
 	u16 xid;
 	struct fcoe_err_report_entry err_entry;
 	struct fcoe_task_ctx_entry *task;
@@ -455,6 +458,7 @@
 #define BNX2FC_FLAG_ELS_TIMEOUT		0xb
 #define BNX2FC_FLAG_CMD_LOST		0xc
 #define BNX2FC_FLAG_SRR_SENT		0xd
+#define BNX2FC_FLAG_ISSUE_CLEANUP_REQ	0xe
 	u8 rec_retry;
 	u8 srr_retry;
 	u32 srr_offset;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c
index 76e65a3..754f2e8 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_els.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_els.c
@@ -610,7 +610,6 @@
 	rc = bnx2fc_initiate_els(tgt, ELS_REC, &rec, sizeof(rec),
 				 bnx2fc_rec_compl, cb_arg,
 				 r_a_tov);
-rec_err:
 	if (rc) {
 		BNX2FC_IO_DBG(orig_io_req, "REC failed - release\n");
 		spin_lock_bh(&tgt->tgt_lock);
@@ -618,6 +617,7 @@
 		spin_unlock_bh(&tgt->tgt_lock);
 		kfree(cb_arg);
 	}
+rec_err:
 	return rc;
 }
 
@@ -654,7 +654,6 @@
 	rc = bnx2fc_initiate_els(tgt, ELS_SRR, &srr, sizeof(srr),
 				 bnx2fc_srr_compl, cb_arg,
 				 r_a_tov);
-srr_err:
 	if (rc) {
 		BNX2FC_IO_DBG(orig_io_req, "SRR failed - release\n");
 		spin_lock_bh(&tgt->tgt_lock);
@@ -664,6 +663,7 @@
 	} else
 		set_bit(BNX2FC_FLAG_SRR_SENT, &orig_io_req->req_flags);
 
+srr_err:
 	return rc;
 }
 
@@ -854,33 +854,57 @@
 	kref_put(&els_req->refcount, bnx2fc_cmd_release);
 }
 
+#define		BNX2FC_FCOE_MAC_METHOD_GRANTED_MAC	1
+#define		BNX2FC_FCOE_MAC_METHOD_FCF_MAP		2
+#define		BNX2FC_FCOE_MAC_METHOD_FCOE_SET_MAC	3
 static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
 			      void *arg)
 {
 	struct fcoe_ctlr *fip = arg;
 	struct fc_exch *exch = fc_seq_exch(seq);
 	struct fc_lport *lport = exch->lp;
-	u8 *mac;
-	u8 op;
+
+	struct fc_frame_header *fh;
+	u8 *granted_mac;
+	u8 fcoe_mac[6];
+	u8 fc_map[3];
+	int method;
 
 	if (IS_ERR(fp))
 		goto done;
 
-	mac = fr_cb(fp)->granted_mac;
-	if (is_zero_ether_addr(mac)) {
-		op = fc_frame_payload_op(fp);
-		if (lport->vport) {
-			if (op == ELS_LS_RJT) {
-				printk(KERN_ERR PFX "bnx2fc_flogi_resp is LS_RJT\n");
-				fc_vport_terminate(lport->vport);
-				fc_frame_free(fp);
-				return;
-			}
-		}
-		fcoe_ctlr_recv_flogi(fip, lport, fp);
+	fh = fc_frame_header_get(fp);
+	granted_mac = fr_cb(fp)->granted_mac;
+
+	/*
+	 * We set the source MAC for FCoE traffic based on the Granted MAC
+	 * address from the switch.
+	 *
+	 * If granted_mac is non-zero, we use that.
+	 * If the granted_mac is zeroed out, create the FCoE MAC based on
+	 * the sel_fcf->fc_map and the d_id of the FLOGI frame.
+	 * If sel_fcf->fc_map is 0, then we use the default FCF-MAC plus the
+	 * d_id of the FLOGI frame.
+	 */
+	if (!is_zero_ether_addr(granted_mac)) {
+		ether_addr_copy(fcoe_mac, granted_mac);
+		method = BNX2FC_FCOE_MAC_METHOD_GRANTED_MAC;
+	} else if (fip->sel_fcf && fip->sel_fcf->fc_map != 0) {
+		hton24(fc_map, fip->sel_fcf->fc_map);
+		fcoe_mac[0] = fc_map[0];
+		fcoe_mac[1] = fc_map[1];
+		fcoe_mac[2] = fc_map[2];
+		fcoe_mac[3] = fh->fh_d_id[0];
+		fcoe_mac[4] = fh->fh_d_id[1];
+		fcoe_mac[5] = fh->fh_d_id[2];
+		method = BNX2FC_FCOE_MAC_METHOD_FCF_MAP;
+	} else {
+		fc_fcoe_set_mac(fcoe_mac, fh->fh_d_id);
+		method = BNX2FC_FCOE_MAC_METHOD_FCOE_SET_MAC;
 	}
-	if (!is_zero_ether_addr(mac))
-		fip->update_mac(lport, mac);
+
+	BNX2FC_HBA_DBG(lport, "fcoe_mac=%pM method=%d\n", fcoe_mac, method);
+	fip->update_mac(lport, fcoe_mac);
 done:
 	fc_lport_flogi_resp(seq, fp, lport);
 }
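
The three-way selection above implements Fabric Provided MAC Addressing (FPMA): when the switch does not grant an explicit MAC, the 48-bit FCoE source MAC is formed by prepending a 24-bit FC-MAP to the 24-bit fabric-assigned FC_ID from the FLOGI response. A runnable standalone model of that construction (hypothetical helper; 0x0efc00 is the default FC-MAP):

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical standalone model of the FPMA fallback above: the
 * 48-bit FCoE MAC is the 24-bit FC-MAP followed by the 24-bit
 * fabric-assigned FC_ID (the d_id of the FLOGI response).
 */
static void build_fpma(uint8_t mac[6], uint32_t fc_map,
		       const uint8_t d_id[3])
{
	mac[0] = (fc_map >> 16) & 0xff;
	mac[1] = (fc_map >> 8) & 0xff;
	mac[2] = fc_map & 0xff;
	mac[3] = d_id[0];
	mac[4] = d_id[1];
	mac[5] = d_id[2];
}

int main(void)
{
	const uint8_t d_id[3] = { 0x01, 0x02, 0x03 };
	uint8_t mac[6];

	build_fpma(mac, 0x0efc00, d_id);	/* default FC-MAP */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}
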
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index f000458..b4bfab5 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -150,15 +150,11 @@
 	struct fcoe_rcv_info *fr;
 	struct sk_buff_head *list;
 	struct sk_buff *skb, *next;
-	struct sk_buff *head;
 
 	bg = &bnx2fc_global;
 	spin_lock_bh(&bg->fcoe_rx_list.lock);
 	list = &bg->fcoe_rx_list;
-	head = list->next;
-	for (skb = head; skb != (struct sk_buff *)list;
-	     skb = next) {
-		next = skb->next;
+	skb_queue_walk_safe(list, skb, next) {
 		fr = fcoe_dev_from_skb(skb);
 		if (fr->fr_dev == lp) {
 			__skb_unlink(skb, list);
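
The open-coded walk of the receive list is replaced by skb_queue_walk_safe(), which caches the next pointer before each iteration so the current skb may be unlinked and freed inside the body. A minimal sketch of the pattern (hypothetical match condition; the caller is assumed to hold the queue lock, as the driver does here):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/*
 * Minimal sketch: skb_queue_walk_safe() caches the next pointer
 * before each iteration, so the current skb may be unlinked and
 * freed inside the loop body. The caller holds the queue lock.
 */
static void demo_drop_for_dev(struct sk_buff_head *list,
			      struct net_device *dev)
{
	struct sk_buff *skb, *next;

	skb_queue_walk_safe(list, skb, next) {
		if (skb->dev == dev) {	/* hypothetical match condition */
			__skb_unlink(skb, list);
			kfree_skb(skb);
		}
	}
}
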
@@ -350,7 +346,7 @@
 			return -ENOMEM;
 		}
 		frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
-		cp = kmap_atomic(skb_frag_page(frag)) + frag->page_offset;
+		cp = kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag);
 	} else {
 		cp = skb_put(skb, tlen);
 	}
@@ -432,11 +428,9 @@
 	struct fc_lport *lport;
 	struct bnx2fc_interface *interface;
 	struct fcoe_ctlr *ctlr;
-	struct fc_frame_header *fh;
 	struct fcoe_rcv_info *fr;
 	struct fcoe_percpu_s *bg;
 	struct sk_buff *tmp_skb;
-	unsigned short oxid;
 
 	interface = container_of(ptype, struct bnx2fc_interface,
 				 fcoe_packet_type);
@@ -468,9 +462,6 @@
 		goto err;
 
 	skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
-	fh = (struct fc_frame_header *) skb_transport_header(skb);
-
-	oxid = ntohs(fh->fh_ox_id);
 
 	fr = fcoe_dev_from_skb(skb);
 	fr->fr_dev = lport;
@@ -1445,7 +1436,7 @@
 static struct bnx2fc_interface *
 bnx2fc_interface_create(struct bnx2fc_hba *hba,
 			struct net_device *netdev,
-			enum fip_state fip_mode)
+			enum fip_mode fip_mode)
 {
 	struct fcoe_ctlr_device *ctlr_dev;
 	struct bnx2fc_interface *interface;
@@ -2371,7 +2362,7 @@
 	if (!interface) {
 		printk(KERN_ERR PFX "bnx2fc_interface_create failed\n");
 		rc = -ENOMEM;
-		goto ifput_err;
+		goto netdev_err;
 	}
 
 	if (is_vlan_dev(netdev)) {
@@ -2977,9 +2968,9 @@
 	.change_queue_depth	= scsi_change_queue_depth,
 	.this_id		= -1,
 	.cmd_per_lun		= 3,
-	.use_clustering		= ENABLE_CLUSTERING,
 	.sg_tablesize		= BNX2FC_MAX_BDS_PER_CMD,
-	.max_sectors		= 1024,
+	.dma_boundary           = 0x7fff,
+	.max_sectors		= 0x3fbf,
 	.track_queue_depth	= 1,
 	.slave_configure	= bnx2fc_slave_configure,
 	.shost_attrs		= bnx2fc_host_attrs,
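
The new dma_boundary/max_sectors pair is sized against the 255-BD firmware limit introduced earlier in this patch: a 0x7fff boundary caps each scatter segment at 32 KiB, and 0x3fbf sectors keeps the largest request inside the 255-segment budget. A runnable check of that arithmetic (assuming 512-byte sectors):

#include <stdio.h>

/*
 * Runnable check of the host-template sizing (assumed intent):
 * dma_boundary 0x7fff caps each segment at 32 KiB, so 255 firmware
 * BDs cover 255 * 32768 bytes, and max_sectors 0x3fbf keeps any one
 * request inside that budget.
 */
int main(void)
{
	unsigned long seg = 0x7fffUL + 1;	/* 32 KiB per BD */
	unsigned long budget = 255UL * seg;	/* firmware BD capacity */
	unsigned long request = 0x3fbfUL * 512;	/* largest request */

	printf("budget=%lu request=%lu fits=%d\n",
	       budget, request, request <= budget);
	return 0;
}
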
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index e8ae4d6..f069e09 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -633,7 +633,6 @@
 	u16 xid;
 	u32 frame_len, len;
 	struct bnx2fc_cmd *io_req = NULL;
-	struct fcoe_task_ctx_entry *task, *task_page;
 	struct bnx2fc_interface *interface = tgt->port->priv;
 	struct bnx2fc_hba *hba = interface->hba;
 	int task_idx, index;
@@ -711,9 +710,6 @@
 
 		task_idx = xid / BNX2FC_TASKS_PER_PAGE;
 		index = xid % BNX2FC_TASKS_PER_PAGE;
-		task_page = (struct fcoe_task_ctx_entry *)
-					hba->task_ctx[task_idx];
-		task = &(task_page[index]);
 
 		io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
 		if (!io_req)
@@ -830,7 +826,7 @@
 			((u64)err_entry->data.err_warn_bitmap_hi << 32) |
 			(u64)err_entry->data.err_warn_bitmap_lo;
 		for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
-			if (err_warn_bit_map & (u64) (1 << i)) {
+			if (err_warn_bit_map & ((u64)1 << i)) {
 				err_warn = i;
 				break;
 			}
@@ -839,9 +835,6 @@
 
 		task_idx = xid / BNX2FC_TASKS_PER_PAGE;
 		index = xid % BNX2FC_TASKS_PER_PAGE;
-		task_page = (struct fcoe_task_ctx_entry *)
-			     interface->hba->task_ctx[task_idx];
-		task = &(task_page[index]);
 		io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
 		if (!io_req)
 			goto ret_warn_rqe;
@@ -991,7 +984,6 @@
 			FCOE_CQE_TOGGLE_BIT_SHIFT);
 	msg = *((u32 *)rx_db);
 	writel(cpu_to_le32(msg), tgt->ctx_base);
-	mmiowb();
 
 }
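
The mmiowb() calls after doorbell writes are dropped throughout this patch; since the 5.2 I/O-ordering rework, spin_unlock() implies the MMIO write barrier on architectures that need one. A minimal sketch of the resulting doorbell pattern (hypothetical helper):

#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/*
 * Sketch of the doorbell pattern after the removal (assumed
 * rationale): spin_unlock() now implies the MMIO write barrier on
 * architectures that need one, so writel() followed by the unlock is
 * already ordered against the next lock holder's MMIO.
 */
static void demo_ring_doorbell(void __iomem *db, u32 msg,
			       spinlock_t *lock)
{
	spin_lock(lock);
	writel(msg, db);
	spin_unlock(lock);	/* orders the MMIO write where required */
}
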
 
@@ -1123,7 +1115,6 @@
 					struct fcoe_kcqe *ofld_kcqe)
 {
 	struct bnx2fc_rport		*tgt;
-	struct fcoe_port		*port;
 	struct bnx2fc_interface		*interface;
 	u32				conn_id;
 	u32				context_id;
@@ -1137,7 +1128,6 @@
 	}
 	BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n",
 		ofld_kcqe->fcoe_conn_context_id);
-	port = tgt->port;
 	interface = tgt->port->priv;
 	if (hba != interface->hba) {
 		printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mis-match\n");
@@ -1409,7 +1399,6 @@
 				(tgt->sq_curr_toggle_bit << 15);
 	msg = *((u32 *)sq_db);
 	writel(cpu_to_le32(msg), tgt->ctx_base);
-	mmiowb();
 
 }
 
@@ -1465,10 +1454,7 @@
 {
 	struct scsi_cmnd *sc_cmd = orig_io_req->sc_cmd;
 	struct bnx2fc_rport *tgt = seq_clnp_req->tgt;
-	struct bnx2fc_interface *interface = tgt->port->priv;
 	struct fcoe_bd_ctx *bd = orig_io_req->bd_tbl->bd_tbl;
-	struct fcoe_task_ctx_entry *orig_task;
-	struct fcoe_task_ctx_entry *task_page;
 	struct fcoe_ext_mul_sges_ctx *sgl;
 	u8 task_type = FCOE_TASK_TYPE_SEQUENCE_CLEANUP;
 	u8 orig_task_type;
@@ -1530,10 +1516,6 @@
 		orig_task_idx = orig_xid / BNX2FC_TASKS_PER_PAGE;
 		index = orig_xid % BNX2FC_TASKS_PER_PAGE;
 
-		task_page = (struct fcoe_task_ctx_entry *)
-			     interface->hba->task_ctx[orig_task_idx];
-		orig_task = &(task_page[index]);
-
 		/* Multiple SGEs were used for this IO */
 		sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
 		sgl->mul_sgl.cur_sge_addr.lo = (u32)phys_addr;
@@ -1857,10 +1839,10 @@
 	 * entries. Hence the limit with one page is 8192 task context
 	 * entries.
 	 */
-	hba->task_ctx_bd_tbl = dma_zalloc_coherent(&hba->pcidev->dev,
-						   PAGE_SIZE,
-						   &hba->task_ctx_bd_dma,
-						   GFP_KERNEL);
+	hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
+						  PAGE_SIZE,
+						  &hba->task_ctx_bd_dma,
+						  GFP_KERNEL);
 	if (!hba->task_ctx_bd_tbl) {
 		printk(KERN_ERR PFX "unable to allocate task context BDT\n");
 		rc = -1;
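
These dma_zalloc_coherent() conversions, repeated through the rest of this patch, rely on dma_alloc_coherent() now returning zeroed memory, which is why the zeroing wrapper could be retired. A minimal sketch of the call pattern (hypothetical helper):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/*
 * Minimal sketch (hypothetical helper): dma_alloc_coherent() returns
 * zeroed memory, so no memset() is needed after a successful call.
 */
static void *demo_alloc_page(struct pci_dev *pdev, dma_addr_t *dma)
{
	/* on success the returned buffer is already zero-filled */
	return dma_alloc_coherent(&pdev->dev, PAGE_SIZE, dma, GFP_KERNEL);
}
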
@@ -1894,10 +1876,10 @@
 	task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
 	for (i = 0; i < task_ctx_arr_sz; i++) {
 
-		hba->task_ctx[i] = dma_zalloc_coherent(&hba->pcidev->dev,
-						       PAGE_SIZE,
-						       &hba->task_ctx_dma[i],
-						       GFP_KERNEL);
+		hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
+						      PAGE_SIZE,
+						      &hba->task_ctx_dma[i],
+						      GFP_KERNEL);
 		if (!hba->task_ctx[i]) {
 			printk(KERN_ERR PFX "unable to alloc task context\n");
 			rc = -1;
@@ -2031,19 +2013,19 @@
 	}
 
 	for (i = 0; i < segment_count; ++i) {
-		hba->hash_tbl_segments[i] = dma_zalloc_coherent(&hba->pcidev->dev,
-								BNX2FC_HASH_TBL_CHUNK_SIZE,
-								&dma_segment_array[i],
-								GFP_KERNEL);
+		hba->hash_tbl_segments[i] = dma_alloc_coherent(&hba->pcidev->dev,
+							       BNX2FC_HASH_TBL_CHUNK_SIZE,
+							       &dma_segment_array[i],
+							       GFP_KERNEL);
 		if (!hba->hash_tbl_segments[i]) {
 			printk(KERN_ERR PFX "hash segment alloc failed\n");
 			goto cleanup_dma;
 		}
 	}
 
-	hba->hash_tbl_pbl = dma_zalloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
-						&hba->hash_tbl_pbl_dma,
-						GFP_KERNEL);
+	hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+					       &hba->hash_tbl_pbl_dma,
+					       GFP_KERNEL);
 	if (!hba->hash_tbl_pbl) {
 		printk(KERN_ERR PFX "hash table pbl alloc failed\n");
 		goto cleanup_dma;
@@ -2104,10 +2086,9 @@
 		return -ENOMEM;
 
 	mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
-	hba->t2_hash_tbl_ptr = dma_zalloc_coherent(&hba->pcidev->dev,
-						   mem_size,
-						   &hba->t2_hash_tbl_ptr_dma,
-						   GFP_KERNEL);
+	hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
+						  &hba->t2_hash_tbl_ptr_dma,
+						  GFP_KERNEL);
 	if (!hba->t2_hash_tbl_ptr) {
 		printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n");
 		bnx2fc_free_fw_resc(hba);
@@ -2116,9 +2097,9 @@
 
 	mem_size = BNX2FC_NUM_MAX_SESS *
 				sizeof(struct fcoe_t2_hash_table_entry);
-	hba->t2_hash_tbl = dma_zalloc_coherent(&hba->pcidev->dev, mem_size,
-					       &hba->t2_hash_tbl_dma,
-					       GFP_KERNEL);
+	hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
+					      &hba->t2_hash_tbl_dma,
+					      GFP_KERNEL);
 	if (!hba->t2_hash_tbl) {
 		printk(KERN_ERR PFX "unable to allocate t2 hash table\n");
 		bnx2fc_free_fw_resc(hba);
@@ -2140,9 +2121,9 @@
 		return -ENOMEM;
 	}
 
-	hba->stats_buffer = dma_zalloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
-						&hba->stats_buf_dma,
-						GFP_KERNEL);
+	hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+					       &hba->stats_buf_dma,
+					       GFP_KERNEL);
 	if (!hba->stats_buffer) {
 		printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
 		bnx2fc_free_fw_resc(hba);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 350257c..401743e 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -70,7 +70,7 @@
 							&io_req->req_flags)) {
 			/* Handle eh_abort timeout */
 			BNX2FC_IO_DBG(io_req, "eh_abort timed out\n");
-			complete(&io_req->tm_done);
+			complete(&io_req->abts_done);
 		} else if (test_bit(BNX2FC_FLAG_ISSUE_ABTS,
 				    &io_req->req_flags)) {
 			/* Handle internally generated ABTS timeout */
@@ -240,6 +240,7 @@
 		return NULL;
 	}
 
+	cmgr->hba = hba;
 	cmgr->free_list = kcalloc(arr_sz, sizeof(*cmgr->free_list),
 				  GFP_KERNEL);
 	if (!cmgr->free_list) {
@@ -256,7 +257,6 @@
 		goto mem_err;
 	}
 
-	cmgr->hba = hba;
 	cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1);
 
 	for (i = 0; i < arr_sz; i++)  {
@@ -295,7 +295,7 @@
 
 	/* Allocate pool of io_bdts - one for each bnx2fc_cmd */
 	mem_size = num_ios * sizeof(struct io_bdt *);
-	cmgr->io_bdt_pool = kmalloc(mem_size, GFP_KERNEL);
+	cmgr->io_bdt_pool = kzalloc(mem_size, GFP_KERNEL);
 	if (!cmgr->io_bdt_pool) {
 		printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n");
 		goto mem_err;
@@ -775,31 +775,32 @@
 	io_req->on_tmf_queue = 1;
 	list_add_tail(&io_req->link, &tgt->active_tm_queue);
 
-	init_completion(&io_req->tm_done);
-	io_req->wait_for_comp = 1;
+	init_completion(&io_req->abts_done);
+	io_req->wait_for_abts_comp = 1;
 
 	/* Ring doorbell */
 	bnx2fc_ring_doorbell(tgt);
 	spin_unlock_bh(&tgt->tgt_lock);
 
-	rc = wait_for_completion_timeout(&io_req->tm_done,
+	rc = wait_for_completion_timeout(&io_req->abts_done,
 					 interface->tm_timeout * HZ);
 	spin_lock_bh(&tgt->tgt_lock);
 
-	io_req->wait_for_comp = 0;
+	io_req->wait_for_abts_comp = 0;
 	if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags))) {
 		set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags);
 		if (io_req->on_tmf_queue) {
 			list_del_init(&io_req->link);
 			io_req->on_tmf_queue = 0;
 		}
-		io_req->wait_for_comp = 1;
+		io_req->wait_for_cleanup_comp = 1;
+		init_completion(&io_req->cleanup_done);
 		bnx2fc_initiate_cleanup(io_req);
 		spin_unlock_bh(&tgt->tgt_lock);
-		rc = wait_for_completion_timeout(&io_req->tm_done,
+		rc = wait_for_completion_timeout(&io_req->cleanup_done,
 						 BNX2FC_FW_TIMEOUT);
 		spin_lock_bh(&tgt->tgt_lock);
-		io_req->wait_for_comp = 0;
+		io_req->wait_for_cleanup_comp = 0;
 		if (!rc)
 			kref_put(&io_req->refcount, bnx2fc_cmd_release);
 	}
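
The tm_done split gives the ABTS and cleanup firmware events their own completions, so a late ABTS wakeup can no longer satisfy a wait for the cleanup response. A condensed sketch of the waiter side (hypothetical structure and timeout):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/*
 * Condensed sketch of the waiter side (hypothetical structure and
 * timeout): one completion per firmware event means a late ABTS
 * wakeup cannot be mistaken for the cleanup response.
 */
struct demo_req {
	struct completion abts_done;
	struct completion cleanup_done;
};

static int demo_wait_for_cleanup(struct demo_req *req)
{
	init_completion(&req->cleanup_done);
	/* ... issue the cleanup request to the firmware here ... */
	if (!wait_for_completion_timeout(&req->cleanup_done, 10 * HZ))
		return -ETIMEDOUT;	/* no cleanup response arrived */
	return 0;
}
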
@@ -929,7 +930,6 @@
 int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
 				enum fc_rctl r_ctl)
 {
-	struct fc_lport *lport;
 	struct bnx2fc_rport *tgt = orig_io_req->tgt;
 	struct bnx2fc_interface *interface;
 	struct fcoe_port *port;
@@ -947,7 +947,6 @@
 
 	port = orig_io_req->port;
 	interface = port->priv;
-	lport = port->lport;
 
 	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
 	if (!cb_arg) {
@@ -998,7 +997,6 @@
 
 int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
 {
-	struct fc_lport *lport;
 	struct bnx2fc_rport *tgt = io_req->tgt;
 	struct bnx2fc_interface *interface;
 	struct fcoe_port *port;
@@ -1014,7 +1012,6 @@
 
 	port = io_req->port;
 	interface = port->priv;
-	lport = port->lport;
 
 	cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP);
 	if (!cleanup_io_req) {
@@ -1047,6 +1044,9 @@
 	/* Obtain free SQ entry */
 	bnx2fc_add_2_sq(tgt, xid);
 
+	/* Set flag that cleanup request is pending with the firmware */
+	set_bit(BNX2FC_FLAG_ISSUE_CLEANUP_REQ, &io_req->req_flags);
+
 	/* Ring doorbell */
 	bnx2fc_ring_doorbell(tgt);
 
@@ -1083,10 +1083,10 @@
 static int bnx2fc_abts_cleanup(struct bnx2fc_cmd *io_req)
 {
 	struct bnx2fc_rport *tgt = io_req->tgt;
-	int rc = SUCCESS;
 	unsigned int time_left;
 
-	io_req->wait_for_comp = 1;
+	init_completion(&io_req->cleanup_done);
+	io_req->wait_for_cleanup_comp = 1;
 	bnx2fc_initiate_cleanup(io_req);
 
 	spin_unlock_bh(&tgt->tgt_lock);
@@ -1095,22 +1095,22 @@
 	 * Can't wait forever on cleanup response lest we let the SCSI error
 	 * handler wait forever
 	 */
-	time_left = wait_for_completion_timeout(&io_req->tm_done,
+	time_left = wait_for_completion_timeout(&io_req->cleanup_done,
 						BNX2FC_FW_TIMEOUT);
-	io_req->wait_for_comp = 0;
-	if (!time_left)
+	if (!time_left) {
 		BNX2FC_IO_DBG(io_req, "%s(): Wait for cleanup timed out.\n",
 			      __func__);
 
-	/*
-	 * Release reference held by SCSI command the cleanup completion
-	 * hits the BNX2FC_CLEANUP case in bnx2fc_process_cq_compl() and
-	 * thus the SCSI command is not returnedi by bnx2fc_scsi_done().
-	 */
-	kref_put(&io_req->refcount, bnx2fc_cmd_release);
+		/*
+		 * Put the extra reference to the SCSI command since it would
+		 * not have been returned in this case.
+		 */
+		kref_put(&io_req->refcount, bnx2fc_cmd_release);
+	}
 
 	spin_lock_bh(&tgt->tgt_lock);
-	return rc;
+	io_req->wait_for_cleanup_comp = 0;
+	return SUCCESS;
 }
 
 /**
@@ -1198,7 +1198,8 @@
 	/* Move IO req to retire queue */
 	list_add_tail(&io_req->link, &tgt->io_retire_queue);
 
-	init_completion(&io_req->tm_done);
+	init_completion(&io_req->abts_done);
+	init_completion(&io_req->cleanup_done);
 
 	if (test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
 		printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
@@ -1226,26 +1227,28 @@
 		kref_put(&io_req->refcount,
 			 bnx2fc_cmd_release); /* drop timer hold */
 	set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
-	io_req->wait_for_comp = 1;
+	io_req->wait_for_abts_comp = 1;
 	rc = bnx2fc_initiate_abts(io_req);
 	if (rc == FAILED) {
+		io_req->wait_for_cleanup_comp = 1;
 		bnx2fc_initiate_cleanup(io_req);
 		spin_unlock_bh(&tgt->tgt_lock);
-		wait_for_completion(&io_req->tm_done);
+		wait_for_completion(&io_req->cleanup_done);
 		spin_lock_bh(&tgt->tgt_lock);
-		io_req->wait_for_comp = 0;
+		io_req->wait_for_cleanup_comp = 0;
 		goto done;
 	}
 	spin_unlock_bh(&tgt->tgt_lock);
 
 	/* Wait 2 * RA_TOV + 1 to be sure timeout function hasn't fired */
-	time_left = wait_for_completion_timeout(&io_req->tm_done,
-	    (2 * rp->r_a_tov + 1) * HZ);
+	time_left = wait_for_completion_timeout(&io_req->abts_done,
+						(2 * rp->r_a_tov + 1) * HZ);
 	if (time_left)
-		BNX2FC_IO_DBG(io_req, "Timed out in eh_abort waiting for tm_done");
+		BNX2FC_IO_DBG(io_req,
+			      "Timed out in eh_abort waiting for abts_done");
 
 	spin_lock_bh(&tgt->tgt_lock);
-	io_req->wait_for_comp = 0;
+	io_req->wait_for_abts_comp = 0;
 	if (test_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) {
 		BNX2FC_IO_DBG(io_req, "IO completed in a different context\n");
 		rc = SUCCESS;
@@ -1320,10 +1323,29 @@
 	BNX2FC_IO_DBG(io_req, "Entered process_cleanup_compl "
 			      "refcnt = %d, cmd_type = %d\n",
 		   kref_read(&io_req->refcount), io_req->cmd_type);
+	/*
+	 * Test whether there is a cleanup request pending. If not, just
+	 * exit.
+	 */
+	if (!test_and_clear_bit(BNX2FC_FLAG_ISSUE_CLEANUP_REQ,
+				&io_req->req_flags))
+		return;
+	/*
+	 * If we receive a cleanup completion for this request then the
+	 * firmware will not give us an abort completion for this request
+	 * so clear any ABTS pending flags.
+	 */
+	if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags) &&
+	    !test_bit(BNX2FC_FLAG_ABTS_DONE, &io_req->req_flags)) {
+		set_bit(BNX2FC_FLAG_ABTS_DONE, &io_req->req_flags);
+		if (io_req->wait_for_abts_comp)
+			complete(&io_req->abts_done);
+	}
+
 	bnx2fc_scsi_done(io_req, DID_ERROR);
 	kref_put(&io_req->refcount, bnx2fc_cmd_release);
-	if (io_req->wait_for_comp)
-		complete(&io_req->tm_done);
+	if (io_req->wait_for_cleanup_comp)
+		complete(&io_req->cleanup_done);
 }
 
 void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
@@ -1347,6 +1369,16 @@
 		return;
 	}
 
+	/*
+	 * If we receive an ABTS completion here then we will not receive
+	 * a cleanup completion so clear any cleanup pending flags.
+	 */
+	if (test_bit(BNX2FC_FLAG_ISSUE_CLEANUP_REQ, &io_req->req_flags)) {
+		clear_bit(BNX2FC_FLAG_ISSUE_CLEANUP_REQ, &io_req->req_flags);
+		if (io_req->wait_for_cleanup_comp)
+			complete(&io_req->cleanup_done);
+	}
+
 	/* Do not issue RRQ as this IO is already cleanedup */
 	if (test_and_set_bit(BNX2FC_FLAG_IO_CLEANUP,
 				&io_req->req_flags))
@@ -1391,10 +1423,10 @@
 	bnx2fc_cmd_timer_set(io_req, r_a_tov);
 
 io_compl:
-	if (io_req->wait_for_comp) {
+	if (io_req->wait_for_abts_comp) {
 		if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
 				       &io_req->req_flags))
-			complete(&io_req->tm_done);
+			complete(&io_req->abts_done);
 	} else {
 		/*
 		 * We end up here when ABTS is issued as
@@ -1578,9 +1610,9 @@
 	sc_cmd->scsi_done(sc_cmd);
 
 	kref_put(&io_req->refcount, bnx2fc_cmd_release);
-	if (io_req->wait_for_comp) {
+	if (io_req->wait_for_abts_comp) {
 		BNX2FC_IO_DBG(io_req, "tm_compl - wake up the waiter\n");
-		complete(&io_req->tm_done);
+		complete(&io_req->abts_done);
 	}
 }
 
@@ -1624,6 +1656,7 @@
 	u64 addr;
 	int i;
 
+	WARN_ON(scsi_sg_count(sc) > BNX2FC_MAX_BDS_PER_CMD);
 	/*
 	 * Use dma_map_sg directly to ensure we're using the correct
 	 * dev struct off of pcidev.
@@ -1671,6 +1704,16 @@
 	}
 	io_req->bd_tbl->bd_valid = bd_count;
 
+	/*
+	 * Return the command to ML if BD count exceeds the max number
+	 * that can be handled by FW.
+	 */
+	if (bd_count > BNX2FC_FW_MAX_BDS_PER_CMD) {
+		pr_err("bd_count = %d exceeded FW supported max BD(255), task_id = 0x%x\n",
+		       bd_count, io_req->xid);
+		return -ENOMEM;
+	}
+
 	return 0;
 }
 
@@ -1880,8 +1923,7 @@
 	struct fcoe_fcp_rsp_payload *fcp_rsp;
 	struct bnx2fc_rport *tgt = io_req->tgt;
 	struct scsi_cmnd *sc_cmd;
-	struct Scsi_Host *host;
-
+	u16 scope = 0, qualifier = 0;
 
 	/* scsi_cmd_cmpl is called with tgt lock held */
 
@@ -1910,7 +1952,6 @@
 	/* parse fcp_rsp and obtain sense data from RQ if available */
 	bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq);
 
-	host = sc_cmd->device->host;
 	if (!sc_cmd->SCp.ptr) {
 		printk(KERN_ERR PFX "SCp.ptr is NULL\n");
 		return;
@@ -1927,10 +1968,10 @@
 		 * between command abort and (late) completion.
 		 */
 		BNX2FC_IO_DBG(io_req, "xid not on active_cmd_queue\n");
-		if (io_req->wait_for_comp)
+		if (io_req->wait_for_abts_comp)
 			if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
 					       &io_req->req_flags))
-				complete(&io_req->tm_done);
+				complete(&io_req->abts_done);
 	}
 
 	bnx2fc_unmap_sg_list(io_req);
@@ -1950,12 +1991,30 @@
 
 			if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
 			    io_req->cdb_status == SAM_STAT_BUSY) {
-				/* Set the jiffies + retry_delay_timer * 100ms
-				   for the rport/tgt */
-				tgt->retry_delay_timestamp = jiffies +
-					fcp_rsp->retry_delay_timer * HZ / 10;
+				/* Newer array firmware with BUSY or
+				 * TASK_SET_FULL may return a status that needs
+				 * the scope bits masked; otherwise a huge
+				 * delay timestamp of up to 27 minutes can
+				 * result.
+				 */
+				if (fcp_rsp->retry_delay_timer) {
+					/* Upper 2 bits */
+					scope = fcp_rsp->retry_delay_timer
+						& 0xC000;
+					/* Lower 14 bits */
+					qualifier = fcp_rsp->retry_delay_timer
+						& 0x3FFF;
+				}
+				if (scope > 0 && qualifier > 0 &&
+					qualifier <= 0x3FEF) {
+					/* Set the jiffies +
+					 * retry_delay_timer * 100ms
+					 * for the rport/tgt
+					 */
+					tgt->retry_delay_timestamp = jiffies +
+						(qualifier * HZ / 10);
+				}
 			}
-
 		}
 		if (io_req->fcp_resid)
 			scsi_set_resid(sc_cmd, io_req->fcp_resid);
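
The masking above follows the FCP_RSP retry-delay encoding: bits 15:14 of the 16-bit field are SCOPE and bits 13:0 are QUALIFIER, in units of 100 ms. A runnable model of the same arithmetic (example raw value assumed):

#include <stdint.h>
#include <stdio.h>

/*
 * Runnable model of the masking above: treating the whole 16-bit
 * field as a delay could yield 0xFFFF * 100 ms (about 109 minutes);
 * the masked qualifier is bounded by 0x3FEF * 100 ms (about 27
 * minutes), and the scope bits are kept out of the delay value.
 */
int main(void)
{
	uint16_t retry_delay_timer = 0xC123;	/* example raw value */
	uint16_t scope = retry_delay_timer & 0xC000;	/* bits 15:14 */
	uint16_t qualifier = retry_delay_timer & 0x3FFF; /* bits 13:0 */

	if (scope > 0 && qualifier > 0 && qualifier <= 0x3FEF)
		printf("delay %u ms\n", qualifier * 100);
	else
		printf("no delay applied\n");
	return 0;
}
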
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index e3d1c7c..50384b4 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -187,7 +187,7 @@
 				/* Handle eh_abort timeout */
 				BNX2FC_IO_DBG(io_req, "eh_abort for IO "
 					      "cleaned up\n");
-				complete(&io_req->tm_done);
+				complete(&io_req->abts_done);
 			}
 			kref_put(&io_req->refcount,
 				 bnx2fc_cmd_release); /* drop timer hold */
@@ -210,8 +210,8 @@
 		list_del_init(&io_req->link);
 		io_req->on_tmf_queue = 0;
 		BNX2FC_IO_DBG(io_req, "tm_queue cleanup\n");
-		if (io_req->wait_for_comp)
-			complete(&io_req->tm_done);
+		if (io_req->wait_for_abts_comp)
+			complete(&io_req->abts_done);
 	}
 
 	list_for_each_entry_safe(io_req, tmp, &tgt->els_queue, link) {
@@ -251,8 +251,8 @@
 				/* Handle eh_abort timeout */
 				BNX2FC_IO_DBG(io_req, "eh_abort for IO "
 					      "in retire_q\n");
-				if (io_req->wait_for_comp)
-					complete(&io_req->tm_done);
+				if (io_req->wait_for_abts_comp)
+					complete(&io_req->abts_done);
 			}
 			kref_put(&io_req->refcount, bnx2fc_cmd_release);
 		}
@@ -672,8 +672,8 @@
 	tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) &
 			   CNIC_PAGE_MASK;
 
-	tgt->sq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
-				      &tgt->sq_dma, GFP_KERNEL);
+	tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
+				     &tgt->sq_dma, GFP_KERNEL);
 	if (!tgt->sq) {
 		printk(KERN_ERR PFX "unable to allocate SQ memory %d\n",
 			tgt->sq_mem_size);
@@ -685,8 +685,8 @@
 	tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) &
 			   CNIC_PAGE_MASK;
 
-	tgt->cq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
-				      &tgt->cq_dma, GFP_KERNEL);
+	tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
+				     &tgt->cq_dma, GFP_KERNEL);
 	if (!tgt->cq) {
 		printk(KERN_ERR PFX "unable to allocate CQ memory %d\n",
 			tgt->cq_mem_size);
@@ -698,8 +698,8 @@
 	tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) &
 			   CNIC_PAGE_MASK;
 
-	tgt->rq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
-				      &tgt->rq_dma, GFP_KERNEL);
+	tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
+				     &tgt->rq_dma, GFP_KERNEL);
 	if (!tgt->rq) {
 		printk(KERN_ERR PFX "unable to allocate RQ memory %d\n",
 			tgt->rq_mem_size);
@@ -710,8 +710,8 @@
 	tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) &
 			   CNIC_PAGE_MASK;
 
-	tgt->rq_pbl = dma_zalloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
-					  &tgt->rq_pbl_dma, GFP_KERNEL);
+	tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
+					 &tgt->rq_pbl_dma, GFP_KERNEL);
 	if (!tgt->rq_pbl) {
 		printk(KERN_ERR PFX "unable to allocate RQ PBL %d\n",
 			tgt->rq_pbl_size);
@@ -735,9 +735,9 @@
 	tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) &
 			       CNIC_PAGE_MASK;
 
-	tgt->xferq = dma_zalloc_coherent(&hba->pcidev->dev,
-					 tgt->xferq_mem_size, &tgt->xferq_dma,
-					 GFP_KERNEL);
+	tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev,
+					tgt->xferq_mem_size, &tgt->xferq_dma,
+					GFP_KERNEL);
 	if (!tgt->xferq) {
 		printk(KERN_ERR PFX "unable to allocate XFERQ %d\n",
 			tgt->xferq_mem_size);
@@ -749,9 +749,9 @@
 	tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) &
 			       CNIC_PAGE_MASK;
 
-	tgt->confq = dma_zalloc_coherent(&hba->pcidev->dev,
-					 tgt->confq_mem_size, &tgt->confq_dma,
-					 GFP_KERNEL);
+	tgt->confq = dma_alloc_coherent(&hba->pcidev->dev,
+					tgt->confq_mem_size, &tgt->confq_dma,
+					GFP_KERNEL);
 	if (!tgt->confq) {
 		printk(KERN_ERR PFX "unable to allocate CONFQ %d\n",
 			tgt->confq_mem_size);
@@ -763,9 +763,9 @@
 	tgt->confq_pbl_size =
 		(tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
 
-	tgt->confq_pbl = dma_zalloc_coherent(&hba->pcidev->dev,
-					     tgt->confq_pbl_size,
-					     &tgt->confq_pbl_dma, GFP_KERNEL);
+	tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev,
+					    tgt->confq_pbl_size,
+					    &tgt->confq_pbl_dma, GFP_KERNEL);
 	if (!tgt->confq_pbl) {
 		printk(KERN_ERR PFX "unable to allocate CONFQ PBL %d\n",
 			tgt->confq_pbl_size);
@@ -787,9 +787,9 @@
 	/* Allocate and map ConnDB */
 	tgt->conn_db_mem_size = sizeof(struct fcoe_conn_db);
 
-	tgt->conn_db = dma_zalloc_coherent(&hba->pcidev->dev,
-					   tgt->conn_db_mem_size,
-					   &tgt->conn_db_dma, GFP_KERNEL);
+	tgt->conn_db = dma_alloc_coherent(&hba->pcidev->dev,
+					  tgt->conn_db_mem_size,
+					  &tgt->conn_db_dma, GFP_KERNEL);
 	if (!tgt->conn_db) {
 		printk(KERN_ERR PFX "unable to allocate conn_db %d\n",
 						tgt->conn_db_mem_size);
@@ -802,8 +802,8 @@
 	tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) &
 			     CNIC_PAGE_MASK;
 
-	tgt->lcq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
-				       &tgt->lcq_dma, GFP_KERNEL);
+	tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
+				      &tgt->lcq_dma, GFP_KERNEL);
 
 	if (!tgt->lcq) {
 		printk(KERN_ERR PFX "unable to allocate lcq %d\n",
diff --git a/drivers/scsi/bnx2i/Kconfig b/drivers/scsi/bnx2i/Kconfig
index ba30ff8..702dc82 100644
--- a/drivers/scsi/bnx2i/Kconfig
+++ b/drivers/scsi/bnx2i/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 config SCSI_BNX2_ISCSI
 	tristate "QLogic NetXtreme II iSCSI support"
 	depends on NET
diff --git a/drivers/scsi/bnx2i/Makefile b/drivers/scsi/bnx2i/Makefile
index b5802bd..2537867 100644
--- a/drivers/scsi/bnx2i/Makefile
+++ b/drivers/scsi/bnx2i/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 bnx2i-y := bnx2i_init.o bnx2i_hwi.o bnx2i_iscsi.o bnx2i_sysfs.o
 
 obj-$(CONFIG_SCSI_BNX2_ISCSI) += bnx2i.o
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index e9e669a..1266631 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -253,7 +253,6 @@
 		writew(ep->qp.rq_prod_idx,
 		       ep->qp.ctx_base + CNIC_RECV_DOORBELL);
 	}
-	mmiowb();
 }
 
 
@@ -279,8 +278,6 @@
 		bnx2i_ring_577xx_doorbell(bnx2i_conn);
 	} else
 		writew(count, ep->qp.ctx_base + CNIC_SEND_DOORBELL);
-
-	mmiowb(); /* flush posted PCI writes */
 }
 
 
@@ -1070,8 +1067,8 @@
 
 	/* Allocate memory area for actual SQ element */
 	ep->qp.sq_virt =
-		dma_zalloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
-					&ep->qp.sq_phys, GFP_KERNEL);
+		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
+				   &ep->qp.sq_phys, GFP_KERNEL);
 	if (!ep->qp.sq_virt) {
 		printk(KERN_ALERT "bnx2i: unable to alloc SQ BD memory %d\n",
 				  ep->qp.sq_mem_size);
@@ -1106,8 +1103,8 @@
 
 	/* Allocate memory area for actual CQ element */
 	ep->qp.cq_virt =
-		dma_zalloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
-					&ep->qp.cq_phys, GFP_KERNEL);
+		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
+				   &ep->qp.cq_phys, GFP_KERNEL);
 	if (!ep->qp.cq_virt) {
 		printk(KERN_ALERT "bnx2i: unable to alloc CQ BD memory %d\n",
 				  ep->qp.cq_mem_size);
@@ -1906,7 +1903,6 @@
 	struct iscsi_task *task;
 	struct scsi_cmnd *sc;
 	int rc = 0;
-	int cpu;
 
 	spin_lock(&session->back_lock);
 	task = iscsi_itt_to_task(bnx2i_conn->cls_conn->dd_data,
@@ -1917,14 +1913,9 @@
 	}
 	sc = task->sc;
 
-	if (!blk_rq_cpu_valid(sc->request))
-		cpu = smp_processor_id();
-	else
-		cpu = sc->request->cpu;
-
 	spin_unlock(&session->back_lock);
 
-	p = &per_cpu(bnx2i_percpu, cpu);
+	p = &per_cpu(bnx2i_percpu, blk_mq_rq_cpu(sc->request));
 	spin_lock(&p->p_work_lock);
 	if (unlikely(!p->iothread)) {
 		rc = -EINVAL;
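
With the legacy request path gone, every SCSI request is a blk-mq request, so the blk_rq_cpu_valid()/smp_processor_id() fallback collapses into one helper call; a minimal sketch:

#include <linux/blk-mq.h>

/*
 * Sketch: under blk-mq every request records the CPU it was issued
 * from, so the old blk_rq_cpu_valid()/smp_processor_id() fallback
 * reduces to a single helper call.
 */
static int demo_request_cpu(struct request *rq)
{
	return blk_mq_rq_cpu(rq);
}
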
@@ -2433,7 +2424,6 @@
 {
 	u32 cid_addr;
 	struct bnx2i_endpoint *ep;
-	u32 cid_num;
 
 	ep = bnx2i_find_ep_in_ofld_list(hba, ofld_kcqe->iscsi_conn_id);
 	if (!ep) {
@@ -2468,7 +2458,6 @@
 	} else {
 		ep->state = EP_STATE_OFLD_COMPL;
 		cid_addr = ofld_kcqe->iscsi_conn_context_id;
-		cid_num = bnx2i_get_cid_num(ep);
 		ep->ep_cid = cid_addr;
 		ep->qp.ctx_base = NULL;
 	}
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index de0a507..c5fa5f3 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -577,7 +577,7 @@
 				  hba->dummy_buffer, hba->dummy_buf_dma);
 		hba->dummy_buffer = NULL;
 	}
-		return;
+	return;
 }
 
 /**
@@ -2263,7 +2263,6 @@
 	.max_sectors		= 127,
 	.cmd_per_lun		= 128,
 	.this_id		= -1,
-	.use_clustering		= ENABLE_CLUSTERING,
 	.sg_tablesize		= ISCSI_MAX_BDS_PER_CMD,
 	.shost_attrs		= bnx2i_dev_attributes,
 	.track_queue_depth	= 1,
diff --git a/drivers/scsi/bvme6000_scsi.c b/drivers/scsi/bvme6000_scsi.c
index 0f846ae..8d72b25 100644
--- a/drivers/scsi/bvme6000_scsi.c
+++ b/drivers/scsi/bvme6000_scsi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Detection routine for the NCR53c710 based BVME6000 SCSI Controllers for Linux.
  *
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index 1c5051b..76751d6 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * SCSI Media Changer device driver for Linux 2.6
  *
@@ -578,7 +579,6 @@
 	scsi_changer *ch = file->private_data;
 
 	scsi_device_put(ch->device);
-	ch->device = NULL;
 	file->private_data = NULL;
 	kref_put(&ch->ref, ch_destroy);
 	return 0;
diff --git a/drivers/scsi/csiostor/Kconfig b/drivers/scsi/csiostor/Kconfig
index 7c7e508..c6c03f9 100644
--- a/drivers/scsi/csiostor/Kconfig
+++ b/drivers/scsi/csiostor/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 config SCSI_CHELSIO_FCOE
 	tristate "Chelsio Communications FCoE support"
 	depends on PCI && SCSI
diff --git a/drivers/scsi/csiostor/csio_attr.c b/drivers/scsi/csiostor/csio_attr.c
index 8a00403..200e500 100644
--- a/drivers/scsi/csiostor/csio_attr.c
+++ b/drivers/scsi/csiostor/csio_attr.c
@@ -497,7 +497,6 @@
 static int
 csio_fcoe_free_vnp(struct csio_hw *hw, struct csio_lnode *ln)
 {
-	struct csio_lnode *pln;
 	struct csio_mb  *mbp;
 	struct fw_fcoe_vnp_cmd *rsp;
 	int ret = 0;
@@ -514,8 +513,6 @@
 		goto out;
 	}
 
-	pln = ln->pln;
-
 	csio_fcoe_vnp_free_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
 				   ln->fcf_flowid, ln->vnp_flowid,
 				   NULL);
@@ -594,12 +591,12 @@
 	}
 
 	fc_vport_set_state(fc_vport, FC_VPORT_INITIALIZING);
+	ln->fc_vport = fc_vport;
 
 	if (csio_fcoe_alloc_vnp(hw, ln))
 		goto error;
 
 	*(struct csio_lnode **)fc_vport->dd_data = ln;
-	ln->fc_vport = fc_vport;
 	if (!fc_vport->node_name)
 		fc_vport->node_name = wwn_to_u64(csio_ln_wwnn(ln));
 	if (!fc_vport->port_name)
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index ed2dae6..a6dd704 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -167,14 +167,10 @@
  * csio_dfs_init - Debug filesystem initialization for the module.
  *
  */
-static int
+static void
 csio_dfs_init(void)
 {
 	csio_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
-	if (!csio_debugfs_root)
-		pr_warn("Could not create debugfs entry, continuing\n");
-
-	return 0;
 }
 
 /*
@@ -210,11 +206,11 @@
 	pci_set_master(pdev);
 	pci_try_set_mwi(pdev);
 
-	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
-		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-	} else {
+	rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (rv)
+		rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (rv) {
+		rv = -ENODEV;
 		dev_err(&pdev->dev, "No suitable DMA available.\n");
 		goto err_release_regions;
 	}
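
The converted mask setup collapses the old pci_set_dma_mask()/pci_set_consistent_dma_mask() pairs into dma_set_mask_and_coherent(), trying 64-bit addressing first and falling back to 32-bit. A minimal sketch of the pattern (hypothetical helper):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/*
 * Minimal sketch of the pattern above (hypothetical helper):
 * dma_set_mask_and_coherent() sets both the streaming and the
 * coherent mask in a single call.
 */
static int demo_set_dma_masks(struct pci_dev *pdev)
{
	int rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));

	if (rv)		/* fall back to 32-bit addressing */
		rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	return rv ? -ENODEV : 0;
}
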
@@ -258,7 +254,6 @@
 csio_hw_exit_workers(struct csio_hw *hw)
 {
 	cancel_work_sync(&hw->evtq_work);
-	flush_scheduled_work();
 }
 
 static int
@@ -649,7 +644,7 @@
 	if (csio_lnode_init(ln, hw, pln))
 		goto err_shost_put;
 
-	if (scsi_add_host(shost, dev))
+	if (scsi_add_host_with_dma(shost, dev, &hw->pdev->dev))
 		goto err_lnode_exit;
 
 	return ln;
@@ -1102,7 +1097,6 @@
 	pci_set_master(pdev);
 	pci_restore_state(pdev);
 	pci_save_state(pdev);
-	pci_cleanup_aer_uncorrect_error_status(pdev);
 
 	/* Bring HW s/m to ready state.
 	 * but don't resume IOs.
diff --git a/drivers/scsi/csiostor/csio_isr.c b/drivers/scsi/csiostor/csio_isr.c
index 7c88147..b254040 100644
--- a/drivers/scsi/csiostor/csio_isr.c
+++ b/drivers/scsi/csiostor/csio_isr.c
@@ -474,13 +474,39 @@
 	csio_dbg(hw, "Reduced sqsets to %d\n", hw->num_sqsets);
 }
 
+static void csio_calc_sets(struct irq_affinity *affd, unsigned int nvecs)
+{
+	struct csio_hw *hw = affd->priv;
+	u8 i;
+
+	if (!nvecs)
+		return;
+
+	if (nvecs < hw->num_pports) {
+		affd->nr_sets = 1;
+		affd->set_size[0] = nvecs;
+		return;
+	}
+
+	affd->nr_sets = hw->num_pports;
+	for (i = 0; i < hw->num_pports; i++)
+		affd->set_size[i] = nvecs / hw->num_pports;
+}
+
 static int
 csio_enable_msix(struct csio_hw *hw)
 {
 	int i, j, k, n, min, cnt;
 	int extra = CSIO_EXTRA_VECS;
 	struct csio_scsi_cpu_info *info;
-	struct irq_affinity desc = { .pre_vectors = 2 };
+	struct irq_affinity desc = {
+		.pre_vectors = CSIO_EXTRA_VECS,
+		.calc_sets = csio_calc_sets,
+		.priv = hw,
+	};
+
+	if (hw->num_pports > IRQ_AFFINITY_MAX_SETS)
+		return -ENOSPC;
 
 	min = hw->num_pports + extra;
 	cnt = hw->num_sqsets + extra;
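
csio_calc_sets() uses the .calc_sets affinity callback: the PCI core invokes it for every candidate vector count so the driver can carve the vectors remaining after pre_vectors into one spreading set per port. A hypothetical wrapper showing how such a descriptor is consumed:

#include <linux/interrupt.h>
#include <linux/pci.h>

/*
 * Hypothetical wrapper showing how a descriptor with .calc_sets is
 * consumed: the PCI core calls the callback for each candidate
 * vector count, then spreads affinity within each resulting set.
 */
static int demo_alloc_vectors(struct pci_dev *pdev, unsigned int min_vecs,
			      unsigned int max_vecs,
			      struct irq_affinity *desc)
{
	return pci_alloc_irq_vectors_affinity(pdev, min_vecs, max_vecs,
					      PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
					      desc);
}
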
diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c
index cc5611e..66e58f0 100644
--- a/drivers/scsi/csiostor/csio_lnode.c
+++ b/drivers/scsi/csiostor/csio_lnode.c
@@ -1845,8 +1845,8 @@
 	/* Allocate Dma buffers for FDMI response Payload */
 	dma_buf = &ln->mgmt_req->dma_buf;
 	dma_buf->len = 2048;
-	dma_buf->vaddr = pci_alloc_consistent(hw->pdev, dma_buf->len,
-						&dma_buf->paddr);
+	dma_buf->vaddr = dma_alloc_coherent(&hw->pdev->dev, dma_buf->len,
+						&dma_buf->paddr, GFP_KERNEL);
 	if (!dma_buf->vaddr) {
 		csio_err(hw, "Failed to alloc DMA buffer for FDMI!\n");
 		kfree(ln->mgmt_req);
@@ -1873,7 +1873,7 @@
 
 	dma_buf = &ln->mgmt_req->dma_buf;
 	if (dma_buf->vaddr)
-		pci_free_consistent(hw->pdev, dma_buf->len, dma_buf->vaddr,
+		dma_free_coherent(&hw->pdev->dev, dma_buf->len, dma_buf->vaddr,
 				    dma_buf->paddr);
 
 	kfree(ln->mgmt_req);
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index dab0d3f..469d0bc 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -1713,8 +1713,11 @@
 	}
 
 out:
-	if (req->nsge > 0)
+	if (req->nsge > 0) {
 		scsi_dma_unmap(cmnd);
+		if (req->dcopy && (host_status == DID_OK))
+			host_status = csio_scsi_copy_to_sgl(hw, req);
+	}
 
 	cmnd->result = (((host_status) << 16) | scsi_status);
 	cmnd->scsi_done(cmnd);
@@ -1780,16 +1783,10 @@
 	int nsge = 0;
 	int rv = SCSI_MLQUEUE_HOST_BUSY, nr;
 	int retval;
-	int cpu;
 	struct csio_scsi_qset *sqset;
 	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
 
-	if (!blk_rq_cpu_valid(cmnd->request))
-		cpu = smp_processor_id();
-	else
-		cpu = cmnd->request->cpu;
-
-	sqset = &hw->sqset[ln->portid][cpu];
+	sqset = &hw->sqset[ln->portid][blk_mq_rq_cpu(cmnd->request)];
 
 	nr = fc_remote_port_chkready(rport);
 	if (nr) {
@@ -1990,15 +1987,15 @@
 	/* FW successfully aborted the request */
 	if (host_byte(cmnd->result) == DID_REQUEUE) {
 		csio_info(hw,
-			"Aborted SCSI command to (%d:%llu) serial#:0x%lx\n",
+			"Aborted SCSI command to (%d:%llu) tag %u\n",
 			cmnd->device->id, cmnd->device->lun,
-			cmnd->serial_number);
+			cmnd->request->tag);
 		return SUCCESS;
 	} else {
 		csio_info(hw,
-			"Failed to abort SCSI command, (%d:%llu) serial#:0x%lx\n",
+			"Failed to abort SCSI command, (%d:%llu) tag %u\n",
 			cmnd->device->id, cmnd->device->lun,
-			cmnd->serial_number);
+			cmnd->request->tag);
 		return FAILED;
 	}
 }
@@ -2280,7 +2277,6 @@
 	.this_id		= -1,
 	.sg_tablesize		= CSIO_SCSI_MAX_SGE,
 	.cmd_per_lun		= CSIO_MAX_CMD_PER_LUN,
-	.use_clustering		= ENABLE_CLUSTERING,
 	.shost_attrs		= csio_fcoe_lport_attrs,
 	.max_sectors		= CSIO_MAX_SECTOR_SIZE,
 };
@@ -2300,7 +2296,6 @@
 	.this_id		= -1,
 	.sg_tablesize		= CSIO_SCSI_MAX_SGE,
 	.cmd_per_lun		= CSIO_MAX_CMD_PER_LUN,
-	.use_clustering		= ENABLE_CLUSTERING,
 	.shost_attrs		= csio_fcoe_vport_attrs,
 	.max_sectors		= CSIO_MAX_SECTOR_SIZE,
 };
@@ -2349,8 +2344,8 @@
 		}
 
 		/* Allocate Dma buffers for DDP */
-		ddp_desc->vaddr = pci_alloc_consistent(hw->pdev, unit_size,
-							&ddp_desc->paddr);
+		ddp_desc->vaddr = dma_alloc_coherent(&hw->pdev->dev, unit_size,
+				&ddp_desc->paddr, GFP_KERNEL);
 		if (!ddp_desc->vaddr) {
 			csio_err(hw,
 				 "SCSI response DMA buffer (ddp) allocation"
@@ -2372,8 +2367,8 @@
 	list_for_each(tmp, &scm->ddp_freelist) {
 		ddp_desc = (struct csio_dma_buf *) tmp;
 		tmp = csio_list_prev(tmp);
-		pci_free_consistent(hw->pdev, ddp_desc->len, ddp_desc->vaddr,
-				    ddp_desc->paddr);
+		dma_free_coherent(&hw->pdev->dev, ddp_desc->len,
+				  ddp_desc->vaddr, ddp_desc->paddr);
 		list_del_init(&ddp_desc->list);
 		kfree(ddp_desc);
 	}
@@ -2399,8 +2394,8 @@
 	list_for_each(tmp, &scm->ddp_freelist) {
 		ddp_desc = (struct csio_dma_buf *) tmp;
 		tmp = csio_list_prev(tmp);
-		pci_free_consistent(hw->pdev, ddp_desc->len, ddp_desc->vaddr,
-				    ddp_desc->paddr);
+		dma_free_coherent(&hw->pdev->dev, ddp_desc->len,
+				  ddp_desc->vaddr, ddp_desc->paddr);
 		list_del_init(&ddp_desc->list);
 		kfree(ddp_desc);
 	}
diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c
index 5022e82..0ca6951 100644
--- a/drivers/scsi/csiostor/csio_wr.c
+++ b/drivers/scsi/csiostor/csio_wr.c
@@ -124,8 +124,8 @@
 
 	while (n--) {
 		buf->len = sge->sge_fl_buf_size[sreg];
-		buf->vaddr = pci_alloc_consistent(hw->pdev, buf->len,
-						  &buf->paddr);
+		buf->vaddr = dma_alloc_coherent(&hw->pdev->dev, buf->len,
+						&buf->paddr, GFP_KERNEL);
 		if (!buf->vaddr) {
 			csio_err(hw, "Could only fill %d buffers!\n", n + 1);
 			return -ENOMEM;
@@ -233,7 +233,8 @@
 
 	q = wrm->q_arr[free_idx];
 
-	q->vstart = pci_zalloc_consistent(hw->pdev, qsz, &q->pstart);
+	q->vstart = dma_alloc_coherent(&hw->pdev->dev, qsz, &q->pstart,
+				       GFP_KERNEL);
 	if (!q->vstart) {
 		csio_err(hw,
 			 "Failed to allocate DMA memory for "
@@ -807,6 +808,7 @@
 
 				csio_q_eqid(hw, i) = CSIO_MAX_QID;
 			}
+			/* fall through */
 		case CSIO_INGRESS:
 			if (csio_q_iqid(hw, i) != CSIO_MAX_QID) {
 				csio_wr_cleanup_iq_ftr(hw, i);
@@ -1314,7 +1316,6 @@
 	u32 fl_align = clsz < 32 ? 32 : clsz;
 	u32 pack_align;
 	u32 ingpad, ingpack;
-	int pcie_cap;
 
 	csio_wr_reg32(hw, HOSTPAGESIZEPF0_V(s_hps) | HOSTPAGESIZEPF1_V(s_hps) |
 		      HOSTPAGESIZEPF2_V(s_hps) | HOSTPAGESIZEPF3_V(s_hps) |
@@ -1345,8 +1346,7 @@
 	 * multiple of the Maximum Payload Size.
 	 */
 	pack_align = fl_align;
-	pcie_cap = pci_find_capability(hw->pdev, PCI_CAP_ID_EXP);
-	if (pcie_cap) {
+	if (pci_is_pcie(hw->pdev)) {
 		u32 mps, mps_log;
 		u16 devctl;
 
@@ -1354,9 +1354,7 @@
 		 * [bits 7:5] encodes sizes as powers of 2 starting at
 		 * 128 bytes.
 		 */
-		pci_read_config_word(hw->pdev,
-				     pcie_cap + PCI_EXP_DEVCTL,
-				     &devctl);
+		pcie_capability_read_word(hw->pdev, PCI_EXP_DEVCTL, &devctl);
 		mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
 		mps = 1 << mps_log;
 		if (mps > pack_align)
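
pcie_capability_read_word() replaces the manual pci_find_capability() walk; the Maximum Payload Size decode itself is unchanged, with bits 7:5 of PCI_EXP_DEVCTL encoding powers of two starting at 128 bytes. A runnable model of that decode (example register value assumed):

#include <stdint.h>
#include <stdio.h>

/*
 * Runnable model of the decode above: PCI_EXP_DEVCTL bits 7:5 encode
 * the Maximum Payload Size as a power of two starting at 128 bytes
 * (code 0 -> 128, 1 -> 256, 2 -> 512, ...).
 */
int main(void)
{
	uint16_t devctl = 0x2040;	/* example: code 2 in bits 7:5 */
	unsigned int mps_log = ((devctl & 0x00e0) >> 5) + 7;
	unsigned int mps = 1u << mps_log;

	printf("MPS = %u bytes\n", mps);	/* prints 512 */
	return 0;
}
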
@@ -1703,14 +1701,14 @@
 					buf = &q->un.fl.bufs[j];
 					if (!buf->vaddr)
 						continue;
-					pci_free_consistent(hw->pdev, buf->len,
-							    buf->vaddr,
-							    buf->paddr);
+					dma_free_coherent(&hw->pdev->dev,
+							buf->len, buf->vaddr,
+							buf->paddr);
 				}
 				kfree(q->un.fl.bufs);
 			}
-			pci_free_consistent(hw->pdev, q->size,
-					    q->vstart, q->pstart);
+			dma_free_coherent(&hw->pdev->dev, q->size,
+					q->vstart, q->pstart);
 		}
 		kfree(q);
 	}
diff --git a/drivers/scsi/cxgbi/Kconfig b/drivers/scsi/cxgbi/Kconfig
index 17eb5d5..75f9428 100644
--- a/drivers/scsi/cxgbi/Kconfig
+++ b/drivers/scsi/cxgbi/Kconfig
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
 source "drivers/scsi/cxgbi/cxgb3i/Kconfig"
 source "drivers/scsi/cxgbi/cxgb4i/Kconfig"
diff --git a/drivers/scsi/cxgbi/Makefile b/drivers/scsi/cxgbi/Makefile
index a73781a..abfd38a 100644
--- a/drivers/scsi/cxgbi/Makefile
+++ b/drivers/scsi/cxgbi/Makefile
@@ -1,4 +1,5 @@
-ccflags-y += -Idrivers/net/ethernet/chelsio/libcxgb
+# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y += -I $(srctree)/drivers/net/ethernet/chelsio/libcxgb
 
 obj-$(CONFIG_SCSI_CXGB3_ISCSI)	+= libcxgbi.o cxgb3i/
 obj-$(CONFIG_SCSI_CXGB4_ISCSI)	+= libcxgbi.o cxgb4i/
diff --git a/drivers/scsi/cxgbi/cxgb3i/Kbuild b/drivers/scsi/cxgbi/cxgb3i/Kbuild
index 663c52e..8d8a43f 100644
--- a/drivers/scsi/cxgbi/cxgb3i/Kbuild
+++ b/drivers/scsi/cxgbi/cxgb3i/Kbuild
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 ccflags-y += -I$(srctree)/drivers/net/ethernet/chelsio/cxgb3
 ccflags-y += -I$(srctree)/drivers/net/ethernet/chelsio/libcxgb
 
diff --git a/drivers/scsi/cxgbi/cxgb3i/Kconfig b/drivers/scsi/cxgbi/cxgb3i/Kconfig
index f68c871..3e4b644 100644
--- a/drivers/scsi/cxgbi/cxgb3i/Kconfig
+++ b/drivers/scsi/cxgbi/cxgb3i/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 config SCSI_CXGB3_ISCSI
 	tristate "Chelsio T3 iSCSI support"
 	depends on PCI && INET && (IPV6 || IPV6=n)
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index bf07735..524cdbc 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -95,7 +95,7 @@
 	.eh_device_reset_handler = iscsi_eh_device_reset,
 	.eh_target_reset_handler = iscsi_eh_recover_target,
 	.target_alloc	= iscsi_target_alloc,
-	.use_clustering	= DISABLE_CLUSTERING,
+	.dma_boundary	= PAGE_SIZE - 1,
 	.this_id	= -1,
 	.track_queue_depth = 1,
 };
@@ -979,14 +979,17 @@
 	csk->atid = cxgb3_alloc_atid(t3dev, &t3_client, csk);
 	if (csk->atid < 0) {
 		pr_err("NO atid available.\n");
-		goto rel_resource;
+		return -EINVAL;
 	}
 	cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
 	cxgbi_sock_get(csk);
 
 	skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_KERNEL);
-	if (!skb)
-		goto rel_resource;
+	if (!skb) {
+		cxgb3_free_atid(t3dev, csk->atid);
+		cxgbi_sock_put(csk);
+		return -ENOMEM;
+	}
 	skb->sk = (struct sock *)csk;
 	set_arp_failure_handler(skb, act_open_arp_failure);
 	csk->snd_win = cxgb3i_snd_win;
@@ -1007,11 +1010,6 @@
 	cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
 	send_act_open_req(csk, skb, csk->l2t);
 	return 0;
-
-rel_resource:
-	if (skb)
-		__kfree_skb(skb);
-	return -EINVAL;
 }
 
 cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS] = {
@@ -1144,7 +1142,7 @@
 }
 
 static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
-				       unsigned int tid, int pg_idx, bool reply)
+				unsigned int tid, int pg_idx)
 {
 	struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
 					GFP_KERNEL);
@@ -1160,7 +1158,7 @@
 	req = (struct cpl_set_tcb_field *)skb->head;
 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
-	req->reply = V_NO_REPLY(reply ? 0 : 1);
+	req->reply = V_NO_REPLY(1);
 	req->cpu_idx = 0;
 	req->word = htons(31);
 	req->mask = cpu_to_be64(0xF0000000);
@@ -1177,11 +1175,10 @@
  * @tid: connection id
  * @hcrc: header digest enabled
  * @dcrc: data digest enabled
- * @reply: request reply from h/w
  * set up the iscsi digest settings for a connection identified by tid
  */
 static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
-			     int hcrc, int dcrc, int reply)
+				 int hcrc, int dcrc)
 {
 	struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
 					GFP_KERNEL);
@@ -1197,7 +1194,7 @@
 	req = (struct cpl_set_tcb_field *)skb->head;
 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
-	req->reply = V_NO_REPLY(reply ? 0 : 1);
+	req->reply = V_NO_REPLY(1);
 	req->cpu_idx = 0;
 	req->word = htons(31);
 	req->mask = cpu_to_be64(0x0F000000);
@@ -1246,8 +1243,12 @@
 		tformat.pgsz_order[i] = uinfo.pgsz_factor[i];
 	cxgbi_tagmask_check(tagmask, &tformat);
 
-	cxgbi_ddp_ppm_setup(&tdev->ulp_iscsi, cdev, &tformat, ppmax,
-			    uinfo.llimit, uinfo.llimit, 0);
+	err = cxgbi_ddp_ppm_setup(&tdev->ulp_iscsi, cdev, &tformat,
+				  (uinfo.ulimit - uinfo.llimit + 1),
+				  uinfo.llimit, uinfo.llimit, 0, 0, 0);
+	if (err)
+		return err;
+
 	if (!(cdev->flags & CXGBI_FLAG_DDP_OFF)) {
 		uinfo.tagmask = tagmask;
 		uinfo.ulimit = uinfo.llimit + (ppmax << PPOD_SIZE_SHIFT);
@@ -1321,7 +1322,7 @@
 
 	err = cxgb3i_ddp_init(cdev);
 	if (err) {
-		pr_info("0x%p ddp init failed\n", cdev);
+		pr_info("0x%p ddp init failed %d\n", cdev, err);
 		goto err_out;
 	}
 
diff --git a/drivers/scsi/cxgbi/cxgb4i/Kbuild b/drivers/scsi/cxgbi/cxgb4i/Kbuild
index 38e03c2..fd3e0c9 100644
--- a/drivers/scsi/cxgbi/cxgb4i/Kbuild
+++ b/drivers/scsi/cxgbi/cxgb4i/Kbuild
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 ccflags-y += -I$(srctree)/drivers/net/ethernet/chelsio/cxgb4
 ccflags-y += -I$(srctree)/drivers/net/ethernet/chelsio/libcxgb
 
diff --git a/drivers/scsi/cxgbi/cxgb4i/Kconfig b/drivers/scsi/cxgbi/cxgb4i/Kconfig
index 594f593..d1f1bab 100644
--- a/drivers/scsi/cxgbi/cxgb4i/Kconfig
+++ b/drivers/scsi/cxgbi/cxgb4i/Kconfig
@@ -1,8 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
 config SCSI_CXGB4_ISCSI
 	tristate "Chelsio T4 iSCSI support"
 	depends on PCI && INET && (IPV6 || IPV6=n)
-	select NETDEVICES
-	select ETHERNET
+	depends on THERMAL || !THERMAL
+	depends on ETHERNET
 	select NET_VENDOR_CHELSIO
 	select CHELSIO_T4
 	select CHELSIO_LIB
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 211da1d..da50e87 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -35,6 +35,11 @@
 
 #include "../libcxgbi.h"
 
+#ifdef CONFIG_CHELSIO_T4_DCB
+#include <net/dcbevent.h>
+#include "cxgb4_dcb.h"
+#endif
+
 #define	DRV_MODULE_NAME		"cxgb4i"
 #define DRV_MODULE_DESC		"Chelsio T4-T6 iSCSI Driver"
 #define	DRV_MODULE_VERSION	"0.9.5-ko"
@@ -55,7 +60,7 @@
 #define CXGB4I_DEFAULT_10G_RCV_WIN (256 * 1024)
 static int cxgb4i_rcv_win = -1;
 module_param(cxgb4i_rcv_win, int, 0644);
-MODULE_PARM_DESC(cxgb4i_rcv_win, "TCP reveive window in bytes");
+MODULE_PARM_DESC(cxgb4i_rcv_win, "TCP receive window in bytes");
 
 #define CXGB4I_DEFAULT_10G_SND_WIN (128 * 1024)
 static int cxgb4i_snd_win = -1;
@@ -108,7 +113,7 @@
 	.eh_device_reset_handler = iscsi_eh_device_reset,
 	.eh_target_reset_handler = iscsi_eh_recover_target,
 	.target_alloc	= iscsi_target_alloc,
-	.use_clustering	= DISABLE_CLUSTERING,
+	.dma_boundary	= PAGE_SIZE - 1,
 	.this_id	= -1,
 	.track_queue_depth = 1,
 };
@@ -155,6 +160,15 @@
 	.session_recovery_timedout = iscsi_session_recovery_timedout,
 };
 
+#ifdef CONFIG_CHELSIO_T4_DCB
+static int
+cxgb4_dcb_change_notify(struct notifier_block *, unsigned long, void *);
+
+static struct notifier_block cxgb4_dcb_change = {
+	.notifier_call = cxgb4_dcb_change_notify,
+};
+#endif
+
 static struct scsi_transport_template *cxgb4i_stt;
 
 /*
@@ -574,6 +588,9 @@
 	int nparams, flowclen16, flowclen;
 
 	nparams = FLOWC_WR_NPARAMS_MIN;
+#ifdef CONFIG_CHELSIO_T4_DCB
+	nparams++;
+#endif
 	flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
 	flowclen16 = DIV_ROUND_UP(flowclen, 16);
 	flowclen = flowclen16 * 16;
@@ -595,6 +612,9 @@
 	struct fw_flowc_wr *flowc;
 	int nparams, flowclen16, flowclen;
 
+#ifdef CONFIG_CHELSIO_T4_DCB
+	u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan;
+#endif
 	flowclen16 = tx_flowc_wr_credits(&nparams, &flowclen);
 	skb = alloc_wr(flowclen, 0, GFP_ATOMIC);
 	flowc = (struct fw_flowc_wr *)skb->head;
@@ -622,6 +642,17 @@
 	flowc->mnemval[8].val = 0;
 	flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
 	flowc->mnemval[8].val = 16384;
+#ifdef CONFIG_CHELSIO_T4_DCB
+	flowc->mnemval[9].mnemonic = FW_FLOWC_MNEM_DCBPRIO;
+	if (vlan == CPL_L2T_VLAN_NONE) {
+		pr_warn_ratelimited("csk %u without VLAN Tag on DCB Link\n",
+				    csk->tid);
+		flowc->mnemval[9].val = cpu_to_be32(0);
+	} else {
+		flowc->mnemval[9].val = cpu_to_be32((vlan & VLAN_PRIO_MASK) >>
+					VLAN_PRIO_SHIFT);
+	}
+#endif
 
 	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
 
@@ -1179,7 +1210,8 @@
 		csk->skb_ulp_lhdr = skb;
 		cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);
 
-		if (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt) {
+		if ((CHELSIO_CHIP_VERSION(lldi->adapter_type) <= CHELSIO_T5) &&
+		    (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt)) {
 			pr_info("tid %u, CPL_ISCSI_HDR, bad seq, 0x%x/0x%x.\n",
 				csk->tid, cxgbi_skcb_tcp_seq(skb),
 				csk->rcv_nxt);
@@ -1517,16 +1549,22 @@
 	struct cxgbi_sock *csk;
 
 	csk = lookup_tid(t, tid);
-	if (!csk)
+	if (!csk) {
 		pr_err("can't find conn. for tid %u.\n", tid);
+		return;
+	}
 
 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
 		"csk 0x%p,%u,%lx,%u, status 0x%x.\n",
 		csk, csk->state, csk->flags, csk->tid, rpl->status);
 
-	if (rpl->status != CPL_ERR_NONE)
+	if (rpl->status != CPL_ERR_NONE) {
 		pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
 			csk, tid, rpl->status);
+		csk->err = -EINVAL;
+	}
+
+	complete(&csk->cmpl);
 
 	__kfree_skb(skb);
 }
@@ -1600,6 +1638,50 @@
 	csk->dst = NULL;
 }
 
+#ifdef CONFIG_CHELSIO_T4_DCB
+static inline u8 get_iscsi_dcb_state(struct net_device *ndev)
+{
+	return ndev->dcbnl_ops->getstate(ndev);
+}
+
+static int select_priority(int pri_mask)
+{
+	if (!pri_mask)
+		return 0;
+	return (ffs(pri_mask) - 1);
+}
+
+static u8 get_iscsi_dcb_priority(struct net_device *ndev)
+{
+	int rv;
+	u8 caps;
+
+	struct dcb_app iscsi_dcb_app = {
+		.protocol = 3260
+	};
+
+	rv = (int)ndev->dcbnl_ops->getcap(ndev, DCB_CAP_ATTR_DCBX, &caps);
+	if (rv)
+		return 0;
+
+	if (caps & DCB_CAP_DCBX_VER_IEEE) {
+		iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_STREAM;
+		rv = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
+		if (!rv) {
+			iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY;
+			rv = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
+		}
+	} else if (caps & DCB_CAP_DCBX_VER_CEE) {
+		iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM;
+		rv = dcb_getapp(ndev, &iscsi_dcb_app);
+	}
+
+	log_debug(1 << CXGBI_DBG_ISCSI,
+		  "iSCSI priority is set to %u\n", select_priority(rv));
+	return select_priority(rv);
+}
+#endif
+
 static int init_act_open(struct cxgbi_sock *csk)
 {
 	struct cxgbi_device *cdev = csk->cdev;
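
get_iscsi_dcb_priority() above resolves the 802.1Qaz APP-table entry for the iSCSI well-known port and reduces the returned mask to the lowest set priority. A condensed sketch of the same lookup (hypothetical helper, IEEE DCBX path only):

#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <net/dcbnl.h>

/*
 * Condensed sketch of the lookup above (hypothetical helper, IEEE
 * DCBX path only): fetch the APP-table priority mask for the iSCSI
 * well-known port and take the lowest priority bit that is set.
 */
static u8 demo_iscsi_priority(struct net_device *ndev)
{
	struct dcb_app app = {
		.selector = IEEE_8021QAZ_APP_SEL_ANY,
		.protocol = 3260,	/* iSCSI */
	};
	u8 mask = dcb_ieee_getapp_mask(ndev, &app);

	return mask ? ffs(mask) - 1 : 0;
}
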
@@ -1613,7 +1695,9 @@
 	unsigned int size, size6;
 	unsigned int linkspeed;
 	unsigned int rcv_winf, snd_winf;
-
+#ifdef CONFIG_CHELSIO_T4_DCB
+	u8 priority = 0;
+#endif
 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
 		"csk 0x%p,%u,0x%lx,%u.\n",
 		csk, csk->state, csk->flags, csk->tid);
@@ -1647,7 +1731,15 @@
 	cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
 	cxgbi_sock_get(csk);
 
+#ifdef CONFIG_CHELSIO_T4_DCB
+	if (get_iscsi_dcb_state(ndev))
+		priority = get_iscsi_dcb_priority(ndev);
+
+	csk->dcb_priority = priority;
+	csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, priority);
+#else
 	csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
+#endif
 	if (!csk->l2t) {
 		pr_err("%s, cannot alloc l2t.\n", ndev->name);
 		goto rel_resource_without_clip;
@@ -1686,8 +1778,7 @@
 		csk->mtu = dst_mtu(csk->dst);
 	cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx);
 	csk->tx_chan = cxgb4_port_chan(ndev);
-	csk->smac_idx = cxgb4_tp_smt_idx(lldi->adapter_type,
-					 cxgb4_port_viid(ndev));
+	csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx;
 	step = lldi->ntxq / lldi->nchan;
 	csk->txq_idx = cxgb4_port_idx(ndev) * step;
 	step = lldi->nrxq / lldi->nchan;
@@ -1903,7 +1994,7 @@
 }
 
 static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
-				int pg_idx, bool reply)
+				int pg_idx)
 {
 	struct sk_buff *skb;
 	struct cpl_set_tcb_field *req;
@@ -1919,7 +2010,7 @@
 	req = (struct cpl_set_tcb_field *)skb->head;
 	INIT_TP_WR(req, csk->tid);
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
-	req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
+	req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
 	req->word_cookie = htons(0);
 	req->mask = cpu_to_be64(0x3 << 8);
 	req->val = cpu_to_be64(pg_idx << 8);
@@ -1928,12 +2019,15 @@
 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
 		"csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);
 
+	reinit_completion(&csk->cmpl);
 	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
-	return 0;
+	wait_for_completion(&csk->cmpl);
+
+	return csk->err;
 }
 
 static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
-				 int hcrc, int dcrc, int reply)
+				 int hcrc, int dcrc)
 {
 	struct sk_buff *skb;
 	struct cpl_set_tcb_field *req;
@@ -1951,7 +2045,7 @@
 	req = (struct cpl_set_tcb_field *)skb->head;
 	INIT_TP_WR(req, tid);
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
-	req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
+	req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
 	req->word_cookie = htons(0);
 	req->mask = cpu_to_be64(0x3 << 4);
 	req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
@@ -1961,8 +2055,11 @@
 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
 		"csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);
 
+	reinit_completion(&csk->cmpl);
 	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
-	return 0;
+	wait_for_completion(&csk->cmpl);
+
+	return csk->err;
 }
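Both setup paths above now share the same synchronous round-trip. A minimal sketch of the pattern, with demo_* names and req_send() as hypothetical placeholders:

#include <linux/completion.h>

struct demo_sock {
	struct completion cmpl;	/* init_completion() at socket create */
	int err;
};

void req_send(struct demo_sock *s);	/* hypothetical async send */

static int demo_send_sync(struct demo_sock *s)
{
	s->err = 0;
	reinit_completion(&s->cmpl);	/* arm before the request is posted */
	req_send(s);
	wait_for_completion(&s->cmpl);	/* the reply handler calls complete() */
	return s->err;			/* the reply handler sets this on error */
}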
 
 static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev)
@@ -1977,7 +2074,7 @@
 	struct net_device *ndev = cdev->ports[0];
 	struct cxgbi_tag_format tformat;
 	unsigned int ppmax;
-	int i;
+	int i, err;
 
 	if (!lldi->vr->iscsi.size) {
 		pr_warn("%s, iscsi NOT enabled, check config!\n", ndev->name);
@@ -1993,8 +2090,17 @@
 					 & 0xF;
 	cxgbi_tagmask_check(lldi->iscsi_tagmask, &tformat);
 
-	cxgbi_ddp_ppm_setup(lldi->iscsi_ppm, cdev, &tformat, ppmax,
-			    lldi->iscsi_llimit, lldi->vr->iscsi.start, 2);
+	pr_info("iscsi_edram.start 0x%x iscsi_edram.size 0x%x",
+		lldi->vr->ppod_edram.start, lldi->vr->ppod_edram.size);
+
+	err = cxgbi_ddp_ppm_setup(lldi->iscsi_ppm, cdev, &tformat,
+				  lldi->vr->iscsi.size, lldi->iscsi_llimit,
+				  lldi->vr->iscsi.start, 2,
+				  lldi->vr->ppod_edram.start,
+				  lldi->vr->ppod_edram.size);
+
+	if (err < 0)
+		return err;
 
 	cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
 	cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
@@ -2042,14 +2148,13 @@
 	cdev->itp = &cxgb4i_iscsi_transport;
 	cdev->owner = THIS_MODULE;
 
-	cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0]))
-			<< FW_VIID_PFN_S;
+	cdev->pfvf = FW_PFVF_CMD_PFN_V(lldi->pf);
 	pr_info("cdev 0x%p,%s, pfvf %u.\n",
 		cdev, lldi->ports[0]->name, cdev->pfvf);
 
 	rc = cxgb4i_ddp_init(cdev);
 	if (rc) {
-		pr_info("t4 0x%p ddp init failed.\n", cdev);
+		pr_info("t4 0x%p ddp init failed %d.\n", cdev, rc);
 		goto err_out;
 	}
 	rc = cxgb4i_ofld_init(cdev);
@@ -2146,6 +2251,71 @@
 	return 0;
 }
 
+#ifdef CONFIG_CHELSIO_T4_DCB
+static int
+cxgb4_dcb_change_notify(struct notifier_block *self, unsigned long val,
+			void *data)
+{
+	int i, port = 0xFF;
+	struct net_device *ndev;
+	struct cxgbi_device *cdev = NULL;
+	struct dcb_app_type *iscsi_app = data;
+	struct cxgbi_ports_map *pmap;
+	u8 priority;
+
+	if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_IEEE) {
+		if ((iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_STREAM) &&
+		    (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY))
+			return NOTIFY_DONE;
+
+		priority = iscsi_app->app.priority;
+	} else if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_CEE) {
+		if (iscsi_app->app.selector != DCB_APP_IDTYPE_PORTNUM)
+			return NOTIFY_DONE;
+
+		if (!iscsi_app->app.priority)
+			return NOTIFY_DONE;
+
+		priority = ffs(iscsi_app->app.priority) - 1;
+	} else {
+		return NOTIFY_DONE;
+	}
+
+	if (iscsi_app->app.protocol != 3260)
+		return NOTIFY_DONE;
+
+	log_debug(1 << CXGBI_DBG_ISCSI, "iSCSI priority for ifid %d is %u\n",
+		  iscsi_app->ifindex, priority);
+
+	ndev = dev_get_by_index(&init_net, iscsi_app->ifindex);
+	if (!ndev)
+		return NOTIFY_DONE;
+
+	cdev = cxgbi_device_find_by_netdev_rcu(ndev, &port);
+
+	dev_put(ndev);
+	if (!cdev)
+		return NOTIFY_DONE;
+
+	pmap = &cdev->pmap;
+
+	for (i = 0; i < pmap->used; i++) {
+		if (pmap->port_csk[i]) {
+			struct cxgbi_sock *csk = pmap->port_csk[i];
+
+			if (csk->dcb_priority != priority) {
+				iscsi_conn_failure(csk->user_data,
+						   ISCSI_ERR_CONN_FAILED);
+				pr_info("Restarting iSCSI connection %p with "
+					"priority %u->%u.\n", csk,
+					csk->dcb_priority, priority);
+			}
+		}
+	}
+	return NOTIFY_OK;
+}
+#endif
+
 static int __init cxgb4i_init_module(void)
 {
 	int rc;
@@ -2157,11 +2327,18 @@
 		return rc;
 	cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info);
 
+#ifdef CONFIG_CHELSIO_T4_DCB
+	pr_info("%s dcb enabled.\n", DRV_MODULE_NAME);
+	register_dcbevent_notifier(&cxgb4_dcb_change);
+#endif
 	return 0;
 }
 
 static void __exit cxgb4i_exit_module(void)
 {
+#ifdef CONFIG_CHELSIO_T4_DCB
+	unregister_dcbevent_notifier(&cxgb4_dcb_change);
+#endif
 	cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
 	cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4);
 	cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt);
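The registration above uses the stock dcbevent notifier API. A trimmed sketch of such a listener (demo_* names are placeholders), filtering on the iSCSI well-known port the way cxgb4_dcb_change_notify() does:

#include <linux/notifier.h>
#include <net/dcbevent.h>
#include <net/dcbnl.h>

static int demo_dcb_notify(struct notifier_block *nb, unsigned long event,
			   void *data)
{
	struct dcb_app_type *app = data;

	if (app->app.protocol != 3260)	/* not the iSCSI app entry */
		return NOTIFY_DONE;
	/* react to the priority change here */
	return NOTIFY_OK;
}

static struct notifier_block demo_dcb_nb = {
	.notifier_call = demo_dcb_notify,
};

/* register_dcbevent_notifier(&demo_dcb_nb) in module init,
 * unregister_dcbevent_notifier(&demo_dcb_nb) in module exit.
 */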
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 3f3af5e..3e17af8 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -282,7 +282,6 @@
 }
 EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev_rcu);
 
-#if IS_ENABLED(CONFIG_IPV6)
 static struct cxgbi_device *cxgbi_device_find_by_mac(struct net_device *ndev,
 						     int *port)
 {
@@ -315,7 +314,6 @@
 		  ndev, ndev->name);
 	return NULL;
 }
-#endif
 
 void cxgbi_hbas_remove(struct cxgbi_device *cdev)
 {
@@ -573,6 +571,7 @@
 	skb_queue_head_init(&csk->receive_queue);
 	skb_queue_head_init(&csk->write_queue);
 	timer_setup(&csk->retry_timer, NULL, 0);
+	init_completion(&csk->cmpl);
 	rwlock_init(&csk->callback_lock);
 	csk->cdev = cdev;
 	csk->flags = 0;
@@ -640,6 +639,10 @@
 
 	if (ndev->flags & IFF_LOOPBACK) {
 		ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr);
+		if (!ndev) {
+			err = -ENETUNREACH;
+			goto rel_neigh;
+		}
 		mtu = ndev->mtu;
 		pr_info("rt dev %s, loopback -> %s, mtu %u.\n",
 			n->dev->name, ndev->name, mtu);
@@ -652,6 +655,8 @@
 	}
 
 	cdev = cxgbi_device_find_by_netdev(ndev, &port);
+	if (!cdev)
+		cdev = cxgbi_device_find_by_mac(ndev, &port);
 	if (!cdev) {
 		pr_info("dst %pI4, %s, NOT cxgbi device.\n",
 			&daddr->sin_addr.s_addr, ndev->name);
@@ -784,7 +789,8 @@
 	csk->mtu = mtu;
 	csk->dst = dst;
 
-	if (ipv6_addr_any(&rt->rt6i_prefsrc.addr)) {
+	rt6_get_prefsrc(rt, &pref_saddr);
+	if (ipv6_addr_any(&pref_saddr)) {
 		struct inet6_dev *idev = ip6_dst_idev((struct dst_entry *)rt);
 
 		err = ipv6_dev_get_saddr(&init_net, idev ? idev->dev : NULL,
@@ -794,8 +800,6 @@
 				&daddr6->sin6_addr);
 			goto rel_rt;
 		}
-	} else {
-		pref_saddr = rt->rt6i_prefsrc.addr;
 	}
 
 	csk->csk_family = AF_INET6;
@@ -1212,7 +1216,7 @@
 		unsigned int *sgcnt, unsigned int *dlen,
 		unsigned int prot)
 {
-	struct scsi_data_buffer *sdb = prot ? scsi_prot(sc) : scsi_out(sc);
+	struct scsi_data_buffer *sdb = prot ? scsi_prot(sc) : &sc->sdb;
 
 	*sgl = sdb->table.sgl;
 	*sgcnt = sdb->table.nents;
@@ -1281,14 +1285,15 @@
 
 static unsigned char padding[4];
 
-void cxgbi_ddp_ppm_setup(void **ppm_pp, struct cxgbi_device *cdev,
-			 struct cxgbi_tag_format *tformat, unsigned int ppmax,
-			 unsigned int llimit, unsigned int start,
-			 unsigned int rsvd_factor)
+int cxgbi_ddp_ppm_setup(void **ppm_pp, struct cxgbi_device *cdev,
+			struct cxgbi_tag_format *tformat,
+			unsigned int iscsi_size, unsigned int llimit,
+			unsigned int start, unsigned int rsvd_factor,
+			unsigned int edram_start, unsigned int edram_size)
 {
 	int err = cxgbi_ppm_init(ppm_pp, cdev->ports[0], cdev->pdev,
-				cdev->lldev, tformat, ppmax, llimit, start,
-				rsvd_factor);
+				cdev->lldev, tformat, iscsi_size, llimit, start,
+				rsvd_factor, edram_start, edram_size);
 
 	if (err >= 0) {
 		struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*ppm_pp);
@@ -1300,6 +1305,8 @@
 	} else {
 		cdev->flags |= CXGBI_FLAG_DDP_OFF;
 	}
+
+	return err;
 }
 EXPORT_SYMBOL_GPL(cxgbi_ddp_ppm_setup);
 
@@ -1428,8 +1435,7 @@
 	log_debug(1 << CXGBI_DBG_DDP,
 		  "cdev 0x%p, task 0x%p, release tag 0x%x.\n",
 		  cdev, task, tag);
-	if (sc &&
-	    (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) &&
+	if (sc && sc->sc_data_direction == DMA_FROM_DEVICE &&
 	    cxgbi_ppm_is_ddp_tag(ppm, tag)) {
 		struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
 		struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo;
@@ -1461,9 +1467,7 @@
 	u32 tag = 0;
 	int err = -EINVAL;
 
-	if (sc &&
-	    (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE)
-	) {
+	if (sc && sc->sc_data_direction == DMA_FROM_DEVICE) {
 		struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
 		struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo;
 
@@ -1897,7 +1901,7 @@
 	if (SKB_MAX_HEAD(cdev->skb_tx_rsvd) > (512 * MAX_SKB_FRAGS) &&
 	    (opcode == ISCSI_OP_SCSI_DATA_OUT ||
 	     (opcode == ISCSI_OP_SCSI_CMD &&
-	      (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_TO_DEVICE))))
+	      sc->sc_data_direction == DMA_TO_DEVICE)))
 		/* data could go into skb head */
 		headroom += min_t(unsigned int,
 				SKB_MAX_HEAD(cdev->skb_tx_rsvd),
@@ -1972,7 +1976,7 @@
 		return 0;
 
 	if (task->sc) {
-		struct scsi_data_buffer *sdb = scsi_out(task->sc);
+		struct scsi_data_buffer *sdb = &task->sc->sdb;
 		struct scatterlist *sg = NULL;
 		int err;
 
@@ -2252,14 +2256,14 @@
 		if (!err && conn->hdrdgst_en)
 			err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
 							conn->hdrdgst_en,
-							conn->datadgst_en, 0);
+							conn->datadgst_en);
 		break;
 	case ISCSI_PARAM_DATADGST_EN:
 		err = iscsi_set_param(cls_conn, param, buf, buflen);
 		if (!err && conn->datadgst_en)
 			err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
 							conn->hdrdgst_en,
-							conn->datadgst_en, 0);
+							conn->datadgst_en);
 		break;
 	case ISCSI_PARAM_MAX_R2T:
 		return iscsi_tcp_set_max_r2t(conn, buf);
@@ -2313,7 +2317,6 @@
 {
 	struct cxgbi_endpoint *cep = ep->dd_data;
 	struct cxgbi_sock *csk;
-	int len;
 
 	log_debug(1 << CXGBI_DBG_ISCSI,
 		"cls_conn 0x%p, param %d.\n", ep, param);
@@ -2331,9 +2334,9 @@
 		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
 						 &csk->daddr, param, buf);
 	default:
-		return -ENOSYS;
+		break;
 	}
-	return len;
+	return -ENOSYS;
 }
 EXPORT_SYMBOL_GPL(cxgbi_get_ep_param);
 
@@ -2385,7 +2388,7 @@
 
 	ppm = csk->cdev->cdev2ppm(csk->cdev);
 	err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid,
-					     ppm->tformat.pgsz_idx_dflt, 0);
+					     ppm->tformat.pgsz_idx_dflt);
 	if (err < 0)
 		return err;
 
@@ -2566,13 +2569,9 @@
 			pr_info("shost 0x%p, priv NULL.\n", shost);
 			goto err_out;
 		}
-
-		rtnl_lock();
-		if (!vlan_uses_dev(hba->ndev))
-			ifindex = hba->ndev->ifindex;
-		rtnl_unlock();
 	}
 
+check_route:
 	if (dst_addr->sa_family == AF_INET) {
 		csk = cxgbi_check_route(dst_addr, ifindex);
 #if IS_ENABLED(CONFIG_IPV6)
@@ -2593,6 +2592,13 @@
 	if (!hba)
 		hba = csk->cdev->hbas[csk->port_id];
 	else if (hba != csk->cdev->hbas[csk->port_id]) {
+		if (ifindex != hba->ndev->ifindex) {
+			cxgbi_sock_put(csk);
+			cxgbi_sock_closed(csk);
+			ifindex = hba->ndev->ifindex;
+			goto check_route;
+		}
+
 		pr_info("Could not connect through requested host %u"
 			"hba 0x%p != 0x%p (%u).\n",
 			shost->host_no, hba,
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index dcb190e..84b96af 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -120,6 +120,9 @@
 	int wr_max_cred;
 	int wr_cred;
 	int wr_una_cred;
+#ifdef CONFIG_CHELSIO_T4_DCB
+	u8 dcb_priority;
+#endif
 	unsigned char hcrc_len;
 	unsigned char dcrc_len;
 
@@ -146,6 +149,7 @@
 	struct sk_buff_head receive_queue;
 	struct sk_buff_head write_queue;
 	struct timer_list retry_timer;
+	struct completion cmpl;
 	int err;
 	rwlock_t callback_lock;
 	void *user_data;
@@ -487,9 +491,9 @@
 				  struct cxgbi_ppm *,
 				  struct cxgbi_task_tag_info *);
 	int (*csk_ddp_setup_digest)(struct cxgbi_sock *,
-				unsigned int, int, int, int);
+				    unsigned int, int, int);
 	int (*csk_ddp_setup_pgidx)(struct cxgbi_sock *,
-				unsigned int, int, bool);
+				   unsigned int, int);
 
 	void (*csk_release_offload_resources)(struct cxgbi_sock *);
 	int (*csk_rx_pdu_ready)(struct cxgbi_sock *, struct sk_buff *);
@@ -613,8 +617,9 @@
 void cxgbi_ddp_set_one_ppod(struct cxgbi_pagepod *,
 			    struct cxgbi_task_tag_info *,
 			    struct scatterlist **sg_pp, unsigned int *sg_off);
-void cxgbi_ddp_ppm_setup(void **ppm_pp, struct cxgbi_device *,
-			 struct cxgbi_tag_format *, unsigned int ppmax,
-			 unsigned int llimit, unsigned int start,
-			 unsigned int rsvd_factor);
+int cxgbi_ddp_ppm_setup(void **ppm_pp, struct cxgbi_device *cdev,
+			struct cxgbi_tag_format *tformat,
+			unsigned int iscsi_size, unsigned int llimit,
+			unsigned int start, unsigned int rsvd_factor,
+			unsigned int edram_start, unsigned int edram_size);
 #endif	/*__LIBCXGBI_H__*/
diff --git a/drivers/scsi/cxlflash/Kconfig b/drivers/scsi/cxlflash/Kconfig
index f1b17e3..5533bdc 100644
--- a/drivers/scsi/cxlflash/Kconfig
+++ b/drivers/scsi/cxlflash/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 #
 # IBM CXL-attached Flash Accelerator SCSI Driver
 #
diff --git a/drivers/scsi/cxlflash/Makefile b/drivers/scsi/cxlflash/Makefile
index 283377d..fd2f0dd 100644
--- a/drivers/scsi/cxlflash/Makefile
+++ b/drivers/scsi/cxlflash/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 obj-$(CONFIG_CXLFLASH) += cxlflash.o
 cxlflash-y += main.o superpipe.o lunmgt.o vlun.o
 cxlflash-$(CONFIG_CXL) += cxl_hw.o
diff --git a/drivers/scsi/cxlflash/backend.h b/drivers/scsi/cxlflash/backend.h
index 55638d1..181e044 100644
--- a/drivers/scsi/cxlflash/backend.h
+++ b/drivers/scsi/cxlflash/backend.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * CXL Flash Device Driver
  *
@@ -5,11 +6,6 @@
  *             Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
  *
  * Copyright (C) 2018 IBM Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
 
 #ifndef _CXLFLASH_BACKEND_H
diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h
index 8908a20..de6229e 100644
--- a/drivers/scsi/cxlflash/common.h
+++ b/drivers/scsi/cxlflash/common.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * CXL Flash Device Driver
  *
@@ -5,11 +6,6 @@
  *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
  *
  * Copyright (C) 2015 IBM Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
 
 #ifndef _CXLFLASH_COMMON_H
@@ -334,7 +330,8 @@
 void cxlflash_list_init(void);
 void cxlflash_term_global_luns(void);
 void cxlflash_free_errpage(void);
-int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg);
+int cxlflash_ioctl(struct scsi_device *sdev, unsigned int cmd,
+		   void __user *arg);
 void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg);
 int cxlflash_mark_contexts_error(struct cxlflash_cfg *cfg);
 void cxlflash_term_local_luns(struct cxlflash_cfg *cfg);
diff --git a/drivers/scsi/cxlflash/cxl_hw.c b/drivers/scsi/cxlflash/cxl_hw.c
index b42da88..b814130 100644
--- a/drivers/scsi/cxlflash/cxl_hw.c
+++ b/drivers/scsi/cxlflash/cxl_hw.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * CXL Flash Device Driver
  *
@@ -5,11 +6,6 @@
  *             Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
  *
  * Copyright (C) 2018 IBM Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
 
 #include <misc/cxl.h>
diff --git a/drivers/scsi/cxlflash/lunmgt.c b/drivers/scsi/cxlflash/lunmgt.c
index edea125..e0e15b4 100644
--- a/drivers/scsi/cxlflash/lunmgt.c
+++ b/drivers/scsi/cxlflash/lunmgt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * CXL Flash Device Driver
  *
@@ -5,11 +6,6 @@
  *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
  *
  * Copyright (C) 2015 IBM Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
 
 #include <asm/unaligned.h>
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 6637116..93ef97a 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * CXL Flash Device Driver
  *
@@ -5,11 +6,6 @@
  *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
  *
  * Copyright (C) 2015 IBM Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
 
 #include <linux/delay.h>
@@ -757,10 +753,13 @@
 		/* SISL_MSI_ASYNC_ERROR is set up only for the primary HWQ */
 		if (index == PRIMARY_HWQ)
 			cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 3, hwq);
+		/* fall through */
 	case UNMAP_TWO:
 		cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 2, hwq);
+		/* fall through */
 	case UNMAP_ONE:
 		cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 1, hwq);
+		/* fall through */
 	case FREE_IRQ:
 		cfg->ops->free_afu_irqs(hwq->ctx_cookie);
 		/* fall through */
@@ -977,14 +976,18 @@
 	switch (cfg->init_state) {
 	case INIT_STATE_CDEV:
 		cxlflash_release_chrdev(cfg);
+		/* fall through */
 	case INIT_STATE_SCSI:
 		cxlflash_term_local_luns(cfg);
 		scsi_remove_host(cfg->host);
+		/* fall through */
 	case INIT_STATE_AFU:
 		term_afu(cfg);
+		/* fall through */
 	case INIT_STATE_PCI:
 		cfg->ops->destroy_afu(cfg->afu_cookie);
 		pci_disable_device(pdev);
+		/* fall through */
 	case INIT_STATE_NONE:
 		free_mem(cfg);
 		scsi_host_put(cfg->host);
@@ -2357,11 +2360,11 @@
 			cxlflash_schedule_async_reset(cfg);
 			break;
 		}
-		/* fall through to retry */
+		/* fall through - to retry */
 	case -EAGAIN:
 		if (++nretry < 2)
 			goto retry;
-		/* fall through to exit */
+		/* fall through - to exit */
 	default:
 		break;
 	}
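These comment tweaks matter because gcc's -Wimplicit-fallthrough matches annotations against a comment regex placed immediately before the next label; at the default level, trailing free text needs a '-' separator, hence "fall through - to retry". A minimal illustration:

#include <stdio.h>

static void demo_teardown(int level)
{
	switch (level) {
	case 2:
		puts("undo step 2");
		/* fall through */
	case 1:
		puts("undo step 1");
		break;
	default:
		break;
	}
}

int main(void)
{
	demo_teardown(2);	/* prints both steps, by design */
	return 0;
}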
@@ -3021,6 +3024,7 @@
 		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
 		if (cfg->state == STATE_NORMAL)
 			goto retry;
+		/* else, fall through */
 	default:
 		/* Ideally should not happen */
 		dev_err(dev, "%s: Device is not ready, state=%d\n",
@@ -3088,12 +3092,6 @@
 		return -EINVAL;
 	}
 
-	if ((mode == HWQ_MODE_TAG) && !shost_use_blk_mq(shost)) {
-		dev_info(cfgdev, "SCSI-MQ is not enabled, use a different "
-			 "HWQ steering mode.\n");
-		return -EINVAL;
-	}
-
 	afu->hwq_mode = mode;
 
 	return count;
@@ -3180,7 +3178,6 @@
 	.this_id = -1,
 	.sg_tablesize = 1,	/* No scatter gather support */
 	.max_sectors = CXLFLASH_MAX_SECTORS,
-	.use_clustering = ENABLE_CLUSTERING,
 	.shost_attrs = cxlflash_host_attrs,
 	.sdev_attrs = cxlflash_dev_attrs,
 };
@@ -3289,7 +3286,7 @@
  *
  * Return: A string identifying the decoded host ioctl.
  */
-static char *decode_hioctl(int cmd)
+static char *decode_hioctl(unsigned int cmd)
 {
 	switch (cmd) {
 	case HT_CXLFLASH_LUN_PROVISION:
@@ -3694,6 +3691,7 @@
 	host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;
 
 	cfg = shost_priv(host);
+	cfg->state = STATE_PROBING;
 	cfg->host = host;
 	rc = alloc_mem(cfg);
 	if (rc) {
@@ -3782,6 +3780,7 @@
 	return rc;
 
 out_remove:
+	cfg->state = STATE_PROBED;
 	cxlflash_remove(pdev);
 	goto out;
 }
diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h
index a39be94..0bfb98e 100644
--- a/drivers/scsi/cxlflash/main.h
+++ b/drivers/scsi/cxlflash/main.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * CXL Flash Device Driver
  *
@@ -5,11 +6,6 @@
  *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
  *
  * Copyright (C) 2015 IBM Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
 
 #ifndef _CXLFLASH_MAIN_H
diff --git a/drivers/scsi/cxlflash/ocxl_hw.c b/drivers/scsi/cxlflash/ocxl_hw.c
index 37b8dc6..7018cd8 100644
--- a/drivers/scsi/cxlflash/ocxl_hw.c
+++ b/drivers/scsi/cxlflash/ocxl_hw.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * CXL Flash Device Driver
  *
@@ -5,17 +6,13 @@
  *             Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
  *
  * Copyright (C) 2018 IBM Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
 
 #include <linux/file.h>
 #include <linux/idr.h>
 #include <linux/module.h>
 #include <linux/mount.h>
+#include <linux/pseudo_fs.h>
 #include <linux/poll.h>
 #include <linux/sched/signal.h>
 
@@ -35,31 +32,15 @@
 static int ocxlflash_fs_cnt;
 static struct vfsmount *ocxlflash_vfs_mount;
 
-static const struct dentry_operations ocxlflash_fs_dops = {
-	.d_dname	= simple_dname,
-};
-
-/*
- * ocxlflash_fs_mount() - mount the pseudo-filesystem
- * @fs_type:	File system type.
- * @flags:	Flags for the filesystem.
- * @dev_name:	Device name associated with the filesystem.
- * @data:	Data pointer.
- *
- * Return: pointer to the directory entry structure
- */
-static struct dentry *ocxlflash_fs_mount(struct file_system_type *fs_type,
-					 int flags, const char *dev_name,
-					 void *data)
+static int ocxlflash_fs_init_fs_context(struct fs_context *fc)
 {
-	return mount_pseudo(fs_type, "ocxlflash:", NULL, &ocxlflash_fs_dops,
-			    OCXLFLASH_FS_MAGIC);
+	return init_pseudo(fc, OCXLFLASH_FS_MAGIC) ? 0 : -ENOMEM;
 }
 
 static struct file_system_type ocxlflash_fs_type = {
 	.name		= "ocxlflash",
 	.owner		= THIS_MODULE,
-	.mount		= ocxlflash_fs_mount,
+	.init_fs_context = ocxlflash_fs_init_fs_context,
 	.kill_sb	= kill_anon_super,
 };
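This conversion follows the mount API rework: the pseudo filesystem now supplies init_fs_context instead of a .mount callback. A minimal sketch of the same shape for a hypothetical internal-use filesystem (demofs and DEMO_FS_MAGIC are made up):

#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/module.h>
#include <linux/pseudo_fs.h>

#define DEMO_FS_MAGIC	0x64656d6f

static int demo_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, DEMO_FS_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type demo_fs_type = {
	.name		 = "demofs",
	.owner		 = THIS_MODULE,
	.init_fs_context = demo_init_fs_context,
	.kill_sb	 = kill_anon_super,
};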
 
diff --git a/drivers/scsi/cxlflash/ocxl_hw.h b/drivers/scsi/cxlflash/ocxl_hw.h
index 9270d35..fc6ad4f 100644
--- a/drivers/scsi/cxlflash/ocxl_hw.h
+++ b/drivers/scsi/cxlflash/ocxl_hw.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * CXL Flash Device Driver
  *
@@ -5,11 +6,6 @@
  *	       Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
  *
  * Copyright (C) 2018 IBM Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
 
 #define OCXL_MAX_IRQS	4	/* Max interrupts per process */
diff --git a/drivers/scsi/cxlflash/sislite.h b/drivers/scsi/cxlflash/sislite.h
index 874abce..ab315c5 100644
--- a/drivers/scsi/cxlflash/sislite.h
+++ b/drivers/scsi/cxlflash/sislite.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * CXL Flash Device Driver
  *
@@ -5,11 +6,6 @@
  *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
  *
  * Copyright (C) 2015 IBM Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
 
 #ifndef _SISLITE_H
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index acac615..593669a 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * CXL Flash Device Driver
  *
@@ -5,11 +6,6 @@
  *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
  *
  * Copyright (C) 2015 IBM Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
 
 #include <linux/delay.h>
@@ -1924,7 +1920,7 @@
  *
  * Return: A string identifying the decoded ioctl.
  */
-static char *decode_ioctl(int cmd)
+static char *decode_ioctl(unsigned int cmd)
 {
 	switch (cmd) {
 	case DK_CXLFLASH_ATTACH:
@@ -2051,7 +2047,7 @@
  *
  * Return: 0 on success, -errno on failure
  */
-static int ioctl_common(struct scsi_device *sdev, int cmd)
+static int ioctl_common(struct scsi_device *sdev, unsigned int cmd)
 {
 	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
 	struct device *dev = &cfg->dev->dev;
@@ -2096,7 +2092,7 @@
  *
  * Return: 0 on success, -errno on failure
  */
-int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
+int cxlflash_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg)
 {
 	typedef int (*sioctl) (struct scsi_device *, void *);
 
@@ -2179,8 +2175,7 @@
 	}
 
 	if (unlikely(copy_from_user(&buf, arg, size))) {
-		dev_err(dev, "%s: copy_from_user() fail "
-			"size=%lu cmd=%d (%s) arg=%p\n",
+		dev_err(dev, "%s: copy_from_user() fail size=%lu cmd=%u (%s) arg=%p\n",
 			__func__, size, cmd, decode_ioctl(cmd), arg);
 		rc = -EFAULT;
 		goto cxlflash_ioctl_exit;
@@ -2203,8 +2198,7 @@
 	rc = do_ioctl(sdev, (void *)&buf);
 	if (likely(!rc))
 		if (unlikely(copy_to_user(arg, &buf, size))) {
-			dev_err(dev, "%s: copy_to_user() fail "
-				"size=%lu cmd=%d (%s) arg=%p\n",
+			dev_err(dev, "%s: copy_to_user() fail size=%lu cmd=%u (%s) arg=%p\n",
 				__func__, size, cmd, decode_ioctl(cmd), arg);
 			rc = -EFAULT;
 		}
diff --git a/drivers/scsi/cxlflash/superpipe.h b/drivers/scsi/cxlflash/superpipe.h
index 35c3cbf..0e3b459 100644
--- a/drivers/scsi/cxlflash/superpipe.h
+++ b/drivers/scsi/cxlflash/superpipe.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * CXL Flash Device Driver
  *
@@ -5,11 +6,6 @@
  *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
  *
  * Copyright (C) 2015 IBM Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
 
 #ifndef _CXLFLASH_SUPERPIPE_H
diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c
index 2c904bf..f1406ac 100644
--- a/drivers/scsi/cxlflash/vlun.c
+++ b/drivers/scsi/cxlflash/vlun.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * CXL Flash Device Driver
  *
@@ -5,11 +6,6 @@
  *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
  *
  * Copyright (C) 2015 IBM Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
 
 #include <linux/interrupt.h>
diff --git a/drivers/scsi/cxlflash/vlun.h b/drivers/scsi/cxlflash/vlun.h
index 27a63a0..68e3ea5 100644
--- a/drivers/scsi/cxlflash/vlun.h
+++ b/drivers/scsi/cxlflash/vlun.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * CXL Flash Device Driver
  *
@@ -5,11 +6,6 @@
  *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
  *
  * Copyright (C) 2015 IBM Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
 
 #ifndef _CXLFLASH_VLUN_H
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 1ed2cd8..13fbb2e 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -753,105 +753,6 @@
 	return NULL;
 }
 
-
-static struct ScsiReqBlk *srb_get_free(struct AdapterCtlBlk *acb)
-{
-	struct list_head *head = &acb->srb_free_list;
-	struct ScsiReqBlk *srb = NULL;
-
-	if (!list_empty(head)) {
-		srb = list_entry(head->next, struct ScsiReqBlk, list);
-		list_del(head->next);
-		dprintkdbg(DBG_0, "srb_get_free: srb=%p\n", srb);
-	}
-	return srb;
-}
-
-
-static void srb_free_insert(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
-{
-	dprintkdbg(DBG_0, "srb_free_insert: srb=%p\n", srb);
-	list_add_tail(&srb->list, &acb->srb_free_list);
-}
-
-
-static void srb_waiting_insert(struct DeviceCtlBlk *dcb,
-		struct ScsiReqBlk *srb)
-{
-	dprintkdbg(DBG_0, "srb_waiting_insert: (0x%p) <%02i-%i> srb=%p\n",
-		srb->cmd, dcb->target_id, dcb->target_lun, srb);
-	list_add(&srb->list, &dcb->srb_waiting_list);
-}
-
-
-static void srb_waiting_append(struct DeviceCtlBlk *dcb,
-		struct ScsiReqBlk *srb)
-{
-	dprintkdbg(DBG_0, "srb_waiting_append: (0x%p) <%02i-%i> srb=%p\n",
-		 srb->cmd, dcb->target_id, dcb->target_lun, srb);
-	list_add_tail(&srb->list, &dcb->srb_waiting_list);
-}
-
-
-static void srb_going_append(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
-{
-	dprintkdbg(DBG_0, "srb_going_append: (0x%p) <%02i-%i> srb=%p\n",
-		srb->cmd, dcb->target_id, dcb->target_lun, srb);
-	list_add_tail(&srb->list, &dcb->srb_going_list);
-}
-
-
-static void srb_going_remove(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
-{
-	struct ScsiReqBlk *i;
-	struct ScsiReqBlk *tmp;
-	dprintkdbg(DBG_0, "srb_going_remove: (0x%p) <%02i-%i> srb=%p\n",
-		srb->cmd, dcb->target_id, dcb->target_lun, srb);
-
-	list_for_each_entry_safe(i, tmp, &dcb->srb_going_list, list)
-		if (i == srb) {
-			list_del(&srb->list);
-			break;
-		}
-}
-
-
-static void srb_waiting_remove(struct DeviceCtlBlk *dcb,
-		struct ScsiReqBlk *srb)
-{
-	struct ScsiReqBlk *i;
-	struct ScsiReqBlk *tmp;
-	dprintkdbg(DBG_0, "srb_waiting_remove: (0x%p) <%02i-%i> srb=%p\n",
-		srb->cmd, dcb->target_id, dcb->target_lun, srb);
-
-	list_for_each_entry_safe(i, tmp, &dcb->srb_waiting_list, list)
-		if (i == srb) {
-			list_del(&srb->list);
-			break;
-		}
-}
-
-
-static void srb_going_to_waiting_move(struct DeviceCtlBlk *dcb,
-		struct ScsiReqBlk *srb)
-{
-	dprintkdbg(DBG_0,
-		"srb_going_to_waiting_move: (0x%p) <%02i-%i> srb=%p\n",
-		srb->cmd, dcb->target_id, dcb->target_lun, srb);
-	list_move(&srb->list, &dcb->srb_waiting_list);
-}
-
-
-static void srb_waiting_to_going_move(struct DeviceCtlBlk *dcb,
-		struct ScsiReqBlk *srb)
-{
-	dprintkdbg(DBG_0,
-		"srb_waiting_to_going_move: (0x%p) <%02i-%i> srb=%p\n",
-		srb->cmd, dcb->target_id, dcb->target_lun, srb);
-	list_move(&srb->list, &dcb->srb_going_list);
-}
-
-
 /* Sets the timer to wake us up */
 static void waiting_set_timer(struct AdapterCtlBlk *acb, unsigned long to)
 {
@@ -923,7 +824,7 @@
 
 			/* Try to send to the bus */
 			if (!start_scsi(acb, pos, srb))
-				srb_waiting_to_going_move(pos, srb);
+				list_move(&srb->list, &pos->srb_going_list);
 			else
 				waiting_set_timer(acb, HZ/50);
 			break;
@@ -960,15 +861,15 @@
 	if (dcb->max_command <= list_size(&dcb->srb_going_list) ||
 	    acb->active_dcb ||
 	    (acb->acb_flag & (RESET_DETECT + RESET_DONE + RESET_DEV))) {
-		srb_waiting_append(dcb, srb);
+		list_add_tail(&srb->list, &dcb->srb_waiting_list);
 		waiting_process_next(acb);
 		return;
 	}
 
-	if (!start_scsi(acb, dcb, srb))
-		srb_going_append(dcb, srb);
-	else {
-		srb_waiting_insert(dcb, srb);
+	if (!start_scsi(acb, dcb, srb)) {
+		list_add_tail(&srb->list, &dcb->srb_going_list);
+	} else {
+		list_add(&srb->list, &dcb->srb_waiting_list);
 		waiting_set_timer(acb, HZ / 50);
 	}
 }
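The removed srb_* wrappers reduce to stock <linux/list.h> primitives, as the call sites above show. A condensed sketch of the free-list pop that replaces srb_get_free() (simplified struct for illustration):

#include <linux/list.h>

struct demo_srb {
	struct list_head list;
};

static struct demo_srb *demo_get_free(struct list_head *free_list)
{
	struct demo_srb *srb =
		list_first_entry_or_null(free_list, struct demo_srb, list);

	if (srb)
		list_del(&srb->list);	/* unlink before handing it out */
	return srb;
}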
@@ -1045,10 +946,8 @@
 			sgp->length++;
 		}
 
-		srb->sg_bus_addr = pci_map_single(dcb->acb->dev,
-						srb->segment_x,
-				            	SEGMENTX_LEN,
-				            	PCI_DMA_TODEVICE);
+		srb->sg_bus_addr = dma_map_single(&dcb->acb->dev->dev,
+				srb->segment_x, SEGMENTX_LEN, DMA_TO_DEVICE);
 
 		dprintkdbg(DBG_SG, "build_srb: [n] map sg %p->%08x(%05x)\n",
 			srb->segment_x, srb->sg_bus_addr, SEGMENTX_LEN);
@@ -1116,9 +1015,9 @@
 	cmd->scsi_done = done;
 	cmd->result = 0;
 
-	srb = srb_get_free(acb);
-	if (!srb)
-	{
+	srb = list_first_entry_or_null(&acb->srb_free_list,
+			struct ScsiReqBlk, list);
+	if (!srb) {
 		/*
 		 * Return 1 since we are unable to queue this command at this
 		 * point in time.
@@ -1126,12 +1025,13 @@
 		dprintkdbg(DBG_0, "queue_command: No free srb's\n");
 		return 1;
 	}
+	list_del(&srb->list);
 
 	build_srb(cmd, dcb, srb);
 
 	if (!list_empty(&dcb->srb_waiting_list)) {
 		/* append to waiting queue */
-		srb_waiting_append(dcb, srb);
+		list_add_tail(&srb->list, &dcb->srb_waiting_list);
 		waiting_process_next(acb);
 	} else {
 		/* process immediately */
@@ -1376,11 +1276,11 @@
 
 	srb = find_cmd(cmd, &dcb->srb_waiting_list);
 	if (srb) {
-		srb_waiting_remove(dcb, srb);
+		list_del(&srb->list);
 		pci_unmap_srb_sense(acb, srb);
 		pci_unmap_srb(acb, srb);
 		free_tag(dcb, srb);
-		srb_free_insert(acb, srb);
+		list_add_tail(&srb->list, &acb->srb_free_list);
 		dprintkl(KERN_DEBUG, "eh_abort: Command was waiting\n");
 		cmd->result = DID_ABORT << 16;
 		return SUCCESS;
@@ -1969,14 +1869,15 @@
 			xferred -= psge->length;
 		} else {
 			/* Partial SG entry done */
+			dma_sync_single_for_cpu(&srb->dcb->acb->dev->dev,
+					srb->sg_bus_addr, SEGMENTX_LEN,
+					DMA_TO_DEVICE);
 			psge->length -= xferred;
 			psge->address += xferred;
 			srb->sg_index = idx;
-			pci_dma_sync_single_for_device(srb->dcb->
-					    acb->dev,
-					    srb->sg_bus_addr,
-					    SEGMENTX_LEN,
-					    PCI_DMA_TODEVICE);
+			dma_sync_single_for_device(&srb->dcb->acb->dev->dev,
+					srb->sg_bus_addr, SEGMENTX_LEN,
+					DMA_TO_DEVICE);
 			break;
 		}
 		psge++;
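For context, the streaming-DMA discipline these conversions land on, sketched with placeholder dev/buf/len and error paths trimmed:

dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

if (dma_mapping_error(dev, handle))
	return -ENOMEM;
dma_sync_single_for_cpu(dev, handle, len, DMA_TO_DEVICE);	/* CPU may edit buf */
/* ... modify buf ... */
dma_sync_single_for_device(dev, handle, len, DMA_TO_DEVICE);	/* hand back to HW */
/* ... device consumes the buffer ... */
dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);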
@@ -3083,7 +2984,7 @@
 					goto disc1;
 				}
 				free_tag(dcb, srb);
-				srb_going_to_waiting_move(dcb, srb);
+				list_move(&srb->list, &dcb->srb_waiting_list);
 				dprintkdbg(DBG_KG,
 					"disconnect: (0x%p) Retry\n",
 					srb->cmd);
@@ -3148,7 +3049,7 @@
 
 			srb->state = SRB_READY;
 			free_tag(dcb, srb);
-			srb_going_to_waiting_move(dcb, srb);
+			list_move(&srb->list, &dcb->srb_waiting_list);
 			waiting_set_timer(acb, HZ / 20);
 
 			/* return; */
@@ -3271,9 +3172,8 @@
 		/* unmap DC395x SG list */
 		dprintkdbg(DBG_SG, "pci_unmap_srb: list=%08x(%05x)\n",
 			srb->sg_bus_addr, SEGMENTX_LEN);
-		pci_unmap_single(acb->dev, srb->sg_bus_addr,
-				 SEGMENTX_LEN,
-				 PCI_DMA_TODEVICE);
+		dma_unmap_single(&acb->dev->dev, srb->sg_bus_addr, SEGMENTX_LEN,
+				DMA_TO_DEVICE);
 		dprintkdbg(DBG_SG, "pci_unmap_srb: segs=%i buffer=%p\n",
 			   scsi_sg_count(cmd), scsi_bufflen(cmd));
 		/* unmap the sg segments */
@@ -3291,8 +3191,8 @@
 	/* Unmap sense buffer */
 	dprintkdbg(DBG_SG, "pci_unmap_srb_sense: buffer=%08x\n",
 	       srb->segment_x[0].address);
-	pci_unmap_single(acb->dev, srb->segment_x[0].address,
-			 srb->segment_x[0].length, PCI_DMA_FROMDEVICE);
+	dma_unmap_single(&acb->dev->dev, srb->segment_x[0].address,
+			 srb->segment_x[0].length, DMA_FROM_DEVICE);
 	/* Restore SG stuff */
 	srb->total_xfer_length = srb->xferred;
 	srb->segment_x[0].address =
@@ -3411,7 +3311,7 @@
 				tempcnt--;
 			dcb->max_command = tempcnt;
 			free_tag(dcb, srb);
-			srb_going_to_waiting_move(dcb, srb);
+			list_move(&srb->list, &dcb->srb_waiting_list);
 			waiting_set_timer(acb, HZ / 20);
 			srb->adapter_status = 0;
 			srb->target_status = 0;
@@ -3447,14 +3347,12 @@
 		}
 	}
 
-	if (dir != PCI_DMA_NONE && scsi_sg_count(cmd))
-		pci_dma_sync_sg_for_cpu(acb->dev, scsi_sglist(cmd),
-					scsi_sg_count(cmd), dir);
-
 	ckc_only = 0;
 /* Check Error Conditions */
       ckc_e:
 
+	pci_unmap_srb(acb, srb);
+
 	if (cmd->cmnd[0] == INQUIRY) {
 		unsigned char *base = NULL;
 		struct ScsiInqData *ptr;
@@ -3498,16 +3396,14 @@
 				cmd->cmnd[0], srb->total_xfer_length);
 	}
 
-	srb_going_remove(dcb, srb);
-	/* Add to free list */
-	if (srb == acb->tmp_srb)
-		dprintkl(KERN_ERR, "srb_done: ERROR! Completed cmd with tmp_srb\n");
-	else {
+	if (srb != acb->tmp_srb) {
+		/* Add to free list */
 		dprintkdbg(DBG_0, "srb_done: (0x%p) done result=0x%08x\n",
 			cmd, cmd->result);
-		srb_free_insert(acb, srb);
+		list_move_tail(&srb->list, &acb->srb_free_list);
+	} else {
+		dprintkl(KERN_ERR, "srb_done: ERROR! Completed cmd with tmp_srb\n");
 	}
-	pci_unmap_srb(acb, srb);
 
 	cmd->scsi_done(cmd);
 	waiting_process_next(acb);
@@ -3535,9 +3431,9 @@
 			result = MK_RES(0, did_flag, 0, 0);
 			printk("G:%p(%02i-%i) ", p,
 			       p->device->id, (u8)p->device->lun);
-			srb_going_remove(dcb, srb);
+			list_del(&srb->list);
 			free_tag(dcb, srb);
-			srb_free_insert(acb, srb);
+			list_add_tail(&srb->list, &acb->srb_free_list);
 			p->result = result;
 			pci_unmap_srb_sense(acb, srb);
 			pci_unmap_srb(acb, srb);
@@ -3565,8 +3461,7 @@
 			result = MK_RES(0, did_flag, 0, 0);
 			printk("W:%p<%02i-%i>", p, p->device->id,
 			       (u8)p->device->lun);
-			srb_waiting_remove(dcb, srb);
-			srb_free_insert(acb, srb);
+			list_move_tail(&srb->list, &acb->srb_free_list);
 			p->result = result;
 			pci_unmap_srb_sense(acb, srb);
 			pci_unmap_srb(acb, srb);
@@ -3692,9 +3587,9 @@
 	srb->total_xfer_length = SCSI_SENSE_BUFFERSIZE;
 	srb->segment_x[0].length = SCSI_SENSE_BUFFERSIZE;
 	/* Map sense buffer */
-	srb->segment_x[0].address =
-	    pci_map_single(acb->dev, cmd->sense_buffer,
-			   SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
+	srb->segment_x[0].address = dma_map_single(&acb->dev->dev,
+			cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
+			DMA_FROM_DEVICE);
 	dprintkdbg(DBG_SG, "request_sense: map buffer %p->%08x(%05x)\n",
 	       cmd->sense_buffer, srb->segment_x[0].address,
 	       SCSI_SENSE_BUFFERSIZE);
@@ -3705,7 +3600,7 @@
 		dprintkl(KERN_DEBUG,
 			"request_sense: (0x%p) failed <%02i-%i>\n",
 			srb->cmd, dcb->target_id, dcb->target_lun);
-		srb_going_to_waiting_move(dcb, srb);
+		list_move(&srb->list, &dcb->srb_waiting_list);
 		waiting_set_timer(acb, HZ / 100);
 	}
 }
@@ -4392,7 +4287,7 @@
 	
 	/* link static array of srbs into the srb free list */
 	for (i = 0; i < acb->srb_count - 1; i++)
-		srb_free_insert(acb, &acb->srb_array[i]);
+		list_add_tail(&acb->srb_array[i].list, &acb->srb_free_list);
 }
 
 
@@ -4736,7 +4631,7 @@
 	.cmd_per_lun            = DC395x_MAX_CMD_PER_LUN,
 	.eh_abort_handler       = dc395x_eh_abort,
 	.eh_bus_reset_handler   = dc395x_eh_bus_reset,
-	.use_clustering         = DISABLE_CLUSTERING,
+	.dma_boundary		= PAGE_SIZE - 1,
 };
 
 
diff --git a/drivers/scsi/device_handler/Kconfig b/drivers/scsi/device_handler/Kconfig
index 0b331c9..368eb94 100644
--- a/drivers/scsi/device_handler/Kconfig
+++ b/drivers/scsi/device_handler/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 #
 # SCSI Device Handler configuration
 #
diff --git a/drivers/scsi/device_handler/Makefile b/drivers/scsi/device_handler/Makefile
index 09866c5..0a603ae 100644
--- a/drivers/scsi/device_handler/Makefile
+++ b/drivers/scsi/device_handler/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 #
 # SCSI Device Handler
 #
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 12dc710..f32da0c 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -1,23 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Generic SCSI-3 ALUA SCSI Device Handler
  *
  * Copyright (C) 2007-2010 Hannes Reinecke, SUSE Linux Products GmbH.
  * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
  */
 #include <linux/slab.h>
 #include <linux/delay.h>
@@ -54,6 +40,7 @@
 #define ALUA_FAILOVER_TIMEOUT		60
 #define ALUA_FAILOVER_RETRIES		5
 #define ALUA_RTPG_DELAY_MSECS		5
+#define ALUA_RTPG_RETRY_DELAY		2
 
 /* device handler flags */
 #define ALUA_OPTIMIZE_STPG		0x01
@@ -525,6 +512,7 @@
 	unsigned int tpg_desc_tbl_off;
 	unsigned char orig_transition_tmo;
 	unsigned long flags;
+	bool transitioning_sense = false;
 
 	if (!pg->expiry) {
 		unsigned long transition_tmo = ALUA_FAILOVER_TIMEOUT * HZ;
@@ -585,13 +573,19 @@
 			goto retry;
 		}
 		/*
-		 * Retry on ALUA state transition or if any
-		 * UNIT ATTENTION occurred.
+		 * If the array returns with 'ALUA state transition'
+		 * sense code here it cannot return RTPG data during
+		 * transition. So set the state to 'transitioning' directly.
 		 */
 		if (sense_hdr.sense_key == NOT_READY &&
-		    sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a)
-			err = SCSI_DH_RETRY;
-		else if (sense_hdr.sense_key == UNIT_ATTENTION)
+		    sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a) {
+			transitioning_sense = true;
+			goto skip_rtpg;
+		}
+		/*
+		 * Retry if any other UNIT ATTENTION occurred.
+		 */
+		if (sense_hdr.sense_key == UNIT_ATTENTION)
 			err = SCSI_DH_RETRY;
 		if (err == SCSI_DH_RETRY &&
 		    pg->expiry != 0 && time_before(jiffies, pg->expiry)) {
@@ -679,7 +673,11 @@
 		off = 8 + (desc[7] * 4);
 	}
 
+ skip_rtpg:
 	spin_lock_irqsave(&pg->lock, flags);
+	if (transitioning_sense)
+		pg->state = SCSI_ACCESS_STATE_TRANSITIONING;
+
 	sdev_printk(KERN_INFO, sdev,
 		    "%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n",
 		    ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state),
@@ -696,7 +694,7 @@
 	case SCSI_ACCESS_STATE_TRANSITIONING:
 		if (time_before(jiffies, pg->expiry)) {
 			/* State transition, retry */
-			pg->interval = 2;
+			pg->interval = ALUA_RTPG_RETRY_DELAY;
 			err = SCSI_DH_RETRY;
 		} else {
 			struct alua_dh_data *h;
@@ -821,6 +819,8 @@
 				spin_lock_irqsave(&pg->lock, flags);
 				pg->flags &= ~ALUA_PG_RUNNING;
 				pg->flags |= ALUA_PG_RUN_RTPG;
+				if (!pg->interval)
+					pg->interval = ALUA_RTPG_RETRY_DELAY;
 				spin_unlock_irqrestore(&pg->lock, flags);
 				queue_delayed_work(kaluad_wq, &pg->rtpg_work,
 						   pg->interval * HZ);
@@ -832,6 +832,8 @@
 		spin_lock_irqsave(&pg->lock, flags);
 		if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) {
 			pg->flags &= ~ALUA_PG_RUNNING;
+			if (!pg->interval && !(pg->flags & ALUA_PG_RUN_RTPG))
+				pg->interval = ALUA_RTPG_RETRY_DELAY;
 			pg->flags |= ALUA_PG_RUN_RTPG;
 			spin_unlock_irqrestore(&pg->lock, flags);
 			queue_delayed_work(kaluad_wq, &pg->rtpg_work,
@@ -1071,28 +1073,29 @@
  * Fail I/O to all paths not in state
  * active/optimized or active/non-optimized.
  */
-static int alua_prep_fn(struct scsi_device *sdev, struct request *req)
+static blk_status_t alua_prep_fn(struct scsi_device *sdev, struct request *req)
 {
 	struct alua_dh_data *h = sdev->handler_data;
 	struct alua_port_group *pg;
 	unsigned char state = SCSI_ACCESS_STATE_OPTIMAL;
-	int ret = BLKPREP_OK;
 
 	rcu_read_lock();
 	pg = rcu_dereference(h->pg);
 	if (pg)
 		state = pg->state;
 	rcu_read_unlock();
-	if (state == SCSI_ACCESS_STATE_TRANSITIONING)
-		ret = BLKPREP_DEFER;
-	else if (state != SCSI_ACCESS_STATE_OPTIMAL &&
-		 state != SCSI_ACCESS_STATE_ACTIVE &&
-		 state != SCSI_ACCESS_STATE_LBA) {
-		ret = BLKPREP_KILL;
-		req->rq_flags |= RQF_QUIET;
-	}
-	return ret;
 
+	switch (state) {
+	case SCSI_ACCESS_STATE_OPTIMAL:
+	case SCSI_ACCESS_STATE_ACTIVE:
+	case SCSI_ACCESS_STATE_LBA:
+		return BLK_STS_OK;
+	case SCSI_ACCESS_STATE_TRANSITIONING:
+		return BLK_STS_RESOURCE;
+	default:
+		req->rq_flags |= RQF_QUIET;
+		return BLK_STS_IOERR;
+	}
 }
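The same BLKPREP_*-to-blk_status_t conversion repeats for the EMC, HP SW, and RDAC handlers below. The contract, sketched with hypothetical transitioning()/path_ready() predicates:

#include <linux/blkdev.h>
#include <scsi/scsi_device.h>

bool transitioning(struct scsi_device *sdev);	/* hypothetical */
bool path_ready(struct scsi_device *sdev);	/* hypothetical */

static blk_status_t demo_prep_fn(struct scsi_device *sdev,
				 struct request *req)
{
	if (transitioning(sdev))
		return BLK_STS_RESOURCE;	/* defer; the core retries */
	if (!path_ready(sdev)) {
		req->rq_flags |= RQF_QUIET;	/* suppress error logging */
		return BLK_STS_IOERR;		/* fail the request */
	}
	return BLK_STS_OK;
}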
 
 static void alua_rescan(struct scsi_device *sdev)
@@ -1173,10 +1176,8 @@
 	int r;
 
 	kaluad_wq = alloc_workqueue("kaluad", WQ_MEM_RECLAIM, 0);
-	if (!kaluad_wq) {
-		/* Temporary failure, bypass */
-		return SCSI_DH_DEV_TEMP_BUSY;
-	}
+	if (!kaluad_wq)
+		return -ENOMEM;
 
 	r = scsi_register_device_handler(&alua_dh);
 	if (r != 0) {
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index 95c4790..caa685c 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Target driver for EMC CLARiiON AX/CX-series hardware.
  * Based on code from Lars Marowsky-Bree <lmb@suse.de>
@@ -5,20 +6,6 @@
  *
  * Copyright (C) 2006 Red Hat, Inc.  All rights reserved.
  * Copyright (C) 2006 Mike Christie
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING.  If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 #include <linux/slab.h>
 #include <linux/module.h>
@@ -341,17 +328,17 @@
 	return SCSI_RETURN_NOT_HANDLED;
 }
 
-static int clariion_prep_fn(struct scsi_device *sdev, struct request *req)
+static blk_status_t clariion_prep_fn(struct scsi_device *sdev,
+		struct request *req)
 {
 	struct clariion_dh_data *h = sdev->handler_data;
-	int ret = BLKPREP_OK;
 
 	if (h->lun_state != CLARIION_LUN_OWNED) {
-		ret = BLKPREP_KILL;
 		req->rq_flags |= RQF_QUIET;
+		return BLK_STS_IOERR;
 	}
-	return ret;
 
+	return BLK_STS_OK;
 }
 
 static int clariion_std_inquiry(struct scsi_device *sdev,
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index e65a0eb..8acd4bb 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Basic HP/COMPAQ MSA 1000 support. This is only needed if your HW cannot be
  * upgraded.
@@ -5,20 +6,6 @@
  * Copyright (C) 2006 Red Hat, Inc.  All rights reserved.
  * Copyright (C) 2006 Mike Christie
  * Copyright (C) 2008 Hannes Reinecke <hare@suse.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING.  If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #include <linux/slab.h>
@@ -172,17 +159,16 @@
 	return rc;
 }
 
-static int hp_sw_prep_fn(struct scsi_device *sdev, struct request *req)
+static blk_status_t hp_sw_prep_fn(struct scsi_device *sdev, struct request *req)
 {
 	struct hp_sw_dh_data *h = sdev->handler_data;
-	int ret = BLKPREP_OK;
 
 	if (h->path_state != HP_SW_PATH_ACTIVE) {
-		ret = BLKPREP_KILL;
 		req->rq_flags |= RQF_QUIET;
+		return BLK_STS_IOERR;
 	}
-	return ret;
 
+	return BLK_STS_OK;
 }
 
 /*
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index d27faba..5efc959 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -546,6 +546,8 @@
 	spin_unlock(&ctlr->ms_lock);
 
  retry:
+	memset(cdb, 0, sizeof(cdb));
+
 	data_size = rdac_failover_get(ctlr, &list, cdb);
 
 	RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
@@ -642,17 +644,16 @@
 	return 0;
 }
 
-static int rdac_prep_fn(struct scsi_device *sdev, struct request *req)
+static blk_status_t rdac_prep_fn(struct scsi_device *sdev, struct request *req)
 {
 	struct rdac_dh_data *h = sdev->handler_data;
-	int ret = BLKPREP_OK;
 
 	if (h->state != RDAC_STATE_ACTIVE) {
-		ret = BLKPREP_KILL;
 		req->rq_flags |= RQF_QUIET;
+		return BLK_STS_IOERR;
 	}
-	return ret;
 
+	return BLK_STS_OK;
 }
 
 static int rdac_check_sense(struct scsi_device *sdev,
diff --git a/drivers/scsi/dmx3191d.c b/drivers/scsi/dmx3191d.c
index 003c3d7..6df60b3 100644
--- a/drivers/scsi/dmx3191d.c
+++ b/drivers/scsi/dmx3191d.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
     dmx3191d.c - driver for the Domex DMX3191D SCSI card.
     Copyright (C) 2000 by Massimo Piccioni <dafastidio@libero.it>
@@ -5,19 +6,6 @@
 
     Based on the generic NCR5380 driver by Drew Eckhardt et al.
 
-    This program is free software; you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation; either version 2 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
 
 #include <linux/init.h>
@@ -63,7 +51,7 @@
 	.this_id		= 7,
 	.sg_tablesize		= SG_ALL,
 	.cmd_per_lun		= 2,
-	.use_clustering		= DISABLE_CLUSTERING,
+	.dma_boundary		= PAGE_SIZE - 1,
 	.cmd_size		= NCR5380_CMD_SIZE,
 };
 
diff --git a/drivers/scsi/dpt/dpti_i2o.h b/drivers/scsi/dpt/dpti_i2o.h
index 16fc380..bf0daee 100644
--- a/drivers/scsi/dpt/dpti_i2o.h
+++ b/drivers/scsi/dpt/dpti_i2o.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 #ifndef _SCSI_I2O_H
 #define _SCSI_I2O_H
 
@@ -5,16 +6,10 @@
  *
  * (c) Copyright 1999, 2000 Red Hat Software
  *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
  *************************************************************************
  *
  * This header file defines the I2O APIs/structures for use by
  * the I2O kernel modules.
- *
  */
 
 #ifdef __KERNEL__       /* This file to be included by kernel only */
diff --git a/drivers/scsi/dpt/dpti_ioctl.h b/drivers/scsi/dpt/dpti_ioctl.h
index f602367..6bc33f4 100644
--- a/drivers/scsi/dpt/dpti_ioctl.h
+++ b/drivers/scsi/dpt/dpti_ioctl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /***************************************************************************
                           dpti_ioctl.h  -  description
                              -------------------
@@ -10,10 +11,6 @@
 
 /***************************************************************************
  *                                                                         *
- *   This program is free software; you can redistribute it and/or modify  *
- *   it under the terms of the GNU General Public License as published by  *
- *   the Free Software Foundation; either version 2 of the License, or     *
- *   (at your option) any later version.                                   *
  *                                                                         *
  ***************************************************************************/
 
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 37de8fb..abc74fd 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /***************************************************************************
                           dpti.c  -  description
                              -------------------
@@ -13,10 +14,6 @@
 
 /***************************************************************************
  *                                                                         *
- *   This program is free software; you can redistribute it and/or modify  *
- *   it under the terms of the GNU General Public License as published by  *
- *   the Free Software Foundation; either version 2 of the License, or     *
- *   (at your option) any later version.                                   *
  *                                                                         *
  ***************************************************************************/
 /***************************************************************************
@@ -589,46 +586,6 @@
 }
 
 /*
- *	Turn a struct scsi_cmnd * into a unique 32 bit 'context'.
- */
-static u32 adpt_cmd_to_context(struct scsi_cmnd *cmd)
-{
-	return (u32)cmd->serial_number;
-}
-
-/*
- *	Go from a u32 'context' to a struct scsi_cmnd * .
- *	This could probably be made more efficient.
- */
-static struct scsi_cmnd *
-	adpt_cmd_from_context(adpt_hba * pHba, u32 context)
-{
-	struct scsi_cmnd * cmd;
-	struct scsi_device * d;
-
-	if (context == 0)
-		return NULL;
-
-	spin_unlock(pHba->host->host_lock);
-	shost_for_each_device(d, pHba->host) {
-		unsigned long flags;
-		spin_lock_irqsave(&d->list_lock, flags);
-		list_for_each_entry(cmd, &d->cmd_list, list) {
-			if (((u32)cmd->serial_number == context)) {
-				spin_unlock_irqrestore(&d->list_lock, flags);
-				scsi_device_put(d);
-				spin_lock(pHba->host->host_lock);
-				return cmd;
-			}
-		}
-		spin_unlock_irqrestore(&d->list_lock, flags);
-	}
-	spin_lock(pHba->host->host_lock);
-
-	return NULL;
-}
-
-/*
  *	Turn a pointer to ioctl reply data into an u32 'context'
  */
 static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
@@ -685,9 +642,6 @@
 	u32 msg[5];
 	int rcode;
 
-	if(cmd->serial_number == 0){
-		return FAILED;
-	}
 	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
 	printk(KERN_INFO"%s: Trying to Abort\n",pHba->name);
 	if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
@@ -699,8 +653,9 @@
 	msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
 	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
 	msg[2] = 0;
-	msg[3]= 0; 
-	msg[4] = adpt_cmd_to_context(cmd);
+	msg[3]= 0;
+	/* Add 1 to avoid firmware treating it as invalid command */
+	msg[4] = cmd->request->tag + 1;
 	if (pHba->host)
 		spin_lock_irq(pHba->host->host_lock);
 	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
@@ -877,8 +832,8 @@
 	adpt_hba *pHba, *pNext;
 	struct adpt_i2o_post_wait_data *p1, *old;
 
-	 printk(KERN_INFO"Shutting down Adaptec I2O controllers.\n");
-	 printk(KERN_INFO"   This could take a few minutes if there are many devices attached\n");
+	printk(KERN_INFO "Shutting down Adaptec I2O controllers.\n");
+	printk(KERN_INFO "   This could take a few minutes if there are many devices attached\n");
 	/* Delete all IOPs from the controller chain */
 	/* They should have already been released by the
 	 * scsi-core
@@ -901,7 +856,7 @@
 //	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
 	adpt_post_wait_queue = NULL;
 
-	 printk(KERN_INFO "Adaptec I2O controllers down.\n");
+	printk(KERN_INFO "Adaptec I2O controllers down.\n");
 }
 
 static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
@@ -934,15 +889,15 @@
 	 *	See if we should enable dma64 mode.
 	 */
 	if (sizeof(dma_addr_t) > 4 &&
-	    pci_set_dma_mask(pDev, DMA_BIT_MASK(64)) == 0) {
-		if (dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32))
-			dma64 = 1;
-	}
-	if (!dma64 && pci_set_dma_mask(pDev, DMA_BIT_MASK(32)) != 0)
+	    dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32) &&
+	    dma_set_mask(&pDev->dev, DMA_BIT_MASK(64)) == 0)
+		dma64 = 1;
+
+	if (!dma64 && dma_set_mask(&pDev->dev, DMA_BIT_MASK(32)) != 0)
 		return -EINVAL;
 
 	/* adapter only supports message blocks below 4GB */
-	pci_set_consistent_dma_mask(pDev, DMA_BIT_MASK(32));
+	dma_set_coherent_mask(&pDev->dev, DMA_BIT_MASK(32));
 
 	base_addr0_phys = pci_resource_start(pDev,0);
 	hba_map0_area_size = pci_resource_len(pDev,0);
@@ -2198,20 +2153,27 @@
 				status = I2O_POST_WAIT_OK;
 			}
 			if(!(context & 0x40000000)) {
-				cmd = adpt_cmd_from_context(pHba,
-							readl(reply+12));
+				/*
+				 * The request tag is one less than the command tag
+				 * as the firmware might treat a 0 tag as invalid
+				 */
+				cmd = scsi_host_find_tag(pHba->host,
+							 readl(reply + 12) - 1);
 				if(cmd != NULL) {
 					printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
 				}
 			}
 			adpt_i2o_post_wait_complete(context, status);
 		} else { // SCSI message
-			cmd = adpt_cmd_from_context (pHba, readl(reply+12));
+			/*
+			 * The request tag is one less than the command tag
+			 * as the firmware might treat a 0 tag as invalid
+			 */
+			cmd = scsi_host_find_tag(pHba->host,
+						 readl(reply + 12) - 1);
 			if(cmd != NULL){
 				scsi_dma_unmap(cmd);
-				if(cmd->serial_number != 0) { // If not timedout
-					adpt_i2o_to_scsi(reply, cmd);
-				}
+				adpt_i2o_to_scsi(reply, cmd);
 			}
 		}
 		writel(m, pHba->reply_port);
@@ -2277,7 +2239,8 @@
 	// I2O_CMD_SCSI_EXEC
 	msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
 	msg[2] = 0;
-	msg[3] = adpt_cmd_to_context(cmd);  /* Want SCSI control block back */
+	/* Add 1 to avoid firmware treating it as invalid command */
+	msg[3] = cmd->request->tag + 1;
 	// Our cards use the transaction context as the tag for queueing
 	// Adaptec/DPT Private stuff 
 	msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
@@ -2693,9 +2656,6 @@
 		unsigned long flags;
 		spin_lock_irqsave(&d->list_lock, flags);
 		list_for_each_entry(cmd, &d->cmd_list, list) {
-			if(cmd->serial_number == 0){
-				continue;
-			}
 			cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1);
 			cmd->scsi_done(cmd);
 		}
@@ -3427,7 +3387,7 @@
 		return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
 	}
 
-	 return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */ 
+	return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
 }
 
 
@@ -3500,8 +3460,8 @@
 
 static int adpt_i2o_systab_send(adpt_hba* pHba)
 {
-	 u32 msg[12];
-	 int ret;
+	u32 msg[12];
+	int ret;
 
 	msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
 	msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
@@ -3569,7 +3529,6 @@
 	.slave_configure	= adpt_slave_configure,
 	.can_queue		= MAX_TO_IOP_MESSAGES,
 	.this_id		= 7,
-	.use_clustering		= ENABLE_CLUSTERING,
 };
 
 static int __init adpt_init(void)
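
The dpt_i2o hunks above replace the removed scsi_cmnd serial_number with the block layer's request tag as the 32-bit firmware context, biased by one so the firmware never sees a zero context. A minimal sketch of the round trip, assuming only that the host uses block-layer tagging (the example_* names are illustrative, not kernel APIs):

	#include <scsi/scsi_cmnd.h>
	#include <scsi/scsi_host.h>
	#include <scsi/scsi_tcq.h>

	static u32 example_cmd_to_context(struct scsi_cmnd *cmd)
	{
		/* Tag 0 is valid to the block layer, but the firmware may
		 * treat a zero context as "no command", so bias by one. */
		return cmd->request->tag + 1;
	}

	static struct scsi_cmnd *example_cmd_from_context(struct Scsi_Host *shost,
							  u32 context)
	{
		if (context == 0)
			return NULL;
		/* Undo the bias; scsi_host_find_tag() is a constant-time
		 * lookup, unlike the removed walk over every device's
		 * cmd_list. */
		return scsi_host_find_tag(shost, context - 1);
	}

This also removes the old need to drop and retake the host lock while scanning every device's command list during completion.
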
diff --git a/drivers/scsi/dpti.h b/drivers/scsi/dpti.h
index dfc8d2e..42b1e28 100644
--- a/drivers/scsi/dpti.h
+++ b/drivers/scsi/dpti.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /***************************************************************************
                           dpti.h  -  description
                              -------------------
@@ -10,10 +11,6 @@
 
 /***************************************************************************
  *                                                                         *
- *   This program is free software; you can redistribute it and/or modify  *
- *   it under the terms of the GNU General Public License as published by  *
- *   the Free Software Foundation; either version 2 of the License, or     *
- *   (at your option) any later version.                                   *
  *                                                                         *
  ***************************************************************************/
 
diff --git a/drivers/scsi/esas2r/Kconfig b/drivers/scsi/esas2r/Kconfig
index 78fdbfd..19f6d30 100644
--- a/drivers/scsi/esas2r/Kconfig
+++ b/drivers/scsi/esas2r/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 config SCSI_ESAS2R
 	tristate "ATTO Technology's ExpressSAS RAID adapter driver"
 	depends on PCI && SCSI
diff --git a/drivers/scsi/esas2r/Makefile b/drivers/scsi/esas2r/Makefile
index c77160b..279d9cb 100644
--- a/drivers/scsi/esas2r/Makefile
+++ b/drivers/scsi/esas2r/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 obj-$(CONFIG_SCSI_ESAS2R)	+= esas2r.o
 
 esas2r-objs := esas2r_log.o esas2r_disc.o esas2r_flash.o esas2r_init.o \
diff --git a/drivers/scsi/esas2r/esas2r.h b/drivers/scsi/esas2r/esas2r.h
index 858c3b3..7f43b95 100644
--- a/drivers/scsi/esas2r/esas2r.h
+++ b/drivers/scsi/esas2r/esas2r.h
@@ -965,8 +965,8 @@
 const char *esas2r_info(struct Scsi_Host *);
 int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq,
 			struct esas2r_sas_nvram *data);
-int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg);
-int esas2r_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
+int esas2r_ioctl_handler(void *hostdata, unsigned int cmd, void __user *arg);
+int esas2r_ioctl(struct scsi_device *dev, unsigned int cmd, void __user *arg);
 u8 handle_hba_ioctl(struct esas2r_adapter *a,
 		    struct atto_ioctl *ioctl_hba);
 int esas2r_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd);
diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c
index bbe77db..eb7d139 100644
--- a/drivers/scsi/esas2r/esas2r_init.c
+++ b/drivers/scsi/esas2r/esas2r_init.c
@@ -266,6 +266,7 @@
 	int i;
 	void *next_uncached;
 	struct esas2r_request *first_request, *last_request;
+	bool dma64 = false;
 
 	if (index >= MAX_ADAPTERS) {
 		esas2r_log(ESAS2R_LOG_CRIT,
@@ -286,42 +287,20 @@
 	a->pcid = pcid;
 	a->host = host;
 
-	if (sizeof(dma_addr_t) > 4) {
-		const uint64_t required_mask = dma_get_required_mask
-						       (&pcid->dev);
-		if (required_mask > DMA_BIT_MASK(32)
-		    && !pci_set_dma_mask(pcid, DMA_BIT_MASK(64))
-		    && !pci_set_consistent_dma_mask(pcid,
-						    DMA_BIT_MASK(64))) {
-			esas2r_log_dev(ESAS2R_LOG_INFO,
-				       &(a->pcid->dev),
-				       "64-bit PCI addressing enabled\n");
-		} else if (!pci_set_dma_mask(pcid, DMA_BIT_MASK(32))
-			   && !pci_set_consistent_dma_mask(pcid,
-							   DMA_BIT_MASK(32))) {
-			esas2r_log_dev(ESAS2R_LOG_INFO,
-				       &(a->pcid->dev),
-				       "32-bit PCI addressing enabled\n");
-		} else {
-			esas2r_log(ESAS2R_LOG_CRIT,
-				   "failed to set DMA mask");
-			esas2r_kill_adapter(index);
-			return 0;
-		}
-	} else {
-		if (!pci_set_dma_mask(pcid, DMA_BIT_MASK(32))
-		    && !pci_set_consistent_dma_mask(pcid,
-						    DMA_BIT_MASK(32))) {
-			esas2r_log_dev(ESAS2R_LOG_INFO,
-				       &(a->pcid->dev),
-				       "32-bit PCI addressing enabled\n");
-		} else {
-			esas2r_log(ESAS2R_LOG_CRIT,
-				   "failed to set DMA mask");
-			esas2r_kill_adapter(index);
-			return 0;
-		}
+	if (sizeof(dma_addr_t) > 4 &&
+	    dma_get_required_mask(&pcid->dev) > DMA_BIT_MASK(32) &&
+	    !dma_set_mask_and_coherent(&pcid->dev, DMA_BIT_MASK(64)))
+		dma64 = true;
+
+	if (!dma64 && dma_set_mask_and_coherent(&pcid->dev, DMA_BIT_MASK(32))) {
+		esas2r_log(ESAS2R_LOG_CRIT, "failed to set DMA mask");
+		esas2r_kill_adapter(index);
+		return 0;
 	}
+
+	esas2r_log_dev(ESAS2R_LOG_INFO, &pcid->dev,
+		       "%s-bit PCI addressing enabled\n", dma64 ? "64" : "32");
+
 	esas2r_adapters[index] = a;
 	sprintf(a->name, ESAS2R_DRVR_NAME "_%02d", index);
 	esas2r_debug("new adapter %p, name %s", a, a->name);
@@ -783,14 +762,10 @@
 
 static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a)
 {
-	int pcie_cap_reg;
-
-	pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP);
-	if (pcie_cap_reg) {
+	if (pci_is_pcie(a->pcid)) {
 		u16 devcontrol;
 
-		pci_read_config_word(a->pcid, pcie_cap_reg + PCI_EXP_DEVCTL,
-				     &devcontrol);
+		pcie_capability_read_word(a->pcid, PCI_EXP_DEVCTL, &devcontrol);
 
 		if ((devcontrol & PCI_EXP_DEVCTL_READRQ) >
 		     PCI_EXP_DEVCTL_READRQ_512B) {
@@ -799,9 +774,8 @@
 
 			devcontrol &= ~PCI_EXP_DEVCTL_READRQ;
 			devcontrol |= PCI_EXP_DEVCTL_READRQ_512B;
-			pci_write_config_word(a->pcid,
-					      pcie_cap_reg + PCI_EXP_DEVCTL,
-					      devcontrol);
+			pcie_capability_write_word(a->pcid, PCI_EXP_DEVCTL,
+						   devcontrol);
 		}
 	}
 }
@@ -1262,6 +1236,7 @@
 			a->init_msg = ESAS2R_INIT_MSG_GET_INIT;
 			break;
 		}
+		/* fall through */
 
 	case ESAS2R_INIT_MSG_GET_INIT:
 		if (msg == ESAS2R_INIT_MSG_GET_INIT) {
@@ -1275,7 +1250,7 @@
 				esas2r_hdebug("FAILED");
 			}
 		}
-	/* fall through */
+		/* fall through */
 
 	default:
 		rq->req_stat = RS_SUCCESS;
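
This hunk and the dpt_i2o one above follow the same tree-wide conversion: the pci_set_dma_mask()/pci_set_consistent_dma_mask() ladder collapses into the generic DMA API. A minimal sketch of the resulting pattern (example_setup_dma() is illustrative; dpt_i2o differs only in keeping a 32-bit coherent mask because its message blocks must stay below 4GB):

	#include <linux/dma-mapping.h>
	#include <linux/pci.h>

	static int example_setup_dma(struct pci_dev *pdev)
	{
		struct device *dev = &pdev->dev;

		/* Prefer 64-bit only when the platform actually needs more
		 * than 32 bits and the device accepts the wider mask; the
		 * _and_coherent helper sets the streaming and coherent
		 * masks in one call. */
		if (sizeof(dma_addr_t) > 4 &&
		    dma_get_required_mask(dev) > DMA_BIT_MASK(32) &&
		    !dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
			return 0;

		/* Otherwise fall back to 32-bit addressing for both. */
		return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	}
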
diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c
index 34bcc8c..442c5e7 100644
--- a/drivers/scsi/esas2r/esas2r_ioctl.c
+++ b/drivers/scsi/esas2r/esas2r_ioctl.c
@@ -757,7 +757,6 @@
 
 		struct atto_hba_get_adapter_info *gai =
 			&hi->data.get_adap_info;
-		int pcie_cap_reg;
 
 		if (hi->flags & HBAF_TUNNEL) {
 			hi->status = ATTO_STS_UNSUPPORTED;
@@ -784,17 +783,14 @@
 		gai->pci.dev_num = PCI_SLOT(a->pcid->devfn);
 		gai->pci.func_num = PCI_FUNC(a->pcid->devfn);
 
-		pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP);
-		if (pcie_cap_reg) {
+		if (pci_is_pcie(a->pcid)) {
 			u16 stat;
 			u32 caps;
 
-			pci_read_config_word(a->pcid,
-					     pcie_cap_reg + PCI_EXP_LNKSTA,
-					     &stat);
-			pci_read_config_dword(a->pcid,
-					      pcie_cap_reg + PCI_EXP_LNKCAP,
-					      &caps);
+			pcie_capability_read_word(a->pcid, PCI_EXP_LNKSTA,
+						  &stat);
+			pcie_capability_read_dword(a->pcid, PCI_EXP_LNKCAP,
+						   &caps);
 
 			gai->pci.link_speed_curr =
 				(u8)(stat & PCI_EXP_LNKSTA_CLS);
@@ -1274,7 +1270,7 @@
 
 
 /* This function only cares about ATTO-specific ioctls (atto_express_ioctl) */
-int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg)
+int esas2r_ioctl_handler(void *hostdata, unsigned int cmd, void __user *arg)
 {
 	struct atto_express_ioctl *ioctl = NULL;
 	struct esas2r_adapter *a;
@@ -1292,9 +1288,8 @@
 	ioctl = memdup_user(arg, sizeof(struct atto_express_ioctl));
 	if (IS_ERR(ioctl)) {
 		esas2r_log(ESAS2R_LOG_WARN,
-			   "ioctl_handler access_ok failed for cmd %d, "
-			   "address %p", cmd,
-			   arg);
+			   "ioctl_handler access_ok failed for cmd %u, address %p",
+			   cmd, arg);
 		return PTR_ERR(ioctl);
 	}
 
@@ -1493,7 +1488,7 @@
 ioctl_done:
 
 	if (err < 0) {
-		esas2r_log(ESAS2R_LOG_WARN, "err %d on ioctl cmd %d", err,
+		esas2r_log(ESAS2R_LOG_WARN, "err %d on ioctl cmd %u", err,
 			   cmd);
 
 		switch (err) {
@@ -1518,9 +1513,8 @@
 	err = __copy_to_user(arg, ioctl, sizeof(struct atto_express_ioctl));
 	if (err != 0) {
 		esas2r_log(ESAS2R_LOG_WARN,
-			   "ioctl_handler copy_to_user didn't copy "
-			   "everything (err %d, cmd %d)", err,
-			   cmd);
+			   "ioctl_handler copy_to_user didn't copy everything (err %d, cmd %u)",
+			   err, cmd);
 		kfree(ioctl);
 
 		return -EFAULT;
@@ -1531,7 +1525,7 @@
 	return 0;
 }
 
-int esas2r_ioctl(struct scsi_device *sd, int cmd, void __user *arg)
+int esas2r_ioctl(struct scsi_device *sd, unsigned int cmd, void __user *arg)
 {
 	return esas2r_ioctl_handler(sd->host->hostdata, cmd, arg);
 }
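
Both esas2r hunks retire the open-coded capability walk in favor of the pcie_capability_*() accessors, which locate the PCIe capability offset internally. A sketch of the same read-modify-write that esas2r_init_pci_cfg_space() performs above (example_clamp_readrq() is illustrative):

	#include <linux/pci.h>

	static void example_clamp_readrq(struct pci_dev *pdev)
	{
		u16 devctl;

		if (!pci_is_pcie(pdev))
			return;

		pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &devctl);
		if ((devctl & PCI_EXP_DEVCTL_READRQ) >
		     PCI_EXP_DEVCTL_READRQ_512B) {
			/* Limit the maximum read request size to 512B. */
			devctl &= ~PCI_EXP_DEVCTL_READRQ;
			devctl |= PCI_EXP_DEVCTL_READRQ_512B;
			pcie_capability_write_word(pdev, PCI_EXP_DEVCTL,
						   devctl);
		}
	}

For the common case of only setting the read request size, pcie_set_readrq() would shorten this further.
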
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
index c071186..fdbda5c 100644
--- a/drivers/scsi/esas2r/esas2r_main.c
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -250,7 +250,6 @@
 		ESAS2R_DEFAULT_CMD_PER_LUN,
 	.present			= 0,
 	.unchecked_isa_dma		= 0,
-	.use_clustering			= ENABLE_CLUSTERING,
 	.emulated			= 0,
 	.proc_name			= ESAS2R_DRVR_NAME,
 	.change_queue_depth		= scsi_change_queue_depth,
@@ -624,7 +623,7 @@
 long esas2r_proc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
 {
 	return esas2r_ioctl_handler(esas2r_proc_host->hostdata,
-				    (int)cmd, (void __user *)arg);
+				    cmd, (void __user *)arg);
 }
 
 static void __exit esas2r_exit(void)
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index 9e5d3f7..bb88995 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /* esp_scsi.c: ESP SCSI driver.
  *
  * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
@@ -369,19 +370,31 @@
 {
 	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
 	struct scatterlist *sg = scsi_sglist(cmd);
-	int dir = cmd->sc_data_direction;
-	int total, i;
+	int total = 0, i;
+	struct scatterlist *s;
 
-	if (dir == DMA_NONE)
+	if (cmd->sc_data_direction == DMA_NONE)
 		return;
 
-	spriv->u.num_sg = esp->ops->map_sg(esp, sg, scsi_sg_count(cmd), dir);
-	spriv->cur_residue = sg_dma_len(sg);
-	spriv->cur_sg = sg;
+	if (esp->flags & ESP_FLAG_NO_DMA_MAP) {
+		/*
+		 * For pseudo DMA and PIO we need the virtual address instead of
+		 * a dma address, so perform an identity mapping.
+		 */
+		spriv->num_sg = scsi_sg_count(cmd);
 
-	total = 0;
-	for (i = 0; i < spriv->u.num_sg; i++)
-		total += sg_dma_len(&sg[i]);
+		scsi_for_each_sg(cmd, s, spriv->num_sg, i) {
+			s->dma_address = (uintptr_t)sg_virt(s);
+			total += sg_dma_len(s);
+		}
+	} else {
+		spriv->num_sg = scsi_dma_map(cmd);
+		scsi_for_each_sg(cmd, s, spriv->num_sg, i)
+			total += sg_dma_len(s);
+	}
+	spriv->cur_residue = sg_dma_len(sg);
+	spriv->prv_sg = NULL;
+	spriv->cur_sg = sg;
 	spriv->tot_residue = total;
 }
 
@@ -434,20 +447,16 @@
 		p->tot_residue = 0;
 	}
 	if (!p->cur_residue && p->tot_residue) {
-		p->cur_sg++;
+		p->prv_sg = p->cur_sg;
+		p->cur_sg = sg_next(p->cur_sg);
 		p->cur_residue = sg_dma_len(p->cur_sg);
 	}
 }
 
 static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
 {
-	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
-	int dir = cmd->sc_data_direction;
-
-	if (dir == DMA_NONE)
-		return;
-
-	esp->ops->unmap_sg(esp, scsi_sglist(cmd), spriv->u.num_sg, dir);
+	if (!(esp->flags & ESP_FLAG_NO_DMA_MAP))
+		scsi_dma_unmap(cmd);
 }
 
 static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
@@ -460,6 +469,7 @@
 		return;
 	}
 	ent->saved_cur_residue = spriv->cur_residue;
+	ent->saved_prv_sg = spriv->prv_sg;
 	ent->saved_cur_sg = spriv->cur_sg;
 	ent->saved_tot_residue = spriv->tot_residue;
 }
@@ -474,21 +484,11 @@
 		return;
 	}
 	spriv->cur_residue = ent->saved_cur_residue;
+	spriv->prv_sg = ent->saved_prv_sg;
 	spriv->cur_sg = ent->saved_cur_sg;
 	spriv->tot_residue = ent->saved_tot_residue;
 }
 
-static void esp_check_command_len(struct esp *esp, struct scsi_cmnd *cmd)
-{
-	if (cmd->cmd_len == 6 ||
-	    cmd->cmd_len == 10 ||
-	    cmd->cmd_len == 12) {
-		esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
-	} else {
-		esp->flags |= ESP_FLAG_DOING_SLOWCMD;
-	}
-}
-
 static void esp_write_tgt_config3(struct esp *esp, int tgt)
 {
 	if (esp->rev > ESP100A) {
@@ -624,6 +624,26 @@
 	}
 }
 
+static void esp_map_sense(struct esp *esp, struct esp_cmd_entry *ent)
+{
+	ent->sense_ptr = ent->cmd->sense_buffer;
+	if (esp->flags & ESP_FLAG_NO_DMA_MAP) {
+		ent->sense_dma = (uintptr_t)ent->sense_ptr;
+		return;
+	}
+
+	ent->sense_dma = dma_map_single(esp->dev, ent->sense_ptr,
+					SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+}
+
+static void esp_unmap_sense(struct esp *esp, struct esp_cmd_entry *ent)
+{
+	if (!(esp->flags & ESP_FLAG_NO_DMA_MAP))
+		dma_unmap_single(esp->dev, ent->sense_dma,
+				 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+	ent->sense_ptr = NULL;
+}
+
 /* When a contingent allegiance condition is created, we force feed a
  * REQUEST_SENSE command to the device to fetch the sense data.  I
  * tried many other schemes, relying on the scsi error handling layer
@@ -645,12 +665,7 @@
 	if (!ent->sense_ptr) {
 		esp_log_autosense("Doing auto-sense for tgt[%d] lun[%d]\n",
 				  tgt, lun);
-
-		ent->sense_ptr = cmd->sense_buffer;
-		ent->sense_dma = esp->ops->map_single(esp,
-						      ent->sense_ptr,
-						      SCSI_SENSE_BUFFERSIZE,
-						      DMA_FROM_DEVICE);
+		esp_map_sense(esp, ent);
 	}
 	ent->saved_sense_ptr = ent->sense_ptr;
 
@@ -717,10 +732,10 @@
 static void esp_maybe_execute_command(struct esp *esp)
 {
 	struct esp_target_data *tp;
-	struct esp_lun_data *lp;
 	struct scsi_device *dev;
 	struct scsi_cmnd *cmd;
 	struct esp_cmd_entry *ent;
+	bool select_and_stop = false;
 	int tgt, lun, i;
 	u32 val, start_cmd;
 	u8 *p;
@@ -743,7 +758,6 @@
 	tgt = dev->id;
 	lun = dev->lun;
 	tp = &esp->target[tgt];
-	lp = dev->hostdata;
 
 	list_move(&ent->list, &esp->active_cmds);
 
@@ -752,7 +766,8 @@
 	esp_map_dma(esp, cmd);
 	esp_save_pointers(esp, ent);
 
-	esp_check_command_len(esp, cmd);
+	if (!(cmd->cmd_len == 6 || cmd->cmd_len == 10 || cmd->cmd_len == 12))
+		select_and_stop = true;
 
 	p = esp->command_block;
 
@@ -793,42 +808,22 @@
 			tp->flags &= ~ESP_TGT_CHECK_NEGO;
 		}
 
-		/* Process it like a slow command.  */
-		if (tp->flags & (ESP_TGT_NEGO_WIDE | ESP_TGT_NEGO_SYNC))
-			esp->flags |= ESP_FLAG_DOING_SLOWCMD;
+		/* If there are multiple message bytes, use Select and Stop */
+		if (esp->msg_out_len)
+			select_and_stop = true;
 	}
 
 build_identify:
-	/* If we don't have a lun-data struct yet, we're probing
-	 * so do not disconnect.  Also, do not disconnect unless
-	 * we have a tag on this command.
-	 */
-	if (lp && (tp->flags & ESP_TGT_DISCONNECT) && ent->tag[0])
-		*p++ = IDENTIFY(1, lun);
-	else
-		*p++ = IDENTIFY(0, lun);
+	*p++ = IDENTIFY(tp->flags & ESP_TGT_DISCONNECT, lun);
 
 	if (ent->tag[0] && esp->rev == ESP100) {
 		/* ESP100 lacks select w/atn3 command, use select
 		 * and stop instead.
 		 */
-		esp->flags |= ESP_FLAG_DOING_SLOWCMD;
+		select_and_stop = true;
 	}
 
-	if (!(esp->flags & ESP_FLAG_DOING_SLOWCMD)) {
-		start_cmd = ESP_CMD_SELA;
-		if (ent->tag[0]) {
-			*p++ = ent->tag[0];
-			*p++ = ent->tag[1];
-
-			start_cmd = ESP_CMD_SA3;
-		}
-
-		for (i = 0; i < cmd->cmd_len; i++)
-			*p++ = cmd->cmnd[i];
-
-		esp->select_state = ESP_SELECT_BASIC;
-	} else {
+	if (select_and_stop) {
 		esp->cmd_bytes_left = cmd->cmd_len;
 		esp->cmd_bytes_ptr = &cmd->cmnd[0];
 
@@ -843,6 +838,19 @@
 
 		start_cmd = ESP_CMD_SELAS;
 		esp->select_state = ESP_SELECT_MSGOUT;
+	} else {
+		start_cmd = ESP_CMD_SELA;
+		if (ent->tag[0]) {
+			*p++ = ent->tag[0];
+			*p++ = ent->tag[1];
+
+			start_cmd = ESP_CMD_SA3;
+		}
+
+		for (i = 0; i < cmd->cmd_len; i++)
+			*p++ = cmd->cmnd[i];
+
+		esp->select_state = ESP_SELECT_BASIC;
 	}
 	val = tgt;
 	if (esp->rev == FASHME)
@@ -902,9 +910,7 @@
 	}
 
 	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
-		esp->ops->unmap_single(esp, ent->sense_dma,
-				       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
-		ent->sense_ptr = NULL;
+		esp_unmap_sense(esp, ent);
 
 		/* Restore the message/status bytes to what we actually
 		 * saw originally.  Also, report that we are providing
@@ -965,7 +971,7 @@
 	cmd->scsi_done = done;
 
 	spriv = ESP_CMD_PRIV(cmd);
-	spriv->u.dma_addr = ~(dma_addr_t)0x0;
+	spriv->num_sg = 0;
 
 	list_add_tail(&ent->list, &esp->queued_cmds);
 
@@ -1032,7 +1038,7 @@
 
 static void esp_schedule_reset(struct esp *esp)
 {
-	esp_log_reset("esp_schedule_reset() from %pf\n",
+	esp_log_reset("esp_schedule_reset() from %ps\n",
 		      __builtin_return_address(0));
 	esp->flags |= ESP_FLAG_RESETTING;
 	esp_event(esp, ESP_EVENT_RESET);
@@ -1252,14 +1258,10 @@
 			esp_unmap_dma(esp, cmd);
 			esp_free_lun_tag(ent, cmd->device->hostdata);
 			tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE);
-			esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
 			esp->cmd_bytes_ptr = NULL;
 			esp->cmd_bytes_left = 0;
 		} else {
-			esp->ops->unmap_single(esp, ent->sense_dma,
-					       SCSI_SENSE_BUFFERSIZE,
-					       DMA_FROM_DEVICE);
-			ent->sense_ptr = NULL;
+			esp_unmap_sense(esp, ent);
 		}
 
 		/* Now that the state is unwound properly, put back onto
@@ -1303,9 +1305,8 @@
 				esp_flush_fifo(esp);
 		}
 
-		/* If we are doing a slow command, negotiation, etc.
-		 * we'll do the right thing as we transition to the
-		 * next phase.
+		/* If we are doing a Select And Stop command, negotiation, etc.
+		 * we'll do the right thing as we transition to the next phase.
 		 */
 		esp_event(esp, ESP_EVENT_CHECK_PHASE);
 		return 0;
@@ -1359,7 +1360,7 @@
 			struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
 			u8 *ptr;
 
-			ptr = scsi_kmap_atomic_sg(p->cur_sg, p->u.num_sg,
+			ptr = scsi_kmap_atomic_sg(p->cur_sg, p->num_sg,
 						  &offset, &count);
 			if (likely(ptr)) {
 				*(ptr + offset) = bval;
@@ -1652,7 +1653,7 @@
 		spriv = ESP_CMD_PRIV(ent->cmd);
 
 		if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) {
-			spriv->cur_sg--;
+			spriv->cur_sg = spriv->prv_sg;
 			spriv->cur_residue = 1;
 		} else
 			spriv->cur_residue++;
@@ -2040,11 +2041,8 @@
 	esp_free_lun_tag(ent, cmd->device->hostdata);
 	cmd->result = DID_RESET << 16;
 
-	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
-		esp->ops->unmap_single(esp, ent->sense_dma,
-				       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
-		ent->sense_ptr = NULL;
-	}
+	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE)
+		esp_unmap_sense(esp, ent);
 
 	cmd->scsi_done(cmd);
 	list_del(&ent->list);
@@ -2383,7 +2381,7 @@
 
 static struct scsi_transport_template *esp_transport_template;
 
-int scsi_esp_register(struct esp *esp, struct device *dev)
+int scsi_esp_register(struct esp *esp)
 {
 	static int instance;
 	int err;
@@ -2403,10 +2401,10 @@
 
 	esp_bootup_reset(esp);
 
-	dev_printk(KERN_INFO, dev, "esp%u: regs[%1p:%1p] irq[%u]\n",
+	dev_printk(KERN_INFO, esp->dev, "esp%u: regs[%1p:%1p] irq[%u]\n",
 		   esp->host->unique_id, esp->regs, esp->dma_regs,
 		   esp->host->irq);
-	dev_printk(KERN_INFO, dev,
+	dev_printk(KERN_INFO, esp->dev,
 		   "esp%u: is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
 		   esp->host->unique_id, esp_chip_names[esp->rev],
 		   esp->cfreq / 1000000, esp->cfact, esp->scsi_id);
@@ -2414,7 +2412,7 @@
 	/* Let the SCSI bus reset settle. */
 	ssleep(esp_bus_reset_settle);
 
-	err = scsi_add_host(esp->host, dev);
+	err = scsi_add_host(esp->host, esp->dev);
 	if (err)
 		return err;
 
@@ -2685,7 +2683,6 @@
 	.can_queue		= 7,
 	.this_id		= 7,
 	.sg_tablesize		= SG_ALL,
-	.use_clustering		= ENABLE_CLUSTERING,
 	.max_sectors		= 0xffff,
 	.skip_settle_delay	= 1,
 };
@@ -2791,3 +2788,131 @@
 
 module_init(esp_init);
 module_exit(esp_exit);
+
+#ifdef CONFIG_SCSI_ESP_PIO
+static inline unsigned int esp_wait_for_fifo(struct esp *esp)
+{
+	int i = 500000;
+
+	do {
+		unsigned int fbytes = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
+
+		if (fbytes)
+			return fbytes;
+
+		udelay(1);
+	} while (--i);
+
+	shost_printk(KERN_ERR, esp->host, "FIFO is empty. sreg [%02x]\n",
+		     esp_read8(ESP_STATUS));
+	return 0;
+}
+
+static inline int esp_wait_for_intr(struct esp *esp)
+{
+	int i = 500000;
+
+	do {
+		esp->sreg = esp_read8(ESP_STATUS);
+		if (esp->sreg & ESP_STAT_INTR)
+			return 0;
+
+		udelay(1);
+	} while (--i);
+
+	shost_printk(KERN_ERR, esp->host, "IRQ timeout. sreg [%02x]\n",
+		     esp->sreg);
+	return 1;
+}
+
+#define ESP_FIFO_SIZE 16
+
+void esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
+		      u32 dma_count, int write, u8 cmd)
+{
+	u8 phase = esp->sreg & ESP_STAT_PMASK;
+
+	cmd &= ~ESP_CMD_DMA;
+	esp->send_cmd_error = 0;
+
+	if (write) {
+		u8 *dst = (u8 *)addr;
+		u8 mask = ~(phase == ESP_MIP ? ESP_INTR_FDONE : ESP_INTR_BSERV);
+
+		scsi_esp_cmd(esp, cmd);
+
+		while (1) {
+			if (!esp_wait_for_fifo(esp))
+				break;
+
+			*dst++ = readb(esp->fifo_reg);
+			--esp_count;
+
+			if (!esp_count)
+				break;
+
+			if (esp_wait_for_intr(esp)) {
+				esp->send_cmd_error = 1;
+				break;
+			}
+
+			if ((esp->sreg & ESP_STAT_PMASK) != phase)
+				break;
+
+			esp->ireg = esp_read8(ESP_INTRPT);
+			if (esp->ireg & mask) {
+				esp->send_cmd_error = 1;
+				break;
+			}
+
+			if (phase == ESP_MIP)
+				esp_write8(ESP_CMD_MOK, ESP_CMD);
+
+			esp_write8(ESP_CMD_TI, ESP_CMD);
+		}
+	} else {
+		unsigned int n = ESP_FIFO_SIZE;
+		u8 *src = (u8 *)addr;
+
+		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
+
+		if (n > esp_count)
+			n = esp_count;
+		writesb(esp->fifo_reg, src, n);
+		src += n;
+		esp_count -= n;
+
+		scsi_esp_cmd(esp, cmd);
+
+		while (esp_count) {
+			if (esp_wait_for_intr(esp)) {
+				esp->send_cmd_error = 1;
+				break;
+			}
+
+			if ((esp->sreg & ESP_STAT_PMASK) != phase)
+				break;
+
+			esp->ireg = esp_read8(ESP_INTRPT);
+			if (esp->ireg & ~ESP_INTR_BSERV) {
+				esp->send_cmd_error = 1;
+				break;
+			}
+
+			n = ESP_FIFO_SIZE -
+			    (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES);
+
+			if (n > esp_count)
+				n = esp_count;
+			writesb(esp->fifo_reg, src, n);
+			src += n;
+			esp_count -= n;
+
+			esp_write8(ESP_CMD_TI, ESP_CMD);
+		}
+	}
+
+	esp->send_cmd_residual = esp_count;
+}
+EXPORT_SYMBOL(esp_send_pio_cmd);
+#endif
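
The scatterlist handling above also switches from pointer arithmetic (cur_sg++, cur_sg--) to sg_next() plus a saved prv_sg, because chained scatterlists are not contiguous arrays and a direct increment or decrement can walk off a chain boundary. A minimal sketch of the cursor pattern (the example_* names are illustrative):

	#include <linux/scatterlist.h>

	struct example_cursor {
		struct scatterlist *prv_sg;	/* entry before cur_sg */
		struct scatterlist *cur_sg;
	};

	static void example_advance(struct example_cursor *c)
	{
		/* Forward movement must go through sg_next(). */
		c->prv_sg = c->cur_sg;
		c->cur_sg = sg_next(c->cur_sg);
	}

	static void example_back_up(struct example_cursor *c)
	{
		/* Chains cannot be walked backward, so the one-entry
		 * restore above relies on the remembered prv_sg. */
		c->cur_sg = c->prv_sg;
	}
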
diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h
index a777727..91b32f2 100644
--- a/drivers/scsi/esp_scsi.h
+++ b/drivers/scsi/esp_scsi.h
@@ -249,12 +249,9 @@
 #define SYNC_DEFP_FAST            0x19   /* 10mb/s */
 
 struct esp_cmd_priv {
-	union {
-		dma_addr_t	dma_addr;
-		int		num_sg;
-	} u;
-
+	int			num_sg;
 	int			cur_residue;
+	struct scatterlist	*prv_sg;
 	struct scatterlist	*cur_sg;
 	int			tot_residue;
 };
@@ -277,6 +274,7 @@
 	struct scsi_cmnd	*cmd;
 
 	unsigned int		saved_cur_residue;
+	struct scatterlist	*saved_prv_sg;
 	struct scatterlist	*saved_cur_sg;
 	unsigned int		saved_tot_residue;
 
@@ -363,19 +361,6 @@
 	void (*esp_write8)(struct esp *esp, u8 val, unsigned long reg);
 	u8 (*esp_read8)(struct esp *esp, unsigned long reg);
 
-	/* Map and unmap DMA memory.  Eventually the driver will be
-	 * converted to the generic DMA API as soon as SBUS is able to
-	 * cope with that.  At such time we can remove this.
-	 */
-	dma_addr_t (*map_single)(struct esp *esp, void *buf,
-				 size_t sz, int dir);
-	int (*map_sg)(struct esp *esp, struct scatterlist *sg,
-		      int num_sg, int dir);
-	void (*unmap_single)(struct esp *esp, dma_addr_t addr,
-			     size_t sz, int dir);
-	void (*unmap_sg)(struct esp *esp, struct scatterlist *sg,
-			 int num_sg, int dir);
-
 	/* Return non-zero if there is an IRQ pending.  Usually this
 	 * status bit lives in the DMA controller sitting in front of
 	 * the ESP.  This has to be accurate or else the ESP interrupt
@@ -435,7 +420,7 @@
 	const struct esp_driver_ops *ops;
 
 	struct Scsi_Host	*host;
-	void			*dev;
+	struct device		*dev;
 
 	struct esp_cmd_entry	*active_cmd;
 
@@ -490,11 +475,11 @@
 	u32			flags;
 #define ESP_FLAG_DIFFERENTIAL	0x00000001
 #define ESP_FLAG_RESETTING	0x00000002
-#define ESP_FLAG_DOING_SLOWCMD	0x00000004
 #define ESP_FLAG_WIDE_CAPABLE	0x00000008
 #define ESP_FLAG_QUICKIRQ_CHECK	0x00000010
 #define ESP_FLAG_DISABLE_SYNC	0x00000020
 #define ESP_FLAG_USE_FIFO	0x00000040
+#define ESP_FLAG_NO_DMA_MAP	0x00000080
 
 	u8			select_state;
 #define ESP_SELECT_NONE		0x00 /* Not selecting */
@@ -532,7 +517,7 @@
 	u32			min_period;
 	u32			radelay;
 
-	/* Slow command state.  */
+	/* ESP_CMD_SELAS command state */
 	u8			*cmd_bytes_ptr;
 	int			cmd_bytes_left;
 
@@ -541,6 +526,9 @@
 	void			*dma;
 	int			dmarev;
 
+	/* These are used by esp_send_pio_cmd() */
+	u8 __iomem		*fifo_reg;
+	int			send_cmd_error;
 	u32			send_cmd_residual;
 };
 
@@ -570,16 +558,18 @@
  *     example, the DMA engine has to be reset before ESP can
  *     be programmed.
  * 11) If necessary, call dev_set_drvdata() as needed.
- * 12) Call scsi_esp_register() with prepared 'esp' structure
- *     and a device pointer if possible.
+ * 12) Call scsi_esp_register() with prepared 'esp' structure.
  * 13) Check scsi_esp_register() return value, release all resources
  *     if an error was returned.
  */
 extern struct scsi_host_template scsi_esp_template;
-extern int scsi_esp_register(struct esp *, struct device *);
+extern int scsi_esp_register(struct esp *);
 
 extern void scsi_esp_unregister(struct esp *);
 extern irqreturn_t scsi_esp_intr(int, void *);
 extern void scsi_esp_cmd(struct esp *, u8);
 
+extern void esp_send_pio_cmd(struct esp *esp, u32 dma_addr, u32 esp_count,
+			     u32 dma_count, int write, u8 cmd);
+
 #endif /* !(_ESP_SCSI_H) */
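
With the mapping ops gone, ESP bus glue either lets the core do DMA mapping or sets ESP_FLAG_NO_DMA_MAP for PIO/pseudo-DMA hardware, and scsi_esp_register() now takes the device from esp->dev. A minimal probe-side sketch under those assumptions (example_register() and the byte-spaced register layout are illustrative):

	#include <linux/platform_device.h>

	static int example_register(struct esp *esp,
				    struct platform_device *pdev)
	{
		esp->dev = &pdev->dev;	/* now a struct device *, not void * */

		/* PIO-only hardware: identity-map sg entries and tell
		 * esp_send_pio_cmd() where the FIFO lives (assuming the
		 * chip's registers are byte-spaced here). */
		esp->flags |= ESP_FLAG_NO_DMA_MAP;
		esp->fifo_reg = esp->regs + ESP_FDATA;

		return scsi_esp_register(esp);	/* device arg was dropped */
	}
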
diff --git a/drivers/scsi/fcoe/Makefile b/drivers/scsi/fcoe/Makefile
index aed0f5d..1183e80 100644
--- a/drivers/scsi/fcoe/Makefile
+++ b/drivers/scsi/fcoe/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 obj-$(CONFIG_FCOE) += fcoe.o
 obj-$(CONFIG_LIBFCOE) += libfcoe.o
 
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index f46b312..25dae9f 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
  *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
  * Maintained at www.Open-FCoE.org
  */
 
@@ -286,7 +274,6 @@
 	.this_id = -1,
 	.cmd_per_lun = 3,
 	.can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
-	.use_clustering = ENABLE_CLUSTERING,
 	.sg_tablesize = SG_ALL,
 	.max_sectors = 0xffff,
 	.track_queue_depth = 1,
@@ -390,7 +377,7 @@
  * Returns: pointer to a struct fcoe_interface or NULL on error
  */
 static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
-						    enum fip_state fip_mode)
+						    enum fip_mode fip_mode)
 {
 	struct fcoe_ctlr_device *ctlr_dev;
 	struct fcoe_ctlr *ctlr;
@@ -1263,15 +1250,21 @@
 	/* attach to scsi transport */
 	fcoe_nport_scsi_transport =
 		fc_attach_transport(&fcoe_nport_fc_functions);
+	if (!fcoe_nport_scsi_transport)
+		goto err;
+
 	fcoe_vport_scsi_transport =
 		fc_attach_transport(&fcoe_vport_fc_functions);
-
-	if (!fcoe_nport_scsi_transport) {
-		printk(KERN_ERR "fcoe: Failed to attach to the FC transport\n");
-		return -ENODEV;
-	}
+	if (!fcoe_vport_scsi_transport)
+		goto err_vport;
 
 	return 0;
+
+err_vport:
+	fc_release_transport(fcoe_nport_scsi_transport);
+err:
+	printk(KERN_ERR "fcoe: Failed to attach to the FC transport\n");
+	return -ENODEV;
 }
 
 /**
@@ -1535,8 +1528,7 @@
 			return -ENOMEM;
 		}
 		frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
-		cp = kmap_atomic(skb_frag_page(frag))
-			+ frag->page_offset;
+		cp = kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag);
 	} else {
 		cp = skb_put(skb, tlen);
 	}
@@ -1631,7 +1623,6 @@
 	else
 		fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
 
-	fh = (struct fc_frame_header *) skb_transport_header(skb);
 	fh = fc_frame_header_get(fp);
 	if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && fh->fh_type == FC_TYPE_FCP)
 		return 0;
@@ -1670,7 +1661,6 @@
 	struct fc_stats *stats;
 	struct fcoe_crc_eof crc_eof;
 	struct fc_frame *fp;
-	struct fcoe_port *port;
 	struct fcoe_hdr *hp;
 
 	fr = fcoe_dev_from_skb(skb);
@@ -1688,7 +1678,6 @@
 			skb_end_pointer(skb), skb->csum,
 			skb->dev ? skb->dev->name : "<NULL>");
 
-	port = lport_priv(lport);
 	skb_linearize(skb); /* check for skb_is_nonlinear is within skb_linearize */
 
 	/*
@@ -1859,7 +1848,6 @@
 	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
 	struct fcoe_ctlr *ctlr;
 	struct fcoe_interface *fcoe;
-	struct fcoe_port *port;
 	struct fc_stats *stats;
 	u32 link_possible = 1;
 	u32 mfs;
@@ -1897,7 +1885,6 @@
 		break;
 	case NETDEV_UNREGISTER:
 		list_del(&fcoe->list);
-		port = lport_priv(ctlr->lp);
 		fcoe_vport_remove(lport);
 		mutex_lock(&fcoe_config_mutex);
 		fcoe_if_destroy(lport);
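
The transport-attach rework in fcoe_if_init() above fixes missing error handling: the old code only checked the nport attach, so a failed vport attach went unnoticed and a failed nport attach could leak what the vport attach had set up. The new code follows the kernel's usual goto-unwind shape, where each label releases exactly what was acquired before the failing step. A generic sketch (the acquire_/release_ helpers are hypothetical):

	static int example_attach_two(void)
	{
		if (!acquire_first())		/* hypothetical step 1 */
			goto err;
		if (!acquire_second())		/* hypothetical step 2 */
			goto err_second;
		return 0;

	err_second:
		release_first();		/* undo step 1 only */
	err:
		pr_err("example: attach failed\n");
		return -ENODEV;
	}
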
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index 6aa4820..520c535 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright(c) 2009 Intel Corporation. All rights reserved.
  *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
  * Maintained at www.Open-FCoE.org
  */
 
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 54da316..1791a39 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2008-2009 Cisco Systems, Inc.  All rights reserved.
  * Copyright (c) 2009 Intel Corporation.  All rights reserved.
  *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
  * Maintained at www.Open-FCoE.org
  */
 
@@ -147,7 +135,7 @@
  * fcoe_ctlr_init() - Initialize the FCoE Controller instance
  * @fip: The FCoE controller to initialize
  */
-void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_state mode)
+void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_mode mode)
 {
 	fcoe_ctlr_set_state(fip, FIP_ST_LINK_WAIT);
 	fip->mode = mode;
@@ -454,7 +442,10 @@
 		mutex_unlock(&fip->ctlr_mutex);
 		fc_linkup(fip->lp);
 	} else if (fip->state == FIP_ST_LINK_WAIT) {
-		fcoe_ctlr_set_state(fip, fip->mode);
+		if (fip->mode == FIP_MODE_NON_FIP)
+			fcoe_ctlr_set_state(fip, FIP_ST_NON_FIP);
+		else
+			fcoe_ctlr_set_state(fip, FIP_ST_AUTO);
 		switch (fip->mode) {
 		default:
 			LIBFCOE_FIP_DBG(fip, "invalid mode %d\n", fip->mode);
@@ -1028,7 +1019,7 @@
 {
 	struct fcoe_fcf *fcf;
 	struct fcoe_fcf new;
-	unsigned long sol_tov = msecs_to_jiffies(FCOE_CTRL_SOL_TOV);
+	unsigned long sol_tov = msecs_to_jiffies(FCOE_CTLR_SOL_TOV);
 	int first = 0;
 	int mtu_valid;
 	int found = 0;
@@ -2014,7 +2005,7 @@
  */
 static inline struct fcoe_rport *fcoe_ctlr_rport(struct fc_rport_priv *rdata)
 {
-	return (struct fcoe_rport *)(rdata + 1);
+	return container_of(rdata, struct fcoe_rport, rdata);
 }
 
 /**
@@ -2278,7 +2269,7 @@
  */
 static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
 			      struct sk_buff *skb,
-			      struct fc_rport_priv *rdata)
+			      struct fcoe_rport *frport)
 {
 	struct fip_header *fiph;
 	struct fip_desc *desc = NULL;
@@ -2286,16 +2277,12 @@
 	struct fip_wwn_desc *wwn = NULL;
 	struct fip_vn_desc *vn = NULL;
 	struct fip_size_desc *size = NULL;
-	struct fcoe_rport *frport;
 	size_t rlen;
 	size_t dlen;
 	u32 desc_mask = 0;
 	u32 dtype;
 	u8 sub;
 
-	memset(rdata, 0, sizeof(*rdata) + sizeof(*frport));
-	frport = fcoe_ctlr_rport(rdata);
-
 	fiph = (struct fip_header *)skb->data;
 	frport->flags = ntohs(fiph->fip_flags);
 
@@ -2358,15 +2345,17 @@
 			if (dlen != sizeof(struct fip_wwn_desc))
 				goto len_err;
 			wwn = (struct fip_wwn_desc *)desc;
-			rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn);
+			frport->rdata.ids.node_name =
+				get_unaligned_be64(&wwn->fd_wwn);
 			break;
 		case FIP_DT_VN_ID:
 			if (dlen != sizeof(struct fip_vn_desc))
 				goto len_err;
 			vn = (struct fip_vn_desc *)desc;
 			memcpy(frport->vn_mac, vn->fd_mac, ETH_ALEN);
-			rdata->ids.port_id = ntoh24(vn->fd_fc_id);
-			rdata->ids.port_name = get_unaligned_be64(&vn->fd_wwpn);
+			frport->rdata.ids.port_id = ntoh24(vn->fd_fc_id);
+			frport->rdata.ids.port_name =
+				get_unaligned_be64(&vn->fd_wwpn);
 			break;
 		case FIP_DT_FC4F:
 			if (dlen != sizeof(struct fip_fc4_feat))
@@ -2412,16 +2401,14 @@
 /**
  * fcoe_ctlr_vn_probe_req() - handle incoming VN2VN probe request.
  * @fip: The FCoE controller
- * @rdata: parsed remote port with frport from the probe request
+ * @frport: parsed FCoE rport from the probe request
  *
  * Called with ctlr_mutex held.
  */
 static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip,
-				   struct fc_rport_priv *rdata)
+				   struct fcoe_rport *frport)
 {
-	struct fcoe_rport *frport = fcoe_ctlr_rport(rdata);
-
-	if (rdata->ids.port_id != fip->port_id)
+	if (frport->rdata.ids.port_id != fip->port_id)
 		return;
 
 	switch (fip->state) {
@@ -2441,7 +2428,7 @@
 		 * Probe's REC bit is not set.
 		 * If we don't reply, we will change our address.
 		 */
-		if (fip->lp->wwpn > rdata->ids.port_name &&
+		if (fip->lp->wwpn > frport->rdata.ids.port_name &&
 		    !(frport->flags & FIP_FL_REC_OR_P2P)) {
 			LIBFCOE_FIP_DBG(fip, "vn_probe_req: "
 					"port_id collision\n");
@@ -2465,14 +2452,14 @@
 /**
  * fcoe_ctlr_vn_probe_reply() - handle incoming VN2VN probe reply.
  * @fip: The FCoE controller
- * @rdata: parsed remote port with frport from the probe request
+ * @frport: parsed FCoE rport from the probe request
  *
  * Called with ctlr_mutex held.
  */
 static void fcoe_ctlr_vn_probe_reply(struct fcoe_ctlr *fip,
-				   struct fc_rport_priv *rdata)
+				     struct fcoe_rport *frport)
 {
-	if (rdata->ids.port_id != fip->port_id)
+	if (frport->rdata.ids.port_id != fip->port_id)
 		return;
 	switch (fip->state) {
 	case FIP_ST_VNMP_START:
@@ -2495,11 +2482,11 @@
 /**
  * fcoe_ctlr_vn_add() - Add a VN2VN entry to the list, based on a claim reply.
  * @fip: The FCoE controller
- * @new: newly-parsed remote port with frport as a template for new rdata
+ * @new: newly-parsed FCoE rport as a template for new rdata
  *
  * Called with ctlr_mutex held.
  */
-static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fc_rport_priv *new)
+static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fcoe_rport *new)
 {
 	struct fc_lport *lport = fip->lp;
 	struct fc_rport_priv *rdata;
@@ -2507,7 +2494,7 @@
 	struct fcoe_rport *frport;
 	u32 port_id;
 
-	port_id = new->ids.port_id;
+	port_id = new->rdata.ids.port_id;
 	if (port_id == fip->port_id)
 		return;
 
@@ -2524,22 +2511,28 @@
 	rdata->disc_id = lport->disc.disc_id;
 
 	ids = &rdata->ids;
-	if ((ids->port_name != -1 && ids->port_name != new->ids.port_name) ||
-	    (ids->node_name != -1 && ids->node_name != new->ids.node_name)) {
+	if ((ids->port_name != -1 &&
+	     ids->port_name != new->rdata.ids.port_name) ||
+	    (ids->node_name != -1 &&
+	     ids->node_name != new->rdata.ids.node_name)) {
 		mutex_unlock(&rdata->rp_mutex);
 		LIBFCOE_FIP_DBG(fip, "vn_add rport logoff %6.6x\n", port_id);
 		fc_rport_logoff(rdata);
 		mutex_lock(&rdata->rp_mutex);
 	}
-	ids->port_name = new->ids.port_name;
-	ids->node_name = new->ids.node_name;
+	ids->port_name = new->rdata.ids.port_name;
+	ids->node_name = new->rdata.ids.node_name;
 	mutex_unlock(&rdata->rp_mutex);
 
 	frport = fcoe_ctlr_rport(rdata);
 	LIBFCOE_FIP_DBG(fip, "vn_add rport %6.6x %s state %d\n",
 			port_id, frport->fcoe_len ? "old" : "new",
 			rdata->rp_state);
-	*frport = *fcoe_ctlr_rport(new);
+	frport->fcoe_len = new->fcoe_len;
+	frport->flags = new->flags;
+	frport->login_count = new->login_count;
+	memcpy(frport->enode_mac, new->enode_mac, ETH_ALEN);
+	memcpy(frport->vn_mac, new->vn_mac, ETH_ALEN);
 	frport->time = 0;
 }
 
@@ -2571,16 +2564,14 @@
 /**
  * fcoe_ctlr_vn_claim_notify() - handle received FIP VN2VN Claim Notification
  * @fip: The FCoE controller
- * @new: newly-parsed remote port with frport as a template for new rdata
+ * @new: newly-parsed FCoE rport as a template for new rdata
  *
  * Called with ctlr_mutex held.
  */
 static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip,
-				      struct fc_rport_priv *new)
+				      struct fcoe_rport *new)
 {
-	struct fcoe_rport *frport = fcoe_ctlr_rport(new);
-
-	if (frport->flags & FIP_FL_REC_OR_P2P) {
+	if (new->flags & FIP_FL_REC_OR_P2P) {
 		LIBFCOE_FIP_DBG(fip, "send probe req for P2P/REC\n");
 		fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
 		return;
@@ -2589,7 +2580,7 @@
 	case FIP_ST_VNMP_START:
 	case FIP_ST_VNMP_PROBE1:
 	case FIP_ST_VNMP_PROBE2:
-		if (new->ids.port_id == fip->port_id) {
+		if (new->rdata.ids.port_id == fip->port_id) {
 			LIBFCOE_FIP_DBG(fip, "vn_claim_notify: "
 					"restart, state %d\n",
 					fip->state);
@@ -2598,8 +2589,8 @@
 		break;
 	case FIP_ST_VNMP_CLAIM:
 	case FIP_ST_VNMP_UP:
-		if (new->ids.port_id == fip->port_id) {
-			if (new->ids.port_name > fip->lp->wwpn) {
+		if (new->rdata.ids.port_id == fip->port_id) {
+			if (new->rdata.ids.port_name > fip->lp->wwpn) {
 				LIBFCOE_FIP_DBG(fip, "vn_claim_notify: "
 						"restart, port_id collision\n");
 				fcoe_ctlr_vn_restart(fip);
@@ -2611,15 +2602,16 @@
 			break;
 		}
 		LIBFCOE_FIP_DBG(fip, "vn_claim_notify: send reply to %x\n",
-				new->ids.port_id);
-		fcoe_ctlr_vn_send(fip, FIP_SC_VN_CLAIM_REP, frport->enode_mac,
-				  min((u32)frport->fcoe_len,
+				new->rdata.ids.port_id);
+		fcoe_ctlr_vn_send(fip, FIP_SC_VN_CLAIM_REP, new->enode_mac,
+				  min((u32)new->fcoe_len,
 				      fcoe_ctlr_fcoe_size(fip)));
 		fcoe_ctlr_vn_add(fip, new);
 		break;
 	default:
 		LIBFCOE_FIP_DBG(fip, "vn_claim_notify: "
-				"ignoring claim from %x\n", new->ids.port_id);
+				"ignoring claim from %x\n",
+				new->rdata.ids.port_id);
 		break;
 	}
 }
@@ -2627,15 +2619,15 @@
 /**
  * fcoe_ctlr_vn_claim_resp() - handle received Claim Response
  * @fip: The FCoE controller that received the frame
- * @new: newly-parsed remote port with frport from the Claim Response
+ * @new: newly-parsed FCoE rport from the Claim Response
  *
  * Called with ctlr_mutex held.
  */
 static void fcoe_ctlr_vn_claim_resp(struct fcoe_ctlr *fip,
-				    struct fc_rport_priv *new)
+				    struct fcoe_rport *new)
 {
 	LIBFCOE_FIP_DBG(fip, "claim resp from rport %x - state %s\n",
-			new->ids.port_id, fcoe_ctlr_state(fip->state));
+			new->rdata.ids.port_id, fcoe_ctlr_state(fip->state));
 	if (fip->state == FIP_ST_VNMP_UP || fip->state == FIP_ST_VNMP_CLAIM)
 		fcoe_ctlr_vn_add(fip, new);
 }
@@ -2643,28 +2635,28 @@
 /**
  * fcoe_ctlr_vn_beacon() - handle received beacon.
  * @fip: The FCoE controller that received the frame
- * @new: newly-parsed remote port with frport from the Beacon
+ * @new: newly-parsed FCoE rport from the Beacon
  *
  * Called with ctlr_mutex held.
  */
 static void fcoe_ctlr_vn_beacon(struct fcoe_ctlr *fip,
-				struct fc_rport_priv *new)
+				struct fcoe_rport *new)
 {
 	struct fc_lport *lport = fip->lp;
 	struct fc_rport_priv *rdata;
 	struct fcoe_rport *frport;
 
-	frport = fcoe_ctlr_rport(new);
-	if (frport->flags & FIP_FL_REC_OR_P2P) {
+	if (new->flags & FIP_FL_REC_OR_P2P) {
 		LIBFCOE_FIP_DBG(fip, "p2p beacon while in vn2vn mode\n");
 		fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
 		return;
 	}
-	rdata = fc_rport_lookup(lport, new->ids.port_id);
+	rdata = fc_rport_lookup(lport, new->rdata.ids.port_id);
 	if (rdata) {
-		if (rdata->ids.node_name == new->ids.node_name &&
-		    rdata->ids.port_name == new->ids.port_name) {
+		if (rdata->ids.node_name == new->rdata.ids.node_name &&
+		    rdata->ids.port_name == new->rdata.ids.port_name) {
 			frport = fcoe_ctlr_rport(rdata);
+
 			LIBFCOE_FIP_DBG(fip, "beacon from rport %x\n",
 					rdata->ids.port_id);
 			if (!frport->time && fip->state == FIP_ST_VNMP_UP) {
@@ -2687,7 +2679,7 @@
 	 * Don't add the neighbor yet.
 	 */
 	LIBFCOE_FIP_DBG(fip, "beacon from new rport %x. sending claim notify\n",
-			new->ids.port_id);
+			new->rdata.ids.port_id);
 	if (time_after(jiffies,
 		       fip->sol_time + msecs_to_jiffies(FIP_VN_ANN_WAIT)))
 		fcoe_ctlr_vn_send_claim(fip);
@@ -2747,10 +2739,7 @@
 {
 	struct fip_header *fiph;
 	enum fip_vn2vn_subcode sub;
-	struct {
-		struct fc_rport_priv rdata;
-		struct fcoe_rport frport;
-	} buf;
+	struct fcoe_rport frport = { };
 	int rc, vlan_id = 0;
 
 	fiph = (struct fip_header *)skb->data;
@@ -2766,7 +2755,7 @@
 		goto drop;
 	}
 
-	rc = fcoe_ctlr_vn_parse(fip, skb, &buf.rdata);
+	rc = fcoe_ctlr_vn_parse(fip, skb, &frport);
 	if (rc) {
 		LIBFCOE_FIP_DBG(fip, "vn_recv vn_parse error %d\n", rc);
 		goto drop;
@@ -2775,19 +2764,19 @@
 	mutex_lock(&fip->ctlr_mutex);
 	switch (sub) {
 	case FIP_SC_VN_PROBE_REQ:
-		fcoe_ctlr_vn_probe_req(fip, &buf.rdata);
+		fcoe_ctlr_vn_probe_req(fip, &frport);
 		break;
 	case FIP_SC_VN_PROBE_REP:
-		fcoe_ctlr_vn_probe_reply(fip, &buf.rdata);
+		fcoe_ctlr_vn_probe_reply(fip, &frport);
 		break;
 	case FIP_SC_VN_CLAIM_NOTIFY:
-		fcoe_ctlr_vn_claim_notify(fip, &buf.rdata);
+		fcoe_ctlr_vn_claim_notify(fip, &frport);
 		break;
 	case FIP_SC_VN_CLAIM_REP:
-		fcoe_ctlr_vn_claim_resp(fip, &buf.rdata);
+		fcoe_ctlr_vn_claim_resp(fip, &frport);
 		break;
 	case FIP_SC_VN_BEACON:
-		fcoe_ctlr_vn_beacon(fip, &buf.rdata);
+		fcoe_ctlr_vn_beacon(fip, &frport);
 		break;
 	default:
 		LIBFCOE_FIP_DBG(fip, "vn_recv unknown subcode %d\n", sub);
@@ -2811,22 +2800,18 @@
  */
 static int fcoe_ctlr_vlan_parse(struct fcoe_ctlr *fip,
 			      struct sk_buff *skb,
-			      struct fc_rport_priv *rdata)
+			      struct fcoe_rport *frport)
 {
 	struct fip_header *fiph;
 	struct fip_desc *desc = NULL;
 	struct fip_mac_desc *macd = NULL;
 	struct fip_wwn_desc *wwn = NULL;
-	struct fcoe_rport *frport;
 	size_t rlen;
 	size_t dlen;
 	u32 desc_mask = 0;
 	u32 dtype;
 	u8 sub;
 
-	memset(rdata, 0, sizeof(*rdata) + sizeof(*frport));
-	frport = fcoe_ctlr_rport(rdata);
-
 	fiph = (struct fip_header *)skb->data;
 	frport->flags = ntohs(fiph->fip_flags);
 
@@ -2880,7 +2865,8 @@
 			if (dlen != sizeof(struct fip_wwn_desc))
 				goto len_err;
 			wwn = (struct fip_wwn_desc *)desc;
-			rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn);
+			frport->rdata.ids.node_name =
+				get_unaligned_be64(&wwn->fd_wwn);
 			break;
 		default:
 			LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x "
@@ -2966,13 +2952,13 @@
 /**
  * fcoe_ctlr_vlan_disk_reply() - send FIP VLAN Discovery Notification.
  * @fip: The FCoE controller
+ * @frport: The newly-parsed FCoE rport from the Discovery Request
  *
  * Called with ctlr_mutex held.
  */
 static void fcoe_ctlr_vlan_disc_reply(struct fcoe_ctlr *fip,
-				      struct fc_rport_priv *rdata)
+				      struct fcoe_rport *frport)
 {
-	struct fcoe_rport *frport = fcoe_ctlr_rport(rdata);
 	enum fip_vlan_subcode sub = FIP_SC_VL_NOTE;
 
 	if (fip->mode == FIP_MODE_VN2VN)
@@ -2991,22 +2977,19 @@
 {
 	struct fip_header *fiph;
 	enum fip_vlan_subcode sub;
-	struct {
-		struct fc_rport_priv rdata;
-		struct fcoe_rport frport;
-	} buf;
+	struct fcoe_rport frport = { };
 	int rc;
 
 	fiph = (struct fip_header *)skb->data;
 	sub = fiph->fip_subcode;
-	rc = fcoe_ctlr_vlan_parse(fip, skb, &buf.rdata);
+	rc = fcoe_ctlr_vlan_parse(fip, skb, &frport);
 	if (rc) {
 		LIBFCOE_FIP_DBG(fip, "vlan_recv vlan_parse error %d\n", rc);
 		goto drop;
 	}
 	mutex_lock(&fip->ctlr_mutex);
 	if (sub == FIP_SC_VL_REQ)
-		fcoe_ctlr_vlan_disc_reply(fip, &buf.rdata);
+		fcoe_ctlr_vlan_disc_reply(fip, &frport);
 	mutex_unlock(&fip->ctlr_mutex);
 
 drop:
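
The key enabler for the on-stack struct fcoe_rport in these handlers is that libfc's fc_rport_priv is now an embedded, named member (rdata) of struct fcoe_rport, so fcoe_ctlr_rport() can use container_of() instead of assuming the two structs sit back to back in one allocation. A minimal sketch of the idiom (example_rport is illustrative; the real layout lives in include/scsi/libfcoe.h):

	#include <linux/kernel.h>

	struct example_rport {
		struct fc_rport_priv rdata;	/* embedded, not a pointer */
		u16 flags;
	};

	static struct example_rport *to_example_rport(struct fc_rport_priv *rdata)
	{
		/* Valid for any fc_rport_priv embedded in example_rport,
		 * including one on the stack. */
		return container_of(rdata, struct example_rport, rdata);
	}
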
diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
index 5c8310b..2cb7a8c 100644
--- a/drivers/scsi/fcoe/fcoe_sysfs.c
+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright(c) 2011 - 2012 Intel Corporation. All rights reserved.
  *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
  * Maintained at www.Open-FCoE.org
  */
 
@@ -671,8 +659,19 @@
 	.release = fcoe_fcf_device_release,
 };
 
-static BUS_ATTR(ctlr_create, S_IWUSR, NULL, fcoe_ctlr_create_store);
-static BUS_ATTR(ctlr_destroy, S_IWUSR, NULL, fcoe_ctlr_destroy_store);
+static ssize_t ctlr_create_store(struct bus_type *bus, const char *buf,
+				 size_t count)
+{
+	return fcoe_ctlr_create_store(bus, buf, count);
+}
+static BUS_ATTR_WO(ctlr_create);
+
+static ssize_t ctlr_destroy_store(struct bus_type *bus, const char *buf,
+				  size_t count)
+{
+	return fcoe_ctlr_destroy_store(bus, buf, count);
+}
+static BUS_ATTR_WO(ctlr_destroy);
 
 static struct attribute *fcoe_bus_attrs[] = {
 	&bus_attr_ctlr_create.attr,
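
BUS_ATTR(name, S_IWUSR, NULL, store) is gone; BUS_ATTR_WO(name) derives the callback name from the attribute, which is why this hunk adds ctlr_create_store()/ctlr_destroy_store() wrappers with the expected names. Roughly, the macro expands as in this sketch (foo is a placeholder):

	/* BUS_ATTR_WO(foo) requires a foo_store() with this signature... */
	static ssize_t foo_store(struct bus_type *bus, const char *buf,
				 size_t count);

	/* ...and expands to approximately: */
	static struct bus_attribute bus_attr_foo =
		__ATTR(foo, 0200, NULL, foo_store);
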
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index f4909cd..a20ddc3 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
  *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
  * Maintained at www.Open-FCoE.org
  */
 
@@ -320,7 +308,7 @@
 u32 fcoe_fc_crc(struct fc_frame *fp)
 {
 	struct sk_buff *skb = fp_skb(fp);
-	struct skb_frag_struct *frag;
+	skb_frag_t *frag;
 	unsigned char *data;
 	unsigned long off, len, clen;
 	u32 crc;
@@ -330,7 +318,7 @@
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		frag = &skb_shinfo(skb)->frags[i];
-		off = frag->page_offset;
+		off = skb_frag_off(frag);
 		len = skb_frag_size(frag);
 		while (len > 0) {
 			clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
@@ -855,7 +843,6 @@
 	mutex_unlock(&ft_mutex);
 	return rc;
 }
-EXPORT_SYMBOL(fcoe_ctlr_destroy_store);
 
 /**
  * fcoe_transport_create() - Create a fcoe interface
@@ -873,7 +860,7 @@
 	int rc = -ENODEV;
 	struct net_device *netdev = NULL;
 	struct fcoe_transport *ft = NULL;
-	enum fip_state fip_mode = (enum fip_state)(long)kp->arg;
+	enum fip_mode fip_mode = (enum fip_mode)kp->arg;
 
 	mutex_lock(&ft_mutex);
 
diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c
new file mode 100644
index 0000000..772bdc9
--- /dev/null
+++ b/drivers/scsi/fdomain.c
@@ -0,0 +1,597 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for Future Domain TMC-16x0 and TMC-3260 SCSI host adapters
+ * Copyright 2019 Ondrej Zary
+ *
+ * Original driver by
+ * Rickard E. Faith, faith@cs.unc.edu
+ *
+ * Future Domain BIOS versions supported for autodetect:
+ *    2.0, 3.0, 3.2, 3.4 (1.0), 3.5 (2.0), 3.6, 3.61
+ * Chips supported:
+ *    TMC-1800, TMC-18C50, TMC-18C30, TMC-36C70
+ * Boards supported:
+ *    Future Domain TMC-1650, TMC-1660, TMC-1670, TMC-1680, TMC-1610M/MER/MEX
+ *    Future Domain TMC-3260 (PCI)
+ *    Quantum ISA-200S, ISA-250MG
+ *    Adaptec AHA-2920A (PCI) [BUT *NOT* AHA-2920C -- use aic7xxx instead]
+ *    IBM ?
+ *
+ * NOTE:
+ *
+ * The Adaptec AHA-2920C has an Adaptec AIC-7850 chip on it.
+ * Use the aic7xxx driver for this board.
+ *
+ * The Adaptec AHA-2920A has a Future Domain chip on it, so this is the right
+ * driver for that card.  Unfortunately, the boxes will probably just say
+ * "2920", so you'll have to look on the card for a Future Domain logo, or a
+ * letter after the 2920.
+ *
+ * If you have a TMC-8xx or TMC-9xx board, then this is not the driver for
+ * your board.
+ *
+ * DESCRIPTION:
+ *
+ * This is the Linux low-level SCSI driver for Future Domain TMC-1660/1680
+ * TMC-1650/1670, and TMC-3260 SCSI host adapters.  The 1650 and 1670 have a
+ * 25-pin external connector, whereas the 1660 and 1680 have a SCSI-2 50-pin
+ * high-density external connector.  The 1670 and 1680 have floppy disk
+ * controllers built in.  The TMC-3260 is a PCI bus card.
+ *
+ * Future Domain's older boards are based on the TMC-1800 chip, and this
+ * driver was originally written for a TMC-1680 board with the TMC-1800 chip.
+ * More recently, boards are being produced with the TMC-18C50 and TMC-18C30
+ * chips.
+ *
+ * Please note that the drive ordering that Future Domain implemented in BIOS
+ * versions 3.4 and 3.5 is the opposite of the order (currently) used by the
+ * rest of the SCSI industry.
+ *
+ *
+ * REFERENCES USED:
+ *
+ * "TMC-1800 SCSI Chip Specification (FDC-1800T)", Future Domain Corporation,
+ * 1990.
+ *
+ * "Technical Reference Manual: 18C50 SCSI Host Adapter Chip", Future Domain
+ * Corporation, January 1992.
+ *
+ * "LXT SCSI Products: Specifications and OEM Technical Manual (Revision
+ * B/September 1991)", Maxtor Corporation, 1991.
+ *
+ * "7213S product Manual (Revision P3)", Maxtor Corporation, 1992.
+ *
+ * "Draft Proposed American National Standard: Small Computer System
+ * Interface - 2 (SCSI-2)", Global Engineering Documents. (X3T9.2/86-109,
+ * revision 10h, October 17, 1991)
+ *
+ * Private communications, Drew Eckhardt (drew@cs.colorado.edu) and Eric
+ * Youngdale (ericy@cais.com), 1992.
+ *
+ * Private communication, Tuong Le (Future Domain Engineering department),
+ * 1994. (Disk geometry computations for Future Domain BIOS version 3.4, and
+ * TMC-18C30 detection.)
+ *
+ * Hogan, Thom. The Programmer's PC Sourcebook. Microsoft Press, 1988. Page
+ * 60 (2.39: Disk Partition Table Layout).
+ *
+ * "18C30 Technical Reference Manual", Future Domain Corporation, 1993, page
+ * 6-1.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/workqueue.h>
+#include <scsi/scsicam.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include "fdomain.h"
+
+/*
+ * FIFO_COUNT: The host adapter has an 8K cache (host adapters based on the
+ * 18C30 chip have a 2K cache).  When this many 512-byte blocks are filled by
+ * the SCSI device, an interrupt will be raised.  Therefore, this could be as
+ * low as 0, or as high as 16.  Note, however, that values which are too high
+ * or too low seem to prevent any interrupts from occurring, and thereby lock
+ * up the machine.
+ */
+#define FIFO_COUNT	2	/* Number of 512 byte blocks before INTR */
+#define PARITY_MASK	ACTL_PAREN	/* Parity enabled, 0 = disabled */
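
A worked check of the threshold arithmetic (illustrative userspace snippet, not part of the driver): per the REG_ICTL description in fdomain.h, the threshold field counts in sixteenths of the FIFO size, so the same FIFO_COUNT value corresponds to different byte counts on the 8K and 2K parts.

	#include <stdio.h>

	int main(void)
	{
		unsigned int fifo_count = 2;	/* FIFO_COUNT above */
		unsigned int fifo_8k = 0x2000;	/* TMC-1800/18C50 FIFO size, bytes */
		unsigned int fifo_2k = 0x800;	/* TMC-18C30 FIFO size, bytes */

		/* One REG_ICTL threshold unit is 1/16 of the FIFO size. */
		printf("8K FIFO: interrupt after %u bytes\n",
		       fifo_count * (fifo_8k / 16));	/* 1024 = two 512-byte blocks */
		printf("2K FIFO: interrupt after %u bytes\n",
		       fifo_count * (fifo_2k / 16));	/* 256 bytes */
		return 0;
	}
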
+
+enum chip_type {
+	unknown		= 0x00,
+	tmc1800		= 0x01,
+	tmc18c50	= 0x02,
+	tmc18c30	= 0x03,
+};
+
+struct fdomain {
+	int base;
+	struct scsi_cmnd *cur_cmd;
+	enum chip_type chip;
+	struct work_struct work;
+};
+
+static inline void fdomain_make_bus_idle(struct fdomain *fd)
+{
+	outb(0, fd->base + REG_BCTL);
+	outb(0, fd->base + REG_MCTL);
+	if (fd->chip == tmc18c50 || fd->chip == tmc18c30)
+		/* Clear forced intr. */
+		outb(ACTL_RESET | ACTL_CLRFIRQ | PARITY_MASK,
+		     fd->base + REG_ACTL);
+	else
+		outb(ACTL_RESET | PARITY_MASK, fd->base + REG_ACTL);
+}
+
+static enum chip_type fdomain_identify(int port)
+{
+	u16 id = inb(port + REG_ID_LSB) | inb(port + REG_ID_MSB) << 8;
+
+	switch (id) {
+	case 0x6127:
+		return tmc1800;
+	case 0x60e9: /* 18c50 or 18c30 */
+		break;
+	default:
+		return unknown;
+	}
+
+	/* Try to toggle 32-bit mode. This only works on an 18c30 chip. */
+	outb(CFG2_32BIT, port + REG_CFG2);
+	if ((inb(port + REG_CFG2) & CFG2_32BIT)) {
+		outb(0, port + REG_CFG2);
+		if ((inb(port + REG_CFG2) & CFG2_32BIT) == 0)
+			return tmc18c30;
+	}
+	/* If that failed, we are an 18c50. */
+	return tmc18c50;
+}
+
+static int fdomain_test_loopback(int base)
+{
+	int i;
+
+	for (i = 0; i < 255; i++) {
+		outb(i, base + REG_LOOPBACK);
+		if (inb(base + REG_LOOPBACK) != i)
+			return 1;
+	}
+
+	return 0;
+}
+
+static void fdomain_reset(int base)
+{
+	outb(BCTL_RST, base + REG_BCTL);
+	mdelay(20);
+	outb(0, base + REG_BCTL);
+	mdelay(1150);
+	outb(0, base + REG_MCTL);
+	outb(PARITY_MASK, base + REG_ACTL);
+}
+
+static int fdomain_select(struct Scsi_Host *sh, int target)
+{
+	int status;
+	unsigned long timeout;
+	struct fdomain *fd = shost_priv(sh);
+
+	outb(BCTL_BUSEN | BCTL_SEL, fd->base + REG_BCTL);
+	outb(BIT(sh->this_id) | BIT(target), fd->base + REG_SCSI_DATA_NOACK);
+
+	/* Stop arbitration and enable parity */
+	outb(PARITY_MASK, fd->base + REG_ACTL);
+
+	timeout = 350;	/* 350 msec */
+
+	do {
+		status = inb(fd->base + REG_BSTAT);
+		if (status & BSTAT_BSY) {
+			/* Enable SCSI Bus */
+			/* (on error, should make bus idle with 0) */
+			outb(BCTL_BUSEN, fd->base + REG_BCTL);
+			return 0;
+		}
+		mdelay(1);
+	} while (--timeout);
+	fdomain_make_bus_idle(fd);
+	return 1;
+}
+
+static void fdomain_finish_cmd(struct fdomain *fd, int result)
+{
+	outb(0, fd->base + REG_ICTL);
+	fdomain_make_bus_idle(fd);
+	fd->cur_cmd->result = result;
+	fd->cur_cmd->scsi_done(fd->cur_cmd);
+	fd->cur_cmd = NULL;
+}
+
+static void fdomain_read_data(struct scsi_cmnd *cmd)
+{
+	struct fdomain *fd = shost_priv(cmd->device->host);
+	unsigned char *virt, *ptr;
+	size_t offset, len;
+
+	while ((len = inw(fd->base + REG_FIFO_COUNT)) > 0) {
+		offset = scsi_bufflen(cmd) - scsi_get_resid(cmd);
+		virt = scsi_kmap_atomic_sg(scsi_sglist(cmd), scsi_sg_count(cmd),
+					   &offset, &len);
+		ptr = virt + offset;
+		if (len & 1)
+			*ptr++ = inb(fd->base + REG_FIFO);
+		if (len > 1)
+			insw(fd->base + REG_FIFO, ptr, len >> 1);
+		scsi_set_resid(cmd, scsi_get_resid(cmd) - len);
+		scsi_kunmap_atomic_sg(virt);
+	}
+}
+
+static void fdomain_write_data(struct scsi_cmnd *cmd)
+{
+	struct fdomain *fd = shost_priv(cmd->device->host);
+	/* 8k FIFO for pre-tmc18c30 chips, 2k FIFO for tmc18c30 */
+	int FIFO_Size = fd->chip == tmc18c30 ? 0x800 : 0x2000;
+	unsigned char *virt, *ptr;
+	size_t offset, len;
+
+	while ((len = FIFO_Size - inw(fd->base + REG_FIFO_COUNT)) > 512) {
+		offset = scsi_bufflen(cmd) - scsi_get_resid(cmd);
+		if (len + offset > scsi_bufflen(cmd)) {
+			len = scsi_bufflen(cmd) - offset;
+			if (len == 0)
+				break;
+		}
+		virt = scsi_kmap_atomic_sg(scsi_sglist(cmd), scsi_sg_count(cmd),
+					   &offset, &len);
+		ptr = virt + offset;
+		if (len & 1)
+			outb(*ptr++, fd->base + REG_FIFO);
+		if (len > 1)
+			outsw(fd->base + REG_FIFO, ptr, len >> 1);
+		scsi_set_resid(cmd, scsi_get_resid(cmd) - len);
+		scsi_kunmap_atomic_sg(virt);
+	}
+}
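
Both PIO loops above lean on the scsi_kmap_atomic_sg() contract: the call maps the scatterlist element containing *offset, rewrites *offset to be relative to that mapping, and clamps *len to the end of the element, so each iteration moves at most one element's worth of data. A minimal sketch of that pattern follows; pio_walk() and its transfer() callback are hypothetical stand-ins for the inb/insw and outb/outsw bodies.

	#include <scsi/scsi_cmnd.h>

	/* Hedged sketch of the per-element mapping loop used above. */
	static void pio_walk(struct scsi_cmnd *cmd, size_t want,
			     void (*transfer)(void *buf, size_t len))
	{
		while (want) {
			size_t offset = scsi_bufflen(cmd) - scsi_get_resid(cmd);
			size_t len = want;
			void *virt = scsi_kmap_atomic_sg(scsi_sglist(cmd),
							 scsi_sg_count(cmd),
							 &offset, &len);

			transfer(virt + offset, len);	/* len <= element remainder */
			scsi_set_resid(cmd, scsi_get_resid(cmd) - len);
			scsi_kunmap_atomic_sg(virt);
			want -= len;
		}
	}
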
+
+static void fdomain_work(struct work_struct *work)
+{
+	struct fdomain *fd = container_of(work, struct fdomain, work);
+	struct Scsi_Host *sh = container_of((void *)fd, struct Scsi_Host,
+					    hostdata);
+	struct scsi_cmnd *cmd = fd->cur_cmd;
+	unsigned long flags;
+	int status;
+	int done = 0;
+
+	spin_lock_irqsave(sh->host_lock, flags);
+
+	if (cmd->SCp.phase & in_arbitration) {
+		status = inb(fd->base + REG_ASTAT);
+		if (!(status & ASTAT_ARB)) {
+			fdomain_finish_cmd(fd, DID_BUS_BUSY << 16);
+			goto out;
+		}
+		cmd->SCp.phase = in_selection;
+
+		outb(ICTL_SEL | FIFO_COUNT, fd->base + REG_ICTL);
+		outb(BCTL_BUSEN | BCTL_SEL, fd->base + REG_BCTL);
+		outb(BIT(cmd->device->host->this_id) | BIT(scmd_id(cmd)),
+		     fd->base + REG_SCSI_DATA_NOACK);
+		/* Stop arbitration and enable parity */
+		outb(ACTL_IRQEN | PARITY_MASK, fd->base + REG_ACTL);
+		goto out;
+	} else if (cmd->SCp.phase & in_selection) {
+		status = inb(fd->base + REG_BSTAT);
+		if (!(status & BSTAT_BSY)) {
+			/* Try again, for slow devices */
+			if (fdomain_select(cmd->device->host, scmd_id(cmd))) {
+				fdomain_finish_cmd(fd, DID_NO_CONNECT << 16);
+				goto out;
+			}
+			/* Stop arbitration and enable parity */
+			outb(ACTL_IRQEN | PARITY_MASK, fd->base + REG_ACTL);
+		}
+		cmd->SCp.phase = in_other;
+		outb(ICTL_FIFO | ICTL_REQ | FIFO_COUNT, fd->base + REG_ICTL);
+		outb(BCTL_BUSEN, fd->base + REG_BCTL);
+		goto out;
+	}
+
+	/* cur_cmd->SCp.phase == in_other: this is the body of the routine */
+	status = inb(fd->base + REG_BSTAT);
+
+	if (status & BSTAT_REQ) {
+		switch (status & (BSTAT_MSG | BSTAT_CMD | BSTAT_IO)) {
+		case BSTAT_CMD:	/* COMMAND OUT */
+			outb(cmd->cmnd[cmd->SCp.sent_command++],
+			     fd->base + REG_SCSI_DATA);
+			break;
+		case 0:	/* DATA OUT -- tmc18c50/tmc18c30 only */
+			if (fd->chip != tmc1800 && !cmd->SCp.have_data_in) {
+				cmd->SCp.have_data_in = -1;
+				outb(ACTL_IRQEN | ACTL_FIFOWR | ACTL_FIFOEN |
+				     PARITY_MASK, fd->base + REG_ACTL);
+			}
+			break;
+		case BSTAT_IO:	/* DATA IN -- tmc18c50/tmc18c30 only */
+			if (fd->chip != tmc1800 && !cmd->SCp.have_data_in) {
+				cmd->SCp.have_data_in = 1;
+				outb(ACTL_IRQEN | ACTL_FIFOEN | PARITY_MASK,
+				     fd->base + REG_ACTL);
+			}
+			break;
+		case BSTAT_CMD | BSTAT_IO:	/* STATUS IN */
+			cmd->SCp.Status = inb(fd->base + REG_SCSI_DATA);
+			break;
+		case BSTAT_MSG | BSTAT_CMD:	/* MESSAGE OUT */
+			outb(MESSAGE_REJECT, fd->base + REG_SCSI_DATA);
+			break;
+		case BSTAT_MSG | BSTAT_CMD | BSTAT_IO:	/* MESSAGE IN */
+			cmd->SCp.Message = inb(fd->base + REG_SCSI_DATA);
+			if (!cmd->SCp.Message)
+				++done;
+			break;
+		}
+	}
+
+	if (fd->chip == tmc1800 && !cmd->SCp.have_data_in &&
+	    cmd->SCp.sent_command >= cmd->cmd_len) {
+		if (cmd->sc_data_direction == DMA_TO_DEVICE) {
+			cmd->SCp.have_data_in = -1;
+			outb(ACTL_IRQEN | ACTL_FIFOWR | ACTL_FIFOEN |
+			     PARITY_MASK, fd->base + REG_ACTL);
+		} else {
+			cmd->SCp.have_data_in = 1;
+			outb(ACTL_IRQEN | ACTL_FIFOEN | PARITY_MASK,
+			     fd->base + REG_ACTL);
+		}
+	}
+
+	if (cmd->SCp.have_data_in == -1) /* DATA OUT */
+		fdomain_write_data(cmd);
+
+	if (cmd->SCp.have_data_in == 1) /* DATA IN */
+		fdomain_read_data(cmd);
+
+	if (done) {
+		fdomain_finish_cmd(fd, (cmd->SCp.Status & 0xff) |
+				   ((cmd->SCp.Message & 0xff) << 8) |
+				   (DID_OK << 16));
+	} else {
+		if (cmd->SCp.phase & disconnect) {
+			outb(ICTL_FIFO | ICTL_SEL | ICTL_REQ | FIFO_COUNT,
+			     fd->base + REG_ICTL);
+			outb(0, fd->base + REG_BCTL);
+		} else
+			outb(ICTL_FIFO | ICTL_REQ | FIFO_COUNT,
+			     fd->base + REG_ICTL);
+	}
+out:
+	spin_unlock_irqrestore(sh->host_lock, flags);
+}
+
+static irqreturn_t fdomain_irq(int irq, void *dev_id)
+{
+	struct fdomain *fd = dev_id;
+
+	/* Is it our IRQ? */
+	if ((inb(fd->base + REG_ASTAT) & ASTAT_IRQ) == 0)
+		return IRQ_NONE;
+
+	outb(0, fd->base + REG_ICTL);
+
+	/* We usually have one spurious interrupt after each command. */
+	if (!fd->cur_cmd)	/* Spurious interrupt */
+		return IRQ_NONE;
+
+	schedule_work(&fd->work);
+
+	return IRQ_HANDLED;
+}
+
+static int fdomain_queue(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
+{
+	struct fdomain *fd = shost_priv(cmd->device->host);
+	unsigned long flags;
+
+	cmd->SCp.Status		= 0;
+	cmd->SCp.Message	= 0;
+	cmd->SCp.have_data_in	= 0;
+	cmd->SCp.sent_command	= 0;
+	cmd->SCp.phase		= in_arbitration;
+	scsi_set_resid(cmd, scsi_bufflen(cmd));
+
+	spin_lock_irqsave(sh->host_lock, flags);
+
+	fd->cur_cmd = cmd;
+
+	fdomain_make_bus_idle(fd);
+
+	/* Start arbitration */
+	outb(0, fd->base + REG_ICTL);
+	outb(0, fd->base + REG_BCTL);	/* Disable data drivers */
+	/* Set our id bit */
+	outb(BIT(cmd->device->host->this_id), fd->base + REG_SCSI_DATA_NOACK);
+	outb(ICTL_ARB, fd->base + REG_ICTL);
+	/* Start arbitration */
+	outb(ACTL_ARB | ACTL_IRQEN | PARITY_MASK, fd->base + REG_ACTL);
+
+	spin_unlock_irqrestore(sh->host_lock, flags);
+
+	return 0;
+}
+
+static int fdomain_abort(struct scsi_cmnd *cmd)
+{
+	struct Scsi_Host *sh = cmd->device->host;
+	struct fdomain *fd = shost_priv(sh);
+	unsigned long flags;
+
+	if (!fd->cur_cmd)
+		return FAILED;
+
+	spin_lock_irqsave(sh->host_lock, flags);
+
+	fdomain_make_bus_idle(fd);
+	fd->cur_cmd->SCp.phase |= aborted;
+	fd->cur_cmd->result = DID_ABORT << 16;
+
+	/* Aborts are not done well... */
+	fdomain_finish_cmd(fd, DID_ABORT << 16);
+	spin_unlock_irqrestore(sh->host_lock, flags);
+	return SUCCESS;
+}
+
+static int fdomain_host_reset(struct scsi_cmnd *cmd)
+{
+	struct Scsi_Host *sh = cmd->device->host;
+	struct fdomain *fd = shost_priv(sh);
+	unsigned long flags;
+
+	spin_lock_irqsave(sh->host_lock, flags);
+	fdomain_reset(fd->base);
+	spin_unlock_irqrestore(sh->host_lock, flags);
+	return SUCCESS;
+}
+
+static int fdomain_biosparam(struct scsi_device *sdev,
+			     struct block_device *bdev,	sector_t capacity,
+			     int geom[])
+{
+	unsigned char *p = scsi_bios_ptable(bdev);
+
+	if (p && p[65] == 0xaa && p[64] == 0x55 /* Partition table valid */
+	    && p[4]) {	 /* Partition type */
+		geom[0] = p[5] + 1;	/* heads */
+		geom[1] = p[6] & 0x3f;	/* sectors */
+	} else {
+		if (capacity >= 0x7e0000) {
+			geom[0] = 255;	/* heads */
+			geom[1] = 63;	/* sectors */
+		} else if (capacity >= 0x200000) {
+			geom[0] = 128;	/* heads */
+			geom[1] = 63;	/* sectors */
+		} else {
+			geom[0] = 64;	/* heads */
+			geom[1] = 32;	/* sectors */
+		}
+	}
+	geom[2] = sector_div(capacity, geom[0] * geom[1]);
+	kfree(p);
+
+	return 0;
+}
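
A quick sanity check of the capacity cut-offs (illustrative userspace snippet, not part of the driver): each threshold is exactly the capacity at which the next-smaller geometry exhausts the BIOS's 1024-cylinder limit.

	#include <assert.h>

	int main(void)
	{
		/* heads * sectors * 1024 cylinders, in 512-byte sectors */
		assert(64  * 32 * 1024 == 0x200000);	/* 1 GiB */
		assert(128 * 63 * 1024 == 0x7e0000);	/* ~4 GiB */
		return 0;
	}
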
+
+static struct scsi_host_template fdomain_template = {
+	.module			= THIS_MODULE,
+	.name			= "Future Domain TMC-16x0",
+	.proc_name		= "fdomain",
+	.queuecommand		= fdomain_queue,
+	.eh_abort_handler	= fdomain_abort,
+	.eh_host_reset_handler	= fdomain_host_reset,
+	.bios_param		= fdomain_biosparam,
+	.can_queue		= 1,
+	.this_id		= 7,
+	.sg_tablesize		= 64,
+	.dma_boundary		= PAGE_SIZE - 1,
+};
+
+struct Scsi_Host *fdomain_create(int base, int irq, int this_id,
+				 struct device *dev)
+{
+	struct Scsi_Host *sh;
+	struct fdomain *fd;
+	enum chip_type chip;
+	static const char * const chip_names[] = {
+		"Unknown", "TMC-1800", "TMC-18C50", "TMC-18C30"
+	};
+	unsigned long irq_flags = 0;
+
+	chip = fdomain_identify(base);
+	if (!chip)
+		return NULL;
+
+	fdomain_reset(base);
+
+	if (fdomain_test_loopback(base))
+		return NULL;
+
+	if (!irq) {
+		dev_err(dev, "card has no IRQ assigned");
+		return NULL;
+	}
+
+	sh = scsi_host_alloc(&fdomain_template, sizeof(struct fdomain));
+	if (!sh)
+		return NULL;
+
+	if (this_id)
+		sh->this_id = this_id & 0x07;
+
+	sh->irq = irq;
+	sh->io_port = base;
+	sh->n_io_port = FDOMAIN_REGION_SIZE;
+
+	fd = shost_priv(sh);
+	fd->base = base;
+	fd->chip = chip;
+	INIT_WORK(&fd->work, fdomain_work);
+
+	if (dev_is_pci(dev) || !strcmp(dev->bus->name, "pcmcia"))
+		irq_flags = IRQF_SHARED;
+
+	if (request_irq(irq, fdomain_irq, irq_flags, "fdomain", fd))
+		goto fail_put;
+
+	shost_printk(KERN_INFO, sh, "%s chip at 0x%x irq %d SCSI ID %d\n",
+		     dev_is_pci(dev) ? "TMC-36C70 (PCI bus)" : chip_names[chip],
+		     base, irq, sh->this_id);
+
+	if (scsi_add_host(sh, dev))
+		goto fail_free_irq;
+
+	scsi_scan_host(sh);
+
+	return sh;
+
+fail_free_irq:
+	free_irq(irq, fd);
+fail_put:
+	scsi_host_put(sh);
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(fdomain_create);
+
+int fdomain_destroy(struct Scsi_Host *sh)
+{
+	struct fdomain *fd = shost_priv(sh);
+
+	cancel_work_sync(&fd->work);
+	scsi_remove_host(sh);
+	if (sh->irq)
+		free_irq(sh->irq, fd);
+	scsi_host_put(sh);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(fdomain_destroy);
+
+#ifdef CONFIG_PM_SLEEP
+static int fdomain_resume(struct device *dev)
+{
+	struct fdomain *fd = shost_priv(dev_get_drvdata(dev));
+
+	fdomain_reset(fd->base);
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(fdomain_pm_ops, NULL, fdomain_resume);
+#endif /* CONFIG_PM_SLEEP */
+
+MODULE_AUTHOR("Ondrej Zary, Rickard E. Faith");
+MODULE_DESCRIPTION("Future Domain TMC-16x0/TMC-3260 SCSI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/fdomain.h b/drivers/scsi/fdomain.h
new file mode 100644
index 0000000..6f63fc6
--- /dev/null
+++ b/drivers/scsi/fdomain.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#define FDOMAIN_REGION_SIZE	0x10
+#define FDOMAIN_BIOS_SIZE	0x2000
+
+enum {
+	in_arbitration	= 0x02,
+	in_selection	= 0x04,
+	in_other	= 0x08,
+	disconnect	= 0x10,
+	aborted		= 0x20,
+	sent_ident	= 0x40,
+};
+
+/* (@) = not present on TMC1800, (#) = not present on TMC1800 and TMC18C50 */
+#define REG_SCSI_DATA		0	/* R/W: SCSI Data (with ACK) */
+#define REG_BSTAT		1	/* R: SCSI Bus Status */
+#define		BSTAT_BSY	BIT(0)	 /* Busy */
+#define		BSTAT_MSG	BIT(1)	 /* Message */
+#define		BSTAT_IO	BIT(2)	 /* Input/Output */
+#define		BSTAT_CMD	BIT(3)	 /* Command/Data */
+#define		BSTAT_REQ	BIT(4)	 /* Request and Not Ack */
+#define		BSTAT_SEL	BIT(5)	 /* Select */
+#define		BSTAT_ACK	BIT(6)	 /* Acknowledge and Request */
+#define		BSTAT_ATN	BIT(7)	 /* Attention */
+#define REG_BCTL		1	/* W: SCSI Bus Control */
+#define		BCTL_RST	BIT(0)	 /* Bus Reset */
+#define		BCTL_SEL	BIT(1)	 /* Select */
+#define		BCTL_BSY	BIT(2)	 /* Busy */
+#define		BCTL_ATN	BIT(3)	 /* Attention */
+#define		BCTL_IO		BIT(4)	 /* Input/Output */
+#define		BCTL_CMD	BIT(5)	 /* Command/Data */
+#define		BCTL_MSG	BIT(6)	 /* Message */
+#define		BCTL_BUSEN	BIT(7)	 /* Enable bus drivers */
+#define REG_ASTAT		2	/* R: Adapter Status 1 */
+#define		ASTAT_IRQ	BIT(0)	 /* Interrupt active */
+#define		ASTAT_ARB	BIT(1)	 /* Arbitration complete */
+#define		ASTAT_PARERR	BIT(2)	 /* Parity error */
+#define		ASTAT_RST	BIT(3)	 /* SCSI reset occurred */
+#define		ASTAT_FIFODIR	BIT(4)	 /* FIFO direction */
+#define		ASTAT_FIFOEN	BIT(5)	 /* FIFO enabled */
+#define		ASTAT_PAREN	BIT(6)	 /* Parity enabled */
+#define		ASTAT_BUSEN	BIT(7)	 /* Bus drivers enabled */
+#define REG_ICTL		2	/* W: Interrupt Control */
+#define		ICTL_FIFO_MASK	0x0f	 /* FIFO threshold, 1/16 FIFO size */
+#define		ICTL_FIFO	BIT(4)	 /* Int. on FIFO count */
+#define		ICTL_ARB	BIT(5)	 /* Int. on Arbitration complete */
+#define		ICTL_SEL	BIT(6)	 /* Int. on SCSI Select */
+#define		ICTL_REQ	BIT(7)	 /* Int. on SCSI Request */
+#define REG_FSTAT		3	/* R: Adapter Status 2 (FIFO) - (@) */
+#define		FSTAT_ONOTEMPTY	BIT(0)	 /* Output FIFO not empty */
+#define		FSTAT_INOTEMPTY	BIT(1)	 /* Input FIFO not empty */
+#define		FSTAT_NOTEMPTY	BIT(2)	 /* Main FIFO not empty */
+#define		FSTAT_NOTFULL	BIT(3)	 /* Main FIFO not full */
+#define REG_MCTL		3	/* W: SCSI Data Mode Control */
+#define		MCTL_ACK_MASK	0x0f	 /* Acknowledge period */
+#define		MCTL_ACTDEASS	BIT(4)	 /* Active deassert of REQ and ACK */
+#define		MCTL_TARGET	BIT(5)	 /* Enable target mode */
+#define		MCTL_FASTSYNC	BIT(6)	 /* Enable Fast Synchronous */
+#define		MCTL_SYNC	BIT(7)	 /* Enable Synchronous */
+#define REG_INTCOND		4	/* R: Interrupt Condition - (@) */
+#define		IRQ_FIFO	BIT(1)	 /* FIFO interrupt */
+#define		IRQ_REQ		BIT(2)	 /* SCSI Request interrupt */
+#define		IRQ_SEL		BIT(3)	 /* SCSI Select interrupt */
+#define		IRQ_ARB		BIT(4)	 /* SCSI Arbitration interrupt */
+#define		IRQ_RST		BIT(5)	 /* SCSI Reset interrupt */
+#define		IRQ_FORCED	BIT(6)	 /* Forced interrupt */
+#define		IRQ_TIMEOUT	BIT(7)	 /* Bus timeout */
+#define REG_ACTL		4	/* W: Adapter Control 1 */
+#define		ACTL_RESET	BIT(0)	 /* Reset FIFO, parity, reset int. */
+#define		ACTL_FIRQ	BIT(1)	 /* Set Forced interrupt */
+#define		ACTL_ARB	BIT(2)	 /* Initiate Bus Arbitration */
+#define		ACTL_PAREN	BIT(3)	 /* Enable SCSI Parity */
+#define		ACTL_IRQEN	BIT(4)	 /* Enable interrupts */
+#define		ACTL_CLRFIRQ	BIT(5)	 /* Clear Forced interrupt */
+#define		ACTL_FIFOWR	BIT(6)	 /* FIFO Direction (1=write) */
+#define		ACTL_FIFOEN	BIT(7)	 /* Enable FIFO */
+#define REG_ID_LSB		5	/* R: ID Code (LSB) */
+#define REG_ACTL2		5	/* Adapter Control 2 - (@) */
+#define		ACTL2_RAMOVRLY	BIT(0)	 /* Enable RAM overlay */
+#define		ACTL2_SLEEP	BIT(7)	 /* Sleep mode */
+#define REG_ID_MSB		6	/* R: ID Code (MSB) */
+#define REG_LOOPBACK		7	/* R/W: Loopback */
+#define REG_SCSI_DATA_NOACK	8	/* R/W: SCSI Data (no ACK) */
+#define REG_ASTAT3		9	/* R: Adapter Status 3 */
+#define		ASTAT3_ACTDEASS	BIT(0)	 /* Active deassert enabled */
+#define		ASTAT3_RAMOVRLY	BIT(1)	 /* RAM overlay enabled */
+#define		ASTAT3_TARGERR	BIT(2)	 /* Target error */
+#define		ASTAT3_IRQEN	BIT(3)	 /* Interrupts enabled */
+#define		ASTAT3_IRQMASK	0xf0	 /* Enabled interrupts mask */
+#define REG_CFG1		10	/* R: Configuration Register 1 */
+#define		CFG1_BUS	BIT(0)	 /* 0 = ISA */
+#define		CFG1_IRQ_MASK	0x0e	 /* IRQ jumpers */
+#define		CFG1_IO_MASK	0x30	 /* I/O base jumpers */
+#define		CFG1_BIOS_MASK	0xc0	 /* BIOS base jumpers */
+#define REG_CFG2		11	/* R/W: Configuration Register 2 (@) */
+#define		CFG2_ROMDIS	BIT(0)	 /* ROM disabled */
+#define		CFG2_RAMDIS	BIT(1)	 /* RAM disabled */
+#define		CFG2_IRQEDGE	BIT(2)	 /* Edge-triggered interrupts */
+#define		CFG2_NOWS	BIT(3)	 /* No wait states */
+#define		CFG2_32BIT	BIT(7)	 /* 32-bit mode */
+#define REG_FIFO		12	/* R/W: FIFO */
+#define REG_FIFO_COUNT		14	/* R: FIFO Data Count */
+
+#ifdef CONFIG_PM_SLEEP
+static const struct dev_pm_ops fdomain_pm_ops;
+#define FDOMAIN_PM_OPS	(&fdomain_pm_ops)
+#else
+#define FDOMAIN_PM_OPS	NULL
+#endif /* CONFIG_PM_SLEEP */
+
+struct Scsi_Host *fdomain_create(int base, int irq, int this_id,
+				 struct device *dev);
+int fdomain_destroy(struct Scsi_Host *sh);
diff --git a/drivers/scsi/fdomain_isa.c b/drivers/scsi/fdomain_isa.c
new file mode 100644
index 0000000..f2da4fa
--- /dev/null
+++ b/drivers/scsi/fdomain_isa.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/isa.h>
+#include <scsi/scsi_host.h>
+#include "fdomain.h"
+
+#define MAXBOARDS_PARAM 4
+static int io[MAXBOARDS_PARAM] = { 0, 0, 0, 0 };
+module_param_hw_array(io, int, ioport, NULL, 0);
+MODULE_PARM_DESC(io, "base I/O address of controller (0x140, 0x150, 0x160, 0x170)");
+
+static int irq[MAXBOARDS_PARAM] = { 0, 0, 0, 0 };
+module_param_hw_array(irq, int, irq, NULL, 0);
+MODULE_PARM_DESC(irq, "IRQ of controller (0=auto [default])");
+
+static int scsi_id[MAXBOARDS_PARAM] = { 0, 0, 0, 0 };
+module_param_hw_array(scsi_id, int, other, NULL, 0);
+MODULE_PARM_DESC(scsi_id, "SCSI ID of controller (default = 7)");
+
+static unsigned long addresses[] = {
+	0xc8000,
+	0xca000,
+	0xce000,
+	0xde000,
+};
+#define ADDRESS_COUNT ARRAY_SIZE(addresses)
+
+static unsigned short ports[] = { 0x140, 0x150, 0x160, 0x170 };
+#define PORT_COUNT ARRAY_SIZE(ports)
+
+static unsigned short irqs[] = { 3, 5, 10, 11, 12, 14, 15, 0 };
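
For orientation, a sketch of the slot-to-target mapping used by fdomain_isa_match() below (probe_target() is hypothetical): the ISA bus core invokes match once per slot 0 through 7; the first ADDRESS_COUNT slots scan for BIOS signatures at the addresses above, and the remaining slots probe the raw I/O ports directly.

	/* Hypothetical illustration of how ndev indexes the two tables. */
	static int probe_target(unsigned int ndev)
	{
		if (ndev < ADDRESS_COUNT)
			return -1;	/* BIOS signature scan at addresses[ndev] */

		return ports[ndev - ADDRESS_COUNT];	/* direct port probe */
	}
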
+
+/* This driver works *ONLY* for Future Domain cards using the TMC-1800,
+ * TMC-18C50, or TMC-18C30 chip.  This includes models TMC-1650, 1660, 1670,
+ * and 1680. These are all 16-bit cards.
+ * BIOS versions prior to 3.2 assigned SCSI ID 6 to the SCSI adapter.
+ *
+ * The following BIOS signatures are for boards which do *NOT*
+ * work with this driver (these TMC-8xx and TMC-9xx boards may work with the
+ * Seagate driver):
+ *
+ * FUTURE DOMAIN CORP. (C) 1986-1988 V4.0I 03/16/88
+ * FUTURE DOMAIN CORP. (C) 1986-1989 V5.0C2/14/89
+ * FUTURE DOMAIN CORP. (C) 1986-1989 V6.0A7/28/89
+ * FUTURE DOMAIN CORP. (C) 1986-1990 V6.0105/31/90
+ * FUTURE DOMAIN CORP. (C) 1986-1990 V6.0209/18/90
+ * FUTURE DOMAIN CORP. (C) 1986-1990 V7.009/18/90
+ * FUTURE DOMAIN CORP. (C) 1992 V8.00.004/02/92
+ *
+ * (The cards which do *NOT* work are all 8-bit cards -- although some of
+ * them have a 16-bit form factor, the upper 8 bits are used only for IRQs
+ * and are *NOT* used for data. You can tell the difference by following
+ * the traces on the circuit board -- if only the IRQ lines are involved,
+ * you have an "8-bit" card and should *NOT* use this driver.)
+ */
+
+static struct signature {
+	const char *signature;
+	int offset;
+	int length;
+	int this_id;
+	int base_offset;
+} signatures[] = {
+/*          1         2         3         4         5         6 */
+/* 123456789012345678901234567890123456789012345678901234567890 */
+{ "FUTURE DOMAIN CORP. (C) 1986-1990 1800-V2.07/28/89",	 5, 50,  6, 0x1fcc },
+{ "FUTURE DOMAIN CORP. (C) 1986-1990 1800-V1.07/28/89",	 5, 50,  6, 0x1fcc },
+{ "FUTURE DOMAIN CORP. (C) 1986-1990 1800-V2.07/28/89", 72, 50,  6, 0x1fa2 },
+{ "FUTURE DOMAIN CORP. (C) 1986-1990 1800-V2.0",	73, 43,  6, 0x1fa2 },
+{ "FUTURE DOMAIN CORP. (C) 1991 1800-V2.0.",		72, 39,  6, 0x1fa3 },
+{ "FUTURE DOMAIN CORP. (C) 1992 V3.00.004/02/92",	 5, 44,  6, 0 },
+{ "FUTURE DOMAIN TMC-18XX (C) 1993 V3.203/12/93",	 5, 44,  7, 0 },
+{ "IBM F1 P2 BIOS v1.0011/09/92",			 5, 28,  7, 0x1ff3 },
+{ "IBM F1 P2 BIOS v1.0104/29/93",			 5, 28,  7, 0 },
+{ "Future Domain Corp. V1.0008/18/93",			 5, 33,  7, 0 },
+{ "Future Domain Corp. V2.0108/18/93",			 5, 33,  7, 0 },
+{ "FUTURE DOMAIN CORP.  V3.5008/18/93",			 5, 34,  7, 0 },
+{ "FUTURE DOMAIN 18c30/18c50/1800 (C) 1994 V3.5",	 5, 44,  7, 0 },
+{ "FUTURE DOMAIN CORP.  V3.6008/18/93",			 5, 34,  7, 0 },
+{ "FUTURE DOMAIN CORP.  V3.6108/18/93",			 5, 34,  7, 0 },
+};
+#define SIGNATURE_COUNT ARRAY_SIZE(signatures)
+
+static int fdomain_isa_match(struct device *dev, unsigned int ndev)
+{
+	struct Scsi_Host *sh;
+	int i, base = 0, irq = 0;
+	unsigned long bios_base = 0;
+	struct signature *sig = NULL;
+	void __iomem *p;
+	static struct signature *saved_sig;
+	int this_id = 7;
+
+	if (ndev < ADDRESS_COUNT) {	/* scan supported ISA BIOS addresses */
+		p = ioremap(addresses[ndev], FDOMAIN_BIOS_SIZE);
+		if (!p)
+			return 0;
+		for (i = 0; i < SIGNATURE_COUNT; i++)
+			if (check_signature(p + signatures[i].offset,
+					    signatures[i].signature,
+					    signatures[i].length))
+				break;
+		if (i == SIGNATURE_COUNT)	/* no signature found */
+			goto fail_unmap;
+		sig = &signatures[i];
+		bios_base = addresses[ndev];
+		/* read I/O base from BIOS area */
+		if (sig->base_offset)
+			base = readb(p + sig->base_offset) +
+			      (readb(p + sig->base_offset + 1) << 8);
+		iounmap(p);
+		if (base)
+			dev_info(dev, "BIOS at 0x%lx specifies I/O base 0x%x\n",
+				 bios_base, base);
+		else
+			dev_info(dev, "BIOS at 0x%lx\n", bios_base);
+		if (!base) {	/* no I/O base in BIOS area */
+			/* save BIOS signature for later use in port probing */
+			saved_sig = sig;
+			return 0;
+		}
+	} else	/* scan supported I/O ports */
+		base = ports[ndev - ADDRESS_COUNT];
+
+	/* use saved BIOS signature if present */
+	if (!sig && saved_sig)
+		sig = saved_sig;
+
+	if (!request_region(base, FDOMAIN_REGION_SIZE, "fdomain_isa"))
+		return 0;
+
+	irq = irqs[(inb(base + REG_CFG1) & CFG1_IRQ_MASK) >> 1];
+
+	if (sig)
+		this_id = sig->this_id;
+
+	sh = fdomain_create(base, irq, this_id, dev);
+	if (!sh) {
+		release_region(base, FDOMAIN_REGION_SIZE);
+		return 0;
+	}
+
+	dev_set_drvdata(dev, sh);
+	return 1;
+fail_unmap:
+	iounmap(p);
+	return 0;
+}
+
+static int fdomain_isa_param_match(struct device *dev, unsigned int ndev)
+{
+	struct Scsi_Host *sh;
+	int irq_ = irq[ndev];
+
+	if (!io[ndev])
+		return 0;
+
+	if (!request_region(io[ndev], FDOMAIN_REGION_SIZE, "fdomain_isa")) {
+		dev_err(dev, "base 0x%x already in use", io[ndev]);
+		return 0;
+	}
+
+	if (irq_ <= 0)
+		irq_ = irqs[(inb(io[ndev] + REG_CFG1) & CFG1_IRQ_MASK) >> 1];
+
+	sh = fdomain_create(io[ndev], irq_, scsi_id[ndev], dev);
+	if (!sh) {
+		dev_err(dev, "controller not found at base 0x%x", io[ndev]);
+		release_region(io[ndev], FDOMAIN_REGION_SIZE);
+		return 0;
+	}
+
+	dev_set_drvdata(dev, sh);
+	return 1;
+}
+
+static int fdomain_isa_remove(struct device *dev, unsigned int ndev)
+{
+	struct Scsi_Host *sh = dev_get_drvdata(dev);
+	int base = sh->io_port;
+
+	fdomain_destroy(sh);
+	release_region(base, FDOMAIN_REGION_SIZE);
+	dev_set_drvdata(dev, NULL);
+	return 0;
+}
+
+static struct isa_driver fdomain_isa_driver = {
+	.match		= fdomain_isa_match,
+	.remove		= fdomain_isa_remove,
+	.driver = {
+		.name	= "fdomain_isa",
+		.pm	= FDOMAIN_PM_OPS,
+	},
+};
+
+static int __init fdomain_isa_init(void)
+{
+	int isa_probe_count = ADDRESS_COUNT + PORT_COUNT;
+
+	if (io[0]) {	/* use module parameters if present */
+		fdomain_isa_driver.match = fdomain_isa_param_match;
+		isa_probe_count = MAXBOARDS_PARAM;
+	}
+
+	return isa_register_driver(&fdomain_isa_driver, isa_probe_count);
+}
+
+static void __exit fdomain_isa_exit(void)
+{
+	isa_unregister_driver(&fdomain_isa_driver);
+}
+
+module_init(fdomain_isa_init);
+module_exit(fdomain_isa_exit);
+
+MODULE_AUTHOR("Ondrej Zary, Rickard E. Faith");
+MODULE_DESCRIPTION("Future Domain TMC-16x0 ISA SCSI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/fdomain_pci.c b/drivers/scsi/fdomain_pci.c
new file mode 100644
index 0000000..3e05ce7
--- /dev/null
+++ b/drivers/scsi/fdomain_pci.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include "fdomain.h"
+
+static int fdomain_pci_probe(struct pci_dev *pdev,
+			     const struct pci_device_id *d)
+{
+	int err;
+	struct Scsi_Host *sh;
+
+	err = pci_enable_device(pdev);
+	if (err)
+		goto fail;
+
+	err = pci_request_regions(pdev, "fdomain_pci");
+	if (err)
+		goto disable_device;
+
+	err = -ENODEV;
+	if (pci_resource_len(pdev, 0) == 0)
+		goto release_region;
+
+	sh = fdomain_create(pci_resource_start(pdev, 0), pdev->irq, 7,
+			    &pdev->dev);
+	if (!sh)
+		goto release_region;
+
+	pci_set_drvdata(pdev, sh);
+	return 0;
+
+release_region:
+	pci_release_regions(pdev);
+disable_device:
+	pci_disable_device(pdev);
+fail:
+	return err;
+}
+
+static void fdomain_pci_remove(struct pci_dev *pdev)
+{
+	struct Scsi_Host *sh = pci_get_drvdata(pdev);
+
+	fdomain_destroy(sh);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+}
+
+static struct pci_device_id fdomain_pci_table[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_FD, PCI_DEVICE_ID_FD_36C70) },
+	{}
+};
+MODULE_DEVICE_TABLE(pci, fdomain_pci_table);
+
+static struct pci_driver fdomain_pci_driver = {
+	.name		= "fdomain_pci",
+	.id_table	= fdomain_pci_table,
+	.probe		= fdomain_pci_probe,
+	.remove		= fdomain_pci_remove,
+	.driver.pm	= FDOMAIN_PM_OPS,
+};
+
+module_pci_driver(fdomain_pci_driver);
+
+MODULE_AUTHOR("Ondrej Zary, Rickard E. Faith");
+MODULE_DESCRIPTION("Future Domain TMC-3260 PCI SCSI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index d094ba5..477513d 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -39,7 +39,7 @@
 
 #define DRV_NAME		"fnic"
 #define DRV_DESCRIPTION		"Cisco FCoE HBA Driver"
-#define DRV_VERSION		"1.6.0.34"
+#define DRV_VERSION		"1.6.0.47"
 #define PFX			DRV_NAME ": "
 #define DFX                     DRV_NAME "%d: "
 
@@ -49,7 +49,7 @@
 #define FNIC_MAX_IO_REQ		1024 /* scsi_cmnd tag map entries */
 #define FNIC_DFLT_IO_REQ        256 /* Default scsi_cmnd tag map entries */
 #define	FNIC_IO_LOCKS		64 /* IO locks: power of 2 */
-#define FNIC_DFLT_QUEUE_DEPTH	32
+#define FNIC_DFLT_QUEUE_DEPTH	256
 #define	FNIC_STATS_RATE_LIMIT	4 /* limit rate at which stats are pulled up */
 
 /*
@@ -128,6 +128,7 @@
 	__fnic_set_state_flags(fnicp, st_flags, 1)
 
 extern unsigned int fnic_log_level;
+extern unsigned int io_completions;
 
 #define FNIC_MAIN_LOGGING 0x01
 #define FNIC_FCS_LOGGING 0x02
@@ -196,6 +197,7 @@
 #define FNIC_WQ_MAX 1
 #define FNIC_RQ_MAX 1
 #define FNIC_CQ_MAX (FNIC_WQ_COPY_MAX + FNIC_WQ_MAX + FNIC_RQ_MAX)
+#define FNIC_DFLT_IO_COMPLETIONS 256
 
 struct mempool;
 
diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
index 139fffa..13f7d88 100644
--- a/drivers/scsi/fnic/fnic_debugfs.c
+++ b/drivers/scsi/fnic/fnic_debugfs.c
@@ -52,25 +52,10 @@
  */
 int fnic_debugfs_init(void)
 {
-	int rc = -1;
 	fnic_trace_debugfs_root = debugfs_create_dir("fnic", NULL);
-	if (!fnic_trace_debugfs_root) {
-		printk(KERN_DEBUG "Cannot create debugfs root\n");
-		return rc;
-	}
-
-	if (!fnic_trace_debugfs_root) {
-		printk(KERN_DEBUG
-			"fnic root directory doesn't exist in debugfs\n");
-		return rc;
-	}
 
 	fnic_stats_debugfs_root = debugfs_create_dir("statistics",
 						fnic_trace_debugfs_root);
-	if (!fnic_stats_debugfs_root) {
-		printk(KERN_DEBUG "Cannot create Statistics directory\n");
-		return rc;
-	}
 
 	/* Allocate memory to structure */
 	fc_trc_flag = (struct fc_trace_flag_type *)
@@ -84,8 +69,7 @@
 		fc_trc_flag->fc_clear = 4;
 	}
 
-	rc = 0;
-	return rc;
+	return 0;
 }
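
The deleted checks follow the debugfs convention that creation failures are meant to be ignored; debugfs_create_dir() and debugfs_create_file() return error pointers that may be passed back in as parents without harm. A minimal sketch of the resulting calling style (the names here are placeholders, not fnic symbols):

	#include <linux/debugfs.h>

	static struct dentry *example_root;

	static void example_debugfs_init(void *data,
					 const struct file_operations *fops)
	{
		example_root = debugfs_create_dir("example", NULL);
		/* No error checks: an ERR_PTR parent is tolerated below. */
		debugfs_create_file("stats", 0644, example_root, data, fops);
	}
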
 
 /*
@@ -356,39 +340,19 @@
  * it will also create file trace_enable to control enable/disable of
  * trace logging into trace buffer.
  */
-int fnic_trace_debugfs_init(void)
+void fnic_trace_debugfs_init(void)
 {
-	int rc = -1;
-	if (!fnic_trace_debugfs_root) {
-		printk(KERN_DEBUG
-			"FNIC Debugfs root directory doesn't exist\n");
-		return rc;
-	}
 	fnic_trace_enable = debugfs_create_file("tracing_enable",
 					S_IFREG|S_IRUGO|S_IWUSR,
 					fnic_trace_debugfs_root,
 					&(fc_trc_flag->fnic_trace),
 					&fnic_trace_ctrl_fops);
 
-	if (!fnic_trace_enable) {
-		printk(KERN_DEBUG
-			"Cannot create trace_enable file under debugfs\n");
-		return rc;
-	}
-
 	fnic_trace_debugfs_file = debugfs_create_file("trace",
 					S_IFREG|S_IRUGO|S_IWUSR,
 					fnic_trace_debugfs_root,
 					&(fc_trc_flag->fnic_trace),
 					&fnic_trace_debugfs_fops);
-
-	if (!fnic_trace_debugfs_file) {
-		printk(KERN_DEBUG
-			"Cannot create trace file under debugfs\n");
-		return rc;
-	}
-	rc = 0;
-	return rc;
 }
 
 /*
@@ -419,37 +383,20 @@
  * trace logging into trace buffer.
  */
 
-int fnic_fc_trace_debugfs_init(void)
+void fnic_fc_trace_debugfs_init(void)
 {
-	int rc = -1;
-
-	if (!fnic_trace_debugfs_root) {
-		pr_err("fnic:Debugfs root directory doesn't exist\n");
-		return rc;
-	}
-
 	fnic_fc_trace_enable = debugfs_create_file("fc_trace_enable",
 					S_IFREG|S_IRUGO|S_IWUSR,
 					fnic_trace_debugfs_root,
 					&(fc_trc_flag->fc_trace),
 					&fnic_trace_ctrl_fops);
 
-	if (!fnic_fc_trace_enable) {
-		pr_err("fnic: Failed create fc_trace_enable file\n");
-		return rc;
-	}
-
 	fnic_fc_trace_clear = debugfs_create_file("fc_trace_clear",
 					S_IFREG|S_IRUGO|S_IWUSR,
 					fnic_trace_debugfs_root,
 					&(fc_trc_flag->fc_clear),
 					&fnic_trace_ctrl_fops);
 
-	if (!fnic_fc_trace_clear) {
-		pr_err("fnic: Failed to create fc_trace_enable file\n");
-		return rc;
-	}
-
 	fnic_fc_rdata_trace_debugfs_file =
 		debugfs_create_file("fc_trace_rdata",
 				    S_IFREG|S_IRUGO|S_IWUSR,
@@ -457,24 +404,12 @@
 				    &(fc_trc_flag->fc_normal_file),
 				    &fnic_trace_debugfs_fops);
 
-	if (!fnic_fc_rdata_trace_debugfs_file) {
-		pr_err("fnic: Failed create fc_rdata_trace file\n");
-		return rc;
-	}
-
 	fnic_fc_trace_debugfs_file =
 		debugfs_create_file("fc_trace",
 				    S_IFREG|S_IRUGO|S_IWUSR,
 				    fnic_trace_debugfs_root,
 				    &(fc_trc_flag->fc_row_file),
 				    &fnic_trace_debugfs_fops);
-
-	if (!fnic_fc_trace_debugfs_file) {
-		pr_err("fnic: Failed to create fc_trace file\n");
-		return rc;
-	}
-	rc = 0;
-	return rc;
 }
 
 /*
@@ -757,45 +692,26 @@
  * It will create file stats and reset_stats under statistics/host# directory
  * to log per fnic stats.
  */
-int fnic_stats_debugfs_init(struct fnic *fnic)
+void fnic_stats_debugfs_init(struct fnic *fnic)
 {
-	int rc = -1;
 	char name[16];
 
 	snprintf(name, sizeof(name), "host%d", fnic->lport->host->host_no);
 
-	if (!fnic_stats_debugfs_root) {
-		printk(KERN_DEBUG "fnic_stats root doesn't exist\n");
-		return rc;
-	}
 	fnic->fnic_stats_debugfs_host = debugfs_create_dir(name,
 						fnic_stats_debugfs_root);
-	if (!fnic->fnic_stats_debugfs_host) {
-		printk(KERN_DEBUG "Cannot create host directory\n");
-		return rc;
-	}
 
 	fnic->fnic_stats_debugfs_file = debugfs_create_file("stats",
 						S_IFREG|S_IRUGO|S_IWUSR,
 						fnic->fnic_stats_debugfs_host,
 						fnic,
 						&fnic_stats_debugfs_fops);
-	if (!fnic->fnic_stats_debugfs_file) {
-		printk(KERN_DEBUG "Cannot create host stats file\n");
-		return rc;
-	}
 
 	fnic->fnic_reset_debugfs_file = debugfs_create_file("reset_stats",
 						S_IFREG|S_IRUGO|S_IWUSR,
 						fnic->fnic_stats_debugfs_host,
 						fnic,
 						&fnic_reset_debugfs_fops);
-	if (!fnic->fnic_reset_debugfs_file) {
-		printk(KERN_DEBUG "Cannot create host stats file\n");
-		return rc;
-	}
-	rc = 0;
-	return rc;
 }
 
 /*
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index c7bf316..673887e 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -52,6 +52,7 @@
 	unsigned long flags;
 	int old_link_status;
 	u32 old_link_down_cnt;
+	u64 old_port_speed, new_port_speed;
 
 	spin_lock_irqsave(&fnic->fnic_lock, flags);
 
@@ -62,14 +63,29 @@
 
 	old_link_down_cnt = fnic->link_down_cnt;
 	old_link_status = fnic->link_status;
+	old_port_speed = atomic64_read(
+			&fnic->fnic_stats.misc_stats.current_port_speed);
+
 	fnic->link_status = vnic_dev_link_status(fnic->vdev);
 	fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);
 
+	new_port_speed = vnic_dev_port_speed(fnic->vdev);
+	atomic64_set(&fnic->fnic_stats.misc_stats.current_port_speed,
+			new_port_speed);
+	if (old_port_speed != new_port_speed)
+		shost_printk(KERN_INFO, fnic->lport->host,
+				"Current vnic speed set to :  %llu\n",
+				new_port_speed);
+
 	switch (vnic_dev_port_speed(fnic->vdev)) {
 	case DCEM_PORTSPEED_10G:
 		fc_host_speed(fnic->lport->host)   = FC_PORTSPEED_10GBIT;
 		fnic->lport->link_supported_speeds = FC_PORTSPEED_10GBIT;
 		break;
+	case DCEM_PORTSPEED_20G:
+		fc_host_speed(fnic->lport->host)   = FC_PORTSPEED_20GBIT;
+		fnic->lport->link_supported_speeds = FC_PORTSPEED_20GBIT;
+		break;
 	case DCEM_PORTSPEED_25G:
 		fc_host_speed(fnic->lport->host)   = FC_PORTSPEED_25GBIT;
 		fnic->lport->link_supported_speeds = FC_PORTSPEED_25GBIT;
@@ -836,8 +852,8 @@
 	u32 fcp_bytes_written = 0;
 	unsigned long flags;
 
-	pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
-			 PCI_DMA_FROMDEVICE);
+	dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
+			 DMA_FROM_DEVICE);
 	skb = buf->os_buf;
 	fp = (struct fc_frame *)skb;
 	buf->os_buf = NULL;
@@ -977,9 +993,8 @@
 	skb_reset_transport_header(skb);
 	skb_reset_network_header(skb);
 	skb_put(skb, len);
-	pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE);
-
-	if (pci_dma_mapping_error(fnic->pdev, pa)) {
+	pa = dma_map_single(&fnic->pdev->dev, skb->data, len, DMA_FROM_DEVICE);
+	if (dma_mapping_error(&fnic->pdev->dev, pa)) {
 		r = -ENOMEM;
 		printk(KERN_ERR "PCI mapping failed with error %d\n", r);
 		goto free_skb;
@@ -998,8 +1013,8 @@
 	struct fc_frame *fp = buf->os_buf;
 	struct fnic *fnic = vnic_dev_priv(rq->vdev);
 
-	pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
-			 PCI_DMA_FROMDEVICE);
+	dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
+			 DMA_FROM_DEVICE);
 
 	dev_kfree_skb(fp_skb(fp));
 	buf->os_buf = NULL;
@@ -1018,7 +1033,6 @@
 	struct ethhdr *eth_hdr;
 	struct vlan_ethhdr *vlan_hdr;
 	unsigned long flags;
-	int r;
 
 	if (!fnic->vlan_hw_insert) {
 		eth_hdr = (struct ethhdr *)skb_mac_header(skb);
@@ -1038,11 +1052,10 @@
 		}
 	}
 
-	pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
-
-	r = pci_dma_mapping_error(fnic->pdev, pa);
-	if (r) {
-		printk(KERN_ERR "PCI mapping failed with error %d\n", r);
+	pa = dma_map_single(&fnic->pdev->dev, skb->data, skb->len,
+			DMA_TO_DEVICE);
+	if (dma_mapping_error(&fnic->pdev->dev, pa)) {
+		printk(KERN_ERR "DMA mapping failed\n");
 		goto free_skb;
 	}
 
@@ -1058,7 +1071,7 @@
 
 irq_restore:
 	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
-	pci_unmap_single(fnic->pdev, pa, skb->len, PCI_DMA_TODEVICE);
+	dma_unmap_single(&fnic->pdev->dev, pa, skb->len, DMA_TO_DEVICE);
 free_skb:
 	kfree_skb(skb);
 }
@@ -1115,9 +1128,8 @@
 	if (FC_FCOE_VER)
 		FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);
 
-	pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE);
-
-	if (pci_dma_mapping_error(fnic->pdev, pa)) {
+	pa = dma_map_single(&fnic->pdev->dev, eth_hdr, tot_len, DMA_TO_DEVICE);
+	if (dma_mapping_error(&fnic->pdev->dev, pa)) {
 		ret = -ENOMEM;
 		printk(KERN_ERR "DMA map failed with error %d\n", ret);
 		goto free_skb_on_err;
@@ -1131,8 +1143,7 @@
 	spin_lock_irqsave(&fnic->wq_lock[0], flags);
 
 	if (!vnic_wq_desc_avail(wq)) {
-		pci_unmap_single(fnic->pdev, pa,
-				 tot_len, PCI_DMA_TODEVICE);
+		dma_unmap_single(&fnic->pdev->dev, pa, tot_len, DMA_TO_DEVICE);
 		ret = -1;
 		goto irq_restore;
 	}
@@ -1247,8 +1258,8 @@
 	struct fc_frame *fp = (struct fc_frame *)skb;
 	struct fnic *fnic = vnic_dev_priv(wq->vdev);
 
-	pci_unmap_single(fnic->pdev, buf->dma_addr,
-			 buf->len, PCI_DMA_TODEVICE);
+	dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
+			 DMA_TO_DEVICE);
 	dev_kfree_skb_irq(fp_skb(fp));
 	buf->os_buf = NULL;
 }
@@ -1290,8 +1301,8 @@
 	struct fc_frame *fp = buf->os_buf;
 	struct fnic *fnic = vnic_dev_priv(wq->vdev);
 
-	pci_unmap_single(fnic->pdev, buf->dma_addr,
-			 buf->len, PCI_DMA_TODEVICE);
+	dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
+			 DMA_TO_DEVICE);
 
 	dev_kfree_skb(fp_skb(fp));
 	buf->os_buf = NULL;
diff --git a/drivers/scsi/fnic/fnic_io.h b/drivers/scsi/fnic/fnic_io.h
index e0bc659..1cb6a68 100644
--- a/drivers/scsi/fnic/fnic_io.h
+++ b/drivers/scsi/fnic/fnic_io.h
@@ -70,9 +70,10 @@
 	DCEM_PORTSPEED_NONE = 0,
 	DCEM_PORTSPEED_1G    = 1000,
 	DCEM_PORTSPEED_10G   = 10000,
+	DCEM_PORTSPEED_20G   = 20000,
+	DCEM_PORTSPEED_25G   = 25000,
 	DCEM_PORTSPEED_40G   = 40000,
 	DCEM_PORTSPEED_4x10G = 41000,
-	DCEM_PORTSPEED_25G   = 25000,
 	DCEM_PORTSPEED_100G  = 100000,
 };
 #endif /* _FNIC_IO_H_ */
diff --git a/drivers/scsi/fnic/fnic_isr.c b/drivers/scsi/fnic/fnic_isr.c
index 4e3a502..2fb2731 100644
--- a/drivers/scsi/fnic/fnic_isr.c
+++ b/drivers/scsi/fnic/fnic_isr.c
@@ -51,7 +51,7 @@
 	}
 
 	if (pba & (1 << FNIC_INTX_WQ_RQ_COPYWQ)) {
-		work_done += fnic_wq_copy_cmpl_handler(fnic, -1);
+		work_done += fnic_wq_copy_cmpl_handler(fnic, io_completions);
 		work_done += fnic_wq_cmpl_handler(fnic, -1);
 		work_done += fnic_rq_cmpl_handler(fnic, -1);
 
@@ -72,7 +72,7 @@
 	fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
 	atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
 
-	work_done += fnic_wq_copy_cmpl_handler(fnic, -1);
+	work_done += fnic_wq_copy_cmpl_handler(fnic, io_completions);
 	work_done += fnic_wq_cmpl_handler(fnic, -1);
 	work_done += fnic_rq_cmpl_handler(fnic, -1);
 
@@ -125,7 +125,7 @@
 	fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
 	atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
 
-	wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, -1);
+	wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, io_completions);
 	vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ_COPY],
 				 wq_copy_work_done,
 				 1 /* unmask intr */,
@@ -254,7 +254,7 @@
 		int vecs = n + m + o + 1;
 
 		if (pci_alloc_irq_vectors(fnic->pdev, vecs, vecs,
-				PCI_IRQ_MSIX) < 0) {
+				PCI_IRQ_MSIX) == vecs) {
 			fnic->rq_count = n;
 			fnic->raw_wq_count = m;
 			fnic->wq_copy_count = o;
@@ -280,7 +280,7 @@
 	    fnic->wq_copy_count >= 1 &&
 	    fnic->cq_count >= 3 &&
 	    fnic->intr_count >= 1 &&
-	    pci_alloc_irq_vectors(fnic->pdev, 1, 1, PCI_IRQ_MSI) < 0) {
+	    pci_alloc_irq_vectors(fnic->pdev, 1, 1, PCI_IRQ_MSI) == 1) {
 		fnic->rq_count = 1;
 		fnic->raw_wq_count = 1;
 		fnic->wq_copy_count = 1;
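
The comparison fixes above rely on the pci_alloc_irq_vectors() return convention: it yields the number of vectors actually allocated (at least min_vecs) on success, or a negative errno. Because these call sites pass min_vecs == max_vecs, success can only mean exactly vecs (or 1) vectors, so the old "< 0" tests had the sense inverted. A minimal sketch of the pattern, with want and pdev as stand-ins:

	int nvec = pci_alloc_irq_vectors(pdev, want, want, PCI_IRQ_MSIX);

	if (nvec < 0)
		return nvec;	/* allocation failed; fall back to MSI/INTx */

	/* min_vecs == max_vecs, so nvec == want here. */
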
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index e52599f..18584ab 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -69,6 +69,11 @@
 module_param(fnic_log_level, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(fnic_log_level, "bit mask of fnic logging levels");
 
+
+unsigned int io_completions = FNIC_DFLT_IO_COMPLETIONS;
+module_param(io_completions, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(io_completions, "Max CQ entries to process at a time");
+
 unsigned int fnic_trace_max_pages = 16;
 module_param(fnic_trace_max_pages, uint, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(fnic_trace_max_pages, "Total allocated memory pages "
@@ -115,7 +120,6 @@
 	.this_id = -1,
 	.cmd_per_lun = 3,
 	.can_queue = FNIC_DFLT_IO_REQ,
-	.use_clustering = ENABLE_CLUSTERING,
 	.sg_tablesize = FNIC_MAX_SG_DESC_CNT,
 	.max_sectors = 0xffff,
 	.shost_attrs = fnic_attrs,
@@ -179,6 +183,9 @@
 	case DCEM_PORTSPEED_10G:
 		fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
 		break;
+	case DCEM_PORTSPEED_20G:
+		fc_host_speed(shost) = FC_PORTSPEED_20GBIT;
+		break;
 	case DCEM_PORTSPEED_25G:
 		fc_host_speed(shost) = FC_PORTSPEED_25GBIT;
 		break;
@@ -501,7 +508,7 @@
 	}
 
 	/* Clean up completed IOs and FCS frames */
-	fnic_wq_copy_cmpl_handler(fnic, -1);
+	fnic_wq_copy_cmpl_handler(fnic, io_completions);
 	fnic_wq_cmpl_handler(fnic, -1);
 	fnic_rq_cmpl_handler(fnic, -1);
 
@@ -579,12 +586,7 @@
 
 	host->transportt = fnic_fc_transport;
 
-	err = fnic_stats_debugfs_init(fnic);
-	if (err) {
-		shost_printk(KERN_ERR, fnic->lport->host,
-				"Failed to initialize debugfs for stats\n");
-		fnic_stats_debugfs_remove(fnic);
-	}
+	fnic_stats_debugfs_init(fnic);
 
 	/* Setup PCI resources */
 	pci_set_drvdata(pdev, fnic);
@@ -611,30 +613,15 @@
 	 * limitation for the device.  Try 64-bit first, and
 	 * fail to 32-bit.
 	 */
-	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 	if (err) {
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
 			shost_printk(KERN_ERR, fnic->lport->host,
 				     "No usable DMA configuration "
 				     "aborting\n");
 			goto err_out_release_regions;
 		}
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-		if (err) {
-			shost_printk(KERN_ERR, fnic->lport->host,
-				     "Unable to obtain 32-bit DMA "
-				     "for consistent allocations, aborting.\n");
-			goto err_out_release_regions;
-		}
-	} else {
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-		if (err) {
-			shost_printk(KERN_ERR, fnic->lport->host,
-				     "Unable to obtain 64-bit DMA "
-				     "for consistent allocations, aborting.\n");
-			goto err_out_release_regions;
-		}
 	}
 
 	/* Map vNIC resources from BAR0 */
@@ -666,12 +653,20 @@
 		goto err_out_iounmap;
 	}
 
+	err = vnic_dev_cmd_init(fnic->vdev);
+	if (err) {
+		shost_printk(KERN_ERR, fnic->lport->host,
+				"vnic_dev_cmd_init() returns %d, aborting\n",
+				err);
+		goto err_out_vnic_unregister;
+	}
+
 	err = fnic_dev_wait(fnic->vdev, vnic_dev_open,
-			    vnic_dev_open_done, 0);
+			    vnic_dev_open_done, CMD_OPENF_RQ_ENABLE_THEN_POST);
 	if (err) {
 		shost_printk(KERN_ERR, fnic->lport->host,
 			     "vNIC dev open failed, aborting.\n");
-		goto err_out_vnic_unregister;
+		goto err_out_dev_cmd_deinit;
 	}
 
 	err = vnic_dev_init(fnic->vdev, 0);
@@ -812,6 +807,7 @@
 
 	/* allocate RQ buffers and post them to RQ*/
 	for (i = 0; i < fnic->rq_count; i++) {
+		vnic_rq_enable(&fnic->rq[i]);
 		err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
 		if (err) {
 			shost_printk(KERN_ERR, fnic->lport->host,
@@ -886,15 +882,11 @@
 	/* Enable all queues */
 	for (i = 0; i < fnic->raw_wq_count; i++)
 		vnic_wq_enable(&fnic->wq[i]);
-	for (i = 0; i < fnic->rq_count; i++)
-		vnic_rq_enable(&fnic->rq[i]);
 	for (i = 0; i < fnic->wq_copy_count; i++)
 		vnic_wq_copy_enable(&fnic->wq_copy[i]);
 
 	fc_fabric_login(lp);
 
-	vnic_dev_enable(fnic->vdev);
-
 	err = fnic_request_intr(fnic);
 	if (err) {
 		shost_printk(KERN_ERR, fnic->lport->host,
@@ -902,6 +894,8 @@
 		goto err_out_free_exch_mgr;
 	}
 
+	vnic_dev_enable(fnic->vdev);
+
 	for (i = 0; i < fnic->intr_count; i++)
 		vnic_intr_unmask(&fnic->intr[i]);
 
@@ -930,6 +924,7 @@
 	fnic_clear_intr_mode(fnic);
 err_out_dev_close:
 	vnic_dev_close(fnic->vdev);
+err_out_dev_cmd_deinit:
 err_out_vnic_unregister:
 	vnic_dev_unregister(fnic->vdev);
 err_out_iounmap:
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 8cbd3c9..80608b5 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -126,17 +126,17 @@
 				   struct scsi_cmnd *sc)
 {
 	if (io_req->sgl_list_pa)
-		pci_unmap_single(fnic->pdev, io_req->sgl_list_pa,
+		dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
 				 sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt,
-				 PCI_DMA_TODEVICE);
+				 DMA_TO_DEVICE);
 	scsi_dma_unmap(sc);
 
 	if (io_req->sgl_cnt)
 		mempool_free(io_req->sgl_list_alloc,
 			     fnic->io_sgl_pool[io_req->sgl_type]);
 	if (io_req->sense_buf_pa)
-		pci_unmap_single(fnic->pdev, io_req->sense_buf_pa,
-				 SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
+		dma_unmap_single(&fnic->pdev->dev, io_req->sense_buf_pa,
+				 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
 }
 
 /* Free up Copy Wq descriptors. Called with copy_wq lock held */
@@ -180,20 +180,19 @@
 __fnic_set_state_flags(struct fnic *fnic, unsigned long st_flags,
 			unsigned long clearbits)
 {
-	struct Scsi_Host *host = fnic->lport->host;
-	int sh_locked = spin_is_locked(host->host_lock);
 	unsigned long flags = 0;
+	unsigned long host_lock_flags = 0;
 
-	if (!sh_locked)
-		spin_lock_irqsave(host->host_lock, flags);
+	spin_lock_irqsave(&fnic->fnic_lock, flags);
+	spin_lock_irqsave(fnic->lport->host->host_lock, host_lock_flags);
 
 	if (clearbits)
 		fnic->state_flags &= ~st_flags;
 	else
 		fnic->state_flags |= st_flags;
 
-	if (!sh_locked)
-		spin_unlock_irqrestore(host->host_lock, flags);
+	spin_unlock_irqrestore(fnic->lport->host->host_lock, host_lock_flags);
+	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 
 	return;
 }
@@ -330,7 +329,6 @@
 	int flags;
 	u8 exch_flags;
 	struct scsi_lun fc_lun;
-	int r;
 
 	if (sg_count) {
 		/* For each SGE, create a device desc entry */
@@ -342,30 +340,25 @@
 			desc++;
 		}
 
-		io_req->sgl_list_pa = pci_map_single
-			(fnic->pdev,
-			 io_req->sgl_list,
-			 sizeof(io_req->sgl_list[0]) * sg_count,
-			 PCI_DMA_TODEVICE);
-
-		r = pci_dma_mapping_error(fnic->pdev, io_req->sgl_list_pa);
-		if (r) {
-			printk(KERN_ERR "PCI mapping failed with error %d\n", r);
+		io_req->sgl_list_pa = dma_map_single(&fnic->pdev->dev,
+				io_req->sgl_list,
+				sizeof(io_req->sgl_list[0]) * sg_count,
+				DMA_TO_DEVICE);
+		if (dma_mapping_error(&fnic->pdev->dev, io_req->sgl_list_pa)) {
+			printk(KERN_ERR "DMA mapping failed\n");
 			return SCSI_MLQUEUE_HOST_BUSY;
 		}
 	}
 
-	io_req->sense_buf_pa = pci_map_single(fnic->pdev,
+	io_req->sense_buf_pa = dma_map_single(&fnic->pdev->dev,
 					      sc->sense_buffer,
 					      SCSI_SENSE_BUFFERSIZE,
-					      PCI_DMA_FROMDEVICE);
-
-	r = pci_dma_mapping_error(fnic->pdev, io_req->sense_buf_pa);
-	if (r) {
-		pci_unmap_single(fnic->pdev, io_req->sgl_list_pa,
+					      DMA_FROM_DEVICE);
+	if (dma_mapping_error(&fnic->pdev->dev, io_req->sense_buf_pa)) {
+		dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
 				sizeof(io_req->sgl_list[0]) * sg_count,
-				PCI_DMA_TODEVICE);
-		printk(KERN_ERR "PCI mapping failed with error %d\n", r);
+				DMA_TO_DEVICE);
+		printk(KERN_ERR "DMA mapping failed\n");
 		return SCSI_MLQUEUE_HOST_BUSY;
 	}
 
@@ -1332,13 +1325,32 @@
 	unsigned int wq_work_done = 0;
 	unsigned int i, cq_index;
 	unsigned int cur_work_done;
+	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
+	u64 start_jiffies = 0;
+	u64 end_jiffies = 0;
+	u64 delta_jiffies = 0;
+	u64 delta_ms = 0;
 
 	for (i = 0; i < fnic->wq_copy_count; i++) {
 		cq_index = i + fnic->raw_wq_count + fnic->rq_count;
+
+		start_jiffies = jiffies;
 		cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index],
 						     fnic_fcpio_cmpl_handler,
 						     copy_work_to_do);
+		end_jiffies = jiffies;
+
 		wq_work_done += cur_work_done;
+		delta_jiffies = end_jiffies - start_jiffies;
+		if (delta_jiffies >
+			(u64) atomic64_read(&misc_stats->max_isr_jiffies)) {
+			atomic64_set(&misc_stats->max_isr_jiffies,
+					delta_jiffies);
+			delta_ms = jiffies_to_msecs(delta_jiffies);
+			atomic64_set(&misc_stats->max_isr_time_ms, delta_ms);
+			atomic64_set(&misc_stats->corr_work_done,
+					cur_work_done);
+		}
 	}
 	return wq_work_done;
 }
@@ -1403,8 +1415,9 @@
 cleanup_scsi_cmd:
 		sc->result = DID_TRANSPORT_DISRUPTED << 16;
 		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
-			      "%s: sc duration = %lu DID_TRANSPORT_DISRUPTED\n",
-			      __func__, (jiffies - start_time));
+			      "%s: tag:0x%x : sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n",
+			      __func__, sc->request->tag, sc,
+			      (jiffies - start_time));
 
 		if (atomic64_read(&fnic->io_cmpl_skip))
 			atomic64_dec(&fnic->io_cmpl_skip);
@@ -1413,6 +1426,11 @@
 
 		/* Complete the command to SCSI */
 		if (sc->scsi_done) {
+			if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED))
+				shost_printk(KERN_ERR, fnic->lport->host,
+				"Calling done for IO not issued to fw: tag:0x%x sc:0x%p\n",
+				 sc->request->tag, sc);
+
 			FNIC_TRACE(fnic_cleanup_io,
 				  sc->device->host->host_no, i, sc,
 				  jiffies_to_msecs(jiffies - start_time),
@@ -2272,33 +2290,17 @@
 static inline int
 fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
 {
-	struct blk_queue_tag *bqt = fnic->lport->host->bqt;
-	int tag, ret = SCSI_NO_TAG;
+	struct request_queue *q = sc->request->q;
+	struct request *dummy;
 
-	BUG_ON(!bqt);
-	if (!bqt) {
-		pr_err("Tags are not supported\n");
-		goto end;
-	}
+	dummy = blk_mq_alloc_request(q, REQ_OP_WRITE, BLK_MQ_REQ_NOWAIT);
+	if (IS_ERR(dummy))
+		return SCSI_NO_TAG;
 
-	do {
-		tag = find_next_zero_bit(bqt->tag_map, bqt->max_depth, 1);
-		if (tag >= bqt->max_depth) {
-			pr_err("Tag allocation failure\n");
-			goto end;
-		}
-	} while (test_and_set_bit(tag, bqt->tag_map));
+	sc->tag = sc->request->tag = dummy->tag;
+	sc->host_scribble = (unsigned char *)dummy;
 
-	bqt->tag_index[tag] = sc->request;
-	sc->request->tag = tag;
-	sc->tag = tag;
-	if (!sc->request->special)
-		sc->request->special = sc;
-
-	ret = tag;
-
-end:
-	return ret;
+	return dummy->tag;
 }
 
 /**
@@ -2308,20 +2310,9 @@
 static inline void
 fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc)
 {
-	struct blk_queue_tag *bqt = fnic->lport->host->bqt;
-	int tag = sc->request->tag;
+	struct request *dummy = (struct request *)sc->host_scribble;
 
-	if (tag == SCSI_NO_TAG)
-		return;
-
-	BUG_ON(!bqt || !bqt->tag_index[tag]);
-	if (!bqt)
-		return;
-
-	bqt->tag_index[tag] = NULL;
-	clear_bit(tag, bqt->tag_map);
-
-	return;
+	blk_mq_free_request(dummy);
 }
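
Taken together, the two helpers form a reserve/release pair: device-reset ioctls reach the driver without a block-layer request, so a throw-away request is allocated purely to claim a unique blk-mq tag, then freed when the reset finishes. A compressed sketch of that lifetime, with q standing in for sc->request->q:

	struct request *dummy;

	dummy = blk_mq_alloc_request(q, REQ_OP_WRITE, BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(dummy))
		return SCSI_NO_TAG;	/* tag space exhausted; fail the reset */

	/* ... issue the reset to firmware using dummy->tag ... */

	blk_mq_free_request(dummy);
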
 
 /*
@@ -2380,19 +2371,9 @@
 	tag = sc->request->tag;
 	if (unlikely(tag < 0)) {
 		/*
-		 * XXX(hch): current the midlayer fakes up a struct
-		 * request for the explicit reset ioctls, and those
-		 * don't have a tag allocated to them.  The below
-		 * code pokes into midlayer structures to paper over
-		 * this design issue, but that won't work for blk-mq.
-		 *
-		 * Either someone who can actually test the hardware
-		 * will have to come up with a similar hack for the
-		 * blk-mq case, or we'll have to bite the bullet and
-		 * fix the way the EH ioctls work for real, but until
-		 * that happens we fail these explicit requests here.
+		 * Really should fix the midlayer to pass in a proper
+		 * request for ioctls...
 		 */
-
 		tag = fnic_scsi_host_start_tag(fnic, sc);
 		if (unlikely(tag == SCSI_NO_TAG))
 			goto fnic_device_reset_end;
diff --git a/drivers/scsi/fnic/fnic_stats.h b/drivers/scsi/fnic/fnic_stats.h
index 9daa6ad..086f729 100644
--- a/drivers/scsi/fnic/fnic_stats.h
+++ b/drivers/scsi/fnic/fnic_stats.h
@@ -97,6 +97,9 @@
 struct misc_stats {
 	u64 last_isr_time;
 	u64 last_ack_time;
+	atomic64_t max_isr_jiffies;
+	atomic64_t max_isr_time_ms;
+	atomic64_t corr_work_done;
 	atomic64_t isr_count;
 	atomic64_t max_cq_entries;
 	atomic64_t ack_index_out_of_range;
@@ -113,6 +116,7 @@
 	atomic64_t queue_fulls;
 	atomic64_t rport_not_ready;
 	atomic64_t frame_errors;
+	atomic64_t current_port_speed;
 };
 
 struct fnic_stats {
@@ -134,6 +138,6 @@
 };
 
 int fnic_get_stats_data(struct stats_debug_info *, struct fnic_stats *);
-int fnic_stats_debugfs_init(struct fnic *);
+void fnic_stats_debugfs_init(struct fnic *);
 void fnic_stats_debugfs_remove(struct fnic *);
 #endif /* _FNIC_STATS_H_ */
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index 8271785..a0d01ae 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -409,6 +409,9 @@
 	len += snprintf(debug->debug_buffer + len, buf_size - len,
 		  "Last ISR time: %llu (%8llu.%09lu)\n"
 		  "Last ACK time: %llu (%8llu.%09lu)\n"
+		  "Max ISR jiffies: %llu\n"
+		  "Max ISR time (ms) (0 denotes < 1 ms): %llu\n"
+		  "Corr. work done: %llu\n"
 		  "Number of ISRs: %lld\n"
 		  "Maximum CQ Entries: %lld\n"
 		  "Number of ACK index out of range: %lld\n"
@@ -428,6 +431,9 @@
 		  (s64)val1.tv_sec, val1.tv_nsec,
 		  (u64)stats->misc_stats.last_ack_time,
 		  (s64)val2.tv_sec, val2.tv_nsec,
+		  (u64)atomic64_read(&stats->misc_stats.max_isr_jiffies),
+		  (u64)atomic64_read(&stats->misc_stats.max_isr_time_ms),
+		  (u64)atomic64_read(&stats->misc_stats.corr_work_done),
 		  (u64)atomic64_read(&stats->misc_stats.isr_count),
 		  (u64)atomic64_read(&stats->misc_stats.max_cq_entries),
 		  (u64)atomic64_read(&stats->misc_stats.ack_index_out_of_range),
@@ -446,6 +452,11 @@
 		  (u64)atomic64_read(&stats->misc_stats.rport_not_ready),
 		  (u64)atomic64_read(&stats->misc_stats.frame_errors));
 
+	len += snprintf(debug->debug_buffer + len, buf_size - len,
+			"Firmware reported port speed: %llu\n",
+			(u64)atomic64_read(
+				&stats->misc_stats.current_port_speed));
+
 	return len;
 
 }
@@ -468,14 +479,13 @@
 	fnic_max_trace_entries = (trace_max_pages * PAGE_SIZE)/
 					  FNIC_ENTRY_SIZE_BYTES;
 
-	fnic_trace_buf_p = (unsigned long)vmalloc((trace_max_pages * PAGE_SIZE));
+	fnic_trace_buf_p = (unsigned long)vzalloc(trace_max_pages * PAGE_SIZE);
 	if (!fnic_trace_buf_p) {
 		printk(KERN_ERR PFX "Failed to allocate memory "
 				  "for fnic_trace_buf_p\n");
 		err = -ENOMEM;
 		goto err_fnic_trace_buf_init;
 	}
-	memset((void *)fnic_trace_buf_p, 0, (trace_max_pages * PAGE_SIZE));
 
 	fnic_trace_entries.page_offset =
 		vmalloc(array_size(fnic_max_trace_entries,
@@ -504,15 +514,10 @@
 		fnic_trace_entries.page_offset[i] = fnic_buf_head;
 		fnic_buf_head += FNIC_ENTRY_SIZE_BYTES;
 	}
-	err = fnic_trace_debugfs_init();
-	if (err < 0) {
-		pr_err("fnic: Failed to initialize debugfs for tracing\n");
-		goto err_fnic_trace_debugfs_init;
-	}
+	fnic_trace_debugfs_init();
 	pr_info("fnic: Successfully Initialized Trace Buffer\n");
 	return err;
-err_fnic_trace_debugfs_init:
-	fnic_trace_free();
+
 err_fnic_trace_buf_init:
 	return err;
 }
@@ -597,16 +602,10 @@
 		fc_trace_entries.page_offset[i] = fc_trace_buf_head;
 		fc_trace_buf_head += FC_TRC_SIZE_BYTES;
 	}
-	err = fnic_fc_trace_debugfs_init();
-	if (err < 0) {
-		pr_err("fnic: Failed to initialize FC_CTLR tracing.\n");
-		goto err_fnic_fc_ctlr_trace_debugfs_init;
-	}
+	fnic_fc_trace_debugfs_init();
 	pr_info("fnic: Successfully Initialized FC_CTLR Trace Buffer\n");
 	return err;
 
-err_fnic_fc_ctlr_trace_debugfs_init:
-	fnic_fc_trace_free();
 err_fnic_fc_ctlr_trace_buf_init:
 	return err;
 }
diff --git a/drivers/scsi/fnic/fnic_trace.h b/drivers/scsi/fnic/fnic_trace.h
index e375d0c..8aa55c1 100644
--- a/drivers/scsi/fnic/fnic_trace.h
+++ b/drivers/scsi/fnic/fnic_trace.h
@@ -111,7 +111,7 @@
 void fnic_trace_free(void);
 int fnic_debugfs_init(void);
 void fnic_debugfs_terminate(void);
-int fnic_trace_debugfs_init(void);
+void fnic_trace_debugfs_init(void);
 void fnic_trace_debugfs_terminate(void);
 
 /* Fnic FC CTLR Trace related functions */
@@ -123,7 +123,7 @@
 void copy_and_format_trace_data(struct fc_trace_hdr *tdata,
 				fnic_dbgfs_t *fnic_dbgfs_prt,
 				int *len, u8 rdata_flag);
-int fnic_fc_trace_debugfs_init(void);
+void fnic_fc_trace_debugfs_init(void);
 void fnic_fc_trace_debugfs_terminate(void);
 
 #endif
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
index ba69d61..78af9cc 100644
--- a/drivers/scsi/fnic/vnic_dev.c
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -27,6 +27,24 @@
 #include "vnic_devcmd.h"
 #include "vnic_dev.h"
 #include "vnic_stats.h"
+#include "vnic_wq.h"
+
+struct devcmd2_controller {
+	struct vnic_wq_ctrl *wq_ctrl;
+	struct vnic_dev_ring results_ring;
+	struct vnic_wq wq;
+	struct vnic_devcmd2 *cmd_ring;
+	struct devcmd2_result *result;
+	u16 next_result;
+	u16 result_size;
+	int color;
+};
+
+enum vnic_proxy_type {
+	PROXY_NONE,
+	PROXY_BY_BDF,
+	PROXY_BY_INDEX,
+};
 
 struct vnic_res {
 	void __iomem *vaddr;
@@ -48,6 +66,12 @@
 	dma_addr_t stats_pa;
 	struct vnic_devcmd_fw_info *fw_info;
 	dma_addr_t fw_info_pa;
+	enum vnic_proxy_type proxy;
+	u32 proxy_index;
+	u64 args[VNIC_DEVCMD_NARGS];
+	struct devcmd2_controller *devcmd2;
+	int (*devcmd_rtn)(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+			int wait);
 };
 
 #define VNIC_MAX_RES_HDR_SIZE \
@@ -119,6 +143,7 @@
 			}
 			break;
 		case RES_TYPE_INTR_PBA_LEGACY:
+		case RES_TYPE_DEVCMD2:
 		case RES_TYPE_DEVCMD:
 			len = count;
 			break;
@@ -195,9 +220,9 @@
 {
 	vnic_dev_desc_ring_size(ring, desc_count, desc_size);
 
-	ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
+	ring->descs_unaligned = dma_alloc_coherent(&vdev->pdev->dev,
 		ring->size_unaligned,
-		&ring->base_addr_unaligned);
+		&ring->base_addr_unaligned, GFP_KERNEL);
 
 	if (!ring->descs_unaligned) {
 		printk(KERN_ERR
@@ -221,7 +246,7 @@
 void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
 {
 	if (ring->descs) {
-		pci_free_consistent(vdev->pdev,
+		dma_free_coherent(&vdev->pdev->dev,
 			ring->size_unaligned,
 			ring->descs_unaligned,
 			ring->base_addr_unaligned);
@@ -229,8 +254,7 @@
 	}
 }
 
-int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
-	u64 *a0, u64 *a1, int wait)
+int vnic_dev_cmd1(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, int wait)
 {
 	struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
 	int delay;
@@ -244,6 +268,8 @@
 		EBUSY,  /* ERR_EBUSY */
 	};
 	int err;
+	u64 *a0 = &vdev->args[0];
+	u64 *a1 = &vdev->args[1];
 
 	status = ioread32(&devcmd->status);
 	if (status & STAT_BUSY) {
@@ -290,6 +316,223 @@
 	return -ETIMEDOUT;
 }
 
+int vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+		int wait)
+{
+	struct devcmd2_controller *dc2c = vdev->devcmd2;
+	struct devcmd2_result *result;
+	u8 color;
+	unsigned int i;
+	int delay;
+	int err;
+	u32 fetch_index;
+	u32 posted;
+	u32 new_posted;
+
+	posted = ioread32(&dc2c->wq_ctrl->posted_index);
+	fetch_index = ioread32(&dc2c->wq_ctrl->fetch_index);
+
+	if (posted == 0xFFFFFFFF || fetch_index == 0xFFFFFFFF) {
+		/* Hardware surprise removal: return error */
+		pr_err("%s: devcmd2 invalid posted or fetch index on cmd %d\n",
+				pci_name(vdev->pdev), _CMD_N(cmd));
+		pr_err("%s: fetch index: %u, posted index: %u\n",
+				pci_name(vdev->pdev), fetch_index, posted);
+
+		return -ENODEV;
+
+	}
+
+	new_posted = (posted + 1) % DEVCMD2_RING_SIZE;
+
+	if (new_posted == fetch_index) {
+		pr_err("%s: devcmd2 wq full while issuing cmd %d\n",
+				pci_name(vdev->pdev), _CMD_N(cmd));
+		pr_err("%s: fetch index: %u, posted index: %u\n",
+				pci_name(vdev->pdev), fetch_index, posted);
+		return -EBUSY;
+
+	}
+	dc2c->cmd_ring[posted].cmd = cmd;
+	dc2c->cmd_ring[posted].flags = 0;
+
+	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
+		dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT;
+	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
+		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
+			dc2c->cmd_ring[posted].args[i] = vdev->args[i];
+
+	}
+
+	/* A write memory barrier prevents compiler and/or CPU
+	 * reordering, so the descriptor cannot be posted before it
+	 * is fully initialized; otherwise the hardware could read
+	 * stale descriptor fields.
+	 */
+	wmb();
+	iowrite32(new_posted, &dc2c->wq_ctrl->posted_index);
+
+	if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
+		return 0;
+
+	result = dc2c->result + dc2c->next_result;
+	color = dc2c->color;
+
+	dc2c->next_result++;
+	if (dc2c->next_result == dc2c->result_size) {
+		dc2c->next_result = 0;
+		dc2c->color = dc2c->color ? 0 : 1;
+	}
+
+	for (delay = 0; delay < wait; delay++) {
+		udelay(100);
+		if (result->color == color) {
+			if (result->error) {
+				err = -(int) result->error;
+				if (err != ERR_ECMDUNKNOWN ||
+						cmd != CMD_CAPABILITY)
+					pr_err("%s:Error %d devcmd %d\n",
+						pci_name(vdev->pdev),
+						err, _CMD_N(cmd));
+				return err;
+			}
+			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
+				rmb(); /* prevent reorder while reading result */
+				for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
+					vdev->args[i] = result->results[i];
+			}
+			return 0;
+		}
+	}
+
+	pr_err("%s:Timed out devcmd %d\n", pci_name(vdev->pdev), _CMD_N(cmd));
+
+	return -ETIMEDOUT;
+}
+
+
+int vnic_dev_init_devcmd1(struct vnic_dev *vdev)
+{
+	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
+	if (!vdev->devcmd)
+		return -ENODEV;
+
+	vdev->devcmd_rtn = &vnic_dev_cmd1;
+	return 0;
+}
+
+
+int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
+{
+	int err;
+	unsigned int fetch_index;
+
+	if (vdev->devcmd2)
+		return 0;
+
+	vdev->devcmd2 = kzalloc(sizeof(*vdev->devcmd2), GFP_ATOMIC);
+	if (!vdev->devcmd2)
+		return -ENOMEM;
+
+	vdev->devcmd2->color = 1;
+	vdev->devcmd2->result_size = DEVCMD2_RING_SIZE;
+	err = vnic_wq_devcmd2_alloc(vdev, &vdev->devcmd2->wq,
+				DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
+	if (err)
+		goto err_free_devcmd2;
+
+	fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index);
+	if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
+		pr_err("error in devcmd2 init");
+		return -ENODEV;
+	}
+
+	/*
+	 * Never change fetch_index; when setting up the WQ for
+	 * devcmd2, initialize posted_index to the same value as
+	 * fetch_index.
+	 */
+	vnic_wq_init_start(&vdev->devcmd2->wq, 0, fetch_index,
+			fetch_index, 0, 0);
+
+	vnic_wq_enable(&vdev->devcmd2->wq);
+
+	err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring,
+			DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
+	if (err)
+		goto err_free_wq;
+
+	vdev->devcmd2->result =
+		(struct devcmd2_result *) vdev->devcmd2->results_ring.descs;
+	vdev->devcmd2->cmd_ring =
+		(struct vnic_devcmd2 *) vdev->devcmd2->wq.ring.descs;
+	vdev->devcmd2->wq_ctrl = vdev->devcmd2->wq.ctrl;
+	vdev->args[0] = (u64) vdev->devcmd2->results_ring.base_addr |
+				VNIC_PADDR_TARGET;
+	vdev->args[1] = DEVCMD2_RING_SIZE;
+
+	err = vnic_dev_cmd2(vdev, CMD_INITIALIZE_DEVCMD2, 1000);
+	if (err)
+		goto err_free_desc_ring;
+
+	vdev->devcmd_rtn = &vnic_dev_cmd2;
+
+	return 0;
+
+err_free_desc_ring:
+	vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
+err_free_wq:
+	vnic_wq_disable(&vdev->devcmd2->wq);
+	vnic_wq_free(&vdev->devcmd2->wq);
+err_free_devcmd2:
+	kfree(vdev->devcmd2);
+	vdev->devcmd2 = NULL;
+
+	return err;
+}
+
+
+void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
+{
+	vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
+	vnic_wq_disable(&vdev->devcmd2->wq);
+	vnic_wq_free(&vdev->devcmd2->wq);
+	kfree(vdev->devcmd2);
+	vdev->devcmd2 = NULL;
+	vdev->devcmd_rtn = &vnic_dev_cmd1;
+}
+
+
+int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
+	enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
+{
+	int err;
+
+	vdev->args[0] = *a0;
+	vdev->args[1] = *a1;
+
+	err = (*vdev->devcmd_rtn)(vdev, cmd, wait);
+
+	*a0 = vdev->args[0];
+	*a1 = vdev->args[1];
+
+	return err;
+}
+
+
+int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+	u64 *a0, u64 *a1, int wait)
+{
+	memset(vdev->args, 0, sizeof(vdev->args));
+
+	switch (vdev->proxy) {
+	case PROXY_NONE:
+	default:
+		return vnic_dev_cmd_no_proxy(vdev, cmd, a0, a1, wait);
+	}
+}
+
+
 int vnic_dev_fw_info(struct vnic_dev *vdev,
 	struct vnic_devcmd_fw_info **fw_info)
 {
@@ -298,9 +541,9 @@
 	int err = 0;
 
 	if (!vdev->fw_info) {
-		vdev->fw_info = pci_alloc_consistent(vdev->pdev,
+		vdev->fw_info = dma_alloc_coherent(&vdev->pdev->dev,
 			sizeof(struct vnic_devcmd_fw_info),
-			&vdev->fw_info_pa);
+			&vdev->fw_info_pa, GFP_KERNEL);
 		if (!vdev->fw_info)
 			return -ENOMEM;
 
@@ -361,8 +604,8 @@
 	int wait = 1000;
 
 	if (!vdev->stats) {
-		vdev->stats = pci_alloc_consistent(vdev->pdev,
-			sizeof(struct vnic_stats), &vdev->stats_pa);
+		vdev->stats = dma_alloc_coherent(&vdev->pdev->dev,
+			sizeof(struct vnic_stats), &vdev->stats_pa, GFP_KERNEL);
 		if (!vdev->stats)
 			return -ENOMEM;
 	}
@@ -523,9 +766,9 @@
 	int wait = 1000;
 
 	if (!vdev->notify) {
-		vdev->notify = pci_alloc_consistent(vdev->pdev,
+		vdev->notify = dma_alloc_coherent(&vdev->pdev->dev,
 			sizeof(struct vnic_devcmd_notify),
-			&vdev->notify_pa);
+			&vdev->notify_pa, GFP_KERNEL);
 		if (!vdev->notify)
 			return -ENOMEM;
 	}
@@ -647,23 +890,25 @@
 {
 	if (vdev) {
 		if (vdev->notify)
-			pci_free_consistent(vdev->pdev,
+			dma_free_coherent(&vdev->pdev->dev,
 				sizeof(struct vnic_devcmd_notify),
 				vdev->notify,
 				vdev->notify_pa);
 		if (vdev->linkstatus)
-			pci_free_consistent(vdev->pdev,
+			dma_free_coherent(&vdev->pdev->dev,
 				sizeof(u32),
 				vdev->linkstatus,
 				vdev->linkstatus_pa);
 		if (vdev->stats)
-			pci_free_consistent(vdev->pdev,
+			dma_free_coherent(&vdev->pdev->dev,
 				sizeof(struct vnic_stats),
 				vdev->stats, vdev->stats_pa);
 		if (vdev->fw_info)
-			pci_free_consistent(vdev->pdev,
+			dma_free_coherent(&vdev->pdev->dev,
 				sizeof(struct vnic_devcmd_fw_info),
 				vdev->fw_info, vdev->fw_info_pa);
+		if (vdev->devcmd2)
+			vnic_dev_deinit_devcmd2(vdev);
 		kfree(vdev);
 	}
 }
@@ -683,13 +928,26 @@
 	if (vnic_dev_discover_res(vdev, bar))
 		goto err_out;
 
-	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
-	if (!vdev->devcmd)
-		goto err_out;
-
 	return vdev;
 
 err_out:
 	vnic_dev_unregister(vdev);
 	return NULL;
 }
+
+int vnic_dev_cmd_init(struct vnic_dev *vdev)
+{
+	int err;
+	void *p;
+
+	p = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
+	if (p) {
+		pr_err("fnic: DEVCMD2 resource found!\n");
+		err = vnic_dev_init_devcmd2(vdev);
+	} else {
+		pr_err("fnic: DEVCMD2 not found, fall back to Devcmd\n");
+		err = vnic_dev_init_devcmd1(vdev);
+	}
+
+	return err;
+}
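
vnic_dev_cmd2() above polls completions with a color bit. A stripped-down sketch of just that mechanism (all demo_* names are hypothetical): the producer writes each slot with the current generation's color, and the consumer flips its expected color each time it wraps, so a slot is fresh exactly when the colors match.

#include <linux/errno.h>
#include <linux/types.h>

struct demo_result {	/* mirrors error/color in struct devcmd2_result */
	u8 error;
	u8 color;
};

static int demo_poll_result(struct demo_result *ring, u16 size,
			    u16 *next, u8 *expect)
{
	struct demo_result *r = &ring[*next];

	if (r->color != *expect)
		return -EAGAIN;		/* slot not yet written */

	if (++(*next) == size) {	/* wrapped: flip expected color */
		*next = 0;
		*expect = !*expect;
	}

	return r->error ? -(int)r->error : 0;
}
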
diff --git a/drivers/scsi/fnic/vnic_dev.h b/drivers/scsi/fnic/vnic_dev.h
index 40d4195..ef5309a 100644
--- a/drivers/scsi/fnic/vnic_dev.h
+++ b/drivers/scsi/fnic/vnic_dev.h
@@ -36,6 +36,7 @@
 #define vnic_dev_fw_info fnic_dev_fw_info
 #define vnic_dev_spec fnic_dev_spec
 #define vnic_dev_stats_clear fnic_dev_stats_clear
+#define vnic_dev_cmd_init fnic_dev_cmd_init
 #define vnic_dev_stats_dump fnic_dev_stats_dump
 #define vnic_dev_hang_notify fnic_dev_hang_notify
 #define vnic_dev_packet_filter fnic_dev_packet_filter
@@ -128,6 +129,7 @@
 int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset,
 		  unsigned int size, void *value);
 int vnic_dev_stats_clear(struct vnic_dev *vdev);
+int vnic_dev_cmd_init(struct vnic_dev *vdev);
 int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
 int vnic_dev_hang_notify(struct vnic_dev *vdev);
 void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
diff --git a/drivers/scsi/fnic/vnic_devcmd.h b/drivers/scsi/fnic/vnic_devcmd.h
index 3e2fcbd..c5dde55 100644
--- a/drivers/scsi/fnic/vnic_devcmd.h
+++ b/drivers/scsi/fnic/vnic_devcmd.h
@@ -170,7 +170,8 @@
 
 	/* variant of CMD_INIT, with provisioning info
 	 *     (u64)a0=paddr of vnic_devcmd_provinfo
-	 *     (u32)a1=sizeof provision info */
+	 *     (u32)a1=sizeof provision info
+	 */
 	CMD_INIT_PROV_INFO	= _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 27),
 
 	/* enable virtual link */
@@ -262,12 +263,132 @@
 	 *             non-zero for resetting vlan to the default
 	 * out: (u16)a0=old default vlan
 	 */
-	CMD_SET_DEFAULT_VLAN = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 46)
+	CMD_SET_DEFAULT_VLAN = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 46),
+
+	/* init_prov_info2:
+	 * Variant of CMD_INIT_PROV_INFO, where it will not try to enable
+	 * the vnic until CMD_ENABLE2 is issued.
+	 *     (u64)a0=paddr of vnic_devcmd_provinfo
+	 *     (u32)a1=sizeof provision info
+	 */
+	CMD_INIT_PROV_INFO2  = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 47),
+
+	/* enable2:
+	 *      (u32)a0=0                  ==> standby
+	 *             =CMD_ENABLE2_ACTIVE ==> active
+	 */
+	CMD_ENABLE2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 48),
+
+	/*
+	 * cmd_status:
+	 *     Returns the status of the specified command
+	 * Input:
+	 *     a0 = command for which status is being queried.
+	 *          Possible values are:
+	 *              CMD_SOFT_RESET
+	 *              CMD_HANG_RESET
+	 *              CMD_OPEN
+	 *              CMD_INIT
+	 *              CMD_INIT_PROV_INFO
+	 *              CMD_DEINIT
+	 *              CMD_INIT_PROV_INFO2
+	 *              CMD_ENABLE2
+	 * Output:
+	 *     if status == STAT_ERROR
+	 *        a0 = ERR_ENOTSUPPORTED - status for command in a0 is
+	 *                                 not supported
+	 *     if status == STAT_NONE
+	 *        a0 = status of the devcmd specified in a0 as follows.
+	 *             ERR_SUCCESS   - command in a0 completed successfully
+	 *             ERR_EINPROGRESS - command in a0 is still in progress
+	 */
+	CMD_STATUS = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 49),
+
+	/*
+	 * Returns interrupt coalescing timer conversion factors.
+	 * After calling this devcmd, the ENIC driver can convert
+	 * interrupt coalescing timer in usec into CPU cycles as follows:
+	 *
+	 *   intr_timer_cycles = intr_timer_usec * multiplier / divisor
+	 *
+	 * Interrupt coalescing timer in usecs can be converted/obtained
+	 * from CPU cycles as follows:
+	 *
+	 *   intr_timer_usec = intr_timer_cycles * divisor / multiplier
+	 *
+	 * in: none
+	 * out: (u32)a0 = multiplier
+	 *      (u32)a1 = divisor
+	 *      (u32)a2 = maximum timer value in usec
+	 */
+	CMD_INTR_COAL_CONVERT = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 50),
+
+	/*
+	 * ISCSI DUMP API:
+	 * in: (u64)a0=paddr of the param or param itself
+	 *     (u32)a1=ISCSI_CMD_xxx
+	 */
+	CMD_ISCSI_DUMP_REQ = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 51),
+
+	/*
+	 * ISCSI DUMP STATUS API:
+	 * in: (u32)a0=cmd tag
+	 * in: (u32)a1=ISCSI_CMD_xxx
+	 * out: (u32)a0=cmd status
+	 */
+	CMD_ISCSI_DUMP_STATUS = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 52),
+
+	/*
+	 * Subvnic migration from MQ <--> VF.
+	 * Enable the LIF migration from MQ to VF and vice versa. MQ and VF
+	 * indexes are statically bound at the time of initialization.
+	 * Based on the direction of migration, the resources of either
+	 * MQ or the VF shall be attached to the LIF.
+	 * in:        (u32)a0=Direction of Migration
+	 *					0=> Migrate to VF
+	 *					1=> Migrate to MQ
+	 *            (u32)a1=VF index (MQ index)
+	 */
+	CMD_MIGRATE_SUBVNIC = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 53),
+
+	/*
+	 * Register / Deregister the notification block for MQ subvnics
+	 * in:
+	 *   (u64)a0=paddr to notify (set paddr=0 to unset)
+	 *   (u32)a1 & 0x00000000ffffffff=sizeof(struct vnic_devcmd_notify)
+	 *   (u16)a1 & 0x0000ffff00000000=intr num (-1 for no intr)
+	 * out:
+	 *   (u32)a1 = effective size
+	 */
+	CMD_SUBVNIC_NOTIFY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 54),
+
+	/*
+	 * Set the predefined mac address as default
+	 * in:
+	 *   (u48)a0=mac addr
+	 */
+	CMD_SET_MAC_ADDR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 55),
+
+	/* Update the provisioning info of the given VIF
+	 *     (u64)a0=paddr of vnic_devcmd_provinfo
+	 *     (u32)a1=sizeof provision info
+	 */
+	CMD_PROV_INFO_UPDATE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 56),
+
+	/*
+	 * Initialization for the devcmd2 interface.
+	 * in: (u64) a0=host result buffer physical address
+	 * in: (u16) a1=number of entries in result buffer
+	 */
+	CMD_INITIALIZE_DEVCMD2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 57)
 };
 
 /* flags for CMD_OPEN */
 #define CMD_OPENF_OPROM		0x1	/* open coming from option rom */
 
+#define CMD_OPENF_RQ_ENABLE_THEN_POST   0x2
+
 /* flags for CMD_INIT */
 #define CMD_INITF_DEFAULT_MAC	0x1	/* init with default mac addr */
 
@@ -345,4 +466,39 @@
 	u64 args[VNIC_DEVCMD_NARGS];	/* RW cmd args (little-endian) */
 };
 
+/*
+ * Version 2 of the interface.
+ *
+ * Some things are carried over, notably the vnic_devcmd_cmd enum.
+ */
+
+/*
+ * Flags for vnic_devcmd2.flags
+ */
+
+#define DEVCMD2_FNORESULT	0x1 /* Don't copy result to host */
+
+#define VNIC_DEVCMD2_NARGS			VNIC_DEVCMD_NARGS
+
+struct vnic_devcmd2 {
+	u16 pad;
+	u16 flags;
+	u32 cmd;                /* same command #defines as original */
+	u64 args[VNIC_DEVCMD2_NARGS];
+};
+
+#define VNIC_DEVCMD2_NRESULTS			VNIC_DEVCMD_NARGS
+struct devcmd2_result {
+	u64 results[VNIC_DEVCMD2_NRESULTS];
+	u32 pad;
+	u16 completed_index;    /* into copy WQ */
+	u8  error;              /* same error codes as original */
+	u8  color;              /* 0 or 1 as with completion queues */
+};
+
+#define DEVCMD2_RING_SIZE			32
+#define DEVCMD2_DESC_SIZE			128
+
+#define DEVCMD2_RESULTS_SIZE_MAX		((1 << 16) - 1)
+
 #endif /* _VNIC_DEVCMD_H_ */
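
As a worked sketch of the CMD_INTR_COAL_CONVERT arithmetic above (helper names are hypothetical; the multiplier, divisor, and maximum come back in a0-a2 of the devcmd):

#include <linux/types.h>

/* usec -> cycles, per the comment: cycles = usec * multiplier / divisor */
static inline u32 coal_usec_to_cycles(u32 usec, u32 mul, u32 div)
{
	return usec * mul / div;
}

/* cycles -> usec is the inverse: usec = cycles * divisor / multiplier */
static inline u32 coal_cycles_to_usec(u32 cycles, u32 mul, u32 div)
{
	return cycles * div / mul;
}
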
diff --git a/drivers/scsi/fnic/vnic_resource.h b/drivers/scsi/fnic/vnic_resource.h
index 2d842f7..7c6163f 100644
--- a/drivers/scsi/fnic/vnic_resource.h
+++ b/drivers/scsi/fnic/vnic_resource.h
@@ -41,6 +41,13 @@
 	RES_TYPE_RSVD7,
 	RES_TYPE_DEVCMD,		/* Device command region */
 	RES_TYPE_PASS_THRU_PAGE,	/* Pass-thru page */
+	RES_TYPE_SUBVNIC,               /* subvnic resource type */
+	RES_TYPE_MQ_WQ,                 /* MQ Work queues */
+	RES_TYPE_MQ_RQ,                 /* MQ Receive queues */
+	RES_TYPE_MQ_CQ,                 /* MQ Completion queues */
+	RES_TYPE_DEPRECATED1,           /* Old version of devcmd 2 */
+	RES_TYPE_DEPRECATED2,           /* Old version of devcmd 2 */
+	RES_TYPE_DEVCMD2,               /* Device control region */
 
 	RES_TYPE_MAX,			/* Count of resource types */
 };
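
Note the two RES_TYPE_DEPRECATED placeholders retained above: the enum values are an interface with the device's resource headers, so entries are only ever appended and dead ones are kept as placeholders rather than removed. A hypothetical illustration of the constraint:

enum demo_res_type {		/* hypothetical, not from this patch */
	DEMO_RES_A,		/* existing values keep their numbers */
	DEMO_RES_OLD,		/* dead entry stays as a placeholder  */
	DEMO_RES_NEW,		/* new types are appended here ...    */
	DEMO_RES_MAX,		/* ... so the count sentinel stays last */
};
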
diff --git a/drivers/scsi/fnic/vnic_rq.c b/drivers/scsi/fnic/vnic_rq.c
index fd2068f..6a35b1b 100644
--- a/drivers/scsi/fnic/vnic_rq.c
+++ b/drivers/scsi/fnic/vnic_rq.c
@@ -27,12 +27,9 @@
 static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
 {
 	struct vnic_rq_buf *buf;
-	struct vnic_dev *vdev;
 	unsigned int i, j, count = rq->ring.desc_count;
 	unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count);
 
-	vdev = rq->vdev;
-
 	for (i = 0; i < blks; i++) {
 		rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC);
 		if (!rq->bufs[i]) {
@@ -171,7 +168,7 @@
 	struct vnic_rq_buf *buf;
 	u32 fetch_index;
 
-	BUG_ON(ioread32(&rq->ctrl->enable));
+	WARN_ON(ioread32(&rq->ctrl->enable));
 
 	buf = rq->to_clean;
 
diff --git a/drivers/scsi/fnic/vnic_wq.c b/drivers/scsi/fnic/vnic_wq.c
index a414135..015af2c 100644
--- a/drivers/scsi/fnic/vnic_wq.c
+++ b/drivers/scsi/fnic/vnic_wq.c
@@ -24,15 +24,32 @@
 #include "vnic_dev.h"
 #include "vnic_wq.h"
 
+
+int vnic_wq_get_ctrl(struct vnic_dev *vdev, struct vnic_wq *wq,
+		unsigned int index, enum vnic_res_type res_type)
+{
+	wq->ctrl = vnic_dev_get_res(vdev, res_type, index);
+
+	if (!wq->ctrl)
+		return -EINVAL;
+
+	return 0;
+}
+
+
+int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq,
+		unsigned int desc_count, unsigned int desc_size)
+{
+	return vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
+}
+
+
 static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
 {
 	struct vnic_wq_buf *buf;
-	struct vnic_dev *vdev;
 	unsigned int i, j, count = wq->ring.desc_count;
 	unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);
 
-	vdev = wq->vdev;
-
 	for (i = 0; i < blks; i++) {
 		wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC);
 		if (!wq->bufs[i]) {
@@ -111,6 +128,52 @@
 	return 0;
 }
 
+
+int vnic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
+		unsigned int desc_count, unsigned int desc_size)
+{
+	int err;
+
+	wq->index = 0;
+	wq->vdev = vdev;
+
+	err = vnic_wq_get_ctrl(vdev, wq, 0, RES_TYPE_DEVCMD2);
+	if (err) {
+		pr_err("Failed to get devcmd2 resource\n");
+		return err;
+	}
+	vnic_wq_disable(wq);
+
+	err = vnic_wq_alloc_ring(vdev, wq, desc_count, desc_size);
+	if (err)
+		return err;
+	return 0;
+}
+
+void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
+		unsigned int fetch_index, unsigned int posted_index,
+		unsigned int error_interrupt_enable,
+		unsigned int error_interrupt_offset)
+{
+	u64 paddr;
+	unsigned int count = wq->ring.desc_count;
+
+	paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
+	writeq(paddr, &wq->ctrl->ring_base);
+	iowrite32(count, &wq->ctrl->ring_size);
+	iowrite32(fetch_index, &wq->ctrl->fetch_index);
+	iowrite32(posted_index, &wq->ctrl->posted_index);
+	iowrite32(cq_index, &wq->ctrl->cq_index);
+	iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
+	iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
+	iowrite32(0, &wq->ctrl->error_status);
+
+	wq->to_use = wq->to_clean =
+		&wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES]
+		[fetch_index % VNIC_WQ_BUF_BLK_ENTRIES];
+}
+
+
 void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
 	unsigned int error_interrupt_enable,
 	unsigned int error_interrupt_offset)
diff --git a/drivers/scsi/fnic/vnic_wq.h b/drivers/scsi/fnic/vnic_wq.h
index 5cd094f..5d1e0a4 100644
--- a/drivers/scsi/fnic/vnic_wq.h
+++ b/drivers/scsi/fnic/vnic_wq.h
@@ -33,6 +33,8 @@
 #define vnic_wq_service fnic_wq_service
 #define vnic_wq_free fnic_wq_free
 #define vnic_wq_alloc fnic_wq_alloc
+#define vnic_wq_devcmd2_alloc fnic_wq_devcmd2_alloc
+#define vnic_wq_init_start fnic_wq_init_start
 #define vnic_wq_init fnic_wq_init
 #define vnic_wq_error_status fnic_wq_error_status
 #define vnic_wq_enable fnic_wq_enable
@@ -163,6 +165,12 @@
 void vnic_wq_free(struct vnic_wq *wq);
 int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
 	unsigned int desc_count, unsigned int desc_size);
+int vnic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
+		unsigned int desc_count, unsigned int desc_size);
+void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
+		unsigned int fetch_index, unsigned int posted_index,
+		unsigned int error_interrupt_enable,
+		unsigned int error_interrupt_offset);
 void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
 	unsigned int error_interrupt_enable,
 	unsigned int error_interrupt_offset);
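
Condensing the new WQ helpers above into their intended call sequence, as vnic_dev_init_devcmd2() uses them (sketch only: the wrapper name is hypothetical, and the hardware-gone 0xFFFFFFFF check plus error unwinding are elided):

static int demo_devcmd2_wq_setup(struct vnic_dev *vdev, struct vnic_wq *wq)
{
	unsigned int fetch;
	int err;

	err = vnic_wq_devcmd2_alloc(vdev, wq, DEVCMD2_RING_SIZE,
				    DEVCMD2_DESC_SIZE);
	if (err)
		return err;

	/* Post exactly where the firmware is currently fetching, and
	 * never move fetch_index itself. */
	fetch = ioread32(&wq->ctrl->fetch_index);
	vnic_wq_init_start(wq, 0, fetch, fetch, 0, 0);
	vnic_wq_enable(wq);

	return 0;
}
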
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index fc53818..2ab774e 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Generic Generic NCR5380 driver
  *
@@ -700,7 +701,7 @@
 	.this_id		= 7,
 	.sg_tablesize		= SG_ALL,
 	.cmd_per_lun		= 2,
-	.use_clustering		= DISABLE_CLUSTERING,
+	.dma_boundary		= PAGE_SIZE - 1,
 	.cmd_size		= NCR5380_CMD_SIZE,
 	.max_sectors		= 128,
 };
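
The g_NCR5380 change above swaps .use_clustering for .dma_boundary. A hedged sketch of the idiom (hypothetical template, not this driver's): after use_clustering was removed from struct scsi_host_template, setting dma_boundary to PAGE_SIZE - 1 reproduces the old DISABLE_CLUSTERING behavior by forbidding segment merges across page boundaries.

#include <linux/module.h>
#include <scsi/scsi_host.h>

static struct scsi_host_template demo_template = {	/* hypothetical */
	.module		= THIS_MODULE,
	.name		= "demo",
	.sg_tablesize	= SG_ALL,
	/* Replacement idiom for DISABLE_CLUSTERING: segments may not
	 * be merged across a page boundary. */
	.dma_boundary	= PAGE_SIZE - 1,
};
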
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 1670973..fe03410 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -1,6 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /************************************************************************
  * Linux driver for                                                     *  
- * ICP vortex GmbH:    GDT ISA/EISA/PCI Disk Array Controllers          *
+ * ICP vortex GmbH:    GDT PCI Disk Array Controllers                   *
  * Intel Corporation:  Storage RAID Controllers                         *
  *                                                                      *
  * gdth.c                                                               *
@@ -13,34 +14,16 @@
  * Boji Tony Kannanthanam <boji.t.kannanthanam@intel.com>               *
  * Johannes Dinner <johannes_dinner@adaptec.com>                        *
  *                                                                      *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License as published    *
- * by the Free Software Foundation; either version 2 of the License,    *
- * or (at your option) any later version.                               *
- *                                                                      *
- * This program is distributed in the hope that it will be useful,      *
- * but WITHOUT ANY WARRANTY; without even the implied warranty of       *
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the         *
- * GNU General Public License for more details.                         *
- *                                                                      *
- * You should have received a copy of the GNU General Public License    *
- * along with this kernel; if not, write to the Free Software           *
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.            *
  *                                                                      *
  * Linux kernel 2.6.x supported						*
  *                                                                      *
  ************************************************************************/
 
 /* All GDT Disk Array Controllers are fully supported by this driver.
- * This includes the PCI/EISA/ISA SCSI Disk Array Controllers and the
+ * This includes the PCI SCSI Disk Array Controllers and the
  * PCI Fibre Channel Disk Array Controllers. See gdth.h for a complete
  * list of all controller types.
  * 
- * If you have one or more GDT3000/3020 EISA controllers with 
- * controller BIOS disabled, you have to set the IRQ values with the 
- * command line option "gdth=irq1,irq2,...", where the irq1,irq2,... are
- * the IRQ values for the EISA controllers.
- * 
  * After the optional list of IRQ values, other possible 
  * command line options are:
  * disable:Y                    disable driver
@@ -61,14 +44,12 @@
  *                              access a shared resource from several nodes, 
  *                              appropriate controller firmware required
  * shared_access:N              enable driver reserve/release protocol
- * probe_eisa_isa:Y             scan for EISA/ISA controllers
- * probe_eisa_isa:N             do not scan for EISA/ISA controllers
  * force_dma32:Y                use only 32 bit DMA mode
  * force_dma32:N                use 64 bit DMA mode, if supported
  *
  * The default values are: "gdth=disable:N,reserve_mode:1,reverse_scan:N,
  *                          max_ids:127,rescan:N,hdr_channel:0,
- *                          shared_access:Y,probe_eisa_isa:N,force_dma32:N".
+ *                          shared_access:Y,force_dma32:N".
  * Here is another example: "gdth=reserve_list:0,1,2,0,0,1,3,0,rescan:Y".
  * 
  * When loading the gdth driver as a module, the same options are available. 
@@ -79,7 +60,7 @@
  * 
  * Default: "modprobe gdth disable=0 reserve_mode=1 reverse_scan=0
  *           max_ids=127 rescan=0 hdr_channel=0 shared_access=0
- *           probe_eisa_isa=0 force_dma32=0"
+ *           force_dma32=0"
  * The other example: "modprobe gdth reserve_list=0,1,2,0,0,1,3,0 rescan=1".
  */
 
@@ -96,10 +77,6 @@
  * phase:                   unused
  */
 
-
-/* interrupt coalescing */
-/* #define INT_COAL */
-
 /* statistics */
 #define GDTH_STATISTICS
 
@@ -122,10 +99,6 @@
 #include <linux/list.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
-
-#ifdef GDTH_RTC
-#include <linux/mc146818rtc.h>
-#endif
 #include <linux/reboot.h>
 
 #include <asm/dma.h>
@@ -192,79 +165,9 @@
 
 #ifdef DEBUG_GDTH
 static u8   DebugState = DEBUG_GDTH;
-
-#ifdef __SERIAL__
-#define MAX_SERBUF 160
-static void ser_init(void);
-static void ser_puts(char *str);
-static void ser_putc(char c);
-static int  ser_printk(const char *fmt, ...);
-static char strbuf[MAX_SERBUF+1];
-#ifdef __COM2__
-#define COM_BASE 0x2f8
-#else
-#define COM_BASE 0x3f8
-#endif
-static void ser_init()
-{
-    unsigned port=COM_BASE;
-
-    outb(0x80,port+3);
-    outb(0,port+1);
-    /* 19200 Baud, if 9600: outb(12,port) */
-    outb(6, port);
-    outb(3,port+3);
-    outb(0,port+1);
-    /*
-    ser_putc('I');
-    ser_putc(' ');
-    */
-}
-
-static void ser_puts(char *str)
-{
-    char *ptr;
-
-    ser_init();
-    for (ptr=str;*ptr;++ptr)
-        ser_putc(*ptr);
-}
-
-static void ser_putc(char c)
-{
-    unsigned port=COM_BASE;
-
-    while ((inb(port+5) & 0x20)==0);
-    outb(c,port);
-    if (c==0x0a)
-    {
-        while ((inb(port+5) & 0x20)==0);
-        outb(0x0d,port);
-    }
-}
-
-static int ser_printk(const char *fmt, ...)
-{
-    va_list args;
-    int i;
-
-    va_start(args,fmt);
-    i = vsprintf(strbuf,fmt,args);
-    ser_puts(strbuf);
-    va_end(args);
-    return i;
-}
-
-#define TRACE(a)    {if (DebugState==1) {ser_printk a;}}
-#define TRACE2(a)   {if (DebugState==1 || DebugState==2) {ser_printk a;}}
-#define TRACE3(a)   {if (DebugState!=0) {ser_printk a;}}
-
-#else /* !__SERIAL__ */
 #define TRACE(a)    {if (DebugState==1) {printk a;}}
 #define TRACE2(a)   {if (DebugState==1 || DebugState==2) {printk a;}}
 #define TRACE3(a)   {if (DebugState!=0) {printk a;}}
-#endif
-
 #else /* !DEBUG */
 #define TRACE(a)
 #define TRACE2(a)
@@ -273,9 +176,6 @@
 
 #ifdef GDTH_STATISTICS
 static u32 max_rq=0, max_index=0, max_sg=0;
-#ifdef INT_COAL
-static u32 max_int_coal=0;
-#endif
 static u32 act_ints=0, act_ios=0, act_stats=0, act_rq=0;
 static struct timer_list gdth_timer;
 #endif
@@ -286,12 +186,6 @@
 
 #define BUS_L2P(a,b)    ((b)>(a)->virt_bus ? (b-1):(b))
 
-#ifdef CONFIG_ISA
-static u8   gdth_drq_tab[4] = {5,6,7,7};            /* DRQ table */
-#endif
-#if defined(CONFIG_EISA) || defined(CONFIG_ISA)
-static u8   gdth_irq_tab[6] = {0,10,11,12,14,0};    /* IRQ table */
-#endif
 static u8   gdth_polling;                           /* polling if TRUE */
 static int      gdth_ctr_count  = 0;                    /* controller count */
 static LIST_HEAD(gdth_instances);                       /* controller list */
@@ -325,10 +219,6 @@
 };
 
 /* LILO and modprobe/insmod parameters */
-/* IRQ list for GDT3000/3020 EISA controllers */
-static int irq[MAXHA] __initdata = 
-{0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
- 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff};
 /* disable driver flag */
 static int disable __initdata = 0;
 /* reserve flag */
@@ -348,13 +238,10 @@
 static int rescan = 0;
 /* shared access */
 static int shared_access = 1;
-/* enable support for EISA and ISA controllers */
-static int probe_eisa_isa = 0;
 /* 64 bit DMA mode, support for drives > 2 TB, if force_dma32 = 0 */
 static int force_dma32 = 0;
 
 /* parameters for modprobe/insmod */
-module_param_hw_array(irq, int, irq, NULL, 0);
 module_param(disable, int, 0);
 module_param(reserve_mode, int, 0);
 module_param_array(reserve_list, int, NULL, 0);
@@ -363,7 +250,6 @@
 module_param(max_ids, int, 0);
 module_param(rescan, int, 0);
 module_param(shared_access, int, 0);
-module_param(probe_eisa_isa, int, 0);
 module_param(force_dma32, int, 0);
 MODULE_AUTHOR("Achim Leubner");
 MODULE_LICENSE("GPL");
@@ -515,45 +401,6 @@
     }
 }
 
-/* controller search and initialization functions */
-#ifdef CONFIG_EISA
-static int __init gdth_search_eisa(u16 eisa_adr)
-{
-    u32 id;
-    
-    TRACE(("gdth_search_eisa() adr. %x\n",eisa_adr));
-    id = inl(eisa_adr+ID0REG);
-    if (id == GDT3A_ID || id == GDT3B_ID) {     /* GDT3000A or GDT3000B */
-        if ((inb(eisa_adr+EISAREG) & 8) == 0)   
-            return 0;                           /* not EISA configured */
-        return 1;
-    }
-    if (id == GDT3_ID)                          /* GDT3000 */
-        return 1;
-
-    return 0;                                   
-}
-#endif /* CONFIG_EISA */
-
-#ifdef CONFIG_ISA
-static int __init gdth_search_isa(u32 bios_adr)
-{
-    void __iomem *addr;
-    u32 id;
-
-    TRACE(("gdth_search_isa() bios adr. %x\n",bios_adr));
-    if ((addr = ioremap(bios_adr+BIOS_ID_OFFS, sizeof(u32))) != NULL) {
-        id = readl(addr);
-        iounmap(addr);
-        if (id == GDT2_ID)                          /* GDT2000 */
-            return 1;
-    }
-    return 0;
-}
-#endif /* CONFIG_ISA */
-
-#ifdef CONFIG_PCI
-
 static bool gdth_search_vortex(u16 device)
 {
 	if (device <= PCI_DEVICE_ID_VORTEX_GDT6555)
@@ -656,204 +503,7 @@
 
 	return 0;
 }
-#endif /* CONFIG_PCI */
 
-#ifdef CONFIG_EISA
-static int __init gdth_init_eisa(u16 eisa_adr,gdth_ha_str *ha)
-{
-    u32 retries,id;
-    u8 prot_ver,eisacf,i,irq_found;
-
-    TRACE(("gdth_init_eisa() adr. %x\n",eisa_adr));
-    
-    /* disable board interrupts, deinitialize services */
-    outb(0xff,eisa_adr+EDOORREG);
-    outb(0x00,eisa_adr+EDENABREG);
-    outb(0x00,eisa_adr+EINTENABREG);
-    
-    outb(0xff,eisa_adr+LDOORREG);
-    retries = INIT_RETRIES;
-    gdth_delay(20);
-    while (inb(eisa_adr+EDOORREG) != 0xff) {
-        if (--retries == 0) {
-            printk("GDT-EISA: Initialization error (DEINIT failed)\n");
-            return 0;
-        }
-        gdth_delay(1);
-        TRACE2(("wait for DEINIT: retries=%d\n",retries));
-    }
-    prot_ver = inb(eisa_adr+MAILBOXREG);
-    outb(0xff,eisa_adr+EDOORREG);
-    if (prot_ver != PROTOCOL_VERSION) {
-        printk("GDT-EISA: Illegal protocol version\n");
-        return 0;
-    }
-    ha->bmic = eisa_adr;
-    ha->brd_phys = (u32)eisa_adr >> 12;
-
-    outl(0,eisa_adr+MAILBOXREG);
-    outl(0,eisa_adr+MAILBOXREG+4);
-    outl(0,eisa_adr+MAILBOXREG+8);
-    outl(0,eisa_adr+MAILBOXREG+12);
-
-    /* detect IRQ */ 
-    if ((id = inl(eisa_adr+ID0REG)) == GDT3_ID) {
-        ha->oem_id = OEM_ID_ICP;
-        ha->type = GDT_EISA;
-        ha->stype = id;
-        outl(1,eisa_adr+MAILBOXREG+8);
-        outb(0xfe,eisa_adr+LDOORREG);
-        retries = INIT_RETRIES;
-        gdth_delay(20);
-        while (inb(eisa_adr+EDOORREG) != 0xfe) {
-            if (--retries == 0) {
-                printk("GDT-EISA: Initialization error (get IRQ failed)\n");
-                return 0;
-            }
-            gdth_delay(1);
-        }
-        ha->irq = inb(eisa_adr+MAILBOXREG);
-        outb(0xff,eisa_adr+EDOORREG);
-        TRACE2(("GDT3000/3020: IRQ=%d\n",ha->irq));
-        /* check the result */
-        if (ha->irq == 0) {
-                TRACE2(("Unknown IRQ, use IRQ table from cmd line !\n"));
-                for (i = 0, irq_found = FALSE; 
-                     i < MAXHA && irq[i] != 0xff; ++i) {
-                if (irq[i]==10 || irq[i]==11 || irq[i]==12 || irq[i]==14) {
-                    irq_found = TRUE;
-                    break;
-                }
-                }
-            if (irq_found) {
-                ha->irq = irq[i];
-                irq[i] = 0;
-                printk("GDT-EISA: Can not detect controller IRQ,\n");
-                printk("Use IRQ setting from command line (IRQ = %d)\n",
-                       ha->irq);
-            } else {
-                printk("GDT-EISA: Initialization error (unknown IRQ), Enable\n");
-                printk("the controller BIOS or use command line parameters\n");
-                return 0;
-            }
-        }
-    } else {
-        eisacf = inb(eisa_adr+EISAREG) & 7;
-        if (eisacf > 4)                         /* level triggered */
-            eisacf -= 4;
-        ha->irq = gdth_irq_tab[eisacf];
-        ha->oem_id = OEM_ID_ICP;
-        ha->type = GDT_EISA;
-        ha->stype = id;
-    }
-
-    ha->dma64_support = 0;
-    return 1;
-}
-#endif /* CONFIG_EISA */
-
-#ifdef CONFIG_ISA
-static int __init gdth_init_isa(u32 bios_adr,gdth_ha_str *ha)
-{
-    register gdt2_dpram_str __iomem *dp2_ptr;
-    int i;
-    u8 irq_drq,prot_ver;
-    u32 retries;
-
-    TRACE(("gdth_init_isa() bios adr. %x\n",bios_adr));
-
-    ha->brd = ioremap(bios_adr, sizeof(gdt2_dpram_str));
-    if (ha->brd == NULL) {
-        printk("GDT-ISA: Initialization error (DPMEM remap error)\n");
-        return 0;
-    }
-    dp2_ptr = ha->brd;
-    writeb(1, &dp2_ptr->io.memlock); /* switch off write protection */
-    /* reset interface area */
-    memset_io(&dp2_ptr->u, 0, sizeof(dp2_ptr->u));
-    if (readl(&dp2_ptr->u) != 0) {
-        printk("GDT-ISA: Initialization error (DPMEM write error)\n");
-        iounmap(ha->brd);
-        return 0;
-    }
-
-    /* disable board interrupts, read DRQ and IRQ */
-    writeb(0xff, &dp2_ptr->io.irqdel);
-    writeb(0x00, &dp2_ptr->io.irqen);
-    writeb(0x00, &dp2_ptr->u.ic.S_Status);
-    writeb(0x00, &dp2_ptr->u.ic.Cmd_Index);
-
-    irq_drq = readb(&dp2_ptr->io.rq);
-    for (i=0; i<3; ++i) {
-        if ((irq_drq & 1)==0)
-            break;
-        irq_drq >>= 1;
-    }
-    ha->drq = gdth_drq_tab[i];
-
-    irq_drq = readb(&dp2_ptr->io.rq) >> 3;
-    for (i=1; i<5; ++i) {
-        if ((irq_drq & 1)==0)
-            break;
-        irq_drq >>= 1;
-    }
-    ha->irq = gdth_irq_tab[i];
-
-    /* deinitialize services */
-    writel(bios_adr, &dp2_ptr->u.ic.S_Info[0]);
-    writeb(0xff, &dp2_ptr->u.ic.S_Cmd_Indx);
-    writeb(0, &dp2_ptr->io.event);
-    retries = INIT_RETRIES;
-    gdth_delay(20);
-    while (readb(&dp2_ptr->u.ic.S_Status) != 0xff) {
-        if (--retries == 0) {
-            printk("GDT-ISA: Initialization error (DEINIT failed)\n");
-            iounmap(ha->brd);
-            return 0;
-        }
-        gdth_delay(1);
-    }
-    prot_ver = (u8)readl(&dp2_ptr->u.ic.S_Info[0]);
-    writeb(0, &dp2_ptr->u.ic.Status);
-    writeb(0xff, &dp2_ptr->io.irqdel);
-    if (prot_ver != PROTOCOL_VERSION) {
-        printk("GDT-ISA: Illegal protocol version\n");
-        iounmap(ha->brd);
-        return 0;
-    }
-
-    ha->oem_id = OEM_ID_ICP;
-    ha->type = GDT_ISA;
-    ha->ic_all_size = sizeof(dp2_ptr->u);
-    ha->stype= GDT2_ID;
-    ha->brd_phys = bios_adr >> 4;
-
-    /* special request to controller BIOS */
-    writel(0x00, &dp2_ptr->u.ic.S_Info[0]);
-    writel(0x00, &dp2_ptr->u.ic.S_Info[1]);
-    writel(0x01, &dp2_ptr->u.ic.S_Info[2]);
-    writel(0x00, &dp2_ptr->u.ic.S_Info[3]);
-    writeb(0xfe, &dp2_ptr->u.ic.S_Cmd_Indx);
-    writeb(0, &dp2_ptr->io.event);
-    retries = INIT_RETRIES;
-    gdth_delay(20);
-    while (readb(&dp2_ptr->u.ic.S_Status) != 0xfe) {
-        if (--retries == 0) {
-            printk("GDT-ISA: Initialization error\n");
-            iounmap(ha->brd);
-            return 0;
-        }
-        gdth_delay(1);
-    }
-    writeb(0, &dp2_ptr->u.ic.Status);
-    writeb(0xff, &dp2_ptr->io.irqdel);
-
-    ha->dma64_support = 0;
-    return 1;
-}
-#endif /* CONFIG_ISA */
-
-#ifdef CONFIG_PCI
 static int gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
 			 gdth_ha_str *ha)
 {
@@ -1228,30 +878,19 @@
 
     return 1;
 }
-#endif /* CONFIG_PCI */
 
 /* controller protocol functions */
 
 static void gdth_enable_int(gdth_ha_str *ha)
 {
     unsigned long flags;
-    gdt2_dpram_str __iomem *dp2_ptr;
     gdt6_dpram_str __iomem *dp6_ptr;
     gdt6m_dpram_str __iomem *dp6m_ptr;
 
     TRACE(("gdth_enable_int() hanum %d\n",ha->hanum));
     spin_lock_irqsave(&ha->smp_lock, flags);
 
-    if (ha->type == GDT_EISA) {
-        outb(0xff, ha->bmic + EDOORREG);
-        outb(0xff, ha->bmic + EDENABREG);
-        outb(0x01, ha->bmic + EINTENABREG);
-    } else if (ha->type == GDT_ISA) {
-        dp2_ptr = ha->brd;
-        writeb(1, &dp2_ptr->io.irqdel);
-        writeb(0, &dp2_ptr->u.ic.Cmd_Index);
-        writeb(1, &dp2_ptr->io.irqen);
-    } else if (ha->type == GDT_PCI) {
+    if (ha->type == GDT_PCI) {
         dp6_ptr = ha->brd;
         writeb(1, &dp6_ptr->io.irqdel);
         writeb(0, &dp6_ptr->u.ic.Cmd_Index);
@@ -1275,12 +914,7 @@
 
     TRACE(("gdth_get_status() irq %d ctr_count %d\n", ha->irq, gdth_ctr_count));
 
-        if (ha->type == GDT_EISA)
-            IStatus = inb((u16)ha->bmic + EDOORREG);
-        else if (ha->type == GDT_ISA)
-            IStatus =
-                readb(&((gdt2_dpram_str __iomem *)ha->brd)->u.ic.Cmd_Index);
-        else if (ha->type == GDT_PCI)
+        if (ha->type == GDT_PCI)
             IStatus =
                 readb(&((gdt6_dpram_str __iomem *)ha->brd)->u.ic.Cmd_Index);
         else if (ha->type == GDT_PCINEW) 
@@ -1298,11 +932,7 @@
 
     TRACE(("gdth_test_busy() hanum %d\n", ha->hanum));
 
-    if (ha->type == GDT_EISA)
-        gdtsema0 = (int)inb(ha->bmic + SEMA0REG);
-    else if (ha->type == GDT_ISA)
-        gdtsema0 = (int)readb(&((gdt2_dpram_str __iomem *)ha->brd)->u.ic.Sema0);
-    else if (ha->type == GDT_PCI)
+    if (ha->type == GDT_PCI)
         gdtsema0 = (int)readb(&((gdt6_dpram_str __iomem *)ha->brd)->u.ic.Sema0);
     else if (ha->type == GDT_PCINEW) 
         gdtsema0 = (int)inb(PTR2USHORT(&ha->plx->sema0_reg));
@@ -1336,11 +966,7 @@
 {
     TRACE(("gdth_set_sema0() hanum %d\n", ha->hanum));
 
-    if (ha->type == GDT_EISA) {
-        outb(1, ha->bmic + SEMA0REG);
-    } else if (ha->type == GDT_ISA) {
-        writeb(1, &((gdt2_dpram_str __iomem *)ha->brd)->u.ic.Sema0);
-    } else if (ha->type == GDT_PCI) {
+    if (ha->type == GDT_PCI) {
         writeb(1, &((gdt6_dpram_str __iomem *)ha->brd)->u.ic.Sema0);
     } else if (ha->type == GDT_PCINEW) { 
         outb(1, PTR2USHORT(&ha->plx->sema0_reg));
@@ -1356,7 +982,6 @@
     register gdt6m_dpram_str __iomem *dp6m_ptr;
     register gdt6c_dpram_str __iomem *dp6c_ptr;
     gdt6_dpram_str __iomem *dp6_ptr;
-    gdt2_dpram_str __iomem *dp2_ptr;
     u16 cp_count,dp_offset,cmd_no;
     
     TRACE(("gdth_copy_command() hanum %d\n", ha->hanum));
@@ -1367,8 +992,6 @@
     cmd_ptr  = ha->pccb;
 
     ++ha->cmd_cnt;                                                      
-    if (ha->type == GDT_EISA)
-        return;                                 /* no DPMEM, no copy */
 
     /* set cpcount dword aligned */
     if (cp_count & 3)
@@ -1377,14 +1000,7 @@
     ha->cmd_offs_dpmem += cp_count;
     
     /* set offset and service, copy command to DPMEM */
-    if (ha->type == GDT_ISA) {
-        dp2_ptr = ha->brd;
-        writew(dp_offset + DPMEM_COMMAND_OFFSET,
-                    &dp2_ptr->u.ic.comm_queue[cmd_no].offset);
-        writew((u16)cmd_ptr->Service,
-                    &dp2_ptr->u.ic.comm_queue[cmd_no].serv_id);
-        memcpy_toio(&dp2_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
-    } else if (ha->type == GDT_PCI) {
+    if (ha->type == GDT_PCI) {
         dp6_ptr = ha->brd;
         writew(dp_offset + DPMEM_COMMAND_OFFSET,
                     &dp6_ptr->u.ic.comm_queue[cmd_no].offset);
@@ -1430,13 +1046,7 @@
     if (ha->pccb->OpCode == GDT_INIT)
         ha->pccb->Service |= 0x80;
 
-    if (ha->type == GDT_EISA) {
-        if (ha->pccb->OpCode == GDT_INIT)               /* store DMA buffer */
-            outl(ha->ccb_phys, ha->bmic + MAILBOXREG);
-        outb(ha->pccb->Service, ha->bmic + LDOORREG);
-    } else if (ha->type == GDT_ISA) {
-        writeb(0, &((gdt2_dpram_str __iomem *)ha->brd)->io.event);
-    } else if (ha->type == GDT_PCI) {
+    if (ha->type == GDT_PCI) {
         writeb(0, &((gdt6_dpram_str __iomem *)ha->brd)->io.event);
     } else if (ha->type == GDT_PCINEW) { 
         outb(1, PTR2USHORT(&ha->plx->ldoor_reg));
@@ -1560,15 +1170,7 @@
     gdth_arcdl_str *alst;
     gdth_alist_str *alst2;
     gdth_oem_str_ioctl *oemstr;
-#ifdef INT_COAL
-    gdth_perf_modes *pmod;
-#endif
 
-#ifdef GDTH_RTC
-    u8 rtc[12];
-    unsigned long flags;
-#endif     
-   
     TRACE(("gdth_search_drives() hanum %d\n", ha->hanum));
     ok = 0;
 
@@ -1588,29 +1190,6 @@
     }
     TRACE2(("gdth_search_drives(): SCREENSERVICE initialized\n"));
 
-#ifdef GDTH_RTC
-    /* read realtime clock info, send to controller */
-    /* 1. wait for the falling edge of update flag */
-    spin_lock_irqsave(&rtc_lock, flags);
-    for (j = 0; j < 1000000; ++j)
-        if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)
-            break;
-    for (j = 0; j < 1000000; ++j)
-        if (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP))
-            break;
-    /* 2. read info */
-    do {
-        for (j = 0; j < 12; ++j) 
-            rtc[j] = CMOS_READ(j);
-    } while (rtc[0] != CMOS_READ(0));
-    spin_unlock_irqrestore(&rtc_lock, flags);
-    TRACE2(("gdth_search_drives(): RTC: %x/%x/%x\n",*(u32 *)&rtc[0],
-            *(u32 *)&rtc[4], *(u32 *)&rtc[8]));
-    /* 3. send to controller firmware */
-    gdth_internal_cmd(ha, SCREENSERVICE, GDT_REALTIME, *(u32 *)&rtc[0],
-                      *(u32 *)&rtc[4], *(u32 *)&rtc[8]);
-#endif  
- 
     /* unfreeze all IOs */
     gdth_internal_cmd(ha, CACHESERVICE, GDT_UNFREEZE_IO, 0, 0, 0);
  
@@ -1633,35 +1212,6 @@
     cdev_cnt = (u16)ha->info;
     ha->fw_vers = ha->service;
 
-#ifdef INT_COAL
-    if (ha->type == GDT_PCIMPR) {
-        /* set perf. modes */
-        pmod = (gdth_perf_modes *)ha->pscratch;
-        pmod->version          = 1;
-        pmod->st_mode          = 1;    /* enable one status buffer */
-        *((u64 *)&pmod->st_buff_addr1) = ha->coal_stat_phys;
-        pmod->st_buff_indx1    = COALINDEX;
-        pmod->st_buff_addr2    = 0;
-        pmod->st_buff_u_addr2  = 0;
-        pmod->st_buff_indx2    = 0;
-        pmod->st_buff_size     = sizeof(gdth_coal_status) * MAXOFFSETS;
-        pmod->cmd_mode         = 0;    // disable all cmd buffers
-        pmod->cmd_buff_addr1   = 0;
-        pmod->cmd_buff_u_addr1 = 0;
-        pmod->cmd_buff_indx1   = 0;
-        pmod->cmd_buff_addr2   = 0;
-        pmod->cmd_buff_u_addr2 = 0;
-        pmod->cmd_buff_indx2   = 0;
-        pmod->cmd_buff_size    = 0;
-        pmod->reserved1        = 0;            
-        pmod->reserved2        = 0;            
-        if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, SET_PERF_MODES,
-                              INVALID_CHANNEL,sizeof(gdth_perf_modes))) {
-            printk("GDT-HA %d: Interrupt coalescing activated\n", ha->hanum);
-        }
-    }
-#endif
-
     /* detect number of buses - try new IOCTL */
     iocr = (gdth_raw_iochan_str *)ha->pscratch;
     iocr->hdr.version        = 0xffffffff;
@@ -2433,9 +1983,6 @@
     TRACE(("gdth_fill_cache_cmd() cmd 0x%x cmdsize %d hdrive %d\n",
                  scp->cmnd[0],scp->cmd_len,hdrive));
 
-    if (ha->type==GDT_EISA && ha->cmd_cnt>0) 
-        return 0;
-
     mode64 = (ha->cache_feat & GDT_64BIT) ? TRUE : FALSE;
     /* test for READ_16, WRITE_16 if !mode64 ? ---
        not required, should not occur due to error return on 
@@ -2518,9 +2065,9 @@
 
         if (scsi_bufflen(scp)) {
             cmndinfo->dma_dir = (read_write == 1 ?
-                PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);   
-            sgcnt = pci_map_sg(ha->pdev, scsi_sglist(scp), scsi_sg_count(scp),
-                               cmndinfo->dma_dir);
+                DMA_TO_DEVICE : DMA_FROM_DEVICE);
+            sgcnt = dma_map_sg(&ha->pdev->dev, scsi_sglist(scp),
+			       scsi_sg_count(scp), cmndinfo->dma_dir);
             if (mode64) {
                 struct scatterlist *sl;
 
@@ -2528,12 +2075,6 @@
                 cmdp->u.cache64.sg_canz = sgcnt;
                 scsi_for_each_sg(scp, sl, sgcnt, i) {
                     cmdp->u.cache64.sg_lst[i].sg_ptr = sg_dma_address(sl);
-#ifdef GDTH_DMA_STATISTICS
-                    if (cmdp->u.cache64.sg_lst[i].sg_ptr > (u64)0xffffffff)
-                        ha->dma64_cnt++;
-                    else
-                        ha->dma32_cnt++;
-#endif
                     cmdp->u.cache64.sg_lst[i].sg_len = sg_dma_len(sl);
                 }
             } else {
@@ -2543,9 +2084,6 @@
                 cmdp->u.cache.sg_canz = sgcnt;
                 scsi_for_each_sg(scp, sl, sgcnt, i) {
                     cmdp->u.cache.sg_lst[i].sg_ptr = sg_dma_address(sl);
-#ifdef GDTH_DMA_STATISTICS
-                    ha->dma32_cnt++;
-#endif
                     cmdp->u.cache.sg_lst[i].sg_len = sg_dma_len(sl);
                 }
             }
@@ -2603,8 +2141,6 @@
     dma_addr_t sense_paddr;
     int cmd_index, sgcnt, mode64;
     u8 t,l;
-    struct page *page;
-    unsigned long offset;
     struct gdth_cmndinfo *cmndinfo;
 
     t = scp->device->id;
@@ -2613,9 +2149,6 @@
     TRACE(("gdth_fill_raw_cmd() cmd 0x%x bus %d ID %d LUN %d\n",
            scp->cmnd[0],b,t,l));
 
-    if (ha->type==GDT_EISA && ha->cmd_cnt>0) 
-        return 0;
-
     mode64 = (ha->raw_feat & GDT_64BIT) ? TRUE : FALSE;
 
     cmdp->Service = SCSIRAWSERVICE;
@@ -2649,10 +2182,8 @@
         }
 
     } else {
-        page = virt_to_page(scp->sense_buffer);
-        offset = (unsigned long)scp->sense_buffer & ~PAGE_MASK;
-        sense_paddr = pci_map_page(ha->pdev,page,offset,
-                                   16,PCI_DMA_FROMDEVICE);
+        sense_paddr = dma_map_single(&ha->pdev->dev, scp->sense_buffer, 16,
+				     DMA_FROM_DEVICE);
 
 	cmndinfo->sense_paddr  = sense_paddr;
         cmdp->OpCode           = GDT_WRITE;             /* always */
@@ -2693,9 +2224,9 @@
         }
 
         if (scsi_bufflen(scp)) {
-            cmndinfo->dma_dir = PCI_DMA_BIDIRECTIONAL;
-            sgcnt = pci_map_sg(ha->pdev, scsi_sglist(scp), scsi_sg_count(scp),
-                               cmndinfo->dma_dir);
+            cmndinfo->dma_dir = DMA_BIDIRECTIONAL;
+            sgcnt = dma_map_sg(&ha->pdev->dev, scsi_sglist(scp),
+			       scsi_sg_count(scp), cmndinfo->dma_dir);
             if (mode64) {
                 struct scatterlist *sl;
 
@@ -2703,12 +2234,6 @@
                 cmdp->u.raw64.sg_ranz = sgcnt;
                 scsi_for_each_sg(scp, sl, sgcnt, i) {
                     cmdp->u.raw64.sg_lst[i].sg_ptr = sg_dma_address(sl);
-#ifdef GDTH_DMA_STATISTICS
-                    if (cmdp->u.raw64.sg_lst[i].sg_ptr > (u64)0xffffffff)
-                        ha->dma64_cnt++;
-                    else
-                        ha->dma32_cnt++;
-#endif
                     cmdp->u.raw64.sg_lst[i].sg_len = sg_dma_len(sl);
                 }
             } else {
@@ -2718,9 +2243,6 @@
                 cmdp->u.raw.sg_ranz = sgcnt;
                 scsi_for_each_sg(scp, sl, sgcnt, i) {
                     cmdp->u.raw.sg_lst[i].sg_ptr = sg_dma_address(sl);
-#ifdef GDTH_DMA_STATISTICS
-                    ha->dma32_cnt++;
-#endif
                     cmdp->u.raw.sg_lst[i].sg_len = sg_dma_len(sl);
                 }
             }
@@ -2778,9 +2300,6 @@
     cmdp= ha->pccb;
     TRACE2(("gdth_special_cmd(): "));
 
-    if (ha->type==GDT_EISA && ha->cmd_cnt>0) 
-        return 0;
-
     *cmdp = *cmndinfo->internal_cmd_str;
     cmdp->RequestBuffer = scp;
 
@@ -2959,18 +2478,11 @@
 {
     gdt6m_dpram_str __iomem *dp6m_ptr = NULL;
     gdt6_dpram_str __iomem *dp6_ptr;
-    gdt2_dpram_str __iomem *dp2_ptr;
     struct scsi_cmnd *scp;
     int rval, i;
     u8 IStatus;
     u16 Service;
     unsigned long flags = 0;
-#ifdef INT_COAL
-    int coalesced = FALSE;
-    int next = FALSE;
-    gdth_coal_status *pcs = NULL;
-    int act_int_coal = 0;       
-#endif
 
     TRACE(("gdth_interrupt() IRQ %d\n", ha->irq));
 
@@ -2997,53 +2509,7 @@
     ++act_ints;
 #endif
 
-#ifdef INT_COAL
-    /* See if the fw is returning coalesced status */
-    if (IStatus == COALINDEX) {
-        /* Coalesced status.  Setup the initial status 
-           buffer pointer and flags */
-        pcs = ha->coal_stat;
-        coalesced = TRUE;        
-        next = TRUE;
-    }
-
-    do {
-        if (coalesced) {
-            /* For coalesced requests all status
-               information is found in the status buffer */
-            IStatus = (u8)(pcs->status & 0xff);
-        }
-#endif
-    
-        if (ha->type == GDT_EISA) {
-            if (IStatus & 0x80) {                       /* error flag */
-                IStatus &= ~0x80;
-                ha->status = inw(ha->bmic + MAILBOXREG+8);
-                TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status));
-            } else                                      /* no error */
-                ha->status = S_OK;
-            ha->info = inl(ha->bmic + MAILBOXREG+12);
-            ha->service = inw(ha->bmic + MAILBOXREG+10);
-            ha->info2 = inl(ha->bmic + MAILBOXREG+4);
-
-            outb(0xff, ha->bmic + EDOORREG);    /* acknowledge interrupt */
-            outb(0x00, ha->bmic + SEMA1REG);    /* reset status semaphore */
-        } else if (ha->type == GDT_ISA) {
-            dp2_ptr = ha->brd;
-            if (IStatus & 0x80) {                       /* error flag */
-                IStatus &= ~0x80;
-                ha->status = readw(&dp2_ptr->u.ic.Status);
-                TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status));
-            } else                                      /* no error */
-                ha->status = S_OK;
-            ha->info = readl(&dp2_ptr->u.ic.Info[0]);
-            ha->service = readw(&dp2_ptr->u.ic.Service);
-            ha->info2 = readl(&dp2_ptr->u.ic.Info[1]);
-
-            writeb(0xff, &dp2_ptr->io.irqdel); /* acknowledge interrupt */
-            writeb(0, &dp2_ptr->u.ic.Cmd_Index);/* reset command index */
-            writeb(0, &dp2_ptr->io.Sema1);     /* reset status semaphore */
-        } else if (ha->type == GDT_PCI) {
+        if (ha->type == GDT_PCI) {
             dp6_ptr = ha->brd;
             if (IStatus & 0x80) {                       /* error flag */
                 IStatus &= ~0x80;
@@ -3075,28 +2541,15 @@
             dp6m_ptr = ha->brd;
             if (IStatus & 0x80) {                       /* error flag */
                 IStatus &= ~0x80;
-#ifdef INT_COAL
-                if (coalesced)
-                    ha->status = pcs->ext_status & 0xffff;
-                else 
-#endif
-                    ha->status = readw(&dp6m_ptr->i960r.status);
+                ha->status = readw(&dp6m_ptr->i960r.status);
                 TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status));
             } else                                      /* no error */
                 ha->status = S_OK;
-#ifdef INT_COAL
-            /* get information */
-            if (coalesced) {    
-                ha->info = pcs->info0;
-                ha->info2 = pcs->info1;
-                ha->service = (pcs->ext_status >> 16) & 0xffff;
-            } else
-#endif
-            {
-                ha->info = readl(&dp6m_ptr->i960r.info[0]);
-                ha->service = readw(&dp6m_ptr->i960r.service);
-                ha->info2 = readl(&dp6m_ptr->i960r.info[1]);
-            }
+
+            ha->info = readl(&dp6m_ptr->i960r.info[0]);
+            ha->service = readw(&dp6m_ptr->i960r.service);
+            ha->info2 = readl(&dp6m_ptr->i960r.info[1]);
+
             /* event string */
             if (IStatus == ASYNCINDEX) {
                 if (ha->service != SCREENSERVICE &&
@@ -3111,15 +2564,8 @@
                     }
                 }
             }
-#ifdef INT_COAL
-            /* Make sure that non coalesced interrupts get cleared
-               before being handled by gdth_async_event/gdth_sync_event */
-            if (!coalesced)
-#endif                          
-            {
-                writeb(0xff, &dp6m_ptr->i960r.edoor_reg);
-                writeb(0, &dp6m_ptr->i960r.sema1_reg);
-            }
+            writeb(0xff, &dp6m_ptr->i960r.edoor_reg);
+            writeb(0, &dp6m_ptr->i960r.sema1_reg);
         } else {
             TRACE2(("gdth_interrupt() unknown controller type\n"));
             if (!gdth_polling)
@@ -3182,31 +2628,6 @@
             gdth_scsi_done(scp);
         }
 
-#ifdef INT_COAL
-        if (coalesced) {
-            /* go to the next status in the status buffer */
-            ++pcs;
-#ifdef GDTH_STATISTICS
-            ++act_int_coal;
-            if (act_int_coal > max_int_coal) {
-                max_int_coal = act_int_coal;
-                printk("GDT: max_int_coal = %d\n",(u16)max_int_coal);
-            }
-#endif      
-            /* see if there is another status */
-            if (pcs->status == 0)    
-                /* Stop the coalesce loop */
-                next = FALSE;
-        }
-    } while (next);
-
-    /* coalescing only for new GDT_PCIMPR controllers available */      
-    if (ha->type == GDT_PCIMPR && coalesced) {
-        writeb(0xff, &dp6m_ptr->i960r.edoor_reg);
-        writeb(0, &dp6m_ptr->i960r.sema1_reg);
-    }
-#endif
-
     gdth_next(ha);
     return IRQ_HANDLED;
 }
@@ -3313,12 +2734,12 @@
             return 2;
         }
         if (scsi_bufflen(scp))
-            pci_unmap_sg(ha->pdev, scsi_sglist(scp), scsi_sg_count(scp),
+            dma_unmap_sg(&ha->pdev->dev, scsi_sglist(scp), scsi_sg_count(scp),
                          cmndinfo->dma_dir);
 
         if (cmndinfo->sense_paddr)
-            pci_unmap_page(ha->pdev, cmndinfo->sense_paddr, 16,
-                                                           PCI_DMA_FROMDEVICE);
+            dma_unmap_page(&ha->pdev->dev, cmndinfo->sense_paddr, 16,
+			   DMA_FROM_DEVICE);
 
         if (ha->status == S_OK) {
             cmndinfo->status = S_OK;
@@ -3610,12 +3031,7 @@
                 + sizeof(u64);
             ha->cmd_cnt = 0;
             gdth_copy_command(ha);
-            if (ha->type == GDT_EISA)
-                printk("[EISA slot %d] ",(u16)ha->brd_phys);
-            else if (ha->type == GDT_ISA)
-                printk("[DPMEM 0x%4X] ",(u16)ha->brd_phys);
-            else 
-                printk("[PCI %d/%d] ",(u16)(ha->brd_phys>>8),
+            printk("[PCI %d/%d] ",(u16)(ha->brd_phys>>8),
                        (u16)((ha->brd_phys>>3)&0x1f));
             gdth_release_event(ha);
         }
@@ -3756,23 +3172,12 @@
 
 static void __init internal_setup(char *str,int *ints)
 {
-    int i, argc;
+    int i;
     char *cur_str, *argv;
 
     TRACE2(("internal_setup() str %s ints[0] %d\n", 
             str ? str:"NULL", ints ? ints[0]:0));
 
-    /* read irq[] from ints[] */
-    if (ints) {
-        argc = ints[0];
-        if (argc > 0) {
-            if (argc > MAXHA)
-                argc = MAXHA;
-            for (i = 0; i < argc; ++i)
-                irq[i] = ints[i+1];
-        }
-    }
-
     /* analyse string */
     argv = str;
     while (argv && (cur_str = strchr(argv, ':'))) {
@@ -3799,8 +3204,6 @@
             rescan = val;
         else if (!strncmp(argv, "shared_access:", 14))
             shared_access = val;
-        else if (!strncmp(argv, "probe_eisa_isa:", 15))
-            probe_eisa_isa = val;
         else if (!strncmp(argv, "reserve_list:", 13)) {
             reserve_list[0] = val;
             for (i = 1; i < MAX_RES_ARGS; i++) {
@@ -3847,18 +3250,7 @@
 {
     TRACE2(("gdth_ctr_name()\n"));
 
-    if (ha->type == GDT_EISA) {
-        switch (ha->stype) {
-          case GDT3_ID:
-            return("GDT3000/3020");
-          case GDT3A_ID:
-            return("GDT3000A/3020A/3050A");
-          case GDT3B_ID:
-            return("GDT3000B/3010A");
-        }
-    } else if (ha->type == GDT_ISA) {
-        return("GDT2000/2020");
-    } else if (ha->type == GDT_PCI) {
+    if (ha->type == GDT_PCI) {
         switch (ha->pdev->device) {
           case PCI_DEVICE_ID_VORTEX_GDT60x0:
             return("GDT6000/6020/6050");
@@ -4155,131 +3547,148 @@
     return 0;
 }
 
+static void gdth_ioc_cacheservice(gdth_ha_str *ha, gdth_ioctl_general *gen,
+		u64 paddr)
+{
+	if (ha->cache_feat & GDT_64BIT) {
+		/* copy elements from 32-bit IOCTL structure */
+		gen->command.u.cache64.BlockCnt = gen->command.u.cache.BlockCnt;
+		gen->command.u.cache64.BlockNo = gen->command.u.cache.BlockNo;
+		gen->command.u.cache64.DeviceNo = gen->command.u.cache.DeviceNo;
+
+		if (ha->cache_feat & SCATTER_GATHER) {
+			gen->command.u.cache64.DestAddr = (u64)-1;
+			gen->command.u.cache64.sg_canz = 1;
+			gen->command.u.cache64.sg_lst[0].sg_ptr = paddr;
+			gen->command.u.cache64.sg_lst[0].sg_len = gen->data_len;
+			gen->command.u.cache64.sg_lst[1].sg_len = 0;
+		} else {
+			gen->command.u.cache64.DestAddr = paddr;
+			gen->command.u.cache64.sg_canz = 0;
+		}
+	} else {
+		if (ha->cache_feat & SCATTER_GATHER) {
+			gen->command.u.cache.DestAddr = 0xffffffff;
+			gen->command.u.cache.sg_canz = 1;
+			gen->command.u.cache.sg_lst[0].sg_ptr = (u32)paddr;
+			gen->command.u.cache.sg_lst[0].sg_len = gen->data_len;
+			gen->command.u.cache.sg_lst[1].sg_len = 0;
+		} else {
+			gen->command.u.cache.DestAddr = paddr;
+			gen->command.u.cache.sg_canz = 0;
+		}
+	}
+}
+
+static void gdth_ioc_scsiraw(gdth_ha_str *ha, gdth_ioctl_general *gen,
+		u64 paddr)
+{
+	if (ha->raw_feat & GDT_64BIT) {
+		/* copy elements from 32-bit IOCTL structure */
+		char cmd[16];
+
+		gen->command.u.raw64.sense_len = gen->command.u.raw.sense_len;
+		gen->command.u.raw64.bus = gen->command.u.raw.bus;
+		gen->command.u.raw64.lun = gen->command.u.raw.lun;
+		gen->command.u.raw64.target = gen->command.u.raw.target;
+		memcpy(cmd, gen->command.u.raw.cmd, 16);
+		memcpy(gen->command.u.raw64.cmd, cmd, 16);
+		gen->command.u.raw64.clen = gen->command.u.raw.clen;
+		gen->command.u.raw64.sdlen = gen->command.u.raw.sdlen;
+		gen->command.u.raw64.direction = gen->command.u.raw.direction;
+
+		/* addresses */
+		if (ha->raw_feat & SCATTER_GATHER) {
+			gen->command.u.raw64.sdata = (u64)-1;
+			gen->command.u.raw64.sg_ranz = 1;
+			gen->command.u.raw64.sg_lst[0].sg_ptr = paddr;
+			gen->command.u.raw64.sg_lst[0].sg_len = gen->data_len;
+			gen->command.u.raw64.sg_lst[1].sg_len = 0;
+		} else {
+			gen->command.u.raw64.sdata = paddr;
+			gen->command.u.raw64.sg_ranz = 0;
+                }
+
+		gen->command.u.raw64.sense_data = paddr + gen->data_len;
+	} else {
+		if (ha->raw_feat & SCATTER_GATHER) {
+			gen->command.u.raw.sdata = 0xffffffff;
+			gen->command.u.raw.sg_ranz = 1;
+			gen->command.u.raw.sg_lst[0].sg_ptr = (u32)paddr;
+			gen->command.u.raw.sg_lst[0].sg_len = gen->data_len;
+			gen->command.u.raw.sg_lst[1].sg_len = 0;
+		} else {
+			gen->command.u.raw.sdata = paddr;
+			gen->command.u.raw.sg_ranz = 0;
+                }
+
+		gen->command.u.raw.sense_data = (u32)paddr + gen->data_len;
+	}
+}
+
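
One subtlety in gdth_ioc_scsiraw() above: the raw and raw64 command layouts
share storage in the gen->command.u union, so u.raw.cmd and u.raw64.cmd can
alias overlapping bytes. A reduced sketch of why the local cmd[16] bounce
buffer is needed (field layout simplified for illustration, not the real
struct):

union cmd_u_sketch {
	struct { u32 sdata; u8 cmd[16]; } raw;		/* 32-bit layout */
	struct { u64 sdata; u8 cmd[16]; } raw64;	/* 64-bit layout */
};

With offsets like these, memcpy(u.raw64.cmd, u.raw.cmd, 16) would be an
overlapping copy, and the 64-bit field assignments made beforehand may
already have clobbered part of u.raw.cmd, so the CDB is staged in a local
array first.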
 static int ioc_general(void __user *arg, char *cmnd)
 {
-    gdth_ioctl_general gen;
-    char *buf = NULL;
-    u64 paddr; 
-    gdth_ha_str *ha;
-    int rval;
+	gdth_ioctl_general gen;
+	gdth_ha_str *ha;
+	char *buf = NULL;
+	dma_addr_t paddr;
+	int rval;
 
-    if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
-        return -EFAULT;
-    ha = gdth_find_ha(gen.ionode);
-    if (!ha)
-        return -EFAULT;
+	if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
+		return -EFAULT;
+	ha = gdth_find_ha(gen.ionode);
+	if (!ha)
+		return -EFAULT;
 
-    if (gen.data_len > INT_MAX)
-        return -EINVAL;
-    if (gen.sense_len > INT_MAX)
-        return -EINVAL;
-    if (gen.data_len + gen.sense_len > INT_MAX)
-        return -EINVAL;
+	if (gen.data_len > INT_MAX)
+		return -EINVAL;
+	if (gen.sense_len > INT_MAX)
+		return -EINVAL;
+	if (gen.data_len + gen.sense_len > INT_MAX)
+		return -EINVAL;
 
-    if (gen.data_len + gen.sense_len != 0) {
-        if (!(buf = gdth_ioctl_alloc(ha, gen.data_len + gen.sense_len,
-                                     FALSE, &paddr)))
-            return -EFAULT;
-        if (copy_from_user(buf, arg + sizeof(gdth_ioctl_general),  
-                           gen.data_len + gen.sense_len)) {
-            gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr);
-            return -EFAULT;
-        }
+	if (gen.data_len + gen.sense_len > 0) {
+		buf = dma_alloc_coherent(&ha->pdev->dev,
+				gen.data_len + gen.sense_len, &paddr,
+				GFP_KERNEL);
+		if (!buf)
+			return -EFAULT;
 
-        if (gen.command.OpCode == GDT_IOCTL) {
-            gen.command.u.ioctl.p_param = paddr;
-        } else if (gen.command.Service == CACHESERVICE) {
-            if (ha->cache_feat & GDT_64BIT) {
-                /* copy elements from 32-bit IOCTL structure */
-                gen.command.u.cache64.BlockCnt = gen.command.u.cache.BlockCnt;
-                gen.command.u.cache64.BlockNo = gen.command.u.cache.BlockNo;
-                gen.command.u.cache64.DeviceNo = gen.command.u.cache.DeviceNo;
-                /* addresses */
-                if (ha->cache_feat & SCATTER_GATHER) {
-                    gen.command.u.cache64.DestAddr = (u64)-1;
-                    gen.command.u.cache64.sg_canz = 1;
-                    gen.command.u.cache64.sg_lst[0].sg_ptr = paddr;
-                    gen.command.u.cache64.sg_lst[0].sg_len = gen.data_len;
-                    gen.command.u.cache64.sg_lst[1].sg_len = 0;
-                } else {
-                    gen.command.u.cache64.DestAddr = paddr;
-                    gen.command.u.cache64.sg_canz = 0;
-                }
-            } else {
-                if (ha->cache_feat & SCATTER_GATHER) {
-                    gen.command.u.cache.DestAddr = 0xffffffff;
-                    gen.command.u.cache.sg_canz = 1;
-                    gen.command.u.cache.sg_lst[0].sg_ptr = (u32)paddr;
-                    gen.command.u.cache.sg_lst[0].sg_len = gen.data_len;
-                    gen.command.u.cache.sg_lst[1].sg_len = 0;
-                } else {
-                    gen.command.u.cache.DestAddr = paddr;
-                    gen.command.u.cache.sg_canz = 0;
-                }
-            }
-        } else if (gen.command.Service == SCSIRAWSERVICE) {
-            if (ha->raw_feat & GDT_64BIT) {
-                /* copy elements from 32-bit IOCTL structure */
-                char cmd[16];
-                gen.command.u.raw64.sense_len = gen.command.u.raw.sense_len;
-                gen.command.u.raw64.bus = gen.command.u.raw.bus;
-                gen.command.u.raw64.lun = gen.command.u.raw.lun;
-                gen.command.u.raw64.target = gen.command.u.raw.target;
-                memcpy(cmd, gen.command.u.raw.cmd, 16);
-                memcpy(gen.command.u.raw64.cmd, cmd, 16);
-                gen.command.u.raw64.clen = gen.command.u.raw.clen;
-                gen.command.u.raw64.sdlen = gen.command.u.raw.sdlen;
-                gen.command.u.raw64.direction = gen.command.u.raw.direction;
-                /* addresses */
-                if (ha->raw_feat & SCATTER_GATHER) {
-                    gen.command.u.raw64.sdata = (u64)-1;
-                    gen.command.u.raw64.sg_ranz = 1;
-                    gen.command.u.raw64.sg_lst[0].sg_ptr = paddr;
-                    gen.command.u.raw64.sg_lst[0].sg_len = gen.data_len;
-                    gen.command.u.raw64.sg_lst[1].sg_len = 0;
-                } else {
-                    gen.command.u.raw64.sdata = paddr;
-                    gen.command.u.raw64.sg_ranz = 0;
-                }
-                gen.command.u.raw64.sense_data = paddr + gen.data_len;
-            } else {
-                if (ha->raw_feat & SCATTER_GATHER) {
-                    gen.command.u.raw.sdata = 0xffffffff;
-                    gen.command.u.raw.sg_ranz = 1;
-                    gen.command.u.raw.sg_lst[0].sg_ptr = (u32)paddr;
-                    gen.command.u.raw.sg_lst[0].sg_len = gen.data_len;
-                    gen.command.u.raw.sg_lst[1].sg_len = 0;
-                } else {
-                    gen.command.u.raw.sdata = paddr;
-                    gen.command.u.raw.sg_ranz = 0;
-                }
-                gen.command.u.raw.sense_data = (u32)paddr + gen.data_len;
-            }
-        } else {
-            gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr);
-            return -EFAULT;
-        }
-    }
+		rval = -EFAULT;
+		if (copy_from_user(buf, arg + sizeof(gdth_ioctl_general),
+				   gen.data_len + gen.sense_len))
+			goto out_free_buf;
 
-    rval = __gdth_execute(ha->sdev, &gen.command, cmnd, gen.timeout, &gen.info);
-    if (rval < 0) {
-	gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr);
-        return rval;
-    }
-    gen.status = rval;
+		if (gen.command.OpCode == GDT_IOCTL)
+			gen.command.u.ioctl.p_param = paddr;
+		else if (gen.command.Service == CACHESERVICE)
+			gdth_ioc_cacheservice(ha, &gen, paddr);
+		else if (gen.command.Service == SCSIRAWSERVICE)
+			gdth_ioc_scsiraw(ha, &gen, paddr);
+		else
+			goto out_free_buf;
+	}
 
-    if (copy_to_user(arg + sizeof(gdth_ioctl_general), buf, 
-                     gen.data_len + gen.sense_len)) {
-        gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr);
-        return -EFAULT; 
-    } 
-    if (copy_to_user(arg, &gen, 
-        sizeof(gdth_ioctl_general) - sizeof(gdth_cmd_str))) {
-        gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr);
-        return -EFAULT;
-    }
-    gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr);
-    return 0;
+	rval = __gdth_execute(ha->sdev, &gen.command, cmnd, gen.timeout,
+			&gen.info);
+	if (rval < 0)
+		goto out_free_buf;
+	gen.status = rval;
+
+	rval = -EFAULT;
+	if (copy_to_user(arg + sizeof(gdth_ioctl_general), buf,
+			 gen.data_len + gen.sense_len))
+		goto out_free_buf;
+	if (copy_to_user(arg, &gen,
+			sizeof(gdth_ioctl_general) - sizeof(gdth_cmd_str)))
+		goto out_free_buf;
+
+	rval = 0;
+out_free_buf:
+	if (buf)
+		dma_free_coherent(&ha->pdev->dev, gen.data_len + gen.sense_len,
+				  buf, paddr);
+	return rval;
 }
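
The rework above also funnels every failure in ioc_general() through a single
exit label, so the coherent buffer is freed exactly once on all paths. A
minimal userspace analogue of that shape (illustrative only; the names are
made up):

#include <stdlib.h>

static int do_work(char *buf, size_t len) { return len ? (int)buf[0] : 0; }

int run(size_t len)
{
	char *buf = NULL;
	int rval = -1;

	if (len) {
		buf = calloc(1, len);
		if (!buf)
			return -1;	/* nothing to clean up yet */
	}

	rval = do_work(buf, len);
	if (rval < 0)
		goto out_free_buf;

	rval = 0;
out_free_buf:
	free(buf);	/* free(NULL) is a no-op, like the if (buf) guard above */
	return rval;
}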
  
 static int ioc_hdrlist(void __user *arg, char *cmnd)
@@ -4514,22 +3923,17 @@
             (NULL == (ha = gdth_find_ha(ctrt.ionode))))
             return -EFAULT;
 
-        if (ha->type == GDT_ISA || ha->type == GDT_EISA) {
-            ctrt.type = (u8)((ha->stype>>20) - 0x10);
+        if (ha->type != GDT_PCIMPR) {
+            ctrt.type = (u8)((ha->stype<<4) + 6);
         } else {
-            if (ha->type != GDT_PCIMPR) {
-                ctrt.type = (u8)((ha->stype<<4) + 6);
-            } else {
-                ctrt.type = 
-                    (ha->oem_id == OEM_ID_INTEL ? 0xfd : 0xfe);
-                if (ha->stype >= 0x300)
-                    ctrt.ext_type = 0x6000 | ha->pdev->subsystem_device;
-                else 
-                    ctrt.ext_type = 0x6000 | ha->stype;
-            }
-            ctrt.device_id = ha->pdev->device;
-            ctrt.sub_device_id = ha->pdev->subsystem_device;
+            ctrt.type = (ha->oem_id == OEM_ID_INTEL ? 0xfd : 0xfe);
+            if (ha->stype >= 0x300)
+                ctrt.ext_type = 0x6000 | ha->pdev->subsystem_device;
+            else
+                ctrt.ext_type = 0x6000 | ha->stype;
         }
+        ctrt.device_id = ha->pdev->device;
+        ctrt.sub_device_id = ha->pdev->subsystem_device;
         ctrt.info = ha->brd_phys;
         ctrt.oem_id = ha->oem_id;
         if (copy_to_user(argp, &ctrt, sizeof(gdth_ioctl_ctrtype)))
@@ -4680,276 +4084,9 @@
         .sg_tablesize           = GDTH_MAXSG,
         .cmd_per_lun            = GDTH_MAXC_P_L,
         .unchecked_isa_dma      = 1,
-        .use_clustering         = ENABLE_CLUSTERING,
 	.no_write_same		= 1,
 };
 
-#ifdef CONFIG_ISA
-static int __init gdth_isa_probe_one(u32 isa_bios)
-{
-	struct Scsi_Host *shp;
-	gdth_ha_str *ha;
-	dma_addr_t scratch_dma_handle = 0;
-	int error, i;
-
-	if (!gdth_search_isa(isa_bios))
-		return -ENXIO;
-
-	shp = scsi_host_alloc(&gdth_template, sizeof(gdth_ha_str));
-	if (!shp)
-		return -ENOMEM;
-	ha = shost_priv(shp);
-
-	error = -ENODEV;
-	if (!gdth_init_isa(isa_bios,ha))
-		goto out_host_put;
-
-	/* controller found and initialized */
-	printk("Configuring GDT-ISA HA at BIOS 0x%05X IRQ %u DRQ %u\n",
-		isa_bios, ha->irq, ha->drq);
-
-	error = request_irq(ha->irq, gdth_interrupt, 0, "gdth", ha);
-	if (error) {
-		printk("GDT-ISA: Unable to allocate IRQ\n");
-		goto out_host_put;
-	}
-
-	error = request_dma(ha->drq, "gdth");
-	if (error) {
-		printk("GDT-ISA: Unable to allocate DMA channel\n");
-		goto out_free_irq;
-	}
-
-	set_dma_mode(ha->drq,DMA_MODE_CASCADE);
-	enable_dma(ha->drq);
-	shp->unchecked_isa_dma = 1;
-	shp->irq = ha->irq;
-	shp->dma_channel = ha->drq;
-
-	ha->hanum = gdth_ctr_count++;
-	ha->shost = shp;
-
-	ha->pccb = &ha->cmdext;
-	ha->ccb_phys = 0L;
-	ha->pdev = NULL;
-
-	error = -ENOMEM;
-
-	ha->pscratch = pci_alloc_consistent(ha->pdev, GDTH_SCRATCH,
-						&scratch_dma_handle);
-	if (!ha->pscratch)
-		goto out_dec_counters;
-	ha->scratch_phys = scratch_dma_handle;
-
-	ha->pmsg = pci_alloc_consistent(ha->pdev, sizeof(gdth_msg_str),
-						&scratch_dma_handle);
-	if (!ha->pmsg)
-		goto out_free_pscratch;
-	ha->msg_phys = scratch_dma_handle;
-
-#ifdef INT_COAL
-	ha->coal_stat = pci_alloc_consistent(ha->pdev,
-				sizeof(gdth_coal_status) * MAXOFFSETS,
-				&scratch_dma_handle);
-	if (!ha->coal_stat)
-		goto out_free_pmsg;
-	ha->coal_stat_phys = scratch_dma_handle;
-#endif
-
-	ha->scratch_busy = FALSE;
-	ha->req_first = NULL;
-	ha->tid_cnt = MAX_HDRIVES;
-	if (max_ids > 0 && max_ids < ha->tid_cnt)
-		ha->tid_cnt = max_ids;
-	for (i = 0; i < GDTH_MAXCMDS; ++i)
-		ha->cmd_tab[i].cmnd = UNUSED_CMND;
-	ha->scan_mode = rescan ? 0x10 : 0;
-
-	error = -ENODEV;
-	if (!gdth_search_drives(ha)) {
-		printk("GDT-ISA: Error during device scan\n");
-		goto out_free_coal_stat;
-	}
-
-	if (hdr_channel < 0 || hdr_channel > ha->bus_cnt)
-		hdr_channel = ha->bus_cnt;
-	ha->virt_bus = hdr_channel;
-
-	if (ha->cache_feat & ha->raw_feat & ha->screen_feat & GDT_64BIT)
-		shp->max_cmd_len = 16;
-
-	shp->max_id      = ha->tid_cnt;
-	shp->max_lun     = MAXLUN;
-	shp->max_channel = ha->bus_cnt;
-
-	spin_lock_init(&ha->smp_lock);
-	gdth_enable_int(ha);
-
-	error = scsi_add_host(shp, NULL);
-	if (error)
-		goto out_free_coal_stat;
-	list_add_tail(&ha->list, &gdth_instances);
-	gdth_timer_init();
-
-	scsi_scan_host(shp);
-
-	return 0;
-
- out_free_coal_stat:
-#ifdef INT_COAL
-	pci_free_consistent(ha->pdev, sizeof(gdth_coal_status) * MAXOFFSETS,
-				ha->coal_stat, ha->coal_stat_phys);
- out_free_pmsg:
-#endif
-	pci_free_consistent(ha->pdev, sizeof(gdth_msg_str),
-				ha->pmsg, ha->msg_phys);
- out_free_pscratch:
-	pci_free_consistent(ha->pdev, GDTH_SCRATCH,
-				ha->pscratch, ha->scratch_phys);
- out_dec_counters:
-	gdth_ctr_count--;
- out_free_irq:
-	free_irq(ha->irq, ha);
- out_host_put:
-	scsi_host_put(shp);
-	return error;
-}
-#endif /* CONFIG_ISA */
-
-#ifdef CONFIG_EISA
-static int __init gdth_eisa_probe_one(u16 eisa_slot)
-{
-	struct Scsi_Host *shp;
-	gdth_ha_str *ha;
-	dma_addr_t scratch_dma_handle = 0;
-	int error, i;
-
-	if (!gdth_search_eisa(eisa_slot))
-		return -ENXIO;
-
-	shp = scsi_host_alloc(&gdth_template, sizeof(gdth_ha_str));
-	if (!shp)
-		return -ENOMEM;
-	ha = shost_priv(shp);
-
-	error = -ENODEV;
-	if (!gdth_init_eisa(eisa_slot,ha))
-		goto out_host_put;
-
-	/* controller found and initialized */
-	printk("Configuring GDT-EISA HA at Slot %d IRQ %u\n",
-		eisa_slot >> 12, ha->irq);
-
-	error = request_irq(ha->irq, gdth_interrupt, 0, "gdth", ha);
-	if (error) {
-		printk("GDT-EISA: Unable to allocate IRQ\n");
-		goto out_host_put;
-	}
-
-	shp->unchecked_isa_dma = 0;
-	shp->irq = ha->irq;
-	shp->dma_channel = 0xff;
-
-	ha->hanum = gdth_ctr_count++;
-	ha->shost = shp;
-
-	TRACE2(("EISA detect Bus 0: hanum %d\n", ha->hanum));
-
-	ha->pccb = &ha->cmdext;
-	ha->ccb_phys = 0L;
-
-	error = -ENOMEM;
-
-	ha->pdev = NULL;
-	ha->pscratch = pci_alloc_consistent(ha->pdev, GDTH_SCRATCH,
-						&scratch_dma_handle);
-	if (!ha->pscratch)
-		goto out_free_irq;
-	ha->scratch_phys = scratch_dma_handle;
-
-	ha->pmsg = pci_alloc_consistent(ha->pdev, sizeof(gdth_msg_str),
-						&scratch_dma_handle);
-	if (!ha->pmsg)
-		goto out_free_pscratch;
-	ha->msg_phys = scratch_dma_handle;
-
-#ifdef INT_COAL
-	ha->coal_stat = pci_alloc_consistent(ha->pdev,
-			sizeof(gdth_coal_status) * MAXOFFSETS,
-			&scratch_dma_handle);
-	if (!ha->coal_stat)
-		goto out_free_pmsg;
-	ha->coal_stat_phys = scratch_dma_handle;
-#endif
-
-	ha->ccb_phys = pci_map_single(ha->pdev,ha->pccb,
-			sizeof(gdth_cmd_str), PCI_DMA_BIDIRECTIONAL);
-	if (!ha->ccb_phys)
-		goto out_free_coal_stat;
-
-	ha->scratch_busy = FALSE;
-	ha->req_first = NULL;
-	ha->tid_cnt = MAX_HDRIVES;
-	if (max_ids > 0 && max_ids < ha->tid_cnt)
-		ha->tid_cnt = max_ids;
-	for (i = 0; i < GDTH_MAXCMDS; ++i)
-		ha->cmd_tab[i].cmnd = UNUSED_CMND;
-	ha->scan_mode = rescan ? 0x10 : 0;
-
-	if (!gdth_search_drives(ha)) {
-		printk("GDT-EISA: Error during device scan\n");
-		error = -ENODEV;
-		goto out_free_ccb_phys;
-	}
-
-	if (hdr_channel < 0 || hdr_channel > ha->bus_cnt)
-		hdr_channel = ha->bus_cnt;
-	ha->virt_bus = hdr_channel;
-
-	if (ha->cache_feat & ha->raw_feat & ha->screen_feat & GDT_64BIT)
-		shp->max_cmd_len = 16;
-
-	shp->max_id      = ha->tid_cnt;
-	shp->max_lun     = MAXLUN;
-	shp->max_channel = ha->bus_cnt;
-
-	spin_lock_init(&ha->smp_lock);
-	gdth_enable_int(ha);
-
-	error = scsi_add_host(shp, NULL);
-	if (error)
-		goto out_free_ccb_phys;
-	list_add_tail(&ha->list, &gdth_instances);
-	gdth_timer_init();
-
-	scsi_scan_host(shp);
-
-	return 0;
-
- out_free_ccb_phys:
-	pci_unmap_single(ha->pdev,ha->ccb_phys, sizeof(gdth_cmd_str),
-			PCI_DMA_BIDIRECTIONAL);
- out_free_coal_stat:
-#ifdef INT_COAL
-	pci_free_consistent(ha->pdev, sizeof(gdth_coal_status) * MAXOFFSETS,
-				ha->coal_stat, ha->coal_stat_phys);
- out_free_pmsg:
-#endif
-	pci_free_consistent(ha->pdev, sizeof(gdth_msg_str),
-				ha->pmsg, ha->msg_phys);
- out_free_pscratch:
-	pci_free_consistent(ha->pdev, GDTH_SCRATCH,
-				ha->pscratch, ha->scratch_phys);
- out_free_irq:
-	free_irq(ha->irq, ha);
-	gdth_ctr_count--;
- out_host_put:
-	scsi_host_put(shp);
-	return error;
-}
-#endif /* CONFIG_EISA */
-
-#ifdef CONFIG_PCI
 static int gdth_pci_probe_one(gdth_pci_str *pcistr, gdth_ha_str **ha_out)
 {
 	struct Scsi_Host *shp;
@@ -4994,27 +4131,18 @@
 
 	error = -ENOMEM;
 
-	ha->pscratch = pci_alloc_consistent(ha->pdev, GDTH_SCRATCH,
-						&scratch_dma_handle);
+	ha->pscratch = dma_alloc_coherent(&ha->pdev->dev, GDTH_SCRATCH,
+				&scratch_dma_handle, GFP_KERNEL);
 	if (!ha->pscratch)
 		goto out_free_irq;
 	ha->scratch_phys = scratch_dma_handle;
 
-	ha->pmsg = pci_alloc_consistent(ha->pdev, sizeof(gdth_msg_str),
-					&scratch_dma_handle);
+	ha->pmsg = dma_alloc_coherent(&ha->pdev->dev, sizeof(gdth_msg_str),
+				&scratch_dma_handle, GFP_KERNEL);
 	if (!ha->pmsg)
 		goto out_free_pscratch;
 	ha->msg_phys = scratch_dma_handle;
 
-#ifdef INT_COAL
-	ha->coal_stat = pci_alloc_consistent(ha->pdev,
-			sizeof(gdth_coal_status) * MAXOFFSETS,
-			&scratch_dma_handle);
-	if (!ha->coal_stat)
-		goto out_free_pmsg;
-	ha->coal_stat_phys = scratch_dma_handle;
-#endif
-
 	ha->scratch_busy = FALSE;
 	ha->req_first = NULL;
 	ha->tid_cnt = pdev->device >= 0x200 ? MAXID : MAX_HDRIVES;
@@ -5027,7 +4155,7 @@
 	error = -ENODEV;
 	if (!gdth_search_drives(ha)) {
 		printk("GDT-PCI %d: Error during device scan\n", ha->hanum);
-		goto out_free_coal_stat;
+		goto out_free_pmsg;
 	}
 
 	if (hdr_channel < 0 || hdr_channel > ha->bus_cnt)
@@ -5037,19 +4165,19 @@
 	/* 64-bit DMA only supported from FW >= x.43 */
 	if (!(ha->cache_feat & ha->raw_feat & ha->screen_feat & GDT_64BIT) ||
 	    !ha->dma64_support) {
-		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
 			printk(KERN_WARNING "GDT-PCI %d: "
 				"Unable to set 32-bit DMA\n", ha->hanum);
-				goto out_free_coal_stat;
+				goto out_free_pmsg;
 		}
 	} else {
 		shp->max_cmd_len = 16;
-		if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
 			printk("GDT-PCI %d: 64-bit DMA enabled\n", ha->hanum);
-		} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+		} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
 			printk(KERN_WARNING "GDT-PCI %d: "
 				"Unable to set 64/32-bit DMA\n", ha->hanum);
-			goto out_free_coal_stat;
+			goto out_free_pmsg;
 		}
 	}
 
@@ -5062,7 +4190,7 @@
 
 	error = scsi_add_host(shp, &pdev->dev);
 	if (error)
-		goto out_free_coal_stat;
+		goto out_free_pmsg;
 	list_add_tail(&ha->list, &gdth_instances);
 
 	pci_set_drvdata(ha->pdev, ha);
@@ -5074,16 +4202,11 @@
 
 	return 0;
 
- out_free_coal_stat:
-#ifdef INT_COAL
-	pci_free_consistent(ha->pdev, sizeof(gdth_coal_status) * MAXOFFSETS,
-				ha->coal_stat, ha->coal_stat_phys);
  out_free_pmsg:
-#endif
-	pci_free_consistent(ha->pdev, sizeof(gdth_msg_str),
+	dma_free_coherent(&ha->pdev->dev, sizeof(gdth_msg_str),
 				ha->pmsg, ha->msg_phys);
  out_free_pscratch:
-	pci_free_consistent(ha->pdev, GDTH_SCRATCH,
+	dma_free_coherent(&ha->pdev->dev, GDTH_SCRATCH,
 				ha->pscratch, ha->scratch_phys);
  out_free_irq:
 	free_irq(ha->irq, ha);
@@ -5092,7 +4215,6 @@
 	scsi_host_put(shp);
 	return error;
 }
-#endif /* CONFIG_PCI */
 
 static void gdth_remove_one(gdth_ha_str *ha)
 {
@@ -5112,24 +4234,15 @@
 	if (shp->irq)
 		free_irq(shp->irq,ha);
 
-#ifdef CONFIG_ISA
-	if (shp->dma_channel != 0xff)
-		free_dma(shp->dma_channel);
-#endif
-#ifdef INT_COAL
-	if (ha->coal_stat)
-		pci_free_consistent(ha->pdev, sizeof(gdth_coal_status) *
-			MAXOFFSETS, ha->coal_stat, ha->coal_stat_phys);
-#endif
 	if (ha->pscratch)
-		pci_free_consistent(ha->pdev, GDTH_SCRATCH,
+		dma_free_coherent(&ha->pdev->dev, GDTH_SCRATCH,
 			ha->pscratch, ha->scratch_phys);
 	if (ha->pmsg)
-		pci_free_consistent(ha->pdev, sizeof(gdth_msg_str),
+		dma_free_coherent(&ha->pdev->dev, sizeof(gdth_msg_str),
 			ha->pmsg, ha->msg_phys);
 	if (ha->ccb_phys)
-		pci_unmap_single(ha->pdev,ha->ccb_phys,
-			sizeof(gdth_cmd_str),PCI_DMA_BIDIRECTIONAL);
+		dma_unmap_single(&ha->pdev->dev, ha->ccb_phys,
+			sizeof(gdth_cmd_str), DMA_BIDIRECTIONAL);
 
 	scsi_host_put(shp);
 }
@@ -5168,26 +4281,6 @@
 	gdth_clear_events();
 	timer_setup(&gdth_timer, gdth_timeout, 0);
 
-	/* As default we do not probe for EISA or ISA controllers */
-	if (probe_eisa_isa) {
-		/* scanning for controllers, at first: ISA controller */
-#ifdef CONFIG_ISA
-		u32 isa_bios;
-		for (isa_bios = 0xc8000UL; isa_bios <= 0xd8000UL;
-		                isa_bios += 0x8000UL)
-			gdth_isa_probe_one(isa_bios);
-#endif
-#ifdef CONFIG_EISA
-		{
-			u16 eisa_slot;
-			for (eisa_slot = 0x1000; eisa_slot <= 0x8000;
-			                         eisa_slot += 0x1000)
-				gdth_eisa_probe_one(eisa_slot);
-		}
-#endif
-	}
-
-#ifdef CONFIG_PCI
 	/* scanning for PCI controllers */
 	if (pci_register_driver(&gdth_pci_driver)) {
 		gdth_ha_str *ha;
@@ -5196,7 +4289,6 @@
 			gdth_remove_one(ha);
 		return -ENODEV;
 	}
-#endif /* CONFIG_PCI */
 
 	TRACE2(("gdth_detect() %d controller detected\n", gdth_ctr_count));
 
@@ -5217,9 +4309,7 @@
 	del_timer_sync(&gdth_timer);
 #endif
 
-#ifdef CONFIG_PCI
 	pci_unregister_driver(&gdth_pci_driver);
-#endif
 
 	list_for_each_entry(ha, &gdth_instances, list)
 		gdth_remove_one(ha);
diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h
index ee6ffcf..5a13d40 100644
--- a/drivers/scsi/gdth.h
+++ b/drivers/scsi/gdth.h
@@ -38,17 +38,9 @@
 #define OEM_ID_INTEL    0x8000
 
 /* controller classes */
-#define GDT_ISA         0x01                    /* ISA controller */
-#define GDT_EISA        0x02                    /* EISA controller */
 #define GDT_PCI         0x03                    /* PCI controller */
 #define GDT_PCINEW      0x04                    /* new PCI controller */
 #define GDT_PCIMPR      0x05                    /* PCI MPR controller */
-/* GDT_EISA, controller subtypes EISA */
-#define GDT3_ID         0x0130941c              /* GDT3000/3020 */
-#define GDT3A_ID        0x0230941c              /* GDT3000A/3020A/3050A */
-#define GDT3B_ID        0x0330941c              /* GDT3000B/3010A */
-/* GDT_ISA */
-#define GDT2_ID         0x0120941c              /* GDT2000/2020 */
 
 #ifndef PCI_DEVICE_ID_VORTEX_GDT60x0
 /* GDT_PCI */
@@ -281,17 +273,6 @@
 #define GDTH_DATA_IN    0x01000000L             /* data from target */
 #define GDTH_DATA_OUT   0x00000000L             /* data to target */
 
-/* BMIC registers (EISA controllers) */
-#define ID0REG          0x0c80                  /* board ID */
-#define EINTENABREG     0x0c89                  /* interrupt enable */
-#define SEMA0REG        0x0c8a                  /* command semaphore */
-#define SEMA1REG        0x0c8b                  /* status semaphore */
-#define LDOORREG        0x0c8d                  /* local doorbell */
-#define EDENABREG       0x0c8e                  /* EISA system doorbell enab. */
-#define EDOORREG        0x0c8f                  /* EISA system doorbell */
-#define MAILBOXREG      0x0c90                  /* mailbox reg. (16 bytes) */
-#define EISAREG         0x0cc0                  /* EISA configuration */
-
 /* other defines */
 #define LINUX_OS        8                       /* used for cache optim. */
 #define SECS32          0x1f                    /* round capacity */
@@ -706,21 +687,11 @@
     u8      fw_magic;                       /* contr. ID from firmware */
 } __attribute__((packed)) gdt_pci_sram;
 
-/* SRAM structure EISA controllers (but NOT GDT3000/3020) */
-typedef struct {
-    u8      os_used[16];                    /* OS code per service */
-    u16      need_deinit;                    /* switch betw. BIOS/driver */
-    u8      switch_support;                 /* see need_deinit */
-    u8      padding;
-} __attribute__((packed)) gdt_eisa_sram;
-
-
 /* DPRAM ISA controllers */
 typedef struct {
     union {
         struct {
             u8      bios_used[0x3c00-32];   /* 15KB - 32Bytes BIOS */
-            u32     magic;                  /* controller (EISA) ID */
             u16      need_deinit;            /* switch betw. BIOS/driver */
             u8      switch_support;         /* see need_deinit */
             u8      padding[9];
@@ -843,7 +814,6 @@
     u16              cache_feat;             /* feat. cache serv. (s/g,..)*/
     u16              raw_feat;               /* feat. raw service (s/g,..)*/
     u16              screen_feat;            /* feat. raw service (s/g,..)*/
-    u16              bmic;                   /* BMIC address (EISA) */
     void __iomem        *brd;                   /* DPRAM address */
     u32             brd_phys;               /* slot number/BIOS address */
     gdt6c_plx_regs      *plx;                   /* PLX regs (new PCI contr.) */
diff --git a/drivers/scsi/gdth_ioctl.h b/drivers/scsi/gdth_ioctl.h
index 4c91894..ee4c9bf 100644
--- a/drivers/scsi/gdth_ioctl.h
+++ b/drivers/scsi/gdth_ioctl.h
@@ -27,11 +27,7 @@
 #define GDTH_MAXSG      32                      /* max. s/g elements */
 
 #define MAX_LDRIVES     255                     /* max. log. drive count */
-#ifdef GDTH_IOCTL_PROC
-#define MAX_HDRIVES     100                     /* max. host drive count */
-#else
 #define MAX_HDRIVES     MAX_LDRIVES             /* max. host drive count */
-#endif
 
 /* scatter/gather element */
 typedef struct {
@@ -178,91 +174,6 @@
     gdth_evt_data   event_data;
 } __attribute__((packed)) gdth_evt_str;
 
-
-#ifdef GDTH_IOCTL_PROC
-/* IOCTL structure (write) */
-typedef struct {
-    u32                 magic;              /* IOCTL magic */
-    u16                  ioctl;              /* IOCTL */
-    u16                  ionode;             /* controller number */
-    u16                  service;            /* controller service */
-    u16                  timeout;            /* timeout */
-    union {
-        struct {
-            u8          command[512];       /* controller command */
-            u8          data[1];            /* add. data */
-        } general;
-        struct {
-            u8          lock;               /* lock/unlock */
-            u8          drive_cnt;          /* drive count */
-            u16          drives[MAX_HDRIVES];/* drives */
-        } lockdrv;
-        struct {
-            u8          lock;               /* lock/unlock */
-            u8          channel;            /* channel */
-        } lockchn;
-        struct {
-            int             erase;              /* erase event ? */
-            int             handle;
-            u8          evt[EVENT_SIZE];    /* event structure */
-        } event;
-        struct {
-            u8          bus;                /* SCSI bus */
-            u8          target;             /* target ID */
-            u8          lun;                /* LUN */
-            u8          cmd_len;            /* command length */
-            u8          cmd[12];            /* SCSI command */
-        } scsi;
-        struct {
-            u16          hdr_no;             /* host drive number */
-            u8          flag;               /* old meth./add/remove */
-        } rescan;
-    } iu;
-} gdth_iowr_str;
-
-/* IOCTL structure (read) */
-typedef struct {
-    u32                 size;               /* buffer size */
-    u32                 status;             /* IOCTL error code */
-    union {
-        struct {
-            u8          data[1];            /* data */
-        } general;
-        struct {
-            u16          version;            /* driver version */
-        } drvers;
-        struct {
-            u8          type;               /* controller type */
-            u16          info;               /* slot etc. */
-            u16          oem_id;             /* OEM ID */
-            u16          bios_ver;           /* not used */
-            u16          access;             /* not used */
-            u16          ext_type;           /* extended type */
-            u16          device_id;          /* device ID */
-            u16          sub_device_id;      /* sub device ID */
-        } ctrtype;
-        struct {
-            u8          version;            /* OS version */
-            u8          subversion;         /* OS subversion */
-            u16          revision;           /* revision */
-        } osvers;
-        struct {
-            u16          count;              /* controller count */
-        } ctrcnt;
-        struct {
-            int             handle;
-            u8          evt[EVENT_SIZE];    /* event structure */
-        } event;
-        struct {
-            u8          bus;                /* SCSI bus, 0xff: invalid */
-            u8          target;             /* target ID */
-            u8          lun;                /* LUN */
-            u8          cluster_type;       /* cluster properties */
-        } hdr_list[MAX_HDRIVES];                /* index is host drive number */
-    } iu;
-} gdth_iord_str;
-#endif
-
 /* GDTIOCTL_GENERAL */
 typedef struct {
     u16 ionode;                              /* controller number */
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
index 3a9751a..381d849 100644
--- a/drivers/scsi/gdth_proc.c
+++ b/drivers/scsi/gdth_proc.c
@@ -31,7 +31,6 @@
     int i, found;
     gdth_cmd_str    gdtcmd;
     gdth_cpar_str   *pcpar;
-    u64         paddr;
 
     char            cmnd[MAX_COMMAND_SIZE];
     memset(cmnd, 0xff, 12);
@@ -113,13 +112,23 @@
     }
 
     if (wb_mode) {
-        if (!gdth_ioctl_alloc(ha, sizeof(gdth_cpar_str), TRUE, &paddr))
-            return(-EBUSY);
+	unsigned long flags;
+
+	BUILD_BUG_ON(sizeof(gdth_cpar_str) > GDTH_SCRATCH);
+
+	spin_lock_irqsave(&ha->smp_lock, flags);
+	if (ha->scratch_busy) {
+	    spin_unlock_irqrestore(&ha->smp_lock, flags);
+            return -EBUSY;
+	}
+	ha->scratch_busy = TRUE;
+	spin_unlock_irqrestore(&ha->smp_lock, flags);
+
         pcpar = (gdth_cpar_str *)ha->pscratch;
         memcpy( pcpar, &ha->cpar, sizeof(gdth_cpar_str) );
         gdtcmd.Service = CACHESERVICE;
         gdtcmd.OpCode = GDT_IOCTL;
-        gdtcmd.u.ioctl.p_param = paddr;
+        gdtcmd.u.ioctl.p_param = ha->scratch_phys;
         gdtcmd.u.ioctl.param_size = sizeof(gdth_cpar_str);
         gdtcmd.u.ioctl.subfunc = CACHE_CONFIG;
         gdtcmd.u.ioctl.channel = INVALID_CHANNEL;
@@ -127,7 +136,10 @@
 
         gdth_execute(host, &gdtcmd, cmnd, 30, NULL);
 
-        gdth_ioctl_free(ha, GDTH_SCRATCH, ha->pscratch, paddr);
+	spin_lock_irqsave(&ha->smp_lock, flags);
+	ha->scratch_busy = FALSE;
+	spin_unlock_irqrestore(&ha->smp_lock, flags);
+
         printk("Done.\n");
         return(orig_length);
     }
@@ -143,7 +155,7 @@
     int id, i, j, k, sec, flag;
     int no_mdrv = 0, drv_no, is_mirr;
     u32 cnt;
-    u64 paddr;
+    dma_addr_t paddr;
     int rc = -ENOMEM;
 
     gdth_cmd_str *gdtcmd;
@@ -217,20 +229,14 @@
                        " Serial No.:   \t0x%8X\tCache RAM size:\t%d KB\n",
                        ha->binfo.ser_no, ha->binfo.memsize / 1024);
 
-#ifdef GDTH_DMA_STATISTICS
-    /* controller statistics */
-    seq_puts(m, "\nController Statistics:\n");
-    seq_printf(m,
-                   " 32-bit DMA buffer:\t%lu\t64-bit DMA buffer:\t%lu\n",
-                   ha->dma32_cnt, ha->dma64_cnt);
-#endif
-
     if (ha->more_proc) {
+        size_t size = max_t(size_t, GDTH_SCRATCH, sizeof(gdth_hget_str));
+
         /* more information: 2. about physical devices */
         seq_puts(m, "\nPhysical Devices:");
         flag = FALSE;
             
-        buf = gdth_ioctl_alloc(ha, GDTH_SCRATCH, FALSE, &paddr);
+        buf = dma_alloc_coherent(&ha->pdev->dev, size, &paddr, GFP_KERNEL);
         if (!buf) 
             goto stop_output;
         for (i = 0; i < ha->bus_cnt; ++i) {
@@ -323,7 +329,6 @@
                 }
             }
         }
-        gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
 
         if (!flag)
             seq_puts(m, "\n --\n");
@@ -332,9 +337,6 @@
         seq_puts(m, "\nLogical Drives:");
         flag = FALSE;
 
-        buf = gdth_ioctl_alloc(ha, GDTH_SCRATCH, FALSE, &paddr);
-        if (!buf) 
-            goto stop_output;
         for (i = 0; i < MAX_LDRIVES; ++i) {
             if (!ha->hdr[i].is_logdrv)
                 continue;
@@ -408,8 +410,7 @@
             seq_printf(m,
                            " To Array Drv.:\t%s\n", hrec);
         }       
-        gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
-        
+
         if (!flag)
             seq_puts(m, "\n --\n");
 
@@ -417,9 +418,6 @@
         seq_puts(m, "\nArray Drives:");
         flag = FALSE;
 
-        buf = gdth_ioctl_alloc(ha, GDTH_SCRATCH, FALSE, &paddr);
-        if (!buf) 
-            goto stop_output;
         for (i = 0; i < MAX_LDRIVES; ++i) {
             if (!(ha->hdr[i].is_arraydrv && ha->hdr[i].is_master))
                 continue;
@@ -468,8 +466,7 @@
                                hrec);
             }
         }
-        gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
-        
+
         if (!flag)
             seq_puts(m, "\n --\n");
 
@@ -477,9 +474,6 @@
         seq_puts(m, "\nHost Drives:");
         flag = FALSE;
 
-        buf = gdth_ioctl_alloc(ha, sizeof(gdth_hget_str), FALSE, &paddr);
-        if (!buf) 
-            goto stop_output;
         for (i = 0; i < MAX_LDRIVES; ++i) {
             if (!ha->hdr[i].is_logdrv || 
                 (ha->hdr[i].is_arraydrv && !ha->hdr[i].is_master))
@@ -510,7 +504,7 @@
                 }
             }
         }
-        gdth_ioctl_free(ha, sizeof(gdth_hget_str), buf, paddr);
+        dma_free_coherent(&ha->pdev->dev, size, buf, paddr);
 
         for (i = 0; i < MAX_HDRIVES; ++i) {
             if (!(ha->hdr[i].present))
@@ -563,65 +557,6 @@
     return rc;
 }
 
-static char *gdth_ioctl_alloc(gdth_ha_str *ha, int size, int scratch,
-                              u64 *paddr)
-{
-    unsigned long flags;
-    char *ret_val;
-
-    if (size == 0)
-        return NULL;
-
-    spin_lock_irqsave(&ha->smp_lock, flags);
-
-    if (!ha->scratch_busy && size <= GDTH_SCRATCH) {
-        ha->scratch_busy = TRUE;
-        ret_val = ha->pscratch;
-        *paddr = ha->scratch_phys;
-    } else if (scratch) {
-        ret_val = NULL;
-    } else {
-        dma_addr_t dma_addr;
-
-        ret_val = pci_alloc_consistent(ha->pdev, size, &dma_addr);
-        *paddr = dma_addr;
-    }
-
-    spin_unlock_irqrestore(&ha->smp_lock, flags);
-    return ret_val;
-}
-
-static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, u64 paddr)
-{
-    unsigned long flags;
-
-    if (buf == ha->pscratch) {
-	spin_lock_irqsave(&ha->smp_lock, flags);
-        ha->scratch_busy = FALSE;
-	spin_unlock_irqrestore(&ha->smp_lock, flags);
-    } else {
-        pci_free_consistent(ha->pdev, size, buf, paddr);
-    }
-}
-
-#ifdef GDTH_IOCTL_PROC
-static int gdth_ioctl_check_bin(gdth_ha_str *ha, u16 size)
-{
-    unsigned long flags;
-    int ret_val;
-
-    spin_lock_irqsave(&ha->smp_lock, flags);
-
-    ret_val = FALSE;
-    if (ha->scratch_busy) {
-        if (((gdth_iord_str *)ha->pscratch)->size == (u32)size)
-            ret_val = TRUE;
-    }
-    spin_unlock_irqrestore(&ha->smp_lock, flags);
-    return ret_val;
-}
-#endif
-
 static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id)
 {
     unsigned long flags;
diff --git a/drivers/scsi/gdth_proc.h b/drivers/scsi/gdth_proc.h
index d7d0aa2..4cc5377 100644
--- a/drivers/scsi/gdth_proc.h
+++ b/drivers/scsi/gdth_proc.h
@@ -12,9 +12,6 @@
 static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
                              int length, gdth_ha_str *ha);
 
-static char *gdth_ioctl_alloc(gdth_ha_str *ha, int size, int scratch,
-                              u64 *paddr);
-static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, u64 paddr);
 static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id);
 
 #endif
diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c
index a27fc49..11df0ec 100644
--- a/drivers/scsi/gvp11.c
+++ b/drivers/scsi/gvp11.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 #include <linux/types.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
@@ -184,7 +185,7 @@
 	.this_id		= 7,
 	.sg_tablesize		= SG_ALL,
 	.cmd_per_lun		= CMD_PER_LUN,
-	.use_clustering		= DISABLE_CLUSTERING
+	.dma_boundary		= PAGE_SIZE - 1,
 };
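
Context for the template change above: the use_clustering flag was removed
from scsi_host_template, and drivers that set DISABLE_CLUSTERING now state
the constraint directly. A sketch of the equivalence (assumed fragment, not
from this driver):

static struct scsi_host_template sketch_template = {
	/* DISABLE_CLUSTERING kept each scatter/gather segment within one
	 * page; a per-page DMA boundary expresses the same limit. */
	.dma_boundary		= PAGE_SIZE - 1,
};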
 
 static int check_wd33c93(struct gvp11_scsiregs *regs)
diff --git a/drivers/scsi/hisi_sas/Kconfig b/drivers/scsi/hisi_sas/Kconfig
index 57183fc..90a1745 100644
--- a/drivers/scsi/hisi_sas/Kconfig
+++ b/drivers/scsi/hisi_sas/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 config SCSI_HISI_SAS
 	tristate "HiSilicon SAS"
 	depends on HAS_IOMEM
diff --git a/drivers/scsi/hisi_sas/Makefile b/drivers/scsi/hisi_sas/Makefile
index 24623f2..742e732 100644
--- a/drivers/scsi/hisi_sas/Makefile
+++ b/drivers/scsi/hisi_sas/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 obj-$(CONFIG_SCSI_HISI_SAS)		+= hisi_sas_main.o
 obj-$(CONFIG_SCSI_HISI_SAS)		+= hisi_sas_v1_hw.o hisi_sas_v2_hw.o
 obj-$(CONFIG_SCSI_HISI_SAS_PCI)		+= hisi_sas_v3_hw.o
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 6c7d2e2..720c4d6 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -1,12 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * Copyright (c) 2015 Linaro Ltd.
  * Copyright (c) 2015 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
  */
 
 #ifndef _HISI_SAS_H_
@@ -14,9 +9,11 @@
 
 #include <linux/acpi.h>
 #include <linux/clk.h>
+#include <linux/debugfs.h>
 #include <linux/dmapool.h>
 #include <linux/iopoll.h>
 #include <linux/lcm.h>
+#include <linux/libata.h>
 #include <linux/mfd/syscon.h>
 #include <linux/module.h>
 #include <linux/of_address.h>
@@ -29,45 +26,65 @@
 
 #define HISI_SAS_MAX_PHYS	9
 #define HISI_SAS_MAX_QUEUES	32
-#define HISI_SAS_QUEUE_SLOTS 512
+#define HISI_SAS_QUEUE_SLOTS	4096
 #define HISI_SAS_MAX_ITCT_ENTRIES 1024
 #define HISI_SAS_MAX_DEVICES HISI_SAS_MAX_ITCT_ENTRIES
 #define HISI_SAS_RESET_BIT	0
 #define HISI_SAS_REJECT_CMD_BIT	1
+#define HISI_SAS_MAX_COMMANDS (HISI_SAS_QUEUE_SLOTS)
+#define HISI_SAS_RESERVED_IPTT  96
+#define HISI_SAS_UNRESERVED_IPTT \
+	(HISI_SAS_MAX_COMMANDS - HISI_SAS_RESERVED_IPTT)
+
+#define HISI_SAS_IOST_ITCT_CACHE_NUM 64
+#define HISI_SAS_IOST_ITCT_CACHE_DW_SZ 10
 
 #define HISI_SAS_STATUS_BUF_SZ (sizeof(struct hisi_sas_status_buffer))
 #define HISI_SAS_COMMAND_TABLE_SZ (sizeof(union hisi_sas_command_table))
 
 #define hisi_sas_status_buf_addr(buf) \
-	(buf + offsetof(struct hisi_sas_slot_buf_table, status_buffer))
-#define hisi_sas_status_buf_addr_mem(slot) hisi_sas_status_buf_addr(slot->buf)
+	((buf) + offsetof(struct hisi_sas_slot_buf_table, status_buffer))
+#define hisi_sas_status_buf_addr_mem(slot) hisi_sas_status_buf_addr((slot)->buf)
 #define hisi_sas_status_buf_addr_dma(slot) \
-	hisi_sas_status_buf_addr(slot->buf_dma)
+	hisi_sas_status_buf_addr((slot)->buf_dma)
 
 #define hisi_sas_cmd_hdr_addr(buf) \
-	(buf + offsetof(struct hisi_sas_slot_buf_table, command_header))
-#define hisi_sas_cmd_hdr_addr_mem(slot) hisi_sas_cmd_hdr_addr(slot->buf)
-#define hisi_sas_cmd_hdr_addr_dma(slot) hisi_sas_cmd_hdr_addr(slot->buf_dma)
+	((buf) + offsetof(struct hisi_sas_slot_buf_table, command_header))
+#define hisi_sas_cmd_hdr_addr_mem(slot) hisi_sas_cmd_hdr_addr((slot)->buf)
+#define hisi_sas_cmd_hdr_addr_dma(slot) hisi_sas_cmd_hdr_addr((slot)->buf_dma)
 
 #define hisi_sas_sge_addr(buf) \
-	(buf + offsetof(struct hisi_sas_slot_buf_table, sge_page))
-#define hisi_sas_sge_addr_mem(slot) hisi_sas_sge_addr(slot->buf)
-#define hisi_sas_sge_addr_dma(slot) hisi_sas_sge_addr(slot->buf_dma)
+	((buf) + offsetof(struct hisi_sas_slot_buf_table, sge_page))
+#define hisi_sas_sge_addr_mem(slot) hisi_sas_sge_addr((slot)->buf)
+#define hisi_sas_sge_addr_dma(slot) hisi_sas_sge_addr((slot)->buf_dma)
+
+#define hisi_sas_sge_dif_addr(buf) \
+	((buf) + offsetof(struct hisi_sas_slot_dif_buf_table, sge_dif_page))
+#define hisi_sas_sge_dif_addr_mem(slot) hisi_sas_sge_dif_addr((slot)->buf)
+#define hisi_sas_sge_dif_addr_dma(slot) hisi_sas_sge_dif_addr((slot)->buf_dma)
 
 #define HISI_SAS_MAX_SSP_RESP_SZ (sizeof(struct ssp_frame_hdr) + 1024)
 #define HISI_SAS_MAX_SMP_RESP_SZ 1028
 #define HISI_SAS_MAX_STP_RESP_SZ 28
 
-#define DEV_IS_EXPANDER(type) \
-	((type == SAS_EDGE_EXPANDER_DEVICE) || \
-	(type == SAS_FANOUT_EXPANDER_DEVICE))
-
 #define HISI_SAS_SATA_PROTOCOL_NONDATA		0x1
 #define HISI_SAS_SATA_PROTOCOL_PIO			0x2
 #define HISI_SAS_SATA_PROTOCOL_DMA			0x4
 #define HISI_SAS_SATA_PROTOCOL_FPDMA		0x8
 #define HISI_SAS_SATA_PROTOCOL_ATAPI		0x10
 
+#define HISI_SAS_DIF_PROT_MASK (SHOST_DIF_TYPE1_PROTECTION | \
+				SHOST_DIF_TYPE2_PROTECTION | \
+				SHOST_DIF_TYPE3_PROTECTION)
+
+#define HISI_SAS_DIX_PROT_MASK (SHOST_DIX_TYPE1_PROTECTION | \
+				SHOST_DIX_TYPE2_PROTECTION | \
+				SHOST_DIX_TYPE3_PROTECTION)
+
+#define HISI_SAS_PROT_MASK (HISI_SAS_DIF_PROT_MASK | HISI_SAS_DIX_PROT_MASK)
+
+#define HISI_SAS_WAIT_PHYUP_TIMEOUT 20
+
 struct hisi_hba;
 
 enum {
@@ -76,8 +93,8 @@
 };
 
 enum dev_status {
+	HISI_SAS_DEV_INIT,
 	HISI_SAS_DEV_NORMAL,
-	HISI_SAS_DEV_EH,
 };
 
 enum {
@@ -117,7 +134,6 @@
 
 #define HISI_SAS_DECLARE_RST_WORK_ON_STACK(r) \
 	DECLARE_COMPLETION_ONSTACK(c); \
-	DECLARE_WORK(w, hisi_sas_sync_rst_work_handler); \
 	struct hisi_sas_rst r = HISI_SAS_RST_WORK_INIT(r, c)
 
 enum hisi_sas_bit_err_type {
@@ -138,6 +154,7 @@
 	struct asd_sas_phy	sas_phy;
 	struct sas_identify	identify;
 	struct completion *reset_completion;
+	struct timer_list timer;
 	spinlock_t lock;
 	u64		port_id; /* from hw */
 	u64		frame_rcvd_size;
@@ -146,8 +163,10 @@
 	u8		in_reset;
 	u8		reserved[2];
 	u32		phy_type;
+	u32		code_violation_err_count;
 	enum sas_linkrate	minimum_linkrate;
 	enum sas_linkrate	maximum_linkrate;
+	int enable;
 };
 
 struct hisi_sas_port {
@@ -158,6 +177,7 @@
 
 struct hisi_sas_cq {
 	struct hisi_hba *hisi_hba;
+	const struct cpumask *pci_irq_mask;
 	struct tasklet_struct tasklet;
 	int	rd_point;
 	int	id;
@@ -178,9 +198,10 @@
 	struct hisi_sas_dq	*dq;
 	struct list_head	list;
 	enum sas_device_type	dev_type;
+	enum dev_status dev_status;
 	int device_id;
 	int sata_idx;
-	u8 dev_status;
+	spinlock_t lock; /* For protecting slots */
 };
 
 struct hisi_sas_tmf_task {
@@ -196,12 +217,14 @@
 	struct sas_task *task;
 	struct hisi_sas_port	*port;
 	u64	n_elem;
+	u64	n_elem_dif;
 	int	dlvry_queue;
 	int	dlvry_queue_slot;
 	int	cmplt_queue;
 	int	cmplt_queue_slot;
 	int	abort;
 	int	ready;
+	int	device_id;
 	void	*cmd_hdr;
 	dma_addr_t cmd_hdr_dma;
 	struct timer_list internal_abort_timer;
@@ -210,18 +233,51 @@
 	/* Do not reorder/change members after here */
 	void	*buf;
 	dma_addr_t buf_dma;
-	int	idx;
+	u16	idx;
+};
+
+#define HISI_SAS_DEBUGFS_REG(x) {#x, x}
+
+struct hisi_sas_debugfs_reg_lu {
+	char *name;
+	int off;
+};
+
+struct hisi_sas_debugfs_reg {
+	const struct hisi_sas_debugfs_reg_lu *lu;
+	int count;
+	int base_off;
+	union {
+		u32 (*read_global_reg)(struct hisi_hba *hisi_hba, u32 off);
+		u32 (*read_port_reg)(struct hisi_hba *hisi_hba, int port,
+				     u32 off);
+	};
+};
+
+struct hisi_sas_iost_itct_cache {
+	u32 data[HISI_SAS_IOST_ITCT_CACHE_DW_SZ];
+};
+
+enum hisi_sas_debugfs_reg_array_member {
+	DEBUGFS_GLOBAL = 0,
+	DEBUGFS_AXI,
+	DEBUGFS_RAS,
+	DEBUGFS_REGS_NUM
+};
+
+enum hisi_sas_debugfs_cache_type {
+	HISI_SAS_ITCT_CACHE,
+	HISI_SAS_IOST_CACHE,
 };
 
 struct hisi_sas_hw {
 	int (*hw_init)(struct hisi_hba *hisi_hba);
 	void (*setup_itct)(struct hisi_hba *hisi_hba,
 			   struct hisi_sas_device *device);
-	int (*slot_index_alloc)(struct hisi_hba *hisi_hba, int *slot_idx,
+	int (*slot_index_alloc)(struct hisi_hba *hisi_hba,
 				struct domain_device *device);
 	struct hisi_sas_device *(*alloc_dev)(struct domain_device *device);
-	void (*sl_notify)(struct hisi_hba *hisi_hba, int phy_no);
-	int (*get_free_slot)(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq);
+	void (*sl_notify_ssp)(struct hisi_hba *hisi_hba, int phy_no);
 	void (*start_delivery)(struct hisi_sas_dq *dq);
 	void (*prep_ssp)(struct hisi_hba *hisi_hba,
 			struct hisi_sas_slot *slot);
@@ -232,8 +288,6 @@
 	void (*prep_abort)(struct hisi_hba *hisi_hba,
 			  struct hisi_sas_slot *slot,
 			  int device_id, int abort_flag, int tag_to_abort);
-	int (*slot_complete)(struct hisi_hba *hisi_hba,
-			     struct hisi_sas_slot *slot);
 	void (*phys_init)(struct hisi_hba *hisi_hba);
 	void (*phy_start)(struct hisi_hba *hisi_hba, int phy_no);
 	void (*phy_disable)(struct hisi_hba *hisi_hba, int phy_no);
@@ -254,9 +308,17 @@
 				u8 reg_index, u8 reg_count, u8 *write_data);
 	void (*wait_cmds_complete_timeout)(struct hisi_hba *hisi_hba,
 					   int delay_ms, int timeout_ms);
-	int max_command_entries;
+	void (*snapshot_prepare)(struct hisi_hba *hisi_hba);
+	void (*snapshot_restore)(struct hisi_hba *hisi_hba);
+	int (*set_bist)(struct hisi_hba *hisi_hba, bool enable);
+	void (*read_iost_itct_cache)(struct hisi_hba *hisi_hba,
+				     enum hisi_sas_debugfs_cache_type type,
+				     u32 *cache);
 	int complete_hdr_size;
 	struct scsi_host_template *sht;
+
+	const struct hisi_sas_debugfs_reg *debugfs_reg_array[DEBUGFS_REGS_NUM];
+	const struct hisi_sas_debugfs_reg *debugfs_reg_port;
 };
 
 struct hisi_hba {
@@ -267,6 +329,8 @@
 	struct pci_dev *pci_dev;
 	struct device *dev;
 
+	int prot_mask;
+
 	void __iomem *regs;
 	void __iomem *sgpio_regs;
 	struct regmap *ctrl;
@@ -320,7 +384,37 @@
 	const struct hisi_sas_hw *hw;	/* Low level hw interface */
 	unsigned long sata_dev_bitmap[BITS_TO_LONGS(HISI_SAS_MAX_DEVICES)];
 	struct work_struct rst_work;
+	struct work_struct debugfs_work;
 	u32 phy_state;
+	u32 intr_coal_ticks;	/* Time of interrupt coalesce in us */
+	u32 intr_coal_count;	/* Interrupt count to coalesce */
+
+	int cq_nvecs;
+	unsigned int *reply_map;
+
+	/* bist */
+	enum sas_linkrate debugfs_bist_linkrate;
+	int debugfs_bist_code_mode;
+	int debugfs_bist_phy_no;
+	int debugfs_bist_mode;
+	u32 debugfs_bist_cnt;
+	int debugfs_bist_enable;
+
+	/* debugfs memories */
+	/* Put Global AXI and RAS Register into register array */
+	u32 *debugfs_regs[DEBUGFS_REGS_NUM];
+	u32 *debugfs_port_reg[HISI_SAS_MAX_PHYS];
+	void *debugfs_complete_hdr[HISI_SAS_MAX_QUEUES];
+	struct hisi_sas_cmd_hdr	*debugfs_cmd_hdr[HISI_SAS_MAX_QUEUES];
+	struct hisi_sas_iost *debugfs_iost;
+	struct hisi_sas_itct *debugfs_itct;
+	u64 *debugfs_iost_cache;
+	u64 *debugfs_itct_cache;
+
+	struct dentry *debugfs_dir;
+	struct dentry *debugfs_dump_dentry;
+	struct dentry *debugfs_bist_dentry;
+	bool debugfs_snapshot;
 };
 
 /* Generic HW DMA host memory structures */
@@ -414,11 +508,16 @@
 	u8	atapi_cdb[ATAPI_CDB_LEN];
 };
 
-#define HISI_SAS_SGE_PAGE_CNT SG_CHUNK_SIZE
+#define HISI_SAS_SGE_PAGE_CNT (124)
 struct hisi_sas_sge_page {
 	struct hisi_sas_sge sge[HISI_SAS_SGE_PAGE_CNT];
 }  __aligned(16);
 
+#define HISI_SAS_SGE_DIF_PAGE_CNT   HISI_SAS_SGE_PAGE_CNT
+struct hisi_sas_sge_dif_page {
+	struct hisi_sas_sge sge[HISI_SAS_SGE_DIF_PAGE_CNT];
+}  __aligned(16);
+
 struct hisi_sas_command_table_ssp {
 	struct ssp_frame_hdr hdr;
 	union {
@@ -449,16 +548,24 @@
 	struct hisi_sas_sge_page sge_page;
 };
 
+struct hisi_sas_slot_dif_buf_table {
+	struct hisi_sas_slot_buf_table slot_buf;
+	struct hisi_sas_sge_dif_page sge_dif_page;
+};
+
 extern struct scsi_transport_template *hisi_sas_stt;
+
+extern bool hisi_sas_debugfs_enable;
+extern struct dentry *hisi_sas_debugfs_dir;
+
 extern void hisi_sas_stop_phys(struct hisi_hba *hisi_hba);
-extern int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost);
+extern int hisi_sas_alloc(struct hisi_hba *hisi_hba);
 extern void hisi_sas_free(struct hisi_hba *hisi_hba);
 extern u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis,
 				int direction);
 extern struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port);
 extern void hisi_sas_sata_done(struct sas_task *task,
 			    struct hisi_sas_slot *slot);
-extern int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag);
 extern int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba);
 extern int hisi_sas_probe(struct platform_device *pdev,
 			  const struct hisi_sas_hw *ops);
@@ -467,8 +574,9 @@
 extern int hisi_sas_slave_configure(struct scsi_device *sdev);
 extern int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time);
 extern void hisi_sas_scan_start(struct Scsi_Host *shost);
-extern struct device_attribute *host_attrs[];
 extern int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type);
+extern void hisi_sas_phy_enable(struct hisi_hba *hisi_hba, int phy_no,
+				int enable);
 extern void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy);
 extern void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba,
 				    struct sas_task *task,
@@ -477,10 +585,14 @@
 extern void hisi_sas_rst_work_handler(struct work_struct *work);
 extern void hisi_sas_sync_rst_work_handler(struct work_struct *work);
 extern void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba);
+extern void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no);
 extern bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
 				enum hisi_sas_phy_event event);
 extern void hisi_sas_release_tasks(struct hisi_hba *hisi_hba);
 extern u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max);
 extern void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba);
 extern void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba);
+extern void hisi_sas_debugfs_init(struct hisi_hba *hisi_hba);
+extern void hisi_sas_debugfs_exit(struct hisi_hba *hisi_hba);
+extern void hisi_sas_debugfs_work_handler(struct work_struct *work);
 #endif
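
The n_elem_dif slot field and sge_dif_page buffers added above support T10
protection information: a command with PI carries a second scatterlist for
the protection bytes, which must be mapped and unmapped alongside the data.
A hedged sketch of the extra mapping step, using the standard midlayer
accessors scsi_prot_sglist()/scsi_prot_sg_count() (the function name and
error handling are illustrative):

static int sketch_map_pi(struct device *dev, struct scsi_cmnd *scsi_cmnd,
			 struct sas_task *task, struct hisi_sas_slot *slot)
{
	int n_elem_dif = 0;

	if (scsi_prot_sg_count(scsi_cmnd)) {
		n_elem_dif = dma_map_sg(dev, scsi_prot_sglist(scsi_cmnd),
					scsi_prot_sg_count(scsi_cmnd),
					task->data_dir);
		if (!n_elem_dif)
			return -ENOMEM;
	}
	/* remembered so hisi_sas_slot_task_free() can unmap this list too */
	slot->n_elem_dif = n_elem_dif;
	return 0;
}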
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index a4e2e6a..0847e68 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -1,12 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Copyright (c) 2015 Linaro Ltd.
  * Copyright (c) 2015 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
  */
 
 #include "hisi_sas.h"
@@ -123,28 +118,13 @@
 }
 EXPORT_SYMBOL_GPL(hisi_sas_sata_done);
 
-int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag)
-{
-	struct ata_queued_cmd *qc = task->uldd_task;
-
-	if (qc) {
-		if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
-			qc->tf.command == ATA_CMD_FPDMA_READ) {
-			*tag = qc->tag;
-			return 1;
-		}
-	}
-	return 0;
-}
-EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag);
-
 /*
  * This function assumes linkrate mask fits in 8 bits, which it
  * does for all HW versions supported.
  */
 u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max)
 {
-	u16 rate = 0;
+	u8 rate = 0;
 	int i;
 
 	max -= SAS_LINK_RATE_1_5_GBPS;
@@ -170,7 +150,7 @@
 	int phy_no;
 
 	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
-		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
+		hisi_sas_phy_enable(hisi_hba, phy_no, 0);
 }
 EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);
 
@@ -183,7 +163,14 @@
 
 static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
 {
-	hisi_sas_slot_index_clear(hisi_hba, slot_idx);
+	unsigned long flags;
+
+	if (hisi_hba->hw->slot_index_alloc ||
+	    slot_idx >= HISI_SAS_UNRESERVED_IPTT) {
+		spin_lock_irqsave(&hisi_hba->lock, flags);
+		hisi_sas_slot_index_clear(hisi_hba, slot_idx);
+		spin_unlock_irqrestore(&hisi_hba->lock, flags);
+	}
 }
 
 static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
@@ -193,24 +180,33 @@
 	set_bit(slot_idx, bitmap);
 }
 
-static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx)
+static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
+				     struct scsi_cmnd *scsi_cmnd)
 {
-	unsigned int index;
+	int index;
 	void *bitmap = hisi_hba->slot_index_tags;
+	unsigned long flags;
 
+	if (scsi_cmnd)
+		return scsi_cmnd->request->tag;
+
+	spin_lock_irqsave(&hisi_hba->lock, flags);
 	index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
-			hisi_hba->last_slot_index + 1);
+				   hisi_hba->last_slot_index + 1);
 	if (index >= hisi_hba->slot_index_count) {
-		index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
-					   0);
-		if (index >= hisi_hba->slot_index_count)
+		index = find_next_zero_bit(bitmap,
+				hisi_hba->slot_index_count,
+				HISI_SAS_UNRESERVED_IPTT);
+		if (index >= hisi_hba->slot_index_count) {
+			spin_unlock_irqrestore(&hisi_hba->lock, flags);
 			return -SAS_QUEUE_FULL;
+		}
 	}
 	hisi_sas_slot_index_set(hisi_hba, index);
-	*slot_idx = index;
 	hisi_hba->last_slot_index = index;
+	spin_unlock_irqrestore(&hisi_hba->lock, flags);
 
-	return 0;
+	return index;
 }
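+
+/*
+ * Editor's note: a sketch of the IPTT (tag) split this allocator assumes.
+ * SCSI commands reuse the block-layer tag directly, so no bitmap state is
+ * needed for them; only internal/TMF commands, which carry no scsi_cmnd,
+ * draw from the reserved tail of the bitmap:
+ *
+ *   SCSI fast path:     tag = scsi_cmnd->request->tag
+ *                       (always < HISI_SAS_UNRESERVED_IPTT, see can_queue)
+ *   internal/TMF path:  tag = hisi_sas_slot_index_alloc(hisi_hba, NULL)
+ *                       (searched from HISI_SAS_UNRESERVED_IPTT upward)
+ *
+ * hisi_sas_slot_index_free() above mirrors this: the bitmap is cleared
+ * only for reserved tags, or when the HW has its own slot allocator.
+ */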
 
 static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
@@ -224,8 +220,9 @@
 void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
 			     struct hisi_sas_slot *slot)
 {
-	struct hisi_sas_dq *dq = &hisi_hba->dq[slot->dlvry_queue];
 	unsigned long flags;
+	int device_id = slot->device_id;
+	struct hisi_sas_device *sas_dev = &hisi_hba->devices[device_id];
 
 	if (task) {
 		struct device *dev = hisi_hba->dev;
@@ -235,23 +232,28 @@
 
 		task->lldd_task = NULL;
 
-		if (!sas_protocol_ata(task->task_proto))
+		if (!sas_protocol_ata(task->task_proto)) {
+			struct sas_ssp_task *ssp_task = &task->ssp_task;
+			struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
+
 			if (slot->n_elem)
 				dma_unmap_sg(dev, task->scatter,
 					     task->num_scatter,
 					     task->data_dir);
+			if (slot->n_elem_dif)
+				dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
+					     scsi_prot_sg_count(scsi_cmnd),
+					     task->data_dir);
+		}
 	}
 
-
-	spin_lock_irqsave(&dq->lock, flags);
+	spin_lock_irqsave(&sas_dev->lock, flags);
 	list_del_init(&slot->entry);
-	spin_unlock_irqrestore(&dq->lock, flags);
+	spin_unlock_irqrestore(&sas_dev->lock, flags);
 
 	memset(slot, 0, offsetof(struct hisi_sas_slot, buf));
 
-	spin_lock_irqsave(&hisi_hba->lock, flags);
 	hisi_sas_slot_index_free(hisi_hba, slot->idx);
-	spin_unlock_irqrestore(&hisi_hba->lock, flags);
 }
 EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
 
@@ -281,6 +283,129 @@
 			device_id, abort_flag, tag_to_abort);
 }
 
+static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba,
+			       struct sas_task *task, int n_elem,
+			       int n_elem_req)
+{
+	struct device *dev = hisi_hba->dev;
+
+	if (!sas_protocol_ata(task->task_proto)) {
+		if (task->num_scatter) {
+			if (n_elem)
+				dma_unmap_sg(dev, task->scatter,
+					     task->num_scatter,
+					     task->data_dir);
+		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
+			if (n_elem_req)
+				dma_unmap_sg(dev, &task->smp_task.smp_req,
+					     1, DMA_TO_DEVICE);
+		}
+	}
+}
+
+static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
+			    struct sas_task *task, int *n_elem,
+			    int *n_elem_req)
+{
+	struct device *dev = hisi_hba->dev;
+	int rc;
+
+	if (sas_protocol_ata(task->task_proto)) {
+		*n_elem = task->num_scatter;
+	} else {
+		unsigned int req_len;
+
+		if (task->num_scatter) {
+			*n_elem = dma_map_sg(dev, task->scatter,
+					     task->num_scatter, task->data_dir);
+			if (!*n_elem) {
+				rc = -ENOMEM;
+				goto prep_out;
+			}
+		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
+			*n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req,
+						 1, DMA_TO_DEVICE);
+			if (!*n_elem_req) {
+				rc = -ENOMEM;
+				goto prep_out;
+			}
+			req_len = sg_dma_len(&task->smp_task.smp_req);
+			if (req_len & 0x3) {
+				rc = -EINVAL;
+				goto err_out_dma_unmap;
+			}
+		}
+	}
+
+	if (*n_elem > HISI_SAS_SGE_PAGE_CNT) {
+		dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT\n",
+			*n_elem);
+		rc = -EINVAL;
+		goto err_out_dma_unmap;
+	}
+	return 0;
+
+err_out_dma_unmap:
+	/* It would be better to call dma_unmap_sg() here, but it's messy */
+	hisi_sas_dma_unmap(hisi_hba, task, *n_elem,
+			   *n_elem_req);
+prep_out:
+	return rc;
+}
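+
+/*
+ * Editor's note: mapping is deliberately skipped for ATA tasks, whose
+ * scatterlists libata has already DMA-mapped before the task reaches the
+ * LLDD, so only *n_elem is recorded for them. For SMP the single-entry
+ * request sg is mapped DMA_TO_DEVICE, and the (req_len & 0x3) check
+ * enforces the 4-byte alignment the SMP request frame requires.
+ */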
+
+static void hisi_sas_dif_dma_unmap(struct hisi_hba *hisi_hba,
+				   struct sas_task *task, int n_elem_dif)
+{
+	struct device *dev = hisi_hba->dev;
+
+	if (n_elem_dif) {
+		struct sas_ssp_task *ssp_task = &task->ssp_task;
+		struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
+
+		dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
+			     scsi_prot_sg_count(scsi_cmnd),
+			     task->data_dir);
+	}
+}
+
+static int hisi_sas_dif_dma_map(struct hisi_hba *hisi_hba,
+				int *n_elem_dif, struct sas_task *task)
+{
+	struct device *dev = hisi_hba->dev;
+	struct sas_ssp_task *ssp_task;
+	struct scsi_cmnd *scsi_cmnd;
+	int rc;
+
+	if (task->num_scatter) {
+		ssp_task = &task->ssp_task;
+		scsi_cmnd = ssp_task->cmd;
+
+		if (scsi_prot_sg_count(scsi_cmnd)) {
+			*n_elem_dif = dma_map_sg(dev,
+						 scsi_prot_sglist(scsi_cmnd),
+						 scsi_prot_sg_count(scsi_cmnd),
+						 task->data_dir);
+
+			if (!*n_elem_dif)
+				return -ENOMEM;
+
+			if (*n_elem_dif > HISI_SAS_SGE_DIF_PAGE_CNT) {
+				dev_err(dev, "task prep: n_elem_dif(%d) too large\n",
+					*n_elem_dif);
+				rc = -EINVAL;
+				goto err_out_dif_dma_unmap;
+			}
+		}
+	}
+
+	return 0;
+
+err_out_dif_dma_unmap:
+	dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
+		     scsi_prot_sg_count(scsi_cmnd), task->data_dir);
+	return rc;
+}
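+
+/*
+ * Editor's note: DIF/DIX protection data rides in its own scatterlist
+ * (scsi_prot_sglist()), separate from the data sglist, so it is mapped
+ * and unmapped independently; the element count is stashed in
+ * slot->n_elem_dif so hisi_sas_slot_task_free() can undo the mapping
+ * when the command completes or is aborted.
+ */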
+
 static int hisi_sas_task_prep(struct sas_task *task,
 			      struct hisi_sas_dq **dq_pointer,
 			      bool is_tmf, struct hisi_sas_tmf_task *tmf,
@@ -295,25 +420,11 @@
 	struct asd_sas_port *sas_port = device->port;
 	struct device *dev = hisi_hba->dev;
 	int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
-	int n_elem = 0, n_elem_req = 0, n_elem_resp = 0;
+	int n_elem = 0, n_elem_dif = 0, n_elem_req = 0;
 	struct hisi_sas_dq *dq;
 	unsigned long flags;
 	int wr_q_index;
 
-	if (!sas_port) {
-		struct task_status_struct *ts = &task->task_status;
-
-		ts->resp = SAS_TASK_UNDELIVERED;
-		ts->stat = SAS_PHY_DOWN;
-		/*
-		 * libsas will use dev->port, should
-		 * not call task_done for sata
-		 */
-		if (device->dev_type != SAS_SATA_DEV)
-			task->task_done(task);
-		return -ECOMM;
-	}
-
 	if (DEV_IS_GONE(sas_dev)) {
 		if (sas_dev)
 			dev_info(dev, "task prep: device %d not ready\n",
@@ -325,7 +436,14 @@
 		return -ECOMM;
 	}
 
-	*dq_pointer = dq = sas_dev->dq;
+	if (hisi_hba->reply_map) {
+		int cpu = raw_smp_processor_id();
+		unsigned int dq_index = hisi_hba->reply_map[cpu];
+
+		*dq_pointer = dq = &hisi_hba->dq[dq_index];
+	} else {
+		*dq_pointer = dq = sas_dev->dq;
+	}
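+	/*
+	 * Editor's note: reply_map[] (present when managed interrupts are
+	 * in use) maps each CPU to the completion queue whose IRQ affinity
+	 * covers it; delivering on the matching DQ keeps submission and
+	 * completion on the same CPU. Without it, the queue chosen at
+	 * device-init time is used.
+	 */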
 
 	port = to_hisi_sas_port(sas_port);
 	if (port && !port->port_attached) {
@@ -337,78 +455,55 @@
 		return -ECOMM;
 	}
 
+	rc = hisi_sas_dma_map(hisi_hba, task, &n_elem,
+			      &n_elem_req);
+	if (rc < 0)
+		goto prep_out;
+
 	if (!sas_protocol_ata(task->task_proto)) {
-		unsigned int req_len, resp_len;
-
-		if (task->num_scatter) {
-			n_elem = dma_map_sg(dev, task->scatter,
-					    task->num_scatter, task->data_dir);
-			if (!n_elem) {
-				rc = -ENOMEM;
-				goto prep_out;
-			}
-		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
-			n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req,
-						1, DMA_TO_DEVICE);
-			if (!n_elem_req) {
-				rc = -ENOMEM;
-				goto prep_out;
-			}
-			req_len = sg_dma_len(&task->smp_task.smp_req);
-			if (req_len & 0x3) {
-				rc = -EINVAL;
-				goto err_out_dma_unmap;
-			}
-			n_elem_resp = dma_map_sg(dev, &task->smp_task.smp_resp,
-						 1, DMA_FROM_DEVICE);
-			if (!n_elem_resp) {
-				rc = -ENOMEM;
-				goto err_out_dma_unmap;
-			}
-			resp_len = sg_dma_len(&task->smp_task.smp_resp);
-			if (resp_len & 0x3) {
-				rc = -EINVAL;
-				goto err_out_dma_unmap;
-			}
-		}
-	} else
-		n_elem = task->num_scatter;
-
-	if (n_elem > HISI_SAS_SGE_PAGE_CNT) {
-		dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
-			n_elem);
-		rc = -EINVAL;
-		goto err_out_dma_unmap;
+		rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, task);
+		if (rc < 0)
+			goto err_out_dma_unmap;
 	}
 
-	spin_lock_irqsave(&hisi_hba->lock, flags);
 	if (hisi_hba->hw->slot_index_alloc)
-		rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx,
-						    device);
-	else
-		rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
-	spin_unlock_irqrestore(&hisi_hba->lock, flags);
-	if (rc)
-		goto err_out_dma_unmap;
+		rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device);
+	else {
+		struct scsi_cmnd *scsi_cmnd = NULL;
 
+		if (task->uldd_task) {
+			struct ata_queued_cmd *qc;
+
+			if (dev_is_sata(device)) {
+				qc = task->uldd_task;
+				scsi_cmnd = qc->scsicmd;
+			} else {
+				scsi_cmnd = task->uldd_task;
+			}
+		}
+		rc = hisi_sas_slot_index_alloc(hisi_hba, scsi_cmnd);
+	}
+	if (rc < 0)
+		goto err_out_dif_dma_unmap;
+
+	slot_idx = rc;
 	slot = &hisi_hba->slot_info[slot_idx];
 
 	spin_lock_irqsave(&dq->lock, flags);
-	wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
-	if (wr_q_index < 0) {
-		spin_unlock_irqrestore(&dq->lock, flags);
-		rc = -EAGAIN;
-		goto err_out_tag;
-	}
-
+	wr_q_index = dq->wr_point;
+	dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
 	list_add_tail(&slot->delivery, &dq->list);
-	list_add_tail(&slot->entry, &sas_dev->list);
 	spin_unlock_irqrestore(&dq->lock, flags);
+	spin_lock_irqsave(&sas_dev->lock, flags);
+	list_add_tail(&slot->entry, &sas_dev->list);
+	spin_unlock_irqrestore(&sas_dev->lock, flags);
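+	/*
+	 * Editor's note: wr_point advances unconditionally above (the old
+	 * get_free_slot() failure path is gone); this assumes outstanding
+	 * commands are capped at HISI_SAS_MAX_COMMANDS, no more than the
+	 * HISI_SAS_QUEUE_SLOTS entries of one delivery queue, so a DQ can
+	 * never wrap onto undelivered slots.
+	 */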
 
 	dlvry_queue = dq->id;
 	dlvry_queue_slot = wr_q_index;
 
+	slot->device_id = sas_dev->device_id;
 	slot->n_elem = n_elem;
+	slot->n_elem_dif = n_elem_dif;
 	slot->dlvry_queue = dlvry_queue;
 	slot->dlvry_queue_slot = dlvry_queue_slot;
 	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
@@ -421,7 +516,8 @@
 
 	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
 	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
-	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);
+	memset(hisi_sas_status_buf_addr_mem(slot), 0,
+	       sizeof(struct hisi_sas_err_record));
 
 	switch (task->task_proto) {
 	case SAS_PROTOCOL_SMP:
@@ -450,24 +546,12 @@
 
 	return 0;
 
-err_out_tag:
-	spin_lock_irqsave(&hisi_hba->lock, flags);
-	hisi_sas_slot_index_free(hisi_hba, slot_idx);
-	spin_unlock_irqrestore(&hisi_hba->lock, flags);
+err_out_dif_dma_unmap:
+	if (!sas_protocol_ata(task->task_proto))
+		hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif);
 err_out_dma_unmap:
-	if (!sas_protocol_ata(task->task_proto)) {
-		if (task->num_scatter) {
-			dma_unmap_sg(dev, task->scatter, task->num_scatter,
-			     task->data_dir);
-		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
-			if (n_elem_req)
-				dma_unmap_sg(dev, &task->smp_task.smp_req,
-					     1, DMA_TO_DEVICE);
-			if (n_elem_resp)
-				dma_unmap_sg(dev, &task->smp_task.smp_resp,
-					     1, DMA_FROM_DEVICE);
-		}
-	}
+	hisi_sas_dma_unmap(hisi_hba, task, n_elem,
+			   n_elem_req);
 prep_out:
 	dev_err(dev, "task prep: failed[%d]!\n", rc);
 	return rc;
@@ -479,10 +563,29 @@
 	u32 rc;
 	u32 pass = 0;
 	unsigned long flags;
-	struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
-	struct device *dev = hisi_hba->dev;
+	struct hisi_hba *hisi_hba;
+	struct device *dev;
+	struct domain_device *device = task->dev;
+	struct asd_sas_port *sas_port = device->port;
 	struct hisi_sas_dq *dq = NULL;
 
+	if (!sas_port) {
+		struct task_status_struct *ts = &task->task_status;
+
+		ts->resp = SAS_TASK_UNDELIVERED;
+		ts->stat = SAS_PHY_DOWN;
+		/*
+		 * libsas will use dev->port, should
+		 * not call task_done for sata
+		 */
+		if (device->dev_type != SAS_SATA_DEV)
+			task->task_done(task);
+		return -ECOMM;
+	}
+
+	hisi_hba = dev_to_hisi_hba(device);
+	dev = hisi_hba->dev;
+
 	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
 		if (in_softirq())
 			return -EINVAL;
@@ -539,7 +642,7 @@
 		id->initiator_bits = SAS_PROTOCOL_ALL;
 		id->target_bits = phy->identify.target_port_protocols;
 	} else if (phy->phy_type & PORT_TYPE_SATA) {
-		/*Nothing*/
+		/* Nothing */
 	}
 
 	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
@@ -563,11 +666,12 @@
 
 			hisi_hba->devices[i].device_id = i;
 			sas_dev = &hisi_hba->devices[i];
-			sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
+			sas_dev->dev_status = HISI_SAS_DEV_INIT;
 			sas_dev->dev_type = device->dev_type;
 			sas_dev->hisi_hba = hisi_hba;
 			sas_dev->sas_device = device;
 			sas_dev->dq = dq;
+			spin_lock_init(&sas_dev->lock);
 			INIT_LIST_HEAD(&hisi_hba->devices[i].list);
 			break;
 		}
@@ -579,29 +683,60 @@
 	return sas_dev;
 }
 
-#define HISI_SAS_SRST_ATA_DISK_CNT 3
+#define HISI_SAS_DISK_RECOVER_CNT 3
 static int hisi_sas_init_device(struct domain_device *device)
 {
 	int rc = TMF_RESP_FUNC_COMPLETE;
 	struct scsi_lun lun;
 	struct hisi_sas_tmf_task tmf_task;
-	int retry = HISI_SAS_SRST_ATA_DISK_CNT;
+	int retry = HISI_SAS_DISK_RECOVER_CNT;
 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
+	struct device *dev = hisi_hba->dev;
+	struct sas_phy *local_phy;
 
 	switch (device->dev_type) {
 	case SAS_END_DEVICE:
 		int_to_scsilun(0, &lun);
 
 		tmf_task.tmf = TMF_CLEAR_TASK_SET;
-		rc = hisi_sas_debug_issue_ssp_tmf(device, lun.scsi_lun,
-						  &tmf_task);
-		if (rc == TMF_RESP_FUNC_COMPLETE)
-			hisi_sas_release_task(hisi_hba, device);
+		while (retry-- > 0) {
+			rc = hisi_sas_debug_issue_ssp_tmf(device, lun.scsi_lun,
+							  &tmf_task);
+			if (rc == TMF_RESP_FUNC_COMPLETE) {
+				hisi_sas_release_task(hisi_hba, device);
+				break;
+			}
+		}
 		break;
 	case SAS_SATA_DEV:
 	case SAS_SATA_PM:
 	case SAS_SATA_PM_PORT:
 	case SAS_SATA_PENDING:
+		/*
+		 * send HARD RESET to clear previous affiliation of
+		 * STP target port
+		 */
+		local_phy = sas_get_local_phy(device);
+		if (!scsi_is_sas_phy_local(local_phy) &&
+		    !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
+			unsigned long deadline = ata_deadline(jiffies, 20000);
+			struct sata_device *sata_dev = &device->sata_dev;
+			struct ata_host *ata_host = sata_dev->ata_host;
+			struct ata_port_operations *ops = ata_host->ops;
+			struct ata_port *ap = sata_dev->ap;
+			struct ata_link *link;
+			unsigned int classes;
+
+			ata_for_each_link(link, ap, EDGE)
+				rc = ops->hardreset(link, &classes,
+						    deadline);
+		}
+		sas_put_local_phy(local_phy);
+		if (rc) {
+			dev_warn(dev, "SATA disk hardreset fail: %d\n", rc);
+			return rc;
+		}
+
 		while (retry-- > 0) {
 			rc = hisi_sas_softreset_ata_disk(device);
 			if (!rc)
@@ -636,7 +771,7 @@
 	device->lldd_dev = sas_dev;
 	hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
 
-	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
+	if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
 		int phy_no;
 		u8 phy_num = parent_dev->ex_dev.num_phys;
 		struct ex_phy *phy;
@@ -664,6 +799,7 @@
 	rc = hisi_sas_init_device(device);
 	if (rc)
 		goto err_out;
+	sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
 	return 0;
 
 err_out:
@@ -715,7 +851,8 @@
 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
 	int phy_no = sas_phy->id;
 
-	hisi_hba->hw->sl_notify(hisi_hba, phy_no); /* This requires a sleep */
+	if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP)
+		hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no);
 	hisi_sas_bytes_dmaed(hisi_hba, phy_no);
 }
 
@@ -745,6 +882,30 @@
 }
 EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);
 
+static void hisi_sas_wait_phyup_timedout(struct timer_list *t)
+{
+	struct hisi_sas_phy *phy = from_timer(phy, t, timer);
+	struct hisi_hba *hisi_hba = phy->hisi_hba;
+	struct device *dev = hisi_hba->dev;
+	int phy_no = phy->sas_phy.id;
+
+	dev_warn(dev, "phy%d wait phyup timeout, issuing link reset\n", phy_no);
+	hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
+}
+
+void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no)
+{
+	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+	struct device *dev = hisi_hba->dev;
+
+	if (!timer_pending(&phy->timer)) {
+		dev_dbg(dev, "phy%d OOB ready\n", phy_no);
+		phy->timer.expires = jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT * HZ;
+		add_timer(&phy->timer);
+	}
+}
+EXPORT_SYMBOL_GPL(hisi_sas_phy_oob_ready);
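+
+/*
+ * Editor's note: the two functions above form a phy-up watchdog. On OOB
+ * detection the timer is armed for HISI_SAS_WAIT_PHYUP_TIMEOUT seconds;
+ * the phy-up interrupt path is expected to delete it. If it fires
+ * instead, the phy never came up and a link reset is scheduled via the
+ * HISI_PHYE_LINK_RESET event work.
+ */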
+
 static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
 {
 	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
@@ -773,8 +934,34 @@
 		INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);
 
 	spin_lock_init(&phy->lock);
+
+	timer_setup(&phy->timer, hisi_sas_wait_phyup_timedout, 0);
 }
 
+/* Wrapper to ensure we track hisi_sas_phy.enable properly */
+void hisi_sas_phy_enable(struct hisi_hba *hisi_hba, int phy_no, int enable)
+{
+	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+	struct asd_sas_phy *aphy = &phy->sas_phy;
+	struct sas_phy *sphy = aphy->phy;
+	unsigned long flags;
+
+	spin_lock_irqsave(&phy->lock, flags);
+
+	if (enable) {
+		/* We may have been enabled already; if so, don't touch */
+		if (!phy->enable)
+			sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
+		hisi_hba->hw->phy_start(hisi_hba, phy_no);
+	} else {
+		sphy->negotiated_linkrate = SAS_PHY_DISABLED;
+		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
+	}
+	phy->enable = enable;
+	spin_unlock_irqrestore(&phy->lock, flags);
+}
+EXPORT_SYMBOL_GPL(hisi_sas_phy_enable);
+
 static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
 {
 	struct sas_ha_struct *sas_ha = sas_phy->ha;
@@ -809,7 +996,8 @@
 		spin_lock_irqsave(&task->task_state_lock, flags);
 		task->task_state_flags &=
 			~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
-		task->task_state_flags |= SAS_TASK_STATE_DONE;
+		if (!slot->is_internal && task->task_proto != SAS_PROTOCOL_SMP)
+			task->task_state_flags |= SAS_TASK_STATE_DONE;
 		spin_unlock_irqrestore(&task->task_state_lock, flags);
 	}
 
@@ -861,21 +1049,22 @@
 	dev_info(dev, "dev[%d:%x] is gone\n",
 		 sas_dev->device_id, sas_dev->dev_type);
 
+	down(&hisi_hba->sem);
 	if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
 		hisi_sas_internal_task_abort(hisi_hba, device,
-				     HISI_SAS_INT_ABT_DEV, 0);
+					     HISI_SAS_INT_ABT_DEV, 0);
 
 		hisi_sas_dereg_device(hisi_hba, device);
 
-		down(&hisi_hba->sem);
 		hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
-		up(&hisi_hba->sem);
 		device->lldd_dev = NULL;
 	}
 
 	if (hisi_hba->hw->free_device)
 		hisi_hba->hw->free_device(sas_dev);
 	sas_dev->dev_type = SAS_PHY_UNUSED;
+	sas_dev->sas_device = NULL;
+	up(&hisi_hba->sem);
 }
 
 static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
@@ -883,7 +1072,7 @@
 	return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
 }
 
-static void hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
+static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
 			struct sas_phy_linkrates *r)
 {
 	struct sas_phy_linkrates _r;
@@ -892,6 +1081,9 @@
 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
 	enum sas_linkrate min, max;
 
+	if (r->minimum_linkrate > SAS_LINK_RATE_1_5_GBPS)
+		return -EINVAL;
+
 	if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
 		max = sas_phy->phy->maximum_linkrate;
 		min = r->minimum_linkrate;
@@ -899,15 +1091,20 @@
 		max = r->maximum_linkrate;
 		min = sas_phy->phy->minimum_linkrate;
 	} else
-		return;
+		return -EINVAL;
 
 	_r.maximum_linkrate = max;
 	_r.minimum_linkrate = min;
 
-	hisi_hba->hw->phy_disable(hisi_hba, phy_no);
+	sas_phy->phy->maximum_linkrate = max;
+	sas_phy->phy->minimum_linkrate = min;
+
+	hisi_sas_phy_enable(hisi_hba, phy_no, 0);
 	msleep(100);
 	hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r);
-	hisi_hba->hw->phy_start(hisi_hba, phy_no);
+	hisi_sas_phy_enable(hisi_hba, phy_no, 1);
+
+	return 0;
 }
 
 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
@@ -923,18 +1120,17 @@
 		break;
 
 	case PHY_FUNC_LINK_RESET:
-		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
+		hisi_sas_phy_enable(hisi_hba, phy_no, 0);
 		msleep(100);
-		hisi_hba->hw->phy_start(hisi_hba, phy_no);
+		hisi_sas_phy_enable(hisi_hba, phy_no, 1);
 		break;
 
 	case PHY_FUNC_DISABLE:
-		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
+		hisi_sas_phy_enable(hisi_hba, phy_no, 0);
 		break;
 
 	case PHY_FUNC_SET_LINK_RATE:
-		hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata);
-		break;
+		return hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata);
 	case PHY_FUNC_GET_EVENTS:
 		if (hisi_hba->hw->get_events) {
 			hisi_hba->hw->get_events(hisi_hba, phy_no);
@@ -950,8 +1146,7 @@
 
 static void hisi_sas_task_done(struct sas_task *task)
 {
-	if (!del_timer(&task->slow_task->timer))
-		return;
+	del_timer(&task->slow_task->timer);
 	complete(&task->slow_task->completion);
 }
 
@@ -960,13 +1155,17 @@
 	struct sas_task_slow *slow = from_timer(slow, t, timer);
 	struct sas_task *task = slow->task;
 	unsigned long flags;
+	bool is_completed = true;
 
 	spin_lock_irqsave(&task->task_state_lock, flags);
-	if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
+	if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
 		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+		is_completed = false;
+	}
 	spin_unlock_irqrestore(&task->task_state_lock, flags);
 
-	complete(&task->slow_task->completion);
+	if (!is_completed)
+		complete(&task->slow_task->completion);
 }
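+
+/*
+ * Editor's note: hisi_sas_task_done() now always deletes the timer and
+ * signals the completion, while the timeout handler above signals it
+ * only when the task has not already been marked DONE; this avoids a
+ * second, spurious complete() when a command finishes just before its
+ * timer fires.
+ */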
 
 #define TASK_TIMEOUT 20
@@ -999,7 +1198,7 @@
 		task->task_done = hisi_sas_task_done;
 
 		task->slow_task->timer.function = hisi_sas_tmf_timedout;
-		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ;
+		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT * HZ;
 		add_timer(&task->slow_task->timer);
 
 		res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);
@@ -1019,8 +1218,16 @@
 				struct hisi_sas_slot *slot = task->lldd_task;
 
 				dev_err(dev, "abort tmf: TMF task timeout and not done\n");
-				if (slot)
+				if (slot) {
+					struct hisi_sas_cq *cq =
+					       &hisi_hba->cq[slot->dlvry_queue];
+					/*
+					 * flush tasklet to avoid free'ing task
+					 * before using task in IO completion
+					 */
+					tasklet_kill(&cq->tasklet);
 					slot->task = NULL;
+				}
 
 				goto ex_err;
 			} else
@@ -1044,8 +1251,7 @@
 			/* no error, but return the number of bytes of
 			 * underrun
 			 */
-			dev_warn(dev, "abort tmf: task to dev %016llx "
-				 "resp: 0x%x sts 0x%x underrun\n",
+			dev_warn(dev, "abort tmf: task to dev %016llx resp: 0x%x sts 0x%x underrun\n",
 				 SAS_ADDR(device->sas_addr),
 				 task->task_status.resp,
 				 task->task_status.stat);
@@ -1060,10 +1266,16 @@
 			break;
 		}
 
-		dev_warn(dev, "abort tmf: task to dev "
-			 "%016llx resp: 0x%x status 0x%x\n",
-			 SAS_ADDR(device->sas_addr), task->task_status.resp,
-			 task->task_status.stat);
+		if (task->task_status.resp == SAS_TASK_COMPLETE &&
+		    task->task_status.stat == SAS_OPEN_REJECT) {
+			dev_warn(dev, "abort tmf: open reject failed\n");
+			res = -EIO;
+		} else {
+			dev_warn(dev, "abort tmf: task to dev %016llx resp: 0x%x status 0x%x\n",
+				 SAS_ADDR(device->sas_addr),
+				 task->task_status.resp,
+				 task->task_status.stat);
+		}
 		sas_free_task(task);
 		task = NULL;
 	}
@@ -1180,8 +1392,7 @@
 	}
 }
 
-static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
-			      u32 state)
+static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state)
 {
 	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
 	struct asd_sas_port *_sas_port = NULL;
@@ -1203,13 +1414,13 @@
 
 				_sas_port = sas_port;
 
-				if (DEV_IS_EXPANDER(dev->dev_type))
+				if (dev_is_expander(dev->dev_type))
 					sas_ha->notify_port_event(sas_phy,
 							PORTE_BROADCAST_RCVD);
 			}
-		} else if (old_state & (1 << phy_no))
-			/* PHY down but was up before */
+		} else {
 			hisi_sas_phy_down(hisi_hba, phy_no, 0);
+		}
 
 	}
 }
@@ -1290,7 +1501,7 @@
 		struct domain_device *port_dev = sas_port->port_dev;
 		struct domain_device *device;
 
-		if (!port_dev || !DEV_IS_EXPANDER(port_dev->dev_type))
+		if (!port_dev || !dev_is_expander(port_dev->dev_type))
 			continue;
 
 		/* Try to find a SATA device */
@@ -1333,16 +1544,16 @@
 	msleep(1000);
 	hisi_sas_refresh_port_id(hisi_hba);
 	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
-	up(&hisi_hba->sem);
 
 	if (hisi_hba->reject_stp_links_msk)
 		hisi_sas_terminate_stp_reject(hisi_hba);
 	hisi_sas_reset_init_all_devices(hisi_hba);
+	up(&hisi_hba->sem);
 	scsi_unblock_requests(shost);
 	clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
 
 	state = hisi_hba->hw->get_phys_state(hisi_hba);
-	hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state, state);
+	hisi_sas_rescan_topology(hisi_hba, state);
 }
 EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done);
 
@@ -1352,6 +1563,9 @@
 	struct Scsi_Host *shost = hisi_hba->shost;
 	int rc;
 
+	if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct)
+		queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);
+
 	if (!hisi_hba->hw->soft_reset)
 		return -1;
 
@@ -1396,6 +1610,17 @@
 
 	spin_lock_irqsave(&task->task_state_lock, flags);
 	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
+		struct hisi_sas_slot *slot = task->lldd_task;
+		struct hisi_sas_cq *cq;
+
+		if (slot) {
+			/*
+			 * flush tasklet to avoid free'ing task
+			 * before using task in IO completion
+			 */
+			cq = &hisi_hba->cq[slot->dlvry_queue];
+			tasklet_kill(&cq->tasklet);
+		}
 		spin_unlock_irqrestore(&task->task_state_lock, flags);
 		rc = TMF_RESP_FUNC_COMPLETE;
 		goto out;
@@ -1403,16 +1628,15 @@
 	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
 	spin_unlock_irqrestore(&task->task_state_lock, flags);
 
-	sas_dev->dev_status = HISI_SAS_DEV_EH;
 	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
 		struct scsi_cmnd *cmnd = task->uldd_task;
 		struct hisi_sas_slot *slot = task->lldd_task;
-		u32 tag = slot->idx;
+		u16 tag = slot->idx;
 		int rc2;
 
 		int_to_scsilun(cmnd->device->lun, &lun);
 		tmf_task.tmf = TMF_ABORT_TASK;
-		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
+		tmf_task.tag_of_task_to_be_managed = tag;
 
 		rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
 						  &tmf_task);
@@ -1439,7 +1663,8 @@
 		task->task_proto & SAS_PROTOCOL_STP) {
 		if (task->dev->dev_type == SAS_SATA_DEV) {
 			rc = hisi_sas_internal_task_abort(hisi_hba, device,
-						HISI_SAS_INT_ABT_DEV, 0);
+							  HISI_SAS_INT_ABT_DEV,
+							  0);
 			if (rc < 0) {
 				dev_err(dev, "abort task: internal abort failed\n");
 				goto out;
@@ -1451,12 +1676,19 @@
 		/* SMP */
 		struct hisi_sas_slot *slot = task->lldd_task;
 		u32 tag = slot->idx;
+		struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue];
 
 		rc = hisi_sas_internal_task_abort(hisi_hba, device,
-			     HISI_SAS_INT_ABT_CMD, tag);
+						  HISI_SAS_INT_ABT_CMD, tag);
 		if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
-					task->lldd_task)
-			hisi_sas_do_release_task(hisi_hba, task, slot);
+					task->lldd_task) {
+			/*
+			 * flush tasklet to avoid free'ing task
+			 * before using task in IO completion
+			 */
+			tasklet_kill(&cq->tasklet);
+			slot->task = NULL;
+		}
 	}
 
 out:
@@ -1470,10 +1702,10 @@
 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
 	struct device *dev = hisi_hba->dev;
 	struct hisi_sas_tmf_task tmf_task;
-	int rc = TMF_RESP_FUNC_FAILED;
+	int rc;
 
 	rc = hisi_sas_internal_task_abort(hisi_hba, device,
-					HISI_SAS_INT_ABT_DEV, 0);
+					  HISI_SAS_INT_ABT_DEV, 0);
 	if (rc < 0) {
 		dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
 		return TMF_RESP_FUNC_FAILED;
@@ -1491,8 +1723,8 @@
 
 static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
 {
-	int rc = TMF_RESP_FUNC_FAILED;
 	struct hisi_sas_tmf_task tmf_task;
+	int rc;
 
 	tmf_task.tmf = TMF_CLEAR_ACA;
 	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
@@ -1503,24 +1735,37 @@
 static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
 {
 	struct sas_phy *local_phy = sas_get_local_phy(device);
-	int rc, reset_type = (device->dev_type == SAS_SATA_DEV ||
-			(device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
+	struct hisi_sas_device *sas_dev = device->lldd_dev;
 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
 	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
-	struct asd_sas_phy *sas_phy = sas_ha->sas_phy[local_phy->number];
-	struct hisi_sas_phy *phy = container_of(sas_phy,
-			struct hisi_sas_phy, sas_phy);
 	DECLARE_COMPLETION_ONSTACK(phyreset);
+	int rc, reset_type;
+
+	if (!local_phy->enabled) {
+		sas_put_local_phy(local_phy);
+		return -ENODEV;
+	}
 
 	if (scsi_is_sas_phy_local(local_phy)) {
+		struct asd_sas_phy *sas_phy =
+			sas_ha->sas_phy[local_phy->number];
+		struct hisi_sas_phy *phy =
+			container_of(sas_phy, struct hisi_sas_phy, sas_phy);
 		phy->in_reset = 1;
 		phy->reset_completion = &phyreset;
 	}
 
+	reset_type = (sas_dev->dev_status == HISI_SAS_DEV_INIT ||
+		      !dev_is_sata(device)) ? true : false;
+
 	rc = sas_phy_reset(local_phy, reset_type);
 	sas_put_local_phy(local_phy);
 
 	if (scsi_is_sas_phy_local(local_phy)) {
+		struct asd_sas_phy *sas_phy =
+			sas_ha->sas_phy[local_phy->number];
+		struct hisi_sas_phy *phy =
+			container_of(sas_phy, struct hisi_sas_phy, sas_phy);
 		int ret = wait_for_completion_timeout(&phyreset, 2 * HZ);
 		unsigned long flags;
 
@@ -1532,31 +1777,38 @@
 		/* report PHY down if timed out */
 		if (!ret)
 			hisi_sas_phy_down(hisi_hba, sas_phy->id, 0);
-	} else
-		msleep(2000);
+	} else if (sas_dev->dev_status != HISI_SAS_DEV_INIT) {
+		/*
+		 * If in init state, rely on the caller to wait for the link
+		 * to be ready; otherwise, delay unless the phy reset failed.
+		 */
+		if (!rc)
+			msleep(2000);
+	}
 
 	return rc;
 }
 
 static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
 {
-	struct hisi_sas_device *sas_dev = device->lldd_dev;
 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
 	struct device *dev = hisi_hba->dev;
-	int rc = TMF_RESP_FUNC_FAILED;
-
-	if (sas_dev->dev_status != HISI_SAS_DEV_EH)
-		return TMF_RESP_FUNC_FAILED;
-	sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
+	int rc;
 
 	rc = hisi_sas_internal_task_abort(hisi_hba, device,
-					HISI_SAS_INT_ABT_DEV, 0);
+					  HISI_SAS_INT_ABT_DEV, 0);
 	if (rc < 0) {
 		dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
 		return TMF_RESP_FUNC_FAILED;
 	}
 	hisi_sas_dereg_device(hisi_hba, device);
 
+	if (dev_is_sata(device)) {
+		rc = hisi_sas_softreset_ata_disk(device);
+		if (rc == TMF_RESP_FUNC_FAILED)
+			return TMF_RESP_FUNC_FAILED;
+	}
+
 	rc = hisi_sas_debug_I_T_nexus_reset(device);
 
 	if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
@@ -1572,22 +1824,21 @@
 	struct device *dev = hisi_hba->dev;
 	int rc = TMF_RESP_FUNC_FAILED;
 
-	sas_dev->dev_status = HISI_SAS_DEV_EH;
+	/* Clear internal IO and then issue LU reset */
+	rc = hisi_sas_internal_task_abort(hisi_hba, device,
+					  HISI_SAS_INT_ABT_DEV, 0);
+	if (rc < 0) {
+		dev_err(dev, "lu_reset: internal abort failed\n");
+		goto out;
+	}
+	hisi_sas_dereg_device(hisi_hba, device);
+
 	if (dev_is_sata(device)) {
 		struct sas_phy *phy;
 
-		/* Clear internal IO and then hardreset */
-		rc = hisi_sas_internal_task_abort(hisi_hba, device,
-						  HISI_SAS_INT_ABT_DEV, 0);
-		if (rc < 0) {
-			dev_err(dev, "lu_reset: internal abort failed\n");
-			goto out;
-		}
-		hisi_sas_dereg_device(hisi_hba, device);
-
 		phy = sas_get_local_phy(device);
 
-		rc = sas_phy_reset(phy, 1);
+		rc = sas_phy_reset(phy, true);
 
 		if (rc == 0)
 			hisi_sas_release_task(hisi_hba, device);
@@ -1595,14 +1846,6 @@
 	} else {
 		struct hisi_sas_tmf_task tmf_task = { .tmf =  TMF_LU_RESET };
 
-		rc = hisi_sas_internal_task_abort(hisi_hba, device,
-						HISI_SAS_INT_ABT_DEV, 0);
-		if (rc < 0) {
-			dev_err(dev, "lu_reset: internal abort failed\n");
-			goto out;
-		}
-		hisi_sas_dereg_device(hisi_hba, device);
-
 		rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
 		if (rc == TMF_RESP_FUNC_COMPLETE)
 			hisi_sas_release_task(hisi_hba, device);
@@ -1631,7 +1874,7 @@
 		struct domain_device *device = sas_dev->sas_device;
 
 		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device ||
-		    DEV_IS_EXPANDER(device->dev_type))
+		    dev_is_expander(device->dev_type))
 			continue;
 
 		rc = hisi_sas_debug_I_T_nexus_reset(device);
@@ -1659,7 +1902,7 @@
 
 		int_to_scsilun(cmnd->device->lun, &lun);
 		tmf_task.tmf = TMF_QUERY_TASK;
-		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
+		tmf_task.tag_of_task_to_be_managed = tag;
 
 		rc = hisi_sas_debug_issue_ssp_tmf(device,
 						  lun.scsi_lun,
@@ -1682,7 +1925,7 @@
 static int
 hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
 				  struct sas_task *task, int abort_flag,
-				  int task_tag)
+				  int task_tag, struct hisi_sas_dq *dq)
 {
 	struct domain_device *device = task->dev;
 	struct hisi_sas_device *sas_dev = device->lldd_dev;
@@ -1691,9 +1934,8 @@
 	struct hisi_sas_slot *slot;
 	struct asd_sas_port *sas_port = device->port;
 	struct hisi_sas_cmd_hdr *cmd_hdr_base;
-	struct hisi_sas_dq *dq = sas_dev->dq;
 	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
-	unsigned long flags, flags_dq = 0;
+	unsigned long flags;
 	int wr_q_index;
 
 	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
@@ -1705,29 +1947,26 @@
 	port = to_hisi_sas_port(sas_port);
 
 	/* simply get a slot and send abort command */
-	spin_lock_irqsave(&hisi_hba->lock, flags);
-	rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
-	if (rc) {
-		spin_unlock_irqrestore(&hisi_hba->lock, flags);
+	rc = hisi_sas_slot_index_alloc(hisi_hba, NULL);
+	if (rc < 0)
 		goto err_out;
-	}
-	spin_unlock_irqrestore(&hisi_hba->lock, flags);
 
+	slot_idx = rc;
 	slot = &hisi_hba->slot_info[slot_idx];
 
-	spin_lock_irqsave(&dq->lock, flags_dq);
-	wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
-	if (wr_q_index < 0) {
-		spin_unlock_irqrestore(&dq->lock, flags_dq);
-		rc = -EAGAIN;
-		goto err_out_tag;
-	}
+	spin_lock_irqsave(&dq->lock, flags);
+	wr_q_index = dq->wr_point;
+	dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
 	list_add_tail(&slot->delivery, &dq->list);
-	spin_unlock_irqrestore(&dq->lock, flags_dq);
+	spin_unlock_irqrestore(&dq->lock, flags);
+	spin_lock_irqsave(&sas_dev->lock, flags);
+	list_add_tail(&slot->entry, &sas_dev->list);
+	spin_unlock_irqrestore(&sas_dev->lock, flags);
 
 	dlvry_queue = dq->id;
 	dlvry_queue_slot = wr_q_index;
 
+	slot->device_id = sas_dev->device_id;
 	slot->n_elem = n_elem;
 	slot->dlvry_queue = dlvry_queue;
 	slot->dlvry_queue_slot = dlvry_queue_slot;
@@ -1740,7 +1979,8 @@
 
 	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
 	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
-	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);
+	memset(hisi_sas_status_buf_addr_mem(slot), 0,
+	       sizeof(struct hisi_sas_err_record));
 
 	hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
 				      abort_flag, task_tag);
@@ -1748,20 +1988,14 @@
 	spin_lock_irqsave(&task->task_state_lock, flags);
 	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
 	spin_unlock_irqrestore(&task->task_state_lock, flags);
-
 	WRITE_ONCE(slot->ready, 1);
 	/* send abort command to the chip */
 	spin_lock_irqsave(&dq->lock, flags);
-	list_add_tail(&slot->entry, &sas_dev->list);
 	hisi_hba->hw->start_delivery(dq);
 	spin_unlock_irqrestore(&dq->lock, flags);
 
 	return 0;
 
-err_out_tag:
-	spin_lock_irqsave(&hisi_hba->lock, flags);
-	hisi_sas_slot_index_free(hisi_hba, slot_idx);
-	spin_unlock_irqrestore(&hisi_hba->lock, flags);
 err_out:
 	dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);
 
@@ -1769,18 +2003,19 @@
 }
 
 /**
- * hisi_sas_internal_task_abort -- execute an internal
+ * _hisi_sas_internal_task_abort -- execute an internal
  * abort command for single IO command or a device
  * @hisi_hba: host controller struct
  * @device: domain device
  * @abort_flag: mode of operation, device or single IO
  * @tag: tag of IO to be aborted (only relevant to single
  *       IO mode)
+ * @dq: delivery queue for this internal abort command
  */
 static int
-hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
-			     struct domain_device *device,
-			     int abort_flag, int tag)
+_hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
+			      struct domain_device *device, int abort_flag,
+			      int tag, struct hisi_sas_dq *dq)
 {
 	struct sas_task *task;
 	struct hisi_sas_device *sas_dev = device->lldd_dev;
@@ -1804,11 +2039,11 @@
 	task->task_proto = device->tproto;
 	task->task_done = hisi_sas_task_done;
 	task->slow_task->timer.function = hisi_sas_tmf_timedout;
-	task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT*HZ;
+	task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT * HZ;
 	add_timer(&task->slow_task->timer);
 
 	res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
-						task, abort_flag, tag);
+						task, abort_flag, tag, dq);
 	if (res) {
 		del_timer(&task->slow_task->timer);
 		dev_err(dev, "internal task abort: executing internal task failed: %d\n",
@@ -1820,12 +2055,24 @@
 
 	/* Internal abort timed out */
 	if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+		if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct)
+			queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);
+
 		if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
 			struct hisi_sas_slot *slot = task->lldd_task;
 
-			if (slot)
+			if (slot) {
+				struct hisi_sas_cq *cq =
+					&hisi_hba->cq[slot->dlvry_queue];
+				/*
+				 * flush tasklet to avoid free'ing task
+				 * before using task in IO completion
+				 */
+				tasklet_kill(&cq->tasklet);
 				slot->task = NULL;
+			}
 			dev_err(dev, "internal task abort: timeout and not done.\n");
+
 			res = -EIO;
 			goto exit;
 		} else
@@ -1845,10 +2092,8 @@
 	}
 
 exit:
-	dev_dbg(dev, "internal task abort: task to dev %016llx task=%p "
-		"resp: 0x%x sts 0x%x\n",
-		SAS_ADDR(device->sas_addr),
-		task,
+	dev_dbg(dev, "internal task abort: task to dev %016llx task=%pK resp: 0x%x sts 0x%x\n",
+		SAS_ADDR(device->sas_addr), task,
 		task->task_status.resp, /* 0 is complete, -1 is undelivered */
 		task->task_status.stat);
 	sas_free_task(task);
@@ -1856,15 +2101,51 @@
 	return res;
 }
 
+static int
+hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
+			     struct domain_device *device,
+			     int abort_flag, int tag)
+{
+	struct hisi_sas_slot *slot;
+	struct device *dev = hisi_hba->dev;
+	struct hisi_sas_dq *dq;
+	int i, rc;
+
+	switch (abort_flag) {
+	case HISI_SAS_INT_ABT_CMD:
+		slot = &hisi_hba->slot_info[tag];
+		dq = &hisi_hba->dq[slot->dlvry_queue];
+		return _hisi_sas_internal_task_abort(hisi_hba, device,
+						     abort_flag, tag, dq);
+	case HISI_SAS_INT_ABT_DEV:
+		for (i = 0; i < hisi_hba->cq_nvecs; i++) {
+			struct hisi_sas_cq *cq = &hisi_hba->cq[i];
+			const struct cpumask *mask = cq->pci_irq_mask;
+
+			if (mask && !cpumask_intersects(cpu_online_mask, mask))
+				continue;
+			dq = &hisi_hba->dq[i];
+			rc = _hisi_sas_internal_task_abort(hisi_hba, device,
+							   abort_flag, tag,
+							   dq);
+			if (rc)
+				return rc;
+		}
+		break;
+	default:
+		dev_err(dev, "Unrecognised internal abort flag (%d)\n",
+			abort_flag);
+		return -EINVAL;
+	}
+
+	return 0;
+}
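+
+/*
+ * Editor's note: device-wide aborts are broadcast once per completion
+ * queue, riding the matching delivery queue each time, so each abort is
+ * processed behind whatever commands were already queued on that DQ.
+ * Queues whose IRQ affinity mask contains no online CPU are skipped,
+ * since their completions could not be serviced anyway.
+ */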
+
 static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
 {
 	hisi_sas_port_notify_formed(sas_phy);
 }
 
-static void hisi_sas_port_deformed(struct asd_sas_phy *sas_phy)
-{
-}
-
 static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
 			u8 reg_index, u8 reg_count, u8 *write_data)
 {
@@ -1879,9 +2160,20 @@
 
 static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
 {
+	struct asd_sas_phy *sas_phy = &phy->sas_phy;
+	struct sas_phy *sphy = sas_phy->phy;
+	unsigned long flags;
+
 	phy->phy_attached = 0;
 	phy->phy_type = 0;
 	phy->port = NULL;
+
+	spin_lock_irqsave(&phy->lock, flags);
+	if (phy->enable)
+		sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
+	else
+		sphy->negotiated_linkrate = SAS_PHY_DISABLED;
+	spin_unlock_irqrestore(&phy->lock, flags);
 }
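+
+/*
+ * Editor's note: negotiated_linkrate is updated under phy->lock so that
+ * a phy-down event racing with hisi_sas_phy_enable() still leaves sysfs
+ * reporting a consistent state: UNKNOWN while the phy is administratively
+ * enabled but down, PHY_DISABLED once it has been switched off.
+ */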
 
 void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
@@ -1926,7 +2218,7 @@
 {
 	int i;
 
-	for (i = 0; i < hisi_hba->queue_count; i++) {
+	for (i = 0; i < hisi_hba->cq_nvecs; i++) {
 		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
 
 		tasklet_kill(&cq->tasklet);
@@ -1934,15 +2226,22 @@
 }
 EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);
 
+int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type)
+{
+	struct hisi_hba *hisi_hba = shost_priv(shost);
+
+	if (reset_type != SCSI_ADAPTER_RESET)
+		return -EOPNOTSUPP;
+
+	queue_work(hisi_hba->wq, &hisi_hba->rst_work);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(hisi_sas_host_reset);
+
 struct scsi_transport_template *hisi_sas_stt;
 EXPORT_SYMBOL_GPL(hisi_sas_stt);
 
-struct device_attribute *host_attrs[] = {
-	&dev_attr_phy_event_threshold,
-	NULL,
-};
-EXPORT_SYMBOL_GPL(host_attrs);
-
 static struct sas_domain_function_template hisi_sas_transport_ops = {
 	.lldd_dev_found		= hisi_sas_dev_found,
 	.lldd_dev_gone		= hisi_sas_dev_gone,
@@ -1954,22 +2253,25 @@
 	.lldd_I_T_nexus_reset	= hisi_sas_I_T_nexus_reset,
 	.lldd_lu_reset		= hisi_sas_lu_reset,
 	.lldd_query_task	= hisi_sas_query_task,
-	.lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha,
+	.lldd_clear_nexus_ha	= hisi_sas_clear_nexus_ha,
 	.lldd_port_formed	= hisi_sas_port_formed,
-	.lldd_port_deformed = hisi_sas_port_deformed,
-	.lldd_write_gpio = hisi_sas_write_gpio,
+	.lldd_write_gpio	= hisi_sas_write_gpio,
 };
 
 void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
 {
-	int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
+	int i, s, j, max_command_entries = HISI_SAS_MAX_COMMANDS;
+	struct hisi_sas_breakpoint *sata_breakpoint = hisi_hba->sata_breakpoint;
 
 	for (i = 0; i < hisi_hba->queue_count; i++) {
 		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
 		struct hisi_sas_dq *dq = &hisi_hba->dq[i];
+		struct hisi_sas_cmd_hdr *cmd_hdr = hisi_hba->cmd_hdr[i];
 
-		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
-		memset(hisi_hba->cmd_hdr[i], 0, s);
+		s = sizeof(struct hisi_sas_cmd_hdr);
+		for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++)
+			memset(&cmd_hdr[j], 0, s);
+
 		dq->wr_point = 0;
 
 		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
@@ -1986,15 +2288,16 @@
 	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
 	memset(hisi_hba->breakpoint, 0, s);
 
-	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
-	memset(hisi_hba->sata_breakpoint, 0, s);
+	s = sizeof(struct hisi_sas_sata_breakpoint);
+	for (j = 0; j < HISI_SAS_MAX_ITCT_ENTRIES; j++)
+		memset(&sata_breakpoint[j], 0, s);
 }
 EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
 
-int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
+int hisi_sas_alloc(struct hisi_hba *hisi_hba)
 {
 	struct device *dev = hisi_hba->dev;
-	int i, j, s, max_command_entries = hisi_hba->hw->max_command_entries;
+	int i, j, s, max_command_entries = HISI_SAS_MAX_COMMANDS;
 	int max_command_entries_ru, sz_slot_buf_ru;
 	int blk_cnt, slots_per_blk;
 
@@ -2009,7 +2312,7 @@
 	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
 		hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
 		hisi_hba->devices[i].device_id = i;
-		hisi_hba->devices[i].dev_status = HISI_SAS_DEV_NORMAL;
+		hisi_hba->devices[i].dev_status = HISI_SAS_DEV_INIT;
 	}
 
 	for (i = 0; i < hisi_hba->queue_count; i++) {
@@ -2048,7 +2351,6 @@
 					     GFP_KERNEL);
 	if (!hisi_hba->itct)
 		goto err_out;
-	memset(hisi_hba->itct, 0, s);
 
 	hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
 					   sizeof(struct hisi_sas_slot),
@@ -2058,19 +2360,24 @@
 
 	/* roundup to avoid overly large block size */
 	max_command_entries_ru = roundup(max_command_entries, 64);
-	sz_slot_buf_ru = roundup(sizeof(struct hisi_sas_slot_buf_table), 64);
-	s = lcm(max_command_entries_ru, sz_slot_buf_ru);
+	if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK)
+		sz_slot_buf_ru = sizeof(struct hisi_sas_slot_dif_buf_table);
+	else
+		sz_slot_buf_ru = sizeof(struct hisi_sas_slot_buf_table);
+	sz_slot_buf_ru = roundup(sz_slot_buf_ru, 64);
+	s = max(lcm(max_command_entries_ru, sz_slot_buf_ru), PAGE_SIZE);
 	blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s;
 	slots_per_blk = s / sz_slot_buf_ru;
-	for (i = 0; i < blk_cnt; i++) {
-		struct hisi_sas_slot_buf_table *buf;
-		dma_addr_t buf_dma;
-		int slot_index = i * slots_per_blk;
 
-		buf = dmam_alloc_coherent(dev, s, &buf_dma, GFP_KERNEL);
+	for (i = 0; i < blk_cnt; i++) {
+		int slot_index = i * slots_per_blk;
+		dma_addr_t buf_dma;
+		void *buf;
+
+		buf = dmam_alloc_coherent(dev, s, &buf_dma, GFP_KERNEL);
 		if (!buf)
 			goto err_out;
-		memset(buf, 0, s);
 
 		for (j = 0; j < slots_per_blk; j++, slot_index++) {
 			struct hisi_sas_slot *slot;
@@ -2080,8 +2387,8 @@
 			slot->buf_dma = buf_dma;
 			slot->idx = slot_index;
 
-			buf++;
-			buf_dma += sizeof(*buf);
+			buf += sz_slot_buf_ru;
+			buf_dma += sz_slot_buf_ru;
 		}
 	}
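+	/*
+	 * Editor's note on the sizing above: s = lcm(a, b), with
+	 * a = max_command_entries_ru and b = sz_slot_buf_ru, is a multiple
+	 * of b, so every DMA block holds a whole number of slot buffers
+	 * and blk_cnt * slots_per_blk covers all command slots with no
+	 * buffer straddling a block boundary; roundup(..., 64) keeps each
+	 * buffer cache-line aligned, and the PAGE_SIZE floor only matters
+	 * if the lcm were ever smaller than one page.
+	 */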
 
@@ -2117,9 +2424,9 @@
 					GFP_KERNEL);
 	if (!hisi_hba->sata_breakpoint)
 		goto err_out;
-	hisi_sas_init_mem(hisi_hba);
 
 	hisi_sas_slot_index_init(hisi_hba);
+	hisi_hba->last_slot_index = HISI_SAS_UNRESERVED_IPTT;
 
 	hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
 	if (!hisi_hba->wq) {
@@ -2135,6 +2442,14 @@
 
 void hisi_sas_free(struct hisi_hba *hisi_hba)
 {
+	int i;
+
+	for (i = 0; i < hisi_hba->n_phy; i++) {
+		struct hisi_sas_phy *phy = &hisi_hba->phy[i];
+
+		del_timer_sync(&phy->timer);
+	}
+
 	if (hisi_hba->wq)
 		destroy_workqueue(hisi_hba->wq);
 }
@@ -2187,22 +2502,19 @@
 
 		if (device_property_read_u32(dev, "ctrl-reset-reg",
 					     &hisi_hba->ctrl_reset_reg)) {
-			dev_err(dev,
-				"could not get property ctrl-reset-reg\n");
+			dev_err(dev, "could not get property ctrl-reset-reg\n");
 			return -ENOENT;
 		}
 
 		if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
 					     &hisi_hba->ctrl_reset_sts_reg)) {
-			dev_err(dev,
-				"could not get property ctrl-reset-sts-reg\n");
+			dev_err(dev, "could not get property ctrl-reset-sts-reg\n");
 			return -ENOENT;
 		}
 
 		if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
 					     &hisi_hba->ctrl_clock_ena_reg)) {
-			dev_err(dev,
-				"could not get property ctrl-clock-ena-reg\n");
+			dev_err(dev, "could not get property ctrl-clock-ena-reg\n");
 			return -ENOENT;
 		}
 	}
@@ -2235,6 +2547,7 @@
 	struct Scsi_Host *shost;
 	struct hisi_hba *hisi_hba;
 	struct device *dev = &pdev->dev;
+	int error;
 
 	shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba));
 	if (!shost) {
@@ -2255,14 +2568,16 @@
 	if (hisi_sas_get_fw_info(hisi_hba) < 0)
 		goto err_out;
 
-	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
-	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
+	error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+	if (error)
+		error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+
+	if (error) {
 		dev_err(dev, "No usable DMA addressing method\n");
 		goto err_out;
 	}
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	hisi_hba->regs = devm_ioremap_resource(dev, res);
+	hisi_hba->regs = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(hisi_hba->regs))
 		goto err_out;
 
@@ -2273,7 +2588,7 @@
 			goto err_out;
 	}
 
-	if (hisi_sas_alloc(hisi_hba, shost)) {
+	if (hisi_sas_alloc(hisi_hba)) {
 		hisi_sas_free(hisi_hba);
 		goto err_out;
 	}
@@ -2322,9 +2637,13 @@
 	shost->max_lun = ~0;
 	shost->max_channel = 1;
 	shost->max_cmd_len = 16;
-	shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
-	shost->can_queue = hisi_hba->hw->max_command_entries;
-	shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
+	if (hisi_hba->hw->slot_index_alloc) {
+		shost->can_queue = HISI_SAS_MAX_COMMANDS;
+		shost->cmd_per_lun = HISI_SAS_MAX_COMMANDS;
+	} else {
+		shost->can_queue = HISI_SAS_UNRESERVED_IPTT;
+		shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT;
+	}
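+	/*
+	 * Editor's note: this pairs with the tag scheme in
+	 * hisi_sas_slot_index_alloc(): when the HW lacks its own allocator,
+	 * can_queue is capped at HISI_SAS_UNRESERVED_IPTT so block-layer
+	 * tags can never collide with the reserved internal-command tags
+	 * above that boundary.
+	 */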
 
 	sha->sas_ha_name = DRV_NAME;
 	sha->dev = hisi_hba->dev;
@@ -2363,6 +2682,1175 @@
 }
 EXPORT_SYMBOL_GPL(hisi_sas_probe);
 
+struct dentry *hisi_sas_debugfs_dir;
+
+static void hisi_sas_debugfs_snapshot_cq_reg(struct hisi_hba *hisi_hba)
+{
+	int queue_entry_size = hisi_hba->hw->complete_hdr_size;
+	int i;
+
+	for (i = 0; i < hisi_hba->queue_count; i++)
+		memcpy(hisi_hba->debugfs_complete_hdr[i],
+		       hisi_hba->complete_hdr[i],
+		       HISI_SAS_QUEUE_SLOTS * queue_entry_size);
+}
+
+static void hisi_sas_debugfs_snapshot_dq_reg(struct hisi_hba *hisi_hba)
+{
+	int queue_entry_size = sizeof(struct hisi_sas_cmd_hdr);
+	int i;
+
+	for (i = 0; i < hisi_hba->queue_count; i++) {
+		struct hisi_sas_cmd_hdr	*debugfs_cmd_hdr, *cmd_hdr;
+		int j;
+
+		debugfs_cmd_hdr = hisi_hba->debugfs_cmd_hdr[i];
+		cmd_hdr = hisi_hba->cmd_hdr[i];
+
+		for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++)
+			memcpy(&debugfs_cmd_hdr[j], &cmd_hdr[j],
+			       queue_entry_size);
+	}
+}
+
+static void hisi_sas_debugfs_snapshot_port_reg(struct hisi_hba *hisi_hba)
+{
+	const struct hisi_sas_debugfs_reg *port =
+		hisi_hba->hw->debugfs_reg_port;
+	int i, phy_cnt;
+	u32 offset;
+	u32 *databuf;
+
+	for (phy_cnt = 0; phy_cnt < hisi_hba->n_phy; phy_cnt++) {
+		databuf = (u32 *)hisi_hba->debugfs_port_reg[phy_cnt];
+		for (i = 0; i < port->count; i++, databuf++) {
+			offset = port->base_off + 4 * i;
+			*databuf = port->read_port_reg(hisi_hba, phy_cnt,
+						       offset);
+		}
+	}
+}
+
+static void hisi_sas_debugfs_snapshot_global_reg(struct hisi_hba *hisi_hba)
+{
+	u32 *databuf = hisi_hba->debugfs_regs[DEBUGFS_GLOBAL];
+	const struct hisi_sas_hw *hw = hisi_hba->hw;
+	const struct hisi_sas_debugfs_reg *global =
+			hw->debugfs_reg_array[DEBUGFS_GLOBAL];
+	int i;
+
+	for (i = 0; i < global->count; i++, databuf++)
+		*databuf = global->read_global_reg(hisi_hba, 4 * i);
+}
+
+static void hisi_sas_debugfs_snapshot_axi_reg(struct hisi_hba *hisi_hba)
+{
+	u32 *databuf = hisi_hba->debugfs_regs[DEBUGFS_AXI];
+	const struct hisi_sas_hw *hw = hisi_hba->hw;
+	const struct hisi_sas_debugfs_reg *axi =
+			hw->debugfs_reg_array[DEBUGFS_AXI];
+	int i;
+
+	for (i = 0; i < axi->count; i++, databuf++)
+		*databuf = axi->read_global_reg(hisi_hba,
+						4 * i + axi->base_off);
+}
+
+static void hisi_sas_debugfs_snapshot_ras_reg(struct hisi_hba *hisi_hba)
+{
+	u32 *databuf = hisi_hba->debugfs_regs[DEBUGFS_RAS];
+	const struct hisi_sas_hw *hw = hisi_hba->hw;
+	const struct hisi_sas_debugfs_reg *ras =
+			hw->debugfs_reg_array[DEBUGFS_RAS];
+	int i;
+
+	for (i = 0; i < ras->count; i++, databuf++)
+		*databuf = ras->read_global_reg(hisi_hba,
+						4 * i + ras->base_off);
+}
+
+static void hisi_sas_debugfs_snapshot_itct_reg(struct hisi_hba *hisi_hba)
+{
+	void *cachebuf = hisi_hba->debugfs_itct_cache;
+	void *databuf = hisi_hba->debugfs_itct;
+	struct hisi_sas_itct *itct;
+	int i;
+
+	hisi_hba->hw->read_iost_itct_cache(hisi_hba, HISI_SAS_ITCT_CACHE,
+					   cachebuf);
+
+	itct = hisi_hba->itct;
+
+	for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) {
+		memcpy(databuf, itct, sizeof(struct hisi_sas_itct));
+		databuf += sizeof(struct hisi_sas_itct);
+	}
+}
+
+static void hisi_sas_debugfs_snapshot_iost_reg(struct hisi_hba *hisi_hba)
+{
+	int max_command_entries = HISI_SAS_MAX_COMMANDS;
+	void *cachebuf = hisi_hba->debugfs_iost_cache;
+	void *databuf = hisi_hba->debugfs_iost;
+	struct hisi_sas_iost *iost;
+	int i;
+
+	hisi_hba->hw->read_iost_itct_cache(hisi_hba, HISI_SAS_IOST_CACHE,
+					   cachebuf);
+
+	iost = hisi_hba->iost;
+
+	for (i = 0; i < max_command_entries; i++, iost++) {
+		memcpy(databuf, iost, sizeof(struct hisi_sas_iost));
+		databuf += sizeof(struct hisi_sas_iost);
+	}
+}
+
+static const char *
+hisi_sas_debugfs_to_reg_name(int off, int base_off,
+			     const struct hisi_sas_debugfs_reg_lu *lu)
+{
+	for (; lu->name; lu++) {
+		if (off == lu->off - base_off)
+			return lu->name;
+	}
+
+	return NULL;
+}
+
+static void hisi_sas_debugfs_print_reg(u32 *regs_val, const void *ptr,
+				       struct seq_file *s)
+{
+	const struct hisi_sas_debugfs_reg *reg = ptr;
+	int i;
+
+	for (i = 0; i < reg->count; i++) {
+		int off = i * 4;
+		const char *name;
+
+		name = hisi_sas_debugfs_to_reg_name(off, reg->base_off,
+						    reg->lu);
+
+		if (name)
+			seq_printf(s, "0x%08x 0x%08x %s\n", off,
+				   regs_val[i], name);
+		else
+			seq_printf(s, "0x%08x 0x%08x\n", off,
+				   regs_val[i]);
+	}
+}
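+
+/*
+ * Editor's note: each line of the dump is "offset value [name]", where
+ * the symbolic name is resolved through the optional per-bank lookup
+ * table (reg->lu) and omitted when no entry matches the offset.
+ */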
+
+static int hisi_sas_debugfs_global_show(struct seq_file *s, void *p)
+{
+	struct hisi_hba *hisi_hba = s->private;
+	const struct hisi_sas_hw *hw = hisi_hba->hw;
+	const void *reg_global = hw->debugfs_reg_array[DEBUGFS_GLOBAL];
+
+	hisi_sas_debugfs_print_reg(hisi_hba->debugfs_regs[DEBUGFS_GLOBAL],
+				   reg_global, s);
+
+	return 0;
+}
+
+static int hisi_sas_debugfs_global_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, hisi_sas_debugfs_global_show,
+			   inode->i_private);
+}
+
+static const struct file_operations hisi_sas_debugfs_global_fops = {
+	.open = hisi_sas_debugfs_global_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.owner = THIS_MODULE,
+};
+
+static int hisi_sas_debugfs_axi_show(struct seq_file *s, void *p)
+{
+	struct hisi_hba *hisi_hba = s->private;
+	const struct hisi_sas_hw *hw = hisi_hba->hw;
+	const void *reg_axi = hw->debugfs_reg_array[DEBUGFS_AXI];
+
+	hisi_sas_debugfs_print_reg(hisi_hba->debugfs_regs[DEBUGFS_AXI],
+				   reg_axi, s);
+
+	return 0;
+}
+
+static int hisi_sas_debugfs_axi_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, hisi_sas_debugfs_axi_show,
+			   inode->i_private);
+}
+
+static const struct file_operations hisi_sas_debugfs_axi_fops = {
+	.open = hisi_sas_debugfs_axi_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.owner = THIS_MODULE,
+};
+
+static int hisi_sas_debugfs_ras_show(struct seq_file *s, void *p)
+{
+	struct hisi_hba *hisi_hba = s->private;
+	const struct hisi_sas_hw *hw = hisi_hba->hw;
+	const void *reg_ras = hw->debugfs_reg_array[DEBUGFS_RAS];
+
+	hisi_sas_debugfs_print_reg(hisi_hba->debugfs_regs[DEBUGFS_RAS],
+				   reg_ras, s);
+
+	return 0;
+}
+
+static int hisi_sas_debugfs_ras_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, hisi_sas_debugfs_ras_show,
+			   inode->i_private);
+}
+
+static const struct file_operations hisi_sas_debugfs_ras_fops = {
+	.open = hisi_sas_debugfs_ras_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.owner = THIS_MODULE,
+};
+
+static int hisi_sas_debugfs_port_show(struct seq_file *s, void *p)
+{
+	struct hisi_sas_phy *phy = s->private;
+	struct hisi_hba *hisi_hba = phy->hisi_hba;
+	const struct hisi_sas_hw *hw = hisi_hba->hw;
+	const struct hisi_sas_debugfs_reg *reg_port = hw->debugfs_reg_port;
+	u32 *databuf = hisi_hba->debugfs_port_reg[phy->sas_phy.id];
+
+	hisi_sas_debugfs_print_reg(databuf, reg_port, s);
+
+	return 0;
+}
+
+static int hisi_sas_debugfs_port_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, hisi_sas_debugfs_port_show, inode->i_private);
+}
+
+static const struct file_operations hisi_sas_debugfs_port_fops = {
+	.open = hisi_sas_debugfs_port_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.owner = THIS_MODULE,
+};
+
+static void hisi_sas_show_row_64(struct seq_file *s, int index,
+				 int sz, __le64 *ptr)
+{
+	int i;
+
+	/* completion header size not fixed per HW version */
+	seq_printf(s, "index %04d:\n\t", index);
+	for (i = 1; i <= sz / 8; i++, ptr++) {
+		seq_printf(s, " 0x%016llx", le64_to_cpu(*ptr));
+		if (!(i % 2))
+			seq_puts(s, "\n\t");
+	}
+
+	seq_puts(s, "\n");
+}
+
+static void hisi_sas_show_row_32(struct seq_file *s, int index,
+				 int sz, __le32 *ptr)
+{
+	int i;
+
+	/* completion header size not fixed per HW version */
+	seq_printf(s, "index %04d:\n\t", index);
+	for (i = 1; i <= sz / 4; i++, ptr++) {
+		seq_printf(s, " 0x%08x", le32_to_cpu(*ptr));
+		if (!(i % 4))
+			seq_puts(s, "\n\t");
+	}
+	seq_puts(s, "\n");
+}
+
+static void hisi_sas_cq_show_slot(struct seq_file *s, int slot, void *cq_ptr)
+{
+	struct hisi_sas_cq *cq = cq_ptr;
+	struct hisi_hba *hisi_hba = cq->hisi_hba;
+	void *complete_queue = hisi_hba->debugfs_complete_hdr[cq->id];
+	__le32 *complete_hdr = complete_queue +
+			(hisi_hba->hw->complete_hdr_size * slot);
+
+	hisi_sas_show_row_32(s, slot,
+			     hisi_hba->hw->complete_hdr_size,
+			     complete_hdr);
+}
+
+static int hisi_sas_debugfs_cq_show(struct seq_file *s, void *p)
+{
+	struct hisi_sas_cq *cq = s->private;
+	int slot;
+
+	for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++)
+		hisi_sas_cq_show_slot(s, slot, cq);
+
+	return 0;
+}
+
+static int hisi_sas_debugfs_cq_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, hisi_sas_debugfs_cq_show, inode->i_private);
+}
+
+static const struct file_operations hisi_sas_debugfs_cq_fops = {
+	.open = hisi_sas_debugfs_cq_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.owner = THIS_MODULE,
+};
+
+static void hisi_sas_dq_show_slot(struct seq_file *s, int slot, void *dq_ptr)
+{
+	struct hisi_sas_dq *dq = dq_ptr;
+	struct hisi_hba *hisi_hba = dq->hisi_hba;
+	void *cmd_queue = hisi_hba->debugfs_cmd_hdr[dq->id];
+	__le32 *cmd_hdr = cmd_queue +
+		sizeof(struct hisi_sas_cmd_hdr) * slot;
+
+	hisi_sas_show_row_32(s, slot, sizeof(struct hisi_sas_cmd_hdr), cmd_hdr);
+}
+
+static int hisi_sas_debugfs_dq_show(struct seq_file *s, void *p)
+{
+	int slot;
+
+	for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++)
+		hisi_sas_dq_show_slot(s, slot, s->private);
+
+	return 0;
+}
+
+static int hisi_sas_debugfs_dq_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, hisi_sas_debugfs_dq_show, inode->i_private);
+}
+
+static const struct file_operations hisi_sas_debugfs_dq_fops = {
+	.open = hisi_sas_debugfs_dq_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.owner = THIS_MODULE,
+};
+
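+/* Dump every IOST entry captured by the snapshot. */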
+static int hisi_sas_debugfs_iost_show(struct seq_file *s, void *p)
+{
+	struct hisi_hba *hisi_hba = s->private;
+	struct hisi_sas_iost *debugfs_iost = hisi_hba->debugfs_iost;
+	int i, max_command_entries = HISI_SAS_MAX_COMMANDS;
+
+	for (i = 0; i < max_command_entries; i++, debugfs_iost++) {
+		__le64 *iost = &debugfs_iost->qw0;
+
+		hisi_sas_show_row_64(s, i, sizeof(*debugfs_iost), iost);
+	}
+
+	return 0;
+}
+
+static int hisi_sas_debugfs_iost_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, hisi_sas_debugfs_iost_show, inode->i_private);
+}
+
+static const struct file_operations hisi_sas_debugfs_iost_fops = {
+	.open = hisi_sas_debugfs_iost_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.owner = THIS_MODULE,
+};
+
+static int hisi_sas_debugfs_iost_cache_show(struct seq_file *s, void *p)
+{
+	struct hisi_hba *hisi_hba = s->private;
+	struct hisi_sas_iost_itct_cache *iost_cache =
+		(struct hisi_sas_iost_itct_cache *)hisi_hba->debugfs_iost_cache;
+	u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4;
+	int i, tab_idx;
+	__le64 *iost;
+
+	for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, iost_cache++) {
+		/*
+		 * Data struct of IOST cache:
+		 * Data[1]: Bit0~15: Table index
+		 *	    Bit16:   Valid mask
+		 * Data[2]~[9]: IOST table
+		 */
+		tab_idx = iost_cache->data[1] & 0xffff;
+		iost = (__le64 *)iost_cache;
+
+		hisi_sas_show_row_64(s, tab_idx, cache_size, iost);
+	}
+
+	return 0;
+}
+
+static int hisi_sas_debugfs_iost_cache_open(struct inode *inode,
+					    struct file *filp)
+{
+	return single_open(filp, hisi_sas_debugfs_iost_cache_show,
+			   inode->i_private);
+}
+
+static const struct file_operations hisi_sas_debugfs_iost_cache_fops = {
+	.open = hisi_sas_debugfs_iost_cache_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.owner = THIS_MODULE,
+};
+
+static int hisi_sas_debugfs_itct_show(struct seq_file *s, void *p)
+{
+	int i;
+	struct hisi_hba *hisi_hba = s->private;
+	struct hisi_sas_itct *debugfs_itct = hisi_hba->debugfs_itct;
+
+	for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, debugfs_itct++) {
+		__le64 *itct = &debugfs_itct->qw0;
+
+		hisi_sas_show_row_64(s, i, sizeof(*debugfs_itct), itct);
+	}
+
+	return 0;
+}
+
+static int hisi_sas_debugfs_itct_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, hisi_sas_debugfs_itct_show, inode->i_private);
+}
+
+static const struct file_operations hisi_sas_debugfs_itct_fops = {
+	.open = hisi_sas_debugfs_itct_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.owner = THIS_MODULE,
+};
+
+static int hisi_sas_debugfs_itct_cache_show(struct seq_file *s, void *p)
+{
+	struct hisi_hba *hisi_hba = s->private;
+	struct hisi_sas_iost_itct_cache *itct_cache =
+		(struct hisi_sas_iost_itct_cache *)hisi_hba->debugfs_itct_cache;
+	u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4;
+	int i, tab_idx;
+	__le64 *itct;
+
+	for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, itct_cache++) {
+		/*
+		 * Data struct of ITCT cache:
+		 * Data[1]: Bit0~15: Table index
+		 *	    Bit16:   Valid mask
+		 * Data[2]~[9]: ITCT table
+		 */
+		tab_idx = itct_cache->data[1] & 0xffff;
+		itct = (__le64 *)itct_cache;
+
+		hisi_sas_show_row_64(s, tab_idx, cache_size, itct);
+	}
+
+	return 0;
+}
+
+static int hisi_sas_debugfs_itct_cache_open(struct inode *inode,
+					    struct file *filp)
+{
+	return single_open(filp, hisi_sas_debugfs_itct_cache_show,
+			   inode->i_private);
+}
+
+static const struct file_operations hisi_sas_debugfs_itct_cache_fops = {
+	.open = hisi_sas_debugfs_itct_cache_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.owner = THIS_MODULE,
+};
+
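+/*
+ * Populate the per-device dump/ directory: global, axi, ras, iost,
+ * iost_cache, itct and itct_cache files, plus one file per PHY under
+ * port/ and one per queue under cq/ and dq/.
+ */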
+static void hisi_sas_debugfs_create_files(struct hisi_hba *hisi_hba)
+{
+	struct dentry *dump_dentry;
+	struct dentry *dentry;
+	char name[256];
+	int p;
+	int c;
+	int d;
+
+	/* Create dump dir inside device dir */
+	dump_dentry = debugfs_create_dir("dump", hisi_hba->debugfs_dir);
+	hisi_hba->debugfs_dump_dentry = dump_dentry;
+
+	debugfs_create_file("global", 0400, dump_dentry, hisi_hba,
+			    &hisi_sas_debugfs_global_fops);
+
+	/* Create port dir and files */
+	dentry = debugfs_create_dir("port", dump_dentry);
+	for (p = 0; p < hisi_hba->n_phy; p++) {
+		snprintf(name, sizeof(name), "%d", p);
+
+		debugfs_create_file(name, 0400, dentry, &hisi_hba->phy[p],
+				    &hisi_sas_debugfs_port_fops);
+	}
+
+	/* Create CQ dir and files */
+	dentry = debugfs_create_dir("cq", dump_dentry);
+	for (c = 0; c < hisi_hba->queue_count; c++) {
+		snprintf(name, sizeof(name), "%d", c);
+
+		debugfs_create_file(name, 0400, dentry, &hisi_hba->cq[c],
+				    &hisi_sas_debugfs_cq_fops);
+	}
+
+	/* Create DQ dir and files */
+	dentry = debugfs_create_dir("dq", dump_dentry);
+	for (d = 0; d < hisi_hba->queue_count; d++) {
+		snprintf(name, sizeof(name), "%d", d);
+
+		debugfs_create_file(name, 0400, dentry, &hisi_hba->dq[d],
+				    &hisi_sas_debugfs_dq_fops);
+	}
+
+	debugfs_create_file("iost", 0400, dump_dentry, hisi_hba,
+			    &hisi_sas_debugfs_iost_fops);
+
+	debugfs_create_file("iost_cache", 0400, dump_dentry, hisi_hba,
+			    &hisi_sas_debugfs_iost_cache_fops);
+
+	debugfs_create_file("itct", 0400, dump_dentry, hisi_hba,
+			    &hisi_sas_debugfs_itct_fops);
+
+	debugfs_create_file("itct_cache", 0400, dump_dentry, hisi_hba,
+			    &hisi_sas_debugfs_itct_cache_fops);
+
+	debugfs_create_file("axi", 0400, dump_dentry, hisi_hba,
+			    &hisi_sas_debugfs_axi_fops);
+
+	debugfs_create_file("ras", 0400, dump_dentry, hisi_hba,
+			    &hisi_sas_debugfs_ras_fops);
+}
+
+static void hisi_sas_debugfs_snapshot_regs(struct hisi_hba *hisi_hba)
+{
+	hisi_hba->hw->snapshot_prepare(hisi_hba);
+
+	hisi_sas_debugfs_snapshot_global_reg(hisi_hba);
+	hisi_sas_debugfs_snapshot_port_reg(hisi_hba);
+	hisi_sas_debugfs_snapshot_axi_reg(hisi_hba);
+	hisi_sas_debugfs_snapshot_ras_reg(hisi_hba);
+	hisi_sas_debugfs_snapshot_cq_reg(hisi_hba);
+	hisi_sas_debugfs_snapshot_dq_reg(hisi_hba);
+	hisi_sas_debugfs_snapshot_itct_reg(hisi_hba);
+	hisi_sas_debugfs_snapshot_iost_reg(hisi_hba);
+
+	hisi_sas_debugfs_create_files(hisi_hba);
+
+	hisi_hba->hw->snapshot_restore(hisi_hba);
+}
+
+static ssize_t hisi_sas_debugfs_trigger_dump_write(struct file *file,
+						   const char __user *user_buf,
+						   size_t count, loff_t *ppos)
+{
+	struct hisi_hba *hisi_hba = file->f_inode->i_private;
+	char buf[8];
+
+	/* A bit racy, but don't care too much since it's only debugfs */
+	if (hisi_hba->debugfs_snapshot)
+		return -EBUSY;
+
+	if (!count || count > sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+
+	if (buf[0] != '1')
+		return -EINVAL;
+
+	queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);
+
+	return count;
+}
+
+static const struct file_operations hisi_sas_debugfs_trigger_dump_fops = {
+	.write = hisi_sas_debugfs_trigger_dump_write,
+	.owner = THIS_MODULE,
+};
+
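+/* BIST (built-in self test) loopback configuration, exposed under bist/ */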
+enum {
+	HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL = 0,
+	HISI_SAS_BIST_LOOPBACK_MODE_SERDES,
+	HISI_SAS_BIST_LOOPBACK_MODE_REMOTE,
+};
+
+enum {
+	HISI_SAS_BIST_CODE_MODE_PRBS7 = 0,
+	HISI_SAS_BIST_CODE_MODE_PRBS23,
+	HISI_SAS_BIST_CODE_MODE_PRBS31,
+	HISI_SAS_BIST_CODE_MODE_JTPAT,
+	HISI_SAS_BIST_CODE_MODE_CJTPAT,
+	HISI_SAS_BIST_CODE_MODE_SCRAMBED_0,
+	HISI_SAS_BIST_CODE_MODE_TRAIN,
+	HISI_SAS_BIST_CODE_MODE_TRAIN_DONE,
+	HISI_SAS_BIST_CODE_MODE_HFTP,
+	HISI_SAS_BIST_CODE_MODE_MFTP,
+	HISI_SAS_BIST_CODE_MODE_LFTP,
+	HISI_SAS_BIST_CODE_MODE_FIXED_DATA,
+};
+
+static const struct {
+	int		value;
+	const char	*name;
+} hisi_sas_debugfs_loop_linkrate[] = {
+	{ SAS_LINK_RATE_1_5_GBPS, "1.5 Gbit" },
+	{ SAS_LINK_RATE_3_0_GBPS, "3.0 Gbit" },
+	{ SAS_LINK_RATE_6_0_GBPS, "6.0 Gbit" },
+	{ SAS_LINK_RATE_12_0_GBPS, "12.0 Gbit" },
+};
+
+static int hisi_sas_debugfs_bist_linkrate_show(struct seq_file *s, void *p)
+{
+	struct hisi_hba *hisi_hba = s->private;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_linkrate); i++) {
+		int match = (hisi_hba->debugfs_bist_linkrate ==
+			     hisi_sas_debugfs_loop_linkrate[i].value);
+
+		seq_printf(s, "%s%s%s ", match ? "[" : "",
+			   hisi_sas_debugfs_loop_linkrate[i].name,
+			   match ? "]" : "");
+	}
+	seq_puts(s, "\n");
+
+	return 0;
+}
+
+static ssize_t hisi_sas_debugfs_bist_linkrate_write(struct file *filp,
+						    const char __user *buf,
+						    size_t count, loff_t *ppos)
+{
+	struct seq_file *m = filp->private_data;
+	struct hisi_hba *hisi_hba = m->private;
+	char kbuf[16] = {}, *pkbuf;
+	bool found = false;
+	int i;
+
+	if (hisi_hba->debugfs_bist_enable)
+		return -EPERM;
+
+	if (count >= sizeof(kbuf))
+		return -EOVERFLOW;
+
+	if (copy_from_user(kbuf, buf, count))
+		return -EFAULT;
+
+	pkbuf = strstrip(kbuf);
+
+	for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_linkrate); i++) {
+		if (!strncmp(hisi_sas_debugfs_loop_linkrate[i].name,
+			     pkbuf, 16)) {
+			hisi_hba->debugfs_bist_linkrate =
+				hisi_sas_debugfs_loop_linkrate[i].value;
+			found = true;
+			break;
+		}
+	}
+
+	if (!found)
+		return -EINVAL;
+
+	return count;
+}
+
+static int hisi_sas_debugfs_bist_linkrate_open(struct inode *inode,
+					       struct file *filp)
+{
+	return single_open(filp, hisi_sas_debugfs_bist_linkrate_show,
+			   inode->i_private);
+}
+
+static const struct file_operations hisi_sas_debugfs_bist_linkrate_ops = {
+	.open = hisi_sas_debugfs_bist_linkrate_open,
+	.read = seq_read,
+	.write = hisi_sas_debugfs_bist_linkrate_write,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.owner = THIS_MODULE,
+};
+
+static const struct {
+	int		value;
+	const char	*name;
+} hisi_sas_debugfs_loop_code_mode[] = {
+	{ HISI_SAS_BIST_CODE_MODE_PRBS7, "PRBS7" },
+	{ HISI_SAS_BIST_CODE_MODE_PRBS23, "PRBS23" },
+	{ HISI_SAS_BIST_CODE_MODE_PRBS31, "PRBS31" },
+	{ HISI_SAS_BIST_CODE_MODE_JTPAT, "JTPAT" },
+	{ HISI_SAS_BIST_CODE_MODE_CJTPAT, "CJTPAT" },
+	{ HISI_SAS_BIST_CODE_MODE_SCRAMBED_0, "SCRAMBED_0" },
+	{ HISI_SAS_BIST_CODE_MODE_TRAIN, "TRAIN" },
+	{ HISI_SAS_BIST_CODE_MODE_TRAIN_DONE, "TRAIN_DONE" },
+	{ HISI_SAS_BIST_CODE_MODE_HFTP, "HFTP" },
+	{ HISI_SAS_BIST_CODE_MODE_MFTP, "MFTP" },
+	{ HISI_SAS_BIST_CODE_MODE_LFTP, "LFTP" },
+	{ HISI_SAS_BIST_CODE_MODE_FIXED_DATA, "FIXED_DATA" },
+};
+
+static int hisi_sas_debugfs_bist_code_mode_show(struct seq_file *s, void *p)
+{
+	struct hisi_hba *hisi_hba = s->private;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_code_mode); i++) {
+		int match = (hisi_hba->debugfs_bist_code_mode ==
+			     hisi_sas_debugfs_loop_code_mode[i].value);
+
+		seq_printf(s, "%s%s%s ", match ? "[" : "",
+			   hisi_sas_debugfs_loop_code_mode[i].name,
+			   match ? "]" : "");
+	}
+	seq_puts(s, "\n");
+
+	return 0;
+}
+
+static ssize_t hisi_sas_debugfs_bist_code_mode_write(struct file *filp,
+						     const char __user *buf,
+						     size_t count,
+						     loff_t *ppos)
+{
+	struct seq_file *m = filp->private_data;
+	struct hisi_hba *hisi_hba = m->private;
+	char kbuf[16] = {}, *pkbuf;
+	bool found = false;
+	int i;
+
+	if (hisi_hba->debugfs_bist_enable)
+		return -EPERM;
+
+	if (count >= sizeof(kbuf))
+		return -EOVERFLOW;
+
+	if (copy_from_user(kbuf, buf, count))
+		return -EFAULT;
+
+	pkbuf = strstrip(kbuf);
+
+	for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_code_mode); i++) {
+		if (!strncmp(hisi_sas_debugfs_loop_code_mode[i].name,
+			     pkbuf, 16)) {
+			hisi_hba->debugfs_bist_code_mode =
+				hisi_sas_debugfs_loop_code_mode[i].value;
+			found = true;
+			break;
+		}
+	}
+
+	if (!found)
+		return -EINVAL;
+
+	return count;
+}
+
+static int hisi_sas_debugfs_bist_code_mode_open(struct inode *inode,
+						struct file *filp)
+{
+	return single_open(filp, hisi_sas_debugfs_bist_code_mode_show,
+			   inode->i_private);
+}
+
+static const struct file_operations hisi_sas_debugfs_bist_code_mode_ops = {
+	.open = hisi_sas_debugfs_bist_code_mode_open,
+	.read = seq_read,
+	.write = hisi_sas_debugfs_bist_code_mode_write,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.owner = THIS_MODULE,
+};
+
+static ssize_t hisi_sas_debugfs_bist_phy_write(struct file *filp,
+					       const char __user *buf,
+					       size_t count, loff_t *ppos)
+{
+	struct seq_file *m = filp->private_data;
+	struct hisi_hba *hisi_hba = m->private;
+	unsigned int phy_no;
+	int val;
+
+	if (hisi_hba->debugfs_bist_enable)
+		return -EPERM;
+
+	val = kstrtouint_from_user(buf, count, 0, &phy_no);
+	if (val)
+		return val;
+
+	if (phy_no >= hisi_hba->n_phy)
+		return -EINVAL;
+
+	hisi_hba->debugfs_bist_phy_no = phy_no;
+
+	return count;
+}
+
+static int hisi_sas_debugfs_bist_phy_show(struct seq_file *s, void *p)
+{
+	struct hisi_hba *hisi_hba = s->private;
+
+	seq_printf(s, "%d\n", hisi_hba->debugfs_bist_phy_no);
+
+	return 0;
+}
+
+static int hisi_sas_debugfs_bist_phy_open(struct inode *inode,
+					  struct file *filp)
+{
+	return single_open(filp, hisi_sas_debugfs_bist_phy_show,
+			   inode->i_private);
+}
+
+static const struct file_operations hisi_sas_debugfs_bist_phy_ops = {
+	.open = hisi_sas_debugfs_bist_phy_open,
+	.read = seq_read,
+	.write = hisi_sas_debugfs_bist_phy_write,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.owner = THIS_MODULE,
+};
+
+static const struct {
+	int		value;
+	const char	*name;
+} hisi_sas_debugfs_loop_modes[] = {
+	{ HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL, "digital" },
+	{ HISI_SAS_BIST_LOOPBACK_MODE_SERDES, "serdes" },
+	{ HISI_SAS_BIST_LOOPBACK_MODE_REMOTE, "remote" },
+};
+
+static int hisi_sas_debugfs_bist_mode_show(struct seq_file *s, void *p)
+{
+	struct hisi_hba *hisi_hba = s->private;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_modes); i++) {
+		int match = (hisi_hba->debugfs_bist_mode ==
+			     hisi_sas_debugfs_loop_modes[i].value);
+
+		seq_printf(s, "%s%s%s ", match ? "[" : "",
+			   hisi_sas_debugfs_loop_modes[i].name,
+			   match ? "]" : "");
+	}
+	seq_puts(s, "\n");
+
+	return 0;
+}
+
+static ssize_t hisi_sas_debugfs_bist_mode_write(struct file *filp,
+						const char __user *buf,
+						size_t count, loff_t *ppos)
+{
+	struct seq_file *m = filp->private_data;
+	struct hisi_hba *hisi_hba = m->private;
+	char kbuf[16] = {}, *pkbuf;
+	bool found = false;
+	int i;
+
+	if (hisi_hba->debugfs_bist_enable)
+		return -EPERM;
+
+	if (count >= sizeof(kbuf))
+		return -EOVERFLOW;
+
+	if (copy_from_user(kbuf, buf, count))
+		return -EFAULT;
+
+	pkbuf = strstrip(kbuf);
+
+	for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_modes); i++) {
+		if (!strncmp(hisi_sas_debugfs_loop_modes[i].name, pkbuf, 16)) {
+			hisi_hba->debugfs_bist_mode =
+				hisi_sas_debugfs_loop_modes[i].value;
+			found = true;
+			break;
+		}
+	}
+
+	if (!found)
+		return -EINVAL;
+
+	return count;
+}
+
+static int hisi_sas_debugfs_bist_mode_open(struct inode *inode,
+					   struct file *filp)
+{
+	return single_open(filp, hisi_sas_debugfs_bist_mode_show,
+			   inode->i_private);
+}
+
+static const struct file_operations hisi_sas_debugfs_bist_mode_ops = {
+	.open = hisi_sas_debugfs_bist_mode_open,
+	.read = seq_read,
+	.write = hisi_sas_debugfs_bist_mode_write,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.owner = THIS_MODULE,
+};
+
+static ssize_t hisi_sas_debugfs_bist_enable_write(struct file *filp,
+						  const char __user *buf,
+						  size_t count, loff_t *ppos)
+{
+	struct seq_file *m = filp->private_data;
+	struct hisi_hba *hisi_hba = m->private;
+	unsigned int enable;
+	int val;
+
+	val = kstrtouint_from_user(buf, count, 0, &enable);
+	if (val)
+		return val;
+
+	if (enable > 1)
+		return -EINVAL;
+
+	if (enable == hisi_hba->debugfs_bist_enable)
+		return count;
+
+	if (!hisi_hba->hw->set_bist)
+		return -EPERM;
+
+	val = hisi_hba->hw->set_bist(hisi_hba, enable);
+	if (val < 0)
+		return val;
+
+	hisi_hba->debugfs_bist_enable = enable;
+
+	return count;
+}
+
+static int hisi_sas_debugfs_bist_enable_show(struct seq_file *s, void *p)
+{
+	struct hisi_hba *hisi_hba = s->private;
+
+	seq_printf(s, "%d\n", hisi_hba->debugfs_bist_enable);
+
+	return 0;
+}
+
+static int hisi_sas_debugfs_bist_enable_open(struct inode *inode,
+					     struct file *filp)
+{
+	return single_open(filp, hisi_sas_debugfs_bist_enable_show,
+			   inode->i_private);
+}
+
+static const struct file_operations hisi_sas_debugfs_bist_enable_ops = {
+	.open = hisi_sas_debugfs_bist_enable_open,
+	.read = seq_read,
+	.write = hisi_sas_debugfs_bist_enable_write,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.owner = THIS_MODULE,
+};
+
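+/*
+ * Deferred from a trigger_dump write; the debugfs_snapshot flag makes
+ * the dump one-shot.
+ */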
+void hisi_sas_debugfs_work_handler(struct work_struct *work)
+{
+	struct hisi_hba *hisi_hba =
+		container_of(work, struct hisi_hba, debugfs_work);
+
+	if (hisi_hba->debugfs_snapshot)
+		return;
+	hisi_hba->debugfs_snapshot = true;
+
+	hisi_sas_debugfs_snapshot_regs(hisi_hba);
+}
+EXPORT_SYMBOL_GPL(hisi_sas_debugfs_work_handler);
+
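+/*
+ * Undo hisi_sas_debugfs_alloc() on its failure path.  debugfs_itct is
+ * allocated last there, so it never needs freeing here.
+ */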
+static void hisi_sas_debugfs_release(struct hisi_hba *hisi_hba)
+{
+	struct device *dev = hisi_hba->dev;
+	int i;
+
+	devm_kfree(dev, hisi_hba->debugfs_iost_cache);
+	devm_kfree(dev, hisi_hba->debugfs_itct_cache);
+	devm_kfree(dev, hisi_hba->debugfs_iost);
+
+	for (i = 0; i < hisi_hba->queue_count; i++)
+		devm_kfree(dev, hisi_hba->debugfs_cmd_hdr[i]);
+
+	for (i = 0; i < hisi_hba->queue_count; i++)
+		devm_kfree(dev, hisi_hba->debugfs_complete_hdr[i]);
+
+	for (i = 0; i < DEBUGFS_REGS_NUM; i++)
+		devm_kfree(dev, hisi_hba->debugfs_regs[i]);
+
+	for (i = 0; i < hisi_hba->n_phy; i++)
+		devm_kfree(dev, hisi_hba->debugfs_port_reg[i]);
+}
+
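+/*
+ * Pre-allocate the snapshot buffers: register dumps are sized in 32-bit
+ * words (count * 4 bytes); queue and table dumps mirror the hardware
+ * structures.
+ */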
+static int hisi_sas_debugfs_alloc(struct hisi_hba *hisi_hba)
+{
+	const struct hisi_sas_hw *hw = hisi_hba->hw;
+	struct device *dev = hisi_hba->dev;
+	int p, c, d;
+	size_t sz;
+
+	hisi_hba->debugfs_dump_dentry =
+			debugfs_create_dir("dump", hisi_hba->debugfs_dir);
+
+	sz = hw->debugfs_reg_array[DEBUGFS_GLOBAL]->count * 4;
+	hisi_hba->debugfs_regs[DEBUGFS_GLOBAL] =
+				devm_kmalloc(dev, sz, GFP_KERNEL);
+
+	if (!hisi_hba->debugfs_regs[DEBUGFS_GLOBAL])
+		goto fail;
+
+	sz = hw->debugfs_reg_port->count * 4;
+	for (p = 0; p < hisi_hba->n_phy; p++) {
+		hisi_hba->debugfs_port_reg[p] =
+			devm_kmalloc(dev, sz, GFP_KERNEL);
+
+		if (!hisi_hba->debugfs_port_reg[p])
+			goto fail;
+	}
+
+	sz = hw->debugfs_reg_array[DEBUGFS_AXI]->count * 4;
+	hisi_hba->debugfs_regs[DEBUGFS_AXI] =
+		devm_kmalloc(dev, sz, GFP_KERNEL);
+
+	if (!hisi_hba->debugfs_regs[DEBUGFS_AXI])
+		goto fail;
+
+	sz = hw->debugfs_reg_array[DEBUGFS_RAS]->count * 4;
+	hisi_hba->debugfs_regs[DEBUGFS_RAS] =
+		devm_kmalloc(dev, sz, GFP_KERNEL);
+
+	if (!hisi_hba->debugfs_regs[DEBUGFS_RAS])
+		goto fail;
+
+	sz = hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
+	for (c = 0; c < hisi_hba->queue_count; c++) {
+		hisi_hba->debugfs_complete_hdr[c] =
+			devm_kmalloc(dev, sz, GFP_KERNEL);
+
+		if (!hisi_hba->debugfs_complete_hdr[c])
+			goto fail;
+	}
+
+	sz = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
+	for (d = 0; d < hisi_hba->queue_count; d++) {
+		hisi_hba->debugfs_cmd_hdr[d] =
+			devm_kmalloc(dev, sz, GFP_KERNEL);
+
+		if (!hisi_hba->debugfs_cmd_hdr[d])
+			goto fail;
+	}
+
+	sz = HISI_SAS_MAX_COMMANDS * sizeof(struct hisi_sas_iost);
+
+	hisi_hba->debugfs_iost = devm_kmalloc(dev, sz, GFP_KERNEL);
+	if (!hisi_hba->debugfs_iost)
+		goto fail;
+
+	sz = HISI_SAS_IOST_ITCT_CACHE_NUM *
+	     sizeof(struct hisi_sas_iost_itct_cache);
+
+	hisi_hba->debugfs_iost_cache = devm_kmalloc(dev, sz, GFP_KERNEL);
+	if (!hisi_hba->debugfs_iost_cache)
+		goto fail;
+
+	sz = HISI_SAS_IOST_ITCT_CACHE_NUM *
+	     sizeof(struct hisi_sas_iost_itct_cache);
+
+	hisi_hba->debugfs_itct_cache = devm_kmalloc(dev, sz, GFP_KERNEL);
+	if (!hisi_hba->debugfs_itct_cache)
+		goto fail;
+
+	/*
+	 * Any new allocation must be added before this one: debugfs_itct is
+	 * the only buffer not freed by hisi_sas_debugfs_release().
+	 */
+	sz = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
+
+	hisi_hba->debugfs_itct = devm_kmalloc(dev, sz, GFP_KERNEL);
+	if (!hisi_hba->debugfs_itct)
+		goto fail;
+
+	return 0;
+fail:
+	hisi_sas_debugfs_release(hisi_hba);
+	return -ENOMEM;
+}
+
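+/*
+ * Create the bist/ control files.  A typical run from userspace (a
+ * sketch; <dev> stands for the controller's dev_name()):
+ *   echo serdes > /sys/kernel/debug/hisi_sas/<dev>/bist/loopback_mode
+ *   echo PRBS7  > /sys/kernel/debug/hisi_sas/<dev>/bist/code_mode
+ *   echo 0      > /sys/kernel/debug/hisi_sas/<dev>/bist/phy_id
+ *   echo 1      > /sys/kernel/debug/hisi_sas/<dev>/bist/enable
+ */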
+static void hisi_sas_debugfs_bist_init(struct hisi_hba *hisi_hba)
+{
+	hisi_hba->debugfs_bist_dentry =
+			debugfs_create_dir("bist", hisi_hba->debugfs_dir);
+	debugfs_create_file("link_rate", 0600,
+			    hisi_hba->debugfs_bist_dentry, hisi_hba,
+			    &hisi_sas_debugfs_bist_linkrate_ops);
+
+	debugfs_create_file("code_mode", 0600,
+			    hisi_hba->debugfs_bist_dentry, hisi_hba,
+			    &hisi_sas_debugfs_bist_code_mode_ops);
+
+	debugfs_create_file("phy_id", 0600, hisi_hba->debugfs_bist_dentry,
+			    hisi_hba, &hisi_sas_debugfs_bist_phy_ops);
+
+	debugfs_create_u32("cnt", 0600, hisi_hba->debugfs_bist_dentry,
+			   &hisi_hba->debugfs_bist_cnt);
+
+	debugfs_create_file("loopback_mode", 0600,
+			    hisi_hba->debugfs_bist_dentry,
+			    hisi_hba, &hisi_sas_debugfs_bist_mode_ops);
+
+	debugfs_create_file("enable", 0600, hisi_hba->debugfs_bist_dentry,
+			    hisi_hba, &hisi_sas_debugfs_bist_enable_ops);
+
+	hisi_hba->debugfs_bist_linkrate = SAS_LINK_RATE_1_5_GBPS;
+}
+
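+/*
+ * Create /sys/kernel/debug/hisi_sas/<dev>/ (assuming the default debugfs
+ * mount point); writing 1 to its trigger_dump file schedules the
+ * register snapshot.
+ */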
+void hisi_sas_debugfs_init(struct hisi_hba *hisi_hba)
+{
+	struct device *dev = hisi_hba->dev;
+
+	hisi_hba->debugfs_dir = debugfs_create_dir(dev_name(dev),
+						   hisi_sas_debugfs_dir);
+	debugfs_create_file("trigger_dump", 0600,
+			    hisi_hba->debugfs_dir,
+			    hisi_hba,
+			    &hisi_sas_debugfs_trigger_dump_fops);
+
+	/* create bist structures */
+	hisi_sas_debugfs_bist_init(hisi_hba);
+
+	if (hisi_sas_debugfs_alloc(hisi_hba)) {
+		debugfs_remove_recursive(hisi_hba->debugfs_dir);
+		dev_dbg(dev, "failed to init debugfs!\n");
+	}
+}
+EXPORT_SYMBOL_GPL(hisi_sas_debugfs_init);
+
+void hisi_sas_debugfs_exit(struct hisi_hba *hisi_hba)
+{
+	debugfs_remove_recursive(hisi_hba->debugfs_dir);
+}
+EXPORT_SYMBOL_GPL(hisi_sas_debugfs_exit);
+
 int hisi_sas_remove(struct platform_device *pdev)
 {
 	struct sas_ha_struct *sha = platform_get_drvdata(pdev);
@@ -2381,18 +3869,28 @@
 }
 EXPORT_SYMBOL_GPL(hisi_sas_remove);
 
+bool hisi_sas_debugfs_enable;
+EXPORT_SYMBOL_GPL(hisi_sas_debugfs_enable);
+module_param_named(debugfs_enable, hisi_sas_debugfs_enable, bool, 0444);
+MODULE_PARM_DESC(debugfs_enable, "Enable driver debugfs (default disabled)");
+
 static __init int hisi_sas_init(void)
 {
 	hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
 	if (!hisi_sas_stt)
 		return -ENOMEM;
 
+	if (hisi_sas_debugfs_enable)
+		hisi_sas_debugfs_dir = debugfs_create_dir("hisi_sas", NULL);
+
 	return 0;
 }
 
 static __exit void hisi_sas_exit(void)
 {
 	sas_release_transport(hisi_sas_stt);
+
+	debugfs_remove(hisi_sas_debugfs_dir);
 }
 
 module_init(hisi_sas_init);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 410eccf..b861a0f 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -1,12 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Copyright (c) 2015 Linaro Ltd.
  * Copyright (c) 2015 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
  */
 
 #include "hisi_sas.h"
@@ -406,8 +401,6 @@
 	TRANS_RX_SMP_RESP_TIMEOUT_ERR, /* 0x31a */
 };
 
-#define HISI_SAS_COMMAND_ENTRIES_V1_HW 8192
-
 #define HISI_SAS_PHY_MAX_INT_NR (HISI_SAS_PHY_INT_NR * HISI_SAS_MAX_PHYS)
 #define HISI_SAS_CQ_MAX_INT_NR (HISI_SAS_MAX_QUEUES)
 #define HISI_SAS_FATAL_INT_NR (2)
@@ -423,13 +416,6 @@
 	return readl(regs);
 }
 
-static u32 hisi_sas_read32_relaxed(struct hisi_hba *hisi_hba, u32 off)
-{
-	void __iomem *regs = hisi_hba->regs + off;
-
-	return readl_relaxed(regs);
-}
-
 static void hisi_sas_write32(struct hisi_hba *hisi_hba,
 				    u32 off, u32 val)
 {
@@ -510,6 +496,7 @@
 	struct hisi_sas_itct *itct = &hisi_hba->itct[device_id];
 	struct asd_sas_port *sas_port = device->port;
 	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
+	u64 sas_addr;
 
 	memset(itct, 0, sizeof(*itct));
 
@@ -534,8 +521,8 @@
 	itct->qw0 = cpu_to_le64(qw0);
 
 	/* qw1 */
-	memcpy(&itct->sas_addr, device->sas_addr, SAS_ADDR_SIZE);
-	itct->sas_addr = __swab64(itct->sas_addr);
+	memcpy(&sas_addr, device->sas_addr, SAS_ADDR_SIZE);
+	itct->sas_addr = cpu_to_le64(__swab64(sas_addr));
 
 	/* qw2 */
 	itct->qw2 = cpu_to_le64((500ULL << ITCT_HDR_IT_NEXUS_LOSS_TL_OFF) |
@@ -561,7 +548,7 @@
 	reg_val &= ~CFG_AGING_TIME_ITCT_REL_MSK;
 	hisi_sas_write32(hisi_hba, CFG_AGING_TIME, reg_val);
 
-	qw0 = cpu_to_le64(itct->qw0);
+	qw0 = le64_to_cpu(itct->qw0);
 	qw0 &= ~ITCT_HDR_VALID_MSK;
 	itct->qw0 = cpu_to_le64(qw0);
 }
@@ -797,16 +784,11 @@
 	enable_phy_v1_hw(hisi_hba, phy_no);
 }
 
-static void stop_phy_v1_hw(struct hisi_hba *hisi_hba, int phy_no)
-{
-	disable_phy_v1_hw(hisi_hba, phy_no);
-}
-
 static void phy_hard_reset_v1_hw(struct hisi_hba *hisi_hba, int phy_no)
 {
-	stop_phy_v1_hw(hisi_hba, phy_no);
+	hisi_sas_phy_enable(hisi_hba, phy_no, 0);
 	msleep(100);
-	start_phy_v1_hw(hisi_hba, phy_no);
+	hisi_sas_phy_enable(hisi_hba, phy_no, 1);
 }
 
 static void start_phys_v1_hw(struct timer_list *t)
@@ -816,7 +798,7 @@
 
 	for (i = 0; i < hisi_hba->n_phy; i++) {
 		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x12a);
-		start_phy_v1_hw(hisi_hba, i);
+		hisi_sas_phy_enable(hisi_hba, i, 1);
 	}
 }
 
@@ -834,7 +816,7 @@
 	mod_timer(timer, jiffies + HZ);
 }
 
-static void sl_notify_v1_hw(struct hisi_hba *hisi_hba, int phy_no)
+static void sl_notify_ssp_v1_hw(struct hisi_hba *hisi_hba, int phy_no)
 {
 	u32 sl_control;
 
@@ -875,30 +857,6 @@
 	return bitmap;
 }
 
-/*
- * The callpath to this function and upto writing the write
- * queue pointer should be safe from interruption.
- */
-static int
-get_free_slot_v1_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
-{
-	struct device *dev = hisi_hba->dev;
-	int queue = dq->id;
-	u32 r, w;
-
-	w = dq->wr_point;
-	r = hisi_sas_read32_relaxed(hisi_hba,
-				DLVRY_Q_0_RD_PTR + (queue * 0x14));
-	if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
-		dev_warn(dev, "could not find free slot\n");
-		return -EAGAIN;
-	}
-
-	dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
-
-	return w;
-}
-
 /* DQ lock must be taken here */
 static void start_delivery_v1_hw(struct hisi_sas_dq *dq)
 {
@@ -1100,7 +1058,7 @@
 	case SAS_PROTOCOL_SSP:
 	{
 		int error = -1;
-		u32 dma_err_type = cpu_to_le32(err_record->dma_err_type);
+		u32 dma_err_type = le32_to_cpu(err_record->dma_err_type);
 		u32 dma_tx_err_type = ((dma_err_type &
 					ERR_HDR_DMA_TX_ERR_TYPE_MSK)) >>
 					ERR_HDR_DMA_TX_ERR_TYPE_OFF;
@@ -1108,9 +1066,9 @@
 					ERR_HDR_DMA_RX_ERR_TYPE_MSK)) >>
 					ERR_HDR_DMA_RX_ERR_TYPE_OFF;
 		u32 trans_tx_fail_type =
-				cpu_to_le32(err_record->trans_tx_fail_type);
+				le32_to_cpu(err_record->trans_tx_fail_type);
 		u32 trans_rx_fail_type =
-				cpu_to_le32(err_record->trans_rx_fail_type);
+				le32_to_cpu(err_record->trans_rx_fail_type);
 
 		if (dma_tx_err_type) {
 			/* dma tx err */
@@ -1317,21 +1275,17 @@
 	}
 	case SAS_PROTOCOL_SMP:
 	{
-		void *to;
 		struct scatterlist *sg_resp = &task->smp_task.smp_resp;
+		void *to = page_address(sg_page(sg_resp));
 
 		ts->stat = SAM_STAT_GOOD;
-		to = kmap_atomic(sg_page(sg_resp));
 
-		dma_unmap_sg(dev, &task->smp_task.smp_resp, 1,
-			     DMA_FROM_DEVICE);
 		dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
 			     DMA_TO_DEVICE);
 		memcpy(to + sg_resp->offset,
 		       hisi_sas_status_buf_addr_mem(slot) +
 		       sizeof(struct hisi_sas_err_record),
-		       sg_dma_len(sg_resp));
-		kunmap_atomic(to);
+		       sg_resp->length);
 		break;
 	}
 	case SAS_PROTOCOL_SATA:
@@ -1543,11 +1497,9 @@
 	struct hisi_sas_complete_v1_hdr *complete_queue =
 			(struct hisi_sas_complete_v1_hdr *)
 			hisi_hba->complete_hdr[queue];
-	u32 irq_value, rd_point = cq->rd_point, wr_point;
+	u32 rd_point = cq->rd_point, wr_point;
 
 	spin_lock(&hisi_hba->lock);
-	irq_value = hisi_sas_read32(hisi_hba, OQ_INT_SRC);
-
 	hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);
 	wr_point = hisi_sas_read32(hisi_hba,
 			COMPL_Q_0_WR_PTR + (0x14 * queue));
@@ -1558,7 +1510,7 @@
 		u32 cmplt_hdr_data;
 
 		complete_hdr = &complete_queue[rd_point];
-		cmplt_hdr_data = cpu_to_le32(complete_hdr->data);
+		cmplt_hdr_data = le32_to_cpu(complete_hdr->data);
 		idx = (cmplt_hdr_data & CMPLT_HDR_IPTT_MSK) >>
 		      CMPLT_HDR_IPTT_OFF;
 		slot = &hisi_hba->slot_info[idx];
@@ -1694,8 +1646,7 @@
 		for (j = 0; j < HISI_SAS_PHY_INT_NR; j++, idx++) {
 			irq = platform_get_irq(pdev, idx);
 			if (!irq) {
-				dev_err(dev,
-					"irq init: fail map phy interrupt %d\n",
+				dev_err(dev, "irq init: fail map phy interrupt %d\n",
 					idx);
 				return -ENOENT;
 			}
@@ -1703,8 +1654,7 @@
 			rc = devm_request_irq(dev, irq, phy_interrupts[j], 0,
 					      DRV_NAME " phy", phy);
 			if (rc) {
-				dev_err(dev, "irq init: could not request "
-					"phy interrupt %d, rc=%d\n",
+				dev_err(dev, "irq init: could not request phy interrupt %d, rc=%d\n",
 					irq, rc);
 				return -ENOENT;
 			}
@@ -1741,13 +1691,14 @@
 		rc = devm_request_irq(dev, irq, fatal_interrupts[i], 0,
 				      DRV_NAME " fatal", hisi_hba);
 		if (rc) {
-			dev_err(dev,
-				"irq init: could not request fatal interrupt %d, rc=%d\n",
+			dev_err(dev, "irq init: could not request fatal interrupt %d, rc=%d\n",
 				irq, rc);
 			return -ENOENT;
 		}
 	}
 
+	hisi_hba->cq_nvecs = hisi_hba->queue_count;
+
 	return 0;
 }
 
@@ -1797,6 +1748,11 @@
 	return 0;
 }
 
+static struct device_attribute *host_attrs_v1_hw[] = {
+	&dev_attr_phy_event_threshold,
+	NULL
+};
+
 static struct scsi_host_template sht_v1_hw = {
 	.name			= DRV_NAME,
 	.module			= THIS_MODULE,
@@ -1807,28 +1763,25 @@
 	.scan_start		= hisi_sas_scan_start,
 	.change_queue_depth	= sas_change_queue_depth,
 	.bios_param		= sas_bios_param,
-	.can_queue		= 1,
 	.this_id		= -1,
-	.sg_tablesize		= SG_ALL,
+	.sg_tablesize		= HISI_SAS_SGE_PAGE_CNT,
 	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
-	.use_clustering		= ENABLE_CLUSTERING,
 	.eh_device_reset_handler = sas_eh_device_reset_handler,
 	.eh_target_reset_handler = sas_eh_target_reset_handler,
 	.target_destroy		= sas_target_destroy,
 	.ioctl			= sas_ioctl,
-	.shost_attrs		= host_attrs,
+	.shost_attrs		= host_attrs_v1_hw,
+	.host_reset             = hisi_sas_host_reset,
 };
 
 static const struct hisi_sas_hw hisi_sas_v1_hw = {
 	.hw_init = hisi_sas_v1_init,
 	.setup_itct = setup_itct_v1_hw,
-	.sl_notify = sl_notify_v1_hw,
+	.sl_notify_ssp = sl_notify_ssp_v1_hw,
 	.clear_itct = clear_itct_v1_hw,
 	.prep_smp = prep_smp_v1_hw,
 	.prep_ssp = prep_ssp_v1_hw,
-	.get_free_slot = get_free_slot_v1_hw,
 	.start_delivery = start_delivery_v1_hw,
-	.slot_complete = slot_complete_v1_hw,
 	.phys_init = phys_init_v1_hw,
 	.phy_start = start_phy_v1_hw,
 	.phy_disable = disable_phy_v1_hw,
@@ -1836,7 +1789,6 @@
 	.phy_set_linkrate = phy_set_linkrate_v1_hw,
 	.phy_get_max_linkrate = phy_get_max_linkrate_v1_hw,
 	.get_wideport_bitmap = get_wideport_bitmap_v1_hw,
-	.max_command_entries = HISI_SAS_COMMAND_ENTRIES_V1_HW,
 	.complete_hdr_size = sizeof(struct hisi_sas_complete_v1_hdr),
 	.sht = &sht_v1_hw,
 };
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 1c4ea58..8e96a25 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -1,12 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Copyright (c) 2016 Linaro Ltd.
  * Copyright (c) 2016 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
  */
 
 #include "hisi_sas.h"
@@ -427,70 +422,70 @@
 		.irq_msk = BIT(SAS_ECC_INTR_DQE_ECC_1B_OFF),
 		.msk = HGC_DQE_ECC_1B_ADDR_MSK,
 		.shift = HGC_DQE_ECC_1B_ADDR_OFF,
-		.msg = "hgc_dqe_acc1b_intr found: Ram address is 0x%08X\n",
+		.msg = "hgc_dqe_ecc1b_intr",
 		.reg = HGC_DQE_ECC_ADDR,
 	},
 	{
 		.irq_msk = BIT(SAS_ECC_INTR_IOST_ECC_1B_OFF),
 		.msk = HGC_IOST_ECC_1B_ADDR_MSK,
 		.shift = HGC_IOST_ECC_1B_ADDR_OFF,
-		.msg = "hgc_iost_acc1b_intr found: Ram address is 0x%08X\n",
+		.msg = "hgc_iost_ecc1b_intr",
 		.reg = HGC_IOST_ECC_ADDR,
 	},
 	{
 		.irq_msk = BIT(SAS_ECC_INTR_ITCT_ECC_1B_OFF),
 		.msk = HGC_ITCT_ECC_1B_ADDR_MSK,
 		.shift = HGC_ITCT_ECC_1B_ADDR_OFF,
-		.msg = "hgc_itct_acc1b_intr found: am address is 0x%08X\n",
+		.msg = "hgc_itct_ecc1b_intr",
 		.reg = HGC_ITCT_ECC_ADDR,
 	},
 	{
 		.irq_msk = BIT(SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF),
 		.msk = HGC_LM_DFX_STATUS2_IOSTLIST_MSK,
 		.shift = HGC_LM_DFX_STATUS2_IOSTLIST_OFF,
-		.msg = "hgc_iostl_acc1b_intr found: memory address is 0x%08X\n",
+		.msg = "hgc_iostl_ecc1b_intr",
 		.reg = HGC_LM_DFX_STATUS2,
 	},
 	{
 		.irq_msk = BIT(SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF),
 		.msk = HGC_LM_DFX_STATUS2_ITCTLIST_MSK,
 		.shift = HGC_LM_DFX_STATUS2_ITCTLIST_OFF,
-		.msg = "hgc_itctl_acc1b_intr found: memory address is 0x%08X\n",
+		.msg = "hgc_itctl_ecc1b_intr",
 		.reg = HGC_LM_DFX_STATUS2,
 	},
 	{
 		.irq_msk = BIT(SAS_ECC_INTR_CQE_ECC_1B_OFF),
 		.msk = HGC_CQE_ECC_1B_ADDR_MSK,
 		.shift = HGC_CQE_ECC_1B_ADDR_OFF,
-		.msg = "hgc_cqe_acc1b_intr found: Ram address is 0x%08X\n",
+		.msg = "hgc_cqe_ecc1b_intr",
 		.reg = HGC_CQE_ECC_ADDR,
 	},
 	{
 		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF),
 		.msk = HGC_RXM_DFX_STATUS14_MEM0_MSK,
 		.shift = HGC_RXM_DFX_STATUS14_MEM0_OFF,
-		.msg = "rxm_mem0_acc1b_intr found: memory address is 0x%08X\n",
+		.msg = "rxm_mem0_ecc1b_intr",
 		.reg = HGC_RXM_DFX_STATUS14,
 	},
 	{
 		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF),
 		.msk = HGC_RXM_DFX_STATUS14_MEM1_MSK,
 		.shift = HGC_RXM_DFX_STATUS14_MEM1_OFF,
-		.msg = "rxm_mem1_acc1b_intr found: memory address is 0x%08X\n",
+		.msg = "rxm_mem1_ecc1b_intr",
 		.reg = HGC_RXM_DFX_STATUS14,
 	},
 	{
 		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF),
 		.msk = HGC_RXM_DFX_STATUS14_MEM2_MSK,
 		.shift = HGC_RXM_DFX_STATUS14_MEM2_OFF,
-		.msg = "rxm_mem2_acc1b_intr found: memory address is 0x%08X\n",
+		.msg = "rxm_mem2_ecc1b_intr",
 		.reg = HGC_RXM_DFX_STATUS14,
 	},
 	{
 		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF),
 		.msk = HGC_RXM_DFX_STATUS15_MEM3_MSK,
 		.shift = HGC_RXM_DFX_STATUS15_MEM3_OFF,
-		.msg = "rxm_mem3_acc1b_intr found: memory address is 0x%08X\n",
+		.msg = "rxm_mem3_ecc1b_intr",
 		.reg = HGC_RXM_DFX_STATUS15,
 	},
 };
@@ -500,70 +495,70 @@
 		.irq_msk = BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF),
 		.msk = HGC_DQE_ECC_MB_ADDR_MSK,
 		.shift = HGC_DQE_ECC_MB_ADDR_OFF,
-		.msg = "hgc_dqe_accbad_intr (0x%x) found: Ram address is 0x%08X\n",
+		.msg = "hgc_dqe_eccbad_intr",
 		.reg = HGC_DQE_ECC_ADDR,
 	},
 	{
 		.irq_msk = BIT(SAS_ECC_INTR_IOST_ECC_MB_OFF),
 		.msk = HGC_IOST_ECC_MB_ADDR_MSK,
 		.shift = HGC_IOST_ECC_MB_ADDR_OFF,
-		.msg = "hgc_iost_accbad_intr (0x%x) found: Ram address is 0x%08X\n",
+		.msg = "hgc_iost_eccbad_intr",
 		.reg = HGC_IOST_ECC_ADDR,
 	},
 	{
 		.irq_msk = BIT(SAS_ECC_INTR_ITCT_ECC_MB_OFF),
 		.msk = HGC_ITCT_ECC_MB_ADDR_MSK,
 		.shift = HGC_ITCT_ECC_MB_ADDR_OFF,
-		.msg = "hgc_itct_accbad_intr (0x%x) found: Ram address is 0x%08X\n",
+		.msg = "hgc_itct_eccbad_intr",
 		.reg = HGC_ITCT_ECC_ADDR,
 	},
 	{
 		.irq_msk = BIT(SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF),
 		.msk = HGC_LM_DFX_STATUS2_IOSTLIST_MSK,
 		.shift = HGC_LM_DFX_STATUS2_IOSTLIST_OFF,
-		.msg = "hgc_iostl_accbad_intr (0x%x) found: memory address is 0x%08X\n",
+		.msg = "hgc_iostl_eccbad_intr",
 		.reg = HGC_LM_DFX_STATUS2,
 	},
 	{
 		.irq_msk = BIT(SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF),
 		.msk = HGC_LM_DFX_STATUS2_ITCTLIST_MSK,
 		.shift = HGC_LM_DFX_STATUS2_ITCTLIST_OFF,
-		.msg = "hgc_itctl_accbad_intr (0x%x) found: memory address is 0x%08X\n",
+		.msg = "hgc_itctl_eccbad_intr",
 		.reg = HGC_LM_DFX_STATUS2,
 	},
 	{
 		.irq_msk = BIT(SAS_ECC_INTR_CQE_ECC_MB_OFF),
 		.msk = HGC_CQE_ECC_MB_ADDR_MSK,
 		.shift = HGC_CQE_ECC_MB_ADDR_OFF,
-		.msg = "hgc_cqe_accbad_intr (0x%x) found: Ram address is 0x%08X\n",
+		.msg = "hgc_cqe_eccbad_intr",
 		.reg = HGC_CQE_ECC_ADDR,
 	},
 	{
 		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF),
 		.msk = HGC_RXM_DFX_STATUS14_MEM0_MSK,
 		.shift = HGC_RXM_DFX_STATUS14_MEM0_OFF,
-		.msg = "rxm_mem0_accbad_intr (0x%x) found: memory address is 0x%08X\n",
+		.msg = "rxm_mem0_eccbad_intr",
 		.reg = HGC_RXM_DFX_STATUS14,
 	},
 	{
 		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF),
 		.msk = HGC_RXM_DFX_STATUS14_MEM1_MSK,
 		.shift = HGC_RXM_DFX_STATUS14_MEM1_OFF,
-		.msg = "rxm_mem1_accbad_intr (0x%x) found: memory address is 0x%08X\n",
+		.msg = "rxm_mem1_eccbad_intr",
 		.reg = HGC_RXM_DFX_STATUS14,
 	},
 	{
 		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF),
 		.msk = HGC_RXM_DFX_STATUS14_MEM2_MSK,
 		.shift = HGC_RXM_DFX_STATUS14_MEM2_OFF,
-		.msg = "rxm_mem2_accbad_intr (0x%x) found: memory address is 0x%08X\n",
+		.msg = "rxm_mem2_eccbad_intr",
 		.reg = HGC_RXM_DFX_STATUS14,
 	},
 	{
 		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF),
 		.msk = HGC_RXM_DFX_STATUS15_MEM3_MSK,
 		.shift = HGC_RXM_DFX_STATUS15_MEM3_OFF,
-		.msg = "rxm_mem3_accbad_intr (0x%x) found: memory address is 0x%08X\n",
+		.msg = "rxm_mem3_eccbad_intr",
 		.reg = HGC_RXM_DFX_STATUS15,
 	},
 };
@@ -770,7 +765,7 @@
 
 /* This function needs to be protected from pre-emption. */
 static int
-slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, int *slot_idx,
+slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba,
 			     struct domain_device *device)
 {
 	int sata_dev = dev_is_sata(device);
@@ -778,6 +773,7 @@
 	struct hisi_sas_device *sas_dev = device->lldd_dev;
 	int sata_idx = sas_dev->sata_idx;
 	int start, end;
+	unsigned long flags;
 
 	if (!sata_dev) {
 		/*
@@ -801,11 +797,14 @@
 		end = 64 * (sata_idx + 2);
 	}
 
+	spin_lock_irqsave(&hisi_hba->lock, flags);
 	while (1) {
 		start = find_next_zero_bit(bitmap,
 					hisi_hba->slot_index_count, start);
-		if (start >= end)
+		if (start >= end) {
+			spin_unlock_irqrestore(&hisi_hba->lock, flags);
 			return -SAS_QUEUE_FULL;
+		}
 		/*
 		  * SAS IPTT bit0 should be 1, and SATA IPTT bit0 should be 0.
 		  */
@@ -815,8 +814,8 @@
 	}
 
 	set_bit(start, bitmap);
-	*slot_idx = start;
-	return 0;
+	spin_unlock_irqrestore(&hisi_hba->lock, flags);
+	return start;
 }
 
 static bool sata_index_alloc_v2_hw(struct hisi_hba *hisi_hba, int *idx)
@@ -864,12 +863,13 @@
 
 			hisi_hba->devices[i].device_id = i;
 			sas_dev = &hisi_hba->devices[i];
-			sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
+			sas_dev->dev_status = HISI_SAS_DEV_INIT;
 			sas_dev->dev_type = device->dev_type;
 			sas_dev->hisi_hba = hisi_hba;
 			sas_dev->sas_device = device;
 			sas_dev->sata_idx = sata_idx;
 			sas_dev->dq = dq;
+			spin_lock_init(&sas_dev->lock);
 			INIT_LIST_HEAD(&hisi_hba->devices[i].list);
 			break;
 		}
@@ -930,6 +930,7 @@
 	struct domain_device *parent_dev = device->parent;
 	struct asd_sas_port *sas_port = device->port;
 	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
+	u64 sas_addr;
 
 	memset(itct, 0, sizeof(*itct));
 
@@ -943,7 +944,7 @@
 		break;
 	case SAS_SATA_DEV:
 	case SAS_SATA_PENDING:
-		if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
+		if (parent_dev && dev_is_expander(parent_dev->dev_type))
 			qw0 = HISI_SAS_DEV_TYPE_STP << ITCT_HDR_DEV_TYPE_OFF;
 		else
 			qw0 = HISI_SAS_DEV_TYPE_SATA << ITCT_HDR_DEV_TYPE_OFF;
@@ -962,8 +963,8 @@
 	itct->qw0 = cpu_to_le64(qw0);
 
 	/* qw1 */
-	memcpy(&itct->sas_addr, device->sas_addr, SAS_ADDR_SIZE);
-	itct->sas_addr = __swab64(itct->sas_addr);
+	memcpy(&sas_addr, device->sas_addr, SAS_ADDR_SIZE);
+	itct->sas_addr = cpu_to_le64(__swab64(sas_addr));
 
 	/* qw2 */
 	if (!dev_is_sata(device))
@@ -1540,14 +1541,14 @@
 	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
 	u32 txid_auto;
 
-	disable_phy_v2_hw(hisi_hba, phy_no);
+	hisi_sas_phy_enable(hisi_hba, phy_no, 0);
 	if (phy->identify.device_type == SAS_END_DEVICE) {
 		txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
 		hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
 					txid_auto | TX_HARDRST_MSK);
 	}
 	msleep(100);
-	start_phy_v2_hw(hisi_hba, phy_no);
+	hisi_sas_phy_enable(hisi_hba, phy_no, 1);
 }
 
 static void phy_get_events_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
@@ -1580,11 +1581,11 @@
 		if (!sas_phy->phy->enabled)
 			continue;
 
-		start_phy_v2_hw(hisi_hba, i);
+		hisi_sas_phy_enable(hisi_hba, i, 1);
 	}
 }
 
-static void sl_notify_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
+static void sl_notify_ssp_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
 {
 	u32 sl_control;
 
@@ -1636,31 +1637,6 @@
 	return bitmap;
 }
 
-/*
- * The callpath to this function and upto writing the write
- * queue pointer should be safe from interruption.
- */
-static int
-get_free_slot_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
-{
-	struct device *dev = hisi_hba->dev;
-	int queue = dq->id;
-	u32 r, w;
-
-	w = dq->wr_point;
-	r = hisi_sas_read32_relaxed(hisi_hba,
-				DLVRY_Q_0_RD_PTR + (queue * 0x14));
-	if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
-		dev_warn(dev, "full queue=%d r=%d w=%d\n",
-				queue, r, w);
-		return -EAGAIN;
-	}
-
-	dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
-
-	return w;
-}
-
 /* DQ lock must be taken here */
 static void start_delivery_v2_hw(struct hisi_sas_dq *dq)
 {
@@ -2040,11 +2016,11 @@
 	struct task_status_struct *ts = &task->task_status;
 	struct hisi_sas_err_record_v2 *err_record =
 			hisi_sas_status_buf_addr_mem(slot);
-	u32 trans_tx_fail_type = cpu_to_le32(err_record->trans_tx_fail_type);
-	u32 trans_rx_fail_type = cpu_to_le32(err_record->trans_rx_fail_type);
-	u16 dma_tx_err_type = cpu_to_le16(err_record->dma_tx_err_type);
-	u16 sipc_rx_err_type = cpu_to_le16(err_record->sipc_rx_err_type);
-	u32 dma_rx_err_type = cpu_to_le32(err_record->dma_rx_err_type);
+	u32 trans_tx_fail_type = le32_to_cpu(err_record->trans_tx_fail_type);
+	u32 trans_rx_fail_type = le32_to_cpu(err_record->trans_rx_fail_type);
+	u16 dma_tx_err_type = le16_to_cpu(err_record->dma_tx_err_type);
+	u16 sipc_rx_err_type = le16_to_cpu(err_record->sipc_rx_err_type);
+	u32 dma_rx_err_type = le32_to_cpu(err_record->dma_rx_err_type);
 	int error = -1;
 
 	if (err_phase == 1) {
@@ -2055,8 +2031,7 @@
 					trans_tx_fail_type);
 	} else if (err_phase == 2) {
 		/* error in RX phase, the priority is: DW1 > DW3 > DW2 */
-		error = parse_trans_rx_err_code_v2_hw(
-					trans_rx_fail_type);
+		error = parse_trans_rx_err_code_v2_hw(trans_rx_fail_type);
 		if (error == -1) {
 			error = parse_dma_rx_err_code_v2_hw(
 					dma_rx_err_type);
@@ -2354,6 +2329,7 @@
 			&complete_queue[slot->cmplt_queue_slot];
 	unsigned long flags;
 	bool is_internal = slot->is_internal;
+	u32 dw0;
 
 	if (unlikely(!task || !task->lldd_task || !task->dev))
 		return -EINVAL;
@@ -2378,8 +2354,9 @@
 	}
 
 	/* Use SAS+TMF status codes */
-	switch ((complete_hdr->dw0 & CMPLT_HDR_ABORT_STAT_MSK)
-			>> CMPLT_HDR_ABORT_STAT_OFF) {
+	dw0 = le32_to_cpu(complete_hdr->dw0);
+	switch ((dw0 & CMPLT_HDR_ABORT_STAT_MSK) >>
+		CMPLT_HDR_ABORT_STAT_OFF) {
 	case STAT_IO_ABORTED:
 		/* this io has been aborted by abort command */
 		ts->stat = SAS_ABORTED_TASK;
@@ -2404,9 +2381,8 @@
 		break;
 	}
 
-	if ((complete_hdr->dw0 & CMPLT_HDR_ERX_MSK) &&
-		(!(complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))) {
-		u32 err_phase = (complete_hdr->dw0 & CMPLT_HDR_ERR_PHASE_MSK)
+	if ((dw0 & CMPLT_HDR_ERX_MSK) && (!(dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))) {
+		u32 err_phase = (dw0 & CMPLT_HDR_ERR_PHASE_MSK)
 				>> CMPLT_HDR_ERR_PHASE_OFF;
 		u32 *error_info = hisi_sas_status_buf_addr_mem(slot);
 
@@ -2417,14 +2393,12 @@
 			slot_err_v2_hw(hisi_hba, task, slot, 2);
 
 		if (ts->stat != SAS_DATA_UNDERRUN)
-			dev_info(dev, "erroneous completion iptt=%d task=%p dev id=%d "
-				"CQ hdr: 0x%x 0x%x 0x%x 0x%x "
-				"Error info: 0x%x 0x%x 0x%x 0x%x\n",
-				slot->idx, task, sas_dev->device_id,
-				complete_hdr->dw0, complete_hdr->dw1,
-				complete_hdr->act, complete_hdr->dw3,
-				error_info[0], error_info[1],
-				error_info[2], error_info[3]);
+			dev_info(dev, "erroneous completion iptt=%d task=%pK dev id=%d CQ hdr: 0x%x 0x%x 0x%x 0x%x Error info: 0x%x 0x%x 0x%x 0x%x\n",
+				 slot->idx, task, sas_dev->device_id,
+				 complete_hdr->dw0, complete_hdr->dw1,
+				 complete_hdr->act, complete_hdr->dw3,
+				 error_info[0], error_info[1],
+				 error_info[2], error_info[3]);
 
 		if (unlikely(slot->abort))
 			return ts->stat;
@@ -2445,20 +2419,16 @@
 	case SAS_PROTOCOL_SMP:
 	{
 		struct scatterlist *sg_resp = &task->smp_task.smp_resp;
-		void *to;
+		void *to = page_address(sg_page(sg_resp));
 
 		ts->stat = SAM_STAT_GOOD;
-		to = kmap_atomic(sg_page(sg_resp));
 
-		dma_unmap_sg(dev, &task->smp_task.smp_resp, 1,
-			     DMA_FROM_DEVICE);
 		dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
 			     DMA_TO_DEVICE);
 		memcpy(to + sg_resp->offset,
 		       hisi_sas_status_buf_addr_mem(slot) +
 		       sizeof(struct hisi_sas_err_record),
-		       sg_dma_len(sg_resp));
-		kunmap_atomic(to);
+		       sg_resp->length);
 		break;
 	}
 	case SAS_PROTOCOL_SATA:
@@ -2481,22 +2451,22 @@
 	}
 
 out:
-	hisi_sas_slot_task_free(hisi_hba, task, slot);
 	sts = ts->stat;
 	spin_lock_irqsave(&task->task_state_lock, flags);
 	if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
 		spin_unlock_irqrestore(&task->task_state_lock, flags);
-		dev_info(dev, "slot complete: task(%p) aborted\n", task);
+		dev_info(dev, "slot complete: task(%pK) aborted\n", task);
 		return SAS_ABORTED_TASK;
 	}
 	task->task_state_flags |= SAS_TASK_STATE_DONE;
 	spin_unlock_irqrestore(&task->task_state_lock, flags);
+	hisi_sas_slot_task_free(hisi_hba, task, slot);
 
 	if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) {
 		spin_lock_irqsave(&device->done_lock, flags);
 		if (test_bit(SAS_HA_FROZEN, &ha->state)) {
 			spin_unlock_irqrestore(&device->done_lock, flags);
-			dev_info(dev, "slot complete: task(%p) ignored\n ",
+			dev_info(dev, "slot complete: task(%pK) ignored\n",
 				 task);
 			return sts;
 		}
@@ -2522,22 +2492,23 @@
 	struct hisi_sas_tmf_task *tmf = slot->tmf;
 	u8 *buf_cmd;
 	int has_data = 0, hdr_tag = 0;
-	u32 dw1 = 0, dw2 = 0;
+	u32 dw0, dw1 = 0, dw2 = 0;
 
 	/* create header */
 	/* dw0 */
-	hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF);
-	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
-		hdr->dw0 |= cpu_to_le32(3 << CMD_HDR_CMD_OFF);
+	dw0 = port->id << CMD_HDR_PORT_OFF;
+	if (parent_dev && dev_is_expander(parent_dev->dev_type))
+		dw0 |= 3 << CMD_HDR_CMD_OFF;
 	else
-		hdr->dw0 |= cpu_to_le32(4 << CMD_HDR_CMD_OFF);
+		dw0 |= 4 << CMD_HDR_CMD_OFF;
 
 	if (tmf && tmf->force_phy) {
-		hdr->dw0 |= CMD_HDR_FORCE_PHY_MSK;
-		hdr->dw0 |= cpu_to_le32((1 << tmf->phy_id)
-				<< CMD_HDR_PHY_ID_OFF);
+		dw0 |= CMD_HDR_FORCE_PHY_MSK;
+		dw0 |= (1 << tmf->phy_id) << CMD_HDR_PHY_ID_OFF;
 	}
 
+	hdr->dw0 = cpu_to_le32(dw0);
+
 	/* dw1 */
 	switch (task->data_dir) {
 	case DMA_TO_DEVICE:
@@ -2563,7 +2534,10 @@
 	hdr->dw1 = cpu_to_le32(dw1);
 
 	/* dw2 */
-	if (task->ata_task.use_ncq && hisi_sas_get_ncq_tag(task, &hdr_tag)) {
+	if (task->ata_task.use_ncq) {
+		struct ata_queued_cmd *qc = task->uldd_task;
+
+		hdr_tag = qc->tag;
 		task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
 		dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF;
 	}
@@ -2671,6 +2645,8 @@
 	if (is_sata_phy_v2_hw(hisi_hba, phy_no))
 		goto end;
 
+	del_timer(&phy->timer);
+
 	if (phy_no == 8) {
 		u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE);
 
@@ -2750,6 +2726,7 @@
 	struct hisi_sas_port *port = phy->port;
 	struct device *dev = hisi_hba->dev;
 
+	del_timer(&phy->timer);
 	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1);
 
 	phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
@@ -2925,7 +2902,7 @@
 
 			if (irq_value2 & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) {
 				dev_warn(dev, "phy%d identify timeout\n",
-						phy_no);
+					 phy_no);
 				hisi_sas_notify_phy_event(phy,
 						HISI_PHYE_LINK_RESET);
 			}
@@ -2938,6 +2915,9 @@
 			if (irq_value0 & CHL_INT0_SL_RX_BCST_ACK_MSK)
 				phy_bcast_v2_hw(phy_no, hisi_hba);
 
+			if (irq_value0 & CHL_INT0_PHY_RDY_MSK)
+				hisi_sas_phy_oob_ready(hisi_hba, phy_no);
+
 			hisi_sas_phy_write32(hisi_hba, phy_no,
 					CHL_INT0, irq_value0
 					& (~CHL_INT0_HOTPLUG_TOUT_MSK)
@@ -2967,7 +2947,8 @@
 			val = hisi_sas_read32(hisi_hba, ecc_error->reg);
 			val &= ecc_error->msk;
 			val >>= ecc_error->shift;
-			dev_warn(dev, ecc_error->msg, val);
+			dev_warn(dev, "%s found: mem addr is 0x%08X\n",
+				 ecc_error->msg, val);
 		}
 	}
 }
@@ -2986,7 +2967,8 @@
 			val = hisi_sas_read32(hisi_hba, ecc_error->reg);
 			val &= ecc_error->msk;
 			val >>= ecc_error->shift;
-			dev_err(dev, ecc_error->msg, irq_value, val);
+			dev_err(dev, "%s (0x%x) found: mem addr is 0x%08X\n",
+				ecc_error->msg, irq_value, val);
 			queue_work(hisi_hba->wq, &hisi_hba->rst_work);
 		}
 	}
@@ -3023,7 +3005,7 @@
 	{ .msk = BIT(5), .msg = "SATA_AXI_R_ERR" },
 	{ .msk = BIT(6), .msg = "DQE_AXI_R_ERR" },
 	{ .msk = BIT(7), .msg = "CQE_AXI_W_ERR" },
-	{},
+	{}
 };
 
 static const struct hisi_sas_hw_error fifo_error[] = {
@@ -3032,7 +3014,7 @@
 	{ .msk = BIT(10), .msg = "GETDQE_FIFO" },
 	{ .msk = BIT(11), .msg = "CMDP_FIFO" },
 	{ .msk = BIT(12), .msg = "AWTCTRL_FIFO" },
-	{},
+	{}
 };
 
 static const struct hisi_sas_hw_error fatal_axi_errors[] = {
@@ -3096,12 +3078,12 @@
 				if (!(err_value & sub->msk))
 					continue;
 				dev_err(dev, "%s (0x%x) found!\n",
-					 sub->msg, irq_value);
+					sub->msg, irq_value);
 				queue_work(hisi_hba->wq, &hisi_hba->rst_work);
 			}
 		} else {
 			dev_err(dev, "%s (0x%x) found!\n",
-				 axi_error->msg, irq_value);
+				axi_error->msg, irq_value);
 			queue_work(hisi_hba->wq, &hisi_hba->rst_work);
 		}
 	}
@@ -3148,20 +3130,24 @@
 
 		/* Check for NCQ completion */
 		if (complete_hdr->act) {
-			u32 act_tmp = complete_hdr->act;
+			u32 act_tmp = le32_to_cpu(complete_hdr->act);
 			int ncq_tag_count = ffs(act_tmp);
+			u32 dw1 = le32_to_cpu(complete_hdr->dw1);
 
-			dev_id = (complete_hdr->dw1 & CMPLT_HDR_DEV_ID_MSK) >>
+			dev_id = (dw1 & CMPLT_HDR_DEV_ID_MSK) >>
 				 CMPLT_HDR_DEV_ID_OFF;
 			itct = &hisi_hba->itct[dev_id];
 
 			/* The NCQ tags are held in the itct header */
 			while (ncq_tag_count) {
-				__le64 *ncq_tag = &itct->qw4_15[0];
+				__le64 *_ncq_tag = &itct->qw4_15[0], __ncq_tag;
+				u64 ncq_tag;
 
-				ncq_tag_count -= 1;
-				iptt = (ncq_tag[ncq_tag_count / 5]
-					>> (ncq_tag_count % 5) * 12) & 0xfff;
+				ncq_tag_count--;
+				__ncq_tag = _ncq_tag[ncq_tag_count / 5];
+				ncq_tag = le64_to_cpu(__ncq_tag);
+				iptt = (ncq_tag >> (ncq_tag_count % 5) * 12) &
+				       0xfff;
 
 				slot = &hisi_hba->slot_info[iptt];
 				slot->cmplt_queue_slot = rd_point;
@@ -3172,7 +3158,9 @@
 				ncq_tag_count = ffs(act_tmp);
 			}
 		} else {
-			iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK;
+			u32 dw1 = le32_to_cpu(complete_hdr->dw1);
+
+			iptt = dw1 & CMPLT_HDR_IPTT_MSK;
 			slot = &hisi_hba->slot_info[iptt];
 			slot->cmplt_queue_slot = rd_point;
 			slot->cmplt_queue = queue;
@@ -3215,6 +3203,8 @@
 	unsigned long flags;
 	int phy_no, offset;
 
+	del_timer(&phy->timer);
+
 	phy_no = sas_phy->id;
 	initial_fis = &hisi_hba->initial_fis[phy_no];
 	fis = &initial_fis->fis;
@@ -3237,7 +3227,7 @@
 	/* check ERR bit of Status Register */
 	if (fis->status & ATA_ERR) {
 		dev_warn(dev, "sata int: phy%d FIS status: 0x%x\n", phy_no,
-				fis->status);
+			 fis->status);
 		hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
 		res = IRQ_NONE;
 		goto end;
@@ -3317,8 +3307,8 @@
 {
 	struct platform_device *pdev = hisi_hba->platform_dev;
 	struct device *dev = &pdev->dev;
-	int irq, rc, irq_map[128];
-	int i, phy_no, fatal_no, queue_no, k;
+	int irq, rc = 0, irq_map[128];
+	int i, phy_no, fatal_no, queue_no;
 
 	for (i = 0; i < 128; i++)
 		irq_map[i] = platform_get_irq(pdev, i);
@@ -3328,11 +3318,10 @@
 		rc = devm_request_irq(dev, irq, phy_interrupts[i], 0,
 				      DRV_NAME " phy", hisi_hba);
 		if (rc) {
-			dev_err(dev, "irq init: could not request "
-				"phy interrupt %d, rc=%d\n",
+			dev_err(dev, "irq init: could not request phy interrupt %d, rc=%d\n",
 				irq, rc);
 			rc = -ENOENT;
-			goto free_phy_int_irqs;
+			goto err_out;
 		}
 	}
 
@@ -3343,11 +3332,10 @@
 		rc = devm_request_irq(dev, irq, sata_int_v2_hw, 0,
 				      DRV_NAME " sata", phy);
 		if (rc) {
-			dev_err(dev, "irq init: could not request "
-				"sata interrupt %d, rc=%d\n",
+			dev_err(dev, "irq init: could not request sata interrupt %d, rc=%d\n",
 				irq, rc);
 			rc = -ENOENT;
-			goto free_sata_int_irqs;
+			goto err_out;
 		}
 	}
 
@@ -3356,11 +3344,10 @@
 		rc = devm_request_irq(dev, irq, fatal_interrupts[fatal_no], 0,
 				      DRV_NAME " fatal", hisi_hba);
 		if (rc) {
-			dev_err(dev,
-				"irq init: could not request fatal interrupt %d, rc=%d\n",
+			dev_err(dev, "irq init: could not request fatal interrupt %d, rc=%d\n",
 				irq, rc);
 			rc = -ENOENT;
-			goto free_fatal_int_irqs;
+			goto err_out;
 		}
 	}
 
@@ -3372,36 +3359,17 @@
 		rc = devm_request_irq(dev, irq, cq_interrupt_v2_hw, 0,
 				      DRV_NAME " cq", cq);
 		if (rc) {
-			dev_err(dev,
-				"irq init: could not request cq interrupt %d, rc=%d\n",
+			dev_err(dev, "irq init: could not request cq interrupt %d, rc=%d\n",
 				irq, rc);
 			rc = -ENOENT;
-			goto free_cq_int_irqs;
+			goto err_out;
 		}
 		tasklet_init(t, cq_tasklet_v2_hw, (unsigned long)cq);
 	}
 
-	return 0;
+	hisi_hba->cq_nvecs = hisi_hba->queue_count;
 
-free_cq_int_irqs:
-	for (k = 0; k < queue_no; k++) {
-		struct hisi_sas_cq *cq = &hisi_hba->cq[k];
-
-		free_irq(irq_map[k + 96], cq);
-		tasklet_kill(&cq->tasklet);
-	}
-free_fatal_int_irqs:
-	for (k = 0; k < fatal_no; k++)
-		free_irq(irq_map[k + 81], hisi_hba);
-free_sata_int_irqs:
-	for (k = 0; k < phy_no; k++) {
-		struct hisi_sas_phy *phy = &hisi_hba->phy[k];
-
-		free_irq(irq_map[k + 72], phy);
-	}
-free_phy_int_irqs:
-	for (k = 0; k < i; k++)
-		free_irq(irq_map[k + 1], hisi_hba);
+err_out:
 	return rc;
 }
 
@@ -3523,7 +3491,7 @@
 		break;
 	default:
 		dev_err(dev, "write gpio: unsupported or bad reg type %d\n",
-				reg_type);
+			reg_type);
 		return -EINVAL;
 	}
 
@@ -3545,9 +3513,20 @@
 		msleep(delay_ms);
 	}
 
+	if (time >= timeout_ms) {
+		dev_dbg(dev, "Wait commands complete timeout!\n");
+		return;
+	}
+
 	dev_dbg(dev, "wait commands complete %dms\n", time);
 }
 
+static struct device_attribute *host_attrs_v2_hw[] = {
+	&dev_attr_phy_event_threshold,
+	NULL
+};
+
 static struct scsi_host_template sht_v2_hw = {
 	.name			= DRV_NAME,
 	.module			= THIS_MODULE,
@@ -3558,16 +3537,15 @@
 	.scan_start		= hisi_sas_scan_start,
 	.change_queue_depth	= sas_change_queue_depth,
 	.bios_param		= sas_bios_param,
-	.can_queue		= 1,
 	.this_id		= -1,
-	.sg_tablesize		= SG_ALL,
+	.sg_tablesize		= HISI_SAS_SGE_PAGE_CNT,
 	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
-	.use_clustering		= ENABLE_CLUSTERING,
 	.eh_device_reset_handler = sas_eh_device_reset_handler,
 	.eh_target_reset_handler = sas_eh_target_reset_handler,
 	.target_destroy		= sas_target_destroy,
 	.ioctl			= sas_ioctl,
-	.shost_attrs		= host_attrs,
+	.shost_attrs		= host_attrs_v2_hw,
+	.host_reset		= hisi_sas_host_reset,
 };
 
 static const struct hisi_sas_hw hisi_sas_v2_hw = {
@@ -3575,7 +3553,7 @@
 	.setup_itct = setup_itct_v2_hw,
 	.slot_index_alloc = slot_index_alloc_quirk_v2_hw,
 	.alloc_dev = alloc_dev_quirk_v2_hw,
-	.sl_notify = sl_notify_v2_hw,
+	.sl_notify_ssp = sl_notify_ssp_v2_hw,
 	.get_wideport_bitmap = get_wideport_bitmap_v2_hw,
 	.clear_itct = clear_itct_v2_hw,
 	.free_device = free_device_v2_hw,
@@ -3583,9 +3561,7 @@
 	.prep_ssp = prep_ssp_v2_hw,
 	.prep_stp = prep_ata_v2_hw,
 	.prep_abort = prep_abort_v2_hw,
-	.get_free_slot = get_free_slot_v2_hw,
 	.start_delivery = start_delivery_v2_hw,
-	.slot_complete = slot_complete_v2_hw,
 	.phys_init = phys_init_v2_hw,
 	.phy_start = start_phy_v2_hw,
 	.phy_disable = disable_phy_v2_hw,
@@ -3593,7 +3569,6 @@
 	.get_events = phy_get_events_v2_hw,
 	.phy_set_linkrate = phy_set_linkrate_v2_hw,
 	.phy_get_max_linkrate = phy_get_max_linkrate_v2_hw,
-	.max_command_entries = HISI_SAS_COMMAND_ENTRIES_V2_HW,
 	.complete_hdr_size = sizeof(struct hisi_sas_complete_v2_hdr),
 	.soft_reset = soft_reset_v2_hw,
 	.get_phys_state = get_phys_state_v2_hw,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 687ff61..cb8d087 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -1,17 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Copyright (c) 2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
  */
 
 #include "hisi_sas.h"
 #define DRV_NAME "hisi_sas_v3_hw"
 
-/* global registers need init*/
+/* global registers need init */
 #define DLVRY_QUEUE_ENABLE		0x0
 #define IOST_BASE_ADDR_LO		0x8
 #define IOST_BASE_ADDR_HI		0xc
@@ -28,6 +23,7 @@
 #define ITCT_CLR_EN_MSK			(0x1 << ITCT_CLR_EN_OFF)
 #define ITCT_DEV_OFF			0
 #define ITCT_DEV_MSK			(0x7ff << ITCT_DEV_OFF)
+#define SAS_AXI_USER3			0x50
 #define IO_SATA_BROKEN_MSG_ADDR_LO	0x58
 #define IO_SATA_BROKEN_MSG_ADDR_HI	0x5c
 #define SATA_INITI_D2H_STORE_ADDR_LO	0x60
@@ -42,6 +38,7 @@
 #define MAX_CON_TIME_LIMIT_TIME		0xa4
 #define BUS_INACTIVE_LIMIT_TIME		0xa8
 #define REJECT_TO_OPEN_LIMIT_TIME	0xac
+#define CQ_INT_CONVERGE_EN		0xb0
 #define CFG_AGING_TIME			0xbc
 #define HGC_DFX_CFG2			0xc0
 #define CFG_ABT_SET_QUERY_IPTT	0xd4
@@ -51,12 +48,43 @@
 #define CFG_ABT_SET_IPTT_DONE	0xd8
 #define CFG_ABT_SET_IPTT_DONE_OFF	0
 #define HGC_IOMB_PROC1_STATUS	0x104
+#define HGC_LM_DFX_STATUS2		0x128
+#define HGC_LM_DFX_STATUS2_IOSTLIST_OFF		0
+#define HGC_LM_DFX_STATUS2_IOSTLIST_MSK	(0xfff << \
+					 HGC_LM_DFX_STATUS2_IOSTLIST_OFF)
+#define HGC_LM_DFX_STATUS2_ITCTLIST_OFF		12
+#define HGC_LM_DFX_STATUS2_ITCTLIST_MSK	(0x7ff << \
+					 HGC_LM_DFX_STATUS2_ITCTLIST_OFF)
+#define HGC_CQE_ECC_ADDR		0x13c
+#define HGC_CQE_ECC_1B_ADDR_OFF	0
+#define HGC_CQE_ECC_1B_ADDR_MSK	(0x3f << HGC_CQE_ECC_1B_ADDR_OFF)
+#define HGC_CQE_ECC_MB_ADDR_OFF	8
+#define HGC_CQE_ECC_MB_ADDR_MSK (0x3f << HGC_CQE_ECC_MB_ADDR_OFF)
+#define HGC_IOST_ECC_ADDR		0x140
+#define HGC_IOST_ECC_1B_ADDR_OFF	0
+#define HGC_IOST_ECC_1B_ADDR_MSK	(0x3ff << HGC_IOST_ECC_1B_ADDR_OFF)
+#define HGC_IOST_ECC_MB_ADDR_OFF	16
+#define HGC_IOST_ECC_MB_ADDR_MSK	(0x3ff << HGC_IOST_ECC_MB_ADDR_OFF)
+#define HGC_DQE_ECC_ADDR		0x144
+#define HGC_DQE_ECC_1B_ADDR_OFF	0
+#define HGC_DQE_ECC_1B_ADDR_MSK	(0xfff << HGC_DQE_ECC_1B_ADDR_OFF)
+#define HGC_DQE_ECC_MB_ADDR_OFF	16
+#define HGC_DQE_ECC_MB_ADDR_MSK (0xfff << HGC_DQE_ECC_MB_ADDR_OFF)
 #define CHNL_INT_STATUS			0x148
+#define TAB_DFX				0x14c
+#define HGC_ITCT_ECC_ADDR		0x150
+#define HGC_ITCT_ECC_1B_ADDR_OFF		0
+#define HGC_ITCT_ECC_1B_ADDR_MSK		(0x3ff << \
+						 HGC_ITCT_ECC_1B_ADDR_OFF)
+#define HGC_ITCT_ECC_MB_ADDR_OFF		16
+#define HGC_ITCT_ECC_MB_ADDR_MSK		(0x3ff << \
+						 HGC_ITCT_ECC_MB_ADDR_OFF)
 #define HGC_AXI_FIFO_ERR_INFO  0x154
 #define AXI_ERR_INFO_OFF               0
 #define AXI_ERR_INFO_MSK               (0xff << AXI_ERR_INFO_OFF)
 #define FIFO_ERR_INFO_OFF              8
 #define FIFO_ERR_INFO_MSK              (0xff << FIFO_ERR_INFO_OFF)
+#define TAB_RD_TYPE			0x15c
 #define INT_COAL_EN			0x19c
 #define OQ_INT_COAL_TIME		0x1a0
 #define OQ_INT_COAL_CNT			0x1a4
@@ -80,6 +108,10 @@
 #define ENT_INT_SRC3_ITC_INT_OFF	15
 #define ENT_INT_SRC3_ITC_INT_MSK	(0x1 << ENT_INT_SRC3_ITC_INT_OFF)
 #define ENT_INT_SRC3_ABT_OFF		16
+#define ENT_INT_SRC3_DQE_POISON_OFF	18
+#define ENT_INT_SRC3_IOST_POISON_OFF	19
+#define ENT_INT_SRC3_ITCT_POISON_OFF	20
+#define ENT_INT_SRC3_ITCT_NCQ_POISON_OFF	21
 #define ENT_INT_SRC_MSK1		0x1c4
 #define ENT_INT_SRC_MSK2		0x1c8
 #define ENT_INT_SRC_MSK3		0x1cc
@@ -89,6 +121,28 @@
 #define HGC_COM_INT_MSK				0x1d8
 #define ENT_INT_SRC_MSK3_ENT95_MSK_MSK	(0x1 << ENT_INT_SRC_MSK3_ENT95_MSK_OFF)
 #define SAS_ECC_INTR			0x1e8
+#define SAS_ECC_INTR_DQE_ECC_1B_OFF		0
+#define SAS_ECC_INTR_DQE_ECC_MB_OFF		1
+#define SAS_ECC_INTR_IOST_ECC_1B_OFF	2
+#define SAS_ECC_INTR_IOST_ECC_MB_OFF	3
+#define SAS_ECC_INTR_ITCT_ECC_1B_OFF	4
+#define SAS_ECC_INTR_ITCT_ECC_MB_OFF	5
+#define SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF	6
+#define SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF	7
+#define SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF	8
+#define SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF	9
+#define SAS_ECC_INTR_CQE_ECC_1B_OFF		10
+#define SAS_ECC_INTR_CQE_ECC_MB_OFF		11
+#define SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF	12
+#define SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF	13
+#define SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF	14
+#define SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF	15
+#define SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF	16
+#define SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF	17
+#define SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF	18
+#define SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF	19
+#define SAS_ECC_INTR_OOO_RAM_ECC_1B_OFF		20
+#define SAS_ECC_INTR_OOO_RAM_ECC_MB_OFF		21
 #define SAS_ECC_INTR_MSK		0x1ec
 #define HGC_ERR_STAT_EN			0x238
 #define CQE_SEND_CNT			0x248
@@ -104,6 +158,20 @@
 #define COMPL_Q_0_DEPTH			0x4e8
 #define COMPL_Q_0_WR_PTR		0x4ec
 #define COMPL_Q_0_RD_PTR		0x4f0
+#define HGC_RXM_DFX_STATUS14		0xae8
+#define HGC_RXM_DFX_STATUS14_MEM0_OFF	0
+#define HGC_RXM_DFX_STATUS14_MEM0_MSK	(0x1ff << \
+					 HGC_RXM_DFX_STATUS14_MEM0_OFF)
+#define HGC_RXM_DFX_STATUS14_MEM1_OFF	9
+#define HGC_RXM_DFX_STATUS14_MEM1_MSK	(0x1ff << \
+					 HGC_RXM_DFX_STATUS14_MEM1_OFF)
+#define HGC_RXM_DFX_STATUS14_MEM2_OFF	18
+#define HGC_RXM_DFX_STATUS14_MEM2_MSK	(0x1ff << \
+					 HGC_RXM_DFX_STATUS14_MEM2_OFF)
+#define HGC_RXM_DFX_STATUS15		0xaec
+#define HGC_RXM_DFX_STATUS15_MEM3_OFF	0
+#define HGC_RXM_DFX_STATUS15_MEM3_MSK	(0x1ff << \
+					 HGC_RXM_DFX_STATUS15_MEM3_OFF)
 #define AWQOS_AWCACHE_CFG	0xc84
 #define ARQOS_ARCACHE_CFG	0xc88
 #define HILINK_ERR_DFX		0xe04
@@ -123,10 +191,32 @@
 #define PHY_CFG_PHY_RST_OFF		3
 #define PHY_CFG_PHY_RST_MSK		(0x1 << PHY_CFG_PHY_RST_OFF)
 #define PROG_PHY_LINK_RATE		(PORT_BASE + 0x8)
+#define CFG_PROG_PHY_LINK_RATE_OFF	8
+#define CFG_PROG_PHY_LINK_RATE_MSK	(0xf << CFG_PROG_PHY_LINK_RATE_OFF)
 #define PHY_CTRL			(PORT_BASE + 0x14)
 #define PHY_CTRL_RESET_OFF		0
 #define PHY_CTRL_RESET_MSK		(0x1 << PHY_CTRL_RESET_OFF)
+#define CMD_HDR_PIR_OFF			8
+#define CMD_HDR_PIR_MSK			(0x1 << CMD_HDR_PIR_OFF)
+#define SERDES_CFG			(PORT_BASE + 0x1c)
+#define CFG_ALOS_CHK_DISABLE_OFF	9
+#define CFG_ALOS_CHK_DISABLE_MSK	(0x1 << CFG_ALOS_CHK_DISABLE_OFF)
+#define SAS_PHY_BIST_CTRL		(PORT_BASE + 0x2c)
+#define CFG_BIST_MODE_SEL_OFF		0
+#define CFG_BIST_MODE_SEL_MSK		(0xf << CFG_BIST_MODE_SEL_OFF)
+#define CFG_LOOP_TEST_MODE_OFF		14
+#define CFG_LOOP_TEST_MODE_MSK		(0x3 << CFG_LOOP_TEST_MODE_OFF)
+#define CFG_RX_BIST_EN_OFF		16
+#define CFG_RX_BIST_EN_MSK		(0x1 << CFG_RX_BIST_EN_OFF)
+#define CFG_TX_BIST_EN_OFF		17
+#define CFG_TX_BIST_EN_MSK		(0x1 << CFG_TX_BIST_EN_OFF)
+#define CFG_BIST_TEST_OFF		18
+#define CFG_BIST_TEST_MSK		(0x1 << CFG_BIST_TEST_OFF)
+#define SAS_PHY_BIST_CODE		(PORT_BASE + 0x30)
+#define SAS_PHY_BIST_CODE1		(PORT_BASE + 0x34)
+#define SAS_BIST_ERR_CNT		(PORT_BASE + 0x38)
 #define SL_CFG				(PORT_BASE + 0x84)
+#define AIP_LIMIT			(PORT_BASE + 0x90)
 #define SL_CONTROL			(PORT_BASE + 0x94)
 #define SL_CONTROL_NOTIFY_EN_OFF	0
 #define SL_CONTROL_NOTIFY_EN_MSK	(0x1 << SL_CONTROL_NOTIFY_EN_OFF)
@@ -167,21 +257,28 @@
 #define CHL_INT0_PHY_RDY_OFF		5
 #define CHL_INT0_PHY_RDY_MSK		(0x1 << CHL_INT0_PHY_RDY_OFF)
 #define CHL_INT1			(PORT_BASE + 0x1b8)
-#define CHL_INT1_DMAC_TX_ECC_ERR_OFF	15
-#define CHL_INT1_DMAC_TX_ECC_ERR_MSK	(0x1 << CHL_INT1_DMAC_TX_ECC_ERR_OFF)
-#define CHL_INT1_DMAC_RX_ECC_ERR_OFF	17
-#define CHL_INT1_DMAC_RX_ECC_ERR_MSK	(0x1 << CHL_INT1_DMAC_RX_ECC_ERR_OFF)
+#define CHL_INT1_DMAC_TX_ECC_MB_ERR_OFF	15
+#define CHL_INT1_DMAC_TX_ECC_1B_ERR_OFF	16
+#define CHL_INT1_DMAC_RX_ECC_MB_ERR_OFF	17
+#define CHL_INT1_DMAC_RX_ECC_1B_ERR_OFF	18
 #define CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF	19
 #define CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF	20
 #define CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF	21
 #define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF	22
+#define CHL_INT1_DMAC_TX_FIFO_ERR_OFF	23
+#define CHL_INT1_DMAC_RX_FIFO_ERR_OFF	24
+#define CHL_INT1_DMAC_TX_AXI_RUSER_ERR_OFF	26
+#define CHL_INT1_DMAC_RX_AXI_RUSER_ERR_OFF	27
 #define CHL_INT2			(PORT_BASE + 0x1bc)
 #define CHL_INT2_SL_IDAF_TOUT_CONF_OFF	0
+#define CHL_INT2_RX_DISP_ERR_OFF	28
+#define CHL_INT2_RX_CODE_ERR_OFF	29
 #define CHL_INT2_RX_INVLD_DW_OFF	30
 #define CHL_INT2_STP_LINK_TIMEOUT_OFF	31
 #define CHL_INT0_MSK			(PORT_BASE + 0x1c0)
 #define CHL_INT1_MSK			(PORT_BASE + 0x1c4)
 #define CHL_INT2_MSK			(PORT_BASE + 0x1c8)
+#define SAS_EC_INT_COAL_TIME		(PORT_BASE + 0x1cc)
 #define CHL_INT_COAL_EN			(PORT_BASE + 0x1d0)
 #define SAS_RX_TRAIN_TIMER		(PORT_BASE + 0x2a4)
 #define PHY_CTRL_RDY_MSK		(PORT_BASE + 0x2b0)
@@ -201,6 +298,7 @@
 #define ERR_CNT_DWS_LOST		(PORT_BASE + 0x380)
 #define ERR_CNT_RESET_PROB		(PORT_BASE + 0x384)
 #define ERR_CNT_INVLD_DW		(PORT_BASE + 0x390)
+#define ERR_CNT_CODE_ERR		(PORT_BASE + 0x394)
 #define ERR_CNT_DISP_ERR		(PORT_BASE + 0x398)
 
 #define DEFAULT_ITCT_HW		2048 /* reset value, not reprogrammed */
@@ -218,10 +316,8 @@
 #define AM_CFG_SINGLE_PORT_MAX_TRANS	(0x5014)
 #define AXI_CFG					(0x5100)
 #define AM_ROB_ECC_ERR_ADDR		(0x510c)
-#define AM_ROB_ECC_ONEBIT_ERR_ADDR_OFF	0
-#define AM_ROB_ECC_ONEBIT_ERR_ADDR_MSK	(0xff << AM_ROB_ECC_ONEBIT_ERR_ADDR_OFF)
-#define AM_ROB_ECC_MULBIT_ERR_ADDR_OFF	8
-#define AM_ROB_ECC_MULBIT_ERR_ADDR_MSK	(0xff << AM_ROB_ECC_MULBIT_ERR_ADDR_OFF)
+#define AM_ROB_ECC_ERR_ADDR_OFF	0
+#define AM_ROB_ECC_ERR_ADDR_MSK	0xffffffff
 
 /* RAS registers need init */
 #define RAS_BASE		(0x6000)
@@ -331,6 +427,16 @@
 #define ITCT_HDR_RTOLT_OFF		48
 #define ITCT_HDR_RTOLT_MSK		(0xffffULL << ITCT_HDR_RTOLT_OFF)
 
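+/*
+ * Protection information unit placed after the SSP command IU when T10
+ * PI is in use: dw0 carries the T10_*_EN flags and the user data block
+ * size, while lbrtcv/lbrtgv hold the logical block reference tag
+ * check/generate values (see fill_prot_v3_hw() below).
+ */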
+struct hisi_sas_protect_iu_v3_hw {
+	u32 dw0;
+	u32 lbrtcv;
+	u32 lbrtgv;
+	u32 dw3;
+	u32 dw4;
+	u32 dw5;
+	u32 rsv;
+};
+
 struct hisi_sas_complete_v3_hdr {
 	__le32 dw0;
 	__le32 dw1;
@@ -370,6 +476,42 @@
 	((fis.command == ATA_CMD_DEV_RESET) && \
 	((fis.control & ATA_SRST) != 0)))
 
+#define T10_INSRT_EN_OFF    0
+#define T10_INSRT_EN_MSK    (1 << T10_INSRT_EN_OFF)
+#define T10_RMV_EN_OFF	    1
+#define T10_RMV_EN_MSK	    (1 << T10_RMV_EN_OFF)
+#define T10_RPLC_EN_OFF	    2
+#define T10_RPLC_EN_MSK	    (1 << T10_RPLC_EN_OFF)
+#define T10_CHK_EN_OFF	    3
+#define T10_CHK_EN_MSK	    (1 << T10_CHK_EN_OFF)
+#define INCR_LBRT_OFF	    5
+#define INCR_LBRT_MSK	    (1 << INCR_LBRT_OFF)
+#define USR_DATA_BLOCK_SZ_OFF	20
+#define USR_DATA_BLOCK_SZ_MSK	(0x3 << USR_DATA_BLOCK_SZ_OFF)
+#define T10_CHK_MSK_OFF	    16
+#define T10_CHK_REF_TAG_MSK (0xf0 << T10_CHK_MSK_OFF)
+#define T10_CHK_APP_TAG_MSK (0xc << T10_CHK_MSK_OFF)
+
+#define BASE_VECTORS_V3_HW  16
+#define MIN_AFFINE_VECTORS_V3_HW  (BASE_VECTORS_V3_HW + 1)
+
+enum {
+	DSM_FUNC_ERR_HANDLE_MSI = 0,
+};
+
+static bool hisi_sas_intr_conv;
+MODULE_PARM_DESC(intr_conv, "interrupt converge enable (0-1)");
+
+/* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
+static int prot_mask;
+module_param(prot_mask, int, 0);
+MODULE_PARM_DESC(prot_mask, "host protection capabilities mask, def=0x0");
+
+static bool auto_affine_msi_experimental;
+module_param(auto_affine_msi_experimental, bool, 0444);
+MODULE_PARM_DESC(auto_affine_msi_experimental, "Enable auto-affinity of MSI IRQs as experimental:\n"
+		 "default is off");
+
 static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
 {
 	void __iomem *regs = hisi_hba->regs + off;
@@ -377,13 +519,6 @@
 	return readl(regs);
 }
 
-static u32 hisi_sas_read32_relaxed(struct hisi_hba *hisi_hba, u32 off)
-{
-	void __iomem *regs = hisi_hba->regs + off;
-
-	return readl_relaxed(regs);
-}
-
 static void hisi_sas_write32(struct hisi_hba *hisi_hba, u32 off, u32 val)
 {
 	void __iomem *regs = hisi_hba->regs + off;
@@ -423,31 +558,31 @@
 
 static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
 {
-	struct pci_dev *pdev = hisi_hba->pci_dev;
 	int i;
 
 	/* Global registers init */
 	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE,
 			 (u32)((1ULL << hisi_hba->queue_count) - 1));
+	hisi_sas_write32(hisi_hba, SAS_AXI_USER3, 0);
 	hisi_sas_write32(hisi_hba, CFG_MAX_TAG, 0xfff0400);
 	hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108);
+	hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1);
 	hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
 	hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1);
 	hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1);
+	hisi_sas_write32(hisi_hba, CQ_INT_CONVERGE_EN,
+			 hisi_sas_intr_conv);
 	hisi_sas_write32(hisi_hba, OQ_INT_SRC, 0xffff);
 	hisi_sas_write32(hisi_hba, ENT_INT_SRC1, 0xffffffff);
 	hisi_sas_write32(hisi_hba, ENT_INT_SRC2, 0xffffffff);
 	hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff);
 	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xfefefefe);
 	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xfefefefe);
-	if (pdev->revision >= 0x21)
-		hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffff7fff);
-	else
-		hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xfffe20ff);
+	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffc220ff);
 	hisi_sas_write32(hisi_hba, CHNL_PHYUPDOWN_INT_MSK, 0x0);
 	hisi_sas_write32(hisi_hba, CHNL_ENT_INT_MSK, 0x0);
 	hisi_sas_write32(hisi_hba, HGC_COM_INT_MSK, 0x0);
-	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0x0);
+	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0x155555);
 	hisi_sas_write32(hisi_hba, AWQOS_AWCACHE_CFG, 0xf0f0);
 	hisi_sas_write32(hisi_hba, ARQOS_ARCACHE_CFG, 0xf0f0);
 	for (i = 0; i < hisi_hba->queue_count; i++)
@@ -472,17 +607,13 @@
 		}
 		hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE,
 			prog_phy_link_rate);
+		hisi_sas_phy_write32(hisi_hba, i, SERDES_CFG, 0xffc00);
 		hisi_sas_phy_write32(hisi_hba, i, SAS_RX_TRAIN_TIMER, 0x13e80);
 		hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff);
 		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
 		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff);
 		hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
-		if (pdev->revision >= 0x21)
-			hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK,
-					0xffffffff);
-		else
-			hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK,
-					0xff87ffff);
+		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xf2057fff);
 		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffbfe);
 		hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
 		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
@@ -492,9 +623,12 @@
 		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x1);
 		hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, 0x7f7a120);
 		hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER, 0x2a0a01);
-
+		hisi_sas_phy_write32(hisi_hba, i, SAS_SSP_CON_TIMER_CFG, 0x32);
+		hisi_sas_phy_write32(hisi_hba, i, SAS_EC_INT_COAL_TIME,
+				     0x30f4240);
 		/* used for 12G negotiate */
 		hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e);
+		hisi_sas_phy_write32(hisi_hba, i, AIP_LIMIT, 0x2ffff);
 	}
 
 	for (i = 0; i < hisi_hba->queue_count; i++) {
@@ -619,6 +753,7 @@
 	struct domain_device *parent_dev = device->parent;
 	struct asd_sas_port *sas_port = device->port;
 	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
+	u64 sas_addr;
 
 	memset(itct, 0, sizeof(*itct));
 
@@ -632,7 +767,7 @@
 		break;
 	case SAS_SATA_DEV:
 	case SAS_SATA_PENDING:
-		if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
+		if (parent_dev && dev_is_expander(parent_dev->dev_type))
 			qw0 = HISI_SAS_DEV_TYPE_STP << ITCT_HDR_DEV_TYPE_OFF;
 		else
 			qw0 = HISI_SAS_DEV_TYPE_SATA << ITCT_HDR_DEV_TYPE_OFF;
@@ -651,8 +786,8 @@
 	itct->qw0 = cpu_to_le64(qw0);
 
 	/* qw1 */
-	memcpy(&itct->sas_addr, device->sas_addr, SAS_ADDR_SIZE);
-	itct->sas_addr = __swab64(itct->sas_addr);
+	memcpy(&sas_addr, device->sas_addr, SAS_ADDR_SIZE);
+	itct->sas_addr = cpu_to_le64(__swab64(sas_addr));
 
 	/* qw2 */
 	if (!dev_is_sata(device))
@@ -675,7 +810,7 @@
 		hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
 				 ENT_INT_SRC3_ITC_INT_MSK);
 
-	/* clear the itct table*/
+	/* clear the itct table */
 	reg_val = ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
 	hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val);
 
@@ -745,6 +880,8 @@
 static int hw_init_v3_hw(struct hisi_hba *hisi_hba)
 {
 	struct device *dev = hisi_hba->dev;
+	union acpi_object *obj;
+	guid_t guid;
 	int rc;
 
 	rc = reset_hw_v3_hw(hisi_hba);
@@ -756,6 +893,19 @@
 	msleep(100);
 	init_reg_v3_hw(hisi_hba);
 
+	if (guid_parse("D5918B4B-37AE-4E10-A99F-E5E8A6EF4C1F", &guid)) {
+		dev_err(dev, "Parse GUID failed\n");
+		return -EINVAL;
+	}
+
+	/* Switch over to MSI handling, from PCI AER default */
+	obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &guid, 0,
+				DSM_FUNC_ERR_HANDLE_MSI, NULL);
+	if (!obj)
+		dev_warn(dev, "Switch over to MSI handling failed\n");
+	else
+		ACPI_FREE(obj);
+
 	return 0;
 }
 
@@ -771,8 +921,14 @@
 static void disable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
 {
 	u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
+	u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2_MSK);
+	static const u32 msk = BIT(CHL_INT2_RX_DISP_ERR_OFF) |
+			       BIT(CHL_INT2_RX_CODE_ERR_OFF) |
+			       BIT(CHL_INT2_RX_INVLD_DW_OFF);
 	u32 state;
 
+	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2_MSK, msk | irq_msk);
+
 	cfg &= ~PHY_CFG_ENA_MSK;
 	hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
 
@@ -783,6 +939,15 @@
 		cfg |= PHY_CFG_PHY_RST_MSK;
 		hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
 	}
+
+	udelay(1);
+
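+	/*
+	 * Disabling the PHY can latch spurious RX errors; reading the
+	 * clear-on-read error counters drains them before the interrupts
+	 * are acked and unmasked again.
+	 */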
+	hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_INVLD_DW);
+	hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DISP_ERR);
+	hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_CODE_ERR);
+
+	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2, msk);
+	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2_MSK, irq_msk);
 }
 
 static void start_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
@@ -797,14 +962,14 @@
 	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
 	u32 txid_auto;
 
-	disable_phy_v3_hw(hisi_hba, phy_no);
+	hisi_sas_phy_enable(hisi_hba, phy_no, 0);
 	if (phy->identify.device_type == SAS_END_DEVICE) {
 		txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
 		hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
 					txid_auto | TX_HARDRST_MSK);
 	}
 	msleep(100);
-	start_phy_v3_hw(hisi_hba, phy_no);
+	hisi_sas_phy_enable(hisi_hba, phy_no, 1);
 }
 
 static enum sas_linkrate phy_get_max_linkrate_v3_hw(void)
@@ -823,11 +988,11 @@
 		if (!sas_phy->phy->enabled)
 			continue;
 
-		start_phy_v3_hw(hisi_hba, i);
+		hisi_sas_phy_enable(hisi_hba, i, 1);
 	}
 }
 
-static void sl_notify_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
+static void sl_notify_ssp_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
 {
 	u32 sl_control;
 
@@ -854,31 +1019,6 @@
 	return bitmap;
 }
 
-/**
- * The callpath to this function and upto writing the write
- * queue pointer should be safe from interruption.
- */
-static int
-get_free_slot_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
-{
-	struct device *dev = hisi_hba->dev;
-	int queue = dq->id;
-	u32 r, w;
-
-	w = dq->wr_point;
-	r = hisi_sas_read32_relaxed(hisi_hba,
-				DLVRY_Q_0_RD_PTR + (queue * 0x14));
-	if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
-		dev_warn(dev, "full queue=%d r=%d w=%d\n",
-				queue, r, w);
-		return -EAGAIN;
-	}
-
-	dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
-
-	return w;
-}
-
 static void start_delivery_v3_hw(struct hisi_sas_dq *dq)
 {
 	struct hisi_hba *hisi_hba = dq->hisi_hba;
@@ -926,7 +1066,102 @@
 
 	hdr->prd_table_addr = cpu_to_le64(hisi_sas_sge_addr_dma(slot));
 
-	hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF);
+	hdr->sg_len |= cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF);
+}
+
+static void prep_prd_sge_dif_v3_hw(struct hisi_hba *hisi_hba,
+				   struct hisi_sas_slot *slot,
+				   struct hisi_sas_cmd_hdr *hdr,
+				   struct scatterlist *scatter,
+				   int n_elem)
+{
+	struct hisi_sas_sge_dif_page *sge_dif_page;
+	struct scatterlist *sg;
+	int i;
+
+	sge_dif_page = hisi_sas_sge_dif_addr_mem(slot);
+
+	for_each_sg(scatter, sg, n_elem, i) {
+		struct hisi_sas_sge *entry = &sge_dif_page->sge[i];
+
+		entry->addr = cpu_to_le64(sg_dma_address(sg));
+		entry->page_ctrl_0 = 0;
+		entry->page_ctrl_1 = 0;
+		entry->data_len = cpu_to_le32(sg_dma_len(sg));
+		entry->data_off = 0;
+	}
+
+	hdr->dif_prd_table_addr =
+		cpu_to_le64(hisi_sas_sge_dif_addr_dma(slot));
+
+	hdr->sg_len |= cpu_to_le32(n_elem << CMD_HDR_DIF_SGL_LEN_OFF);
+}
+
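+/*
+ * Note: the T10_CHK_*_MSK bits appear to select which tag compares the
+ * hardware skips, so when the midlayer requests a REF tag check only
+ * the APP tag compare is masked off (inferred from how the masks are
+ * applied to dw4).
+ */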
+static u32 get_prot_chk_msk_v3_hw(struct scsi_cmnd *scsi_cmnd)
+{
+	unsigned char prot_flags = scsi_cmnd->prot_flags;
+
+	if (prot_flags & SCSI_PROT_REF_CHECK)
+		return T10_CHK_APP_TAG_MSK;
+	return T10_CHK_REF_TAG_MSK | T10_CHK_APP_TAG_MSK;
+}
+
+static void fill_prot_v3_hw(struct scsi_cmnd *scsi_cmnd,
+			    struct hisi_sas_protect_iu_v3_hw *prot)
+{
+	unsigned char prot_op = scsi_get_prot_op(scsi_cmnd);
+	unsigned int interval = scsi_prot_interval(scsi_cmnd);
+	u32 lbrt_chk_val = t10_pi_ref_tag(scsi_cmnd->request);
+
+	switch (prot_op) {
+	case SCSI_PROT_READ_INSERT:
+		prot->dw0 |= T10_INSRT_EN_MSK;
+		prot->lbrtgv = lbrt_chk_val;
+		break;
+	case SCSI_PROT_READ_STRIP:
+		prot->dw0 |= (T10_RMV_EN_MSK | T10_CHK_EN_MSK);
+		prot->lbrtcv = lbrt_chk_val;
+		prot->dw4 |= get_prot_chk_msk_v3_hw(scsi_cmnd);
+		break;
+	case SCSI_PROT_READ_PASS:
+		prot->dw0 |= T10_CHK_EN_MSK;
+		prot->lbrtcv = lbrt_chk_val;
+		prot->dw4 |= get_prot_chk_msk_v3_hw(scsi_cmnd);
+		break;
+	case SCSI_PROT_WRITE_INSERT:
+		prot->dw0 |= T10_INSRT_EN_MSK;
+		prot->lbrtgv = lbrt_chk_val;
+		break;
+	case SCSI_PROT_WRITE_STRIP:
+		prot->dw0 |= (T10_RMV_EN_MSK | T10_CHK_EN_MSK);
+		prot->lbrtcv = lbrt_chk_val;
+		break;
+	case SCSI_PROT_WRITE_PASS:
+		prot->dw0 |= T10_CHK_EN_MSK;
+		prot->lbrtcv = lbrt_chk_val;
+		prot->dw4 |= get_prot_chk_msk_v3_hw(scsi_cmnd);
+		break;
+	default:
+		WARN(1, "prot_op(0x%x) is not valid\n", prot_op);
+		break;
+	}
+
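+	/* USR_DATA_BLOCK_SZ encoding: 0 = 512, 1 = 4096, 2 = 520 bytes */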
+	switch (interval) {
+	case 512:
+		break;
+	case 4096:
+		prot->dw0 |= (0x1 << USR_DATA_BLOCK_SZ_OFF);
+		break;
+	case 520:
+		prot->dw0 |= (0x2 << USR_DATA_BLOCK_SZ_OFF);
+		break;
+	default:
+		WARN(1, "protection interval (0x%x) invalid\n",
+		     interval);
+		break;
+	}
+
+	prot->dw0 |= INCR_LBRT_MSK;
 }
 
 static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
@@ -941,8 +1176,9 @@
 	struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
 	struct hisi_sas_tmf_task *tmf = slot->tmf;
 	int has_data = 0, priority = !!tmf;
+	unsigned char prot_op;
 	u8 *buf_cmd;
-	u32 dw1 = 0, dw2 = 0;
+	u32 dw1 = 0, dw2 = 0, len = 0;
 
 	hdr->dw0 = cpu_to_le32((1 << CMD_HDR_RESP_REPORT_OFF) |
 			       (2 << CMD_HDR_TLR_CTRL_OFF) |
@@ -955,6 +1191,7 @@
 		dw1 |= 2 << CMD_HDR_FRAME_TYPE_OFF;
 		dw1 |= DIR_NO_DATA << CMD_HDR_DIR_OFF;
 	} else {
+		prot_op = scsi_get_prot_op(scsi_cmnd);
 		dw1 |= 1 << CMD_HDR_FRAME_TYPE_OFF;
 		switch (scsi_cmnd->sc_data_direction) {
 		case DMA_TO_DEVICE:
@@ -972,7 +1209,6 @@
 
 	/* map itct entry */
 	dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
-	hdr->dw1 = cpu_to_le32(dw1);
 
 	dw2 = (((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr)
 	      + 3) / 4) << CMD_HDR_CFL_OFF) |
@@ -981,11 +1217,16 @@
 	hdr->dw2 = cpu_to_le32(dw2);
 	hdr->transfer_tags = cpu_to_le32(slot->idx);
 
-	if (has_data)
+	if (has_data) {
 		prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter,
-					slot->n_elem);
+				   slot->n_elem);
 
-	hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
+		if (scsi_prot_sg_count(scsi_cmnd))
+			prep_prd_sge_dif_v3_hw(hisi_hba, slot, hdr,
+					       scsi_prot_sglist(scsi_cmnd),
+					       slot->n_elem_dif);
+	}
+
 	hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
 	hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));
 
@@ -1010,6 +1251,39 @@
 			break;
 		}
 	}
+
+	if (has_data && (prot_op != SCSI_PROT_NORMAL)) {
+		struct hisi_sas_protect_iu_v3_hw prot;
+		u8 *buf_cmd_prot;
+
+		hdr->dw7 |= cpu_to_le32(1 << CMD_HDR_ADDR_MODE_SEL_OFF);
+		dw1 |= CMD_HDR_PIR_MSK;
+		buf_cmd_prot = hisi_sas_cmd_hdr_addr_mem(slot) +
+			       sizeof(struct ssp_frame_hdr) +
+			       sizeof(struct ssp_command_iu);
+
+		memset(&prot, 0, sizeof(struct hisi_sas_protect_iu_v3_hw));
+		fill_prot_v3_hw(scsi_cmnd, &prot);
+		memcpy(buf_cmd_prot, &prot,
+		       sizeof(struct hisi_sas_protect_iu_v3_hw));
+		/*
+		 * For READ, we need length of info read to memory, while for
+		 * WRITE we need length of data written to the disk.
+		 */
+		if (prot_op == SCSI_PROT_WRITE_INSERT ||
+		    prot_op == SCSI_PROT_READ_INSERT ||
+		    prot_op == SCSI_PROT_WRITE_PASS ||
+		    prot_op == SCSI_PROT_READ_PASS) {
+			unsigned int interval = scsi_prot_interval(scsi_cmnd);
+			unsigned int ilog2_interval = ilog2(interval);
+
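+			/*
+			 * Each protection interval carries one 8-byte PI
+			 * tuple: e.g. 64 KiB at a 4096-byte interval adds
+			 * 16 * 8 = 128 extra bytes.
+			 */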
+			len = (task->total_xfer_len >> ilog2_interval) * 8;
+		}
+	}
+
+	hdr->dw1 = cpu_to_le32(dw1);
+
+	hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len + len);
 }
 
 static void prep_smp_v3_hw(struct hisi_hba *hisi_hba,
@@ -1067,10 +1341,10 @@
 	u32 dw1 = 0, dw2 = 0;
 
 	hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF);
-	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
+	if (parent_dev && dev_is_expander(parent_dev->dev_type))
 		hdr->dw0 |= cpu_to_le32(3 << CMD_HDR_CMD_OFF);
 	else
-		hdr->dw0 |= cpu_to_le32(4 << CMD_HDR_CMD_OFF);
+		hdr->dw0 |= cpu_to_le32(4U << CMD_HDR_CMD_OFF);
 
 	switch (task->data_dir) {
 	case DMA_TO_DEVICE:
@@ -1100,7 +1374,10 @@
 	hdr->dw1 = cpu_to_le32(dw1);
 
 	/* dw2 */
-	if (task->ata_task.use_ncq && hisi_sas_get_ncq_tag(task, &hdr_tag)) {
+	if (task->ata_task.use_ncq) {
+		struct ata_queued_cmd *qc = task->uldd_task;
+
+		hdr_tag = qc->tag;
 		task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
 		dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF;
 	}
@@ -1138,7 +1415,7 @@
 	struct hisi_sas_port *port = slot->port;
 
 	/* dw0 */
-	hdr->dw0 = cpu_to_le32((5 << CMD_HDR_CMD_OFF) | /*abort*/
+	hdr->dw0 = cpu_to_le32((5U << CMD_HDR_CMD_OFF) | /*abort*/
 			       (port->id << CMD_HDR_PORT_OFF) |
 				   (dev_is_sata(dev)
 					<< CMD_HDR_ABORT_DEVICE_TYPE_OFF) |
@@ -1157,13 +1434,15 @@
 
 static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
 {
-	int i, res;
+	int i;
+	irqreturn_t res;
 	u32 context, port_id, link_rate;
 	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
 	struct device *dev = hisi_hba->dev;
 	unsigned long flags;
 
+	del_timer(&phy->timer);
 	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1);
 
 	port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
@@ -1185,6 +1464,7 @@
 		struct hisi_sas_initial_fis *initial_fis;
 		struct dev_to_host_fis *fis;
 		u8 attached_sas_addr[SAS_ADDR_SIZE] = {0};
+		struct Scsi_Host *shost = hisi_hba->shost;
 
 		dev_info(dev, "phyup: phy%d link_rate=%d(sata)\n", phy_no, link_rate);
 		initial_fis = &hisi_hba->initial_fis[phy_no];
@@ -1201,6 +1481,7 @@
 
 		sas_phy->oob_mode = SATA_OOB_MODE;
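+		/*
+		 * Note: a directly attached SATA device has no SAS address,
+		 * so one is faked here; folding in host_no keeps it unique
+		 * when several HBAs are present.
+		 */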
 		attached_sas_addr[0] = 0x50;
+		attached_sas_addr[6] = shost->host_no;
 		attached_sas_addr[7] = phy_no;
 		memcpy(sas_phy->attached_sas_addr,
 		       attached_sas_addr,
@@ -1257,9 +1538,11 @@
 
 static irqreturn_t phy_down_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
 {
+	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
 	u32 phy_state, sl_ctrl, txid_auto;
 	struct device *dev = hisi_hba->dev;
 
+	del_timer(&phy->timer);
 	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1);
 
 	phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
@@ -1343,6 +1626,14 @@
 
 static const struct hisi_sas_hw_error port_axi_error[] = {
 	{
+		.irq_msk = BIT(CHL_INT1_DMAC_TX_ECC_MB_ERR_OFF),
+		.msg = "dmac_tx_ecc_bad_err",
+	},
+	{
+		.irq_msk = BIT(CHL_INT1_DMAC_RX_ECC_MB_ERR_OFF),
+		.msg = "dmac_rx_ecc_bad_err",
+	},
+	{
 		.irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF),
 		.msg = "dma_tx_axi_wr_err",
 	},
@@ -1358,6 +1649,22 @@
 		.irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF),
 		.msg = "dma_rx_axi_rd_err",
 	},
+	{
+		.irq_msk = BIT(CHL_INT1_DMAC_TX_FIFO_ERR_OFF),
+		.msg = "dma_tx_fifo_err",
+	},
+	{
+		.irq_msk = BIT(CHL_INT1_DMAC_RX_FIFO_ERR_OFF),
+		.msg = "dma_rx_fifo_err",
+	},
+	{
+		.irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_RUSER_ERR_OFF),
+		.msg = "dma_tx_axi_ruser_err",
+	},
+	{
+		.irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_RUSER_ERR_OFF),
+		.msg = "dma_rx_axi_ruser_err",
+	},
 };
 
 static void handle_chl_int1_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
@@ -1385,6 +1692,39 @@
 	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT1, irq_value);
 }
 
+static void phy_get_events_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+	struct asd_sas_phy *sas_phy = &phy->sas_phy;
+	struct sas_phy *sphy = sas_phy->phy;
+	unsigned long flags;
+	u32 reg_value;
+
+	spin_lock_irqsave(&phy->lock, flags);
+
+	/* loss dword sync */
+	reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DWS_LOST);
+	sphy->loss_of_dword_sync_count += reg_value;
+
+	/* phy reset problem */
+	reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_RESET_PROB);
+	sphy->phy_reset_problem_count += reg_value;
+
+	/* invalid dword */
+	reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_INVLD_DW);
+	sphy->invalid_dword_count += reg_value;
+
+	/* disparity err */
+	reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DISP_ERR);
+	sphy->running_disparity_error_count += reg_value;
+
+	/* code violation error */
+	reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_CODE_ERR);
+	phy->code_violation_err_count += reg_value;
+
+	spin_unlock_irqrestore(&phy->lock, flags);
+}
+
 static void handle_chl_int2_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
 {
 	u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2_MSK);
@@ -1392,6 +1732,9 @@
 	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
 	struct pci_dev *pci_dev = hisi_hba->pci_dev;
 	struct device *dev = hisi_hba->dev;
+	static const u32 msk = BIT(CHL_INT2_RX_DISP_ERR_OFF) |
+			BIT(CHL_INT2_RX_CODE_ERR_OFF) |
+			BIT(CHL_INT2_RX_INVLD_DW_OFF);
 
 	irq_value &= ~irq_msk;
 	if (!irq_value)
@@ -1412,6 +1755,25 @@
 			hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
 	}
 
+	if (pci_dev->revision > 0x20 && (irq_value & msk)) {
+		struct asd_sas_phy *sas_phy = &phy->sas_phy;
+		struct sas_phy *sphy = sas_phy->phy;
+
+		phy_get_events_v3_hw(hisi_hba, phy_no);
+
+		if (irq_value & BIT(CHL_INT2_RX_INVLD_DW_OFF))
+			dev_info(dev, "phy%d invalid dword cnt:   %u\n", phy_no,
+				 sphy->invalid_dword_count);
+
+		if (irq_value & BIT(CHL_INT2_RX_CODE_ERR_OFF))
+			dev_info(dev, "phy%d code violation cnt:  %u\n", phy_no,
+				 phy->code_violation_err_count);
+
+		if (irq_value & BIT(CHL_INT2_RX_DISP_ERR_OFF))
+			dev_info(dev, "phy%d disparity error cnt: %u\n", phy_no,
+				 sphy->running_disparity_error_count);
+	}
+
 	if ((irq_value & BIT(CHL_INT2_RX_INVLD_DW_OFF)) &&
 	    (pci_dev->revision == 0x20)) {
 		u32 reg_value;
@@ -1428,6 +1790,19 @@
 	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2, irq_value);
 }
 
+static void handle_chl_int0_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+	u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT0);
+
+	if (irq_value0 & CHL_INT0_PHY_RDY_MSK)
+		hisi_sas_phy_oob_ready(hisi_hba, phy_no);
+
+	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
+			     irq_value0 & (~CHL_INT0_SL_RX_BCST_ACK_MSK)
+			     & (~CHL_INT0_SL_PHY_ENABLE_MSK)
+			     & (~CHL_INT0_NOT_RDY_MSK));
+}
+
 static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
 {
 	struct hisi_hba *hisi_hba = p;
@@ -1438,8 +1813,8 @@
 				& 0xeeeeeeee;
 
 	while (irq_msk) {
-		u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no,
-						     CHL_INT0);
+		if (irq_msk & (2 << (phy_no * 4)))
+			handle_chl_int0_v3_hw(hisi_hba, phy_no);
 
 		if (irq_msk & (4 << (phy_no * 4)))
 			handle_chl_int1_v3_hw(hisi_hba, phy_no);
@@ -1447,13 +1822,6 @@
 		if (irq_msk & (8 << (phy_no * 4)))
 			handle_chl_int2_v3_hw(hisi_hba, phy_no);
 
-		if (irq_msk & (2 << (phy_no * 4)) && irq_value0) {
-			hisi_sas_phy_write32(hisi_hba, phy_no,
-					CHL_INT0, irq_value0
-					& (~CHL_INT0_SL_RX_BCST_ACK_MSK)
-					& (~CHL_INT0_SL_PHY_ENABLE_MSK)
-					& (~CHL_INT0_NOT_RDY_MSK));
-		}
 		irq_msk &= ~(0xe << (phy_no * 4));
 		phy_no++;
 	}
@@ -1461,6 +1829,122 @@
 	return IRQ_HANDLED;
 }
 
+static const struct hisi_sas_hw_error multi_bit_ecc_errors[] = {
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF),
+		.msk = HGC_DQE_ECC_MB_ADDR_MSK,
+		.shift = HGC_DQE_ECC_MB_ADDR_OFF,
+		.msg = "hgc_dqe_eccbad_intr",
+		.reg = HGC_DQE_ECC_ADDR,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_IOST_ECC_MB_OFF),
+		.msk = HGC_IOST_ECC_MB_ADDR_MSK,
+		.shift = HGC_IOST_ECC_MB_ADDR_OFF,
+		.msg = "hgc_iost_eccbad_intr",
+		.reg = HGC_IOST_ECC_ADDR,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_ITCT_ECC_MB_OFF),
+		.msk = HGC_ITCT_ECC_MB_ADDR_MSK,
+		.shift = HGC_ITCT_ECC_MB_ADDR_OFF,
+		.msg = "hgc_itct_eccbad_intr",
+		.reg = HGC_ITCT_ECC_ADDR,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF),
+		.msk = HGC_LM_DFX_STATUS2_IOSTLIST_MSK,
+		.shift = HGC_LM_DFX_STATUS2_IOSTLIST_OFF,
+		.msg = "hgc_iostl_eccbad_intr",
+		.reg = HGC_LM_DFX_STATUS2,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF),
+		.msk = HGC_LM_DFX_STATUS2_ITCTLIST_MSK,
+		.shift = HGC_LM_DFX_STATUS2_ITCTLIST_OFF,
+		.msg = "hgc_itctl_eccbad_intr",
+		.reg = HGC_LM_DFX_STATUS2,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_CQE_ECC_MB_OFF),
+		.msk = HGC_CQE_ECC_MB_ADDR_MSK,
+		.shift = HGC_CQE_ECC_MB_ADDR_OFF,
+		.msg = "hgc_cqe_eccbad_intr",
+		.reg = HGC_CQE_ECC_ADDR,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF),
+		.msk = HGC_RXM_DFX_STATUS14_MEM0_MSK,
+		.shift = HGC_RXM_DFX_STATUS14_MEM0_OFF,
+		.msg = "rxm_mem0_eccbad_intr",
+		.reg = HGC_RXM_DFX_STATUS14,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF),
+		.msk = HGC_RXM_DFX_STATUS14_MEM1_MSK,
+		.shift = HGC_RXM_DFX_STATUS14_MEM1_OFF,
+		.msg = "rxm_mem1_eccbad_intr",
+		.reg = HGC_RXM_DFX_STATUS14,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF),
+		.msk = HGC_RXM_DFX_STATUS14_MEM2_MSK,
+		.shift = HGC_RXM_DFX_STATUS14_MEM2_OFF,
+		.msg = "rxm_mem2_eccbad_intr",
+		.reg = HGC_RXM_DFX_STATUS14,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF),
+		.msk = HGC_RXM_DFX_STATUS15_MEM3_MSK,
+		.shift = HGC_RXM_DFX_STATUS15_MEM3_OFF,
+		.msg = "rxm_mem3_eccbad_intr",
+		.reg = HGC_RXM_DFX_STATUS15,
+	},
+	{
+		.irq_msk = BIT(SAS_ECC_INTR_OOO_RAM_ECC_MB_OFF),
+		.msk = AM_ROB_ECC_ERR_ADDR_MSK,
+		.shift = AM_ROB_ECC_ERR_ADDR_OFF,
+		.msg = "ooo_ram_eccbad_intr",
+		.reg = AM_ROB_ECC_ERR_ADDR,
+	},
+};
+
+static void multi_bit_ecc_error_process_v3_hw(struct hisi_hba *hisi_hba,
+					      u32 irq_value)
+{
+	struct device *dev = hisi_hba->dev;
+	const struct hisi_sas_hw_error *ecc_error;
+	u32 val;
+	int i;
+
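+	/*
+	 * Multi-bit ECC errors are not correctable, so besides logging the
+	 * failing memory address a full controller reset is scheduled.
+	 */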
+	for (i = 0; i < ARRAY_SIZE(multi_bit_ecc_errors); i++) {
+		ecc_error = &multi_bit_ecc_errors[i];
+		if (irq_value & ecc_error->irq_msk) {
+			val = hisi_sas_read32(hisi_hba, ecc_error->reg);
+			val &= ecc_error->msk;
+			val >>= ecc_error->shift;
+			dev_err(dev, "%s (0x%x) found: mem addr is 0x%08X\n",
+				ecc_error->msg, irq_value, val);
+			queue_work(hisi_hba->wq, &hisi_hba->rst_work);
+		}
+	}
+}
+
+static void fatal_ecc_int_v3_hw(struct hisi_hba *hisi_hba)
+{
+	u32 irq_value, irq_msk;
+
+	irq_msk = hisi_sas_read32(hisi_hba, SAS_ECC_INTR_MSK);
+	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xffffffff);
+
+	irq_value = hisi_sas_read32(hisi_hba, SAS_ECC_INTR);
+	if (irq_value)
+		multi_bit_ecc_error_process_v3_hw(hisi_hba, irq_value);
+
+	hisi_sas_write32(hisi_hba, SAS_ECC_INTR, irq_value);
+	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk);
+}
+
 static const struct hisi_sas_hw_error axi_error[] = {
 	{ .msk = BIT(0), .msg = "IOST_AXI_W_ERR" },
 	{ .msk = BIT(1), .msg = "IOST_AXI_R_ERR" },
@@ -1470,7 +1954,7 @@
 	{ .msk = BIT(5), .msg = "SATA_AXI_R_ERR" },
 	{ .msk = BIT(6), .msg = "DQE_AXI_R_ERR" },
 	{ .msk = BIT(7), .msg = "CQE_AXI_W_ERR" },
-	{},
+	{}
 };
 
 static const struct hisi_sas_hw_error fifo_error[] = {
@@ -1479,7 +1963,7 @@
 	{ .msk = BIT(10), .msg = "GETDQE_FIFO" },
 	{ .msk = BIT(11), .msg = "CMDP_FIFO" },
 	{ .msk = BIT(12), .msg = "AWTCTRL_FIFO" },
-	{},
+	{}
 };
 
 static const struct hisi_sas_hw_error fatal_axi_error[] = {
@@ -1513,6 +1997,23 @@
 		.irq_msk = BIT(ENT_INT_SRC3_ABT_OFF),
 		.msg = "SAS_HGC_ABT fetch LM list",
 	},
+	{
+		.irq_msk = BIT(ENT_INT_SRC3_DQE_POISON_OFF),
+		.msg = "read dqe poison",
+	},
+	{
+		.irq_msk = BIT(ENT_INT_SRC3_IOST_POISON_OFF),
+		.msg = "read iost poison",
+	},
+	{
+		.irq_msk = BIT(ENT_INT_SRC3_ITCT_POISON_OFF),
+		.msg = "read itct poison",
+	},
+	{
+		.irq_msk = BIT(ENT_INT_SRC3_ITCT_NCQ_POISON_OFF),
+		.msg = "read itct ncq poison",
+	},
 };
 
 static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p)
@@ -1520,6 +2021,7 @@
 	u32 irq_value, irq_msk;
 	struct hisi_hba *hisi_hba = p;
 	struct device *dev = hisi_hba->dev;
+	struct pci_dev *pdev = hisi_hba->pci_dev;
 	int i;
 
 	irq_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3);
@@ -1551,8 +2053,21 @@
 				error->msg, irq_value);
 			queue_work(hisi_hba->wq, &hisi_hba->rst_work);
 		}
+
+		if (pdev->revision < 0x21) {
+			u32 reg_val;
+
+			reg_val = hisi_sas_read32(hisi_hba,
+						  AXI_MASTER_CFG_BASE +
+						  AM_CTRL_GLOBAL);
+			reg_val |= AM_CTRL_SHUTDOWN_REQ_MSK;
+			hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE +
+					 AM_CTRL_GLOBAL, reg_val);
+		}
 	}
 
+	fatal_ecc_int_v3_hw(hisi_hba);
+
 	if (irq_value & BIT(ENT_INT_SRC3_ITC_INT_OFF)) {
 		u32 reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR);
 		u32 dev_id = reg_val & ITCT_DEV_MSK;
@@ -1581,15 +2096,16 @@
 			&complete_queue[slot->cmplt_queue_slot];
 	struct hisi_sas_err_record_v3 *record =
 			hisi_sas_status_buf_addr_mem(slot);
-	u32 dma_rx_err_type = record->dma_rx_err_type;
-	u32 trans_tx_fail_type = record->trans_tx_fail_type;
+	u32 dma_rx_err_type = le32_to_cpu(record->dma_rx_err_type);
+	u32 trans_tx_fail_type = le32_to_cpu(record->trans_tx_fail_type);
+	u32 dw3 = le32_to_cpu(complete_hdr->dw3);
 
 	switch (task->task_proto) {
 	case SAS_PROTOCOL_SSP:
 		if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
 			ts->residual = trans_tx_fail_type;
 			ts->stat = SAS_DATA_UNDERRUN;
-		} else if (complete_hdr->dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
+		} else if (dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
 			ts->stat = SAS_QUEUE_FULL;
 			slot->abort = 1;
 		} else {
@@ -1603,7 +2119,7 @@
 		if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
 			ts->residual = trans_tx_fail_type;
 			ts->stat = SAS_DATA_UNDERRUN;
-		} else if (complete_hdr->dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
+		} else if (dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
 			ts->stat = SAS_PHY_DOWN;
 			slot->abort = 1;
 		} else {
@@ -1636,6 +2152,7 @@
 			&complete_queue[slot->cmplt_queue_slot];
 	unsigned long flags;
 	bool is_internal = slot->is_internal;
+	u32 dw0, dw1, dw3;
 
 	if (unlikely(!task || !task->lldd_task || !task->dev))
 		return -EINVAL;
@@ -1659,11 +2176,14 @@
 		goto out;
 	}
 
+	dw0 = le32_to_cpu(complete_hdr->dw0);
+	dw1 = le32_to_cpu(complete_hdr->dw1);
+	dw3 = le32_to_cpu(complete_hdr->dw3);
+
 	/*
 	 * Use SAS+TMF status codes
 	 */
-	switch ((complete_hdr->dw0 & CMPLT_HDR_ABORT_STAT_MSK)
-			>> CMPLT_HDR_ABORT_STAT_OFF) {
+	switch ((dw0 & CMPLT_HDR_ABORT_STAT_MSK) >> CMPLT_HDR_ABORT_STAT_OFF) {
 	case STAT_IO_ABORTED:
 		/* this IO has been aborted by abort command */
 		ts->stat = SAS_ABORTED_TASK;
@@ -1686,19 +2206,16 @@
 	}
 
 	/* check for erroneous completion */
-	if ((complete_hdr->dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) {
+	if ((dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) {
 		u32 *error_info = hisi_sas_status_buf_addr_mem(slot);
 
 		slot_err_v3_hw(hisi_hba, task, slot);
 		if (ts->stat != SAS_DATA_UNDERRUN)
-			dev_info(dev, "erroneous completion iptt=%d task=%p dev id=%d "
-				"CQ hdr: 0x%x 0x%x 0x%x 0x%x "
-				"Error info: 0x%x 0x%x 0x%x 0x%x\n",
-				slot->idx, task, sas_dev->device_id,
-				complete_hdr->dw0, complete_hdr->dw1,
-				complete_hdr->act, complete_hdr->dw3,
-				error_info[0], error_info[1],
-				error_info[2], error_info[3]);
+			dev_info(dev, "erroneous completion iptt=%d task=%pK dev id=%d CQ hdr: 0x%x 0x%x 0x%x 0x%x Error info: 0x%x 0x%x 0x%x 0x%x\n",
+				 slot->idx, task, sas_dev->device_id,
+				 dw0, dw1, complete_hdr->act, dw3,
+				 error_info[0], error_info[1],
+				 error_info[2], error_info[3]);
 		if (unlikely(slot->abort))
 			return ts->stat;
 		goto out;
@@ -1715,20 +2232,16 @@
 	}
 	case SAS_PROTOCOL_SMP: {
 		struct scatterlist *sg_resp = &task->smp_task.smp_resp;
-		void *to;
+		void *to = page_address(sg_page(sg_resp));
 
 		ts->stat = SAM_STAT_GOOD;
-		to = kmap_atomic(sg_page(sg_resp));
 
-		dma_unmap_sg(dev, &task->smp_task.smp_resp, 1,
-			     DMA_FROM_DEVICE);
 		dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
 			     DMA_TO_DEVICE);
 		memcpy(to + sg_resp->offset,
 			hisi_sas_status_buf_addr_mem(slot) +
 		       sizeof(struct hisi_sas_err_record),
-		       sg_dma_len(sg_resp));
-		kunmap_atomic(to);
+		       sg_resp->length);
 		break;
 	}
 	case SAS_PROTOCOL_SATA:
@@ -1749,22 +2262,22 @@
 	}
 
 out:
-	hisi_sas_slot_task_free(hisi_hba, task, slot);
 	sts = ts->stat;
 	spin_lock_irqsave(&task->task_state_lock, flags);
 	if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
 		spin_unlock_irqrestore(&task->task_state_lock, flags);
-		dev_info(dev, "slot complete: task(%p) aborted\n", task);
+		dev_info(dev, "slot complete: task(%pK) aborted\n", task);
 		return SAS_ABORTED_TASK;
 	}
 	task->task_state_flags |= SAS_TASK_STATE_DONE;
 	spin_unlock_irqrestore(&task->task_state_lock, flags);
+	hisi_sas_slot_task_free(hisi_hba, task, slot);
 
 	if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) {
 		spin_lock_irqsave(&device->done_lock, flags);
 		if (test_bit(SAS_HA_FROZEN, &ha->state)) {
 			spin_unlock_irqrestore(&device->done_lock, flags);
-			dev_info(dev, "slot complete: task(%p) ignored\n ",
+			dev_info(dev, "slot complete: task(%pK) ignored\n ",
 				 task);
 			return sts;
 		}
@@ -1794,11 +2307,13 @@
 	while (rd_point != wr_point) {
 		struct hisi_sas_complete_v3_hdr *complete_hdr;
 		struct device *dev = hisi_hba->dev;
+		u32 dw1;
 		int iptt;
 
 		complete_hdr = &complete_queue[rd_point];
+		dw1 = le32_to_cpu(complete_hdr->dw1);
 
-		iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK;
+		iptt = dw1 & CMPLT_HDR_IPTT_MSK;
 		if (likely(iptt < HISI_SAS_COMMAND_ENTRIES_V3_HW)) {
 			slot = &hisi_hba->slot_info[iptt];
 			slot->cmplt_queue_slot = rd_point;
@@ -1829,21 +2344,67 @@
 	return IRQ_HANDLED;
 }
 
+static void setup_reply_map_v3_hw(struct hisi_hba *hisi_hba, int nvecs)
+{
+	const struct cpumask *mask;
+	int queue, cpu;
+
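+	/*
+	 * Map each CPU to the CQ whose MSI vector is affine to it, so a
+	 * command completes on the same CPU that submitted it; fall back
+	 * to a simple modulo spread if any affinity mask is unavailable.
+	 */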
+	for (queue = 0; queue < nvecs; queue++) {
+		struct hisi_sas_cq *cq = &hisi_hba->cq[queue];
+
+		mask = pci_irq_get_affinity(hisi_hba->pci_dev, queue +
+					    BASE_VECTORS_V3_HW);
+		if (!mask)
+			goto fallback;
+		cq->pci_irq_mask = mask;
+		for_each_cpu(cpu, mask)
+			hisi_hba->reply_map[cpu] = queue;
+	}
+	return;
+
+fallback:
+	for_each_possible_cpu(cpu)
+		hisi_hba->reply_map[cpu] = cpu % hisi_hba->queue_count;
+	/* Don't bother cleaning the CQ masks assigned so far */
+}
+
 static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
 {
 	struct device *dev = hisi_hba->dev;
 	struct pci_dev *pdev = hisi_hba->pci_dev;
-	int vectors, rc;
-	int i, k;
-	int max_msi = HISI_SAS_MSI_COUNT_V3_HW;
+	int vectors, rc, i;
+	int max_msi = HISI_SAS_MSI_COUNT_V3_HW, min_msi;
 
-	vectors = pci_alloc_irq_vectors(hisi_hba->pci_dev, 1,
-					max_msi, PCI_IRQ_MSI);
-	if (vectors < max_msi) {
-		dev_err(dev, "could not allocate all msi (%d)\n", vectors);
-		return -ENOENT;
+	if (auto_affine_msi_experimental) {
+		struct irq_affinity desc = {
+			.pre_vectors = BASE_VECTORS_V3_HW,
+		};
+
+		min_msi = MIN_AFFINE_VECTORS_V3_HW;
+
+		hisi_hba->reply_map = devm_kcalloc(dev, nr_cpu_ids,
+						   sizeof(unsigned int),
+						   GFP_KERNEL);
+		if (!hisi_hba->reply_map)
+			return -ENOMEM;
+		vectors = pci_alloc_irq_vectors_affinity(hisi_hba->pci_dev,
+							 min_msi, max_msi,
+							 PCI_IRQ_MSI |
+							 PCI_IRQ_AFFINITY,
+							 &desc);
+		if (vectors < 0)
+			return -ENOENT;
+		setup_reply_map_v3_hw(hisi_hba, vectors - BASE_VECTORS_V3_HW);
+	} else {
+		min_msi = max_msi;
+		vectors = pci_alloc_irq_vectors(hisi_hba->pci_dev, min_msi,
+						max_msi, PCI_IRQ_MSI);
+		if (vectors < 0)
+			return vectors;
 	}
 
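+	/*
+	 * The first BASE_VECTORS_V3_HW (16) vectors carry the phy, channel
+	 * and fatal interrupts; each vector above them drives one CQ.
+	 */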
+	hisi_hba->cq_nvecs = vectors - BASE_VECTORS_V3_HW;
+
 	rc = devm_request_irq(dev, pci_irq_vector(pdev, 1),
 			      int_phy_up_down_bcast_v3_hw, 0,
 			      DRV_NAME " phy", hisi_hba);
@@ -1859,7 +2420,7 @@
 	if (rc) {
 		dev_err(dev, "could not request chnl interrupt, rc=%d\n", rc);
 		rc = -ENOENT;
-		goto free_phy_irq;
+		goto free_irq_vectors;
 	}
 
 	rc = devm_request_irq(dev, pci_irq_vector(pdev, 11),
@@ -1868,23 +2429,24 @@
 	if (rc) {
 		dev_err(dev, "could not request fatal interrupt, rc=%d\n", rc);
 		rc = -ENOENT;
-		goto free_chnl_interrupt;
+		goto free_irq_vectors;
 	}
 
 	/* Init tasklets for cq only */
-	for (i = 0; i < hisi_hba->queue_count; i++) {
+	for (i = 0; i < hisi_hba->cq_nvecs; i++) {
 		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
 		struct tasklet_struct *t = &cq->tasklet;
+		int nr = hisi_sas_intr_conv ? 16 : 16 + i;
+		unsigned long irqflags = hisi_sas_intr_conv ? IRQF_SHARED : 0;
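+		/*
+		 * In converge mode all CQs share vector 16, so the handler
+		 * is registered with IRQF_SHARED.
+		 */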
 
-		rc = devm_request_irq(dev, pci_irq_vector(pdev, i+16),
-					  cq_interrupt_v3_hw, 0,
-					  DRV_NAME " cq", cq);
+		rc = devm_request_irq(dev, pci_irq_vector(pdev, nr),
+				      cq_interrupt_v3_hw, irqflags,
+				      DRV_NAME " cq", cq);
 		if (rc) {
-			dev_err(dev,
-				"could not request cq%d interrupt, rc=%d\n",
+			dev_err(dev, "could not request cq%d interrupt, rc=%d\n",
 				i, rc);
 			rc = -ENOENT;
-			goto free_cq_irqs;
+			goto free_irq_vectors;
 		}
 
 		tasklet_init(t, cq_tasklet_v3_hw, (unsigned long)cq);
@@ -1892,17 +2454,6 @@
 
 	return 0;
 
-free_cq_irqs:
-	for (k = 0; k < i; k++) {
-		struct hisi_sas_cq *cq = &hisi_hba->cq[k];
-
-		free_irq(pci_irq_vector(pdev, k+16), cq);
-	}
-	free_irq(pci_irq_vector(pdev, 11), hisi_hba);
-free_chnl_interrupt:
-	free_irq(pci_irq_vector(pdev, 2), hisi_hba);
-free_phy_irq:
-	free_irq(pci_irq_vector(pdev, 1), hisi_hba);
 free_irq_vectors:
 	pci_free_irq_vectors(pdev);
 	return rc;
@@ -1966,31 +2517,6 @@
 	return hisi_sas_read32(hisi_hba, PHY_STATE);
 }
 
-static void phy_get_events_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
-{
-	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
-	struct asd_sas_phy *sas_phy = &phy->sas_phy;
-	struct sas_phy *sphy = sas_phy->phy;
-	u32 reg_value;
-
-	/* loss dword sync */
-	reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DWS_LOST);
-	sphy->loss_of_dword_sync_count += reg_value;
-
-	/* phy reset problem */
-	reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_RESET_PROB);
-	sphy->phy_reset_problem_count += reg_value;
-
-	/* invalid dword */
-	reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_INVLD_DW);
-	sphy->invalid_dword_count += reg_value;
-
-	/* disparity err */
-	reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DISP_ERR);
-	sphy->running_disparity_error_count += reg_value;
-
-}
-
 static int disable_host_v3_hw(struct hisi_hba *hisi_hba)
 {
 	struct device *dev = hisi_hba->dev;
@@ -2061,7 +2587,7 @@
 		break;
 	default:
 		dev_err(dev, "write gpio: unsupported or bad reg type %d\n",
-				reg_type);
+			reg_type);
 		return -EINVAL;
 	}
 
@@ -2083,9 +2609,445 @@
 		msleep(delay_ms);
 	}
 
+	if (time >= timeout_ms) {
+		dev_dbg(dev, "Wait commands complete timeout!\n");
+		return;
+	}
+
 	dev_dbg(dev, "wait commands complete %dms\n", time);
 }
 
+static ssize_t intr_conv_v3_hw_show(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%u\n", hisi_sas_intr_conv);
+}
+static DEVICE_ATTR_RO(intr_conv_v3_hw);
+
+static void config_intr_coal_v3_hw(struct hisi_hba *hisi_hba)
+{
+	/* config those registers between enable and disable PHYs */
+	hisi_sas_stop_phys(hisi_hba);
+
+	if (hisi_hba->intr_coal_ticks == 0 ||
+	    hisi_hba->intr_coal_count == 0) {
+		hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
+		hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1);
+		hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1);
+	} else {
+		hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x3);
+		hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME,
+				 hisi_hba->intr_coal_ticks);
+		hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT,
+				 hisi_hba->intr_coal_count);
+	}
+	phys_init_v3_hw(hisi_hba);
+}
+
+static ssize_t intr_coal_ticks_v3_hw_show(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct hisi_hba *hisi_hba = shost_priv(shost);
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n",
+			 hisi_hba->intr_coal_ticks);
+}
+
+static ssize_t intr_coal_ticks_v3_hw_store(struct device *dev,
+					   struct device_attribute *attr,
+					   const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct hisi_hba *hisi_hba = shost_priv(shost);
+	u32 intr_coal_ticks;
+	int ret;
+
+	ret = kstrtou32(buf, 10, &intr_coal_ticks);
+	if (ret) {
+		dev_err(dev, "Input data of interrupt coalesce unmatch\n");
+		return -EINVAL;
+	}
+
+	if (intr_coal_ticks >= BIT(24)) {
+		dev_err(dev, "intr_coal_ticks must be less than 2^24!\n");
+		return -EINVAL;
+	}
+
+	hisi_hba->intr_coal_ticks = intr_coal_ticks;
+
+	config_intr_coal_v3_hw(hisi_hba);
+
+	return count;
+}
+static DEVICE_ATTR_RW(intr_coal_ticks_v3_hw);
+
+static ssize_t intr_coal_count_v3_hw_show(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct hisi_hba *hisi_hba = shost_priv(shost);
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n",
+			 hisi_hba->intr_coal_count);
+}
+
+static ssize_t intr_coal_count_v3_hw_store(struct device *dev,
+					   struct device_attribute *attr,
+					   const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct hisi_hba *hisi_hba = shost_priv(shost);
+	u32 intr_coal_count;
+	int ret;
+
+	ret = kstrtou32(buf, 10, &intr_coal_count);
+	if (ret) {
+		dev_err(dev, "Input data of interrupt coalesce unmatch\n");
+		return -EINVAL;
+	}
+
+	if (intr_coal_count >= BIT(8)) {
+		dev_err(dev, "intr_coal_count must be less than 2^8!\n");
+		return -EINVAL;
+	}
+
+	hisi_hba->intr_coal_count = intr_coal_count;
+
+	config_intr_coal_v3_hw(hisi_hba);
+
+	return count;
+}
+static DEVICE_ATTR_RW(intr_coal_count_v3_hw);
+
+static struct device_attribute *host_attrs_v3_hw[] = {
+	&dev_attr_phy_event_threshold,
+	&dev_attr_intr_conv_v3_hw,
+	&dev_attr_intr_coal_ticks_v3_hw,
+	&dev_attr_intr_coal_count_v3_hw,
+	NULL
+};
+
+static const struct hisi_sas_debugfs_reg_lu debugfs_port_reg_lu[] = {
+	HISI_SAS_DEBUGFS_REG(PHY_CFG),
+	HISI_SAS_DEBUGFS_REG(HARD_PHY_LINKRATE),
+	HISI_SAS_DEBUGFS_REG(PROG_PHY_LINK_RATE),
+	HISI_SAS_DEBUGFS_REG(PHY_CTRL),
+	HISI_SAS_DEBUGFS_REG(SL_CFG),
+	HISI_SAS_DEBUGFS_REG(AIP_LIMIT),
+	HISI_SAS_DEBUGFS_REG(SL_CONTROL),
+	HISI_SAS_DEBUGFS_REG(RX_PRIMS_STATUS),
+	HISI_SAS_DEBUGFS_REG(TX_ID_DWORD0),
+	HISI_SAS_DEBUGFS_REG(TX_ID_DWORD1),
+	HISI_SAS_DEBUGFS_REG(TX_ID_DWORD2),
+	HISI_SAS_DEBUGFS_REG(TX_ID_DWORD3),
+	HISI_SAS_DEBUGFS_REG(TX_ID_DWORD4),
+	HISI_SAS_DEBUGFS_REG(TX_ID_DWORD5),
+	HISI_SAS_DEBUGFS_REG(TX_ID_DWORD6),
+	HISI_SAS_DEBUGFS_REG(TXID_AUTO),
+	HISI_SAS_DEBUGFS_REG(RX_IDAF_DWORD0),
+	HISI_SAS_DEBUGFS_REG(RXOP_CHECK_CFG_H),
+	HISI_SAS_DEBUGFS_REG(STP_LINK_TIMER),
+	HISI_SAS_DEBUGFS_REG(STP_LINK_TIMEOUT_STATE),
+	HISI_SAS_DEBUGFS_REG(CON_CFG_DRIVER),
+	HISI_SAS_DEBUGFS_REG(SAS_SSP_CON_TIMER_CFG),
+	HISI_SAS_DEBUGFS_REG(SAS_SMP_CON_TIMER_CFG),
+	HISI_SAS_DEBUGFS_REG(SAS_STP_CON_TIMER_CFG),
+	HISI_SAS_DEBUGFS_REG(CHL_INT0),
+	HISI_SAS_DEBUGFS_REG(CHL_INT1),
+	HISI_SAS_DEBUGFS_REG(CHL_INT2),
+	HISI_SAS_DEBUGFS_REG(CHL_INT0_MSK),
+	HISI_SAS_DEBUGFS_REG(CHL_INT1_MSK),
+	HISI_SAS_DEBUGFS_REG(CHL_INT2_MSK),
+	HISI_SAS_DEBUGFS_REG(SAS_EC_INT_COAL_TIME),
+	HISI_SAS_DEBUGFS_REG(CHL_INT_COAL_EN),
+	HISI_SAS_DEBUGFS_REG(SAS_RX_TRAIN_TIMER),
+	HISI_SAS_DEBUGFS_REG(PHY_CTRL_RDY_MSK),
+	HISI_SAS_DEBUGFS_REG(PHYCTRL_NOT_RDY_MSK),
+	HISI_SAS_DEBUGFS_REG(PHYCTRL_DWS_RESET_MSK),
+	HISI_SAS_DEBUGFS_REG(PHYCTRL_PHY_ENA_MSK),
+	HISI_SAS_DEBUGFS_REG(SL_RX_BCAST_CHK_MSK),
+	HISI_SAS_DEBUGFS_REG(PHYCTRL_OOB_RESTART_MSK),
+	HISI_SAS_DEBUGFS_REG(DMA_TX_STATUS),
+	HISI_SAS_DEBUGFS_REG(DMA_RX_STATUS),
+	HISI_SAS_DEBUGFS_REG(COARSETUNE_TIME),
+	HISI_SAS_DEBUGFS_REG(ERR_CNT_DWS_LOST),
+	HISI_SAS_DEBUGFS_REG(ERR_CNT_RESET_PROB),
+	HISI_SAS_DEBUGFS_REG(ERR_CNT_INVLD_DW),
+	HISI_SAS_DEBUGFS_REG(ERR_CNT_CODE_ERR),
+	HISI_SAS_DEBUGFS_REG(ERR_CNT_DISP_ERR),
+	{}
+};
+
+static const struct hisi_sas_debugfs_reg debugfs_port_reg = {
+	.lu = debugfs_port_reg_lu,
+	.count = 0x100,
+	.base_off = PORT_BASE,
+	.read_port_reg = hisi_sas_phy_read32,
+};
+
+static const struct hisi_sas_debugfs_reg_lu debugfs_global_reg_lu[] = {
+	HISI_SAS_DEBUGFS_REG(DLVRY_QUEUE_ENABLE),
+	HISI_SAS_DEBUGFS_REG(PHY_CONTEXT),
+	HISI_SAS_DEBUGFS_REG(PHY_STATE),
+	HISI_SAS_DEBUGFS_REG(PHY_PORT_NUM_MA),
+	HISI_SAS_DEBUGFS_REG(PHY_CONN_RATE),
+	HISI_SAS_DEBUGFS_REG(ITCT_CLR),
+	HISI_SAS_DEBUGFS_REG(IO_SATA_BROKEN_MSG_ADDR_LO),
+	HISI_SAS_DEBUGFS_REG(IO_SATA_BROKEN_MSG_ADDR_HI),
+	HISI_SAS_DEBUGFS_REG(SATA_INITI_D2H_STORE_ADDR_LO),
+	HISI_SAS_DEBUGFS_REG(SATA_INITI_D2H_STORE_ADDR_HI),
+	HISI_SAS_DEBUGFS_REG(CFG_MAX_TAG),
+	HISI_SAS_DEBUGFS_REG(HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL),
+	HISI_SAS_DEBUGFS_REG(HGC_SAS_TXFAIL_RETRY_CTRL),
+	HISI_SAS_DEBUGFS_REG(HGC_GET_ITV_TIME),
+	HISI_SAS_DEBUGFS_REG(DEVICE_MSG_WORK_MODE),
+	HISI_SAS_DEBUGFS_REG(OPENA_WT_CONTI_TIME),
+	HISI_SAS_DEBUGFS_REG(I_T_NEXUS_LOSS_TIME),
+	HISI_SAS_DEBUGFS_REG(MAX_CON_TIME_LIMIT_TIME),
+	HISI_SAS_DEBUGFS_REG(BUS_INACTIVE_LIMIT_TIME),
+	HISI_SAS_DEBUGFS_REG(REJECT_TO_OPEN_LIMIT_TIME),
+	HISI_SAS_DEBUGFS_REG(CQ_INT_CONVERGE_EN),
+	HISI_SAS_DEBUGFS_REG(CFG_AGING_TIME),
+	HISI_SAS_DEBUGFS_REG(HGC_DFX_CFG2),
+	HISI_SAS_DEBUGFS_REG(CFG_ABT_SET_QUERY_IPTT),
+	HISI_SAS_DEBUGFS_REG(CFG_ABT_SET_IPTT_DONE),
+	HISI_SAS_DEBUGFS_REG(HGC_IOMB_PROC1_STATUS),
+	HISI_SAS_DEBUGFS_REG(CHNL_INT_STATUS),
+	HISI_SAS_DEBUGFS_REG(HGC_AXI_FIFO_ERR_INFO),
+	HISI_SAS_DEBUGFS_REG(INT_COAL_EN),
+	HISI_SAS_DEBUGFS_REG(OQ_INT_COAL_TIME),
+	HISI_SAS_DEBUGFS_REG(OQ_INT_COAL_CNT),
+	HISI_SAS_DEBUGFS_REG(ENT_INT_COAL_TIME),
+	HISI_SAS_DEBUGFS_REG(ENT_INT_COAL_CNT),
+	HISI_SAS_DEBUGFS_REG(OQ_INT_SRC),
+	HISI_SAS_DEBUGFS_REG(OQ_INT_SRC_MSK),
+	HISI_SAS_DEBUGFS_REG(ENT_INT_SRC1),
+	HISI_SAS_DEBUGFS_REG(ENT_INT_SRC2),
+	HISI_SAS_DEBUGFS_REG(ENT_INT_SRC3),
+	HISI_SAS_DEBUGFS_REG(ENT_INT_SRC_MSK1),
+	HISI_SAS_DEBUGFS_REG(ENT_INT_SRC_MSK2),
+	HISI_SAS_DEBUGFS_REG(ENT_INT_SRC_MSK3),
+	HISI_SAS_DEBUGFS_REG(CHNL_PHYUPDOWN_INT_MSK),
+	HISI_SAS_DEBUGFS_REG(CHNL_ENT_INT_MSK),
+	HISI_SAS_DEBUGFS_REG(HGC_COM_INT_MSK),
+	HISI_SAS_DEBUGFS_REG(SAS_ECC_INTR),
+	HISI_SAS_DEBUGFS_REG(SAS_ECC_INTR_MSK),
+	HISI_SAS_DEBUGFS_REG(HGC_ERR_STAT_EN),
+	HISI_SAS_DEBUGFS_REG(CQE_SEND_CNT),
+	HISI_SAS_DEBUGFS_REG(DLVRY_Q_0_DEPTH),
+	HISI_SAS_DEBUGFS_REG(DLVRY_Q_0_WR_PTR),
+	HISI_SAS_DEBUGFS_REG(DLVRY_Q_0_RD_PTR),
+	HISI_SAS_DEBUGFS_REG(HYPER_STREAM_ID_EN_CFG),
+	HISI_SAS_DEBUGFS_REG(OQ0_INT_SRC_MSK),
+	HISI_SAS_DEBUGFS_REG(COMPL_Q_0_DEPTH),
+	HISI_SAS_DEBUGFS_REG(COMPL_Q_0_WR_PTR),
+	HISI_SAS_DEBUGFS_REG(COMPL_Q_0_RD_PTR),
+	HISI_SAS_DEBUGFS_REG(AWQOS_AWCACHE_CFG),
+	HISI_SAS_DEBUGFS_REG(ARQOS_ARCACHE_CFG),
+	HISI_SAS_DEBUGFS_REG(HILINK_ERR_DFX),
+	HISI_SAS_DEBUGFS_REG(SAS_GPIO_CFG_0),
+	HISI_SAS_DEBUGFS_REG(SAS_GPIO_CFG_1),
+	HISI_SAS_DEBUGFS_REG(SAS_GPIO_TX_0_1),
+	HISI_SAS_DEBUGFS_REG(SAS_CFG_DRIVE_VLD),
+	{}
+};
+
+static const struct hisi_sas_debugfs_reg debugfs_global_reg = {
+	.lu = debugfs_global_reg_lu,
+	.count = 0x800,
+	.read_global_reg = hisi_sas_read32,
+};
+
+static const struct hisi_sas_debugfs_reg_lu debugfs_axi_reg_lu[] = {
+	HISI_SAS_DEBUGFS_REG(AM_CFG_MAX_TRANS),
+	HISI_SAS_DEBUGFS_REG(AM_CFG_SINGLE_PORT_MAX_TRANS),
+	HISI_SAS_DEBUGFS_REG(AXI_CFG),
+	HISI_SAS_DEBUGFS_REG(AM_ROB_ECC_ERR_ADDR),
+	{}
+};
+
+static const struct hisi_sas_debugfs_reg debugfs_axi_reg = {
+	.lu = debugfs_axi_reg_lu,
+	.count = 0x61,
+	.base_off = AXI_MASTER_CFG_BASE,
+	.read_global_reg = hisi_sas_read32,
+};
+
+static const struct hisi_sas_debugfs_reg_lu debugfs_ras_reg_lu[] = {
+	HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR1),
+	HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR0_MASK),
+	HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR1_MASK),
+	HISI_SAS_DEBUGFS_REG(CFG_SAS_RAS_INTR_MASK),
+	HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR2),
+	HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR2_MASK),
+	{}
+};
+
+static const struct hisi_sas_debugfs_reg debugfs_ras_reg = {
+	.lu = debugfs_ras_reg_lu,
+	.count = 0x10,
+	.base_off = RAS_BASE,
+	.read_global_reg = hisi_sas_read32,
+};
+
+static void debugfs_snapshot_prepare_v3_hw(struct hisi_hba *hisi_hba)
+{
+	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+
+	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0);
+
+	wait_cmds_complete_timeout_v3_hw(hisi_hba, 100, 5000);
+
+	hisi_sas_kill_tasklets(hisi_hba);
+}
+
+static void debugfs_snapshot_restore_v3_hw(struct hisi_hba *hisi_hba)
+{
+	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE,
+			 (u32)((1ULL << hisi_hba->queue_count) - 1));
+
+	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+}
+
+static void read_iost_itct_cache_v3_hw(struct hisi_hba *hisi_hba,
+				       enum hisi_sas_debugfs_cache_type type,
+				       u32 *cache)
+{
+	u32 cache_dw_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ *
+			    HISI_SAS_IOST_ITCT_CACHE_NUM;
+	u32 *buf = cache;
+	u32 i, val;
+
+	hisi_sas_write32(hisi_hba, TAB_RD_TYPE, type);
+
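+	/* Poll for the 0xffffffff marker indicating the cache dump is ready */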
+	for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_DW_SZ; i++) {
+		val = hisi_sas_read32(hisi_hba, TAB_DFX);
+		if (val == 0xffffffff)
+			break;
+	}
+
+	if (val != 0xffffffff) {
+		pr_err("Issue occur when reading IOST/ITCT cache!\n");
+		return;
+	}
+
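+	/* First dword is the marker just read; the rest stream out of TAB_DFX */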
+	memset(buf, 0, cache_dw_size * 4);
+	buf[0] = val;
+
+	for (i = 1; i < cache_dw_size; i++)
+		buf[i] = hisi_sas_read32(hisi_hba, TAB_DFX);
+}
+
+static void hisi_sas_bist_test_prep_v3_hw(struct hisi_hba *hisi_hba)
+{
+	u32 reg_val;
+	int phy_id = hisi_hba->debugfs_bist_phy_no;
+
+	/* disable PHY */
+	hisi_sas_phy_enable(hisi_hba, phy_id, 0);
+
+	/* disable ALOS */
+	reg_val = hisi_sas_phy_read32(hisi_hba, phy_id, SERDES_CFG);
+	reg_val |= CFG_ALOS_CHK_DISABLE_MSK;
+	hisi_sas_phy_write32(hisi_hba, phy_id, SERDES_CFG, reg_val);
+}
+
+static void hisi_sas_bist_test_restore_v3_hw(struct hisi_hba *hisi_hba)
+{
+	u32 reg_val;
+	int phy_id = hisi_hba->debugfs_bist_phy_no;
+
+	/* disable loopback */
+	reg_val = hisi_sas_phy_read32(hisi_hba, phy_id, SAS_PHY_BIST_CTRL);
+	reg_val &= ~(CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK |
+		     CFG_BIST_TEST_MSK);
+	hisi_sas_phy_write32(hisi_hba, phy_id, SAS_PHY_BIST_CTRL, reg_val);
+
+	/* enable ALOS */
+	reg_val = hisi_sas_phy_read32(hisi_hba, phy_id, SERDES_CFG);
+	reg_val &= ~CFG_ALOS_CHK_DISABLE_MSK;
+	hisi_sas_phy_write32(hisi_hba, phy_id, SERDES_CFG, reg_val);
+
+	/* restore the linkrate */
+	reg_val = hisi_sas_phy_read32(hisi_hba, phy_id, PROG_PHY_LINK_RATE);
+	/* init OOB link rate to 1.5 Gbit/s */
+	reg_val &= ~CFG_PROG_PHY_LINK_RATE_MSK;
+	reg_val |= (0x8 << CFG_PROG_PHY_LINK_RATE_OFF);
+	hisi_sas_phy_write32(hisi_hba, phy_id, PROG_PHY_LINK_RATE, reg_val);
+
+	/* enable PHY */
+	hisi_sas_phy_enable(hisi_hba, phy_id, 1);
+}
+
+#define SAS_PHY_BIST_CODE_INIT	0x1
+#define SAS_PHY_BIST_CODE1_INIT	0x80
+static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable)
+{
+	u32 reg_val, mode_tmp;
+	u32 linkrate = hisi_hba->debugfs_bist_linkrate;
+	u32 phy_id = hisi_hba->debugfs_bist_phy_no;
+	u32 code_mode = hisi_hba->debugfs_bist_code_mode;
+	u32 path_mode = hisi_hba->debugfs_bist_mode;
+	struct device *dev = hisi_hba->dev;
+
+	dev_info(dev, "BIST info:linkrate=%d phy_id=%d code_mode=%d path_mode=%d\n",
+		 linkrate, phy_id, code_mode, path_mode);
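+	/* loop-test mode: 1 = digital loopback, 2 = serdes loopback */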
+	mode_tmp = path_mode ? 2 : 1;
+	if (enable) {
+		/* some preparations before bist test */
+		hisi_sas_bist_test_prep_v3_hw(hisi_hba);
+
+		/* set link rate of bist test */
+		reg_val = hisi_sas_phy_read32(hisi_hba, phy_id,
+					      PROG_PHY_LINK_RATE);
+		reg_val &= ~CFG_PROG_PHY_LINK_RATE_MSK;
+		reg_val |= (linkrate << CFG_PROG_PHY_LINK_RATE_OFF);
+		hisi_sas_phy_write32(hisi_hba, phy_id,
+				     PROG_PHY_LINK_RATE, reg_val);
+
+		/* set code mode of bist test */
+		reg_val = hisi_sas_phy_read32(hisi_hba, phy_id,
+					      SAS_PHY_BIST_CTRL);
+		reg_val &= ~(CFG_BIST_MODE_SEL_MSK |
+				CFG_LOOP_TEST_MODE_MSK |
+				CFG_RX_BIST_EN_MSK |
+				CFG_TX_BIST_EN_MSK |
+				CFG_BIST_TEST_MSK);
+		reg_val |= ((code_mode << CFG_BIST_MODE_SEL_OFF) |
+			    (mode_tmp << CFG_LOOP_TEST_MODE_OFF) |
+			    CFG_BIST_TEST_MSK);
+		hisi_sas_phy_write32(hisi_hba, phy_id,
+				     SAS_PHY_BIST_CTRL, reg_val);
+
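+		/* allow the phy 100 ms in BIST mode before enabling the RX/TX generators */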
+		mdelay(100);
+		reg_val |= (CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK);
+		hisi_sas_phy_write32(hisi_hba, phy_id,
+				     SAS_PHY_BIST_CTRL, reg_val);
+
+		/* set the bist init value */
+		hisi_sas_phy_write32(hisi_hba, phy_id,
+				     SAS_PHY_BIST_CODE,
+				     SAS_PHY_BIST_CODE_INIT);
+		hisi_sas_phy_write32(hisi_hba, phy_id,
+				     SAS_PHY_BIST_CODE1,
+				     SAS_PHY_BIST_CODE1_INIT);
+
+		/* clear error bit */
+		mdelay(100);
+		hisi_sas_phy_read32(hisi_hba, phy_id, SAS_BIST_ERR_CNT);
+	} else {
+		/* disable bist test and restore the phy */
+		hisi_hba->debugfs_bist_cnt += hisi_sas_phy_read32(hisi_hba,
+				phy_id, SAS_BIST_ERR_CNT);
+		hisi_sas_bist_test_restore_v3_hw(hisi_hba);
+	}
+
+	return 0;
+}
+
 static struct scsi_host_template sht_v3_hw = {
 	.name			= DRV_NAME,
 	.module			= THIS_MODULE,
@@ -2096,33 +3058,31 @@
 	.scan_start		= hisi_sas_scan_start,
 	.change_queue_depth	= sas_change_queue_depth,
 	.bios_param		= sas_bios_param,
-	.can_queue		= 1,
 	.this_id		= -1,
-	.sg_tablesize		= SG_ALL,
+	.sg_tablesize		= HISI_SAS_SGE_PAGE_CNT,
+	.sg_prot_tablesize	= HISI_SAS_SGE_PAGE_CNT,
 	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
-	.use_clustering		= ENABLE_CLUSTERING,
 	.eh_device_reset_handler = sas_eh_device_reset_handler,
 	.eh_target_reset_handler = sas_eh_target_reset_handler,
 	.target_destroy		= sas_target_destroy,
 	.ioctl			= sas_ioctl,
-	.shost_attrs		= host_attrs,
+	.shost_attrs		= host_attrs_v3_hw,
+	.tag_alloc_policy	= BLK_TAG_ALLOC_RR,
+	.host_reset             = hisi_sas_host_reset,
 };
 
 static const struct hisi_sas_hw hisi_sas_v3_hw = {
 	.hw_init = hisi_sas_v3_init,
 	.setup_itct = setup_itct_v3_hw,
-	.max_command_entries = HISI_SAS_COMMAND_ENTRIES_V3_HW,
 	.get_wideport_bitmap = get_wideport_bitmap_v3_hw,
 	.complete_hdr_size = sizeof(struct hisi_sas_complete_v3_hdr),
 	.clear_itct = clear_itct_v3_hw,
-	.sl_notify = sl_notify_v3_hw,
+	.sl_notify_ssp = sl_notify_ssp_v3_hw,
 	.prep_ssp = prep_ssp_v3_hw,
 	.prep_smp = prep_smp_v3_hw,
 	.prep_stp = prep_ata_v3_hw,
 	.prep_abort = prep_abort_v3_hw,
-	.get_free_slot = get_free_slot_v3_hw,
 	.start_delivery = start_delivery_v3_hw,
-	.slot_complete = slot_complete_v3_hw,
 	.phys_init = phys_init_v3_hw,
 	.phy_start = start_phy_v3_hw,
 	.phy_disable = disable_phy_v3_hw,
@@ -2135,6 +3095,14 @@
 	.get_events = phy_get_events_v3_hw,
 	.write_gpio = write_gpio_v3_hw,
 	.wait_cmds_complete_timeout = wait_cmds_complete_timeout_v3_hw,
+	.debugfs_reg_array[DEBUGFS_GLOBAL] = &debugfs_global_reg,
+	.debugfs_reg_array[DEBUGFS_AXI] = &debugfs_axi_reg,
+	.debugfs_reg_array[DEBUGFS_RAS] = &debugfs_ras_reg,
+	.debugfs_reg_port = &debugfs_port_reg,
+	.snapshot_prepare = debugfs_snapshot_prepare_v3_hw,
+	.snapshot_restore = debugfs_snapshot_restore_v3_hw,
+	.read_iost_itct_cache = read_iost_itct_cache_v3_hw,
+	.set_bist = debugfs_set_bist_v3_hw,
 };
 
 static struct Scsi_Host *
@@ -2152,18 +3120,23 @@
 	hisi_hba = shost_priv(shost);
 
 	INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
+	INIT_WORK(&hisi_hba->debugfs_work, hisi_sas_debugfs_work_handler);
 	hisi_hba->hw = &hisi_sas_v3_hw;
 	hisi_hba->pci_dev = pdev;
 	hisi_hba->dev = dev;
 	hisi_hba->shost = shost;
 	SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;
 
-	timer_setup(&hisi_hba->timer, NULL, 0);
+	if (prot_mask & ~HISI_SAS_PROT_MASK)
+		dev_err(dev, "unsupported protection mask 0x%x, using default (0x0)\n",
+			prot_mask);
+	else
+		hisi_hba->prot_mask = prot_mask;
 
 	if (hisi_sas_get_fw_info(hisi_hba) < 0)
 		goto err_out;
 
-	if (hisi_sas_alloc(hisi_hba, shost)) {
+	if (hisi_sas_alloc(hisi_hba)) {
 		hisi_sas_free(hisi_hba);
 		goto err_out;
 	}
@@ -2196,14 +3169,13 @@
 	if (rc)
 		goto err_out_disable_device;
 
-	if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) ||
-	    (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)) {
-		if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) ||
-		   (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) {
-			dev_err(dev, "No usable DMA addressing method\n");
-			rc = -EIO;
-			goto err_out_regions;
-		}
+	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (rc)
+		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (rc) {
+		dev_err(dev, "No usable DMA addressing method\n");
+		rc = -ENODEV;
+		goto err_out_regions;
 	}
 
 	shost = hisi_sas_shost_alloc_pci(pdev);
@@ -2218,7 +3190,7 @@
 
 	hisi_hba->regs = pcim_iomap(pdev, 5, 0);
 	if (!hisi_hba->regs) {
-		dev_err(dev, "cannot map register.\n");
+		dev_err(dev, "cannot map register\n");
 		rc = -ENOMEM;
 		goto err_out_ha;
 	}
@@ -2242,22 +3214,32 @@
 	shost->max_lun = ~0;
 	shost->max_channel = 1;
 	shost->max_cmd_len = 16;
-	shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
-	shost->can_queue = hisi_hba->hw->max_command_entries;
-	shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
+	shost->can_queue = HISI_SAS_UNRESERVED_IPTT;
+	shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT;
 
 	sha->sas_ha_name = DRV_NAME;
 	sha->dev = dev;
 	sha->lldd_module = THIS_MODULE;
 	sha->sas_addr = &hisi_hba->sas_addr[0];
 	sha->num_phys = hisi_hba->n_phy;
-	sha->core.shost = hisi_hba->shost;
 
 	for (i = 0; i < hisi_hba->n_phy; i++) {
 		sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
 		sha->sas_port[i] = &hisi_hba->port[i].sas_port;
 	}
 
+	if (hisi_hba->prot_mask) {
+		dev_info(dev, "Registering for DIF/DIX prot_mask=0x%x\n",
+			 prot_mask);
+		scsi_host_set_prot(hisi_hba->shost, prot_mask);
+		if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK)
+			scsi_host_set_guard(hisi_hba->shost,
+					    SHOST_DIX_GUARD_CRC);
+	}
+
+	if (hisi_sas_debugfs_enable)
+		hisi_sas_debugfs_init(hisi_hba);
+
 	rc = scsi_add_host(shost, dev);
 	if (rc)
 		goto err_out_ha;
@@ -2294,10 +3276,11 @@
 	free_irq(pci_irq_vector(pdev, 1), hisi_hba);
 	free_irq(pci_irq_vector(pdev, 2), hisi_hba);
 	free_irq(pci_irq_vector(pdev, 11), hisi_hba);
-	for (i = 0; i < hisi_hba->queue_count; i++) {
+	for (i = 0; i < hisi_hba->cq_nvecs; i++) {
 		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
+		int nr = hisi_sas_intr_conv ? 16 : 16 + i;
 
-		free_irq(pci_irq_vector(pdev, i+16), cq);
+		free_irq(pci_irq_vector(pdev, nr), cq);
 	}
 	pci_free_irq_vectors(pdev);
 }
@@ -2309,6 +3292,8 @@
 	struct hisi_hba *hisi_hba = sha->lldd_ha;
 	struct Scsi_Host *shost = sha->core.shost;
 
+	hisi_sas_debugfs_exit(hisi_hba);
+
 	if (timer_pending(&hisi_hba->timer))
 		del_timer(&hisi_hba->timer);
 
@@ -2323,161 +3308,6 @@
 	scsi_host_put(shost);
 }
 
-static const struct hisi_sas_hw_error sas_ras_intr0_nfe[] = {
-	{ .irq_msk = BIT(19), .msg = "HILINK_INT" },
-	{ .irq_msk = BIT(20), .msg = "HILINK_PLL0_OUT_OF_LOCK" },
-	{ .irq_msk = BIT(21), .msg = "HILINK_PLL1_OUT_OF_LOCK" },
-	{ .irq_msk = BIT(22), .msg = "HILINK_LOSS_OF_REFCLK0" },
-	{ .irq_msk = BIT(23), .msg = "HILINK_LOSS_OF_REFCLK1" },
-	{ .irq_msk = BIT(24), .msg = "DMAC0_TX_POISON" },
-	{ .irq_msk = BIT(25), .msg = "DMAC1_TX_POISON" },
-	{ .irq_msk = BIT(26), .msg = "DMAC2_TX_POISON" },
-	{ .irq_msk = BIT(27), .msg = "DMAC3_TX_POISON" },
-	{ .irq_msk = BIT(28), .msg = "DMAC4_TX_POISON" },
-	{ .irq_msk = BIT(29), .msg = "DMAC5_TX_POISON" },
-	{ .irq_msk = BIT(30), .msg = "DMAC6_TX_POISON" },
-	{ .irq_msk = BIT(31), .msg = "DMAC7_TX_POISON" },
-};
-
-static const struct hisi_sas_hw_error sas_ras_intr1_nfe[] = {
-	{ .irq_msk = BIT(0), .msg = "RXM_CFG_MEM3_ECC2B_INTR" },
-	{ .irq_msk = BIT(1), .msg = "RXM_CFG_MEM2_ECC2B_INTR" },
-	{ .irq_msk = BIT(2), .msg = "RXM_CFG_MEM1_ECC2B_INTR" },
-	{ .irq_msk = BIT(3), .msg = "RXM_CFG_MEM0_ECC2B_INTR" },
-	{ .irq_msk = BIT(4), .msg = "HGC_CQE_ECC2B_INTR" },
-	{ .irq_msk = BIT(5), .msg = "LM_CFG_IOSTL_ECC2B_INTR" },
-	{ .irq_msk = BIT(6), .msg = "LM_CFG_ITCTL_ECC2B_INTR" },
-	{ .irq_msk = BIT(7), .msg = "HGC_ITCT_ECC2B_INTR" },
-	{ .irq_msk = BIT(8), .msg = "HGC_IOST_ECC2B_INTR" },
-	{ .irq_msk = BIT(9), .msg = "HGC_DQE_ECC2B_INTR" },
-	{ .irq_msk = BIT(10), .msg = "DMAC0_RAM_ECC2B_INTR" },
-	{ .irq_msk = BIT(11), .msg = "DMAC1_RAM_ECC2B_INTR" },
-	{ .irq_msk = BIT(12), .msg = "DMAC2_RAM_ECC2B_INTR" },
-	{ .irq_msk = BIT(13), .msg = "DMAC3_RAM_ECC2B_INTR" },
-	{ .irq_msk = BIT(14), .msg = "DMAC4_RAM_ECC2B_INTR" },
-	{ .irq_msk = BIT(15), .msg = "DMAC5_RAM_ECC2B_INTR" },
-	{ .irq_msk = BIT(16), .msg = "DMAC6_RAM_ECC2B_INTR" },
-	{ .irq_msk = BIT(17), .msg = "DMAC7_RAM_ECC2B_INTR" },
-	{ .irq_msk = BIT(18), .msg = "OOO_RAM_ECC2B_INTR" },
-	{ .irq_msk = BIT(20), .msg = "HGC_DQE_POISON_INTR" },
-	{ .irq_msk = BIT(21), .msg = "HGC_IOST_POISON_INTR" },
-	{ .irq_msk = BIT(22), .msg = "HGC_ITCT_POISON_INTR" },
-	{ .irq_msk = BIT(23), .msg = "HGC_ITCT_NCQ_POISON_INTR" },
-	{ .irq_msk = BIT(24), .msg = "DMAC0_RX_POISON" },
-	{ .irq_msk = BIT(25), .msg = "DMAC1_RX_POISON" },
-	{ .irq_msk = BIT(26), .msg = "DMAC2_RX_POISON" },
-	{ .irq_msk = BIT(27), .msg = "DMAC3_RX_POISON" },
-	{ .irq_msk = BIT(28), .msg = "DMAC4_RX_POISON" },
-	{ .irq_msk = BIT(29), .msg = "DMAC5_RX_POISON" },
-	{ .irq_msk = BIT(30), .msg = "DMAC6_RX_POISON" },
-	{ .irq_msk = BIT(31), .msg = "DMAC7_RX_POISON" },
-};
-
-static const struct hisi_sas_hw_error sas_ras_intr2_nfe[] = {
-	{ .irq_msk = BIT(0), .msg = "DMAC0_AXI_BUS_ERR" },
-	{ .irq_msk = BIT(1), .msg = "DMAC1_AXI_BUS_ERR" },
-	{ .irq_msk = BIT(2), .msg = "DMAC2_AXI_BUS_ERR" },
-	{ .irq_msk = BIT(3), .msg = "DMAC3_AXI_BUS_ERR" },
-	{ .irq_msk = BIT(4), .msg = "DMAC4_AXI_BUS_ERR" },
-	{ .irq_msk = BIT(5), .msg = "DMAC5_AXI_BUS_ERR" },
-	{ .irq_msk = BIT(6), .msg = "DMAC6_AXI_BUS_ERR" },
-	{ .irq_msk = BIT(7), .msg = "DMAC7_AXI_BUS_ERR" },
-	{ .irq_msk = BIT(8), .msg = "DMAC0_FIFO_OMIT_ERR" },
-	{ .irq_msk = BIT(9), .msg = "DMAC1_FIFO_OMIT_ERR" },
-	{ .irq_msk = BIT(10), .msg = "DMAC2_FIFO_OMIT_ERR" },
-	{ .irq_msk = BIT(11), .msg = "DMAC3_FIFO_OMIT_ERR" },
-	{ .irq_msk = BIT(12), .msg = "DMAC4_FIFO_OMIT_ERR" },
-	{ .irq_msk = BIT(13), .msg = "DMAC5_FIFO_OMIT_ERR" },
-	{ .irq_msk = BIT(14), .msg = "DMAC6_FIFO_OMIT_ERR" },
-	{ .irq_msk = BIT(15), .msg = "DMAC7_FIFO_OMIT_ERR" },
-	{ .irq_msk = BIT(16), .msg = "HGC_RLSE_SLOT_UNMATCH" },
-	{ .irq_msk = BIT(17), .msg = "HGC_LM_ADD_FCH_LIST_ERR" },
-	{ .irq_msk = BIT(18), .msg = "HGC_AXI_BUS_ERR" },
-	{ .irq_msk = BIT(19), .msg = "HGC_FIFO_OMIT_ERR" },
-};
-
-static bool process_non_fatal_error_v3_hw(struct hisi_hba *hisi_hba)
-{
-	struct device *dev = hisi_hba->dev;
-	const struct hisi_sas_hw_error *ras_error;
-	bool need_reset = false;
-	u32 irq_value;
-	int i;
-
-	irq_value = hisi_sas_read32(hisi_hba, SAS_RAS_INTR0);
-	for (i = 0; i < ARRAY_SIZE(sas_ras_intr0_nfe); i++) {
-		ras_error = &sas_ras_intr0_nfe[i];
-		if (ras_error->irq_msk & irq_value) {
-			dev_warn(dev, "SAS_RAS_INTR0: %s(irq_value=0x%x) found.\n",
-					ras_error->msg, irq_value);
-			need_reset = true;
-		}
-	}
-	hisi_sas_write32(hisi_hba, SAS_RAS_INTR0, irq_value);
-
-	irq_value = hisi_sas_read32(hisi_hba, SAS_RAS_INTR1);
-	for (i = 0; i < ARRAY_SIZE(sas_ras_intr1_nfe); i++) {
-		ras_error = &sas_ras_intr1_nfe[i];
-		if (ras_error->irq_msk & irq_value) {
-			dev_warn(dev, "SAS_RAS_INTR1: %s(irq_value=0x%x) found.\n",
-					ras_error->msg, irq_value);
-			need_reset = true;
-		}
-	}
-	hisi_sas_write32(hisi_hba, SAS_RAS_INTR1, irq_value);
-
-	irq_value = hisi_sas_read32(hisi_hba, SAS_RAS_INTR2);
-	for (i = 0; i < ARRAY_SIZE(sas_ras_intr2_nfe); i++) {
-		ras_error = &sas_ras_intr2_nfe[i];
-		if (ras_error->irq_msk & irq_value) {
-			dev_warn(dev, "SAS_RAS_INTR2: %s(irq_value=0x%x) found.\n",
-					ras_error->msg, irq_value);
-			need_reset = true;
-		}
-	}
-	hisi_sas_write32(hisi_hba, SAS_RAS_INTR2, irq_value);
-
-	return need_reset;
-}
-
-static pci_ers_result_t hisi_sas_error_detected_v3_hw(struct pci_dev *pdev,
-		pci_channel_state_t state)
-{
-	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
-	struct hisi_hba *hisi_hba = sha->lldd_ha;
-	struct device *dev = hisi_hba->dev;
-
-	dev_info(dev, "PCI error: detected callback, state(%d)!!\n", state);
-	if (state == pci_channel_io_perm_failure)
-		return PCI_ERS_RESULT_DISCONNECT;
-
-	if (process_non_fatal_error_v3_hw(hisi_hba))
-		return PCI_ERS_RESULT_NEED_RESET;
-
-	return PCI_ERS_RESULT_CAN_RECOVER;
-}
-
-static pci_ers_result_t hisi_sas_mmio_enabled_v3_hw(struct pci_dev *pdev)
-{
-	return PCI_ERS_RESULT_RECOVERED;
-}
-
-static pci_ers_result_t hisi_sas_slot_reset_v3_hw(struct pci_dev *pdev)
-{
-	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
-	struct hisi_hba *hisi_hba = sha->lldd_ha;
-	struct device *dev = hisi_hba->dev;
-	HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
-
-	dev_info(dev, "PCI error: slot reset callback!!\n");
-	queue_work(hisi_hba->wq, &r.work);
-	wait_for_completion(r.completion);
-	if (r.done)
-		return PCI_ERS_RESULT_RECOVERED;
-
-	return PCI_ERS_RESULT_DISCONNECT;
-}
-
 static void hisi_sas_reset_prepare_v3_hw(struct pci_dev *pdev)
 {
 	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
@@ -2524,7 +3354,7 @@
 	struct hisi_hba *hisi_hba = sha->lldd_ha;
 	struct device *dev = hisi_hba->dev;
 	struct Scsi_Host *shost = hisi_hba->shost;
-	u32 device_state;
+	pci_power_t device_state;
 	int rc;
 
 	if (!pdev->pm_cap) {
@@ -2570,23 +3400,29 @@
 	struct Scsi_Host *shost = hisi_hba->shost;
 	struct device *dev = hisi_hba->dev;
 	unsigned int rc;
-	u32 device_state = pdev->current_state;
+	pci_power_t device_state = pdev->current_state;
 
 	dev_warn(dev, "resuming from operating state [D%d]\n",
-			device_state);
+		 device_state);
 	pci_set_power_state(pdev, PCI_D0);
 	pci_enable_wake(pdev, PCI_D0, 0);
 	pci_restore_state(pdev);
 	rc = pci_enable_device(pdev);
-	if (rc)
+	if (rc) {
 		dev_err(dev, "enable device failed during resume (%d)\n", rc);
+		return rc;
+	}
 
 	pci_set_master(pdev);
 	scsi_unblock_requests(shost);
 	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
 
 	sas_prep_resume_ha(sha);
-	init_reg_v3_hw(hisi_hba);
+	rc = hw_init_v3_hw(hisi_hba);
+	if (rc) {
+		scsi_remove_host(shost);
+		pci_disable_device(pdev);
+		return rc;
+	}
 	hisi_hba->hw->phys_init(hisi_hba);
 	sas_resume_ha(sha);
 	clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
@@ -2601,9 +3437,6 @@
 MODULE_DEVICE_TABLE(pci, sas_v3_pci_table);
 
 static const struct pci_error_handlers hisi_sas_err_handler = {
-	.error_detected	= hisi_sas_error_detected_v3_hw,
-	.mmio_enabled	= hisi_sas_mmio_enabled_v3_hw,
-	.slot_reset	= hisi_sas_slot_reset_v3_hw,
 	.reset_prepare	= hisi_sas_reset_prepare_v3_hw,
 	.reset_done	= hisi_sas_reset_done_v3_hw,
 };
@@ -2619,6 +3452,7 @@
 };
 
 module_pci_driver(sas_v3_pci_driver);
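+/* intr_conv is read-only at load time; when set, all completion queues converge onto one interrupt vector */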
+module_param_named(intr_conv, hisi_sas_intr_conv, bool, 0444);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index ea4b0bb..55522b7 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  *  hosts.c Copyright (C) 1992 Drew Eckhardt
  *          Copyright (C) 1993, 1994, 1995 Eric Youngdale
@@ -222,18 +223,9 @@
 	if (error)
 		goto fail;
 
-	if (shost_use_blk_mq(shost)) {
-		error = scsi_mq_setup_tags(shost);
-		if (error)
-			goto fail;
-	} else {
-		shost->bqt = blk_init_tags(shost->can_queue,
-				shost->hostt->tag_alloc_policy);
-		if (!shost->bqt) {
-			error = -ENOMEM;
-			goto fail;
-		}
-	}
+	error = scsi_mq_setup_tags(shost);
+	if (error)
+		goto fail;
 
 	if (!shost->shost_gendev.parent)
 		shost->shost_gendev.parent = dev ? dev : &platform_bus;
@@ -309,8 +301,7 @@
 	pm_runtime_disable(&shost->shost_gendev);
 	pm_runtime_set_suspended(&shost->shost_gendev);
 	pm_runtime_put_noidle(&shost->shost_gendev);
-	if (shost_use_blk_mq(shost))
-		scsi_mq_destroy_tags(shost);
+	scsi_mq_destroy_tags(shost);
  fail:
 	return error;
 }
@@ -344,13 +335,8 @@
 		kfree(dev_name(&shost->shost_dev));
 	}
 
-	if (shost_use_blk_mq(shost)) {
-		if (shost->tag_set.tags)
-			scsi_mq_destroy_tags(shost);
-	} else {
-		if (shost->bqt)
-			blk_free_tags(shost->bqt);
-	}
+	if (shost->tag_set.tags)
+		scsi_mq_destroy_tags(shost);
 
 	kfree(shost->shost_data);
 
@@ -431,7 +417,6 @@
 	shost->sg_prot_tablesize = sht->sg_prot_tablesize;
 	shost->cmd_per_lun = sht->cmd_per_lun;
 	shost->unchecked_isa_dma = sht->unchecked_isa_dma;
-	shost->use_clustering = sht->use_clustering;
 	shost->no_write_same = sht->no_write_same;
 
 	if (shost_eh_deadline == -1 || !sht->eh_host_reset_handler)
@@ -464,6 +449,11 @@
 	else
 		shost->max_sectors = SCSI_DEFAULT_MAX_SECTORS;
 
+	if (sht->max_segment_size)
+		shost->max_segment_size = sht->max_segment_size;
+	else
+		shost->max_segment_size = BLK_MAX_SEGMENT_SIZE;
+
 	/*
 	 * assume a 4GB boundary, if not set
 	 */
@@ -472,7 +462,8 @@
 	else
 		shost->dma_boundary = 0xffffffff;
 
-	shost->use_blk_mq = scsi_use_blk_mq || shost->hostt->force_blk_mq;
+	if (sht->virt_boundary_mask)
+		shost->virt_boundary_mask = sht->virt_boundary_mask;
 
 	device_initialize(&shost->shost_gendev);
 	dev_set_name(&shost->shost_gendev, "host%d", shost->host_no);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index c120929..216e557 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -21,7 +21,6 @@
 #include <linux/interrupt.h>
 #include <linux/types.h>
 #include <linux/pci.h>
-#include <linux/pci-aspm.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
@@ -60,7 +59,7 @@
  * HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.'
  * with an optional trailing '-' followed by a byte value (0-255).
  */
-#define HPSA_DRIVER_VERSION "3.4.20-125"
+#define HPSA_DRIVER_VERSION "3.4.20-170"
 #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
 #define HPSA "hpsa"
 
@@ -73,6 +72,8 @@
 
 /*define how many times we will try a command because of bus resets */
 #define MAX_CMD_RETRIES 3
+/* How long to wait before giving up on a command */
+#define HPSA_EH_PTRAID_TIMEOUT (240 * HZ)
 
 /* Embedded module documentation macros - see modules.h */
 MODULE_AUTHOR("Hewlett-Packard Company");
@@ -251,10 +252,11 @@
 
 static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
 static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
-static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
+static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd,
+		      void __user *arg);
 
 #ifdef CONFIG_COMPAT
-static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
+static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd,
 	void __user *arg);
 #endif
 
@@ -343,11 +345,6 @@
 	return c->scsi_cmd == SCSI_CMD_IDLE;
 }
 
-static inline bool hpsa_is_pending_event(struct CommandList *c)
-{
-	return c->reset_pending;
-}
-
 /* extract sense key, asc, and ascq from sense data.  -1 means invalid. */
 static void decode_sense_data(const u8 *sense_data, int sense_data_len,
 			u8 *sense_key, u8 *asc, u8 *ascq)
@@ -965,7 +962,6 @@
 	.scan_finished		= hpsa_scan_finished,
 	.change_queue_depth	= hpsa_change_queue_depth,
 	.this_id		= -1,
-	.use_clustering		= ENABLE_CLUSTERING,
 	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
 	.ioctl			= hpsa_ioctl,
 	.slave_alloc		= hpsa_slave_alloc,
@@ -1144,6 +1140,8 @@
 {
 	dial_down_lockup_detection_during_fw_flash(h, c);
 	atomic_inc(&h->commands_outstanding);
+	if (c->device)
+		atomic_inc(&c->device->commands_outstanding);
 
 	reply_queue = h->reply_map[raw_smp_processor_id()];
 	switch (c->cmd_type) {
@@ -1167,9 +1165,6 @@
 
 static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
 {
-	if (unlikely(hpsa_is_pending_event(c)))
-		return finish_cmd(c);
-
 	__enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
 }
 
@@ -1328,7 +1323,7 @@
 		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
 			" suspect firmware bug or unsupported hardware "
 			"configuration.\n");
-			return -1;
+		return -1;
 	}
 
 lun_assigned:
@@ -1842,25 +1837,33 @@
 	return count;
 }
 
+#define NUM_WAIT 20
 static void hpsa_wait_for_outstanding_commands_for_dev(struct ctlr_info *h,
 						struct hpsa_scsi_dev_t *device)
 {
 	int cmds = 0;
 	int waits = 0;
+	int num_wait = NUM_WAIT;
+
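+	/* External (pass-through RAID) devices may need much longer to drain */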
+	if (device->external)
+		num_wait = HPSA_EH_PTRAID_TIMEOUT;
 
 	while (1) {
 		cmds = hpsa_find_outstanding_commands_for_dev(h, device);
 		if (cmds == 0)
 			break;
-		if (++waits > 20)
+		if (++waits > num_wait)
 			break;
 		msleep(1000);
 	}
 
-	if (waits > 20)
+	if (waits > num_wait) {
 		dev_warn(&h->pdev->dev,
-			"%s: removing device with %d outstanding commands!\n",
-			__func__, cmds);
+			"%s: removing device [%d:%d:%d:%d] with %d outstanding commands!\n",
+			__func__,
+			h->scsi_host->host_no,
+			device->bus, device->target, device->lun, cmds);
+	}
 }
 
 static void hpsa_remove_device(struct ctlr_info *h,
@@ -2131,11 +2134,16 @@
 	sdev->no_uld_attach = !sd || !sd->expose_device;
 
 	if (sd) {
-		if (sd->external)
+		sd->was_removed = 0;
+		if (sd->external) {
 			queue_depth = EXTERNAL_QD;
-		else
+			sdev->eh_timeout = HPSA_EH_PTRAID_TIMEOUT;
+			blk_queue_rq_timeout(sdev->request_queue,
+						HPSA_EH_PTRAID_TIMEOUT);
+		} else {
 			queue_depth = sd->queue_depth != 0 ?
 					sd->queue_depth : sdev->host->can_queue;
+		}
 	} else
 		queue_depth = sdev->host->can_queue;
 
@@ -2146,7 +2154,12 @@
 
 static void hpsa_slave_destroy(struct scsi_device *sdev)
 {
-	/* nothing to do. */
+	struct hpsa_scsi_dev_t *hdev = NULL;
+
+	hdev = sdev->hostdata;
+
+	if (hdev)
+		hdev->was_removed = 1;
 }
 
 static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
@@ -2240,8 +2253,8 @@
 
 	chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
 	chain_size = le32_to_cpu(cp->sg[0].length);
-	temp64 = pci_map_single(h->pdev, chain_block, chain_size,
-				PCI_DMA_TODEVICE);
+	temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_size,
+				DMA_TO_DEVICE);
 	if (dma_mapping_error(&h->pdev->dev, temp64)) {
 		/* prevent subsequent unmapping */
 		cp->sg->address = 0;
@@ -2261,7 +2274,7 @@
 	chain_sg = cp->sg;
 	temp64 = le64_to_cpu(chain_sg->address);
 	chain_size = le32_to_cpu(cp->sg[0].length);
-	pci_unmap_single(h->pdev, temp64, chain_size, PCI_DMA_TODEVICE);
+	dma_unmap_single(&h->pdev->dev, temp64, chain_size, DMA_TO_DEVICE);
 }
 
 static int hpsa_map_sg_chain_block(struct ctlr_info *h,
@@ -2277,8 +2290,8 @@
 	chain_len = sizeof(*chain_sg) *
 		(le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
 	chain_sg->Len = cpu_to_le32(chain_len);
-	temp64 = pci_map_single(h->pdev, chain_block, chain_len,
-				PCI_DMA_TODEVICE);
+	temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_len,
+				DMA_TO_DEVICE);
 	if (dma_mapping_error(&h->pdev->dev, temp64)) {
 		/* prevent subsequent unmapping */
 		chain_sg->Addr = cpu_to_le64(0);
@@ -2297,8 +2310,8 @@
 		return;
 
 	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
-	pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
-			le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
+	dma_unmap_single(&h->pdev->dev, le64_to_cpu(chain_sg->Addr),
+			le32_to_cpu(chain_sg->Len), DMA_TO_DEVICE);
 }
 
 
@@ -2320,6 +2333,8 @@
 	case IOACCEL2_SERV_RESPONSE_COMPLETE:
 		switch (c2->error_data.status) {
 		case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
+			if (cmd)
+				cmd->result = 0;
 			break;
 		case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
 			cmd->result |= SAM_STAT_CHECK_CONDITION;
@@ -2414,13 +2429,16 @@
 		break;
 	}
 
+	if (dev->in_reset)
+		retry = 0;
+
 	return retry;	/* retry on raid path? */
 }
 
 static void hpsa_cmd_resolve_events(struct ctlr_info *h,
 		struct CommandList *c)
 {
-	bool do_wake = false;
+	struct hpsa_scsi_dev_t *dev = c->device;
 
 	/*
 	 * Reset c->scsi_cmd here so that the reset handler will know
@@ -2429,25 +2447,12 @@
 	 */
 	c->scsi_cmd = SCSI_CMD_IDLE;
 	mb();	/* Declare command idle before checking for pending events. */
-	if (c->reset_pending) {
-		unsigned long flags;
-		struct hpsa_scsi_dev_t *dev;
-
-		/*
-		 * There appears to be a reset pending; lock the lock and
-		 * reconfirm.  If so, then decrement the count of outstanding
-		 * commands and wake the reset command if this is the last one.
-		 */
-		spin_lock_irqsave(&h->lock, flags);
-		dev = c->reset_pending;		/* Re-fetch under the lock. */
-		if (dev && atomic_dec_and_test(&dev->reset_cmds_out))
-			do_wake = true;
-		c->reset_pending = NULL;
-		spin_unlock_irqrestore(&h->lock, flags);
+	if (dev) {
+		atomic_dec(&dev->commands_outstanding);
+		if (dev->in_reset &&
+			atomic_read(&dev->commands_outstanding) <= 0)
+			wake_up_all(&h->event_sync_wait_queue);
 	}
-
-	if (do_wake)
-		wake_up_all(&h->event_sync_wait_queue);
 }
 
 static void hpsa_cmd_resolve_and_free(struct ctlr_info *h,
@@ -2479,8 +2484,10 @@
 
 	/* check for good status */
 	if (likely(c2->error_data.serv_response == 0 &&
-			c2->error_data.status == 0))
+			c2->error_data.status == 0)) {
+		cmd->result = 0;
 		return hpsa_cmd_free_and_done(h, c, cmd);
+	}
 
 	/*
 	 * Any RAID offload error results in retry which will use
@@ -2496,6 +2503,11 @@
 			dev->offload_to_be_enabled = 0;
 		}
 
+		if (dev->in_reset) {
+			cmd->result = DID_RESET << 16;
+			return hpsa_cmd_free_and_done(h, c, cmd);
+		}
+
 		return hpsa_retry_cmd(h, c);
 	}
 
@@ -2574,6 +2586,12 @@
 	cmd->result = (DID_OK << 16); 		/* host byte */
 	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
 
+	/* SCSI command has already been cleaned up in SML */
+	if (dev->was_removed) {
+		hpsa_cmd_resolve_and_free(h, cp);
+		return;
+	}
+
 	if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1) {
 		if (dev->physical_device && dev->expose_device &&
 			dev->removed) {
@@ -2595,10 +2613,6 @@
 		return hpsa_cmd_free_and_done(h, cp, cmd);
 	}
 
-	if ((unlikely(hpsa_is_pending_event(cp))))
-		if (cp->reset_pending)
-			return hpsa_cmd_free_and_done(h, cp, cmd);
-
 	if (cp->cmd_type == CMD_IOACCEL2)
 		return process_ioaccel2_completion(h, cp, cmd, dev);
 
@@ -2647,9 +2661,20 @@
 			decode_sense_data(ei->SenseInfo, sense_data_size,
 				&sense_key, &asc, &ascq);
 		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
-			if (sense_key == ABORTED_COMMAND) {
+			switch (sense_key) {
+			case ABORTED_COMMAND:
 				cmd->result |= DID_SOFT_ERROR << 16;
 				break;
+			case UNIT_ATTENTION:
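+				/* 3F/0E: REPORTED LUNS DATA HAS CHANGED - request a rescan */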
+				if (asc == 0x3F && ascq == 0x0E)
+					h->drv_req_rescan = 1;
+				break;
+			case ILLEGAL_REQUEST:
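+				/* 25/00: LOGICAL UNIT NOT SUPPORTED - device is gone */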
+				if (asc == 0x25 && ascq == 0x00) {
+					dev->removed = 1;
+					cmd->result = DID_NO_CONNECT << 16;
+				}
+				break;
 			}
 			break;
 		}
@@ -2759,13 +2784,13 @@
 	return hpsa_cmd_free_and_done(h, cp, cmd);
 }
 
-static void hpsa_pci_unmap(struct pci_dev *pdev,
-	struct CommandList *c, int sg_used, int data_direction)
+static void hpsa_pci_unmap(struct pci_dev *pdev, struct CommandList *c,
+		int sg_used, enum dma_data_direction data_direction)
 {
 	int i;
 
 	for (i = 0; i < sg_used; i++)
-		pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr),
+		dma_unmap_single(&pdev->dev, le64_to_cpu(c->SG[i].Addr),
 				le32_to_cpu(c->SG[i].Len),
 				data_direction);
 }
@@ -2774,17 +2799,17 @@
 		struct CommandList *cp,
 		unsigned char *buf,
 		size_t buflen,
-		int data_direction)
+		enum dma_data_direction data_direction)
 {
 	u64 addr64;
 
-	if (buflen == 0 || data_direction == PCI_DMA_NONE) {
+	if (buflen == 0 || data_direction == DMA_NONE) {
 		cp->Header.SGList = 0;
 		cp->Header.SGTotal = cpu_to_le16(0);
 		return 0;
 	}
 
-	addr64 = pci_map_single(pdev, buf, buflen, data_direction);
+	addr64 = dma_map_single(&pdev->dev, buf, buflen, data_direction);
 	if (dma_mapping_error(&pdev->dev, addr64)) {
 		/* Prevent subsequent unmap of something never mapped */
 		cp->Header.SGList = 0;
@@ -2845,7 +2870,8 @@
 
 #define MAX_DRIVER_CMD_RETRIES 25
 static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
-	struct CommandList *c, int data_direction, unsigned long timeout_msecs)
+		struct CommandList *c, enum dma_data_direction data_direction,
+		unsigned long timeout_msecs)
 {
 	int backoff_time = 10, retry_count = 0;
 	int rc;
@@ -2969,8 +2995,8 @@
 		rc = -1;
 		goto out;
 	}
-	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
-		PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+			NO_TIMEOUT);
 	if (rc)
 		goto out;
 	ei = c->err_info;
@@ -3022,8 +3048,8 @@
 		rc = -1;
 		goto out;
 	}
-	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
-					PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+			NO_TIMEOUT);
 	if (rc)
 		goto out;
 	ei = c->err_info;
@@ -3036,7 +3062,7 @@
 	return rc;
 }
 
-static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
+static int hpsa_send_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
 	u8 reset_type, int reply_queue)
 {
 	int rc = IO_OK;
@@ -3044,11 +3070,10 @@
 	struct ErrorInfo *ei;
 
 	c = cmd_alloc(h);
-
+	c->device = dev;
 
 	/* fill_cmd can't fail here, no data buffer to map. */
-	(void) fill_cmd(c, reset_type, h, NULL, 0, 0,
-			scsi3addr, TYPE_MSG);
+	(void) fill_cmd(c, reset_type, h, NULL, 0, 0, dev->scsi3addr, TYPE_MSG);
 	rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
 	if (rc) {
 		dev_warn(&h->pdev->dev, "Failed to send reset command\n");
@@ -3126,9 +3151,8 @@
 }
 
 static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
-	unsigned char *scsi3addr, u8 reset_type, int reply_queue)
+	u8 reset_type, int reply_queue)
 {
-	int i;
 	int rc = 0;
 
 	/* We can really only handle one reset at a time */
@@ -3137,38 +3161,14 @@
 		return -EINTR;
 	}
 
-	BUG_ON(atomic_read(&dev->reset_cmds_out) != 0);
-
-	for (i = 0; i < h->nr_cmds; i++) {
-		struct CommandList *c = h->cmd_pool + i;
-		int refcount = atomic_inc_return(&c->refcount);
-
-		if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev, scsi3addr)) {
-			unsigned long flags;
-
-			/*
-			 * Mark the target command as having a reset pending,
-			 * then lock a lock so that the command cannot complete
-			 * while we're considering it.  If the command is not
-			 * idle then count it; otherwise revoke the event.
-			 */
-			c->reset_pending = dev;
-			spin_lock_irqsave(&h->lock, flags);	/* Implied MB */
-			if (!hpsa_is_cmd_idle(c))
-				atomic_inc(&dev->reset_cmds_out);
-			else
-				c->reset_pending = NULL;
-			spin_unlock_irqrestore(&h->lock, flags);
-		}
-
-		cmd_free(h, c);
-	}
-
-	rc = hpsa_send_reset(h, scsi3addr, reset_type, reply_queue);
-	if (!rc)
+	rc = hpsa_send_reset(h, dev, reset_type, reply_queue);
+	if (!rc) {
+		/* incremented by sending the reset request */
+		atomic_dec(&dev->commands_outstanding);
 		wait_event(h->event_sync_wait_queue,
-			atomic_read(&dev->reset_cmds_out) == 0 ||
+			atomic_read(&dev->commands_outstanding) <= 0 ||
 			lockup_detected(h));
+	}
 
 	if (unlikely(lockup_detected(h))) {
 		dev_warn(&h->pdev->dev,
@@ -3176,10 +3176,8 @@
 		rc = -ENODEV;
 	}
 
-	if (unlikely(rc))
-		atomic_set(&dev->reset_cmds_out, 0);
-	else
-		rc = wait_for_device_to_become_ready(h, scsi3addr, 0);
+	if (!rc)
+		rc = wait_for_device_to_become_ready(h, dev->scsi3addr, 0);
 
 	mutex_unlock(&h->reset_mutex);
 	return rc;
@@ -3306,8 +3304,8 @@
 		cmd_free(h, c);
 		return -1;
 	}
-	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
-					PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+			NO_TIMEOUT);
 	if (rc)
 		goto out;
 	ei = c->err_info;
@@ -3349,8 +3347,8 @@
 	c->Request.CDB[2] = bmic_device_index & 0xff;
 	c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
 
-	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
-				PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+			NO_TIMEOUT);
 	if (rc)
 		goto out;
 	ei = c->err_info;
@@ -3377,8 +3375,8 @@
 	if (rc)
 		goto out;
 
-	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
-		PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+			NO_TIMEOUT);
 	if (rc)
 		goto out;
 	ei = c->err_info;
@@ -3408,7 +3406,7 @@
 	c->Request.CDB[2] = bmic_device_index & 0xff;
 	c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
 
-	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
+	hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
 						NO_TIMEOUT);
 	ei = c->err_info;
 	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
@@ -3484,7 +3482,7 @@
 	else
 		c->Request.CDB[5] = 0;
 
-	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
+	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
 						NO_TIMEOUT);
 	if (rc)
 		goto out;
@@ -3731,8 +3729,8 @@
 	}
 	if (extended_response)
 		c->Request.CDB[1] = extended_response;
-	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
-					PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+			NO_TIMEOUT);
 	if (rc)
 		goto out;
 	ei = c->err_info;
@@ -3955,14 +3953,18 @@
 	memset(this_device->device_id, 0,
 		sizeof(this_device->device_id));
 	if (hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
-		sizeof(this_device->device_id)) < 0)
+		sizeof(this_device->device_id)) < 0) {
 		dev_err(&h->pdev->dev,
-			"hpsa%d: %s: can't get device id for host %d:C0:T%d:L%d\t%s\t%.16s\n",
+			"hpsa%d: %s: can't get device id for [%d:%d:%d:%d]\t%s\t%.16s\n",
 			h->ctlr, __func__,
 			h->scsi_host->host_no,
-			this_device->target, this_device->lun,
+			this_device->bus, this_device->target,
+			this_device->lun,
 			scsi_device_type(this_device->devtype),
 			this_device->model);
+		rc = HPSA_LV_FAILED;
+		goto bail_out;
+	}
 
 	if ((this_device->devtype == TYPE_DISK ||
 		this_device->devtype == TYPE_ZBC) &&
@@ -4110,7 +4112,7 @@
 			"maximum logical LUNs (%d) exceeded.  "
 			"%d LUNs ignored.\n", HPSA_MAX_LUN,
 			*nlogicals - HPSA_MAX_LUN);
-			*nlogicals = HPSA_MAX_LUN;
+		*nlogicals = HPSA_MAX_LUN;
 	}
 	if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
 		dev_warn(&h->pdev->dev,
@@ -4662,6 +4664,7 @@
 	case WRITE_6:
 	case WRITE_12:
 		is_write = 1;
+		/* fall through */
 	case READ_6:
 	case READ_12:
 		if (*cdb_len == 6) {
@@ -4803,6 +4806,9 @@
 
 	c->phys_disk = dev;
 
+	if (dev->in_reset)
+		return -1;
+
 	return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
 		cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
 }
@@ -4923,7 +4929,7 @@
 			curr_sg->reserved[0] = 0;
 			curr_sg->reserved[1] = 0;
 			curr_sg->reserved[2] = 0;
-			curr_sg->chain_indicator = 0x80;
+			curr_sg->chain_indicator = IOACCEL2_CHAIN;
 
 			curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex];
 		}
@@ -4940,6 +4946,11 @@
 			curr_sg++;
 		}
 
+		/*
+		 * Set the last s/g element bit
+		 */
+		(curr_sg - 1)->chain_indicator = IOACCEL2_LAST_SG;
+
 		switch (cmd->sc_data_direction) {
 		case DMA_TO_DEVICE:
 			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
@@ -4988,6 +4999,11 @@
 	} else
 		cp->sg_count = (u8) use_sg;
 
+	if (phys_disk->in_reset) {
+		cmd->result = DID_RESET << 16;
+		return -1;
+	}
+
 	enqueue_cmd_and_start_io(h, c);
 	return 0;
 }
@@ -5005,6 +5021,9 @@
 	if (!c->scsi_cmd->device->hostdata)
 		return -1;
 
+	if (phys_disk->in_reset)
+		return -1;
+
 	/* Try to honor the device's queue depth */
 	if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
 					phys_disk->queue_depth) {
@@ -5088,10 +5107,14 @@
 	if (!dev)
 		return -1;
 
+	if (dev->in_reset)
+		return -1;
+
 	/* check for valid opcode, get LBA and block count */
 	switch (cmd->cmnd[0]) {
 	case WRITE_6:
 		is_write = 1;
+		/* fall through */
 	case READ_6:
 		first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
 				(cmd->cmnd[2] << 8) |
@@ -5102,6 +5125,7 @@
 		break;
 	case WRITE_10:
 		is_write = 1;
+		/* fall through */
 	case READ_10:
 		first_block =
 			(((u64) cmd->cmnd[2]) << 24) |
@@ -5114,6 +5138,7 @@
 		break;
 	case WRITE_12:
 		is_write = 1;
+		/* fall through */
 	case READ_12:
 		first_block =
 			(((u64) cmd->cmnd[2]) << 24) |
@@ -5128,6 +5153,7 @@
 		break;
 	case WRITE_16:
 		is_write = 1;
+		/* fall through */
 	case READ_16:
 		first_block =
 			(((u64) cmd->cmnd[2]) << 56) |
@@ -5388,13 +5414,13 @@
  */
 static int hpsa_ciss_submit(struct ctlr_info *h,
 	struct CommandList *c, struct scsi_cmnd *cmd,
-	unsigned char scsi3addr[])
+	struct hpsa_scsi_dev_t *dev)
 {
 	cmd->host_scribble = (unsigned char *) c;
 	c->cmd_type = CMD_SCSI;
 	c->scsi_cmd = cmd;
 	c->Header.ReplyQueue = 0;  /* unused in simple mode */
-	memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
+	memcpy(&c->Header.LUN.LunAddrBytes[0], &dev->scsi3addr[0], 8);
 	c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));
 
 	/* Fill in the request block... */
@@ -5445,6 +5471,14 @@
 		hpsa_cmd_resolve_and_free(h, c);
 		return SCSI_MLQUEUE_HOST_BUSY;
 	}
+
+	if (dev->in_reset) {
+		hpsa_cmd_resolve_and_free(h, c);
+		return SCSI_MLQUEUE_HOST_BUSY;
+	}
+
+	c->device = dev;
+
 	enqueue_cmd_and_start_io(h, c);
 	/* the cmd'll come back via intr handler in complete_scsi_command()  */
 	return 0;
@@ -5496,8 +5530,7 @@
 }
 
 static int hpsa_ioaccel_submit(struct ctlr_info *h,
-		struct CommandList *c, struct scsi_cmnd *cmd,
-		unsigned char *scsi3addr)
+		struct CommandList *c, struct scsi_cmnd *cmd)
 {
 	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
 	int rc = IO_ACCEL_INELIGIBLE;
@@ -5505,12 +5538,19 @@
 	if (!dev)
 		return SCSI_MLQUEUE_HOST_BUSY;
 
+	if (dev->in_reset)
+		return SCSI_MLQUEUE_HOST_BUSY;
+
+	if (hpsa_simple_mode)
+		return IO_ACCEL_INELIGIBLE;
+
 	cmd->host_scribble = (unsigned char *) c;
 
 	if (dev->offload_enabled) {
 		hpsa_cmd_init(h, c->cmdindex, c);
 		c->cmd_type = CMD_SCSI;
 		c->scsi_cmd = cmd;
+		c->device = dev;
 		rc = hpsa_scsi_ioaccel_raid_map(h, c);
 		if (rc < 0)     /* scsi_dma_map failed. */
 			rc = SCSI_MLQUEUE_HOST_BUSY;
@@ -5518,6 +5558,7 @@
 		hpsa_cmd_init(h, c->cmdindex, c);
 		c->cmd_type = CMD_SCSI;
 		c->scsi_cmd = cmd;
+		c->device = dev;
 		rc = hpsa_scsi_ioaccel_direct_map(h, c);
 		if (rc < 0)     /* scsi_dma_map failed. */
 			rc = SCSI_MLQUEUE_HOST_BUSY;
@@ -5537,8 +5578,12 @@
 		cmd->result = DID_NO_CONNECT << 16;
 		return hpsa_cmd_free_and_done(c->h, c, cmd);
 	}
-	if (c->reset_pending)
+
+	if (dev->in_reset) {
+		cmd->result = DID_RESET << 16;
 		return hpsa_cmd_free_and_done(c->h, c, cmd);
+	}
+
 	if (c->cmd_type == CMD_IOACCEL2) {
 		struct ctlr_info *h = c->h;
 		struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
@@ -5546,7 +5591,7 @@
 
 		if (c2->error_data.serv_response ==
 				IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) {
-			rc = hpsa_ioaccel_submit(h, c, cmd, dev->scsi3addr);
+			rc = hpsa_ioaccel_submit(h, c, cmd);
 			if (rc == 0)
 				return;
 			if (rc == SCSI_MLQUEUE_HOST_BUSY) {
@@ -5562,7 +5607,7 @@
 		}
 	}
 	hpsa_cmd_partial_init(c->h, c->cmdindex, c);
-	if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) {
+	if (hpsa_ciss_submit(c->h, c, cmd, dev)) {
 		/*
 		 * If we get here, it means dma mapping failed. Try
 		 * again via scsi mid layer, which will then get
@@ -5581,7 +5626,6 @@
 {
 	struct ctlr_info *h;
 	struct hpsa_scsi_dev_t *dev;
-	unsigned char scsi3addr[8];
 	struct CommandList *c;
 	int rc = 0;
 
@@ -5603,14 +5647,24 @@
 		return 0;
 	}
 
-	memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
-
 	if (unlikely(lockup_detected(h))) {
 		cmd->result = DID_NO_CONNECT << 16;
 		cmd->scsi_done(cmd);
 		return 0;
 	}
+
+	if (dev->in_reset)
+		return SCSI_MLQUEUE_DEVICE_BUSY;
+
 	c = cmd_tagged_alloc(h, cmd);
+	if (c == NULL)
+		return SCSI_MLQUEUE_DEVICE_BUSY;
+
+	/*
+	 * This is necessary because the SML doesn't zero out this field during
+	 * error recovery.
+	 */
+	cmd->result = 0;
 
 	/*
 	 * Call alternate submit routine for I/O accelerated commands.
@@ -5619,7 +5673,7 @@
 	if (likely(cmd->retries == 0 &&
 			!blk_rq_is_passthrough(cmd->request) &&
 			h->acciopath_status)) {
-		rc = hpsa_ioaccel_submit(h, c, cmd, scsi3addr);
+		rc = hpsa_ioaccel_submit(h, c, cmd);
 		if (rc == 0)
 			return 0;
 		if (rc == SCSI_MLQUEUE_HOST_BUSY) {
@@ -5627,7 +5681,7 @@
 			return SCSI_MLQUEUE_HOST_BUSY;
 		}
 	}
-	return hpsa_ciss_submit(h, c, cmd, scsi3addr);
+	return hpsa_ciss_submit(h, c, cmd, dev);
 }
 
 static void hpsa_scan_complete(struct ctlr_info *h)
@@ -5803,7 +5857,7 @@
 	/* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
 	(void) fill_cmd(c, TEST_UNIT_READY, h,
 			NULL, 0, 0, lunaddr, TYPE_CMD);
-	rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT);
+	rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
 	if (rc)
 		return rc;
 	/* no unmap needed here because no data xfer. */
@@ -5909,8 +5963,9 @@
 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
 {
 	int rc = SUCCESS;
+	int i;
 	struct ctlr_info *h;
-	struct hpsa_scsi_dev_t *dev;
+	struct hpsa_scsi_dev_t *dev = NULL;
 	u8 reset_type;
 	char msg[48];
 	unsigned long flags;
@@ -5976,9 +6031,19 @@
 		reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ");
 	hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
 
+	/*
+	 * wait to see if any commands will complete before sending reset
+	 */
+	dev->in_reset = true; /* block any new cmds from OS for this device */
+	for (i = 0; i < 10; i++) {
+		if (atomic_read(&dev->commands_outstanding) > 0)
+			msleep(1000);
+		else
+			break;
+	}
+
 	/* send a reset to the SCSI LUN which the command was sent to */
-	rc = hpsa_do_reset(h, dev, dev->scsi3addr, reset_type,
-			   DEFAULT_REPLY_QUEUE);
+	rc = hpsa_do_reset(h, dev, reset_type, DEFAULT_REPLY_QUEUE);
 	if (rc == 0)
 		rc = SUCCESS;
 	else
@@ -5992,6 +6057,8 @@
 return_reset_status:
 	spin_lock_irqsave(&h->reset_lock, flags);
 	h->reset_in_progress = 0;
+	if (dev)
+		dev->in_reset = false;
 	spin_unlock_irqrestore(&h->reset_lock, flags);
 	return rc;
 }
@@ -6017,7 +6084,6 @@
 		BUG();
 	}
 
-	atomic_inc(&c->refcount);
 	if (unlikely(!hpsa_is_cmd_idle(c))) {
 		/*
 		 * We expect that the SCSI layer will hand us a unique tag
@@ -6025,14 +6091,18 @@
 		 * two requests...because if the selected command isn't idle
 		 * then someone is going to be very disappointed.
 		 */
-		dev_err(&h->pdev->dev,
-			"tag collision (tag=%d) in cmd_tagged_alloc().\n",
-			idx);
-		if (c->scsi_cmd != NULL)
-			scsi_print_command(c->scsi_cmd);
-		scsi_print_command(scmd);
+		if (idx != h->last_collision_tag) { /* Print once per tag */
+			dev_warn(&h->pdev->dev,
+				"%s: tag collision (tag=%d)\n", __func__, idx);
+			if (scmd)
+				scsi_print_command(scmd);
+			h->last_collision_tag = idx;
+		}
+		return NULL;
 	}
 
+	atomic_inc(&c->refcount);
+
 	hpsa_cmd_partial_init(h, idx, c);
 	return c;
 }
@@ -6100,6 +6170,7 @@
 		break; /* it's ours now. */
 	}
 	hpsa_cmd_partial_init(h, i, c);
+	c->device = NULL;
 	return c;
 }
 
@@ -6122,7 +6193,7 @@
 
 #ifdef CONFIG_COMPAT
 
-static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd,
+static int hpsa_ioctl32_passthru(struct scsi_device *dev, unsigned int cmd,
 	void __user *arg)
 {
 	IOCTL32_Command_struct __user *arg32 =
@@ -6159,7 +6230,7 @@
 }
 
 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
-	int cmd, void __user *arg)
+	unsigned int cmd, void __user *arg)
 {
 	BIG_IOCTL32_Command_struct __user *arg32 =
 	    (BIG_IOCTL32_Command_struct __user *) arg;
@@ -6196,7 +6267,8 @@
 	return err;
 }
 
-static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
+static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd,
+			     void __user *arg)
 {
 	switch (cmd) {
 	case CCISS_GETPCIINFO:
@@ -6320,8 +6392,8 @@
 
 	/* Fill in the scatter gather information */
 	if (iocommand.buf_size > 0) {
-		temp64 = pci_map_single(h->pdev, buff,
-			iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
+		temp64 = dma_map_single(&h->pdev->dev, buff,
+			iocommand.buf_size, DMA_BIDIRECTIONAL);
 		if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
 			c->SG[0].Addr = cpu_to_le64(0);
 			c->SG[0].Len = cpu_to_le32(0);
@@ -6335,7 +6407,7 @@
 	rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
 					NO_TIMEOUT);
 	if (iocommand.buf_size > 0)
-		hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
+		hpsa_pci_unmap(h->pdev, c, 1, DMA_BIDIRECTIONAL);
 	check_ioctl_unit_attention(h, c);
 	if (rc) {
 		rc = -EIO;
@@ -6381,13 +6453,9 @@
 		return -EINVAL;
 	if (!capable(CAP_SYS_RAWIO))
 		return -EPERM;
-	ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
-	if (!ioc) {
-		status = -ENOMEM;
-		goto cleanup1;
-	}
-	if (copy_from_user(ioc, argp, sizeof(*ioc))) {
-		status = -EFAULT;
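+	/* vmemdup_user() allocates (kmalloc or vmalloc) and copies in one step; pair with kvfree() */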
+	ioc = vmemdup_user(argp, sizeof(*ioc));
+	if (IS_ERR(ioc)) {
+		status = PTR_ERR(ioc);
 		goto cleanup1;
 	}
 	if ((ioc->buf_size < 1) &&
@@ -6447,14 +6515,14 @@
 	if (ioc->buf_size > 0) {
 		int i;
 		for (i = 0; i < sg_used; i++) {
-			temp64 = pci_map_single(h->pdev, buff[i],
-				    buff_size[i], PCI_DMA_BIDIRECTIONAL);
+			temp64 = dma_map_single(&h->pdev->dev, buff[i],
+				    buff_size[i], DMA_BIDIRECTIONAL);
 			if (dma_mapping_error(&h->pdev->dev,
 							(dma_addr_t) temp64)) {
 				c->SG[i].Addr = cpu_to_le64(0);
 				c->SG[i].Len = cpu_to_le32(0);
 				hpsa_pci_unmap(h->pdev, c, i,
-					PCI_DMA_BIDIRECTIONAL);
+					DMA_BIDIRECTIONAL);
 				status = -ENOMEM;
 				goto cleanup0;
 			}
@@ -6467,7 +6535,7 @@
 	status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
 						NO_TIMEOUT);
 	if (sg_used)
-		hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
+		hpsa_pci_unmap(h->pdev, c, sg_used, DMA_BIDIRECTIONAL);
 	check_ioctl_unit_attention(h, c);
 	if (status) {
 		status = -EIO;
@@ -6505,7 +6573,7 @@
 		kfree(buff);
 	}
 	kfree(buff_size);
-	kfree(ioc);
+	kvfree(ioc);
 	return status;
 }
 
@@ -6520,7 +6588,8 @@
 /*
  * ioctl
  */
-static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
+static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd,
+		      void __user *arg)
 {
 	struct ctlr_info *h;
 	void __user *argp = (void __user *)arg;
@@ -6555,8 +6624,7 @@
 	}
 }
 
-static void hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
-				u8 reset_type)
+static void hpsa_send_host_reset(struct ctlr_info *h, u8 reset_type)
 {
 	struct CommandList *c;
 
@@ -6579,7 +6647,7 @@
 	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
 	int cmd_type)
 {
-	int pci_dir = XFER_NONE;
+	enum dma_data_direction dir = DMA_NONE;
 
 	c->cmd_type = CMD_IOCTL_PEND;
 	c->scsi_cmd = SCSI_CMD_BUSY;
@@ -6785,18 +6853,18 @@
 
 	switch (GET_DIR(c->Request.type_attr_dir)) {
 	case XFER_READ:
-		pci_dir = PCI_DMA_FROMDEVICE;
+		dir = DMA_FROM_DEVICE;
 		break;
 	case XFER_WRITE:
-		pci_dir = PCI_DMA_TODEVICE;
+		dir = DMA_TO_DEVICE;
 		break;
 	case XFER_NONE:
-		pci_dir = PCI_DMA_NONE;
+		dir = DMA_NONE;
 		break;
 	default:
-		pci_dir = PCI_DMA_BIDIRECTIONAL;
+		dir = DMA_BIDIRECTIONAL;
 	}
-	if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
+	if (hpsa_map_one(h->pdev, c, buff, size, dir))
 		return -1;
 	return 0;
 }
@@ -6992,13 +7060,13 @@
 	 * CCISS commands, so they must be allocated from the lower 4GiB of
 	 * memory.
 	 */
-	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+	err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 	if (err) {
 		iounmap(vaddr);
 		return err;
 	}
 
-	cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
+	cmd = dma_alloc_coherent(&pdev->dev, cmd_sz, &paddr64, GFP_KERNEL);
 	if (cmd == NULL) {
 		iounmap(vaddr);
 		return -ENOMEM;
@@ -7047,7 +7115,7 @@
 		return -ETIMEDOUT;
 	}
 
-	pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
+	dma_free_coherent(&pdev->dev, cmd_sz, cmd, paddr64);
 
 	if (tag & HPSA_ERROR_BIT) {
 		dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
@@ -7741,7 +7809,7 @@
 	hpsa_disable_interrupt_mode(h);		/* pci_init 2 */
 	/*
 	 * call pci_disable_device before pci_release_regions per
-	 * Documentation/PCI/pci.txt
+	 * Documentation/driver-api/pci/pci.rst
 	 */
 	pci_disable_device(h->pdev);		/* pci_init 1 */
 	pci_release_regions(h->pdev);		/* pci_init 2 */
@@ -7824,7 +7892,7 @@
 clean1:
 	/*
 	 * call pci_disable_device before pci_release_regions per
-	 * Documentation/PCI/pci.txt
+	 * Documentation/driver-api/pci/pci.rst
 	 */
 	pci_disable_device(h->pdev);
 	pci_release_regions(h->pdev);
@@ -7914,7 +7982,7 @@
 	kfree(h->cmd_pool_bits);
 	h->cmd_pool_bits = NULL;
 	if (h->cmd_pool) {
-		pci_free_consistent(h->pdev,
+		dma_free_coherent(&h->pdev->dev,
 				h->nr_cmds * sizeof(struct CommandList),
 				h->cmd_pool,
 				h->cmd_pool_dhandle);
@@ -7922,7 +7990,7 @@
 		h->cmd_pool_dhandle = 0;
 	}
 	if (h->errinfo_pool) {
-		pci_free_consistent(h->pdev,
+		dma_free_coherent(&h->pdev->dev,
 				h->nr_cmds * sizeof(struct ErrorInfo),
 				h->errinfo_pool,
 				h->errinfo_pool_dhandle);
@@ -7936,12 +8004,12 @@
 	h->cmd_pool_bits = kcalloc(DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG),
 				   sizeof(unsigned long),
 				   GFP_KERNEL);
-	h->cmd_pool = pci_alloc_consistent(h->pdev,
+	h->cmd_pool = dma_alloc_coherent(&h->pdev->dev,
 		    h->nr_cmds * sizeof(*h->cmd_pool),
-		    &(h->cmd_pool_dhandle));
-	h->errinfo_pool = pci_alloc_consistent(h->pdev,
+		    &h->cmd_pool_dhandle, GFP_KERNEL);
+	h->errinfo_pool = dma_alloc_coherent(&h->pdev->dev,
 		    h->nr_cmds * sizeof(*h->errinfo_pool),
-		    &(h->errinfo_pool_dhandle));
+		    &h->errinfo_pool_dhandle, GFP_KERNEL);
 	if ((h->cmd_pool_bits == NULL)
 	    || (h->cmd_pool == NULL)
 	    || (h->errinfo_pool == NULL)) {
@@ -7959,10 +8027,15 @@
 static void hpsa_free_irqs(struct ctlr_info *h)
 {
 	int i;
+	int irq_vector = 0;
+
+	if (hpsa_simple_mode)
+		irq_vector = h->intr_mode;
 
 	if (!h->msix_vectors || h->intr_mode != PERF_MODE_INT) {
 		/* Single reply queue, only one irq to free */
-		free_irq(pci_irq_vector(h->pdev, 0), &h->q[h->intr_mode]);
+		free_irq(pci_irq_vector(h->pdev, irq_vector),
+				&h->q[h->intr_mode]);
 		h->q[h->intr_mode] = 0;
 		return;
 	}
@@ -7981,6 +8054,10 @@
 	irqreturn_t (*intxhandler)(int, void *))
 {
 	int rc, i;
+	int irq_vector = 0;
+
+	if (hpsa_simple_mode)
+		irq_vector = h->intr_mode;
 
 	/*
 	 * initialize h->q[x] = x so that interrupt handlers know which
@@ -8016,14 +8093,14 @@
 		if (h->msix_vectors > 0 || h->pdev->msi_enabled) {
 			sprintf(h->intrname[0], "%s-msi%s", h->devname,
 				h->msix_vectors ? "x" : "");
-			rc = request_irq(pci_irq_vector(h->pdev, 0),
+			rc = request_irq(pci_irq_vector(h->pdev, irq_vector),
 				msixhandler, 0,
 				h->intrname[0],
 				&h->q[h->intr_mode]);
 		} else {
 			sprintf(h->intrname[h->intr_mode],
 				"%s-intx", h->devname);
-			rc = request_irq(pci_irq_vector(h->pdev, 0),
+			rc = request_irq(pci_irq_vector(h->pdev, irq_vector),
 				intxhandler, IRQF_SHARED,
 				h->intrname[0],
 				&h->q[h->intr_mode]);
@@ -8031,7 +8108,7 @@
 	}
 	if (rc) {
 		dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
-		       pci_irq_vector(h->pdev, 0), h->devname);
+		       pci_irq_vector(h->pdev, irq_vector), h->devname);
 		hpsa_free_irqs(h);
 		return -ENODEV;
 	}
@@ -8041,7 +8118,7 @@
 static int hpsa_kdump_soft_reset(struct ctlr_info *h)
 {
 	int rc;
-	hpsa_send_host_reset(h, RAID_CTLR_LUNID, HPSA_RESET_TYPE_CONTROLLER);
+	hpsa_send_host_reset(h, HPSA_RESET_TYPE_CONTROLLER);
 
 	dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
 	rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY);
@@ -8068,7 +8145,7 @@
 	for (i = 0; i < h->nreply_queues; i++) {
 		if (!h->reply_queue[i].head)
 			continue;
-		pci_free_consistent(h->pdev,
+		dma_free_coherent(&h->pdev->dev,
 					h->reply_queue_size,
 					h->reply_queue[i].head,
 					h->reply_queue[i].busaddr);
@@ -8097,6 +8174,11 @@
 		destroy_workqueue(h->rescan_ctlr_wq);
 		h->rescan_ctlr_wq = NULL;
 	}
+	if (h->monitor_ctlr_wq) {
+		destroy_workqueue(h->monitor_ctlr_wq);
+		h->monitor_ctlr_wq = NULL;
+	}
+
 	kfree(h);				/* init_one 1 */
 }
 
@@ -8432,8 +8514,8 @@
 
 	spin_lock_irqsave(&h->lock, flags);
 	if (!h->remove_in_progress)
-		schedule_delayed_work(&h->event_monitor_work,
-					HPSA_EVENT_MONITOR_INTERVAL);
+		queue_delayed_work(h->monitor_ctlr_wq, &h->event_monitor_work,
+				HPSA_EVENT_MONITOR_INTERVAL);
 	spin_unlock_irqrestore(&h->lock, flags);
 }
 
@@ -8478,7 +8560,7 @@
 
 	spin_lock_irqsave(&h->lock, flags);
 	if (!h->remove_in_progress)
-		schedule_delayed_work(&h->monitor_ctlr_work,
+		queue_delayed_work(h->monitor_ctlr_wq, &h->monitor_ctlr_work,
 				h->heartbeat_sample_interval);
 	spin_unlock_irqrestore(&h->lock, flags);
 }
@@ -8594,11 +8676,11 @@
 	number_of_controllers++;
 
 	/* configure PCI DMA stuff */
-	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
 	if (rc == 0) {
 		dac = 1;
 	} else {
-		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (rc == 0) {
 			dac = 0;
 		} else {
@@ -8646,6 +8728,12 @@
 		goto clean7;	/* aer/h */
 	}
 
+	h->monitor_ctlr_wq = hpsa_create_controller_wq(h, "monitor");
+	if (!h->monitor_ctlr_wq) {
+		rc = -ENOMEM;
+		goto clean7;
+	}
+
 	/*
 	 * At this point, the controller is ready to take commands.
 	 * Now, if reset_devices and the hard reset didn't work, try
@@ -8775,6 +8863,10 @@
 		destroy_workqueue(h->rescan_ctlr_wq);
 		h->rescan_ctlr_wq = NULL;
 	}
+	if (h->monitor_ctlr_wq) {
+		destroy_workqueue(h->monitor_ctlr_wq);
+		h->monitor_ctlr_wq = NULL;
+	}
 	kfree(h);
 	return rc;
 }
@@ -8797,8 +8889,8 @@
 		RAID_CTLR_LUNID, TYPE_CMD)) {
 		goto out;
 	}
-	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
-					PCI_DMA_TODEVICE, DEFAULT_TIMEOUT);
+	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE,
+			DEFAULT_TIMEOUT);
 	if (rc)
 		goto out;
 	if (c->err_info->CommandStatus != 0)
@@ -8833,8 +8925,8 @@
 		RAID_CTLR_LUNID, TYPE_CMD))
 		goto errout;
 
-	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
-		PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+			NO_TIMEOUT);
 	if ((rc != 0) || (c->err_info->CommandStatus != 0))
 		goto errout;
 
@@ -8845,8 +8937,8 @@
 		RAID_CTLR_LUNID, TYPE_CMD))
 		goto errout;
 
-	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
-		PCI_DMA_TODEVICE, NO_TIMEOUT);
+	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE,
+			NO_TIMEOUT);
 	if ((rc != 0)  || (c->err_info->CommandStatus != 0))
 		goto errout;
 
@@ -8855,8 +8947,8 @@
 		RAID_CTLR_LUNID, TYPE_CMD))
 		goto errout;
 
-	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
-		PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+			NO_TIMEOUT);
 	if ((rc != 0)  || (c->err_info->CommandStatus != 0))
 		goto errout;
 
@@ -8922,6 +9014,7 @@
 	cancel_delayed_work_sync(&h->event_monitor_work);
 	destroy_workqueue(h->rescan_ctlr_wq);
 	destroy_workqueue(h->resubmit_wq);
+	destroy_workqueue(h->monitor_ctlr_wq);
 
 	hpsa_delete_sas_host(h);
 
@@ -9228,9 +9321,9 @@
 	BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
 			IOACCEL1_COMMANDLIST_ALIGNMENT);
 	h->ioaccel_cmd_pool =
-		pci_alloc_consistent(h->pdev,
+		dma_alloc_coherent(&h->pdev->dev,
 			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
-			&(h->ioaccel_cmd_pool_dhandle));
+			&h->ioaccel_cmd_pool_dhandle, GFP_KERNEL);
 
 	h->ioaccel1_blockFetchTable =
 		kmalloc(((h->ioaccel_maxsg + 1) *
@@ -9281,9 +9374,9 @@
 	BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
 			IOACCEL2_COMMANDLIST_ALIGNMENT);
 	h->ioaccel2_cmd_pool =
-		pci_alloc_consistent(h->pdev,
+		dma_alloc_coherent(&h->pdev->dev,
 			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
-			&(h->ioaccel2_cmd_pool_dhandle));
+			&h->ioaccel2_cmd_pool_dhandle, GFP_KERNEL);
 
 	h->ioaccel2_blockFetchTable =
 		kmalloc(((h->ioaccel_maxsg + 1) *
@@ -9356,9 +9449,10 @@
 	h->reply_queue_size = h->max_commands * sizeof(u64);
 
 	for (i = 0; i < h->nreply_queues; i++) {
-		h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
+		h->reply_queue[i].head = dma_alloc_coherent(&h->pdev->dev,
 						h->reply_queue_size,
-						&(h->reply_queue[i].busaddr));
+						&h->reply_queue[i].busaddr,
+						GFP_KERNEL);
 		if (!h->reply_queue[i].head) {
 			rc = -ENOMEM;
 			goto clean1;	/* rq, ioaccel */
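
The allocation hunks above are the mechanical pci_alloc_consistent()/pci_free_consistent() to dma_alloc_coherent()/dma_free_coherent() conversion: the generic DMA API takes a struct device * and an explicit gfp_t where the PCI wrappers implied GFP_ATOMIC. A before/after sketch (my_alloc/my_free are placeholders, not hpsa helpers):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Old style, removed here:
 *	buf = pci_alloc_consistent(pdev, size, &dma);
 * Generic replacement, allocation context now explicit:
 */
static void *my_alloc(struct pci_dev *pdev, size_t size, dma_addr_t *dma)
{
	return dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL);
}

static void my_free(struct pci_dev *pdev, size_t size, void *buf,
		    dma_addr_t dma)
{
	dma_free_coherent(&pdev->dev, size, buf, dma);
}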
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 59e0236..f8c88fc 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -65,6 +65,7 @@
 	u8 physical_device : 1;
 	u8 expose_device;
 	u8 removed : 1;			/* device is marked for death */
+	u8 was_removed : 1;		/* device actually removed */
 #define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
 	unsigned char device_id[16];    /* from inquiry pg. 0x83 */
 	u64 sas_address;
@@ -75,11 +76,12 @@
 	unsigned char raid_level;	/* from inquiry page 0xC1 */
 	unsigned char volume_offline;	/* discovered via TUR or VPD */
 	u16 queue_depth;		/* max queue_depth for this device */
-	atomic_t reset_cmds_out;	/* Count of commands to-be affected */
+	atomic_t commands_outstanding;	/* track commands sent to device */
 	atomic_t ioaccel_cmds_out;	/* Only used for physical devices
 					 * counts commands sent to physical
 					 * device via "ioaccel" path.
 					 */
+	bool in_reset;
 	u32 ioaccel_handle;
 	u8 active_path_index;
 	u8 path_map;
@@ -174,6 +176,7 @@
 	struct CfgTable __iomem *cfgtable;
 	int	interrupts_enabled;
 	int 	max_commands;
+	int	last_collision_tag; /* tags are global */
 	atomic_t commands_outstanding;
 #	define PERF_MODE_INT	0
 #	define DOORBELL_INT	1
@@ -300,6 +303,7 @@
 	int	needs_abort_tags_swizzled;
 	struct workqueue_struct *resubmit_wq;
 	struct workqueue_struct *rescan_ctlr_wq;
+	struct workqueue_struct *monitor_ctlr_wq;
 	atomic_t abort_cmds_available;
 	wait_queue_head_t event_sync_wait_queue;
 	struct mutex reset_mutex;
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index 21a726e..7825cbf 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -448,7 +448,7 @@
 	struct hpsa_scsi_dev_t *phys_disk;
 
 	int abort_pending;
-	struct hpsa_scsi_dev_t *reset_pending;
+	struct hpsa_scsi_dev_t *device;
 	atomic_t refcount; /* Must be last to avoid memset in hpsa_cmd_init() */
 } __aligned(COMMANDLIST_ALIGNMENT);
 
@@ -517,6 +517,7 @@
 	u8 reserved[3];
 	u8 chain_indicator;
 #define IOACCEL2_CHAIN 0x80
+#define IOACCEL2_LAST_SG 0x40
 };
 
 /*
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index 2fad7f0..6a2561f 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * HighPoint RR3xxx/4xxx controller driver for Linux
  * Copyright (C) 2006-2015 HighPoint Technologies, Inc. All Rights Reserved.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Please report bugs/comments/suggestions to linux@highpoint-tech.com
  *
  * For more information, visit http://www.highpoint-tech.com
@@ -1180,7 +1172,6 @@
 	.eh_host_reset_handler      = hptiop_reset,
 	.info                       = hptiop_info,
 	.emulated                   = 0,
-	.use_clustering             = ENABLE_CLUSTERING,
 	.proc_name                  = driver_name,
 	.shost_attrs                = hptiop_attrs,
 	.slave_configure            = hptiop_slave_config,
@@ -1293,6 +1284,7 @@
 	dma_addr_t start_phy;
 	void *start_virt;
 	u32 offset, i, req_size;
+	int rc;
 
 	dprintk("hptiop_probe(%p)\n", pcidev);
 
@@ -1309,11 +1301,14 @@
 
 	/* Enable 64bit DMA if possible */
 	iop_ops = (struct hptiop_adapter_ops *)id->driver_data;
-	if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(iop_ops->hw_dma_bit_mask))) {
-		if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(32))) {
-			printk(KERN_ERR "hptiop: fail to set dma_mask\n");
-			goto disable_pci_device;
-		}
+	rc = dma_set_mask(&pcidev->dev,
+			  DMA_BIT_MASK(iop_ops->hw_dma_bit_mask));
+	if (rc)
+		rc = dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32));
+
+	if (rc) {
+		printk(KERN_ERR "hptiop: failed to set dma_mask\n");
+		goto disable_pci_device;
 	}
 
 	if (pci_request_regions(pcidev, driver_name)) {
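
hptiop's probe keeps the usual try-64-bit, fall-back-to-32-bit mask negotiation but expresses it with the generic API and without nesting. The shape of the pattern, sketched (hw_mask_bits stands in for the adapter mask width taken from driver_data):

#include <linux/dma-mapping.h>

static int my_set_dma_mask(struct device *dev, unsigned int hw_mask_bits)
{
	int rc = dma_set_mask(dev, DMA_BIT_MASK(hw_mask_bits));

	if (rc)		/* device or platform rejected it: fall back */
		rc = dma_set_mask(dev, DMA_BIT_MASK(32));
	return rc;
}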
diff --git a/drivers/scsi/hptiop.h b/drivers/scsi/hptiop.h
index 4d1c511..35184c2 100644
--- a/drivers/scsi/hptiop.h
+++ b/drivers/scsi/hptiop.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * HighPoint RR3xxx/4xxx controller driver for Linux
  * Copyright (C) 2006-2015 HighPoint Technologies, Inc. All Rights Reserved.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Please report bugs/comments/suggestions to linux@highpoint-tech.com
  *
  * For more information, visit http://www.highpoint-tech.com
diff --git a/drivers/scsi/ibmvscsi/Makefile b/drivers/scsi/ibmvscsi/Makefile
index 3840c64..5eb1cb1 100644
--- a/drivers/scsi/ibmvscsi/Makefile
+++ b/drivers/scsi/ibmvscsi/Makefile
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
 obj-$(CONFIG_SCSI_IBMVSCSI)	+= ibmvscsi.o
 obj-$(CONFIG_SCSI_IBMVFC)	+= ibmvfc.o
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index b64ca97..df897df 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -1,24 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * ibmvfc.c -- driver for IBM Power Virtual Fibre Channel Adapter
  *
  * Written By: Brian King <brking@linux.vnet.ibm.com>, IBM Corporation
  *
  * Copyright (C) IBM Corporation, 2008
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
  */
 
 #include <linux/module.h>
@@ -139,6 +125,7 @@
 	{ IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" },
 
 	{ IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" },
+	{ IBMVFC_FC_SCSI_ERROR, IBMVFC_COMMAND_FAILED, DID_ERROR, 0, 1, "PRLI to device failed." },
 };
 
 static void ibmvfc_npiv_login(struct ibmvfc_host *);
@@ -1494,9 +1481,9 @@
 	if (rsp->flags & FCP_RSP_LEN_VALID)
 		rsp_code = rsp->data.info.rsp_code;
 
-	scmd_printk(KERN_ERR, cmnd, "Command (%02X) failed: %s (%x:%x) "
+	scmd_printk(KERN_ERR, cmnd, "Command (%02X) : %s (%x:%x) "
 		    "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n",
-		    cmnd->cmnd[0], err, vfc_cmd->status, vfc_cmd->error,
+		    cmnd->cmnd[0], err, be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error),
 		    rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status);
 }
 
@@ -1843,6 +1830,7 @@
 		port_id = (bsg_request->rqst_data.h_els.port_id[0] << 16) |
 			(bsg_request->rqst_data.h_els.port_id[1] << 8) |
 			bsg_request->rqst_data.h_els.port_id[2];
+		/* fall through */
 	case FC_BSG_RPT_ELS:
 		fc_flags = IBMVFC_FC_ELS;
 		break;
@@ -1851,6 +1839,7 @@
 		port_id = (bsg_request->rqst_data.h_ct.port_id[0] << 16) |
 			(bsg_request->rqst_data.h_ct.port_id[1] << 8) |
 			bsg_request->rqst_data.h_ct.port_id[2];
+		/* fall through */
 	case FC_BSG_RPT_CT:
 		fc_flags = IBMVFC_FC_CT_IU;
 		break;
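
Several hunks in this update, here and in imm.c, the isci files, and ibmvscsi_tgt, add nothing but /* fall through */ comments: they mark intentional switch fall-through so builds with -Wimplicit-fallthrough stay warning-free. A self-contained illustration:

static int my_advance(int phase)
{
	switch (phase) {
	case 1:
		/* phase 1 work done; continue straight into phase 2 */
		/* fall through */
	case 2:
		return 0;	/* phase 2 work */
	default:
		return -1;
	}
}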
@@ -2022,7 +2011,7 @@
 		sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) "
 			    "flags: %x fcp_rsp: %x, scsi_status: %x\n", desc,
 			    ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
-			    rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
+			    be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
 			    fc_rsp->scsi_status);
 		rsp_rc = -EIO;
 	} else
@@ -2381,7 +2370,7 @@
 		sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
 			    "flags: %x fcp_rsp: %x, scsi_status: %x\n",
 			    ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
-			    rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
+			    be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
 			    fc_rsp->scsi_status);
 		rsp_rc = -EIO;
 	} else
@@ -2755,16 +2744,18 @@
 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
 		if (crq->format == IBMVFC_PARTITION_MIGRATED) {
 			/* We need to re-setup the interpartition connection */
-			dev_info(vhost->dev, "Re-enabling adapter\n");
+			dev_info(vhost->dev, "Partition migrated, Re-enabling adapter\n");
 			vhost->client_migrated = 1;
 			ibmvfc_purge_requests(vhost, DID_REQUEUE);
 			ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE);
-		} else {
-			dev_err(vhost->dev, "Virtual adapter failed (rc=%d)\n", crq->format);
+		} else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) {
+			dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format);
 			ibmvfc_purge_requests(vhost, DID_ERROR);
 			ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
+		} else {
+			dev_err(vhost->dev, "Received unknown transport event from partner (rc=%d)\n", crq->format);
 		}
 		return;
 	case IBMVFC_CRQ_CMD_RSP:
@@ -3100,7 +3091,6 @@
 	.this_id = -1,
 	.sg_tablesize = SG_ALL,
 	.max_sectors = IBMVFC_MAX_SECTORS,
-	.use_clustering = ENABLE_CLUSTERING,
 	.shost_attrs = ibmvfc_attrs,
 	.track_queue_depth = 1,
 };
@@ -3349,7 +3339,7 @@
 
 		tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
 			ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
-			rsp->status, rsp->error, status);
+			be16_to_cpu(rsp->status), be16_to_cpu(rsp->error), status);
 		break;
 	}
 
@@ -3447,9 +3437,10 @@
 			ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
 
 		tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
-			ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), rsp->status, rsp->error,
-			ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), rsp->fc_type,
-			ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), rsp->fc_explain, status);
+			ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
+					     be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
+			ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
+			ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain), status);
 		break;
 	}
 
@@ -3620,7 +3611,7 @@
 		fc_explain = (be32_to_cpu(mad->fc_iu.response[1]) & 0x0000ff00) >> 8;
 		tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
 			 ibmvfc_get_cmd_error(be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error)),
-			 mad->iu.status, mad->iu.error,
+			 be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error),
 			 ibmvfc_get_fc_type(fc_reason), fc_reason,
 			 ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
 		break;
@@ -3832,9 +3823,10 @@
 
 		tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
 			ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
-			rsp->status, rsp->error, ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)),
-			rsp->fc_type, ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)),
-			rsp->fc_explain, status);
+			be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
+			ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
+			ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain),
+			status);
 		break;
 	}
 
@@ -3960,7 +3952,7 @@
 		level += ibmvfc_retry_host_init(vhost);
 		ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n",
 			   ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
-			   rsp->status, rsp->error);
+			   be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
 		break;
 	case IBMVFC_MAD_DRIVER_FAILED:
 		break;
@@ -4025,11 +4017,12 @@
 			ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
 		ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n",
 			   ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
-						rsp->status, rsp->error);
+						be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
 		ibmvfc_free_event(evt);
 		return;
 	case IBMVFC_MAD_CRQ_ERROR:
 		ibmvfc_retry_host_init(vhost);
+		/* fall through */
 	case IBMVFC_MAD_DRIVER_FAILED:
 		ibmvfc_free_event(evt);
 		return;
@@ -4874,8 +4867,8 @@
 
 	spin_lock_irqsave(vhost->host->host_lock, flags);
 	ibmvfc_purge_requests(vhost, DID_ERROR);
-	ibmvfc_free_event_pool(vhost);
 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
+	ibmvfc_free_event_pool(vhost);
 
 	ibmvfc_free_mem(vhost);
 	spin_lock(&ibmvfc_driver_lock);
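
The recurring ibmvfc.c fix above: status and error are big-endian wire-format fields, and the old format strings printed them raw even though the lookup helpers were already handed byte-swapped copies, so on little-endian hosts the logged hex could contradict the decoded text. The corrected idiom, sketched with a placeholder response struct:

#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/byteorder.h>

struct my_rsp {
	__be16 status;
	__be16 error;
};

static void my_log_rsp(const struct my_rsp *rsp)
{
	/* Swap once; decoded text and raw hex then always agree. */
	u16 status = be16_to_cpu(rsp->status);
	u16 error = be16_to_cpu(rsp->error);

	pr_err("command failed (%x:%x)\n", status, error);
}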
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index b81a53c..7da89f4 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -1,24 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * ibmvfc.h -- driver for IBM Power Virtual Fibre Channel Adapter
  *
  * Written By: Brian King <brking@linux.vnet.ibm.com>, IBM Corporation
  *
  * Copyright (C) IBM Corporation, 2008
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
  */
 
 #ifndef _IBMVFC_H
@@ -78,9 +64,14 @@
 	IBMVFC_CRQ_XPORT_EVENT		= 0xFF,
 };
 
-enum ibmvfc_crq_format {
+enum ibmvfc_crq_init_msg {
 	IBMVFC_CRQ_INIT			= 0x01,
 	IBMVFC_CRQ_INIT_COMPLETE	= 0x02,
+};
+
+enum ibmvfc_crq_xport_evts {
+	IBMVFC_PARTNER_FAILED		= 0x01,
+	IBMVFC_PARTNER_DEREGISTER	= 0x02,
 	IBMVFC_PARTITION_MIGRATED	= 0x06,
 };
 
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 9df8a1a..7f66a77 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /* ------------------------------------------------------------
  * ibmvscsi.c
  * (C) Copyright IBM Corporation 1994, 2004
@@ -5,21 +6,6 @@
  *          Santiago Leon (santil@us.ibm.com)
  *          Dave Boutcher (sleddog@us.ibm.com)
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
- * USA
- *
  * ------------------------------------------------------------
  * Emulation of a SCSI host adapter for Virtual I/O devices
  *
@@ -96,6 +82,7 @@
 static char partition_name[96] = "UNKNOWN";
 static unsigned int partition_number = -1;
 static LIST_HEAD(ibmvscsi_head);
+static DEFINE_SPINLOCK(ibmvscsi_driver_lock);
 
 static struct scsi_transport_template *ibmvscsi_transport_template;
 
@@ -827,7 +814,7 @@
 	atomic_set(&hostdata->request_limit, 0);
 
 	purge_requests(hostdata, DID_ERROR);
-	hostdata->reset_crq = 1;
+	hostdata->action = IBMVSCSI_HOST_ACTION_RESET;
 	wake_up(&hostdata->work_wait_q);
 }
 
@@ -1178,7 +1165,8 @@
 		   be32_to_cpu(evt_struct->xfer_iu->srp.login_rsp.req_lim_delta));
 
 	/* If we had any pending I/Os, kick them */
-	scsi_unblock_requests(hostdata->host);
+	hostdata->action = IBMVSCSI_HOST_ACTION_UNBLOCK;
+	wake_up(&hostdata->work_wait_q);
 }
 
 /**
@@ -1796,7 +1784,7 @@
 			/* We need to re-setup the interpartition connection */
 			dev_info(hostdata->dev, "Re-enabling adapter!\n");
 			hostdata->client_migrated = 1;
-			hostdata->reenable_crq = 1;
+			hostdata->action = IBMVSCSI_HOST_ACTION_REENABLE;
 			purge_requests(hostdata, DID_REQUEUE);
 			wake_up(&hostdata->work_wait_q);
 		} else {
@@ -2049,6 +2037,16 @@
 	.show = show_host_config,
 };
 
+static int ibmvscsi_host_reset(struct Scsi_Host *shost, int reset_type)
+{
+	struct ibmvscsi_host_data *hostdata = shost_priv(shost);
+
+	dev_info(hostdata->dev, "Initiating adapter reset!\n");
+	ibmvscsi_reset_host(hostdata);
+
+	return 0;
+}
+
 static struct device_attribute *ibmvscsi_attrs[] = {
 	&ibmvscsi_host_vhost_loc,
 	&ibmvscsi_host_vhost_name,
@@ -2075,11 +2073,11 @@
 	.eh_host_reset_handler = ibmvscsi_eh_host_reset_handler,
 	.slave_configure = ibmvscsi_slave_configure,
 	.change_queue_depth = ibmvscsi_change_queue_depth,
+	.host_reset = ibmvscsi_host_reset,
 	.cmd_per_lun = IBMVSCSI_CMDS_PER_LUN_DEFAULT,
 	.can_queue = IBMVSCSI_MAX_REQUESTS_DEFAULT,
 	.this_id = -1,
 	.sg_tablesize = SG_ALL,
-	.use_clustering = ENABLE_CLUSTERING,
 	.shost_attrs = ibmvscsi_attrs,
 };
 
@@ -2105,48 +2103,75 @@
 
 static void ibmvscsi_do_work(struct ibmvscsi_host_data *hostdata)
 {
+	unsigned long flags;
 	int rc;
 	char *action = "reset";
 
-	if (hostdata->reset_crq) {
-		smp_rmb();
-		hostdata->reset_crq = 0;
-
+	spin_lock_irqsave(hostdata->host->host_lock, flags);
+	switch (hostdata->action) {
+	case IBMVSCSI_HOST_ACTION_UNBLOCK:
+		rc = 0;
+		break;
+	case IBMVSCSI_HOST_ACTION_RESET:
+		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
 		rc = ibmvscsi_reset_crq_queue(&hostdata->queue, hostdata);
+		spin_lock_irqsave(hostdata->host->host_lock, flags);
 		if (!rc)
 			rc = ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0);
 		vio_enable_interrupts(to_vio_dev(hostdata->dev));
-	} else if (hostdata->reenable_crq) {
-		smp_rmb();
+		break;
+	case IBMVSCSI_HOST_ACTION_REENABLE:
 		action = "enable";
+		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
 		rc = ibmvscsi_reenable_crq_queue(&hostdata->queue, hostdata);
-		hostdata->reenable_crq = 0;
+		spin_lock_irqsave(hostdata->host->host_lock, flags);
 		if (!rc)
 			rc = ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0);
-	} else
+		break;
+	case IBMVSCSI_HOST_ACTION_NONE:
+	default:
+		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
 		return;
+	}
+
+	hostdata->action = IBMVSCSI_HOST_ACTION_NONE;
 
 	if (rc) {
 		atomic_set(&hostdata->request_limit, -1);
 		dev_err(hostdata->dev, "error after %s\n", action);
 	}
+	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
 
 	scsi_unblock_requests(hostdata->host);
 }
 
-static int ibmvscsi_work_to_do(struct ibmvscsi_host_data *hostdata)
+static int __ibmvscsi_work_to_do(struct ibmvscsi_host_data *hostdata)
 {
 	if (kthread_should_stop())
 		return 1;
-	else if (hostdata->reset_crq) {
-		smp_rmb();
-		return 1;
-	} else if (hostdata->reenable_crq) {
-		smp_rmb();
-		return 1;
+	switch (hostdata->action) {
+	case IBMVSCSI_HOST_ACTION_NONE:
+		return 0;
+	case IBMVSCSI_HOST_ACTION_RESET:
+	case IBMVSCSI_HOST_ACTION_REENABLE:
+	case IBMVSCSI_HOST_ACTION_UNBLOCK:
+	default:
+		break;
 	}
 
-	return 0;
+	return 1;
+}
+
+static int ibmvscsi_work_to_do(struct ibmvscsi_host_data *hostdata)
+{
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(hostdata->host->host_lock, flags);
+	rc = __ibmvscsi_work_to_do(hostdata);
+	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+
+	return rc;
 }
 
 static int ibmvscsi_work(void *data)
@@ -2271,7 +2296,9 @@
 	}
 
 	dev_set_drvdata(&vdev->dev, hostdata);
+	spin_lock(&ibmvscsi_driver_lock);
 	list_add_tail(&hostdata->host_list, &ibmvscsi_head);
+	spin_unlock(&ibmvscsi_driver_lock);
 	return 0;
 
       add_srp_port_failed:
@@ -2293,15 +2320,27 @@
 static int ibmvscsi_remove(struct vio_dev *vdev)
 {
 	struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);
-	list_del(&hostdata->host_list);
-	unmap_persist_bufs(hostdata);
+	unsigned long flags;
+
+	srp_remove_host(hostdata->host);
+	scsi_remove_host(hostdata->host);
+
+	purge_requests(hostdata, DID_ERROR);
+
+	spin_lock_irqsave(hostdata->host->host_lock, flags);
 	release_event_pool(&hostdata->pool, hostdata);
+	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+
 	ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
 					max_events);
 
 	kthread_stop(hostdata->work_thread);
-	srp_remove_host(hostdata->host);
-	scsi_remove_host(hostdata->host);
+	unmap_persist_bufs(hostdata);
+
+	spin_lock(&ibmvscsi_driver_lock);
+	list_del(&hostdata->host_list);
+	spin_unlock(&ibmvscsi_driver_lock);
+
 	scsi_host_put(hostdata->host);
 
 	return 0;
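
ibmvscsi replaces its reset_crq/reenable_crq flag pair (and the smp_rmb() pairing it needed) with a single enum action field read and cleared under the host lock, and protects the global host list with the new ibmvscsi_driver_lock. A reduced sketch of the take-one-action worker, using placeholder names:

#include <linux/spinlock.h>

enum my_action { MY_NONE = 0, MY_RESET, MY_REENABLE };

struct my_host {
	spinlock_t lock;
	enum my_action action;
};

/*
 * Consume the pending action under the lock, then do the slow work
 * with the lock dropped; new requests simply set the field again.
 */
static void my_do_work(struct my_host *h)
{
	unsigned long flags;
	enum my_action action;

	spin_lock_irqsave(&h->lock, flags);
	action = h->action;
	h->action = MY_NONE;
	spin_unlock_irqrestore(&h->lock, flags);

	switch (action) {
	case MY_RESET:
		/* reset the CRQ here */
		break;
	case MY_REENABLE:
		/* re-enable the CRQ here */
		break;
	default:
		break;
	}
}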
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h
index 3a78755..e60916e 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.h
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /* ------------------------------------------------------------
  * ibmvscsi.h
  * (C) Copyright IBM Corporation 1994, 2003
@@ -5,21 +6,6 @@
  *          Santiago Leon (santil@us.ibm.com)
  *          Dave Boutcher (sleddog@us.ibm.com)
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
- * USA
- *
  * ------------------------------------------------------------
  * Emulation of a SCSI host adapter for Virtual I/O devices
  *
@@ -88,13 +74,19 @@
 	dma_addr_t iu_token;
 };
 
+enum ibmvscsi_host_action {
+	IBMVSCSI_HOST_ACTION_NONE = 0,
+	IBMVSCSI_HOST_ACTION_RESET,
+	IBMVSCSI_HOST_ACTION_REENABLE,
+	IBMVSCSI_HOST_ACTION_UNBLOCK,
+};
+
 /* all driver data associated with a host adapter */
 struct ibmvscsi_host_data {
 	struct list_head host_list;
 	atomic_t request_limit;
 	int client_migrated;
-	int reset_crq;
-	int reenable_crq;
+	enum ibmvscsi_host_action action;
 	struct device *dev;
 	struct event_pool pool;
 	struct crq_queue queue;
diff --git a/drivers/scsi/ibmvscsi_tgt/Makefile b/drivers/scsi/ibmvscsi_tgt/Makefile
index 0c060ce..cc7a825 100644
--- a/drivers/scsi/ibmvscsi_tgt/Makefile
+++ b/drivers/scsi/ibmvscsi_tgt/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 obj-$(CONFIG_SCSI_IBMVSCSIS)	+= ibmvscsis.o
 
 ibmvscsis-y := libsrp.o ibmvscsi_tgt.o
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index f42a619..a929fe7 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*******************************************************************************
  * IBM Virtual SCSI Target Driver
  * Copyright (C) 2003-2005 Dave Boutcher (boutcher@us.ibm.com) IBM Corp.
@@ -10,16 +11,6 @@
  * Authors: Bryant G. Ly <bryantly@linux.vnet.ibm.com>
  * Authors: Michael Cyr <mikecyr@linux.vnet.ibm.com>
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  ****************************************************************************/
 
 #define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
@@ -1590,6 +1581,7 @@
 	case H_PERMISSION:
 		if (connection_broken(vscsi))
 			flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
+		/* Fall through */
 	default:
 		dev_err(&vscsi->dev, "adapter_info: h_copy_rdma to client failed, rc %ld\n",
 			rc);
@@ -2266,7 +2258,6 @@
 	/*
 	 * Release the SCSI I_T Nexus to the emulated ibmvscsis Target Port
 	 */
-	target_wait_for_sess_cmds(se_sess);
 	target_remove_session(se_sess);
 	tport->ibmv_nexus = NULL;
 	kfree(nexus);
@@ -2502,8 +2493,10 @@
 		break;
 	case H_CLOSED:
 		vscsi->flags |= CLIENT_FAILED;
+		/* Fall through */
 	case H_DROPPED:
 		vscsi->flags |= RESPONSE_Q_DOWN;
+		/* Fall through */
 	case H_REMOTE_PARM:
 		dev_err(&vscsi->dev, "ping_response: h_send_crq failed, rc %ld\n",
 			rc);
@@ -3696,11 +3689,6 @@
 	return 0;
 }
 
-static char *ibmvscsis_get_fabric_name(void)
-{
-	return "ibmvscsis";
-}
-
 static char *ibmvscsis_get_fabric_wwn(struct se_portal_group *se_tpg)
 {
 	struct ibmvscsis_tport *tport =
@@ -3794,11 +3782,6 @@
 	return 0;
 }
 
-static int ibmvscsis_write_pending_status(struct se_cmd *se_cmd)
-{
-	return 0;
-}
-
 static void ibmvscsis_set_default_node_attrs(struct se_node_acl *nacl)
 {
 }
@@ -4045,9 +4028,8 @@
 
 static const struct target_core_fabric_ops ibmvscsis_ops = {
 	.module				= THIS_MODULE,
-	.name				= "ibmvscsis",
+	.fabric_name			= "ibmvscsis",
 	.max_data_sg_nents		= MAX_TXU / PAGE_SIZE,
-	.get_fabric_name		= ibmvscsis_get_fabric_name,
 	.tpg_get_wwn			= ibmvscsis_get_fabric_wwn,
 	.tpg_get_tag			= ibmvscsis_get_tag,
 	.tpg_get_default_depth		= ibmvscsis_get_default_depth,
@@ -4060,7 +4042,6 @@
 	.release_cmd			= ibmvscsis_release_cmd,
 	.sess_get_index			= ibmvscsis_sess_get_index,
 	.write_pending			= ibmvscsis_write_pending,
-	.write_pending_status		= ibmvscsis_write_pending_status,
 	.set_default_node_attributes	= ibmvscsis_set_default_node_attrs,
 	.get_cmd_state			= ibmvscsis_get_cmd_state,
 	.queue_data_in			= ibmvscsis_queue_data_in,
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
index cc96c27..7ae074e 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*******************************************************************************
  * IBM Virtual SCSI Target Driver
  * Copyright (C) 2003-2005 Dave Boutcher (boutcher@us.ibm.com) IBM Corp.
@@ -11,16 +12,6 @@
  * Authors: Bryant G. Ly <bryantly@linux.vnet.ibm.com>
  * Authors: Michael Cyr <mikecyr@linux.vnet.ibm.com>
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  ****************************************************************************/
 
 #ifndef __H_IBMVSCSI_TGT
diff --git a/drivers/scsi/ibmvscsi_tgt/libsrp.c b/drivers/scsi/ibmvscsi_tgt/libsrp.c
index 5a4cc28..8a0e28a 100644
--- a/drivers/scsi/ibmvscsi_tgt/libsrp.c
+++ b/drivers/scsi/ibmvscsi_tgt/libsrp.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*******************************************************************************
  * SCSI RDMA Protocol lib functions
  *
  * Copyright (C) 2006 FUJITA Tomonori <tomof@acm.org>
  * Copyright (C) 2016 Bryant G. Ly <bryantly@linux.vnet.ibm.com> IBM Corp.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  ***********************************************************************/
 
 #define pr_fmt(fmt)	"libsrp: " fmt
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index 8c6627b..2519fb7 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /* imm.c   --  low level driver for the IOMEGA MatchMaker
  * parallel port SCSI host adapter.
  * 
@@ -686,7 +687,7 @@
 		if (cmd->SCp.buffer && !cmd->SCp.this_residual) {
 			/* if scatter/gather, advance to the next segment */
 			if (cmd->SCp.buffers_residual--) {
-				cmd->SCp.buffer++;
+				cmd->SCp.buffer = sg_next(cmd->SCp.buffer);
 				cmd->SCp.this_residual =
 				    cmd->SCp.buffer->length;
 				cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
@@ -796,21 +797,21 @@
 			return 0;
 		}
 		return 1;	/* wait until imm_wakeup claims parport */
-		/* Phase 1 - Connected */
-	case 1:
+
+	case 1:		/* Phase 1 - Connected */
 		imm_connect(dev, CONNECT_EPP_MAYBE);
 		cmd->SCp.phase++;
+		/* fall through */
 
-		/* Phase 2 - We are now talking to the scsi bus */
-	case 2:
+	case 2:		/* Phase 2 - We are now talking to the scsi bus */
 		if (!imm_select(dev, scmd_id(cmd))) {
 			imm_fail(dev, DID_NO_CONNECT);
 			return 0;
 		}
 		cmd->SCp.phase++;
+		/* fall through */
 
-		/* Phase 3 - Ready to accept a command */
-	case 3:
+	case 3:		/* Phase 3 - Ready to accept a command */
 		w_ctr(ppb, 0x0c);
 		if (!(r_str(ppb) & 0x80))
 			return 1;
@@ -818,9 +819,9 @@
 		if (!imm_send_command(cmd))
 			return 0;
 		cmd->SCp.phase++;
+		/* fall through */
 
-		/* Phase 4 - Setup scatter/gather buffers */
-	case 4:
+	case 4:		/* Phase 4 - Setup scatter/gather buffers */
 		if (scsi_bufflen(cmd)) {
 			cmd->SCp.buffer = scsi_sglist(cmd);
 			cmd->SCp.this_residual = cmd->SCp.buffer->length;
@@ -834,8 +835,9 @@
 		cmd->SCp.phase++;
 		if (cmd->SCp.this_residual & 0x01)
 			cmd->SCp.this_residual++;
-		/* Phase 5 - Pre-Data transfer stage */
-	case 5:
+		/* fall through */
+
+	case 5:		/* Phase 5 - Pre-Data transfer stage */
 		/* Spin lock for BUSY */
 		w_ctr(ppb, 0x0c);
 		if (!(r_str(ppb) & 0x80))
@@ -850,9 +852,9 @@
 			if (imm_negotiate(dev))
 				return 0;
 		cmd->SCp.phase++;
+		/* fall through */
 
-		/* Phase 6 - Data transfer stage */
-	case 6:
+	case 6:		/* Phase 6 - Data transfer stage */
 		/* Spin lock for BUSY */
 		w_ctr(ppb, 0x0c);
 		if (!(r_str(ppb) & 0x80))
@@ -866,9 +868,9 @@
 				return 1;
 		}
 		cmd->SCp.phase++;
+		/* fall through */
 
-		/* Phase 7 - Post data transfer stage */
-	case 7:
+	case 7:		/* Phase 7 - Post data transfer stage */
 		if ((dev->dp) && (dev->rd)) {
 			if ((dev->mode == IMM_NIBBLE) || (dev->mode == IMM_PS2)) {
 				w_ctr(ppb, 0x4);
@@ -878,9 +880,9 @@
 			}
 		}
 		cmd->SCp.phase++;
+		/* fall through */
 
-		/* Phase 8 - Read status/message */
-	case 8:
+	case 8:		/* Phase 8 - Read status/message */
 		/* Check for data overrun */
 		if (imm_wait(dev) != (unsigned char) 0xb8) {
 			imm_fail(dev, DID_ERROR);
@@ -1110,7 +1112,6 @@
 	.bios_param		= imm_biosparam,
 	.this_id		= 7,
 	.sg_tablesize		= SG_ALL,
-	.use_clustering		= ENABLE_CLUSTERING,
 	.can_queue		= 1,
 	.slave_alloc		= imm_adjust_queue,
 };
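
imm.c stops stepping through the scatterlist with cmd->SCp.buffer++, which only works while the entries form one flat array, and uses sg_next(), which also follows chain entries. The chain-safe walk, for illustration:

#include <linux/scatterlist.h>

/*
 * Sum the length of every segment; plain sg++ would run off the end
 * of an allocation chunk when it hits a chain entry.
 */
static size_t my_total_len(struct scatterlist *sgl)
{
	struct scatterlist *sg;
	size_t total = 0;

	for (sg = sgl; sg; sg = sg_next(sg))
		total += sg->length;
	return total;
}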
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index 7a91cf3..41fd64c 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /**************************************************************************
  * Initio 9100 device driver for Linux.
  *
@@ -6,21 +7,6 @@
  * Copyright (c) 2004 Christoph Hellwig <hch@lst.de>
  * Copyright (c) 2007 Red Hat
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING.  If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- *
  *************************************************************************
  *
  * DESCRIPTION:
@@ -2817,7 +2803,6 @@
 	.can_queue		= MAX_TARGETS * i91u_MAXQUEUE,
 	.this_id		= 1,
 	.sg_tablesize		= SG_ALL,
-	.use_clustering		= ENABLE_CLUSTERING,
 };
 
 static int initio_probe_one(struct pci_dev *pdev,
@@ -2840,7 +2825,7 @@
 		reg = 0;
 	bios_seg = (bios_seg << 8) + ((u16) ((reg & 0xFF00) >> 8));
 
-	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
 		printk(KERN_WARNING  "i91u: Could not set 32 bit DMA mask\n");
 		error = -ENODEV;
 		goto out_disable_device;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 271990b..079c04b 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -1,24 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * ipr.c -- driver for IBM Power Linux RAID adapters
  *
  * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
  *
  * Copyright (C) 2003, 2004 IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
  */
 
 /*
@@ -3915,22 +3901,23 @@
 				 u8 *buffer, u32 len)
 {
 	int bsize_elem, i, result = 0;
-	struct scatterlist *scatterlist;
+	struct scatterlist *sg;
 	void *kaddr;
 
 	/* Determine the actual number of bytes per element */
 	bsize_elem = PAGE_SIZE * (1 << sglist->order);
 
-	scatterlist = sglist->scatterlist;
+	sg = sglist->scatterlist;
 
-	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
-		struct page *page = sg_page(&scatterlist[i]);
+	for (i = 0; i < (len / bsize_elem); i++, sg = sg_next(sg),
+			buffer += bsize_elem) {
+		struct page *page = sg_page(sg);
 
 		kaddr = kmap(page);
 		memcpy(kaddr, buffer, bsize_elem);
 		kunmap(page);
 
-		scatterlist[i].length = bsize_elem;
+		sg->length = bsize_elem;
 
 		if (result != 0) {
 			ipr_trace;
@@ -3939,13 +3926,13 @@
 	}
 
 	if (len % bsize_elem) {
-		struct page *page = sg_page(&scatterlist[i]);
+		struct page *page = sg_page(sg);
 
 		kaddr = kmap(page);
 		memcpy(kaddr, buffer, len % bsize_elem);
 		kunmap(page);
 
-		scatterlist[i].length = len % bsize_elem;
+		sg->length = len % bsize_elem;
 	}
 
 	sglist->buffer_len = len;
@@ -3966,6 +3953,7 @@
 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
 	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
 	struct scatterlist *scatterlist = sglist->scatterlist;
+	struct scatterlist *sg;
 	int i;
 
 	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
@@ -3974,10 +3962,10 @@
 
 	ioarcb->ioadl_len =
 		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
-	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
+	for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) {
 		ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
-		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
-		ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
+		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
+		ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
 	}
 
 	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
@@ -3997,6 +3985,7 @@
 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
 	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
 	struct scatterlist *scatterlist = sglist->scatterlist;
+	struct scatterlist *sg;
 	int i;
 
 	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
@@ -4006,11 +3995,11 @@
 	ioarcb->ioadl_len =
 		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
 
-	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
+	for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) {
 		ioadl[i].flags_and_data_len =
-			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
+			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(sg));
 		ioadl[i].address =
-			cpu_to_be32(sg_dma_address(&scatterlist[i]));
+			cpu_to_be32(sg_dma_address(sg));
 	}
 
 	ioadl[i-1].flags_and_data_len |=
@@ -6696,7 +6685,8 @@
  * Return value:
  * 	0 on success / other on failure
  **/
-static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
+static int ipr_ioctl(struct scsi_device *sdev, unsigned int cmd,
+		     void __user *arg)
 {
 	struct ipr_resource_entry *res;
 
@@ -6754,7 +6744,6 @@
 	.sg_tablesize = IPR_MAX_SGLIST,
 	.max_sectors = IPR_IOA_MAX_SECTORS,
 	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
-	.use_clustering = ENABLE_CLUSTERING,
 	.shost_attrs = ipr_ioa_attrs,
 	.sdev_attrs = ipr_dev_attrs,
 	.proc_name = IPR_NAME,
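
ipr's ioadl builders get the same scatterlist treatment: indexing &scatterlist[i] assumes contiguous entries, while for_each_sg() follows chain links. Sketched with a placeholder descriptor type:

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

struct my_desc {
	u64 addr;
	u32 len;
};

/* Fill one descriptor per DMA-mapped segment, chain-safe. */
static void my_fill_descs(struct scatterlist *sgl, int nents,
			  struct my_desc *descs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		descs[i].addr = sg_dma_address(sg);
		descs[i].len = sg_dma_len(sg);
	}
}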
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index f6baa23..a67baeb 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * ipr.h -- driver for IBM Power Linux RAID adapters
  *
@@ -5,20 +6,6 @@
  *
  * Copyright (C) 2003, 2004 IBM Corporation
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
  * Alan Cox <alan@lxorguk.ukuu.org.uk> - Removed several careless u32/dma_addr_t errors
  *				that broke 64bit platforms.
  */
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index bd6ac6b..e8bc8d3 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -208,7 +208,7 @@
 
 #define IPS_DMA_DIR(scb) ((!scb->scsi_cmd || ips_is_passthru(scb->scsi_cmd) || \
                          DMA_NONE == scb->scsi_cmd->sc_data_direction) ? \
-                         PCI_DMA_BIDIRECTIONAL : \
+                         DMA_BIDIRECTIONAL : \
                          scb->scsi_cmd->sc_data_direction)
 
 #ifdef IPS_DEBUG
@@ -365,7 +365,6 @@
 	.this_id		= -1,
 	.sg_tablesize		= IPS_MAX_SG,
 	.cmd_per_lun		= 3,
-	.use_clustering		= ENABLE_CLUSTERING,
 	.no_write_same		= 1,
 };
 
@@ -1529,11 +1528,12 @@
 	if (ha->ioctl_data && length <= ha->ioctl_len)
 		return 0;
 	/* there is no buffer or it's not big enough, allocate a new one */
-	bigger_buf = pci_alloc_consistent(ha->pcidev, length, &dma_busaddr);
+	bigger_buf = dma_alloc_coherent(&ha->pcidev->dev, length, &dma_busaddr,
+			GFP_KERNEL);
 	if (bigger_buf) {
 		/* free the old memory */
-		pci_free_consistent(ha->pcidev, ha->ioctl_len, ha->ioctl_data,
-				    ha->ioctl_busaddr);
+		dma_free_coherent(&ha->pcidev->dev, ha->ioctl_len,
+				  ha->ioctl_data, ha->ioctl_busaddr);
 		/* use the new memory */
 		ha->ioctl_data = (char *) bigger_buf;
 		ha->ioctl_len = length;
@@ -1678,9 +1678,8 @@
 		} else if (!ha->flash_data) {
 			datasize = pt->CoppCP.cmd.flashfw.total_packets *
 			    pt->CoppCP.cmd.flashfw.count;
-			ha->flash_data = pci_alloc_consistent(ha->pcidev,
-					                      datasize,
-							      &ha->flash_busaddr);
+			ha->flash_data = dma_alloc_coherent(&ha->pcidev->dev,
+					datasize, &ha->flash_busaddr, GFP_KERNEL);
 			if (!ha->flash_data){
 				printk(KERN_WARNING "Unable to allocate a flash buffer\n");
 				return IPS_FAILURE;
@@ -1801,13 +1800,13 @@
 	}
 	if (IPS_USE_ENH_SGLIST(ha)) {
 		scb->sg_list.enh_list[indx].address_lo =
-		    cpu_to_le32(pci_dma_lo32(busaddr));
+		    cpu_to_le32(lower_32_bits(busaddr));
 		scb->sg_list.enh_list[indx].address_hi =
-		    cpu_to_le32(pci_dma_hi32(busaddr));
+		    cpu_to_le32(upper_32_bits(busaddr));
 		scb->sg_list.enh_list[indx].length = cpu_to_le32(e_len);
 	} else {
 		scb->sg_list.std_list[indx].address =
-		    cpu_to_le32(pci_dma_lo32(busaddr));
+		    cpu_to_le32(lower_32_bits(busaddr));
 		scb->sg_list.std_list[indx].length = cpu_to_le32(e_len);
 	}
 
@@ -1858,7 +1857,7 @@
 
 	scb->data_len = ha->flash_datasize;
 	scb->data_busaddr =
-	    pci_map_single(ha->pcidev, ha->flash_data, scb->data_len,
+	    dma_map_single(&ha->pcidev->dev, ha->flash_data, scb->data_len,
 			   IPS_DMA_DIR(scb));
 	scb->flags |= IPS_SCB_MAP_SINGLE;
 	scb->cmd.flashfw.command_id = IPS_COMMAND_ID(ha, scb);
@@ -1880,8 +1879,8 @@
 	if (ha->flash_data == ips_FlashData)
 		test_and_clear_bit(0, &ips_FlashDataInUse);
 	else if (ha->flash_data)
-		pci_free_consistent(ha->pcidev, ha->flash_len, ha->flash_data,
-				    ha->flash_busaddr);
+		dma_free_coherent(&ha->pcidev->dev, ha->flash_len,
+				  ha->flash_data, ha->flash_busaddr);
 	ha->flash_data = NULL;
 }
 
@@ -3485,6 +3484,7 @@
 
 		case START_STOP:
 			scb->scsi_cmd->result = DID_OK << 16;
+			break;
 
 		case TEST_UNIT_READY:
 		case INQUIRY:
@@ -4212,7 +4212,7 @@
 
 	if (ha) {
 		if (ha->enq) {
-			pci_free_consistent(ha->pcidev, sizeof(IPS_ENQ),
+			dma_free_coherent(&ha->pcidev->dev, sizeof(IPS_ENQ),
 					    ha->enq, ha->enq_busaddr);
 			ha->enq = NULL;
 		}
@@ -4221,7 +4221,7 @@
 		ha->conf = NULL;
 
 		if (ha->adapt) {
-			pci_free_consistent(ha->pcidev,
+			dma_free_coherent(&ha->pcidev->dev,
 					    sizeof (IPS_ADAPTER) +
 					    sizeof (IPS_IO_CMD), ha->adapt,
 					    ha->adapt->hw_status_start);
@@ -4229,7 +4229,7 @@
 		}
 
 		if (ha->logical_drive_info) {
-			pci_free_consistent(ha->pcidev,
+			dma_free_coherent(&ha->pcidev->dev,
 					    sizeof (IPS_LD_INFO),
 					    ha->logical_drive_info,
 					    ha->logical_drive_info_dma_addr);
@@ -4243,7 +4243,7 @@
 		ha->subsys = NULL;
 
 		if (ha->ioctl_data) {
-			pci_free_consistent(ha->pcidev, ha->ioctl_len,
+			dma_free_coherent(&ha->pcidev->dev, ha->ioctl_len,
 					    ha->ioctl_data, ha->ioctl_busaddr);
 			ha->ioctl_data = NULL;
 			ha->ioctl_datasize = 0;
@@ -4276,11 +4276,11 @@
 ips_deallocatescbs(ips_ha_t * ha, int cmds)
 {
 	if (ha->scbs) {
-		pci_free_consistent(ha->pcidev,
+		dma_free_coherent(&ha->pcidev->dev,
 				    IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * cmds,
 				    ha->scbs->sg_list.list,
 				    ha->scbs->sg_busaddr);
-		pci_free_consistent(ha->pcidev, sizeof (ips_scb_t) * cmds,
+		dma_free_coherent(&ha->pcidev->dev, sizeof (ips_scb_t) * cmds,
 				    ha->scbs, ha->scbs->scb_busaddr);
 		ha->scbs = NULL;
 	}			/* end if */
@@ -4307,17 +4307,16 @@
 	METHOD_TRACE("ips_allocatescbs", 1);
 
 	/* Allocate memory for the SCBs */
-	ha->scbs =
-	    pci_alloc_consistent(ha->pcidev, ha->max_cmds * sizeof (ips_scb_t),
-				 &command_dma);
+	ha->scbs = dma_alloc_coherent(&ha->pcidev->dev,
+			ha->max_cmds * sizeof (ips_scb_t),
+			&command_dma, GFP_KERNEL);
 	if (ha->scbs == NULL)
 		return 0;
-	ips_sg.list =
-	    pci_alloc_consistent(ha->pcidev,
-				 IPS_SGLIST_SIZE(ha) * IPS_MAX_SG *
-				 ha->max_cmds, &sg_dma);
+	ips_sg.list = dma_alloc_coherent(&ha->pcidev->dev,
+			IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * ha->max_cmds,
+			&sg_dma, GFP_KERNEL);
 	if (ips_sg.list == NULL) {
-		pci_free_consistent(ha->pcidev,
+		dma_free_coherent(&ha->pcidev->dev,
 				    ha->max_cmds * sizeof (ips_scb_t), ha->scbs,
 				    command_dma);
 		return 0;
@@ -4446,8 +4445,8 @@
 	if (scb->flags & IPS_SCB_MAP_SG)
                 scsi_dma_unmap(scb->scsi_cmd);
 	else if (scb->flags & IPS_SCB_MAP_SINGLE)
-		pci_unmap_single(ha->pcidev, scb->data_busaddr, scb->data_len,
-				 IPS_DMA_DIR(scb));
+		dma_unmap_single(&ha->pcidev->dev, scb->data_busaddr,
+				 scb->data_len, IPS_DMA_DIR(scb));
 
 	/* check to make sure this is not our "special" scb */
 	if (IPS_COMMAND_ID(ha, scb) < (ha->max_cmds - 1)) {
@@ -4559,7 +4558,8 @@
 	dma_addr_t command_dma;
 
 	/* Create a usable SCB */
-	scb = pci_alloc_consistent(ha->pcidev, sizeof(ips_scb_t), &command_dma);
+	scb = dma_alloc_coherent(&ha->pcidev->dev, sizeof(ips_scb_t),
+			&command_dma, GFP_KERNEL);
 	if (scb) {
 	    memset(scb, 0, sizeof(ips_scb_t));
 	    ips_init_scb(ha, scb);
@@ -4594,7 +4594,7 @@
 	/* Now RESET and INIT the adapter */
 	(*ha->func.reset) (ha);
 
-	pci_free_consistent(ha->pcidev, sizeof(ips_scb_t), scb, command_dma);
+	dma_free_coherent(&ha->pcidev->dev, sizeof(ips_scb_t), scb, command_dma);
 	return;
 }
 
@@ -6677,7 +6677,6 @@
 	sh->sg_tablesize = sh->hostt->sg_tablesize;
 	sh->can_queue = sh->hostt->can_queue;
 	sh->cmd_per_lun = sh->hostt->cmd_per_lun;
-	sh->use_clustering = sh->hostt->use_clustering;
 	sh->max_sectors = 128;
 
 	sh->max_id = ha->ntargets;
@@ -6925,30 +6924,31 @@
 	 * it!  Also, don't use 64bit addressing if dma addresses
 	 * are guaranteed to be < 4G.
 	 */
-	if (IPS_ENABLE_DMA64 && IPS_HAS_ENH_SGLIST(ha) &&
-	    !pci_set_dma_mask(ha->pcidev, DMA_BIT_MASK(64))) {
+	if (sizeof(dma_addr_t) > 4 && IPS_HAS_ENH_SGLIST(ha) &&
+	    !dma_set_mask(&ha->pcidev->dev, DMA_BIT_MASK(64))) {
 		(ha)->flags |= IPS_HA_ENH_SG;
 	} else {
-		if (pci_set_dma_mask(ha->pcidev, DMA_BIT_MASK(32)) != 0) {
+		if (dma_set_mask(&ha->pcidev->dev, DMA_BIT_MASK(32)) != 0) {
 			printk(KERN_WARNING "Unable to set DMA Mask\n");
 			return ips_abort_init(ha, index);
 		}
 	}
 	if(ips_cd_boot && !ips_FlashData){
-		ips_FlashData = pci_alloc_consistent(pci_dev, PAGE_SIZE << 7,
-						     &ips_flashbusaddr);
+		ips_FlashData = dma_alloc_coherent(&pci_dev->dev,
+				PAGE_SIZE << 7, &ips_flashbusaddr, GFP_KERNEL);
 	}
 
-	ha->enq = pci_alloc_consistent(pci_dev, sizeof (IPS_ENQ),
-				       &ha->enq_busaddr);
+	ha->enq = dma_alloc_coherent(&pci_dev->dev, sizeof (IPS_ENQ),
+			&ha->enq_busaddr, GFP_KERNEL);
 	if (!ha->enq) {
 		IPS_PRINTK(KERN_WARNING, pci_dev,
 			   "Unable to allocate host inquiry structure\n");
 		return ips_abort_init(ha, index);
 	}
 
-	ha->adapt = pci_alloc_consistent(pci_dev, sizeof (IPS_ADAPTER) +
-					 sizeof (IPS_IO_CMD), &dma_address);
+	ha->adapt = dma_alloc_coherent(&pci_dev->dev,
+			sizeof (IPS_ADAPTER) + sizeof (IPS_IO_CMD),
+			&dma_address, GFP_KERNEL);
 	if (!ha->adapt) {
 		IPS_PRINTK(KERN_WARNING, pci_dev,
 			   "Unable to allocate host adapt & dummy structures\n");
@@ -6959,7 +6959,8 @@
 
 
 
-	ha->logical_drive_info = pci_alloc_consistent(pci_dev, sizeof (IPS_LD_INFO), &dma_address);
+	ha->logical_drive_info = dma_alloc_coherent(&pci_dev->dev,
+			sizeof (IPS_LD_INFO), &dma_address, GFP_KERNEL);
 	if (!ha->logical_drive_info) {
 		IPS_PRINTK(KERN_WARNING, pci_dev,
 			   "Unable to allocate logical drive info structure\n");
@@ -6997,8 +6998,8 @@
 	if (ips_ioctlsize < PAGE_SIZE)
 		ips_ioctlsize = PAGE_SIZE;
 
-	ha->ioctl_data = pci_alloc_consistent(pci_dev, ips_ioctlsize,
-					      &ha->ioctl_busaddr);
+	ha->ioctl_data = dma_alloc_coherent(&pci_dev->dev, ips_ioctlsize,
+			&ha->ioctl_busaddr, GFP_KERNEL);
 	ha->ioctl_len = ips_ioctlsize;
 	if (!ha->ioctl_data) {
 		IPS_PRINTK(KERN_WARNING, pci_dev,
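
ips drops its private pci_dma_lo32()/pci_dma_hi32() macros in favour of the generic lower_32_bits()/upper_32_bits() helpers, and replaces the IPS_ENABLE_DMA64 config guess with a sizeof(dma_addr_t) check. Equivalent usage, sketched:

#include <linux/kernel.h>	/* lower_32_bits() / upper_32_bits() */
#include <linux/types.h>

static void my_split_dma(dma_addr_t busaddr, u32 *lo, u32 *hi)
{
	*lo = lower_32_bits(busaddr);	/* busaddr & 0xffffffff */
	*hi = upper_32_bits(busaddr);	/* 0 when dma_addr_t is 32-bit */
}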
diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
index db54617..6c0678f 100644
--- a/drivers/scsi/ips.h
+++ b/drivers/scsi/ips.h
@@ -96,15 +96,6 @@
       #define __iomem
    #endif
 
-   #define pci_dma_hi32(a)         ((a >> 16) >> 16)
-   #define pci_dma_lo32(a)         (a & 0xffffffff)
-
-   #if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G)
-      #define IPS_ENABLE_DMA64        (1)
-   #else
-      #define IPS_ENABLE_DMA64        (0)
-   #endif
-
    /*
     * Adapter address map equates
     */
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index 1ee3868..7b5deae 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -2717,9 +2717,9 @@
  *    the task management request.
  * @task_request: the handle to the task request object to start.
  */
-enum sci_task_status sci_controller_start_task(struct isci_host *ihost,
-					       struct isci_remote_device *idev,
-					       struct isci_request *ireq)
+enum sci_status sci_controller_start_task(struct isci_host *ihost,
+					  struct isci_remote_device *idev,
+					  struct isci_request *ireq)
 {
 	enum sci_status status;
 
@@ -2728,7 +2728,7 @@
 			 "%s: SCIC Controller starting task from invalid "
 			 "state\n",
 			 __func__);
-		return SCI_TASK_FAILURE_INVALID_STATE;
+		return SCI_FAILURE_INVALID_STATE;
 	}
 
 	status = sci_remote_device_start_task(ihost, idev, ireq);
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index b353992..6bc3f02 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -489,7 +489,7 @@
 	struct isci_remote_device *idev,
 	struct isci_request *ireq);
 
-enum sci_task_status sci_controller_start_task(
+enum sci_status sci_controller_start_task(
 	struct isci_host *ihost,
 	struct isci_remote_device *idev,
 	struct isci_request *ireq);
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 08c7b1e..1727d0c 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -163,7 +163,6 @@
 	.this_id			= -1,
 	.sg_tablesize			= SG_ALL,
 	.max_sectors			= SCSI_DEFAULT_MAX_SECTORS,
-	.use_clustering			= ENABLE_CLUSTERING,
 	.eh_abort_handler		= sas_eh_abort_handler,
 	.eh_device_reset_handler        = sas_eh_device_reset_handler,
 	.eh_target_reset_handler        = sas_eh_target_reset_handler,
@@ -304,21 +303,10 @@
 
 	pci_set_master(pdev);
 
-	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
-	if (err) {
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-		if (err)
-			return err;
-	}
-
-	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-	if (err) {
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-		if (err)
-			return err;
-	}
-
-	return 0;
+	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (err)
+		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	return err;
 }
 
 static int num_controllers(struct pci_dev *pdev)
@@ -588,6 +576,13 @@
 	shost->max_lun = ~0;
 	shost->max_cmd_len = MAX_COMMAND_SIZE;
 
+	/* turn on DIF support */
+	scsi_host_set_prot(shost,
+			   SHOST_DIF_TYPE1_PROTECTION |
+			   SHOST_DIF_TYPE2_PROTECTION |
+			   SHOST_DIF_TYPE3_PROTECTION);
+	scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
+
 	err = scsi_add_host(shost, &pdev->dev);
 	if (err)
 		goto err_shost;
@@ -675,13 +670,6 @@
 			goto err_host_alloc;
 		}
 		pci_info->hosts[i] = h;
-
-		/* turn on DIF support */
-		scsi_host_set_prot(to_shost(h),
-				   SHOST_DIF_TYPE1_PROTECTION |
-				   SHOST_DIF_TYPE2_PROTECTION |
-				   SHOST_DIF_TYPE3_PROTECTION);
-		scsi_host_set_guard(to_shost(h), SHOST_DIX_GUARD_CRC);
 	}
 
 	err = isci_setup_interrupts(pdev);
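
isci folds four pci_set_dma_mask()/pci_set_consistent_dma_mask() calls into dma_set_mask_and_coherent(), which sets the streaming and coherent masks in one call. Sketch of the fallback, for illustration:

#include <linux/dma-mapping.h>

static int my_init_dma(struct device *dev)
{
	int err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));

	if (err)	/* 64-bit not supported: settle for 32-bit */
		err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	return err;
}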
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
index 1deca8c..7f9b3f2 100644
--- a/drivers/scsi/isci/phy.c
+++ b/drivers/scsi/isci/phy.c
@@ -778,6 +778,7 @@
 			break;
 		case SCU_EVENT_LINK_FAILURE:
 			scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+			/* fall through */
 		case SCU_EVENT_HARD_RESET_RECEIVED:
 			/* Start the oob/sn state machine over again */
 			sci_change_state(&iphy->sm, SCI_PHY_STARTING);
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index cc51f38..49aa4e6 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -310,7 +310,7 @@
 		/* Kill all outstanding requests for the device. */
 		sci_remote_device_terminate_requests(idev);
 
-		/* Fall through into the default case... */
+		/* Fall through - into the default case... */
 	default:
 		clear_bit(IDEV_IO_READY, &idev->flags);
 		break;
@@ -593,7 +593,7 @@
 
 			break;
 		}
-	/* Else, fall through and treat as unhandled... */
+		/* fall through - and treat as unhandled... */
 	default:
 		dev_dbg(scirdev_to_dev(idev),
 			"%s: device: %p event code: %x: %s\n",
@@ -1087,7 +1087,7 @@
 
 	if (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) {
 		sci_change_state(&idev->sm, SCI_STP_DEV_IDLE);
-	} else if (dev_is_expander(dev)) {
+	} else if (dev_is_expander(dev->dev_type)) {
 		sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE);
 	} else
 		isci_remote_device_ready(ihost, idev);
@@ -1478,7 +1478,7 @@
 	struct domain_device *dev = idev->domain_dev;
 	enum sci_status status;
 
-	if (dev->parent && dev_is_expander(dev->parent))
+	if (dev->parent && dev_is_expander(dev->parent->dev_type))
 		status = sci_remote_device_ea_construct(iport, idev);
 	else
 		status = sci_remote_device_da_construct(iport, idev);
diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h
index 47a013f..3ad681c 100644
--- a/drivers/scsi/isci/remote_device.h
+++ b/drivers/scsi/isci/remote_device.h
@@ -295,11 +295,6 @@
 	return idev;
 }
 
-static inline bool dev_is_expander(struct domain_device *dev)
-{
-	return dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE;
-}
-
 static inline void sci_remote_device_decrement_request_count(struct isci_remote_device *idev)
 {
 	/* XXX delete this voodoo when converting to the top-level device
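
The driver-private dev_is_expander() is removed because libsas now provides an equivalent helper keyed on the enum sas_device_type, which is why every caller above now passes dev->dev_type instead of the device pointer. The shared helper's likely shape (the authoritative definition lives in include/scsi/libsas.h):

	static inline bool dev_is_expander(enum sas_device_type type)
	{
		return type == SAS_EDGE_EXPANDER_DEVICE ||
		       type == SAS_FANOUT_EXPANDER_DEVICE;
	}
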
diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c
index e3f2a53..474a434 100644
--- a/drivers/scsi/isci/remote_node_context.c
+++ b/drivers/scsi/isci/remote_node_context.c
@@ -601,9 +601,9 @@
 				 __func__, sci_rnc);
 			return SCI_FAILURE_INVALID_STATE;
 		}
-		/* Fall through and handle like SCI_RNC_POSTING */
+		/* Fall through - and handle like SCI_RNC_POSTING */
 	case SCI_RNC_RESUMING:
-		/* Fall through and handle like SCI_RNC_POSTING */
+		/* Fall through - and handle like SCI_RNC_POSTING */
 	case SCI_RNC_POSTING:
 		/* Set the destination state to AWAIT - this signals the
 		 * entry into the SCI_RNC_READY state that a suspension
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index ed197bc..343d24c 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -224,7 +224,7 @@
 	idev = ireq->target_device;
 	iport = idev->owning_port;
 
-	/* Fill in the TC with the its required data */
+	/* Fill in the TC with its required data */
 	task_context->abort = 0;
 	task_context->priority = 0;
 	task_context->initiator_request = 1;
@@ -506,7 +506,7 @@
 	idev = ireq->target_device;
 	iport = idev->owning_port;
 
-	/* Fill in the TC with the its required data */
+	/* Fill in the TC with its required data */
 	task_context->abort = 0;
 	task_context->priority = SCU_TASK_PRIORITY_NORMAL;
 	task_context->initiator_request = 1;
@@ -894,7 +894,7 @@
 		 * and don't wait for the task response.
 		 */
 		sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
-		/* Fall through and handle like ABORTING... */
+		/* Fall through - and handle like ABORTING... */
 	case SCI_REQ_ABORTING:
 		if (!isci_remote_device_is_safe_to_abort(ireq->target_device))
 			set_bit(IREQ_PENDING_ABORT, &ireq->flags);
@@ -1626,9 +1626,9 @@
 
 	if (status == SCI_SUCCESS) {
 		if (ireq->stp.rsp.status & ATA_ERR)
-			status = SCI_IO_FAILURE_RESPONSE_VALID;
+			status = SCI_FAILURE_IO_RESPONSE_VALID;
 	} else {
-		status = SCI_IO_FAILURE_RESPONSE_VALID;
+		status = SCI_FAILURE_IO_RESPONSE_VALID;
 	}
 
 	if (status != SCI_SUCCESS) {
@@ -3101,7 +3101,7 @@
 		/* pass */;
 	else if (dev_is_sata(dev))
 		memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
-	else if (dev_is_expander(dev))
+	else if (dev_is_expander(dev->dev_type))
 		/* pass */;
 	else
 		return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
@@ -3235,7 +3235,7 @@
 	iport = idev->owning_port;
 
 	/*
-	 * Fill in the TC with the its required data
+	 * Fill in the TC with its required data
 	 * 00h
 	 */
 	task_context->priority = 0;
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 6dcaed0..26fa1a4 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -258,7 +258,7 @@
 				 struct isci_tmf *tmf, unsigned long timeout_ms)
 {
 	DECLARE_COMPLETION_ONSTACK(completion);
-	enum sci_task_status status = SCI_TASK_FAILURE;
+	enum sci_status status = SCI_FAILURE;
 	struct isci_request *ireq;
 	int ret = TMF_RESP_FUNC_FAILED;
 	unsigned long flags;
@@ -301,7 +301,7 @@
 	/* start the TMF io. */
 	status = sci_controller_start_task(ihost, idev, ireq);
 
-	if (status != SCI_TASK_SUCCESS) {
+	if (status != SCI_SUCCESS) {
 		dev_dbg(&ihost->pdev->dev,
 			 "%s: start_io failed - status = 0x%x, request = %p\n",
 			 __func__,
@@ -511,7 +511,7 @@
 		 "%s: dev = %p (%s%s), task = %p, old_request == %p\n",
 		 __func__, idev,
 		 (dev_is_sata(task->dev) ? "STP/SATA"
-					 : ((dev_is_expander(task->dev))
+					 : ((dev_is_expander(task->dev->dev_type))
 						? "SMP"
 						: "SSP")),
 		 ((idev) ? ((test_bit(IDEV_GONE, &idev->flags))
diff --git a/drivers/scsi/iscsi_boot_sysfs.c b/drivers/scsi/iscsi_boot_sysfs.c
index d453667..e4857b7 100644
--- a/drivers/scsi/iscsi_boot_sysfs.c
+++ b/drivers/scsi/iscsi_boot_sysfs.c
@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Export the iSCSI boot info to userland via sysfs.
  *
  * Copyright (C) 2010 Red Hat, Inc.  All rights reserved.
  * Copyright (C) 2010 Mike Christie
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License v2.0 as published by
- * the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
  */
 
 #include <linux/module.h>
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index b025a0b..7bedbe8 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * iSCSI Initiator over TCP/IP Data-Path
  *
@@ -7,16 +8,6 @@
  * Copyright (C) 2006 Red Hat, Inc.  All rights reserved.
  * maintained by open-iscsi@googlegroups.com
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published
- * by the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
  * See the file COPYING included with this distribution for more details.
  *
  * Credits:
@@ -44,6 +35,7 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_transport_iscsi.h>
+#include <trace/events/iscsi.h>
 
 #include "iscsi_tcp.h"
 
@@ -72,6 +64,9 @@
 			iscsi_conn_printk(KERN_INFO, _conn,	\
 					     "%s " dbg_fmt,	\
 					     __func__, ##arg);	\
+		iscsi_dbg_trace(trace_iscsi_dbg_sw_tcp,		\
+				&(_conn)->cls_conn->dev,	\
+				"%s " dbg_fmt, __func__, ##arg);\
 	} while (0);
 
 
@@ -514,7 +509,7 @@
 	if (!task->sc)
 		iscsi_sw_tcp_send_linear_data_prep(conn, task->data, count);
 	else {
-		struct scsi_data_buffer *sdb = scsi_out(task->sc);
+		struct scsi_data_buffer *sdb = &task->sc->sdb;
 
 		err = iscsi_sw_tcp_send_data_prep(conn, sdb->table.sgl,
 						  sdb->table.nents, offset,
@@ -800,7 +795,8 @@
 			return rc;
 
 		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
-						 &addr, param, buf);
+						 &addr,
+						 (enum iscsi_param)param, buf);
 	default:
 		return iscsi_host_get_param(shost, param, buf);
 	}
@@ -947,12 +943,6 @@
 	return 0;
 }
 
-static int iscsi_sw_tcp_slave_alloc(struct scsi_device *sdev)
-{
-	blk_queue_flag_set(QUEUE_FLAG_BIDI, sdev->request_queue);
-	return 0;
-}
-
 static int iscsi_sw_tcp_slave_configure(struct scsi_device *sdev)
 {
 	struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(sdev->host);
@@ -979,8 +969,7 @@
 	.eh_abort_handler       = iscsi_eh_abort,
 	.eh_device_reset_handler= iscsi_eh_device_reset,
 	.eh_target_reset_handler = iscsi_eh_recover_target,
-	.use_clustering         = DISABLE_CLUSTERING,
-	.slave_alloc            = iscsi_sw_tcp_slave_alloc,
+	.dma_boundary		= PAGE_SIZE - 1,
 	.slave_configure        = iscsi_sw_tcp_slave_configure,
 	.target_alloc		= iscsi_target_alloc,
 	.proc_name		= "iscsi_tcp",
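
Two template changes travel together here: DISABLE_CLUSTERING is gone from the midlayer, so a driver that needs every scatterlist segment confined to a single page now expresses that as a DMA boundary, and the slave_alloc hook disappears because the QUEUE_FLAG_BIDI it set is obsolete now that bidirectional SCSI commands have been removed. A sketch of the replacement field, other fields omitted:

	#include <scsi/scsi_host.h>

	/* dma_boundary = PAGE_SIZE - 1 prevents merging segments across a
	 * page boundary, the effect DISABLE_CLUSTERING used to have. */
	static struct scsi_host_template example_template = {
		.name		= "example",
		.dma_boundary	= PAGE_SIZE - 1,
	};
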
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index 06d42d0..7914531 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * iSCSI Initiator TCP Transport
  * Copyright (C) 2004 Dmitry Yusupov
@@ -6,16 +7,6 @@
  * Copyright (C) 2006 Red Hat, Inc.  All rights reserved.
  * maintained by open-iscsi@googlegroups.com
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published
- * by the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
  * See the file COPYING included with this distribution for more details.
  */
 
diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
index 6eb5ff3..7f683e4 100644
--- a/drivers/scsi/jazz_esp.c
+++ b/drivers/scsi/jazz_esp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /* jazz_esp.c: ESP front-end for MIPS JAZZ systems.
  *
  * Copyright (C) 2007 Thomas Bogendörfer (tsbogend@alpha.franken.de)
@@ -38,30 +39,6 @@
 	return *(volatile u8 *)(esp->regs + reg);
 }
 
-static dma_addr_t jazz_esp_map_single(struct esp *esp, void *buf,
-				      size_t sz, int dir)
-{
-	return dma_map_single(esp->dev, buf, sz, dir);
-}
-
-static int jazz_esp_map_sg(struct esp *esp, struct scatterlist *sg,
-				  int num_sg, int dir)
-{
-	return dma_map_sg(esp->dev, sg, num_sg, dir);
-}
-
-static void jazz_esp_unmap_single(struct esp *esp, dma_addr_t addr,
-				  size_t sz, int dir)
-{
-	dma_unmap_single(esp->dev, addr, sz, dir);
-}
-
-static void jazz_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
-			      int num_sg, int dir)
-{
-	dma_unmap_sg(esp->dev, sg, num_sg, dir);
-}
-
 static int jazz_esp_irq_pending(struct esp *esp)
 {
 	if (jazz_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR)
@@ -117,10 +94,6 @@
 static const struct esp_driver_ops jazz_esp_ops = {
 	.esp_write8	=	jazz_esp_write8,
 	.esp_read8	=	jazz_esp_read8,
-	.map_single	=	jazz_esp_map_single,
-	.map_sg		=	jazz_esp_map_sg,
-	.unmap_single	=	jazz_esp_unmap_single,
-	.unmap_sg	=	jazz_esp_unmap_sg,
 	.irq_pending	=	jazz_esp_irq_pending,
 	.reset_dma	=	jazz_esp_reset_dma,
 	.dma_drain	=	jazz_esp_dma_drain,
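
The four map/unmap callbacks can be deleted because the esp_scsi core now performs DMA mapping itself through the generic DMA API on esp->dev; front-ends only supply register access and controller-specific hooks, and scsi_esp_register() (changed below) takes the device from the esp structure rather than a second argument. Roughly what a front-end still provides, shown as a fragment using this file's accessors:

	/* Illustrative fragment; the named functions are the ones defined
	 * earlier in jazz_esp.c. No map_single/map_sg hooks are needed
	 * any more. */
	static const struct esp_driver_ops example_esp_ops = {
		.esp_write8	= jazz_esp_write8,
		.esp_read8	= jazz_esp_read8,
		.irq_pending	= jazz_esp_irq_pending,
		.reset_dma	= jazz_esp_reset_dma,
		.dma_drain	= jazz_esp_dma_drain,
	};
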
@@ -182,7 +155,7 @@
 
 	dev_set_drvdata(&dev->dev, esp);
 
-	err = scsi_esp_register(esp, &dev->dev);
+	err = scsi_esp_register(esp);
 	if (err)
 		goto fail_free_irq;
 
diff --git a/drivers/scsi/lasi700.c b/drivers/scsi/lasi700.c
index dc83927..abac2f3 100644
--- a/drivers/scsi/lasi700.c
+++ b/drivers/scsi/lasi700.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /* -*- mode: c; c-basic-offset: 8 -*- */
 
 /* PARISC LASI driver for the 53c700 chip
@@ -5,19 +6,6 @@
  * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com
 **-----------------------------------------------------------------------------
 **  
-**  This program is free software; you can redistribute it and/or modify
-**  it under the terms of the GNU General Public License as published by
-**  the Free Software Foundation; either version 2 of the License, or
-**  (at your option) any later version.
-**
-**  This program is distributed in the hope that it will be useful,
-**  but WITHOUT ANY WARRANTY; without even the implied warranty of
-**  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-**  GNU General Public License for more details.
-**
-**  You should have received a copy of the GNU General Public License
-**  along with this program; if not, write to the Free Software
-**  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 **
 **-----------------------------------------------------------------------------
  */
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index f969a71..9c5f7c9 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
  *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
  * Maintained at www.Open-FCoE.org
  */
 
diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c
index 6384a98..13a2e7c 100644
--- a/drivers/scsi/libfc/fc_elsct.c
+++ b/drivers/scsi/libfc/fc_elsct.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright(c) 2008 Intel Corporation. All rights reserved.
  *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
  * Maintained at www.Open-FCoE.org
  */
 
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 42bcf7f..52e8666 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -1,21 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright(c) 2007 Intel Corporation. All rights reserved.
  * Copyright(c) 2008 Red Hat, Inc.  All rights reserved.
  * Copyright(c) 2008 Mike Christie
  *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
  * Maintained at www.Open-FCoE.org
  */
 
@@ -2603,7 +2591,7 @@
 
 	/* lport lock ? */
 	if (!lport || lport->state == LPORT_ST_DISABLED) {
-		FC_LPORT_DBG(lport, "Receiving frames for an lport that "
+		FC_LIBFC_DBG("Receiving frames for an lport that "
 			     "has not been initialized correctly\n");
 		fc_frame_free(fp);
 		return;
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 4fae253..bf2cc96 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -1,21 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright(c) 2007 Intel Corporation. All rights reserved.
  * Copyright(c) 2008 Red Hat, Inc.  All rights reserved.
  * Copyright(c) 2008 Mike Christie
  *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
  * Maintained at www.Open-FCoE.org
  */
 
@@ -1872,7 +1860,6 @@
 	struct fc_lport *lport = shost_priv(shost);
 	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
 	struct fc_fcp_pkt *fsp;
-	struct fc_rport_libfc_priv *rpriv;
 	int rval;
 	int rc = 0;
 	struct fc_stats *stats;
@@ -1894,8 +1881,6 @@
 		goto out;
 	}
 
-	rpriv = rport->dd_data;
-
 	if (!fc_fcp_lport_queue_ready(lport)) {
 		if (lport->qfull) {
 			if (fc_fcp_can_queue_ramp_down(lport))
@@ -2295,8 +2280,7 @@
 
 void fc_destroy_fcp(void)
 {
-	if (scsi_pkt_cachep)
-		kmem_cache_destroy(scsi_pkt_cachep);
+	kmem_cache_destroy(scsi_pkt_cachep);
 }
 
 /**
diff --git a/drivers/scsi/libfc/fc_frame.c b/drivers/scsi/libfc/fc_frame.c
index 0382ac0..f3aefb2 100644
--- a/drivers/scsi/libfc/fc_frame.c
+++ b/drivers/scsi/libfc/fc_frame.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright(c) 2007 Intel Corporation. All rights reserved.
  *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
  * Maintained at www.Open-FCoE.org
  */
 
diff --git a/drivers/scsi/libfc/fc_libfc.c b/drivers/scsi/libfc/fc_libfc.c
index dbadbc8..19c4ab4 100644
--- a/drivers/scsi/libfc/fc_libfc.c
+++ b/drivers/scsi/libfc/fc_libfc.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright(c) 2009 Intel Corporation. All rights reserved.
  *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
  * Maintained at www.Open-FCoE.org
  */
 
diff --git a/drivers/scsi/libfc/fc_libfc.h b/drivers/scsi/libfc/fc_libfc.h
index b74189d..685e3bd 100644
--- a/drivers/scsi/libfc/fc_libfc.h
+++ b/drivers/scsi/libfc/fc_libfc.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright(c) 2009 Intel Corporation. All rights reserved.
  *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
  * Maintained at www.Open-FCoE.org
  */
 
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index be83590..684c5e3 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright(c) 2007 Intel Corporation. All rights reserved.
  *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
  * Maintained at www.Open-FCoE.org
  */
 
@@ -1726,14 +1714,14 @@
 	    fc_frame_payload_op(fp) != ELS_LS_ACC) {
 		FC_LPORT_DBG(lport, "FLOGI not accepted or bad response\n");
 		fc_lport_error(lport, fp);
-		goto err;
+		goto out;
 	}
 
 	flp = fc_frame_payload_get(fp, sizeof(*flp));
 	if (!flp) {
 		FC_LPORT_DBG(lport, "FLOGI bad response\n");
 		fc_lport_error(lport, fp);
-		goto err;
+		goto out;
 	}
 
 	mfs = ntohs(flp->fl_csp.sp_bb_data) &
@@ -1743,7 +1731,7 @@
 		FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, "
 			     "lport->mfs:%hu\n", mfs, lport->mfs);
 		fc_lport_error(lport, fp);
-		goto err;
+		goto out;
 	}
 
 	if (mfs <= lport->mfs) {
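
The three goto changes above are a frame-leak fix. In this response handler the error labels differ: "out" frees the received frame before unwinding, while "err" only releases the lock, so bailing out via "err" while still holding fp leaked it. The assumed epilogue layout, sketched from context:

	out:
		fc_frame_free(fp);	/* paths that own fp must enter here */
	err:
		mutex_unlock(&lport->lp_mutex);
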
diff --git a/drivers/scsi/libfc/fc_npiv.c b/drivers/scsi/libfc/fc_npiv.c
index c168321..c045898 100644
--- a/drivers/scsi/libfc/fc_npiv.c
+++ b/drivers/scsi/libfc/fc_npiv.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright(c) 2009 Intel Corporation. All rights reserved.
  *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
  * Maintained at www.Open-FCoE.org
  */
 
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 372387a..da6e97d 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
  *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
  * Maintained at www.Open-FCoE.org
  */
 
@@ -140,6 +128,7 @@
 struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
 {
 	struct fc_rport_priv *rdata;
+	size_t rport_priv_size = sizeof(*rdata);
 
 	lockdep_assert_held(&lport->disc.disc_mutex);
 
@@ -147,7 +136,9 @@
 	if (rdata)
 		return rdata;
 
-	rdata = kzalloc(sizeof(*rdata) + lport->rport_priv_size, GFP_KERNEL);
+	if (lport->rport_priv_size > 0)
+		rport_priv_size = lport->rport_priv_size;
+	rdata = kzalloc(rport_priv_size, GFP_KERNEL);
 	if (!rdata)
 		return NULL;
 
@@ -184,7 +175,6 @@
 	struct fc_rport_priv *rdata;
 
 	rdata = container_of(kref, struct fc_rport_priv, kref);
-	WARN_ON(!list_empty(&rdata->peers));
 	kfree_rcu(rdata, rcu);
 }
 EXPORT_SYMBOL(fc_rport_destroy);
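
In the allocation hunk above, lport->rport_priv_size changes meaning: it is no longer a byte count appended to struct fc_rport_priv but, when nonzero, the total object size requested by the LLD (which must therefore be at least sizeof(struct fc_rport_priv)); zero selects the bare structure. Reduced to its rule:

	/* Sketch of the sizing rule; rdata and lport as in fc_rport_create(). */
	size_t size = sizeof(*rdata);

	if (lport->rport_priv_size > 0)
		size = lport->rport_priv_size;	/* total size, not an increment */
	rdata = kzalloc(size, GFP_KERNEL);
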
@@ -860,7 +850,6 @@
 static void fc_rport_recv_flogi_req(struct fc_lport *lport,
 				    struct fc_frame *rx_fp)
 {
-	struct fc_disc *disc;
 	struct fc_els_flogi *flp;
 	struct fc_rport_priv *rdata;
 	struct fc_frame *fp = rx_fp;
@@ -871,7 +860,6 @@
 
 	FC_RPORT_ID_DBG(lport, sid, "Received FLOGI request\n");
 
-	disc = &lport->disc;
 	if (!lport->point_to_multipoint) {
 		rjt_data.reason = ELS_RJT_UNSUP;
 		rjt_data.explan = ELS_EXPL_NONE;
@@ -1038,8 +1026,11 @@
 		struct fc_els_ls_rjt *rjt;
 
 		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
-		FC_RPORT_DBG(rdata, "PLOGI ELS rejected, reason %x expl %x\n",
-			     rjt->er_reason, rjt->er_explan);
+		if (!rjt)
+			FC_RPORT_DBG(rdata, "PLOGI bad response\n");
+		else
+			FC_RPORT_DBG(rdata, "PLOGI ELS rejected, reason %x expl %x\n",
+				     rjt->er_reason, rjt->er_explan);
 		fc_rport_error_retry(rdata, -FC_EX_ELS_RJT);
 	}
 out:
@@ -1158,8 +1149,10 @@
 	op = fc_frame_payload_op(fp);
 	if (op == ELS_LS_ACC) {
 		pp = fc_frame_payload_get(fp, sizeof(*pp));
-		if (!pp)
+		if (!pp) {
+			fc_rport_error_retry(rdata, -FC_EX_SEQ_ERR);
 			goto out;
+		}
 
 		resp_code = (pp->spp.spp_flags & FC_SPP_RESP_MASK);
 		FC_RPORT_DBG(rdata, "PRLI spp_flags = 0x%x spp_type 0x%x\n",
@@ -1172,8 +1165,10 @@
 				fc_rport_error_retry(rdata, -FC_EX_SEQ_ERR);
 			goto out;
 		}
-		if (pp->prli.prli_spp_len < sizeof(pp->spp))
+		if (pp->prli.prli_spp_len < sizeof(pp->spp)) {
+			fc_rport_error_retry(rdata, -FC_EX_SEQ_ERR);
 			goto out;
+		}
 
 		fcp_parm = ntohl(pp->spp.spp_params);
 		if (fcp_parm & FCP_SPPF_RETRY)
@@ -1211,8 +1206,11 @@
 
 	} else {
 		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
-		FC_RPORT_DBG(rdata, "PRLI ELS rejected, reason %x expl %x\n",
-			     rjt->er_reason, rjt->er_explan);
+		if (!rjt)
+			FC_RPORT_DBG(rdata, "PRLI bad response\n");
+		else
+			FC_RPORT_DBG(rdata, "PRLI ELS rejected, reason %x expl %x\n",
+				     rjt->er_reason, rjt->er_explan);
 		fc_rport_error_retry(rdata, FC_EX_ELS_RJT);
 	}
 
@@ -1714,6 +1712,7 @@
 			kref_put(&rdata->kref, fc_rport_destroy);
 			goto busy;
 		}
+		/* fall through */
 	default:
 		FC_RPORT_DBG(rdata,
 			     "Reject ELS 0x%02x while in state %s\n",
@@ -2154,7 +2153,6 @@
 		FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
 			     fc_rport_state(rdata));
 
-		rdata->flags &= ~FC_RP_STARTED;
 		fc_rport_enter_delete(rdata, RPORT_EV_STOP);
 		mutex_unlock(&rdata->rp_mutex);
 		kref_put(&rdata->kref, fc_rport_destroy);
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index f78d2e5..ebd47c0 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * iSCSI lib functions
  *
@@ -6,20 +7,6 @@
  * Copyright (C) 2004 - 2005 Dmitry Yusupov
  * Copyright (C) 2004 - 2005 Alex Aizman
  * maintained by open-iscsi@googlegroups.com
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 #include <linux/types.h>
 #include <linux/kfifo.h>
@@ -40,6 +27,7 @@
 #include <scsi/scsi_transport.h>
 #include <scsi/scsi_transport_iscsi.h>
 #include <scsi/libiscsi.h>
+#include <trace/events/iscsi.h>
 
 static int iscsi_dbg_lib_conn;
 module_param_named(debug_libiscsi_conn, iscsi_dbg_lib_conn, int,
@@ -68,6 +56,9 @@
 			iscsi_conn_printk(KERN_INFO, _conn,	\
 					     "%s " dbg_fmt,	\
 					     __func__, ##arg);	\
+		iscsi_dbg_trace(trace_iscsi_dbg_conn,		\
+				&(_conn)->cls_conn->dev,	\
+				"%s " dbg_fmt, __func__, ##arg);\
 	} while (0);
 
 #define ISCSI_DBG_SESSION(_session, dbg_fmt, arg...)			\
@@ -76,6 +67,9 @@
 			iscsi_session_printk(KERN_INFO, _session,	\
 					     "%s " dbg_fmt,		\
 					     __func__, ##arg);		\
+		iscsi_dbg_trace(trace_iscsi_dbg_session, 		\
+				&(_session)->cls_session->dev,		\
+				"%s " dbg_fmt, __func__, ##arg);	\
 	} while (0);
 
 #define ISCSI_DBG_EH(_session, dbg_fmt, arg...)				\
@@ -84,6 +78,9 @@
 			iscsi_session_printk(KERN_INFO, _session,	\
 					     "%s " dbg_fmt,		\
 					     __func__, ##arg);		\
+		iscsi_dbg_trace(trace_iscsi_dbg_eh,			\
+				&(_session)->cls_session->dev,		\
+				"%s " dbg_fmt, __func__, ##arg);	\
 	} while (0);
 
 inline void iscsi_conn_queue_work(struct iscsi_conn *conn)
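
Each ISCSI_DBG_* macro now mirrors its message to a tracepoint from <trace/events/iscsi.h> alongside the module-parameter-gated printk, so the messages remain available through ftrace even when the printk path is off. A generic sketch of the dual-sink pattern; the example_* names are hypothetical, not from this patch:

	/* Print when the knob is set, but always fire the tracepoint. */
	#define EXAMPLE_DBG(enabled, dev, fmt, args...)			\
	do {								\
		if (enabled)						\
			dev_info(dev, "%s " fmt, __func__, ##args);	\
		trace_example_dbg(dev, fmt, ##args);			\
	} while (0)
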
@@ -218,32 +215,6 @@
 	return 0;
 }
 
-static int iscsi_prep_bidi_ahs(struct iscsi_task *task)
-{
-	struct scsi_cmnd *sc = task->sc;
-	struct iscsi_rlength_ahdr *rlen_ahdr;
-	int rc;
-
-	rlen_ahdr = iscsi_next_hdr(task);
-	rc = iscsi_add_hdr(task, sizeof(*rlen_ahdr));
-	if (rc)
-		return rc;
-
-	rlen_ahdr->ahslength =
-		cpu_to_be16(sizeof(rlen_ahdr->read_length) +
-						  sizeof(rlen_ahdr->reserved));
-	rlen_ahdr->ahstype = ISCSI_AHSTYPE_RLENGTH;
-	rlen_ahdr->reserved = 0;
-	rlen_ahdr->read_length = cpu_to_be32(scsi_in(sc)->length);
-
-	ISCSI_DBG_SESSION(task->conn->session,
-			  "bidi-in rlen_ahdr->read_length(%d) "
-		          "rlen_ahdr->ahslength(%d)\n",
-		          be32_to_cpu(rlen_ahdr->read_length),
-		          be16_to_cpu(rlen_ahdr->ahslength));
-	return 0;
-}
-
 /**
  * iscsi_check_tmf_restrictions - check if a task is affected by TMF
  * @task: iscsi task
@@ -382,13 +353,6 @@
 	memcpy(hdr->cdb, sc->cmnd, cmd_len);
 
 	task->imm_count = 0;
-	if (scsi_bidi_cmnd(sc)) {
-		hdr->flags |= ISCSI_FLAG_CMD_READ;
-		rc = iscsi_prep_bidi_ahs(task);
-		if (rc)
-			return rc;
-	}
-
 	if (scsi_get_prot_op(sc) != SCSI_PROT_NORMAL)
 		task->protected = true;
 
@@ -463,12 +427,10 @@
 
 	conn->scsicmd_pdus_cnt++;
 	ISCSI_DBG_SESSION(session, "iscsi prep [%s cid %d sc %p cdb 0x%x "
-			  "itt 0x%x len %d bidi_len %d cmdsn %d win %d]\n",
-			  scsi_bidi_cmnd(sc) ? "bidirectional" :
+			  "itt 0x%x len %d cmdsn %d win %d]\n",
 			  sc->sc_data_direction == DMA_TO_DEVICE ?
 			  "write" : "read", conn->id, sc, sc->cmnd[0],
 			  task->itt, transfer_length,
-			  scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0,
 			  session->cmdsn,
 			  session->max_cmdsn - session->exp_cmdsn + 1);
 	return 0;
@@ -637,12 +599,7 @@
 		state = ISCSI_TASK_ABRT_TMF;
 
 	sc->result = err << 16;
-	if (!scsi_bidi_cmnd(sc))
-		scsi_set_resid(sc, scsi_bufflen(sc));
-	else {
-		scsi_out(sc)->resid = scsi_out(sc)->length;
-		scsi_in(sc)->resid = scsi_in(sc)->length;
-	}
+	scsi_set_resid(sc, scsi_bufflen(sc));
 
 	/* regular RX path uses back_lock */
 	spin_lock_bh(&conn->session->back_lock);
@@ -828,7 +785,7 @@
  * @datalen: len of buffer
  *
  * iscsi_cmd_rsp sets up the scsi_cmnd fields based on the PDU and
- * then completes the command and task.
+ * then completes the command and task. Called under back_lock.

  **/
 static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 			       struct iscsi_task *task, char *data,
@@ -897,14 +854,7 @@
 
 	if (rhdr->flags & (ISCSI_FLAG_CMD_BIDI_UNDERFLOW |
 			   ISCSI_FLAG_CMD_BIDI_OVERFLOW)) {
-		int res_count = be32_to_cpu(rhdr->bi_residual_count);
-
-		if (scsi_bidi_cmnd(sc) && res_count > 0 &&
-				(rhdr->flags & ISCSI_FLAG_CMD_BIDI_OVERFLOW ||
-				 res_count <= scsi_in(sc)->length))
-			scsi_in(sc)->resid = res_count;
-		else
-			sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
+		sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
 	}
 
 	if (rhdr->flags & (ISCSI_FLAG_CMD_UNDERFLOW |
@@ -931,6 +881,9 @@
  * @conn: iscsi connection
  * @hdr:  iscsi pdu
  * @task: scsi command task
+ *
+ * iscsi_data_in_rsp sets up the scsi_cmnd fields based on the data received,
+ * then completes the command and task. Called under back_lock.
  **/
 static void
 iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
@@ -951,8 +904,8 @@
 
 		if (res_count > 0 &&
 		    (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
-		     res_count <= scsi_in(sc)->length))
-			scsi_in(sc)->resid = res_count;
+		     res_count <= sc->sdb.length))
+			scsi_set_resid(sc, res_count);
 		else
 			sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
 	}
@@ -1015,6 +968,16 @@
 	return 0;
 }
 
+/**
+ * iscsi_nop_out_rsp - SCSI NOP Response processing
+ * @task: scsi command task
+ * @nop: the nop structure
+ * @data: where to put the data
+ * @datalen: length of data
+ *
+ * iscsi_nop_out_rsp handles nop responses to pings sent by the driver or
+ * by user space. Called under back_lock.
+ **/
 static int iscsi_nop_out_rsp(struct iscsi_task *task,
 			     struct iscsi_nopin *nop, char *data, int datalen)
 {
@@ -1449,7 +1412,13 @@
 	if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx))
 		return -ENODATA;
 
+	spin_lock_bh(&conn->session->back_lock);
+	if (conn->task == NULL) {
+		spin_unlock_bh(&conn->session->back_lock);
+		return -ENODATA;
+	}
 	__iscsi_get_task(task);
+	spin_unlock_bh(&conn->session->back_lock);
 	spin_unlock_bh(&conn->session->frwd_lock);
 	rc = conn->session->tt->xmit_task(task);
 	spin_lock_bh(&conn->session->frwd_lock);
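
This hunk closes a race between transmit and completion: conn->task is re-checked and the task reference taken under back_lock, the lock the completion side holds while tearing the task down, so the xmit path can no longer pin a task that is concurrently being completed. The pattern, reduced (conn and session as in the surrounding function):

	/* Only take the reference after revalidating the pointer under
	 * the same lock the completion path uses. */
	spin_lock_bh(&session->back_lock);
	if (!conn->task) {
		spin_unlock_bh(&session->back_lock);
		return -ENODATA;	/* completed underneath us */
	}
	__iscsi_get_task(conn->task);
	spin_unlock_bh(&session->back_lock);
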
@@ -1781,7 +1750,9 @@
 	return 0;
 
 prepd_reject:
+	spin_lock_bh(&session->back_lock);
 	iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ);
+	spin_unlock_bh(&session->back_lock);
 reject:
 	spin_unlock_bh(&session->frwd_lock);
 	ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n",
@@ -1789,17 +1760,14 @@
 	return SCSI_MLQUEUE_TARGET_BUSY;
 
 prepd_fault:
+	spin_lock_bh(&session->back_lock);
 	iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ);
+	spin_unlock_bh(&session->back_lock);
 fault:
 	spin_unlock_bh(&session->frwd_lock);
 	ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n",
 			  sc->cmnd[0], reason);
-	if (!scsi_bidi_cmnd(sc))
-		scsi_set_resid(sc, scsi_bufflen(sc));
-	else {
-		scsi_out(sc)->resid = scsi_out(sc)->length;
-		scsi_in(sc)->resid = scsi_in(sc)->length;
-	}
+	scsi_set_resid(sc, scsi_bufflen(sc));
 	sc->scsi_done(sc);
 	return 0;
 }
@@ -3111,8 +3079,9 @@
 		state = ISCSI_TASK_ABRT_SESS_RECOV;
 		if (task->state == ISCSI_TASK_PENDING)
 			state = ISCSI_TASK_COMPLETED;
+		spin_lock_bh(&session->back_lock);
 		iscsi_complete_task(task, state);
-
+		spin_unlock_bh(&session->back_lock);
 	}
 }
 
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 4fcb9e6..6ef93c7 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * iSCSI over TCP/IP Data-Path lib
  *
@@ -7,18 +8,6 @@
  * Copyright (C) 2006 Red Hat, Inc.  All rights reserved.
  * maintained by open-iscsi@googlegroups.com
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published
- * by the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * See the file COPYING included with this distribution for more details.
- *
  * Credits:
  *	Christoph Hellwig
  *	FUJITA Tomonori
@@ -43,6 +32,7 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_transport_iscsi.h>
+#include <trace/events/iscsi.h>
 
 #include "iscsi_tcp.h"
 
@@ -65,6 +55,9 @@
 			iscsi_conn_printk(KERN_INFO, _conn,	\
 					     "%s " dbg_fmt,	\
 					     __func__, ##arg);	\
+		iscsi_dbg_trace(trace_iscsi_dbg_tcp,		\
+				&(_conn)->cls_conn->dev,	\
+				"%s " dbg_fmt, __func__, ##arg);\
 	} while (0);
 
 static int iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
@@ -125,12 +118,17 @@
 	BUG_ON(sg->length == 0);
 
 	/*
+	 * We always map for the recv path.
+	 *
 	 * If the page count is greater than one it is ok to send
 	 * to the network layer's zero copy send path. If not we
-	 * have to go the slow sendmsg path. We always map for the
-	 * recv path.
+	 * have to go the slow sendmsg path.
+	 *
+	 * Same goes for slab pages: skb_can_coalesce() allows
+	 * coalescing neighboring slab objects into a single frag which
+	 * triggers one of hardened usercopy checks.
 	 */
-	if (page_count(sg_page(sg)) >= 1 && !recv)
+	if (!recv && page_count(sg_page(sg)) >= 1 && !PageSlab(sg_page(sg)))
 		return;
 
 	if (recv) {
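
The extra PageSlab() test narrows when the zero-copy send path may be used: skb_can_coalesce() can merge neighbouring slab objects into a single fragment, which then trips the hardened-usercopy checks, so slab-backed pages are forced through the slower sendmsg path. The send-side predicate, extracted as a sketch (the helper name is hypothetical):

	#include <linux/mm.h>
	#include <linux/scatterlist.h>

	/* True if this scatterlist entry may go via the network layer's
	 * zero-copy send path. */
	static bool example_can_sendpage(struct scatterlist *sg)
	{
		struct page *page = sg_page(sg);

		return page_count(page) >= 1 && !PageSlab(page);
	}
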
@@ -491,7 +489,7 @@
 	struct iscsi_tcp_task *tcp_task = task->dd_data;
 	struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
 	int datasn = be32_to_cpu(rhdr->datasn);
-	unsigned total_in_length = scsi_in(task->sc)->length;
+	unsigned total_in_length = task->sc->sdb.length;
 
 	/*
 	 * lib iscsi will update this in the completion handling if there
@@ -576,11 +574,11 @@
 			      data_length, session->max_burst);
 
 	data_offset = be32_to_cpu(rhdr->data_offset);
-	if (data_offset + data_length > scsi_out(task->sc)->length) {
+	if (data_offset + data_length > task->sc->sdb.length) {
 		iscsi_conn_printk(KERN_ERR, conn,
 				  "invalid R2T with data len %u at offset %u "
 				  "and total length %d\n", data_length,
-				  data_offset, scsi_out(task->sc)->length);
+				  data_offset, task->sc->sdb.length);
 		return ISCSI_ERR_DATALEN;
 	}
 
@@ -692,7 +690,7 @@
 		if (tcp_conn->in.datalen) {
 			struct iscsi_tcp_task *tcp_task = task->dd_data;
 			struct ahash_request *rx_hash = NULL;
-			struct scsi_data_buffer *sdb = scsi_in(task->sc);
+			struct scsi_data_buffer *sdb = &task->sc->sdb;
 
 			/*
 			 * Setup copy of Data-In into the struct scsi_cmnd
diff --git a/drivers/scsi/libsas/Kconfig b/drivers/scsi/libsas/Kconfig
index 13739bf..5c6a5ef 100644
--- a/drivers/scsi/libsas/Kconfig
+++ b/drivers/scsi/libsas/Kconfig
@@ -1,26 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
 #
 # Kernel configuration file for the SAS Class
 #
 # Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
 # Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
 #
-# This file is licensed under GPLv2.
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License as
-# published by the Free Software Foundation; version 2 of the
-# License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-# General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
-# USA
-#
 
 config SCSI_SAS_LIBSAS
 	tristate "SAS Domain Transport Attributes"
diff --git a/drivers/scsi/libsas/Makefile b/drivers/scsi/libsas/Makefile
index 2e70140..e63a54f 100644
--- a/drivers/scsi/libsas/Makefile
+++ b/drivers/scsi/libsas/Makefile
@@ -1,35 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0-only
 #
 # Kernel Makefile for the libsas helpers
 #
 # Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
 # Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
 #
-# This file is licensed under GPLv2.
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License as
-# published by the Free Software Foundation; version 2 of the
-# License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-# General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
-# USA
 
 obj-$(CONFIG_SCSI_SAS_LIBSAS) += libsas.o
 libsas-y +=  sas_init.o     \
 		sas_phy.o      \
 		sas_port.o     \
 		sas_event.o    \
-		sas_dump.o     \
 		sas_discover.o \
 		sas_expander.o \
 		sas_scsi_host.o \
 		sas_task.o
 libsas-$(CONFIG_SCSI_SAS_ATA) +=	sas_ata.o
 libsas-$(CONFIG_SCSI_SAS_HOST_SMP) +=	sas_host_smp.o
+
+ccflags-y := -DDEBUG
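
Compiling the directory with -DDEBUG pairs with the SAS_DPRINTK conversions in the files below: with DEBUG defined, pr_debug() is a real printk (or default-enabled under dynamic debug) rather than a no-op, so the previously unconditional debug output keeps appearing. A sketch of the mechanism, assuming the usual "sas: " prefix via pr_fmt():

	#define pr_fmt(fmt) "sas: " fmt
	#include <linux/printk.h>

	/* With -DDEBUG this pr_debug() is enabled by default; without it
	 * (and without dynamic debug) it compiles away. */
	static void example_log(int port_id)
	{
		pr_debug("DOING DISCOVERY on port %d\n", port_id);
	}
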
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 64a958a..e9e0074 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -1,24 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Support for SATA devices on Serial Attached SCSI (SAS) controllers
  *
  * Copyright (C) 2006 IBM Corporation
  *
  * Written by: Darrick J. Wong <djwong@us.ibm.com>, IBM Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
  */
 
 #include <linux/scatterlist.h>
@@ -75,8 +61,8 @@
 
 		case SAS_OPEN_TO:
 		case SAS_OPEN_REJECT:
-			SAS_DPRINTK("%s: Saw error %d.  What to do?\n",
-				    __func__, ts->stat);
+			pr_warn("%s: Saw error %d.  What to do?\n",
+				__func__, ts->stat);
 			return AC_ERR_OTHER;
 
 		case SAM_STAT_CHECK_CONDITION:
@@ -151,8 +137,7 @@
 	} else {
 		ac = sas_to_ata_err(stat);
 		if (ac) {
-			SAS_DPRINTK("%s: SAS error %x\n", __func__,
-				    stat->stat);
+			pr_warn("%s: SAS error %x\n", __func__, stat->stat);
 			/* We saw a SAS error. Send a vague error. */
 			if (!link->sactive) {
 				qc->err_mask = ac;
@@ -237,7 +222,7 @@
 
 	ret = i->dft->lldd_execute_task(task, GFP_ATOMIC);
 	if (ret) {
-		SAS_DPRINTK("lldd_execute_task returned: %d\n", ret);
+		pr_debug("lldd_execute_task returned: %d\n", ret);
 
 		if (qc->scsicmd)
 			ASSIGN_SAS_TASK(qc->scsicmd, NULL);
@@ -282,9 +267,9 @@
 		res = sas_get_report_phy_sata(dev->parent, phy->phy_id,
 					      &dev->sata_dev.rps_resp);
 		if (res) {
-			SAS_DPRINTK("report phy sata to %016llx:0x%x returned "
-				    "0x%x\n", SAS_ADDR(dev->parent->sas_addr),
-				    phy->phy_id, res);
+			pr_debug("report phy sata to %016llx:%02d returned 0x%x\n",
+				 SAS_ADDR(dev->parent->sas_addr),
+				 phy->phy_id, res);
 			return res;
 		}
 		memcpy(dev->frame_rcvd, &dev->sata_dev.rps_resp.rps.fis,
@@ -375,7 +360,7 @@
 	vaf.fmt = fmt;
 	vaf.va = &args;
 
-	r = printk("%ssas: ata%u: %s: %pV",
+	r = printk("%s" SAS_FMT "ata%u: %s: %pV",
 		   level, ap->print_id, dev_name(dev), &vaf);
 
 	va_end(args);
@@ -431,8 +416,7 @@
 	if (task->task_state_flags & SAS_TASK_STATE_ABORTED ||
 	    task->task_state_flags & SAS_TASK_STATE_DONE) {
 		spin_unlock_irqrestore(&task->task_state_lock, flags);
-		SAS_DPRINTK("%s: Task %p already finished.\n", __func__,
-			    task);
+		pr_debug("%s: Task %p already finished.\n", __func__, task);
 		goto out;
 	}
 	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
@@ -452,7 +436,7 @@
 	 * aborted ata tasks, otherwise we (likely) leak the sas task
 	 * here
 	 */
-	SAS_DPRINTK("%s: Task %p leaked.\n", __func__, task);
+	pr_warn("%s: Task %p leaked.\n", __func__, task);
 
 	if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
 		task->task_state_flags &= ~SAS_TASK_STATE_ABORTED;
@@ -558,7 +542,7 @@
 
 	ata_host = kzalloc(sizeof(*ata_host), GFP_KERNEL);
 	if (!ata_host)	{
-		SAS_DPRINTK("ata host alloc failed.\n");
+		pr_err("ata host alloc failed.\n");
 		return -ENOMEM;
 	}
 
@@ -566,7 +550,7 @@
 
 	ap = ata_sas_port_alloc(ata_host, &sata_port_info, shost);
 	if (!ap) {
-		SAS_DPRINTK("ata_sas_port_alloc failed.\n");
+		pr_err("ata_sas_port_alloc failed.\n");
 		rc = -ENODEV;
 		goto free_host;
 	}
@@ -601,12 +585,7 @@
 
 	/* Bounce SCSI-initiated commands to the SCSI EH */
 	if (qc->scsicmd) {
-		struct request_queue *q = qc->scsicmd->device->request_queue;
-		unsigned long flags;
-
-		spin_lock_irqsave(q->queue_lock, flags);
 		blk_abort_request(qc->scsicmd->request);
-		spin_unlock_irqrestore(q->queue_lock, flags);
 		return;
 	}
 
@@ -654,7 +633,7 @@
 		/* if libata could not bring the link up, don't surface
 		 * the device
 		 */
-		if (ata_dev_disabled(sas_to_ata_dev(dev)))
+		if (!ata_dev_enabled(sas_to_ata_dev(dev)))
 			sas_fail_probe(dev, __func__, -ENODEV);
 	}
 
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 0148ae6..f47b4b2 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -1,25 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Serial Attached SCSI (SAS) Discover process
  *
  * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
  * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
- *
- * This file is licensed under GPLv2.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
  */
 
 #include <linux/scatterlist.h>
@@ -128,7 +112,7 @@
 					  SAS_FANOUT_EXPANDER_DEVICE);
 		break;
 	default:
-		printk("ERROR: Unidentified device type %d\n", dev->dev_type);
+		pr_warn("ERROR: Unidentified device type %d\n", dev->dev_type);
 		rphy = NULL;
 		break;
 	}
@@ -186,10 +170,9 @@
 
 	res = i->dft->lldd_dev_found(dev);
 	if (res) {
-		printk("sas: driver on pcidev %s cannot handle "
-		       "device %llx, error:%d\n",
-		       dev_name(sas_ha->dev),
-		       SAS_ADDR(dev->sas_addr), res);
+		pr_warn("driver on host %s cannot handle device %llx, error:%d\n",
+			dev_name(sas_ha->dev),
+			SAS_ADDR(dev->sas_addr), res);
 	}
 	set_bit(SAS_DEV_FOUND, &dev->state);
 	kref_get(&dev->kref);
@@ -260,7 +243,7 @@
 	 * phy_list is not being mutated
 	 */
 	list_for_each_entry(phy, &port->phy_list, port_phy_el) {
-		if (si->dft->lldd_port_formed)
+		if (si->dft->lldd_port_deformed)
 			si->dft->lldd_port_deformed(phy);
 		phy->suspended = 1;
 		port->suspended = 1;
@@ -310,7 +293,7 @@
 	dev->phy = NULL;
 
 	/* remove the phys and ports, everything else should be gone */
-	if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
+	if (dev_is_expander(dev->dev_type))
 		kfree(dev->ex_dev.ex_phy);
 
 	if (dev_is_sata(dev) && dev->sata_dev.ap) {
@@ -456,8 +439,8 @@
 		return;
 	dev = port->port_dev;
 
-	SAS_DPRINTK("DOING DISCOVERY on port %d, pid:%d\n", port->id,
-		    task_pid_nr(current));
+	pr_debug("DOING DISCOVERY on port %d, pid:%d\n", port->id,
+		 task_pid_nr(current));
 
 	switch (dev->dev_type) {
 	case SAS_END_DEVICE:
@@ -473,12 +456,13 @@
 		error = sas_discover_sata(dev);
 		break;
 #else
-		SAS_DPRINTK("ATA device seen but CONFIG_SCSI_SAS_ATA=N so cannot attach\n");
+		pr_notice("ATA device seen but CONFIG_SCSI_SAS_ATA=N so cannot attach\n");
 		/* Fall through */
 #endif
+		/* Fall through - only for the #else condition above. */
 	default:
 		error = -ENXIO;
-		SAS_DPRINTK("unhandled device %d\n", dev->dev_type);
+		pr_err("unhandled device %d\n", dev->dev_type);
 		break;
 	}
 
@@ -495,8 +479,8 @@
 
 	sas_probe_devices(port);
 
-	SAS_DPRINTK("DONE DISCOVERY on port %d, pid:%d, result:%d\n", port->id,
-		    task_pid_nr(current), error);
+	pr_debug("DONE DISCOVERY on port %d, pid:%d, result:%d\n", port->id,
+		 task_pid_nr(current), error);
 }
 
 static void sas_revalidate_domain(struct work_struct *work)
@@ -510,22 +494,21 @@
 	/* prevent revalidation from finding sata links in recovery */
 	mutex_lock(&ha->disco_mutex);
 	if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) {
-		SAS_DPRINTK("REVALIDATION DEFERRED on port %d, pid:%d\n",
-			    port->id, task_pid_nr(current));
+		pr_debug("REVALIDATION DEFERRED on port %d, pid:%d\n",
+			 port->id, task_pid_nr(current));
 		goto out;
 	}
 
 	clear_bit(DISCE_REVALIDATE_DOMAIN, &port->disc.pending);
 
-	SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id,
-		    task_pid_nr(current));
+	pr_debug("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id,
+		 task_pid_nr(current));
 
-	if (ddev && (ddev->dev_type == SAS_FANOUT_EXPANDER_DEVICE ||
-		     ddev->dev_type == SAS_EDGE_EXPANDER_DEVICE))
+	if (ddev && dev_is_expander(ddev->dev_type))
 		res = sas_ex_revalidate_domain(ddev);
 
-	SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n",
-		    port->id, task_pid_nr(current), res);
+	pr_debug("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n",
+		 port->id, task_pid_nr(current), res);
  out:
 	mutex_unlock(&ha->disco_mutex);
 
diff --git a/drivers/scsi/libsas/sas_dump.c b/drivers/scsi/libsas/sas_dump.c
deleted file mode 100644
index 7e5d262..0000000
--- a/drivers/scsi/libsas/sas_dump.c
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Serial Attached SCSI (SAS) Dump/Debugging routines
- *
- * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
- * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
- *
- * This file is licensed under GPLv2.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- */
-
-#include "sas_dump.h"
-
-static const char *sas_porte_str[] = {
-	[0] = "PORTE_BYTES_DMAED",
-	[1] = "PORTE_BROADCAST_RCVD",
-	[2] = "PORTE_LINK_RESET_ERR",
-	[3] = "PORTE_TIMER_EVENT",
-	[4] = "PORTE_HARD_RESET",
-};
-
-static const char *sas_phye_str[] = {
-	[0] = "PHYE_LOSS_OF_SIGNAL",
-	[1] = "PHYE_OOB_DONE",
-	[2] = "PHYE_OOB_ERROR",
-	[3] = "PHYE_SPINUP_HOLD",
-	[4] = "PHYE_RESUME_TIMEOUT",
-};
-
-void sas_dprint_porte(int phyid, enum port_event pe)
-{
-	SAS_DPRINTK("phy%d: port event: %s\n", phyid, sas_porte_str[pe]);
-}
-void sas_dprint_phye(int phyid, enum phy_event pe)
-{
-	SAS_DPRINTK("phy%d: phy event: %s\n", phyid, sas_phye_str[pe]);
-}
-
-void sas_dump_port(struct asd_sas_port *port)
-{
-	SAS_DPRINTK("port%d: class:0x%x\n", port->id, port->class);
-	SAS_DPRINTK("port%d: sas_addr:%llx\n", port->id,
-		    SAS_ADDR(port->sas_addr));
-	SAS_DPRINTK("port%d: attached_sas_addr:%llx\n", port->id,
-		    SAS_ADDR(port->attached_sas_addr));
-	SAS_DPRINTK("port%d: iproto:0x%x\n", port->id, port->iproto);
-	SAS_DPRINTK("port%d: tproto:0x%x\n", port->id, port->tproto);
-	SAS_DPRINTK("port%d: oob_mode:0x%x\n", port->id, port->oob_mode);
-	SAS_DPRINTK("port%d: num_phys:%d\n", port->id, port->num_phys);
-}
diff --git a/drivers/scsi/libsas/sas_dump.h b/drivers/scsi/libsas/sas_dump.h
deleted file mode 100644
index 6aaee6b..0000000
--- a/drivers/scsi/libsas/sas_dump.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Serial Attached SCSI (SAS) Dump/Debugging routines header file
- *
- * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
- * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
- *
- * This file is licensed under GPLv2.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- */
-
-#include "sas_internal.h"
-
-void sas_dprint_porte(int phyid, enum port_event pe);
-void sas_dprint_phye(int phyid, enum phy_event pe);
-void sas_dump_port(struct asd_sas_port *port);
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
index ae923eb..a1852f6 100644
--- a/drivers/scsi/libsas/sas_event.c
+++ b/drivers/scsi/libsas/sas_event.c
@@ -1,31 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Serial Attached SCSI (SAS) Event processing
  *
  * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
  * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
- *
- * This file is licensed under GPLv2.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
  */
 
 #include <linux/export.h>
 #include <scsi/scsi_host.h>
 #include "sas_internal.h"
-#include "sas_dump.h"
 
 int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
 {
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index fadc99c..9fdb9c9 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Serial Attached SCSI (SAS) Expander discovery and configuration
  *
@@ -5,26 +6,12 @@
  * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
  *
  * This file is licensed under GPLv2.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
  */
 
 #include <linux/scatterlist.h>
 #include <linux/blkdev.h>
 #include <linux/slab.h>
+#include <asm/unaligned.h>
 
 #include "sas_internal.h"
 
@@ -48,17 +35,16 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&task->task_state_lock, flags);
-	if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
+	if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
 		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+		complete(&task->slow_task->completion);
+	}
 	spin_unlock_irqrestore(&task->task_state_lock, flags);
-
-	complete(&task->slow_task->completion);
 }
 
 static void smp_task_done(struct sas_task *task)
 {
-	if (!del_timer(&task->slow_task->timer))
-		return;
+	del_timer(&task->slow_task->timer);
 	complete(&task->slow_task->completion);
 }
 
@@ -100,17 +86,17 @@
 
 		if (res) {
 			del_timer(&task->slow_task->timer);
-			SAS_DPRINTK("executing SMP task failed:%d\n", res);
+			pr_notice("executing SMP task failed:%d\n", res);
 			break;
 		}
 
 		wait_for_completion(&task->slow_task->completion);
 		res = -ECOMM;
 		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
-			SAS_DPRINTK("smp task timed out or aborted\n");
+			pr_notice("smp task timed out or aborted\n");
 			i->dft->lldd_abort_task(task);
 			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
-				SAS_DPRINTK("SMP task aborted and not done\n");
+				pr_notice("SMP task aborted and not done\n");
 				break;
 			}
 		}
@@ -135,11 +121,11 @@
 		    task->task_status.stat == SAS_DEVICE_UNKNOWN)
 			break;
 		else {
-			SAS_DPRINTK("%s: task to dev %016llx response: 0x%x "
-				    "status 0x%x\n", __func__,
-				    SAS_ADDR(dev->sas_addr),
-				    task->task_status.resp,
-				    task->task_status.stat);
+			pr_notice("%s: task to dev %016llx response: 0x%x status 0x%x\n",
+				  __func__,
+				  SAS_ADDR(dev->sas_addr),
+				  task->task_status.resp,
+				  task->task_status.stat);
 			sas_free_task(task);
 			task = NULL;
 		}
@@ -348,11 +334,11 @@
 	if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state))
 		set_bit(DISCE_REVALIDATE_DOMAIN, &dev->port->disc.pending);
 
-	SAS_DPRINTK("%sex %016llx phy%02d:%c:%X attached: %016llx (%s)\n",
-		    test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state) ? "ata: " : "",
-		    SAS_ADDR(dev->sas_addr), phy->phy_id,
-		    sas_route_char(dev, phy), phy->linkrate,
-		    SAS_ADDR(phy->attached_sas_addr), type);
+	pr_debug("%sex %016llx phy%02d:%c:%X attached: %016llx (%s)\n",
+		 test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state) ? "ata: " : "",
+		 SAS_ADDR(dev->sas_addr), phy->phy_id,
+		 sas_route_char(dev, phy), phy->linkrate,
+		 SAS_ADDR(phy->attached_sas_addr), type);
 }
 
 /* check if we have an existing attached ata device on this expander phy */
@@ -394,7 +380,7 @@
 		return res;
 	dr = &((struct smp_resp *)disc_resp)->disc;
 	if (memcmp(dev->sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE) == 0) {
-		sas_printk("Found loopback topology, just ignore it!\n");
+		pr_notice("Found loopback topology, just ignore it!\n");
 		return 0;
 	}
 	sas_set_ex_phy(dev, single, disc_resp);
@@ -501,12 +487,12 @@
 				       RG_RESP_SIZE);
 
 		if (res) {
-			SAS_DPRINTK("RG to ex %016llx failed:0x%x\n",
-				    SAS_ADDR(dev->sas_addr), res);
+			pr_notice("RG to ex %016llx failed:0x%x\n",
+				  SAS_ADDR(dev->sas_addr), res);
 			goto out;
 		} else if (rg_resp->result != SMP_RESP_FUNC_ACC) {
-			SAS_DPRINTK("RG:ex %016llx returned SMP result:0x%x\n",
-				    SAS_ADDR(dev->sas_addr), rg_resp->result);
+			pr_debug("RG:ex %016llx returned SMP result:0x%x\n",
+				 SAS_ADDR(dev->sas_addr), rg_resp->result);
 			res = rg_resp->result;
 			goto out;
 		}
@@ -514,8 +500,8 @@
 		ex_assign_report_general(dev, rg_resp);
 
 		if (dev->ex_dev.configuring) {
-			SAS_DPRINTK("RG: ex %llx self-configuring...\n",
-				    SAS_ADDR(dev->sas_addr));
+			pr_debug("RG: ex %llx self-configuring...\n",
+				 SAS_ADDR(dev->sas_addr));
 			schedule_timeout_interruptible(5*HZ);
 		} else
 			break;
@@ -569,12 +555,12 @@
 
 	res = smp_execute_task(dev, mi_req, MI_REQ_SIZE, mi_resp,MI_RESP_SIZE);
 	if (res) {
-		SAS_DPRINTK("MI: ex %016llx failed:0x%x\n",
-			    SAS_ADDR(dev->sas_addr), res);
+		pr_notice("MI: ex %016llx failed:0x%x\n",
+			  SAS_ADDR(dev->sas_addr), res);
 		goto out;
 	} else if (mi_resp[2] != SMP_RESP_FUNC_ACC) {
-		SAS_DPRINTK("MI ex %016llx returned SMP result:0x%x\n",
-			    SAS_ADDR(dev->sas_addr), mi_resp[2]);
+		pr_debug("MI ex %016llx returned SMP result:0x%x\n",
+			 SAS_ADDR(dev->sas_addr), mi_resp[2]);
 		goto out;
 	}
 
@@ -615,7 +601,14 @@
 	}
 
 	res = smp_execute_task(dev, pc_req, PC_REQ_SIZE, pc_resp,PC_RESP_SIZE);
-
+	if (res) {
+		pr_err("ex %016llx phy%02d PHY control failed: %d\n",
+		       SAS_ADDR(dev->sas_addr), phy_id, res);
+	} else if (pc_resp[2] != SMP_RESP_FUNC_ACC) {
+		pr_err("ex %016llx phy%02d PHY control failed: function result 0x%x\n",
+		       SAS_ADDR(dev->sas_addr), phy_id, pc_resp[2]);
+		res = pc_resp[2];
+	}
 	kfree(pc_resp);
 	kfree(pc_req);
 	return res;
@@ -690,10 +683,10 @@
 	if (res)
 		goto out;
 
-	phy->invalid_dword_count = scsi_to_u32(&resp[12]);
-	phy->running_disparity_error_count = scsi_to_u32(&resp[16]);
-	phy->loss_of_dword_sync_count = scsi_to_u32(&resp[20]);
-	phy->phy_reset_problem_count = scsi_to_u32(&resp[24]);
+	phy->invalid_dword_count = get_unaligned_be32(&resp[12]);
+	phy->running_disparity_error_count = get_unaligned_be32(&resp[16]);
+	phy->loss_of_dword_sync_count = get_unaligned_be32(&resp[20]);
+	phy->phy_reset_problem_count = get_unaligned_be32(&resp[24]);
 
  out:
 	kfree(req);
@@ -818,6 +811,31 @@
 
 #ifdef CONFIG_SCSI_SAS_ATA
 	if ((phy->attached_tproto & SAS_PROTOCOL_STP) || phy->attached_sata_dev) {
+		if (child->linkrate > parent->min_linkrate) {
+			struct sas_phy *cphy = child->phy;
+			enum sas_linkrate min_prate = cphy->minimum_linkrate,
+				parent_min_lrate = parent->min_linkrate,
+				min_linkrate = (min_prate > parent_min_lrate) ?
+					       parent_min_lrate : 0;
+			struct sas_phy_linkrates rates = {
+				.maximum_linkrate = parent->min_linkrate,
+				.minimum_linkrate = min_linkrate,
+			};
+			int ret;
+
+			pr_notice("ex %016llx phy%02d SATA device linkrate > min pathway connection rate, attempting to lower device linkrate\n",
+				   SAS_ADDR(child->sas_addr), phy_id);
+			ret = sas_smp_phy_control(parent, phy_id,
+						  PHY_FUNC_LINK_RESET, &rates);
+			if (ret) {
+				pr_err("ex %016llx phy%02d SATA device could not set linkrate (%d)\n",
+				       SAS_ADDR(child->sas_addr), phy_id, ret);
+				goto out_free;
+			}
+			pr_notice("ex %016llx phy%02d SATA device set linkrate successfully\n",
+				  SAS_ADDR(child->sas_addr), phy_id);
+			child->linkrate = child->min_linkrate;
+		}
 		res = sas_get_ata_info(child, phy);
 		if (res)
 			goto out_free;
@@ -829,6 +847,7 @@
 		rphy = sas_end_device_alloc(phy->port);
 		if (!rphy)
 			goto out_free;
+		rphy->identify.phy_identifier = phy_id;
 
 		child->rphy = rphy;
 		get_device(&rphy->dev);
@@ -837,10 +856,9 @@
 
 		res = sas_discover_sata(child);
 		if (res) {
-			SAS_DPRINTK("sas_discover_sata() for device %16llx at "
-				    "%016llx:0x%x returned 0x%x\n",
-				    SAS_ADDR(child->sas_addr),
-				    SAS_ADDR(parent->sas_addr), phy_id, res);
+			pr_notice("sas_discover_sata() for device %16llx at %016llx:%02d returned 0x%x\n",
+				  SAS_ADDR(child->sas_addr),
+				  SAS_ADDR(parent->sas_addr), phy_id, res);
 			goto out_list_del;
 		}
 	} else
@@ -856,22 +874,22 @@
 
 		child->rphy = rphy;
 		get_device(&rphy->dev);
+		rphy->identify.phy_identifier = phy_id;
 		sas_fill_in_rphy(child, rphy);
 
 		list_add_tail(&child->disco_list_node, &parent->port->disco_list);
 
 		res = sas_discover_end_dev(child);
 		if (res) {
-			SAS_DPRINTK("sas_discover_end_dev() for device %16llx "
-				    "at %016llx:0x%x returned 0x%x\n",
-				    SAS_ADDR(child->sas_addr),
-				    SAS_ADDR(parent->sas_addr), phy_id, res);
+			pr_notice("sas_discover_end_dev() for device %16llx at %016llx:%02d returned 0x%x\n",
+				  SAS_ADDR(child->sas_addr),
+				  SAS_ADDR(parent->sas_addr), phy_id, res);
 			goto out_list_del;
 		}
 	} else {
-		SAS_DPRINTK("target proto 0x%x at %016llx:0x%x not handled\n",
-			    phy->attached_tproto, SAS_ADDR(parent->sas_addr),
-			    phy_id);
+		pr_notice("target proto 0x%x at %016llx:0x%x not handled\n",
+			  phy->attached_tproto, SAS_ADDR(parent->sas_addr),
+			  phy_id);
 		goto out_free;
 	}
 
@@ -928,11 +946,10 @@
 	int res;
 
 	if (phy->routing_attr == DIRECT_ROUTING) {
-		SAS_DPRINTK("ex %016llx:0x%x:D <--> ex %016llx:0x%x is not "
-			    "allowed\n",
-			    SAS_ADDR(parent->sas_addr), phy_id,
-			    SAS_ADDR(phy->attached_sas_addr),
-			    phy->attached_phy_id);
+		pr_warn("ex %016llx:%02d:D <--> ex %016llx:0x%x is not allowed\n",
+			SAS_ADDR(parent->sas_addr), phy_id,
+			SAS_ADDR(phy->attached_sas_addr),
+			phy->attached_phy_id);
 		return NULL;
 	}
 	child = sas_alloc_device();
@@ -988,6 +1005,8 @@
 		list_del(&child->dev_list_node);
 		spin_unlock_irq(&parent->port->dev_list_lock);
 		sas_put_device(child);
+		sas_port_delete(phy->port);
+		phy->port = NULL;
 		return NULL;
 	}
 	list_add_tail(&child->siblings, &parent->ex_dev.children);
@@ -1039,25 +1058,24 @@
 	    ex_phy->attached_dev_type != SAS_FANOUT_EXPANDER_DEVICE &&
 	    ex_phy->attached_dev_type != SAS_EDGE_EXPANDER_DEVICE &&
 	    ex_phy->attached_dev_type != SAS_SATA_PENDING) {
-		SAS_DPRINTK("unknown device type(0x%x) attached to ex %016llx "
-			    "phy 0x%x\n", ex_phy->attached_dev_type,
-			    SAS_ADDR(dev->sas_addr),
-			    phy_id);
+		pr_warn("unknown device type(0x%x) attached to ex %016llx phy%02d\n",
+			ex_phy->attached_dev_type,
+			SAS_ADDR(dev->sas_addr),
+			phy_id);
 		return 0;
 	}
 
 	res = sas_configure_routing(dev, ex_phy->attached_sas_addr);
 	if (res) {
-		SAS_DPRINTK("configure routing for dev %016llx "
-			    "reported 0x%x. Forgotten\n",
-			    SAS_ADDR(ex_phy->attached_sas_addr), res);
+		pr_notice("configure routing for dev %016llx reported 0x%x. Forgotten\n",
+			  SAS_ADDR(ex_phy->attached_sas_addr), res);
 		sas_disable_routing(dev, ex_phy->attached_sas_addr);
 		return res;
 	}
 
 	if (sas_ex_join_wide_port(dev, phy_id)) {
-		SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n",
-			    phy_id, SAS_ADDR(ex_phy->attached_sas_addr));
+		pr_debug("Attaching ex phy%02d to wide port %016llx\n",
+			 phy_id, SAS_ADDR(ex_phy->attached_sas_addr));
 		return res;
 	}
 
@@ -1068,14 +1086,13 @@
 		break;
 	case SAS_FANOUT_EXPANDER_DEVICE:
 		if (SAS_ADDR(dev->port->disc.fanout_sas_addr)) {
-			SAS_DPRINTK("second fanout expander %016llx phy 0x%x "
-				    "attached to ex %016llx phy 0x%x\n",
-				    SAS_ADDR(ex_phy->attached_sas_addr),
-				    ex_phy->attached_phy_id,
-				    SAS_ADDR(dev->sas_addr),
-				    phy_id);
+			pr_debug("second fanout expander %016llx phy%02d attached to ex %016llx phy%02d\n",
+				 SAS_ADDR(ex_phy->attached_sas_addr),
+				 ex_phy->attached_phy_id,
+				 SAS_ADDR(dev->sas_addr),
+				 phy_id);
 			sas_ex_disable_phy(dev, phy_id);
-			break;
+			return res;
 		} else
 			memcpy(dev->port->disc.fanout_sas_addr,
 			       ex_phy->attached_sas_addr, SAS_ADDR_SIZE);
@@ -1087,28 +1104,9 @@
 		break;
 	}
 
-	if (child) {
-		int i;
-
-		for (i = 0; i < ex->num_phys; i++) {
-			if (ex->ex_phy[i].phy_state == PHY_VACANT ||
-			    ex->ex_phy[i].phy_state == PHY_NOT_PRESENT)
-				continue;
-			/*
-			 * Due to races, the phy might not get added to the
-			 * wide port, so we add the phy to the wide port here.
-			 */
-			if (SAS_ADDR(ex->ex_phy[i].attached_sas_addr) ==
-			    SAS_ADDR(child->sas_addr)) {
-				ex->ex_phy[i].phy_state= PHY_DEVICE_DISCOVERED;
-				if (sas_ex_join_wide_port(dev, i))
-					SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n",
-						    i, SAS_ADDR(ex->ex_phy[i].attached_sas_addr));
-
-			}
-		}
-	}
-
+	if (!child)
+		pr_notice("ex %016llx phy%02d failed to discover\n",
+			  SAS_ADDR(dev->sas_addr), phy_id);
 	return res;
 }
 
@@ -1124,11 +1122,10 @@
 		    phy->phy_state == PHY_NOT_PRESENT)
 			continue;
 
-		if ((phy->attached_dev_type == SAS_EDGE_EXPANDER_DEVICE ||
-		     phy->attached_dev_type == SAS_FANOUT_EXPANDER_DEVICE) &&
+		if (dev_is_expander(phy->attached_dev_type) &&
 		    phy->routing_attr == SUBTRACTIVE_ROUTING) {
 
-			memcpy(sub_addr, phy->attached_sas_addr,SAS_ADDR_SIZE);
+			memcpy(sub_addr, phy->attached_sas_addr, SAS_ADDR_SIZE);
 
 			return 1;
 		}
@@ -1140,28 +1137,25 @@
 {
 	struct expander_device *ex = &dev->ex_dev;
 	struct domain_device *child;
-	u8 sub_addr[8] = {0, };
+	u8 sub_addr[SAS_ADDR_SIZE] = {0, };
 
 	list_for_each_entry(child, &ex->children, siblings) {
-		if (child->dev_type != SAS_EDGE_EXPANDER_DEVICE &&
-		    child->dev_type != SAS_FANOUT_EXPANDER_DEVICE)
+		if (!dev_is_expander(child->dev_type))
 			continue;
 		if (sub_addr[0] == 0) {
 			sas_find_sub_addr(child, sub_addr);
 			continue;
 		} else {
-			u8 s2[8];
+			u8 s2[SAS_ADDR_SIZE];
 
 			if (sas_find_sub_addr(child, s2) &&
 			    (SAS_ADDR(sub_addr) != SAS_ADDR(s2))) {
 
-				SAS_DPRINTK("ex %016llx->%016llx-?->%016llx "
-					    "diverges from subtractive "
-					    "boundary %016llx\n",
-					    SAS_ADDR(dev->sas_addr),
-					    SAS_ADDR(child->sas_addr),
-					    SAS_ADDR(s2),
-					    SAS_ADDR(sub_addr));
+				pr_notice("ex %016llx->%016llx-?->%016llx diverges from subtractive boundary %016llx\n",
+					  SAS_ADDR(dev->sas_addr),
+					  SAS_ADDR(child->sas_addr),
+					  SAS_ADDR(s2),
+					  SAS_ADDR(sub_addr));
 
 				sas_ex_disable_port(child, s2);
 			}
@@ -1231,8 +1225,7 @@
 		    phy->phy_state == PHY_NOT_PRESENT)
 			continue;
 
-		if ((phy->attached_dev_type == SAS_FANOUT_EXPANDER_DEVICE ||
-		     phy->attached_dev_type == SAS_EDGE_EXPANDER_DEVICE) &&
+		if (dev_is_expander(phy->attached_dev_type) &&
 		    phy->routing_attr == SUBTRACTIVE_ROUTING) {
 
 			if (!sub_sas_addr)
@@ -1240,12 +1233,10 @@
 			else if (SAS_ADDR(sub_sas_addr) !=
 				 SAS_ADDR(phy->attached_sas_addr)) {
 
-				SAS_DPRINTK("ex %016llx phy 0x%x "
-					    "diverges(%016llx) on subtractive "
-					    "boundary(%016llx). Disabled\n",
-					    SAS_ADDR(dev->sas_addr), i,
-					    SAS_ADDR(phy->attached_sas_addr),
-					    SAS_ADDR(sub_sas_addr));
+				pr_notice("ex %016llx phy%02d diverges(%016llx) on subtractive boundary(%016llx). Disabled\n",
+					  SAS_ADDR(dev->sas_addr), i,
+					  SAS_ADDR(phy->attached_sas_addr),
+					  SAS_ADDR(sub_sas_addr));
 				sas_ex_disable_phy(dev, i);
 			}
 		}
@@ -1263,19 +1254,17 @@
 	};
 	struct domain_device *parent = child->parent;
 
-	sas_printk("%s ex %016llx phy 0x%x <--> %s ex %016llx "
-		   "phy 0x%x has %c:%c routing link!\n",
+	pr_notice("%s ex %016llx phy%02d <--> %s ex %016llx phy%02d has %c:%c routing link!\n",
+		  ex_type[parent->dev_type],
+		  SAS_ADDR(parent->sas_addr),
+		  parent_phy->phy_id,
 
-		   ex_type[parent->dev_type],
-		   SAS_ADDR(parent->sas_addr),
-		   parent_phy->phy_id,
+		  ex_type[child->dev_type],
+		  SAS_ADDR(child->sas_addr),
+		  child_phy->phy_id,
 
-		   ex_type[child->dev_type],
-		   SAS_ADDR(child->sas_addr),
-		   child_phy->phy_id,
-
-		   sas_route_char(parent, parent_phy),
-		   sas_route_char(child, child_phy));
+		  sas_route_char(parent, parent_phy),
+		  sas_route_char(child, child_phy));
 }
 
 static int sas_check_eeds(struct domain_device *child,
@@ -1287,13 +1276,12 @@
 
 	if (SAS_ADDR(parent->port->disc.fanout_sas_addr) != 0) {
 		res = -ENODEV;
-		SAS_DPRINTK("edge ex %016llx phy S:0x%x <--> edge ex %016llx "
-			    "phy S:0x%x, while there is a fanout ex %016llx\n",
-			    SAS_ADDR(parent->sas_addr),
-			    parent_phy->phy_id,
-			    SAS_ADDR(child->sas_addr),
-			    child_phy->phy_id,
-			    SAS_ADDR(parent->port->disc.fanout_sas_addr));
+		pr_warn("edge ex %016llx phy S:%02d <--> edge ex %016llx phy S:%02d, while there is a fanout ex %016llx\n",
+			SAS_ADDR(parent->sas_addr),
+			parent_phy->phy_id,
+			SAS_ADDR(child->sas_addr),
+			child_phy->phy_id,
+			SAS_ADDR(parent->port->disc.fanout_sas_addr));
 	} else if (SAS_ADDR(parent->port->disc.eeds_a) == 0) {
 		memcpy(parent->port->disc.eeds_a, parent->sas_addr,
 		       SAS_ADDR_SIZE);
@@ -1311,12 +1299,11 @@
 		;
 	else {
 		res = -ENODEV;
-		SAS_DPRINTK("edge ex %016llx phy 0x%x <--> edge ex %016llx "
-			    "phy 0x%x link forms a third EEDS!\n",
-			    SAS_ADDR(parent->sas_addr),
-			    parent_phy->phy_id,
-			    SAS_ADDR(child->sas_addr),
-			    child_phy->phy_id);
+		pr_warn("edge ex %016llx phy%02d <--> edge ex %016llx phy%02d link forms a third EEDS!\n",
+			SAS_ADDR(parent->sas_addr),
+			parent_phy->phy_id,
+			SAS_ADDR(child->sas_addr),
+			child_phy->phy_id);
 	}
 
 	return res;
@@ -1334,8 +1321,7 @@
 	if (!child->parent)
 		return 0;
 
-	if (child->parent->dev_type != SAS_EDGE_EXPANDER_DEVICE &&
-	    child->parent->dev_type != SAS_FANOUT_EXPANDER_DEVICE)
+	if (!dev_is_expander(child->parent->dev_type))
 		return 0;
 
 	parent_ex = &child->parent->ex_dev;
@@ -1430,14 +1416,13 @@
 			goto out;
 		res = rri_resp[2];
 		if (res == SMP_RESP_NO_INDEX) {
-			SAS_DPRINTK("overflow of indexes: dev %016llx "
-				    "phy 0x%x index 0x%x\n",
-				    SAS_ADDR(dev->sas_addr), phy_id, i);
+			pr_warn("overflow of indexes: dev %016llx phy%02d index 0x%x\n",
+				SAS_ADDR(dev->sas_addr), phy_id, i);
 			goto out;
 		} else if (res != SMP_RESP_FUNC_ACC) {
-			SAS_DPRINTK("%s: dev %016llx phy 0x%x index 0x%x "
-				    "result 0x%x\n", __func__,
-				    SAS_ADDR(dev->sas_addr), phy_id, i, res);
+			pr_notice("%s: dev %016llx phy%02d index 0x%x result 0x%x\n",
+				  __func__, SAS_ADDR(dev->sas_addr), phy_id,
+				  i, res);
 			goto out;
 		}
 		if (SAS_ADDR(sas_addr) != 0) {
@@ -1501,9 +1486,8 @@
 		goto out;
 	res = cri_resp[2];
 	if (res == SMP_RESP_NO_INDEX) {
-		SAS_DPRINTK("overflow of indexes: dev %016llx phy 0x%x "
-			    "index 0x%x\n",
-			    SAS_ADDR(dev->sas_addr), phy_id, index);
+		pr_warn("overflow of indexes: dev %016llx phy%02d index 0x%x\n",
+			SAS_ADDR(dev->sas_addr), phy_id, index);
 	}
 out:
 	kfree(cri_req);
@@ -1550,8 +1534,8 @@
 	}
 
 	if (ex_parent->conf_route_table == 0) {
-		SAS_DPRINTK("ex %016llx has self-configuring routing table\n",
-			    SAS_ADDR(parent->sas_addr));
+		pr_debug("ex %016llx has self-configuring routing table\n",
+			 SAS_ADDR(parent->sas_addr));
 		return 0;
 	}
 
@@ -1612,8 +1596,8 @@
 
 	res = sas_expander_discover(dev);
 	if (res) {
-		SAS_DPRINTK("expander %016llx discovery failed(0x%x)\n",
-			    SAS_ADDR(dev->sas_addr), res);
+		pr_warn("expander %016llx discovery failed(0x%x)\n",
+			SAS_ADDR(dev->sas_addr), res);
 		goto out_err;
 	}
 
@@ -1633,8 +1617,7 @@
 	struct domain_device *dev;
 
 	list_for_each_entry(dev, &port->dev_list, dev_list_node) {
-		if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
-		    dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
+		if (dev_is_expander(dev->dev_type)) {
 			struct sas_expander_device *ex =
 				rphy_to_expander_device(dev->rphy);
 
@@ -1747,10 +1730,11 @@
 
 	res = sas_get_phy_discover(dev, phy_id, disc_resp);
 	if (res == 0) {
-		memcpy(sas_addr, disc_resp->disc.attached_sas_addr, 8);
+		memcpy(sas_addr, disc_resp->disc.attached_sas_addr,
+		       SAS_ADDR_SIZE);
 		*type = to_dev_type(dr);
 		if (*type == 0)
-			memset(sas_addr, 0, 8);
+			memset(sas_addr, 0, SAS_ADDR_SIZE);
 	}
 	kfree(disc_resp);
 	return res;
@@ -1857,13 +1841,15 @@
 		if (phy_id != -1) {
 			*src_dev = dev;
 			ex->ex_change_count = ex_change_count;
-			SAS_DPRINTK("Expander phy change count has changed\n");
+			pr_info("ex %016llx phy%02d change count has changed\n",
+				SAS_ADDR(dev->sas_addr), phy_id);
 			return res;
 		} else
-			SAS_DPRINTK("Expander phys DID NOT change\n");
+			pr_info("ex %016llx phys DID NOT change\n",
+				SAS_ADDR(dev->sas_addr));
 	}
 	list_for_each_entry(ch, &ex->children, siblings) {
-		if (ch->dev_type == SAS_EDGE_EXPANDER_DEVICE || ch->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
+		if (dev_is_expander(ch->dev_type)) {
 			res = sas_find_bcast_dev(ch, src_dev);
 			if (*src_dev)
 				return res;
@@ -1880,8 +1866,7 @@
 
 	list_for_each_entry_safe(child, n, &ex->children, siblings) {
 		set_bit(SAS_DEV_GONE, &child->state);
-		if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
-		    child->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
+		if (dev_is_expander(child->dev_type))
 			sas_unregister_ex_tree(port, child);
 		else
 			sas_unregister_dev(port, child);
@@ -1901,8 +1886,7 @@
 			if (SAS_ADDR(child->sas_addr) ==
 			    SAS_ADDR(phy->attached_sas_addr)) {
 				set_bit(SAS_DEV_GONE, &child->state);
-				if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
-				    child->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
+				if (dev_is_expander(child->dev_type))
 					sas_unregister_ex_tree(parent->port, child);
 				else
 					sas_unregister_dev(parent->port, child);
@@ -1931,8 +1915,7 @@
 	int res = 0;
 
 	list_for_each_entry(child, &ex_root->children, siblings) {
-		if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
-		    child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
+		if (dev_is_expander(child->dev_type)) {
 			struct sas_expander_device *ex =
 				rphy_to_expander_device(child->rphy);
 
@@ -1970,8 +1953,8 @@
 	struct domain_device *child;
 	int res;
 
-	SAS_DPRINTK("ex %016llx phy%d new device attached\n",
-		    SAS_ADDR(dev->sas_addr), phy_id);
+	pr_debug("ex %016llx phy%02d new device attached\n",
+		 SAS_ADDR(dev->sas_addr), phy_id);
 	res = sas_ex_phy_discover(dev, phy_id);
 	if (res)
 		return res;
@@ -1985,8 +1968,7 @@
 	list_for_each_entry(child, &dev->ex_dev.children, siblings) {
 		if (SAS_ADDR(child->sas_addr) ==
 		    SAS_ADDR(ex_phy->attached_sas_addr)) {
-			if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
-			    child->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
+			if (dev_is_expander(child->dev_type))
 				res = sas_discover_bfs_by_root(child);
 			break;
 		}
@@ -2009,15 +1991,23 @@
 	return false;
 }
 
-static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
+static int sas_rediscover_dev(struct domain_device *dev, int phy_id,
+			      bool last, int sibling)
 {
 	struct expander_device *ex = &dev->ex_dev;
 	struct ex_phy *phy = &ex->ex_phy[phy_id];
 	enum sas_device_type type = SAS_PHY_UNUSED;
-	u8 sas_addr[8];
+	u8 sas_addr[SAS_ADDR_SIZE];
+	char msg[80] = "";
 	int res;
 
-	memset(sas_addr, 0, 8);
+	if (!last)
+		sprintf(msg, ", part of a wide port with phy%02d", sibling);
+
+	pr_debug("ex %016llx rediscovering phy%02d%s\n",
+		 SAS_ADDR(dev->sas_addr), phy_id, msg);
+
+	memset(sas_addr, 0, SAS_ADDR_SIZE);
 	res = sas_get_phy_attached_dev(dev, phy_id, sas_addr, &type);
 	switch (res) {
 	case SMP_RESP_NO_PHY:
@@ -2039,6 +2029,11 @@
 	if ((SAS_ADDR(sas_addr) == 0) || (res == -ECOMM)) {
 		phy->phy_state = PHY_EMPTY;
 		sas_unregister_devs_sas_addr(dev, phy_id, last);
+		/*
+		 * Even though the PHY is empty, for convenience we discover
+		 * the PHY to update the PHY info, like negotiated linkrate.
+		 */
+		sas_ex_phy_discover(dev, phy_id);
 		return res;
 	} else if (SAS_ADDR(sas_addr) == SAS_ADDR(phy->attached_sas_addr) &&
 		   dev_type_flutter(type, phy->attached_dev_type)) {
@@ -2049,19 +2044,16 @@
 
 		if (ata_dev && phy->attached_dev_type == SAS_SATA_PENDING)
 			action = ", needs recovery";
-		SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter%s\n",
-			    SAS_ADDR(dev->sas_addr), phy_id, action);
+		pr_debug("ex %016llx phy%02d broadcast flutter%s\n",
+			 SAS_ADDR(dev->sas_addr), phy_id, action);
 		return res;
 	}
 
-	/* delete the old link */
-	if (SAS_ADDR(phy->attached_sas_addr) &&
-	    SAS_ADDR(sas_addr) != SAS_ADDR(phy->attached_sas_addr)) {
-		SAS_DPRINTK("ex %016llx phy 0x%x replace %016llx\n",
-			    SAS_ADDR(dev->sas_addr), phy_id,
-			    SAS_ADDR(phy->attached_sas_addr));
-		sas_unregister_devs_sas_addr(dev, phy_id, last);
-	}
+	/* we always have to delete the old device once we get here */
+	pr_info("ex %016llx phy%02d replace %016llx\n",
+		SAS_ADDR(dev->sas_addr), phy_id,
+		SAS_ADDR(phy->attached_sas_addr));
+	sas_unregister_devs_sas_addr(dev, phy_id, last);
 
 	return sas_discover_new(dev, phy_id);
 }
@@ -2088,8 +2080,8 @@
 	int i;
 	bool last = true;	/* is this the last phy of the port */
 
-	SAS_DPRINTK("ex %016llx phy%d originated BROADCAST(CHANGE)\n",
-		    SAS_ADDR(dev->sas_addr), phy_id);
+	pr_debug("ex %016llx phy%02d originated BROADCAST(CHANGE)\n",
+		 SAS_ADDR(dev->sas_addr), phy_id);
 
 	if (SAS_ADDR(changed_phy->attached_sas_addr) != 0) {
 		for (i = 0; i < ex->num_phys; i++) {
@@ -2099,13 +2091,11 @@
 				continue;
 			if (SAS_ADDR(phy->attached_sas_addr) ==
 			    SAS_ADDR(changed_phy->attached_sas_addr)) {
-				SAS_DPRINTK("phy%d part of wide port with "
-					    "phy%d\n", phy_id, i);
 				last = false;
 				break;
 			}
 		}
-		res = sas_rediscover_dev(dev, phy_id, last);
+		res = sas_rediscover_dev(dev, phy_id, last, i);
 	} else
 		res = sas_discover_new(dev, phy_id);
 	return res;
@@ -2158,23 +2148,23 @@
 	case SAS_FANOUT_EXPANDER_DEVICE:
 		break;
 	default:
-		printk("%s: can we send a smp request to a device?\n",
+		pr_err("%s: can we send a smp request to a device?\n",
 		       __func__);
 		goto out;
 	}
 
 	dev = sas_find_dev_by_rphy(rphy);
 	if (!dev) {
-		printk("%s: fail to find a domain_device?\n", __func__);
+		pr_err("%s: failed to find a domain_device\n", __func__);
 		goto out;
 	}
 
 	/* do we need to support multiple segments? */
 	if (job->request_payload.sg_cnt > 1 ||
 	    job->reply_payload.sg_cnt > 1) {
-		printk("%s: multiple segments req %u, rsp %u\n",
-		       __func__, job->request_payload.payload_len,
-		       job->reply_payload.payload_len);
+		pr_info("%s: multiple segments req %u, rsp %u\n",
+			__func__, job->request_payload.payload_len,
+			job->reply_payload.payload_len);
 		goto out;
 	}
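
The recurring two-branch check against SAS_EDGE_EXPANDER_DEVICE and
SAS_FANOUT_EXPANDER_DEVICE is replaced by dev_is_expander() at every site in
this file and in the files below. The helper's definition is not part of
these hunks; as a sketch of what the calls above assume (in mainline it is a
one-line inline in include/scsi/libsas.h):

	/* Sketch of the predicate this diff switches to; the real
	 * definition lives outside these hunks. */
	static inline bool dev_is_expander(enum sas_device_type type)
	{
		return type == SAS_EDGE_EXPANDER_DEVICE ||
		       type == SAS_FANOUT_EXPANDER_DEVICE;
	}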
 
diff --git a/drivers/scsi/libsas/sas_host_smp.c b/drivers/scsi/libsas/sas_host_smp.c
index 9ead93d..eca2a6b 100644
--- a/drivers/scsi/libsas/sas_host_smp.c
+++ b/drivers/scsi/libsas/sas_host_smp.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Serial Attached SCSI (SAS) Expander discovery and configuration
  *
  * Copyright (C) 2007 James E.J. Bottomley
  *		<James.Bottomley@HansenPartnership.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; version 2 only.
  */
 #include <linux/scatterlist.h>
 #include <linux/blkdev.h>
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index ede0af7..21c43b1 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -1,26 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Serial Attached SCSI (SAS) Transport Layer initialization
  *
  * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
  * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
- *
- * This file is licensed under GPLv2.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
- *
  */
 
 #include <linux/module.h>
@@ -87,25 +70,27 @@
 /*------------ SAS addr hash -----------*/
 void sas_hash_addr(u8 *hashed, const u8 *sas_addr)
 {
-        const u32 poly = 0x00DB2777;
-        u32     r = 0;
-        int     i;
+	const u32 poly = 0x00DB2777;
+	u32 r = 0;
+	int i;
 
-        for (i = 0; i < 8; i++) {
-                int b;
-                for (b = 7; b >= 0; b--) {
-                        r <<= 1;
-                        if ((1 << b) & sas_addr[i]) {
-                                if (!(r & 0x01000000))
-                                        r ^= poly;
-                        } else if (r & 0x01000000)
-                                r ^= poly;
-                }
-        }
+	for (i = 0; i < SAS_ADDR_SIZE; i++) {
+		int b;
 
-        hashed[0] = (r >> 16) & 0xFF;
-        hashed[1] = (r >> 8) & 0xFF ;
-        hashed[2] = r & 0xFF;
+		for (b = (SAS_ADDR_SIZE - 1); b >= 0; b--) {
+			r <<= 1;
+			if ((1 << b) & sas_addr[i]) {
+				if (!(r & 0x01000000))
+					r ^= poly;
+			} else if (r & 0x01000000) {
+				r ^= poly;
+			}
+		}
+	}
+
+	hashed[0] = (r >> 16) & 0xFF;
+	hashed[1] = (r >> 8) & 0xFF;
+	hashed[2] = r & 0xFF;
 }
 
 int sas_register_ha(struct sas_ha_struct *sas_ha)
@@ -128,19 +113,19 @@
 
 	error = sas_register_phys(sas_ha);
 	if (error) {
-		printk(KERN_NOTICE "couldn't register sas phys:%d\n", error);
+		pr_notice("couldn't register sas phys:%d\n", error);
 		return error;
 	}
 
 	error = sas_register_ports(sas_ha);
 	if (error) {
-		printk(KERN_NOTICE "couldn't register sas ports:%d\n", error);
+		pr_notice("couldn't register sas ports:%d\n", error);
 		goto Undo_phys;
 	}
 
 	error = sas_init_events(sas_ha);
 	if (error) {
-		printk(KERN_NOTICE "couldn't start event thread:%d\n", error);
+		pr_notice("couldn't start event thread:%d\n", error);
 		goto Undo_ports;
 	}
 
@@ -623,8 +608,8 @@
 	if (atomic_read(&phy->event_nr) > phy->ha->event_thres) {
 		if (i->dft->lldd_control_phy) {
 			if (cmpxchg(&phy->in_shutdown, 0, 1) == 0) {
-				sas_printk("The phy%02d bursting events, shut it down.\n",
-					phy->id);
+				pr_notice("phy%d is bursting events, shutting it down\n",
+					  phy->id);
 				sas_notify_phy_event(phy, PHYE_SHUTDOWN);
 			}
 		} else {
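
The sas_hash_addr() hunk above is a whitespace and style cleanup, not a
behavior change: the routine still folds the 8-byte SAS address through a
24-bit CRC with polynomial 0x00DB2777 and stores the result most significant
byte first. (The rewritten inner loop reuses SAS_ADDR_SIZE, value 8, as the
bits-per-byte count, which is numerically identical to the old literal 7.)
A hedged usage sketch, with an arbitrary example address:

	/* Usage sketch: reduce a SAS address to the 3-byte hashed form.
	 * The address bytes here are an arbitrary example. */
	u8 addr[SAS_ADDR_SIZE] = { 0x50, 0x00, 0x1b, 0x32,
				   0x12, 0x34, 0x56, 0x78 };
	u8 hashed[3];

	sas_hash_addr(hashed, addr);	/* hashed[0] holds the CRC's top byte */
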
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 50e12d6..01f1738 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -1,26 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Serial Attached SCSI (SAS) class internal header file
  *
  * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
  * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
- *
- * This file is licensed under GPLv2.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
- *
  */
 
 #ifndef _SAS_INTERNAL_H_
@@ -32,9 +15,13 @@
 #include <scsi/libsas.h>
 #include <scsi/sas_ata.h>
 
-#define sas_printk(fmt, ...) printk(KERN_NOTICE "sas: " fmt, ## __VA_ARGS__)
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
 
-#define SAS_DPRINTK(fmt, ...) printk(KERN_DEBUG "sas: " fmt, ## __VA_ARGS__)
+#define SAS_FMT "sas: "
+
+#define pr_fmt(fmt) SAS_FMT fmt
 
 #define TO_SAS_TASK(_scsi_cmd)  ((void *)(_scsi_cmd)->host_scribble)
 #define ASSIGN_SAS_TASK(_sc, _t) do { (_sc)->host_scribble = (void *) _t; } while (0)
@@ -120,10 +107,10 @@
 
 static inline void sas_fail_probe(struct domain_device *dev, const char *func, int err)
 {
-	SAS_DPRINTK("%s: for %s device %16llx returned %d\n",
-		    func, dev->parent ? "exp-attached" :
-					    "direct-attached",
-		    SAS_ADDR(dev->sas_addr), err);
+	pr_warn("%s: for %s device %16llx returned %d\n",
+		func, dev->parent ? "exp-attached" :
+		"direct-attached",
+		SAS_ADDR(dev->sas_addr), err);
 	sas_unregister_dev(dev->port, dev);
 }
 
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index bf3e1b9..4ca4b1f 100644
--- a/drivers/scsi/libsas/sas_phy.c
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -1,25 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Serial Attached SCSI (SAS) Phy class
  *
  * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
  * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
- *
- * This file is licensed under GPLv2.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
  */
 
 #include "sas_internal.h"
@@ -35,7 +19,6 @@
 	struct asd_sas_event *ev = to_asd_sas_event(work);
 	struct asd_sas_phy *phy = ev->phy;
 
-	phy->in_shutdown = 0;
 	phy->error = 0;
 	sas_deform_port(phy, 1);
 }
@@ -45,7 +28,6 @@
 	struct asd_sas_event *ev = to_asd_sas_event(work);
 	struct asd_sas_phy *phy = ev->phy;
 
-	phy->in_shutdown = 0;
 	phy->error = 0;
 }
 
@@ -122,11 +104,11 @@
 		phy->enabled = 0;
 		ret = i->dft->lldd_control_phy(phy, PHY_FUNC_DISABLE, NULL);
 		if (ret)
-			sas_printk("lldd disable phy%02d returned %d\n",
-				phy->id, ret);
+			pr_notice("lldd disable phy%d returned %d\n", phy->id,
+				  ret);
 	} else
-		sas_printk("phy%02d is not enabled, cannot shutdown\n",
-			phy->id);
+		pr_notice("phy%d is not enabled, cannot shutdown\n", phy->id);
+	phy->in_shutdown = 0;
 }
 
 /* ---------- Phy class registration ---------- */
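
The two hunks above stop clearing phy->in_shutdown in the link-event
handlers; the flag is now cleared only after sas_phye_shutdown() has run, so
the cmpxchg() guard shown in the sas_init.c hunk earlier cannot queue a
second shutdown while one is still pending. A hedged sketch of that one-shot
guard pattern in plain C11 (names here are illustrative, not libsas symbols):

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int in_shutdown;

	static void notify_burst(int phy_id)
	{
		int expected = 0;

		/* only the caller that flips 0 -> 1 queues the shutdown */
		if (atomic_compare_exchange_strong(&in_shutdown, &expected, 1))
			printf("phy%d bursting events, shutting it down\n", phy_id);
		/* the shutdown handler later stores 0, re-arming the guard */
	}

	int main(void)
	{
		notify_burst(0);	/* queues the shutdown */
		notify_burst(0);	/* no-op: guard already armed */
		return 0;
	}
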
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index fad23dd..7c86fd2 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -1,25 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Serial Attached SCSI (SAS) Port class
  *
  * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
  * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
- *
- * This file is licensed under GPLv2.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
  */
 
 #include "sas_internal.h"
@@ -70,7 +54,7 @@
 			continue;
 		}
 
-		if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
+		if (dev_is_expander(dev->dev_type)) {
 			dev->ex_dev.ex_change_count = -1;
 			for (i = 0; i < dev->ex_dev.num_phys; i++) {
 				struct ex_phy *phy = &dev->ex_dev.ex_phy[i];
@@ -95,6 +79,7 @@
 	int i;
 	struct sas_ha_struct *sas_ha = phy->ha;
 	struct asd_sas_port *port = phy->port;
+	struct domain_device *port_dev;
 	struct sas_internal *si =
 		to_sas_internal(sas_ha->core.shost->transportt);
 	unsigned long flags;
@@ -110,9 +95,9 @@
 			wake_up(&sas_ha->eh_wait_q);
 			return;
 		} else {
-			SAS_DPRINTK("%s: phy%d belongs to port%d already(%d)!\n",
-				    __func__, phy->id, phy->port->id,
-				    phy->port->num_phys);
+			pr_info("%s: phy%d belongs to port%d already(%d)!\n",
+				__func__, phy->id, phy->port->id,
+				phy->port->num_phys);
 			return;
 		}
 	}
@@ -125,8 +110,8 @@
 		if (*(u64 *) port->sas_addr &&
 		    phy_is_wideport_member(port, phy) && port->num_phys > 0) {
 			/* wide port */
-			SAS_DPRINTK("phy%d matched wide port%d\n", phy->id,
-				    port->id);
+			pr_debug("phy%d matched wide port%d\n", phy->id,
+				 port->id);
 			break;
 		}
 		spin_unlock(&port->phy_list_lock);
@@ -147,15 +132,15 @@
 	}
 
 	if (i >= sas_ha->num_phys) {
-		printk(KERN_NOTICE "%s: couldn't find a free port, bug?\n",
-		       __func__);
+		pr_err("%s: couldn't find a free port, bug?\n", __func__);
 		spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
 		return;
 	}
 
 	/* add the phy to the port */
+	port_dev = port->port_dev;
 	list_add_tail(&phy->port_phy_el, &port->phy_list);
-	sas_phy_set_target(phy, port->port_dev);
+	sas_phy_set_target(phy, port_dev);
 	phy->port = port;
 	port->num_phys++;
 	port->phy_mask |= (1U << phy->id);
@@ -180,19 +165,26 @@
 	}
 	sas_port_add_phy(port->port, phy->phy);
 
-	SAS_DPRINTK("%s added to %s, phy_mask:0x%x (%16llx)\n",
-		    dev_name(&phy->phy->dev), dev_name(&port->port->dev),
-		    port->phy_mask,
-		    SAS_ADDR(port->attached_sas_addr));
+	pr_debug("%s added to %s, phy_mask:0x%x (%16llx)\n",
+		 dev_name(&phy->phy->dev), dev_name(&port->port->dev),
+		 port->phy_mask,
+		 SAS_ADDR(port->attached_sas_addr));
 
-	if (port->port_dev)
-		port->port_dev->pathways = port->num_phys;
+	if (port_dev)
+		port_dev->pathways = port->num_phys;
 
 	/* Tell the LLDD about this port formation. */
 	if (si->dft->lldd_port_formed)
 		si->dft->lldd_port_formed(phy);
 
 	sas_discover_event(phy->port, DISCE_DISCOVER_DOMAIN);
+	/* Only insert a revalidate event after initial discovery */
+	if (port_dev && dev_is_expander(port_dev->dev_type)) {
+		struct expander_device *ex_dev = &port_dev->ex_dev;
+
+		ex_dev->ex_change_count = -1;
+		sas_discover_event(port, DISCE_REVALIDATE_DOMAIN);
+	}
 	flush_workqueue(sas_ha->disco_q);
 }
 
@@ -255,6 +247,15 @@
 	spin_unlock(&port->phy_list_lock);
 	spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
 
+	/* Only insert revalidate event if the port still has members */
+	if (port->port && dev && dev_is_expander(dev->dev_type)) {
+		struct expander_device *ex_dev = &dev->ex_dev;
+
+		ex_dev->ex_change_count = -1;
+		sas_discover_event(port, DISCE_REVALIDATE_DOMAIN);
+	}
+	flush_workqueue(sas_ha->disco_q);
+
 	return;
 }
 
@@ -279,7 +280,7 @@
 	prim = phy->sas_prim;
 	spin_unlock_irqrestore(&phy->sas_prim_lock, flags);
 
-	SAS_DPRINTK("broadcast received: %d\n", prim);
+	pr_debug("broadcast received: %d\n", prim);
 	sas_discover_event(phy->port, DISCE_REVALIDATE_DOMAIN);
 
 	if (phy->port)
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 3322934..bec83eb 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -1,26 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Serial Attached SCSI (SAS) class SCSI Host glue.
  *
  * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
  * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
- *
- * This file is licensed under GPLv2.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
- *
  */
 
 #include <linux/kthread.h>
@@ -93,9 +76,8 @@
 				hs = DID_ERROR;
 			break;
 		case SAS_PROTO_RESPONSE:
-			SAS_DPRINTK("LLDD:%s sent SAS_PROTO_RESP for an SSP "
-				    "task; please report this\n",
-				    task->dev->port->ha->sas_ha_name);
+			pr_notice("LLDD:%s sent SAS_PROTO_RESP for an SSP task; please report this\n",
+				  task->dev->port->ha->sas_ha_name);
 			break;
 		case SAS_ABORTED_TASK:
 			hs = DID_ABORT;
@@ -132,12 +114,12 @@
 
 	if (unlikely(!task)) {
 		/* task will be completed by the error handler */
-		SAS_DPRINTK("task done but aborted\n");
+		pr_debug("task done but aborted\n");
 		return;
 	}
 
 	if (unlikely(!sc)) {
-		SAS_DPRINTK("task_done called with non existing SCSI cmnd!\n");
+		pr_debug("task_done called with non existing SCSI cmnd!\n");
 		sas_free_task(task);
 		return;
 	}
@@ -208,7 +190,7 @@
 	return 0;
 
 out_free_task:
-	SAS_DPRINTK("lldd_execute_task returned: %d\n", res);
+	pr_debug("lldd_execute_task returned: %d\n", res);
 	ASSIGN_SAS_TASK(cmd, NULL);
 	sas_free_task(task);
 	if (res == -SAS_QUEUE_FULL)
@@ -301,40 +283,38 @@
 		to_sas_internal(task->dev->port->ha->core.shost->transportt);
 
 	for (i = 0; i < 5; i++) {
-		SAS_DPRINTK("%s: aborting task 0x%p\n", __func__, task);
+		pr_notice("%s: aborting task 0x%p\n", __func__, task);
 		res = si->dft->lldd_abort_task(task);
 
 		spin_lock_irqsave(&task->task_state_lock, flags);
 		if (task->task_state_flags & SAS_TASK_STATE_DONE) {
 			spin_unlock_irqrestore(&task->task_state_lock, flags);
-			SAS_DPRINTK("%s: task 0x%p is done\n", __func__,
-				    task);
+			pr_debug("%s: task 0x%p is done\n", __func__, task);
 			return TASK_IS_DONE;
 		}
 		spin_unlock_irqrestore(&task->task_state_lock, flags);
 
 		if (res == TMF_RESP_FUNC_COMPLETE) {
-			SAS_DPRINTK("%s: task 0x%p is aborted\n",
-				    __func__, task);
+			pr_notice("%s: task 0x%p is aborted\n",
+				  __func__, task);
 			return TASK_IS_ABORTED;
 		} else if (si->dft->lldd_query_task) {
-			SAS_DPRINTK("%s: querying task 0x%p\n",
-				    __func__, task);
+			pr_notice("%s: querying task 0x%p\n", __func__, task);
 			res = si->dft->lldd_query_task(task);
 			switch (res) {
 			case TMF_RESP_FUNC_SUCC:
-				SAS_DPRINTK("%s: task 0x%p at LU\n",
-					    __func__, task);
+				pr_notice("%s: task 0x%p at LU\n", __func__,
+					  task);
 				return TASK_IS_AT_LU;
 			case TMF_RESP_FUNC_COMPLETE:
-				SAS_DPRINTK("%s: task 0x%p not at LU\n",
-					    __func__, task);
+				pr_notice("%s: task 0x%p not at LU\n",
+					  __func__, task);
 				return TASK_IS_NOT_AT_LU;
 			case TMF_RESP_FUNC_FAILED:
-                                SAS_DPRINTK("%s: task 0x%p failed to abort\n",
-                                                __func__, task);
-                                return TASK_ABORT_FAILED;
-                        }
+				pr_notice("%s: task 0x%p failed to abort\n",
+					  __func__, task);
+				return TASK_ABORT_FAILED;
+			}
 
 		}
 	}
@@ -350,9 +330,9 @@
 
 	int_to_scsilun(cmd->device->lun, &lun);
 
-	SAS_DPRINTK("eh: device %llx LUN %llx has the task\n",
-		    SAS_ADDR(dev->sas_addr),
-		    cmd->device->lun);
+	pr_notice("eh: device %llx LUN %llx has the task\n",
+		  SAS_ADDR(dev->sas_addr),
+		  cmd->device->lun);
 
 	if (i->dft->lldd_abort_task_set)
 		res = i->dft->lldd_abort_task_set(dev, lun.scsi_lun);
@@ -376,8 +356,8 @@
 	struct sas_internal *i =
 		to_sas_internal(dev->port->ha->core.shost->transportt);
 
-	SAS_DPRINTK("I_T nexus reset for dev %016llx\n",
-		    SAS_ADDR(dev->sas_addr));
+	pr_notice("I_T nexus reset for dev %016llx\n",
+		  SAS_ADDR(dev->sas_addr));
 
 	if (i->dft->lldd_I_T_nexus_reset)
 		res = i->dft->lldd_I_T_nexus_reset(dev);
@@ -434,7 +414,6 @@
 		goto retry;
 	}
 }
-EXPORT_SYMBOL(sas_wait_eh);
 
 static int sas_queue_reset(struct domain_device *dev, int reset_type,
 			   u64 lun, int wait)
@@ -471,9 +450,9 @@
 			return SUCCESS;
 	}
 
-	SAS_DPRINTK("%s reset of %s failed\n",
-		    reset_type == SAS_DEV_LU_RESET ? "LUN" : "Bus",
-		    dev_name(&dev->rphy->dev));
+	pr_warn("%s reset of %s failed\n",
+		reset_type == SAS_DEV_LU_RESET ? "LUN" : "Bus",
+		dev_name(&dev->rphy->dev));
 
 	return FAILED;
 }
@@ -501,7 +480,7 @@
 	if (task)
 		res = i->dft->lldd_abort_task(task);
 	else
-		SAS_DPRINTK("no task to abort\n");
+		pr_notice("no task to abort\n");
 	if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
 		return SUCCESS;
 
@@ -612,34 +591,33 @@
 		spin_unlock_irqrestore(&task->task_state_lock, flags);
 
 		if (need_reset) {
-			SAS_DPRINTK("%s: task 0x%p requests reset\n",
-				    __func__, task);
+			pr_notice("%s: task 0x%p requests reset\n",
+				  __func__, task);
 			goto reset;
 		}
 
-		SAS_DPRINTK("trying to find task 0x%p\n", task);
+		pr_debug("trying to find task 0x%p\n", task);
 		res = sas_scsi_find_task(task);
 
 		switch (res) {
 		case TASK_IS_DONE:
-			SAS_DPRINTK("%s: task 0x%p is done\n", __func__,
+			pr_notice("%s: task 0x%p is done\n", __func__,
 				    task);
 			sas_eh_finish_cmd(cmd);
 			continue;
 		case TASK_IS_ABORTED:
-			SAS_DPRINTK("%s: task 0x%p is aborted\n",
-				    __func__, task);
+			pr_notice("%s: task 0x%p is aborted\n",
+				  __func__, task);
 			sas_eh_finish_cmd(cmd);
 			continue;
 		case TASK_IS_AT_LU:
-			SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task);
+			pr_info("task 0x%p is at LU: lu recover\n", task);
  reset:
 			tmf_resp = sas_recover_lu(task->dev, cmd);
 			if (tmf_resp == TMF_RESP_FUNC_COMPLETE) {
-				SAS_DPRINTK("dev %016llx LU %llx is "
-					    "recovered\n",
-					    SAS_ADDR(task->dev),
-					    cmd->device->lun);
+				pr_notice("dev %016llx LU %llx is recovered\n",
+					  SAS_ADDR(task->dev),
+					  cmd->device->lun);
 				sas_eh_finish_cmd(cmd);
 				sas_scsi_clear_queue_lu(work_q, cmd);
 				goto Again;
@@ -647,14 +625,14 @@
 			/* fallthrough */
 		case TASK_IS_NOT_AT_LU:
 		case TASK_ABORT_FAILED:
-			SAS_DPRINTK("task 0x%p is not at LU: I_T recover\n",
-				    task);
+			pr_notice("task 0x%p is not at LU: I_T recover\n",
+				  task);
 			tmf_resp = sas_recover_I_T(task->dev);
 			if (tmf_resp == TMF_RESP_FUNC_COMPLETE ||
 			    tmf_resp == -ENODEV) {
 				struct domain_device *dev = task->dev;
-				SAS_DPRINTK("I_T %016llx recovered\n",
-					    SAS_ADDR(task->dev->sas_addr));
+				pr_notice("I_T %016llx recovered\n",
+					  SAS_ADDR(task->dev->sas_addr));
 				sas_eh_finish_cmd(cmd);
 				sas_scsi_clear_queue_I_T(work_q, dev);
 				goto Again;
@@ -663,12 +641,12 @@
 			try_to_reset_cmd_device(cmd);
 			if (i->dft->lldd_clear_nexus_port) {
 				struct asd_sas_port *port = task->dev->port;
-				SAS_DPRINTK("clearing nexus for port:%d\n",
-					    port->id);
+				pr_debug("clearing nexus for port:%d\n",
+					  port->id);
 				res = i->dft->lldd_clear_nexus_port(port);
 				if (res == TMF_RESP_FUNC_COMPLETE) {
-					SAS_DPRINTK("clear nexus port:%d "
-						    "succeeded\n", port->id);
+					pr_notice("clear nexus port:%d succeeded\n",
+						  port->id);
 					sas_eh_finish_cmd(cmd);
 					sas_scsi_clear_queue_port(work_q,
 								  port);
@@ -676,11 +654,10 @@
 				}
 			}
 			if (i->dft->lldd_clear_nexus_ha) {
-				SAS_DPRINTK("clear nexus ha\n");
+				pr_debug("clear nexus ha\n");
 				res = i->dft->lldd_clear_nexus_ha(ha);
 				if (res == TMF_RESP_FUNC_COMPLETE) {
-					SAS_DPRINTK("clear nexus ha "
-						    "succeeded\n");
+					pr_notice("clear nexus ha succeeded\n");
 					sas_eh_finish_cmd(cmd);
 					goto clear_q;
 				}
@@ -689,10 +666,9 @@
 			 * of effort could recover from errors.  Quite
 			 * possibly the HA just disappeared.
 			 */
-			SAS_DPRINTK("error from  device %llx, LUN %llx "
-				    "couldn't be recovered in any way\n",
-				    SAS_ADDR(task->dev->sas_addr),
-				    cmd->device->lun);
+			pr_err("error from device %llx, LUN %llx couldn't be recovered in any way\n",
+			       SAS_ADDR(task->dev->sas_addr),
+			       cmd->device->lun);
 
 			sas_eh_finish_cmd(cmd);
 			goto clear_q;
@@ -704,7 +680,7 @@
 	return;
 
  clear_q:
-	SAS_DPRINTK("--- Exit %s -- clear_q\n", __func__);
+	pr_debug("--- Exit %s -- clear_q\n", __func__);
 	list_for_each_entry_safe(cmd, n, work_q, eh_entry)
 		sas_eh_finish_cmd(cmd);
 	goto out;
@@ -758,8 +734,8 @@
 	list_splice_init(&shost->eh_cmd_q, &eh_work_q);
 	spin_unlock_irq(shost->host_lock);
 
-	SAS_DPRINTK("Enter %s busy: %d failed: %d\n",
-		    __func__, scsi_host_busy(shost), shost->host_failed);
+	pr_notice("Enter %s busy: %d failed: %d\n",
+		  __func__, scsi_host_busy(shost), shost->host_failed);
 	/*
 	 * Deal with commands that still have SAS tasks (i.e. they didn't
 	 * complete via the normal sas_task completion mechanism),
@@ -800,12 +776,12 @@
 	if (retry)
 		goto retry;
 
-	SAS_DPRINTK("--- Exit %s: busy: %d failed: %d tries: %d\n",
-		    __func__, scsi_host_busy(shost),
-		    shost->host_failed, tries);
+	pr_notice("--- Exit %s: busy: %d failed: %d tries: %d\n",
+		  __func__, scsi_host_busy(shost),
+		  shost->host_failed, tries);
 }
 
-int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
+int sas_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg)
 {
 	struct domain_device *dev = sdev_to_domain_dev(sdev);
 
@@ -875,9 +851,8 @@
 	if (scsi_dev->tagged_supported) {
 		scsi_change_queue_depth(scsi_dev, SAS_DEF_QD);
 	} else {
-		SAS_DPRINTK("device %llx, LUN %llx doesn't support "
-			    "TCQ\n", SAS_ADDR(dev->sas_addr),
-			    scsi_dev->lun);
+		pr_notice("device %llx, LUN %llx doesn't support TCQ\n",
+			  SAS_ADDR(dev->sas_addr), scsi_dev->lun);
 		scsi_change_queue_depth(scsi_dev, 1);
 	}
 
@@ -930,16 +905,10 @@
 		return;
 	}
 
-	if (dev_is_sata(task->dev)) {
+	if (dev_is_sata(task->dev))
 		sas_ata_task_abort(task);
-	} else {
-		struct request_queue *q = sc->device->request_queue;
-		unsigned long flags;
-
-		spin_lock_irqsave(q->queue_lock, flags);
+	else
 		blk_abort_request(sc->request);
-		spin_unlock_irqrestore(q->queue_lock, flags);
-	}
 }
 
 void sas_target_destroy(struct scsi_target *starget)
diff --git a/drivers/scsi/libsas/sas_task.c b/drivers/scsi/libsas/sas_task.c
index a78e5bd..1ded7d8 100644
--- a/drivers/scsi/libsas/sas_task.c
+++ b/drivers/scsi/libsas/sas_task.c
@@ -1,3 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include "sas_internal.h"
+
 #include <linux/kernel.h>
 #include <linux/export.h>
 #include <scsi/sas.h>
@@ -23,11 +27,8 @@
 		memcpy(tstat->buf, iu->sense_data, tstat->buf_valid_size);
 
 		if (iu->status != SAM_STAT_CHECK_CONDITION)
-			dev_printk(KERN_WARNING, dev,
-				   "dev %llx sent sense data, but "
-				   "stat(%x) is not CHECK CONDITION\n",
-				   SAS_ADDR(task->dev->sas_addr),
-				   iu->status);
+			dev_warn(dev, "dev %llx sent sense data, but stat(%x) is not CHECK CONDITION\n",
+				 SAS_ADDR(task->dev->sas_addr), iu->status);
 	}
 	else
 		/* when datapres contains corrupt/unknown value... */
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 43732e8..691acbd 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -51,8 +51,10 @@
 		cmnd for menlo needs nearly twice as for firmware
 		downloads using bsg */
 
+#define LPFC_DEFAULT_XPSGL_SIZE	256
+#define LPFC_MAX_SG_TABLESIZE	0xffff
 #define LPFC_MIN_SG_SLI4_BUF_SZ	0x800	/* based on LPFC_DEFAULT_SG_SEG_CNT */
-#define LPFC_MAX_SG_SLI4_SEG_CNT_DIF 128 /* sg element count per scsi cmnd */
+#define LPFC_MAX_BG_SLI4_SEG_CNT_DIF 128 /* sg element count for BlockGuard */
 #define LPFC_MAX_SG_SEG_CNT_DIF 512	/* sg element count per scsi cmnd  */
 #define LPFC_MAX_SG_SEG_CNT	4096	/* sg element count per scsi cmnd */
 #define LPFC_MIN_SG_SEG_CNT	32	/* sg element count per scsi cmnd */
@@ -84,8 +86,6 @@
 #define LPFC_HB_MBOX_INTERVAL   5	/* Heart beat interval in seconds. */
 #define LPFC_HB_MBOX_TIMEOUT    30	/* Heart beat timeout  in seconds. */
 
-#define LPFC_LOOK_AHEAD_OFF	0	/* Look ahead logic is turned off */
-
 /* Error Attention event polling interval */
 #define LPFC_ERATT_POLL_INTERVAL	5 /* EATT poll interval in seconds */
 
@@ -146,6 +146,7 @@
 	struct lpfc_nvmet_rcv_ctx *context;
 	struct lpfc_iocbq *iocbq;
 	struct lpfc_sglq *sglq;
+	struct work_struct defer_work;
 };
 
 struct lpfc_dma_pool {
@@ -235,8 +236,6 @@
 	} sli3Feat;
 } lpfc_vpd_t;
 
-struct lpfc_scsi_buf;
-
 
 /*
  * lpfc stat counters
@@ -277,6 +276,7 @@
 	uint32_t elsXmitADISC;
 	uint32_t elsXmitLOGO;
 	uint32_t elsXmitSCR;
+	uint32_t elsXmitRSCN;
 	uint32_t elsXmitRNID;
 	uint32_t elsXmitFARP;
 	uint32_t elsXmitFARPR;
@@ -335,6 +335,18 @@
 	LPFC_HBA_ERROR       =  -1
 };
 
+struct lpfc_trunk_link_state {
+	enum hba_state state;
+	uint8_t fault;
+};
+
+struct lpfc_trunk_link  {
+	struct lpfc_trunk_link_state link0,
+				     link1,
+				     link2,
+				     link3;
+};
+
 struct lpfc_vport {
 	struct lpfc_hba *phba;
 	struct list_head listentry;
@@ -454,6 +466,7 @@
 	uint32_t cfg_use_adisc;
 	uint32_t cfg_discovery_threads;
 	uint32_t cfg_log_verbose;
+	uint32_t cfg_enable_fc4_type;
 	uint32_t cfg_max_luns;
 	uint32_t cfg_enable_da_id;
 	uint32_t cfg_max_scsicmpl_time;
@@ -467,6 +480,7 @@
 	struct dentry *debug_disc_trc;
 	struct dentry *debug_nodelist;
 	struct dentry *debug_nvmestat;
+	struct dentry *debug_scsistat;
 	struct dentry *debug_nvmektime;
 	struct dentry *debug_cpucheck;
 	struct dentry *vport_debugfs_root;
@@ -490,6 +504,7 @@
 	struct nvme_fc_local_port *localport;
 	uint8_t  nvmei_support; /* driver supports NVME Initiator */
 	uint32_t last_fcp_wqidx;
+	uint32_t rcv_flogi_cnt; /* How many unsol FLOGIs ACK'd. */
 };
 
 struct hbq_s {
@@ -583,22 +598,47 @@
 	struct list_head ext_dmabuf_list;
 };
 
+struct lpfc_epd_pool {
+	/* Expedite pool */
+	struct list_head list;
+	u32 count;
+	spinlock_t lock;	/* lock for expedite pool */
+};
+
+struct lpfc_ras_fwlog {
+	uint8_t *fwlog_buff;
+	uint32_t fw_buffcount; /* Buffer size posted to FW */
+#define LPFC_RAS_BUFF_ENTERIES  16      /* Each entry can hold max of 64k */
+#define LPFC_RAS_MAX_ENTRY_SIZE (64 * 1024)
+#define LPFC_RAS_MIN_BUFF_POST_SIZE (256 * 1024)
+#define LPFC_RAS_MAX_BUFF_POST_SIZE (1024 * 1024)
+	uint32_t fw_loglevel; /* Log level set */
+	struct lpfc_dmabuf lwpd;
+	struct list_head fwlog_buff_list;
+
+	/* RAS support status on adapter */
+	bool ras_hwsupport; /* RAS Support available on HW or not */
+	bool ras_enabled;   /* Ras Enabled for the function */
+#define LPFC_RAS_DISABLE_LOGGING 0x00
+#define LPFC_RAS_ENABLE_LOGGING 0x01
+	bool ras_active;    /* RAS logging running state */
+};
+
 struct lpfc_hba {
 	/* SCSI interface function jump table entries */
-	int (*lpfc_new_scsi_buf)
-		(struct lpfc_vport *, int);
-	struct lpfc_scsi_buf * (*lpfc_get_scsi_buf)
-		(struct lpfc_hba *, struct lpfc_nodelist *);
+	struct lpfc_io_buf * (*lpfc_get_scsi_buf)
+		(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+		struct scsi_cmnd *cmnd);
 	int (*lpfc_scsi_prep_dma_buf)
-		(struct lpfc_hba *, struct lpfc_scsi_buf *);
+		(struct lpfc_hba *, struct lpfc_io_buf *);
 	void (*lpfc_scsi_unprep_dma_buf)
-		(struct lpfc_hba *, struct lpfc_scsi_buf *);
+		(struct lpfc_hba *, struct lpfc_io_buf *);
 	void (*lpfc_release_scsi_buf)
-		(struct lpfc_hba *, struct lpfc_scsi_buf *);
+		(struct lpfc_hba *, struct lpfc_io_buf *);
 	void (*lpfc_rampdown_queue_depth)
 		(struct lpfc_hba *);
 	void (*lpfc_scsi_prep_cmnd)
-		(struct lpfc_vport *, struct lpfc_scsi_buf *,
+		(struct lpfc_vport *, struct lpfc_io_buf *,
 		 struct lpfc_nodelist *);
 
 	/* IOCB interface function jump table entries */
@@ -641,13 +681,17 @@
 		(struct lpfc_hba *);
 
 	int (*lpfc_bg_scsi_prep_dma_buf)
-		(struct lpfc_hba *, struct lpfc_scsi_buf *);
+		(struct lpfc_hba *, struct lpfc_io_buf *);
 	/* Add new entries here */
 
+	/* expedite pool */
+	struct lpfc_epd_pool epd_pool;
+
 	/* SLI4 specific HBA data structure */
 	struct lpfc_sli4_hba sli4_hba;
 
 	struct workqueue_struct *wq;
+	struct delayed_work     eq_delay_work;
 
 	struct lpfc_sli sli;
 	uint8_t pci_dev_grp;	/* lpfc PCI dev group: 0x0, 0x1, 0x2,... */
@@ -664,6 +708,7 @@
 	uint32_t iocb_cmd_size;
 	uint32_t iocb_rsp_size;
 
+	struct lpfc_trunk_link  trunk_link;
 	enum hba_state link_state;
 	uint32_t link_flag;	/* link state flags */
 #define LS_LOOPBACK_MODE      0x1	/* NPort is in Loopback mode */
@@ -680,7 +725,6 @@
 #define HBA_FCOE_MODE		0x4 /* HBA function in FCoE Mode */
 #define HBA_SP_QUEUE_EVT	0x8 /* Slow-path qevt posted to worker thread*/
 #define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */
-#define FCP_XRI_ABORT_EVENT	0x20
 #define ELS_XRI_ABORT_EVENT	0x40
 #define ASYNC_EVENT		0x80
 #define LINK_DISABLED		0x100 /* Link disabled by user */
@@ -690,14 +734,14 @@
 #define HBA_AER_ENABLED		0x1000 /* AER enabled with HBA */
 #define HBA_DEVLOSS_TMO         0x2000 /* HBA in devloss timeout */
 #define HBA_RRQ_ACTIVE		0x4000 /* process the rrq active list */
-#define HBA_FCP_IOQ_FLUSH	0x8000 /* FCP I/O queues being flushed */
+#define HBA_IOQ_FLUSH		0x8000 /* FCP/NVME I/O queues being flushed */
 #define HBA_FW_DUMP_OP		0x10000 /* Skips fn reset before FW dump */
 #define HBA_RECOVERABLE_UE	0x20000 /* Firmware supports recoverable UE */
 #define HBA_FORCED_LINK_SPEED	0x40000 /*
 					 * Firmware supports Forced Link Speed
 					 * capability
 					 */
-#define HBA_NVME_IOQ_FLUSH      0x80000 /* NVME IO queues flushed. */
+#define HBA_FLOGI_ISSUED	0x100000 /* FLOGI was issued */
 
 	uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
 	struct lpfc_dmabuf slim2p;
@@ -750,12 +794,14 @@
 	uint8_t  nvmet_support;	/* driver supports NVMET */
 #define LPFC_NVMET_MAX_PORTS	32
 	uint8_t  mds_diags_support;
-	uint32_t initial_imax;
 	uint8_t  bbcredit_support;
 	uint8_t  enab_exp_wqcq_pages;
+	u8	 nsler; /* Firmware supports FC-NVMe-2 SLER */
 
 	/* HBA Config Parameters */
 	uint32_t cfg_ack0;
+	uint32_t cfg_xri_rebalancing;
+	uint32_t cfg_xpsgl;
 	uint32_t cfg_enable_npiv;
 	uint32_t cfg_enable_rrq;
 	uint32_t cfg_topology;
@@ -764,6 +810,7 @@
 #define LPFC_FCF_PRIORITY 2	/* Priority fcf failover */
 	uint32_t cfg_fcf_failover_policy;
 	uint32_t cfg_fcp_io_sched;
+	uint32_t cfg_ns_query;
 	uint32_t cfg_fcp2_no_tgt_reset;
 	uint32_t cfg_cr_delay;
 	uint32_t cfg_cr_count;
@@ -776,12 +823,16 @@
 	uint32_t cfg_use_msi;
 	uint32_t cfg_auto_imax;
 	uint32_t cfg_fcp_imax;
+	uint32_t cfg_force_rscn;
+	uint32_t cfg_cq_poll_threshold;
+	uint32_t cfg_cq_max_proc_limit;
 	uint32_t cfg_fcp_cpu_map;
-	uint32_t cfg_fcp_io_channel;
+	uint32_t cfg_fcp_mq_threshold;
+	uint32_t cfg_hdw_queue;
+	uint32_t cfg_irq_chann;
 	uint32_t cfg_suppress_rsp;
 	uint32_t cfg_nvme_oas;
 	uint32_t cfg_nvme_embed_cmd;
-	uint32_t cfg_nvme_io_channel;
 	uint32_t cfg_nvmet_mrq_post;
 	uint32_t cfg_nvmet_mrq;
 	uint32_t cfg_enable_nvmet;
@@ -790,6 +841,7 @@
 	uint32_t cfg_total_seg_cnt;
 	uint32_t cfg_sg_seg_cnt;
 	uint32_t cfg_nvme_seg_cnt;
+	uint32_t cfg_scsi_seg_cnt;
 	uint32_t cfg_sg_dma_buf_size;
 	uint64_t cfg_soft_wwnn;
 	uint64_t cfg_soft_wwpn;
@@ -816,6 +868,7 @@
 	uint32_t cfg_prot_guard;
 	uint32_t cfg_hostmem_hgp;
 	uint32_t cfg_log_verbose;
+	uint32_t cfg_enable_fc4_type;
 	uint32_t cfg_aer_support;
 	uint32_t cfg_sriov_nr_virtfn;
 	uint32_t cfg_request_firmware_upgrade;
@@ -833,15 +886,15 @@
 #define LPFC_FDMI_SUPPORT	1	/* FDMI supported? */
 	uint32_t cfg_enable_SmartSAN;
 	uint32_t cfg_enable_mds_diags;
-	uint32_t cfg_enable_fc4_type;
+	uint32_t cfg_ras_fwlog_level;
+	uint32_t cfg_ras_fwlog_buffsize;
+	uint32_t cfg_ras_fwlog_func;
 	uint32_t cfg_enable_bbcr;	/* Enable BB Credit Recovery */
 	uint32_t cfg_enable_dpp;	/* Enable Direct Packet Push */
-	uint32_t cfg_xri_split;
 #define LPFC_ENABLE_FCP  1
 #define LPFC_ENABLE_NVME 2
 #define LPFC_ENABLE_BOTH 3
 	uint32_t cfg_enable_pbde;
-	uint32_t io_channel_irqs;	/* number of irqs for io channels */
 	struct nvmet_fc_target_port *targetport;
 	lpfc_vpd_t vpd;		/* vital product data */
 
@@ -855,6 +908,7 @@
 	wait_queue_head_t    work_waitq;
 	struct task_struct   *worker_thread;
 	unsigned long data_flags;
+	uint32_t border_sge_num;
 
 	uint32_t hbq_in_use;		/* HBQs in use flag */
 	uint32_t hbq_count;	        /* Count of configured HBQs */
@@ -895,6 +949,7 @@
 	int brd_no;			/* FC board number */
 	char SerialNumber[32];		/* adapter Serial Number */
 	char OptionROMVersion[32];	/* adapter BIOS / Fcode version */
+	char BIOSVersion[16];		/* Boot BIOS version */
 	char ModelDesc[256];		/* Model Description */
 	char ModelName[80];		/* Model Name */
 	char ProgramType[256];		/* Program Type */
@@ -913,14 +968,6 @@
 	struct timer_list eratt_poll;
 	uint32_t eratt_poll_interval;
 
-	/*
-	 * stat  counters
-	 */
-	atomic_t fc4ScsiInputRequests;
-	atomic_t fc4ScsiOutputRequests;
-	atomic_t fc4ScsiControlRequests;
-	atomic_t fc4ScsiIoCmpls;
-
 	uint64_t bg_guard_err_cnt;
 	uint64_t bg_apptag_err_cnt;
 	uint64_t bg_reftag_err_cnt;
@@ -931,13 +978,6 @@
 	struct list_head lpfc_scsi_buf_list_get;
 	struct list_head lpfc_scsi_buf_list_put;
 	uint32_t total_scsi_bufs;
-	spinlock_t nvme_buf_list_get_lock;  /* NVME buf alloc list lock */
-	spinlock_t nvme_buf_list_put_lock;  /* NVME buf free list lock */
-	struct list_head lpfc_nvme_buf_list_get;
-	struct list_head lpfc_nvme_buf_list_put;
-	uint32_t total_nvme_bufs;
-	uint32_t get_nvme_bufs;
-	uint32_t put_nvme_bufs;
 	struct list_head lpfc_iocb_list;
 	uint32_t total_iocbq_bufs;
 	struct list_head active_rrq_list;
@@ -951,6 +991,7 @@
 	struct dma_pool *lpfc_nvmet_drb_pool; /* data receive buffer pool */
 	struct dma_pool *lpfc_hbq_pool;	/* SLI3 hbq buffer pool */
 	struct dma_pool *txrdy_payload_pool;
+	struct dma_pool *lpfc_cmd_rsp_buf_pool;
 	struct lpfc_dma_pool lpfc_mbuf_safety_pool;
 
 	mempool_t *mbox_mem_pool;
@@ -963,9 +1004,11 @@
 	uint32_t intr_mode;
 #define LPFC_INTR_ERROR	0xFFFFFFFF
 	struct list_head port_list;
+	spinlock_t port_list_lock;	/* lock for port_list mutations */
 	struct lpfc_vport *pport;	/* physical lpfc_vport pointer */
 	uint16_t max_vpi;		/* Maximum virtual nports */
-#define LPFC_MAX_VPI 0xFFFF		/* Max number of VPI supported */
+#define LPFC_MAX_VPI	0xFF		/* Max number of VPIs supported: 0 - 0xff */
+#define LPFC_MAX_VPORTS	0x100		/* Max vports per port, with pport */
 	uint16_t max_vports;            /*
 					 * For IOV HBAs max_vpi can change
 					 * after a reset. max_vports is max
@@ -992,11 +1035,10 @@
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	struct dentry *hba_debugfs_root;
 	atomic_t debugfs_vport_count;
+	struct dentry *debug_multixri_pools;
 	struct dentry *debug_hbqinfo;
 	struct dentry *debug_dumpHostSlim;
 	struct dentry *debug_dumpHBASlim;
-	struct dentry *debug_dumpData;   /* BlockGuard BPL */
-	struct dentry *debug_dumpDif;    /* BlockGuard BPL */
 	struct dentry *debug_InjErrLBA;  /* LBA to inject errors at */
 	struct dentry *debug_InjErrNPortID;  /* NPortID to inject errors at */
 	struct dentry *debug_InjErrWWPN;  /* WWPN to inject errors at */
@@ -1009,6 +1051,10 @@
 
 	struct dentry *debug_nvmeio_trc;
 	struct lpfc_debugfs_nvmeio_trc *nvmeio_trc;
+	struct dentry *debug_hdwqinfo;
+#ifdef LPFC_HDWQ_LOCK_STAT
+	struct dentry *debug_lockstat;
+#endif
 	atomic_t nvmeio_trc_cnt;
 	uint32_t nvmeio_trc_size;
 	uint32_t nvmeio_trc_output_idx;
@@ -1049,7 +1095,6 @@
 
 	uint8_t temp_sensor_support;
 	/* Fields used for heart beat. */
-	unsigned long last_eqdelay_time;
 	unsigned long last_completion_time;
 	unsigned long skipped_hb;
 	struct timer_list hb_tmofunc;
@@ -1087,11 +1132,18 @@
 	uint16_t vlan_id;
 	struct list_head fcf_conn_rec_list;
 
+	bool defer_flogi_acc_flag;
+	uint16_t defer_flogi_acc_rx_id;
+	uint16_t defer_flogi_acc_ox_id;
+
 	spinlock_t ct_ev_lock; /* synchronize access to ct_ev_waiters */
 	struct list_head ct_ev_waiters;
 	struct unsol_rcv_ct_ctx ct_ctx[LPFC_CT_CTX_MAX];
 	uint32_t ctx_idx;
 
+	/* RAS Support */
+	struct lpfc_ras_fwlog ras_fwlog;
+
 	uint8_t menlo_flag;	/* menlo generic flags */
 #define HBA_MENLO_SUPPORT	0x1 /* HBA supports menlo commands */
 	uint32_t iocb_cnt;
@@ -1116,16 +1168,12 @@
 	uint16_t sfp_warning;
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-#define LPFC_CHECK_CPU_CNT    32
-	uint32_t cpucheck_rcv_io[LPFC_CHECK_CPU_CNT];
-	uint32_t cpucheck_xmt_io[LPFC_CHECK_CPU_CNT];
-	uint32_t cpucheck_cmpl_io[LPFC_CHECK_CPU_CNT];
-	uint32_t cpucheck_ccmpl_io[LPFC_CHECK_CPU_CNT];
 	uint16_t cpucheck_on;
 #define LPFC_CHECK_OFF		0
 #define LPFC_CHECK_NVME_IO	1
 #define LPFC_CHECK_NVMET_RCV	2
 #define LPFC_CHECK_NVMET_IO	4
+#define LPFC_CHECK_SCSI_IO	8
 	uint16_t ktime_on;
 	uint64_t ktime_data_samples;
 	uint64_t ktime_status_samples;
@@ -1235,6 +1283,12 @@
 static inline struct lpfc_sli_ring *
 lpfc_phba_elsring(struct lpfc_hba *phba)
 {
+	/* Return NULL if sli_rev has become invalid due to bad fw */
+	if (phba->sli_rev != LPFC_SLI_REV4  &&
+	    phba->sli_rev != LPFC_SLI_REV3  &&
+	    phba->sli_rev != LPFC_SLI_REV2)
+		return NULL;
+
 	if (phba->sli_rev == LPFC_SLI_REV4) {
 		if (phba->sli4_hba.els_wq)
 			return phba->sli4_hba.els_wq->pring;
@@ -1243,3 +1297,23 @@
 	}
 	return &phba->sli.sli3_ring[LPFC_ELS_RING];
 }
+
+/**
+ * lpfc_sli4_mod_hba_eq_delay - update EQ delay
+ * @phba: Pointer to HBA context object.
+ * @eq: The Event Queue to update.
+ * @delay: The delay value (in us) to be written.
+ *
+ **/
+static inline void
+lpfc_sli4_mod_hba_eq_delay(struct lpfc_hba *phba, struct lpfc_queue *eq,
+			   u32 delay)
+{
+	struct lpfc_register reg_data;
+
+	reg_data.word0 = 0;
+	bf_set(lpfc_sliport_eqdelay_id, &reg_data, eq->queue_id);
+	bf_set(lpfc_sliport_eqdelay_delay, &reg_data, delay);
+	writel(reg_data.word0, phba->sli4_hba.u.if_type2.EQDregaddr);
+	eq->q_mode = delay;
+}
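lpfc_sli4_mod_hba_eq_delay() builds a single 32-bit doorbell word from the queue id and the delay value via bf_set() and posts it with writel(). A standalone model of that pack-and-write pattern; the bit positions here are illustrative assumptions, not the SLI-4 register layout:

#include <stdint.h>
#include <stdio.h>

/* Assumed illustrative layout: id in bits 0-15, delay in bits 16-31. */
#define EQD_ID_SHIFT	0
#define EQD_ID_MASK	0xFFFFu
#define EQD_DLY_SHIFT	16
#define EQD_DLY_MASK	0xFFFFu

static uint32_t pack_eq_delay(uint32_t queue_id, uint32_t delay)
{
	uint32_t word0 = 0;

	word0 |= (queue_id & EQD_ID_MASK) << EQD_ID_SHIFT;
	word0 |= (delay & EQD_DLY_MASK) << EQD_DLY_SHIFT;
	return word0;	/* the driver writel()s this word to EQDregaddr */
}

int main(void)
{
	printf("eq 5, delay 200us -> word0 0x%08x\n", pack_eq_delay(5, 200));
	return 0;
}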
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 1a6ed9b..25aa7a5 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.  *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -64,9 +64,6 @@
 #define LPFC_MIN_MRQ_POST	512
 #define LPFC_MAX_MRQ_POST	2048
 
-#define LPFC_MAX_NVME_INFO_TMP_LEN	100
-#define LPFC_NVME_INFO_MORE_STR		"\nCould be more info...\n"
-
 /*
  * Write key size should be multiple of 4. If write key is changed
  * make sure that library write key is also changed.
@@ -74,6 +71,23 @@
 #define LPFC_REG_WRITE_KEY_SIZE	4
 #define LPFC_REG_WRITE_KEY	"EMLX"
 
+const char *const trunk_errmsg[] = {	/* map errcode */
+	"",	/* There is no such error code at index 0*/
+	"link negotiated speed does not match existing"
+		" trunk - link was \"low\" speed",
+	"link negotiated speed does not match"
+		" existing trunk - link was \"middle\" speed",
+	"link negotiated speed does not match existing"
+		" trunk - link was \"high\" speed",
+	"Attached to non-trunking port - F_Port",
+	"Attached to non-trunking port - N_Port",
+	"FLOGI response timeout",
+	"non-FLOGI frame received",
+	"Invalid FLOGI response",
+	"Trunking initialization protocol",
+	"Trunk peer device mismatch",
+};
+
 /**
  * lpfc_jedec_to_ascii - Hex to ascii convertor according to JEDEC rules
  * @incr: integer to convert.
@@ -117,7 +131,7 @@
 lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr,
 		       char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n");
+	return scnprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n");
 }
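The blanket snprintf() to scnprintf() conversion in this file guards the recurring "len += ...(buf + len, PAGE_SIZE - len, ...)" accumulation pattern: snprintf() returns the length that would have been written, so len can exceed PAGE_SIZE and underflow the next size argument, while scnprintf() returns the number of characters actually stored. A userspace demonstration of the return-value difference (userspace snprintf matches the kernel's semantics here; scnprintf itself is kernel-only):

#include <stdio.h>

int main(void)
{
	char buf[8];
	int would_write = snprintf(buf, sizeof(buf), "0123456789");

	/* Prints 10 even though only 7 chars plus the NUL terminator fit;
	 * a scnprintf()-style return value would be 7. */
	printf("snprintf returned %d, buf=\"%s\"\n", would_write, buf);
	return 0;
}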
 
 /**
@@ -137,9 +151,9 @@
 	struct lpfc_hba   *phba = vport->phba;
 
 	if (phba->hba_flag & HBA_FIP_SUPPORT)
-		return snprintf(buf, PAGE_SIZE, "1\n");
+		return scnprintf(buf, PAGE_SIZE, "1\n");
 	else
-		return snprintf(buf, PAGE_SIZE, "0\n");
+		return scnprintf(buf, PAGE_SIZE, "0\n");
 }
 
 static ssize_t
@@ -155,15 +169,16 @@
 	struct lpfc_nvme_rport *rport;
 	struct lpfc_nodelist *ndlp;
 	struct nvme_fc_remote_port *nrport;
-	struct lpfc_nvme_ctrl_stat *cstat;
+	struct lpfc_fc4_ctrl_stat *cstat;
 	uint64_t data1, data2, data3;
 	uint64_t totin, totout, tot;
 	char *statep;
 	int i;
 	int len = 0;
 	char tmp[LPFC_MAX_NVME_INFO_TMP_LEN] = {0};
+	unsigned long iflags = 0;
 
-	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
+	if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
 		len = scnprintf(buf, PAGE_SIZE, "NVME Disabled\n");
 		return len;
 	}
@@ -334,14 +349,13 @@
 
 	rcu_read_lock();
 	scnprintf(tmp, sizeof(tmp),
-		  "XRI Dist lpfc%d Total %d NVME %d SCSI %d ELS %d\n",
+		  "XRI Dist lpfc%d Total %d IO %d ELS %d\n",
 		  phba->brd_no,
 		  phba->sli4_hba.max_cfg_param.max_xri,
-		  phba->sli4_hba.nvme_xri_max,
-		  phba->sli4_hba.scsi_xri_max,
+		  phba->sli4_hba.io_xri_max,
 		  lpfc_sli4_get_els_iocb_cnt(phba));
 	if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
-		goto buffer_done;
+		goto rcu_unlock_buf_done;
 
 	/* Port state is only one of two values for now. */
 	if (localport->port_id)
@@ -357,15 +371,15 @@
 		  wwn_to_u64(vport->fc_nodename.u.wwn),
 		  localport->port_id, statep);
 	if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
-		goto buffer_done;
+		goto rcu_unlock_buf_done;
 
 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
 		nrport = NULL;
-		spin_lock(&vport->phba->hbalock);
+		spin_lock_irqsave(&vport->phba->hbalock, iflags);
 		rport = lpfc_ndlp_get_nrport(ndlp);
 		if (rport)
 			nrport = rport->remoteport;
-		spin_unlock(&vport->phba->hbalock);
+		spin_unlock_irqrestore(&vport->phba->hbalock, iflags);
 		if (!nrport)
 			continue;
 
@@ -384,39 +398,39 @@
 
 		/* Tab in to show lport ownership. */
 		if (strlcat(buf, "NVME RPORT       ", PAGE_SIZE) >= PAGE_SIZE)
-			goto buffer_done;
+			goto rcu_unlock_buf_done;
 		if (phba->brd_no >= 10) {
 			if (strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
-				goto buffer_done;
+				goto rcu_unlock_buf_done;
 		}
 
 		scnprintf(tmp, sizeof(tmp), "WWPN x%llx ",
 			  nrport->port_name);
 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
-			goto buffer_done;
+			goto rcu_unlock_buf_done;
 
 		scnprintf(tmp, sizeof(tmp), "WWNN x%llx ",
 			  nrport->node_name);
 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
-			goto buffer_done;
+			goto rcu_unlock_buf_done;
 
 		scnprintf(tmp, sizeof(tmp), "DID x%06x ",
 			  nrport->port_id);
 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
-			goto buffer_done;
+			goto rcu_unlock_buf_done;
 
 		/* An NVME rport can have multiple roles. */
 		if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) {
 			if (strlcat(buf, "INITIATOR ", PAGE_SIZE) >= PAGE_SIZE)
-				goto buffer_done;
+				goto rcu_unlock_buf_done;
 		}
 		if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) {
 			if (strlcat(buf, "TARGET ", PAGE_SIZE) >= PAGE_SIZE)
-				goto buffer_done;
+				goto rcu_unlock_buf_done;
 		}
 		if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) {
 			if (strlcat(buf, "DISCSRVC ", PAGE_SIZE) >= PAGE_SIZE)
-				goto buffer_done;
+				goto rcu_unlock_buf_done;
 		}
 		if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR |
 					  FC_PORT_ROLE_NVME_TARGET |
@@ -424,12 +438,12 @@
 			scnprintf(tmp, sizeof(tmp), "UNKNOWN ROLE x%x",
 				  nrport->port_role);
 			if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
-				goto buffer_done;
+				goto rcu_unlock_buf_done;
 		}
 
 		scnprintf(tmp, sizeof(tmp), "%s\n", statep);
 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
-			goto buffer_done;
+			goto rcu_unlock_buf_done;
 	}
 	rcu_read_unlock();
 
@@ -457,13 +471,13 @@
 
 	totin = 0;
 	totout = 0;
-	for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
-		cstat = &lport->cstat[i];
-		tot = atomic_read(&cstat->fc4NvmeIoCmpls);
+	for (i = 0; i < phba->cfg_hdw_queue; i++) {
+		cstat = &phba->sli4_hba.hdwq[i].nvme_cstat;
+		tot = cstat->io_cmpls;
 		totin += tot;
-		data1 = atomic_read(&cstat->fc4NvmeInputRequests);
-		data2 = atomic_read(&cstat->fc4NvmeOutputRequests);
-		data3 = atomic_read(&cstat->fc4NvmeControlRequests);
+		data1 = cstat->input_requests;
+		data2 = cstat->output_requests;
+		data3 = cstat->control_requests;
 		totout += (data1 + data2 + data3);
 	}
 	scnprintf(tmp, sizeof(tmp),
@@ -491,7 +505,13 @@
 		  atomic_read(&lport->cmpl_fcp_err));
 	strlcat(buf, tmp, PAGE_SIZE);
 
-buffer_done:
+	/* RCU is already unlocked. */
+	goto buffer_done;
+
+ rcu_unlock_buf_done:
+	rcu_read_unlock();
+
+ buffer_done:
 	len = strnlen(buf, PAGE_SIZE);
 
 	if (unlikely(len >= (PAGE_SIZE - 1))) {
@@ -510,6 +530,57 @@
 }
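The rcu_unlock_buf_done relabeling above ensures every early exit from the rport loop drops the RCU read lock exactly once before falling into the shared buffer_done tail. A minimal standalone model of that unlock-then-done goto ladder; a pthread rwlock stands in for RCU here, purely for illustration:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_rwlock_t lk = PTHREAD_RWLOCK_INITIALIZER;

static int build_report(char *buf, size_t sz)
{
	pthread_rwlock_rdlock(&lk);		/* stands in for rcu_read_lock() */
	if (strlen("some row\n") >= sz)
		goto unlock_done;		/* truncated: unlock exactly once */
	strcpy(buf, "some row\n");

	pthread_rwlock_unlock(&lk);
	goto done;				/* normal path: already unlocked */

unlock_done:
	pthread_rwlock_unlock(&lk);
done:
	return (int)strlen(buf);		/* shared tail, no lock held */
}

int main(void)
{
	char buf[64] = "";

	printf("len=%d\n", build_report(buf, sizeof(buf)));
	return 0;
}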
 
 static ssize_t
+lpfc_scsi_stat_show(struct device *dev, struct device_attribute *attr,
+		    char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = shost_priv(shost);
+	struct lpfc_hba *phba = vport->phba;
+	int len;
+	struct lpfc_fc4_ctrl_stat *cstat;
+	u64 data1, data2, data3;
+	u64 tot, totin, totout;
+	int i;
+	char tmp[LPFC_MAX_SCSI_INFO_TMP_LEN] = {0};
+
+	if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ||
+	    (phba->sli_rev != LPFC_SLI_REV4))
+		return 0;
+
+	scnprintf(buf, PAGE_SIZE, "SCSI HDWQ Statistics\n");
+
+	totin = 0;
+	totout = 0;
+	for (i = 0; i < phba->cfg_hdw_queue; i++) {
+		cstat = &phba->sli4_hba.hdwq[i].scsi_cstat;
+		tot = cstat->io_cmpls;
+		totin += tot;
+		data1 = cstat->input_requests;
+		data2 = cstat->output_requests;
+		data3 = cstat->control_requests;
+		totout += (data1 + data2 + data3);
+
+		scnprintf(tmp, sizeof(tmp), "HDWQ (%d): Rd %016llx Wr %016llx "
+			  "IO %016llx ", i, data1, data2, data3);
+		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+			goto buffer_done;
+
+		scnprintf(tmp, sizeof(tmp), "Cmpl %016llx OutIO %016llx\n",
+			  tot, ((data1 + data2 + data3) - tot));
+		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+			goto buffer_done;
+	}
+	scnprintf(tmp, sizeof(tmp), "Total FCP Cmpl %016llx Issue %016llx "
+		  "OutIO %016llx\n", totin, totout, totout - totin);
+	strlcat(buf, tmp, PAGE_SIZE);
+
+buffer_done:
+	len = strnlen(buf, PAGE_SIZE);
+
+	return len;
+}
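The new scsi_stat attribute surfaces the per-hardware-queue FCP counters through the Scsi_Host class device. A userspace sketch that dumps it; the host0 path is an assumption, substitute the lpfc host index on your system:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char buf[4096];	/* attribute output is bounded by PAGE_SIZE */
	size_t n;
	FILE *f = fopen("/sys/class/scsi_host/host0/scsi_stat", "r");

	if (!f) {
		perror("scsi_stat");
		return EXIT_FAILURE;
	}
	n = fread(buf, 1, sizeof(buf) - 1, f);
	buf[n] = '\0';
	fputs(buf, stdout);
	fclose(f);
	return EXIT_SUCCESS;
}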
+
+static ssize_t
 lpfc_bg_info_show(struct device *dev, struct device_attribute *attr,
 		  char *buf)
 {
@@ -517,14 +588,15 @@
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 
-	if (phba->cfg_enable_bg)
+	if (phba->cfg_enable_bg) {
 		if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
-			return snprintf(buf, PAGE_SIZE, "BlockGuard Enabled\n");
+			return scnprintf(buf, PAGE_SIZE,
+					"BlockGuard Enabled\n");
 		else
-			return snprintf(buf, PAGE_SIZE,
+			return scnprintf(buf, PAGE_SIZE,
 					"BlockGuard Not Supported\n");
-	else
-			return snprintf(buf, PAGE_SIZE,
+	} else
+		return scnprintf(buf, PAGE_SIZE,
 					"BlockGuard Disabled\n");
 }
 
@@ -536,7 +608,7 @@
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 
-	return snprintf(buf, PAGE_SIZE, "%llu\n",
+	return scnprintf(buf, PAGE_SIZE, "%llu\n",
 			(unsigned long long)phba->bg_guard_err_cnt);
 }
 
@@ -548,7 +620,7 @@
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 
-	return snprintf(buf, PAGE_SIZE, "%llu\n",
+	return scnprintf(buf, PAGE_SIZE, "%llu\n",
 			(unsigned long long)phba->bg_apptag_err_cnt);
 }
 
@@ -560,7 +632,7 @@
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 
-	return snprintf(buf, PAGE_SIZE, "%llu\n",
+	return scnprintf(buf, PAGE_SIZE, "%llu\n",
 			(unsigned long long)phba->bg_reftag_err_cnt);
 }
 
@@ -578,7 +650,7 @@
 {
 	struct Scsi_Host *host = class_to_shost(dev);
 
-	return snprintf(buf, PAGE_SIZE, "%s\n",lpfc_info(host));
+	return scnprintf(buf, PAGE_SIZE, "%s\n", lpfc_info(host));
 }
 
 /**
@@ -597,7 +669,7 @@
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 
-	return snprintf(buf, PAGE_SIZE, "%s\n",phba->SerialNumber);
+	return scnprintf(buf, PAGE_SIZE, "%s\n", phba->SerialNumber);
 }
 
 /**
@@ -619,7 +691,7 @@
 	struct Scsi_Host *shost = class_to_shost(dev);
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
-	return snprintf(buf, PAGE_SIZE, "%d\n",phba->temp_sensor_support);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", phba->temp_sensor_support);
 }
 
 /**
@@ -638,7 +710,7 @@
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 
-	return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelDesc);
+	return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ModelDesc);
 }
 
 /**
@@ -657,7 +729,7 @@
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 
-	return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelName);
+	return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ModelName);
 }
 
 /**
@@ -676,7 +748,7 @@
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 
-	return snprintf(buf, PAGE_SIZE, "%s\n",phba->ProgramType);
+	return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ProgramType);
 }
 
 /**
@@ -694,7 +766,7 @@
 	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n",
+	return scnprintf(buf, PAGE_SIZE, "%d\n",
 		(phba->sli.sli_flag & LPFC_MENLO_MAINT));
 }
 
@@ -714,7 +786,7 @@
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 
-	return snprintf(buf, PAGE_SIZE, "%s\n",phba->Port);
+	return scnprintf(buf, PAGE_SIZE, "%s\n", phba->Port);
 }
 
 /**
@@ -742,10 +814,10 @@
 	sli_family = phba->sli4_hba.pc_sli4_params.sli_family;
 
 	if (phba->sli_rev < LPFC_SLI_REV4)
-		len = snprintf(buf, PAGE_SIZE, "%s, sli-%d\n",
+		len = scnprintf(buf, PAGE_SIZE, "%s, sli-%d\n",
 			       fwrev, phba->sli_rev);
 	else
-		len = snprintf(buf, PAGE_SIZE, "%s, sli-%d:%d:%x\n",
+		len = scnprintf(buf, PAGE_SIZE, "%s, sli-%d:%d:%x\n",
 			       fwrev, phba->sli_rev, if_type, sli_family);
 
 	return len;
@@ -769,7 +841,8 @@
 	lpfc_vpd_t *vp = &phba->vpd;
 
 	lpfc_jedec_to_ascii(vp->rev.biuRev, hdw);
-	return snprintf(buf, PAGE_SIZE, "%s\n", hdw);
+	return scnprintf(buf, PAGE_SIZE, "%s %08x %08x\n", hdw,
+			 vp->rev.smRev, vp->rev.smFwRev);
 }
 
 /**
@@ -790,10 +863,11 @@
 	char fwrev[FW_REV_STR_SIZE];
 
 	if (phba->sli_rev < LPFC_SLI_REV4)
-		return snprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion);
+		return scnprintf(buf, PAGE_SIZE, "%s\n",
+				phba->OptionROMVersion);
 
 	lpfc_decode_firmware_rev(phba, fwrev, 1);
-	return snprintf(buf, PAGE_SIZE, "%s\n", fwrev);
+	return scnprintf(buf, PAGE_SIZE, "%s\n", fwrev);
 }
 
 /**
@@ -824,20 +898,20 @@
 	case LPFC_LINK_DOWN:
 	case LPFC_HBA_ERROR:
 		if (phba->hba_flag & LINK_DISABLED)
-			len += snprintf(buf + len, PAGE_SIZE-len,
+			len += scnprintf(buf + len, PAGE_SIZE-len,
 				"Link Down - User disabled\n");
 		else
-			len += snprintf(buf + len, PAGE_SIZE-len,
+			len += scnprintf(buf + len, PAGE_SIZE-len,
 				"Link Down\n");
 		break;
 	case LPFC_LINK_UP:
 	case LPFC_CLEAR_LA:
 	case LPFC_HBA_READY:
-		len += snprintf(buf + len, PAGE_SIZE-len, "Link Up - ");
+		len += scnprintf(buf + len, PAGE_SIZE-len, "Link Up - ");
 
 		switch (vport->port_state) {
 		case LPFC_LOCAL_CFG_LINK:
-			len += snprintf(buf + len, PAGE_SIZE-len,
+			len += scnprintf(buf + len, PAGE_SIZE-len,
 					"Configuring Link\n");
 			break;
 		case LPFC_FDISC:
@@ -847,42 +921,80 @@
 		case LPFC_NS_QRY:
 		case LPFC_BUILD_DISC_LIST:
 		case LPFC_DISC_AUTH:
-			len += snprintf(buf + len, PAGE_SIZE - len,
+			len += scnprintf(buf + len, PAGE_SIZE - len,
 					"Discovery\n");
 			break;
 		case LPFC_VPORT_READY:
-			len += snprintf(buf + len, PAGE_SIZE - len, "Ready\n");
+			len += scnprintf(buf + len, PAGE_SIZE - len,
+					"Ready\n");
 			break;
 
 		case LPFC_VPORT_FAILED:
-			len += snprintf(buf + len, PAGE_SIZE - len, "Failed\n");
+			len += scnprintf(buf + len, PAGE_SIZE - len,
+					"Failed\n");
 			break;
 
 		case LPFC_VPORT_UNKNOWN:
-			len += snprintf(buf + len, PAGE_SIZE - len,
+			len += scnprintf(buf + len, PAGE_SIZE - len,
 					"Unknown\n");
 			break;
 		}
 		if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
-			len += snprintf(buf + len, PAGE_SIZE-len,
+			len += scnprintf(buf + len, PAGE_SIZE-len,
 					"   Menlo Maint Mode\n");
 		else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
 			if (vport->fc_flag & FC_PUBLIC_LOOP)
-				len += snprintf(buf + len, PAGE_SIZE-len,
+				len += scnprintf(buf + len, PAGE_SIZE-len,
 						"   Public Loop\n");
 			else
-				len += snprintf(buf + len, PAGE_SIZE-len,
+				len += scnprintf(buf + len, PAGE_SIZE-len,
 						"   Private Loop\n");
 		} else {
 			if (vport->fc_flag & FC_FABRIC)
-				len += snprintf(buf + len, PAGE_SIZE-len,
+				len += scnprintf(buf + len, PAGE_SIZE-len,
 						"   Fabric\n");
 			else
-				len += snprintf(buf + len, PAGE_SIZE-len,
+				len += scnprintf(buf + len, PAGE_SIZE-len,
 						"   Point-2-Point\n");
 		}
 	}
 
+	if ((phba->sli_rev == LPFC_SLI_REV4) &&
+	    ((bf_get(lpfc_sli_intf_if_type,
+	     &phba->sli4_hba.sli_intf) ==
+	     LPFC_SLI_INTF_IF_TYPE_6))) {
+		struct lpfc_trunk_link link = phba->trunk_link;
+
+		if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
+			len += scnprintf(buf + len, PAGE_SIZE - len,
+				"Trunk port 0: Link %s %s\n",
+				(link.link0.state == LPFC_LINK_UP) ?
+				 "Up" : "Down. ",
+				trunk_errmsg[link.link0.fault]);
+
+		if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
+			len += scnprintf(buf + len, PAGE_SIZE - len,
+				"Trunk port 1: Link %s %s\n",
+				(link.link1.state == LPFC_LINK_UP) ?
+				 "Up" : "Down. ",
+				trunk_errmsg[link.link1.fault]);
+
+		if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
+			len += scnprintf(buf + len, PAGE_SIZE - len,
+				"Trunk port 2: Link %s %s\n",
+				(link.link2.state == LPFC_LINK_UP) ?
+				 "Up" : "Down. ",
+				trunk_errmsg[link.link2.fault]);
+
+		if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
+			len += scnprintf(buf + len, PAGE_SIZE - len,
+				"Trunk port 3: Link %s %s\n",
+				(link.link3.state == LPFC_LINK_UP) ?
+				 "Up" : "Down. ",
+				trunk_errmsg[link.link3.fault]);
+
+	}
+
 	return len;
 }
 
@@ -903,15 +1015,15 @@
 	struct lpfc_hba *phba = vport->phba;
 
 	if (phba->sli_rev < LPFC_SLI_REV4)
-		return snprintf(buf, PAGE_SIZE, "fc\n");
+		return scnprintf(buf, PAGE_SIZE, "fc\n");
 
 	if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) {
 		if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_GE)
-			return snprintf(buf, PAGE_SIZE, "fcoe\n");
+			return scnprintf(buf, PAGE_SIZE, "fcoe\n");
 		if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)
-			return snprintf(buf, PAGE_SIZE, "fc\n");
+			return scnprintf(buf, PAGE_SIZE, "fc\n");
 	}
-	return snprintf(buf, PAGE_SIZE, "unknown\n");
+	return scnprintf(buf, PAGE_SIZE, "unknown\n");
 }
 
 /**
@@ -931,7 +1043,7 @@
 	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
 	struct lpfc_hba *phba = vport->phba;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n",
+	return scnprintf(buf, PAGE_SIZE, "%d\n",
 			phba->sli4_hba.pc_sli4_params.oas_supported);
 }
 
@@ -989,7 +1101,7 @@
 	struct Scsi_Host  *shost = class_to_shost(dev);
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n",
+	return scnprintf(buf, PAGE_SIZE, "%d\n",
 			vport->fc_map_cnt + vport->fc_unmap_cnt);
 }
 
@@ -1121,6 +1233,20 @@
 
 	psli = &phba->sli;
 
+	/*
+	 * If freeing the queues has already started, don't access them.
+	 * Otherwise set FREE_WAIT to indicate that queues are being used
+	 * to hold the freeing process until we finish.
+	 */
+	spin_lock_irq(&phba->hbalock);
+	if (!(psli->sli_flag & LPFC_QUEUE_FREE_INIT)) {
+		psli->sli_flag |= LPFC_QUEUE_FREE_WAIT;
+	} else {
+		spin_unlock_irq(&phba->hbalock);
+		goto skip_wait;
+	}
+	spin_unlock_irq(&phba->hbalock);
+
 	/* Wait a little for things to settle down, but not
 	 * long enough for dev loss timeout to expire.
 	 */
@@ -1142,6 +1268,11 @@
 		}
 	}
 out:
+	spin_lock_irq(&phba->hbalock);
+	psli->sli_flag &= ~LPFC_QUEUE_FREE_WAIT;
+	spin_unlock_irq(&phba->hbalock);
+
+skip_wait:
 	init_completion(&online_compl);
 	rc = lpfc_workq_post_event(phba, &status, &online_compl, type);
 	if (rc == 0)
@@ -1156,6 +1287,82 @@
 }
 
 /**
+ * lpfc_reset_pci_bus - resets PCI bridge controller's secondary bus of an HBA
+ * @phba: lpfc_hba pointer.
+ *
+ * Description:
+ * Issues a PCI secondary bus reset for the phba->pcidev.
+ *
+ * Notes:
+ * First walks the bus_list to ensure that only PCI devices with the
+ * Emulex vendor id and device ids that support hot reset are present,
+ * that function 0 occurs only once, and that all ports on the bus are
+ * in offline mode, so the hot reset affects only one valid HBA.
+ *
+ * Returns:
+ * -ENOTSUPP, cfg_enable_hba_reset must be of value 2
+ * -ENODEV,   NULL ptr to pcidev
+ * -EBADSLT,  detected invalid device
+ * -EBUSY,    port is not in offline state
+ *      0,    successful
+ */
+static int
+lpfc_reset_pci_bus(struct lpfc_hba *phba)
+{
+	struct pci_dev *pdev = phba->pcidev;
+	struct Scsi_Host *shost = NULL;
+	struct lpfc_hba *phba_other = NULL;
+	struct pci_dev *ptr = NULL;
+	int res;
+
+	if (phba->cfg_enable_hba_reset != 2)
+		return -ENOTSUPP;
+
+	if (!pdev) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "8345 pdev NULL!\n");
+		return -ENODEV;
+	}
+
+	res = lpfc_check_pci_resettable(phba);
+	if (res)
+		return res;
+
+	/* Walk the list of devices on the pci_dev's bus */
+	list_for_each_entry(ptr, &pdev->bus->devices, bus_list) {
+		/* Check port is offline */
+		shost = pci_get_drvdata(ptr);
+		if (shost) {
+			phba_other =
+				((struct lpfc_vport *)shost->hostdata)->phba;
+			if (!(phba_other->pport->fc_flag & FC_OFFLINE_MODE)) {
+				lpfc_printf_log(phba_other, KERN_INFO, LOG_INIT,
+						"8349 WWPN = 0x%02x%02x%02x%02x"
+						"%02x%02x%02x%02x is not "
+						"offline!\n",
+						phba_other->wwpn[0],
+						phba_other->wwpn[1],
+						phba_other->wwpn[2],
+						phba_other->wwpn[3],
+						phba_other->wwpn[4],
+						phba_other->wwpn[5],
+						phba_other->wwpn[6],
+						phba_other->wwpn[7]);
+				return -EBUSY;
+			}
+		}
+	}
+
+	/* Issue PCI bus reset */
+	res = pci_reset_bus(pdev);
+	if (res) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"8350 PCI reset bus failed: %d\n", res);
+	}
+
+	return res;
+}
+
+/**
  * lpfc_selective_reset - Offline then onlines the port
  * @phba: lpfc_hba pointer.
  *
@@ -1322,7 +1529,7 @@
 		return -EACCES;
 
 	if ((phba->sli_rev < LPFC_SLI_REV4) ||
-	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
 	     LPFC_SLI_INTF_IF_TYPE_2))
 		return -EPERM;
 
@@ -1427,7 +1634,67 @@
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
+}
+
+static int
+lpfc_set_trunking(struct lpfc_hba *phba, char *buff_out)
+{
+	LPFC_MBOXQ_t *mbox = NULL;
+	unsigned long val = 0;
+	char *pval = 0;
+	int rc = 0;
+
+	if (!strncmp("enable", buff_out,
+				 strlen("enable"))) {
+		pval = buff_out + strlen("enable") + 1;
+		rc = kstrtoul(pval, 0, &val);
+		if (rc)
+			return rc; /* Invalid number */
+	} else if (!strncmp("disable", buff_out,
+				 strlen("disable"))) {
+		val = 0;
+	} else {
+		return -EINVAL;  /* Invalid command */
+	}
+
+	switch (val) {
+	case 0:
+		val = 0x0; /* Disable */
+		break;
+	case 2:
+		val = 0x1; /* Enable two port trunk */
+		break;
+	case 4:
+		val = 0x2; /* Enable four port trunk */
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+			"0070 Set trunk mode with val %ld ", val);
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+			 LPFC_MBOX_OPCODE_FCOE_FC_SET_TRUNK_MODE,
+			 12, LPFC_SLI4_MBX_EMBED);
+
+	bf_set(lpfc_mbx_set_trunk_mode,
+	       &mbox->u.mqe.un.set_trunk_mode,
+	       val);
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	if (rc)
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+				"0071 Set trunk mode failed with status: %d",
+				rc);
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mbox, phba->mbox_mem_pool);
+
+	return 0;
 }
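lpfc_set_trunking() parses "enable <n>" or "disable" after the "trunk" keyword that the board_mode store routine strips off (see the hunk below), with n limited to 0, 2, or 4 ports. A sketch of driving it from userspace; the board_mode attribute name and the host0 path are assumptions based on the surrounding store routine:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/scsi_host/host0/board_mode", "w");

	if (!f) {
		perror("board_mode");
		return 1;
	}
	/* "trunk enable 2" -> two-port trunk, "trunk enable 4" -> four-port,
	 * "trunk disable" -> trunking off (values mapped in lpfc_set_trunking). */
	fputs("trunk enable 2", f);
	fclose(f);
	return 0;
}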
 
 /**
@@ -1456,7 +1723,7 @@
 	else
 		state = "online";
 
-	return snprintf(buf, PAGE_SIZE, "%s\n", state);
+	return scnprintf(buf, PAGE_SIZE, "%s\n", state);
 }
 
 /**
@@ -1522,6 +1789,11 @@
 		status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_RESET);
 	else if (strncmp(buf, "dv_reset", sizeof("dv_reset") - 1) == 0)
 		status = lpfc_sli4_pdev_reg_request(phba, LPFC_DV_RESET);
+	else if (strncmp(buf, "pci_bus_reset", sizeof("pci_bus_reset") - 1)
+		 == 0)
+		status = lpfc_reset_pci_bus(phba);
+	else if (strncmp(buf, "trunk", sizeof("trunk") - 1) == 0)
+		status = lpfc_set_trunking(phba, (char *)buf + sizeof("trunk"));
 	else
 		status = -EINVAL;
 
@@ -1590,7 +1862,7 @@
 	pmb = &pmboxq->u.mb;
 	pmb->mbxCommand = MBX_READ_CONFIG;
 	pmb->mbxOwner = OWN_HOST;
-	pmboxq->context1 = NULL;
+	pmboxq->ctx_buf = NULL;
 
 	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
 		rc = MBX_NOT_FINISHED;
@@ -1622,6 +1894,9 @@
 		max_vpi = (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) > 0) ?
 			(bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) - 1) : 0;
 
+		/* Limit the max we support */
+		if (max_vpi > LPFC_MAX_VPI)
+			max_vpi = LPFC_MAX_VPI;
 		if (mvpi)
 			*mvpi = max_vpi;
 		if (avpi)
@@ -1637,8 +1912,13 @@
 			*axri = pmb->un.varRdConfig.avail_xri;
 		if (mvpi)
 			*mvpi = pmb->un.varRdConfig.max_vpi;
-		if (avpi)
-			*avpi = pmb->un.varRdConfig.avail_vpi;
+		if (avpi) {
+			/* avail_vpi is only valid if link is up and ready */
+			if (phba->link_state == LPFC_HBA_READY)
+				*avpi = pmb->un.varRdConfig.avail_vpi;
+			else
+				*avpi = pmb->un.varRdConfig.max_vpi;
+		}
 	}
 
 	mempool_free(pmboxq, phba->mbox_mem_pool);
@@ -1669,8 +1949,8 @@
 	uint32_t cnt;
 
 	if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, NULL, NULL, NULL))
-		return snprintf(buf, PAGE_SIZE, "%d\n", cnt);
-	return snprintf(buf, PAGE_SIZE, "Unknown\n");
+		return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
+	return scnprintf(buf, PAGE_SIZE, "Unknown\n");
 }
 
 /**
@@ -1697,8 +1977,8 @@
 	uint32_t cnt, acnt;
 
 	if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, &acnt, NULL, NULL))
-		return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
-	return snprintf(buf, PAGE_SIZE, "Unknown\n");
+		return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
+	return scnprintf(buf, PAGE_SIZE, "Unknown\n");
 }
 
 /**
@@ -1725,8 +2005,8 @@
 	uint32_t cnt;
 
 	if (lpfc_get_hba_info(phba, &cnt, NULL, NULL, NULL, NULL, NULL))
-		return snprintf(buf, PAGE_SIZE, "%d\n", cnt);
-	return snprintf(buf, PAGE_SIZE, "Unknown\n");
+		return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
+	return scnprintf(buf, PAGE_SIZE, "Unknown\n");
 }
 
 /**
@@ -1753,8 +2033,8 @@
 	uint32_t cnt, acnt;
 
 	if (lpfc_get_hba_info(phba, &cnt, &acnt, NULL, NULL, NULL, NULL))
-		return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
-	return snprintf(buf, PAGE_SIZE, "Unknown\n");
+		return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
+	return scnprintf(buf, PAGE_SIZE, "Unknown\n");
 }
 
 /**
@@ -1781,8 +2061,8 @@
 	uint32_t cnt;
 
 	if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, NULL))
-		return snprintf(buf, PAGE_SIZE, "%d\n", cnt);
-	return snprintf(buf, PAGE_SIZE, "Unknown\n");
+		return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
+	return scnprintf(buf, PAGE_SIZE, "Unknown\n");
 }
 
 /**
@@ -1809,8 +2089,8 @@
 	uint32_t cnt, acnt;
 
 	if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, &acnt))
-		return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
-	return snprintf(buf, PAGE_SIZE, "Unknown\n");
+		return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
+	return scnprintf(buf, PAGE_SIZE, "Unknown\n");
 }
 
 /**
@@ -1835,10 +2115,10 @@
 	struct lpfc_hba   *phba = vport->phba;
 
 	if (!(phba->max_vpi))
-		return snprintf(buf, PAGE_SIZE, "NPIV Not Supported\n");
+		return scnprintf(buf, PAGE_SIZE, "NPIV Not Supported\n");
 	if (vport->port_type == LPFC_PHYSICAL_PORT)
-		return snprintf(buf, PAGE_SIZE, "NPIV Physical\n");
-	return snprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi);
+		return scnprintf(buf, PAGE_SIZE, "NPIV Physical\n");
+	return scnprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi);
 }
 
 /**
@@ -1860,7 +2140,7 @@
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 
-	return snprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll);
+	return scnprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll);
 }
 
 /**
@@ -1964,7 +2244,7 @@
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", phba->fips_level);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", phba->fips_level);
 }
 
 /**
@@ -1983,7 +2263,7 @@
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", phba->fips_spec_rev);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", phba->fips_spec_rev);
 }
 
 /**
@@ -2002,7 +2282,7 @@
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 
-	return snprintf(buf, PAGE_SIZE, "%s - %sOperational\n",
+	return scnprintf(buf, PAGE_SIZE, "%s - %sOperational\n",
 			(phba->cfg_enable_dss) ? "Enabled" : "Disabled",
 			(phba->sli3_options & LPFC_SLI3_DSS_ENABLED) ?
 				"" : "Not ");
@@ -2031,7 +2311,7 @@
 	uint16_t max_nr_virtfn;
 
 	max_nr_virtfn = lpfc_sli_sriov_nr_virtfn_get(phba);
-	return snprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
 }
 
 static inline bool lpfc_rangecheck(uint val, uint min, uint max)
@@ -2091,7 +2371,7 @@
 	struct Scsi_Host  *shost = class_to_shost(dev);\
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
 	struct lpfc_hba   *phba = vport->phba;\
-	return snprintf(buf, PAGE_SIZE, "%d\n",\
+	return scnprintf(buf, PAGE_SIZE, "%d\n",\
 			phba->cfg_##attr);\
 }
 
@@ -2119,7 +2399,7 @@
 	struct lpfc_hba   *phba = vport->phba;\
 	uint val = 0;\
 	val = phba->cfg_##attr;\
-	return snprintf(buf, PAGE_SIZE, "%#x\n",\
+	return scnprintf(buf, PAGE_SIZE, "%#x\n",\
 			phba->cfg_##attr);\
 }
 
@@ -2255,7 +2535,7 @@
 { \
 	struct Scsi_Host  *shost = class_to_shost(dev);\
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
-	return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\
+	return scnprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\
 }
 
 /**
@@ -2280,7 +2560,7 @@
 { \
 	struct Scsi_Host  *shost = class_to_shost(dev);\
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
-	return snprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\
+	return scnprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\
 }
 
 /**
@@ -2389,6 +2669,7 @@
 
 
 static DEVICE_ATTR(nvme_info, 0444, lpfc_nvme_info_show, NULL);
+static DEVICE_ATTR(scsi_stat, 0444, lpfc_scsi_stat_show, NULL);
 static DEVICE_ATTR(bg_info, S_IRUGO, lpfc_bg_info_show, NULL);
 static DEVICE_ATTR(bg_guard_err, S_IRUGO, lpfc_bg_guard_err_show, NULL);
 static DEVICE_ATTR(bg_apptag_err, S_IRUGO, lpfc_bg_apptag_err_show, NULL);
@@ -2551,7 +2832,7 @@
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 
-	return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+	return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
 			(unsigned long long)phba->cfg_soft_wwpn);
 }
 
@@ -2648,7 +2929,7 @@
 {
 	struct Scsi_Host *shost = class_to_shost(dev);
 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
-	return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+	return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
 			(unsigned long long)phba->cfg_soft_wwnn);
 }
 
@@ -2714,7 +2995,7 @@
 	struct Scsi_Host *shost = class_to_shost(dev);
 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
 
-	return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+	return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
 			wwn_to_u64(phba->cfg_oas_tgt_wwpn));
 }
 
@@ -2782,7 +3063,7 @@
 	struct Scsi_Host *shost = class_to_shost(dev);
 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_priority);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_priority);
 }
 
 /**
@@ -2845,7 +3126,7 @@
 	struct Scsi_Host *shost = class_to_shost(dev);
 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
 
-	return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+	return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
 			wwn_to_u64(phba->cfg_oas_vpt_wwpn));
 }
 
@@ -2916,7 +3197,7 @@
 	struct Scsi_Host *shost = class_to_shost(dev);
 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_state);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_state);
 }
 
 /**
@@ -2980,7 +3261,7 @@
 	if (!(phba->cfg_oas_flags & OAS_LUN_VALID))
 		return -EFAULT;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_status);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_status);
 }
 static DEVICE_ATTR(lpfc_xlane_lun_status, S_IRUGO,
 		   lpfc_oas_lun_status_show, NULL);
@@ -3132,7 +3413,7 @@
 	if (oas_lun != NOT_OAS_ENABLED_LUN)
 		phba->cfg_oas_flags |= OAS_LUN_VALID;
 
-	len += snprintf(buf + len, PAGE_SIZE-len, "0x%llx", oas_lun);
+	len += scnprintf(buf + len, PAGE_SIZE-len, "0x%llx", oas_lun);
 
 	return len;
 }
@@ -3266,7 +3547,7 @@
 	struct Scsi_Host  *shost = class_to_shost(dev);
 	struct lpfc_hba   *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", phba->iocb_max);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", phba->iocb_max);
 }
 
 static DEVICE_ATTR(iocb_hw, S_IRUGO,
@@ -3278,7 +3559,7 @@
 	struct lpfc_hba   *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
 	struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
 
-	return snprintf(buf, PAGE_SIZE, "%d\n",
+	return scnprintf(buf, PAGE_SIZE, "%d\n",
 			pring ? pring->txq_max : 0);
 }
 
@@ -3292,7 +3573,7 @@
 	struct lpfc_hba   *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
 	struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
 
-	return snprintf(buf, PAGE_SIZE, "%d\n",
+	return scnprintf(buf, PAGE_SIZE, "%d\n",
 			pring ? pring->txcmplq_max : 0);
 }
 
@@ -3328,7 +3609,7 @@
 	struct Scsi_Host  *shost = class_to_shost(dev);
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n",	vport->cfg_devloss_tmo);
+	return scnprintf(buf, PAGE_SIZE, "%d\n",	vport->cfg_devloss_tmo);
 }
 
 /**
@@ -3402,8 +3683,8 @@
 		if (rport)
 			remoteport = rport->remoteport;
 		spin_unlock(&vport->phba->hbalock);
-		if (remoteport)
-			nvme_fc_set_remoteport_devloss(rport->remoteport,
+		if (rport && remoteport)
+			nvme_fc_set_remoteport_devloss(remoteport,
 						       vport->cfg_devloss_tmo);
 #endif
 	}
@@ -3539,29 +3820,13 @@
  * lpfc_enable_fc4_type: Defines what FC4 types are supported.
  * Supported Values:  1 - register just FCP
  *                    3 - register both FCP and NVME
- * Supported values are [1,3]. Default value is 1
+ * Supported values are [1,3]. Default value is 3
  */
-LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_FCP,
+LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_BOTH,
 	    LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH,
 	    "Enable FC4 Protocol support - FCP / NVME");
 
 /*
- * lpfc_xri_split: Defines the division of XRI resources between SCSI and NVME
- * This parameter is only used if:
- *     lpfc_enable_fc4_type is 3 - register both FCP and NVME and
- *     port is not configured for NVMET.
- *
- * ELS/CT always get 10% of XRIs, up to a maximum of 250
- * The remaining XRIs get split up based on lpfc_xri_split per port:
- *
- * Supported Values are in percentages
- * the xri_split value is the percentage the SCSI port will get. The remaining
- * percentage will go to NVME.
- */
-LPFC_ATTR_R(xri_split, 50, 10, 90,
-	    "Percentage of FCP XRI resources versus NVME");
-
-/*
 # lpfc_log_verbose: Only turn this flag on if you are willing to risk being
 # deluged with LOTS of information.
 # You can set a bit mask to record specific types of verbose messages:
@@ -3831,8 +4096,9 @@
 				val);
 			return -EINVAL;
 		}
-		if (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
-			val == 4) {
+		if ((phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
+		     phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC) &&
+		    val == 4) {
 			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
 				"3114 Loop mode not supported\n");
 			return -EINVAL;
@@ -4254,7 +4520,7 @@
 	uint32_t prev_val, if_type;
 
 	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
-	if (if_type == LPFC_SLI_INTF_IF_TYPE_2 &&
+	if (if_type >= LPFC_SLI_INTF_IF_TYPE_2 &&
 	    phba->hba_flag & HBA_FORCED_LINK_SPEED)
 		return -EPERM;
 
@@ -4694,6 +4960,64 @@
 		   lpfc_request_firmware_upgrade_store);
 
 /**
+ * lpfc_force_rscn_store
+ *
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: unused string
+ * @count: unused variable.
+ *
+ * Description:
+ * Force the switch to send an RSCN to all other NPorts in our zone.
+ * If we are direct connect pt2pt, build the RSCN command ourselves
+ * and send it to the other NPort. Not supported for private loop.
+ *
+ * Returns:
+ * 0      - on success
+ * -EIO   - if command is not sent
+ **/
+static ssize_t
+lpfc_force_rscn_store(struct device *dev, struct device_attribute *attr,
+		      const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+	int i;
+
+	i = lpfc_issue_els_rscn(vport, 0);
+	if (i)
+		return -EIO;
+	return strlen(buf);
+}
+
+/*
+ * lpfc_force_rscn: Force an RSCN to be sent to all remote NPorts
+ * connected to the HBA.
+ *
+ * Value range is any ASCII value
+ */
+static int lpfc_force_rscn;
+module_param(lpfc_force_rscn, int, 0644);
+MODULE_PARM_DESC(lpfc_force_rscn,
+		 "Force an RSCN to be sent to all remote NPorts");
+lpfc_param_show(force_rscn)
+
+/**
+ * lpfc_force_rscn_init - Force an RSCN to be sent to all remote NPorts
+ * @phba: lpfc_hba pointer.
+ * @val: unused value.
+ *
+ * Returns:
+ * zero if val saved.
+ **/
+static int
+lpfc_force_rscn_init(struct lpfc_hba *phba, int val)
+{
+	return 0;
+}
+static DEVICE_ATTR_RW(lpfc_force_rscn);
+
+/**
  * lpfc_fcp_imax_store
  *
  * @dev: class device that is converted into a Scsi_host.
@@ -4717,6 +5041,8 @@
 	struct Scsi_Host *shost = class_to_shost(dev);
 	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
 	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_eq_intr_info *eqi;
+	uint32_t usdelay;
 	int val = 0, i;
 
 	/* fcp_imax is only valid for SLI4 */
@@ -4737,12 +5063,27 @@
 	if (val && (val < LPFC_MIN_IMAX || val > LPFC_MAX_IMAX))
 		return -EINVAL;
 
-	phba->cfg_fcp_imax = (uint32_t)val;
-	phba->initial_imax = phba->cfg_fcp_imax;
+	phba->cfg_auto_imax = (val) ? 0 : 1;
+	if (phba->cfg_fcp_imax && !val) {
+		queue_delayed_work(phba->wq, &phba->eq_delay_work,
+				   msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
 
-	for (i = 0; i < phba->io_channel_irqs; i += LPFC_MAX_EQ_DELAY_EQID_CNT)
+		for_each_present_cpu(i) {
+			eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
+			eqi->icnt = 0;
+		}
+	}
+
+	phba->cfg_fcp_imax = (uint32_t)val;
+
+	if (phba->cfg_fcp_imax)
+		usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
+	else
+		usdelay = 0;
+
+	for (i = 0; i < phba->cfg_irq_chann; i += LPFC_MAX_EQ_DELAY_EQID_CNT)
 		lpfc_modify_hba_eq_delay(phba, i, LPFC_MAX_EQ_DELAY_EQID_CNT,
-					 val);
+					 usdelay);
 
 	return strlen(buf);
 }
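The store routine above converts the requested interrupt rate into the microsecond delay that the EQ delay register expects: usdelay = LPFC_SEC_TO_USEC / cfg_fcp_imax, with 0 meaning no coalescing delay. A standalone sketch of that conversion; the LPFC_SEC_TO_USEC value of 1000000 is assumed from the driver headers:

#include <stdint.h>
#include <stdio.h>

#define SEC_TO_USEC 1000000u	/* assumed LPFC_SEC_TO_USEC */

static uint32_t imax_to_usdelay(uint32_t imax)
{
	return imax ? SEC_TO_USEC / imax : 0;	/* 0 -> no EQ delay */
}

int main(void)
{
	printf("imax 20000/s -> %u us\n", imax_to_usdelay(20000)); /* 50 us */
	printf("imax 0       -> %u us\n", imax_to_usdelay(0));
	return 0;
}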
@@ -4796,15 +5137,120 @@
 
 static DEVICE_ATTR_RW(lpfc_fcp_imax);
 
+/**
+ * lpfc_cq_max_proc_limit_store
+ *
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: string with the cq max processing limit of cqes
+ * @count: unused variable.
+ *
+ * Description:
+ * If val is in a valid range, then set value on each cq
+ *
+ * Returns:
+ * The length of the buf: if successful
+ * -ERANGE: if val is not in the valid range
+ * -EINVAL: if bad value format or intended mode is not supported.
+ **/
+static ssize_t
+lpfc_cq_max_proc_limit_store(struct device *dev, struct device_attribute *attr,
+			 const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_queue *eq, *cq;
+	unsigned long val;
+	int i;
+
+	/* cq_max_proc_limit is only valid for SLI4 */
+	if (phba->sli_rev != LPFC_SLI_REV4)
+		return -EINVAL;
+
+	/* Sanity check on user data */
+	if (!isdigit(buf[0]))
+		return -EINVAL;
+	if (kstrtoul(buf, 0, &val))
+		return -EINVAL;
+
+	if (val < LPFC_CQ_MIN_PROC_LIMIT || val > LPFC_CQ_MAX_PROC_LIMIT)
+		return -ERANGE;
+
+	phba->cfg_cq_max_proc_limit = (uint32_t)val;
+
+	/* set the values on the cq's */
+	for (i = 0; i < phba->cfg_irq_chann; i++) {
+		/* Get the EQ corresponding to the IRQ vector */
+		eq = phba->sli4_hba.hba_eq_hdl[i].eq;
+		if (!eq)
+			continue;
+
+		list_for_each_entry(cq, &eq->child_list, list)
+			cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
+						 cq->entry_count);
+	}
+
+	return strlen(buf);
+}
+
 /*
- * lpfc_auto_imax: Controls Auto-interrupt coalescing values support.
- *       0       No auto_imax support
- *       1       auto imax on
- * Auto imax will change the value of fcp_imax on a per EQ basis, using
- * the EQ Delay Multiplier, depending on the activity for that EQ.
- * Value range [0,1]. Default value is 1.
+ * lpfc_cq_max_proc_limit: The maximum number of CQE entries processed in an
+ *   iteration of CQ processing.
  */
-LPFC_ATTR_RW(auto_imax, 1, 0, 1, "Enable Auto imax");
+static int lpfc_cq_max_proc_limit = LPFC_CQ_DEF_MAX_PROC_LIMIT;
+module_param(lpfc_cq_max_proc_limit, int, 0644);
+MODULE_PARM_DESC(lpfc_cq_max_proc_limit,
+	    "Set the maximum number CQEs processed in an iteration of "
+	    "CQ processing");
+lpfc_param_show(cq_max_proc_limit)
+
+/*
+ * lpfc_cq_poll_threshold: Set the threshold of CQE completions in a
+ *   single handler call which should request a polled completion rather
+ *   than re-enabling interrupts.
+ */
+LPFC_ATTR_RW(cq_poll_threshold, LPFC_CQ_DEF_THRESHOLD_TO_POLL,
+	     LPFC_CQ_MIN_THRESHOLD_TO_POLL,
+	     LPFC_CQ_MAX_THRESHOLD_TO_POLL,
+	     "CQE Processing Threshold to enable Polling");
+
+/**
+ * lpfc_cq_max_proc_limit_init - Set the initial cq max_proc_limit
+ * @phba: lpfc_hba pointer.
+ * @val: entry limit
+ *
+ * Description:
+ * If val is in a valid range, then initialize the adapter's maximum
+ * value.
+ *
+ * Returns:
+ *  Always returns 0 for success, even if value not always set to
+ *  requested value. If value out of range or not supported, will fall
+ *  back to default.
+ **/
+static int
+lpfc_cq_max_proc_limit_init(struct lpfc_hba *phba, int val)
+{
+	phba->cfg_cq_max_proc_limit = LPFC_CQ_DEF_MAX_PROC_LIMIT;
+
+	if (phba->sli_rev != LPFC_SLI_REV4)
+		return 0;
+
+	if (val >= LPFC_CQ_MIN_PROC_LIMIT && val <= LPFC_CQ_MAX_PROC_LIMIT) {
+		phba->cfg_cq_max_proc_limit = val;
+		return 0;
+	}
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"0371 "LPFC_DRIVER_NAME"_cq_max_proc_limit: "
+			"%d out of range, using default\n",
+			phba->cfg_cq_max_proc_limit);
+
+	return 0;
+}
+
+static DEVICE_ATTR_RW(lpfc_cq_max_proc_limit);
 
 /**
  * lpfc_state_show - Display current driver CPU affinity
@@ -4830,57 +5276,86 @@
 
 	switch (phba->cfg_fcp_cpu_map) {
 	case 0:
-		len += snprintf(buf + len, PAGE_SIZE-len,
+		len += scnprintf(buf + len, PAGE_SIZE-len,
 				"fcp_cpu_map: No mapping (%d)\n",
 				phba->cfg_fcp_cpu_map);
 		return len;
 	case 1:
-		len += snprintf(buf + len, PAGE_SIZE-len,
+		len += scnprintf(buf + len, PAGE_SIZE-len,
 				"fcp_cpu_map: HBA centric mapping (%d): "
-				"%d online CPUs\n",
-				phba->cfg_fcp_cpu_map,
-				phba->sli4_hba.num_online_cpu);
-		break;
-	case 2:
-		len += snprintf(buf + len, PAGE_SIZE-len,
-				"fcp_cpu_map: Driver centric mapping (%d): "
-				"%d online CPUs\n",
-				phba->cfg_fcp_cpu_map,
-				phba->sli4_hba.num_online_cpu);
+				"%d of %d CPUs online from %d possible CPUs\n",
+				phba->cfg_fcp_cpu_map, num_online_cpus(),
+				num_present_cpus(),
+				phba->sli4_hba.num_possible_cpu);
 		break;
 	}
 
-	while (phba->sli4_hba.curr_disp_cpu < phba->sli4_hba.num_present_cpu) {
+	while (phba->sli4_hba.curr_disp_cpu <
+	       phba->sli4_hba.num_possible_cpu) {
 		cpup = &phba->sli4_hba.cpu_map[phba->sli4_hba.curr_disp_cpu];
 
-		/* margin should fit in this and the truncated message */
-		if (cpup->irq == LPFC_VECTOR_MAP_EMPTY)
-			len += snprintf(buf + len, PAGE_SIZE-len,
-					"CPU %02d io_chan %02d "
-					"physid %d coreid %d\n",
+		if (!cpu_present(phba->sli4_hba.curr_disp_cpu))
+			len += scnprintf(buf + len, PAGE_SIZE - len,
+					"CPU %02d not present\n",
+					phba->sli4_hba.curr_disp_cpu);
+		else if (cpup->irq == LPFC_VECTOR_MAP_EMPTY) {
+			if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY)
+				len += scnprintf(
+					buf + len, PAGE_SIZE - len,
+					"CPU %02d hdwq None "
+					"physid %d coreid %d ht %d ua %d\n",
 					phba->sli4_hba.curr_disp_cpu,
-					cpup->channel_id, cpup->phys_id,
-					cpup->core_id);
-		else
-			len += snprintf(buf + len, PAGE_SIZE-len,
-					"CPU %02d io_chan %02d "
-					"physid %d coreid %d IRQ %d\n",
+					cpup->phys_id, cpup->core_id,
+					(cpup->flag & LPFC_CPU_MAP_HYPER),
+					(cpup->flag & LPFC_CPU_MAP_UNASSIGN));
+			else
+				len += scnprintf(
+					buf + len, PAGE_SIZE - len,
+					"CPU %02d EQ %04d hdwq %04d "
+					"physid %d coreid %d ht %d ua %d\n",
 					phba->sli4_hba.curr_disp_cpu,
-					cpup->channel_id, cpup->phys_id,
-					cpup->core_id, cpup->irq);
+					cpup->eq, cpup->hdwq, cpup->phys_id,
+					cpup->core_id,
+					(cpup->flag & LPFC_CPU_MAP_HYPER),
+					(cpup->flag & LPFC_CPU_MAP_UNASSIGN));
+		} else {
+			if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY)
+				len += scnprintf(
+					buf + len, PAGE_SIZE - len,
+					"CPU %02d hdwq None "
+					"physid %d coreid %d ht %d ua %d IRQ %d\n",
+					phba->sli4_hba.curr_disp_cpu,
+					cpup->phys_id,
+					cpup->core_id,
+					(cpup->flag & LPFC_CPU_MAP_HYPER),
+					(cpup->flag & LPFC_CPU_MAP_UNASSIGN),
+					cpup->irq);
+			else
+				len += scnprintf(
+					buf + len, PAGE_SIZE - len,
+					"CPU %02d EQ %04d hdwq %04d "
+					"physid %d coreid %d ht %d ua %d IRQ %d\n",
+					phba->sli4_hba.curr_disp_cpu,
+					cpup->eq, cpup->hdwq, cpup->phys_id,
+					cpup->core_id,
+					(cpup->flag & LPFC_CPU_MAP_HYPER),
+					(cpup->flag & LPFC_CPU_MAP_UNASSIGN),
+					cpup->irq);
+		}
 
 		phba->sli4_hba.curr_disp_cpu++;
 
 		/* display max number of CPUs keeping some margin */
 		if (phba->sli4_hba.curr_disp_cpu <
-				phba->sli4_hba.num_present_cpu &&
+				phba->sli4_hba.num_possible_cpu &&
 				(len >= (PAGE_SIZE - 64))) {
-			len += snprintf(buf + len, PAGE_SIZE-len, "more...\n");
+			len += scnprintf(buf + len,
+					PAGE_SIZE - len, "more...\n");
 			break;
 		}
 	}
 
-	if (phba->sli4_hba.curr_disp_cpu == phba->sli4_hba.num_present_cpu)
+	if (phba->sli4_hba.curr_disp_cpu == phba->sli4_hba.num_possible_cpu)
 		phba->sli4_hba.curr_disp_cpu = 0;
 
 	return len;
@@ -4908,14 +5383,13 @@
 # lpfc_fcp_cpu_map: Defines how to map CPUs to IRQ vectors
 # for the HBA.
 #
-# Value range is [0 to 2]. Default value is LPFC_DRIVER_CPU_MAP (2).
+# Value range is [0 to 1]. Default value is LPFC_HBA_CPU_MAP (1).
 #	0 - Do not affinitize IRQ vectors
 #	1 - Affinitize HBA vectors with respect to each HBA
 #	    (start with CPU0 for each HBA)
-#	2 - Affintize HBA vectors with respect to the entire driver
-#	    (round robin thru all CPUs across all HBAs)
+# This also defines how Hardware Queues are mapped to specific CPUs.
 */
-static int lpfc_fcp_cpu_map = LPFC_DRIVER_CPU_MAP;
+static int lpfc_fcp_cpu_map = LPFC_HBA_CPU_MAP;
 module_param(lpfc_fcp_cpu_map, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(lpfc_fcp_cpu_map,
 		 "Defines how to map CPUs to IRQ vectors per HBA");
@@ -4949,7 +5423,7 @@
 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 			"3326 lpfc_fcp_cpu_map: %d out of range, using "
 			"default\n", val);
-	phba->cfg_fcp_cpu_map = LPFC_DRIVER_CPU_MAP;
+	phba->cfg_fcp_cpu_map = LPFC_HBA_CPU_MAP;
 
 	return 0;
 }
@@ -4994,15 +5468,12 @@
  * lpfc_nvme_enable_fb: Enable NVME first burst on I and T functions.
  * For the Initiator (I), enabling this parameter means that an NVMET
  * PRLI response with FBA enabled and an FB_SIZE set to a nonzero value will be
- * processed by the initiator for subsequent NVME FCP IO. For the target
- * function (T), enabling this parameter qualifies the lpfc_nvmet_fb_size
- * driver parameter as the target function's first burst size returned to the
- * initiator in the target's NVME PRLI response. Parameter supported on physical
- * port only - no NPIV support.
+ * processed by the initiator for subsequent NVME FCP IO.
+ * Currently, this feature is not supported on the NVME target.
  * Value range is [0,1]. Default value is 0 (disabled).
  */
 LPFC_ATTR_RW(nvme_enable_fb, 0, 0, 1,
-	     "Enable First Burst feature on I and T functions.");
+	     "Enable First Burst feature for NVME Initiator.");
 
 /*
 # lpfc_max_scsicmpl_time: Use scsi command completion time to control I/O queue
@@ -5049,13 +5520,20 @@
 LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support");
 
 /*
+# lpfc_xri_rebalancing: enable or disable XRI rebalancing feature
+# range is [0,1]. Default value is 1.
+*/
+LPFC_ATTR_R(xri_rebalancing, 1, 0, 1, "Enable/Disable XRI rebalancing");
+
+/*
 * lpfc_io_sched: Determine scheduling algorithm for issuing FCP cmds
  * range is [0,1]. Default value is 0.
- * For [0], FCP commands are issued to Work Queues ina round robin fashion.
+ * For [0], FCP commands are issued to Work Queues based on upper layer
+ * hardware queue index.
  * For [1], FCP commands are issued to a Work Queue associated with the
  *          current CPU.
  *
- * LPFC_FCP_SCHED_ROUND_ROBIN == 0
+ * LPFC_FCP_SCHED_BY_HDWQ == 0
  * LPFC_FCP_SCHED_BY_CPU == 1
  *
  * The driver dynamically sets this to 1 (BY_CPU) if it's able to set up cpu
@@ -5063,11 +5541,23 @@
 * CPU. Otherwise, 0 (By Hardware Queue) scheduling of FCP/NVME I/Os
  * through WQs will be used.
  */
-LPFC_ATTR_RW(fcp_io_sched, LPFC_FCP_SCHED_ROUND_ROBIN,
-	     LPFC_FCP_SCHED_ROUND_ROBIN,
+LPFC_ATTR_RW(fcp_io_sched, LPFC_FCP_SCHED_BY_CPU,
+	     LPFC_FCP_SCHED_BY_HDWQ,
 	     LPFC_FCP_SCHED_BY_CPU,
 	     "Determine scheduling algorithm for "
-	     "issuing commands [0] - Round Robin, [1] - Current CPU");
+	     "issuing commands [0] - Hardware Queue, [1] - Current CPU");
+
+/*
+ * lpfc_ns_query: Determine algorithm for NameServer queries after RSCN
+ * range is [0,1]. Default value is 0.
+ * For [0], GID_FT is used for NameServer queries after RSCN (default)
+ * For [1], GID_PT is used for NameServer queries after RSCN
+ *
+ */
+LPFC_ATTR_RW(ns_query, LPFC_NS_QUERY_GID_FT,
+	     LPFC_NS_QUERY_GID_FT, LPFC_NS_QUERY_GID_PT,
+	     "Determine algorithm NameServer queries after RSCN "
+	     "[0] - GID_FT, [1] - GID_PT");
 
 /*
 # lpfc_fcp2_no_tgt_reset: Determine bus reset behavior
@@ -5217,49 +5707,61 @@
 	     "Embed NVME Command in WQE");
 
 /*
- * lpfc_fcp_io_channel: Set the number of FCP IO channels the driver
- * will advertise it supports to the SCSI layer. This also will map to
- * the number of WQs the driver will create.
+ * lpfc_fcp_mq_threshold: Set the maximum number of Hardware Queues
+ * the driver will advertise it supports to the SCSI layer.
  *
- *      0    = Configure the number of io channels to the number of active CPUs.
- *      1,32 = Manually specify how many io channels to use.
+ *      0    = Set nr_hw_queues by the number of CPUs or HW queues.
+ *      1,256 = Manually specify the maximum nr_hw_queue value to be set.
  *
- * Value range is [0,32]. Default value is 4.
+ * Value range is [0,256]. Default value is 8.
  */
-LPFC_ATTR_R(fcp_io_channel,
-	    LPFC_FCP_IO_CHAN_DEF,
-	    LPFC_HBA_IO_CHAN_MIN, LPFC_HBA_IO_CHAN_MAX,
-	    "Set the number of FCP I/O channels");
+LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF,
+	    LPFC_FCP_MQ_THRESHOLD_MIN, LPFC_FCP_MQ_THRESHOLD_MAX,
+	    "Set the number of SCSI Queues advertised");
 
 /*
- * lpfc_nvme_io_channel: Set the number of IO hardware queues the driver
- * will advertise it supports to the NVME layer. This also will map to
- * the number of WQs the driver will create.
- *
- * This module parameter is valid when lpfc_enable_fc4_type is set
- * to support NVME.
+ * lpfc_hdw_queue: Set the number of Hardware Queues the driver
+ * will advertise it supports to the NVME and SCSI layers. This also
+ * will map to the number of CQ/WQ pairs the driver will create.
  *
  * The NVME Layer will try to create this many, plus 1 administrative
 * hardware queue. The administrative queue will always map to WQ 0.
- * A hardware IO queue maps (qidx) to a specific driver WQ.
+ * A hardware IO queue maps (qidx) to a specific driver CQ/WQ.
  *
- *      0    = Configure the number of io channels to the number of active CPUs.
- *      1,32 = Manually specify how many io channels to use.
+ *      0    = Configure the number of hdw queues to the number of active CPUs.
+ *      1,128 = Manually specify how many hdw queues to use.
  *
- * Value range is [0,32]. Default value is 0.
+ * Value range is [0,128]. Default value is 0.
  */
-LPFC_ATTR_R(nvme_io_channel,
-	    LPFC_NVME_IO_CHAN_DEF,
-	    LPFC_HBA_IO_CHAN_MIN, LPFC_HBA_IO_CHAN_MAX,
-	    "Set the number of NVME I/O channels");
+LPFC_ATTR_R(hdw_queue,
+	    LPFC_HBA_HDWQ_DEF,
+	    LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX,
+	    "Set the number of I/O Hardware Queues");
+
+/*
+ * lpfc_irq_chann: Set the number of IRQ vectors that are available
+ * for Hardware Queues to utilize.  This also will map to the number
+ * of EQ / MSI-X vectors the driver will create. This should never be
+ * more than the number of Hardware Queues.
+ *
+ *      0     = Configure number of IRQ Channels to the number of active CPUs.
+ *      1,128 = Manually specify how many IRQ Channels to use.
+ *
+ * Value range is [0,128]. Default value is 0.
+ */
+LPFC_ATTR_R(irq_chann,
+	    LPFC_HBA_HDWQ_DEF,
+	    LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX,
+	    "Set the number of I/O IRQ Channels");
 
 /*
 # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
 #       0  = HBA resets disabled
 #       1  = HBA resets enabled (default)
-# Value range is [0,1]. Default value is 1.
+#       2  = HBA reset via PCI bus reset enabled
+# Value range is [0,2]. Default value is 1.
 */
-LPFC_ATTR_R(enable_hba_reset, 1, 0, 1, "Enable HBA resets from the driver.");
+LPFC_ATTR_RW(enable_hba_reset, 1, 0, 2, "Enable HBA resets from the driver.");
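The new value 2 pairs naturally with lpfc_check_pci_resettable(), which this patch declares in lpfc_crtn.h (0 meaning the topology allows a reset). A hedged fragment of how the option might be honored; the helper name, call site, and error codes are assumptions:

/* Sketch: value 2 requests a reset via the PCI bus reset path. */
static int lpfc_try_pci_bus_reset(struct lpfc_hba *phba)
{
	if (phba->cfg_enable_hba_reset != 2)
		return -EPERM;		/* mode 2 not requested */
	if (lpfc_check_pci_resettable(phba))
		return -EIO;		/* topology forbids a bus reset */
	/* ...issue the reset through the PCI bus reset path... */
	return 0;
}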
 
 /*
 # lpfc_enable_hba_heartbeat: Disable HBA heartbeat timer.
@@ -5293,16 +5795,6 @@
 LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
 
 /*
-# lpfc_fcp_look_ahead: Look ahead for completions in FCP start routine
-#       0  = disabled (default)
-#       1  = enabled
-# Value range is [0,1]. Default value is 0.
-#
-# This feature in under investigation and may be supported in the future.
-*/
-unsigned int lpfc_fcp_look_ahead = LPFC_LOOK_AHEAD_OFF;
-
-/*
 # lpfc_prot_mask:
 #	- Bit mask of host protection capabilities used to register with the
 #	  SCSI mid-layer
@@ -5358,15 +5850,74 @@
 
 /*
  * lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count
- * This value can be set to values between 64 and 4096. The default value is
- * 64, but may be increased to allow for larger Max I/O sizes. The scsi layer
- * will be allowed to request I/Os of sizes up to (MAX_SEG_COUNT * SEG_SIZE).
+ * This value can be set to values between 64 and 4096. The default value
+ * is 64, but may be increased to allow for larger Max I/O sizes. The scsi
+ * and nvme layers will allow I/O sizes up to (MAX_SEG_COUNT * SEG_SIZE).
  * Because of the additional overhead involved in setting up T10-DIF,
  * this parameter will be limited to 128 if BlockGuard is enabled under SLI4
  * and will be limited to 512 if BlockGuard is enabled under SLI3.
  */
-LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_MIN_SG_SEG_CNT,
-	    LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count");
+static uint lpfc_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT;
+module_param(lpfc_sg_seg_cnt, uint, 0444);
+MODULE_PARM_DESC(lpfc_sg_seg_cnt, "Max Scatter Gather Segment Count");
+
+/**
+ * lpfc_sg_seg_cnt_show - Display the scatter/gather list sizes
+ *    configured for the adapter
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains a string with the list sizes
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_sg_seg_cnt_show(struct device *dev, struct device_attribute *attr,
+		     char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	int len;
+
+	len = scnprintf(buf, PAGE_SIZE, "SGL sz: %d  total SGEs: %d\n",
+		       phba->cfg_sg_dma_buf_size, phba->cfg_total_seg_cnt);
+
+	len += scnprintf(buf + len, PAGE_SIZE, "Cfg: %d  SCSI: %d  NVME: %d\n",
+			phba->cfg_sg_seg_cnt, phba->cfg_scsi_seg_cnt,
+			phba->cfg_nvme_seg_cnt);
+	return len;
+}
+
+static DEVICE_ATTR_RO(lpfc_sg_seg_cnt);
+
+/**
+ * lpfc_sg_seg_cnt_init - Set the hba sg_seg_cnt initial value
+ * @phba: lpfc_hba pointer.
+ * @val: contains the initial value
+ *
+ * Description:
+ * Validates the initial value is within range and assigns it to the
+ * adapter. If not in range, an error message is posted and the
+ * default value is assigned.
+ *
+ * Returns:
+ * zero if value is in range and is set
+ * -EINVAL if value was out of range
+ **/
+static int
+lpfc_sg_seg_cnt_init(struct lpfc_hba *phba, int val)
+{
+	if (val >= LPFC_MIN_SG_SEG_CNT && val <= LPFC_MAX_SG_SEG_CNT) {
+		phba->cfg_sg_seg_cnt = val;
+		return 0;
+	}
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"0409 "LPFC_DRIVER_NAME"_sg_seg_cnt attribute cannot "
+			"be set to %d, allowed range is [%d, %d]\n",
+			val, LPFC_MIN_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT);
+	phba->cfg_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT;
+	return -EINVAL;
+}
 
 /*
  * lpfc_enable_mds_diags: Enable MDS Diagnostics
@@ -5374,7 +5925,32 @@
  *       1  = MDS Diagnostics enabled
  * Value range is [0,1]. Default value is 0.
  */
-LPFC_ATTR_R(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics");
+LPFC_ATTR_RW(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics");
+
+/*
+ * lpfc_ras_fwlog_buffsize: Firmware logging host buffer size
+ *	0 = Disable firmware logging (default)
+ *	[1-4] = Multiple of 1/4th MB of host memory for FW logging
+ * Value range [0..4]. Default value is 0
+ */
+LPFC_ATTR_RW(ras_fwlog_buffsize, 0, 0, 4, "Host memory for FW logging");
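The comment implies a 256 KB (quarter-megabyte) unit. Assuming a unit constant, the host buffer size would be derived as below; the macro name is hypothetical:

/* Assumed unit: 1/4 MB (256 KB); LPFC_RAS_FWLOG_UNIT is illustrative. */
#define LPFC_RAS_FWLOG_UNIT	(256 * 1024)

buf_size = phba->cfg_ras_fwlog_buffsize * LPFC_RAS_FWLOG_UNIT;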
+
+/*
+ * lpfc_ras_fwlog_level: Firmware logging verbosity level
+ * Valid only if firmware logging is enabled
+ * 0 (least verbosity) to 4 (most verbosity)
+ * Value range is [0..4]. Default value is 0
+ */
+LPFC_ATTR_RW(ras_fwlog_level, 0, 0, 4, "Firmware Logging Level");
+
+/*
+ * lpfc_ras_fwlog_func: Firmware logging enabled on function number
+ * Default function that has RAS support: 0
+ * Value Range is [0..7].
+ * FW logging is a global action and enablement is via a specific
+ * port.
+ */
+LPFC_ATTR_RW(ras_fwlog_func, 0, 0, 7, "Firmware Logging Enabled on Function");
 
 /*
  * lpfc_enable_bbcr: Enable BB Credit Recovery
@@ -5394,6 +5970,7 @@
 
 struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_nvme_info,
+	&dev_attr_scsi_stat,
 	&dev_attr_bg_info,
 	&dev_attr_bg_guard_err,
 	&dev_attr_bg_apptag_err,
@@ -5421,15 +5998,16 @@
 	&dev_attr_lpfc_nodev_tmo,
 	&dev_attr_lpfc_devloss_tmo,
 	&dev_attr_lpfc_enable_fc4_type,
-	&dev_attr_lpfc_xri_split,
 	&dev_attr_lpfc_fcp_class,
 	&dev_attr_lpfc_use_adisc,
 	&dev_attr_lpfc_first_burst_size,
 	&dev_attr_lpfc_ack0,
+	&dev_attr_lpfc_xri_rebalancing,
 	&dev_attr_lpfc_topology,
 	&dev_attr_lpfc_scan_down,
 	&dev_attr_lpfc_link_speed,
 	&dev_attr_lpfc_fcp_io_sched,
+	&dev_attr_lpfc_ns_query,
 	&dev_attr_lpfc_fcp2_no_tgt_reset,
 	&dev_attr_lpfc_cr_delay,
 	&dev_attr_lpfc_cr_count,
@@ -5458,12 +6036,15 @@
 	&dev_attr_lpfc_use_msi,
 	&dev_attr_lpfc_nvme_oas,
 	&dev_attr_lpfc_nvme_embed_cmd,
-	&dev_attr_lpfc_auto_imax,
 	&dev_attr_lpfc_fcp_imax,
+	&dev_attr_lpfc_force_rscn,
+	&dev_attr_lpfc_cq_poll_threshold,
+	&dev_attr_lpfc_cq_max_proc_limit,
 	&dev_attr_lpfc_fcp_cpu_map,
-	&dev_attr_lpfc_fcp_io_channel,
+	&dev_attr_lpfc_fcp_mq_threshold,
+	&dev_attr_lpfc_hdw_queue,
+	&dev_attr_lpfc_irq_chann,
 	&dev_attr_lpfc_suppress_rsp,
-	&dev_attr_lpfc_nvme_io_channel,
 	&dev_attr_lpfc_nvmet_mrq,
 	&dev_attr_lpfc_nvmet_mrq_post,
 	&dev_attr_lpfc_nvme_enable_fb,
@@ -5501,6 +6082,9 @@
 	&dev_attr_protocol,
 	&dev_attr_lpfc_xlane_supported,
 	&dev_attr_lpfc_enable_mds_diags,
+	&dev_attr_lpfc_ras_fwlog_buffsize,
+	&dev_attr_lpfc_ras_fwlog_level,
+	&dev_attr_lpfc_ras_fwlog_func,
 	&dev_attr_lpfc_enable_bbcr,
 	&dev_attr_lpfc_enable_dpp,
 	NULL,
@@ -5919,6 +6503,9 @@
 		case LPFC_LINK_SPEED_64GHZ:
 			fc_host_speed(shost) = FC_PORTSPEED_64GBIT;
 			break;
+		case LPFC_LINK_SPEED_128GHZ:
+			fc_host_speed(shost) = FC_PORTSPEED_128GBIT;
+			break;
 		default:
 			fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
 			break;
@@ -6018,7 +6605,7 @@
 	pmb = &pmboxq->u.mb;
 	pmb->mbxCommand = MBX_READ_STATUS;
 	pmb->mbxOwner = OWN_HOST;
-	pmboxq->context1 = NULL;
+	pmboxq->ctx_buf = NULL;
 	pmboxq->vport = vport;
 
 	if (vport->fc_flag & FC_OFFLINE_MODE)
@@ -6050,7 +6637,7 @@
 	memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
 	pmb->mbxCommand = MBX_READ_LNK_STAT;
 	pmb->mbxOwner = OWN_HOST;
-	pmboxq->context1 = NULL;
+	pmboxq->ctx_buf = NULL;
 	pmboxq->vport = vport;
 
 	if (vport->fc_flag & FC_OFFLINE_MODE)
@@ -6130,7 +6717,7 @@
 	pmb->mbxCommand = MBX_READ_STATUS;
 	pmb->mbxOwner = OWN_HOST;
 	pmb->un.varWords[0] = 0x1; /* reset request */
-	pmboxq->context1 = NULL;
+	pmboxq->ctx_buf = NULL;
 	pmboxq->vport = vport;
 
 	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
@@ -6148,7 +6735,7 @@
 	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
 	pmb->mbxCommand = MBX_READ_LNK_STAT;
 	pmb->mbxOwner = OWN_HOST;
-	pmboxq->context1 = NULL;
+	pmboxq->ctx_buf = NULL;
 	pmboxq->vport = vport;
 
 	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
@@ -6270,10 +6857,31 @@
 static void
 lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
 {
+	struct lpfc_rport_data *rdata = rport->dd_data;
+	struct lpfc_nodelist *ndlp = rdata->pnode;
+#if (IS_ENABLED(CONFIG_NVME_FC))
+	struct lpfc_nvme_rport *nrport = NULL;
+#endif
+
 	if (timeout)
 		rport->dev_loss_tmo = timeout;
 	else
 		rport->dev_loss_tmo = 1;
+
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+		dev_info(&rport->dev, "Cannot find remote node to "
+				      "set rport dev loss tmo, port_id x%x\n",
+				      rport->port_id);
+		return;
+	}
+
+#if (IS_ENABLED(CONFIG_NVME_FC))
+	nrport = lpfc_ndlp_get_nrport(ndlp);
+
+	if (nrport && nrport->remoteport)
+		nvme_fc_set_remoteport_devloss(nrport->remoteport,
+					       rport->dev_loss_tmo);
+#endif
 }
 
 /**
@@ -6296,7 +6904,7 @@
 {									\
 	struct fc_rport *rport = transport_class_to_rport(dev);		\
 	struct lpfc_rport_data *rdata = rport->hostdata;		\
-	return snprintf(buf, sz, format_string,				\
+	return scnprintf(buf, sz, format_string,			\
 		(rdata->target) ? cast rdata->target->field : 0);	\
 }
 
@@ -6470,6 +7078,21 @@
 };
 
 /**
+ * lpfc_get_hba_function_mode - Used to determine the HBA function in FCoE
+ * Mode
+ * @phba: lpfc_hba pointer.
+ **/
+static void
+lpfc_get_hba_function_mode(struct lpfc_hba *phba)
+{
+	/* If it's a SkyHawk FCoE adapter */
+	if (phba->pcidev->device == PCI_DEVICE_ID_SKYHAWK)
+		phba->hba_flag |= HBA_FCOE_MODE;
+	else
+		phba->hba_flag &= ~HBA_FCOE_MODE;
+}
+
+/**
  * lpfc_get_cfgparam - Used during probe_one to init the adapter structure
  * @phba: lpfc_hba pointer.
  **/
@@ -6477,6 +7100,7 @@
 lpfc_get_cfgparam(struct lpfc_hba *phba)
 {
 	lpfc_fcp_io_sched_init(phba, lpfc_fcp_io_sched);
+	lpfc_ns_query_init(phba, lpfc_ns_query);
 	lpfc_fcp2_no_tgt_reset_init(phba, lpfc_fcp2_no_tgt_reset);
 	lpfc_cr_delay_init(phba, lpfc_cr_delay);
 	lpfc_cr_count_init(phba, lpfc_cr_count);
@@ -6484,6 +7108,7 @@
 	lpfc_multi_ring_rctl_init(phba, lpfc_multi_ring_rctl);
 	lpfc_multi_ring_type_init(phba, lpfc_multi_ring_type);
 	lpfc_ack0_init(phba, lpfc_ack0);
+	lpfc_xri_rebalancing_init(phba, lpfc_xri_rebalancing);
 	lpfc_topology_init(phba, lpfc_topology);
 	lpfc_link_speed_init(phba, lpfc_link_speed);
 	lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
@@ -6496,8 +7121,10 @@
 	lpfc_use_msi_init(phba, lpfc_use_msi);
 	lpfc_nvme_oas_init(phba, lpfc_nvme_oas);
 	lpfc_nvme_embed_cmd_init(phba, lpfc_nvme_embed_cmd);
-	lpfc_auto_imax_init(phba, lpfc_auto_imax);
 	lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
+	lpfc_force_rscn_init(phba, lpfc_force_rscn);
+	lpfc_cq_poll_threshold_init(phba, lpfc_cq_poll_threshold);
+	lpfc_cq_max_proc_limit_init(phba, lpfc_cq_max_proc_limit);
 	lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map);
 	lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
 	lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
@@ -6521,8 +7148,18 @@
 	else
 		phba->cfg_poll = lpfc_poll;
 
-	if (phba->cfg_enable_bg)
+	/* Get the function mode */
+	lpfc_get_hba_function_mode(phba);
+
+	/* BlockGuard allowed for FC only. */
+	if (phba->cfg_enable_bg && phba->hba_flag & HBA_FCOE_MODE) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0581 BlockGuard feature not supported\n");
+		/* If set, clear the BlockGuard support param */
+		phba->cfg_enable_bg = 0;
+	} else if (phba->cfg_enable_bg) {
 		phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
+	}
 
 	lpfc_suppress_rsp_init(phba, lpfc_suppress_rsp);
 
@@ -6533,48 +7170,39 @@
 	/* Initialize first burst. Target vs Initiator are different. */
 	lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
 	lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size);
-	lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);
-	lpfc_nvme_io_channel_init(phba, lpfc_nvme_io_channel);
+	lpfc_fcp_mq_threshold_init(phba, lpfc_fcp_mq_threshold);
+	lpfc_hdw_queue_init(phba, lpfc_hdw_queue);
+	lpfc_irq_chann_init(phba, lpfc_irq_chann);
 	lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr);
 	lpfc_enable_dpp_init(phba, lpfc_enable_dpp);
 
 	if (phba->sli_rev != LPFC_SLI_REV4) {
 		/* NVME only supported on SLI4 */
 		phba->nvmet_support = 0;
+		phba->cfg_nvmet_mrq = 0;
 		phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
 		phba->cfg_enable_bbcr = 0;
+		phba->cfg_xri_rebalancing = 0;
 	} else {
 		/* We MUST have FCP support */
 		if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
 			phba->cfg_enable_fc4_type |= LPFC_ENABLE_FCP;
 	}
 
-	if (phba->cfg_auto_imax && !phba->cfg_fcp_imax)
-		phba->cfg_auto_imax = 0;
-	phba->initial_imax = phba->cfg_fcp_imax;
+	phba->cfg_auto_imax = (phba->cfg_fcp_imax) ? 0 : 1;
 
 	phba->cfg_enable_pbde = 0;
 
 	/* A value of 0 means use the number of CPUs found in the system */
-	if (phba->cfg_fcp_io_channel == 0)
-		phba->cfg_fcp_io_channel = phba->sli4_hba.num_present_cpu;
-	if (phba->cfg_nvme_io_channel == 0)
-		phba->cfg_nvme_io_channel = phba->sli4_hba.num_present_cpu;
-
-	if (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
-		phba->cfg_fcp_io_channel = 0;
-
-	if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP)
-		phba->cfg_nvme_io_channel = 0;
-
-	if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel)
-		phba->io_channel_irqs = phba->cfg_fcp_io_channel;
-	else
-		phba->io_channel_irqs = phba->cfg_nvme_io_channel;
+	if (phba->cfg_hdw_queue == 0)
+		phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;
+	if (phba->cfg_irq_chann == 0)
+		phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu;
+	if (phba->cfg_irq_chann > phba->cfg_hdw_queue)
+		phba->cfg_irq_chann = phba->cfg_hdw_queue;
 
 	phba->cfg_soft_wwnn = 0L;
 	phba->cfg_soft_wwpn = 0L;
-	lpfc_xri_split_init(phba, lpfc_xri_split);
 	lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
 	lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
 	lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
@@ -6587,6 +7215,10 @@
 	lpfc_sli_mode_init(phba, lpfc_sli_mode);
 	phba->cfg_enable_dss = 1;
 	lpfc_enable_mds_diags_init(phba, lpfc_enable_mds_diags);
+	lpfc_ras_fwlog_buffsize_init(phba, lpfc_ras_fwlog_buffsize);
+	lpfc_ras_fwlog_level_init(phba, lpfc_ras_fwlog_level);
+	lpfc_ras_fwlog_func_init(phba, lpfc_ras_fwlog_func);
+
 	return;
 }
 
@@ -6598,16 +7230,16 @@
 void
 lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
 {
-	if (phba->cfg_nvme_io_channel > phba->sli4_hba.num_present_cpu)
-		phba->cfg_nvme_io_channel = phba->sli4_hba.num_present_cpu;
-
-	if (phba->cfg_fcp_io_channel > phba->sli4_hba.num_present_cpu)
-		phba->cfg_fcp_io_channel = phba->sli4_hba.num_present_cpu;
+	if (phba->cfg_hdw_queue > phba->sli4_hba.num_present_cpu)
+		phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;
+	if (phba->cfg_irq_chann > phba->sli4_hba.num_present_cpu)
+		phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu;
+	if (phba->cfg_irq_chann > phba->cfg_hdw_queue)
+		phba->cfg_irq_chann = phba->cfg_hdw_queue;
 
 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
 	    phba->nvmet_support) {
 		phba->cfg_enable_fc4_type &= ~LPFC_ENABLE_FCP;
-		phba->cfg_fcp_io_channel = 0;
 
 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
 				"6013 %s x%x fb_size x%x, fb_max x%x\n",
@@ -6624,11 +7256,11 @@
 		}
 
 		if (!phba->cfg_nvmet_mrq)
-			phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
+			phba->cfg_nvmet_mrq = phba->cfg_irq_chann;
 
 		/* Adjust lpfc_nvmet_mrq to avoid running out of WQE slots */
-		if (phba->cfg_nvmet_mrq > phba->cfg_nvme_io_channel) {
-			phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
+		if (phba->cfg_nvmet_mrq > phba->cfg_irq_chann) {
+			phba->cfg_nvmet_mrq = phba->cfg_irq_chann;
 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
 					"6018 Adjust lpfc_nvmet_mrq to %d\n",
 					phba->cfg_nvmet_mrq);
@@ -6639,14 +7271,9 @@
 	} else {
 		/* Not NVME Target mode.  Turn off Target parameters. */
 		phba->nvmet_support = 0;
-		phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF;
+		phba->cfg_nvmet_mrq = 0;
 		phba->cfg_nvmet_fb_size = 0;
 	}
-
-	if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel)
-		phba->io_channel_irqs = phba->cfg_fcp_io_channel;
-	else
-		phba->io_channel_irqs = phba->cfg_nvme_io_channel;
 }
 
 /**
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 90745fe..39a736b 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2009-2015 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -27,6 +27,7 @@
 #include <linux/delay.h>
 #include <linux/list.h>
 #include <linux/bsg-lib.h>
+#include <linux/vmalloc.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
@@ -1039,7 +1040,7 @@
 				if (!dmabuf) {
 					lpfc_printf_log(phba, KERN_ERR,
 						LOG_LIBDFC, "2616 No dmabuf "
-						"found for iocbq 0x%p\n",
+						"found for iocbq x%px\n",
 						iocbq);
 					kfree(evt_dat->data);
 					kfree(evt_dat);
@@ -1275,9 +1276,7 @@
 	return 0; /* call job done later */
 
 job_error:
-	if (dd_data != NULL)
-		kfree(dd_data);
-
+	kfree(dd_data);
 	job->dd_data = NULL;
 	return rc;
 }
@@ -1570,7 +1569,6 @@
 		"2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
 		icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state);
 
-	ctiocb->iocb_cmpl = NULL;
 	ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
 	ctiocb->vport = phba->pport;
 	ctiocb->context1 = dd_data;
@@ -1967,14 +1965,17 @@
 }
 
 /**
- * lpfc_sli4_bsg_set_internal_loopback - set sli4 internal loopback diagnostic
+ * lpfc_sli4_bsg_set_loopback_mode - set sli4 internal loopback diagnostic
  * @phba: Pointer to HBA context object.
+ * @mode: loopback mode to set
+ * @link_no: link number for loopback mode to set
  *
  * This function is responsible for issuing a sli4 mailbox command for setting
- * up internal loopback diagnostic.
+ * up loopback diagnostic for a link.
  */
 static int
-lpfc_sli4_bsg_set_internal_loopback(struct lpfc_hba *phba)
+lpfc_sli4_bsg_set_loopback_mode(struct lpfc_hba *phba, int mode,
+				uint32_t link_no)
 {
 	LPFC_MBOXQ_t *pmboxq;
 	uint32_t req_len, alloc_len;
@@ -1995,11 +1996,19 @@
 	}
 	link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
 	bf_set(lpfc_mbx_set_diag_state_link_num,
-	       &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_no);
-	bf_set(lpfc_mbx_set_diag_state_link_type,
-	       &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_tp);
+	       &link_diag_loopback->u.req, link_no);
+
+	if (phba->sli4_hba.conf_trunk & (1 << link_no)) {
+		bf_set(lpfc_mbx_set_diag_state_link_type,
+		       &link_diag_loopback->u.req, LPFC_LNK_FC_TRUNKED);
+	} else {
+		bf_set(lpfc_mbx_set_diag_state_link_type,
+		       &link_diag_loopback->u.req,
+		       phba->sli4_hba.lnk_info.lnk_tp);
+	}
+
 	bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req,
-	       LPFC_DIAG_LOOPBACK_TYPE_INTERNAL);
+	       mode);
 
 	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
 	if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) {
@@ -2053,7 +2062,7 @@
 	struct fc_bsg_request *bsg_request = job->request;
 	struct fc_bsg_reply *bsg_reply = job->reply;
 	struct diag_mode_set *loopback_mode;
-	uint32_t link_flags, timeout;
+	uint32_t link_flags, timeout, link_no;
 	int i, rc = 0;
 
 	/* no data to return just the return code */
@@ -2068,12 +2077,39 @@
 				(int)(sizeof(struct fc_bsg_request) +
 				sizeof(struct diag_mode_set)));
 		rc = -EINVAL;
-		goto job_error;
+		goto job_done;
+	}
+
+	loopback_mode = (struct diag_mode_set *)
+		bsg_request->rqst_data.h_vendor.vendor_cmd;
+	link_flags = loopback_mode->type;
+	timeout = loopback_mode->timeout * 100;
+
+	if (loopback_mode->physical_link == -1)
+		link_no = phba->sli4_hba.lnk_info.lnk_no;
+	else
+		link_no = loopback_mode->physical_link;
+
+	if (link_flags == DISABLE_LOOP_BACK) {
+		rc = lpfc_sli4_bsg_set_loopback_mode(phba,
+					LPFC_DIAG_LOOPBACK_TYPE_DISABLE,
+					link_no);
+		if (!rc) {
+			/* Unset the need disable bit */
+			phba->sli4_hba.conf_trunk &= ~((1 << link_no) << 4);
+		}
+		goto job_done;
+	} else {
+		/* Check if we need to disable the loopback state */
+		if (phba->sli4_hba.conf_trunk & ((1 << link_no) << 4)) {
+			rc = -EPERM;
+			goto job_done;
+		}
 	}
 
 	rc = lpfc_bsg_diag_mode_enter(phba);
 	if (rc)
-		goto job_error;
+		goto job_done;
 
 	/* indicate we are in loopback diagnostic mode */
 	spin_lock_irq(&phba->hbalock);
@@ -2083,15 +2119,11 @@
 	/* reset port to start from scratch */
 	rc = lpfc_selective_reset(phba);
 	if (rc)
-		goto job_error;
+		goto job_done;
 
 	/* bring the link to diagnostic mode */
 	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
 			"3129 Bring link to diagnostic state.\n");
-	loopback_mode = (struct diag_mode_set *)
-		bsg_request->rqst_data.h_vendor.vendor_cmd;
-	link_flags = loopback_mode->type;
-	timeout = loopback_mode->timeout * 100;
 
 	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
 	if (rc) {
@@ -2119,13 +2151,54 @@
 	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
 			"3132 Set up loopback mode:x%x\n", link_flags);
 
-	if (link_flags == INTERNAL_LOOP_BACK)
-		rc = lpfc_sli4_bsg_set_internal_loopback(phba);
-	else if (link_flags == EXTERNAL_LOOP_BACK)
-		rc = lpfc_hba_init_link_fc_topology(phba,
-						    FLAGS_TOPOLOGY_MODE_PT_PT,
-						    MBX_NOWAIT);
-	else {
+	switch (link_flags) {
+	case INTERNAL_LOOP_BACK:
+		if (phba->sli4_hba.conf_trunk & (1 << link_no)) {
+			rc = lpfc_sli4_bsg_set_loopback_mode(phba,
+					LPFC_DIAG_LOOPBACK_TYPE_INTERNAL,
+					link_no);
+		} else {
+			/* Trunk is configured, but link is not in this trunk */
+			if (phba->sli4_hba.conf_trunk) {
+				rc = -ELNRNG;
+				goto loopback_mode_exit;
+			}
+
+			rc = lpfc_sli4_bsg_set_loopback_mode(phba,
+					LPFC_DIAG_LOOPBACK_TYPE_INTERNAL,
+					link_no);
+		}
+
+		if (!rc) {
+			/* Set the need disable bit */
+			phba->sli4_hba.conf_trunk |= (1 << link_no) << 4;
+		}
+
+		break;
+	case EXTERNAL_LOOP_BACK:
+		if (phba->sli4_hba.conf_trunk & (1 << link_no)) {
+			rc = lpfc_sli4_bsg_set_loopback_mode(phba,
+				LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL_TRUNKED,
+				link_no);
+		} else {
+			/* Trunk is configured, but link is not in this trunk */
+			if (phba->sli4_hba.conf_trunk) {
+				rc = -ELNRNG;
+				goto loopback_mode_exit;
+			}
+
+			rc = lpfc_sli4_bsg_set_loopback_mode(phba,
+						LPFC_DIAG_LOOPBACK_TYPE_SERDES,
+						link_no);
+		}
+
+		if (!rc) {
+			/* Set the need disable bit */
+			phba->sli4_hba.conf_trunk |= (1 << link_no) << 4;
+		}
+
+		break;
+	default:
 		rc = -EINVAL;
 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
 				"3141 Loopback mode:x%x not supported\n",
@@ -2184,7 +2257,7 @@
 	}
 	lpfc_bsg_diag_mode_exit(phba);
 
-job_error:
+job_done:
 	/* make error code available to userspace */
 	bsg_reply->result = rc;
 	/* complete the job back to userspace if no error */
@@ -2221,7 +2294,7 @@
 
 	if (phba->sli_rev < LPFC_SLI_REV4)
 		rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job);
-	else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+	else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
 		 LPFC_SLI_INTF_IF_TYPE_2)
 		rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job);
 	else
@@ -2261,7 +2334,7 @@
 
 	if (phba->sli_rev < LPFC_SLI_REV4)
 		return -ENODEV;
-	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
 	    LPFC_SLI_INTF_IF_TYPE_2)
 		return -ENODEV;
 
@@ -2353,7 +2426,7 @@
 		rc = -ENODEV;
 		goto job_error;
 	}
-	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
 	    LPFC_SLI_INTF_IF_TYPE_2) {
 		rc = -ENODEV;
 		goto job_error;
@@ -2500,9 +2573,9 @@
 		return -ENOMEM;
 	}
 
-	dmabuff = (struct lpfc_dmabuf *) mbox->context1;
-	mbox->context1 = NULL;
-	mbox->context2 = NULL;
+	dmabuff = (struct lpfc_dmabuf *)mbox->ctx_buf;
+	mbox->ctx_buf = NULL;
+	mbox->ctx_ndlp = NULL;
 	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
 
 	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
@@ -2729,8 +2802,8 @@
 	INIT_LIST_HEAD(&dmabuf->list);
 
 	/* now, allocate dma buffer */
-	dmabuf->virt = dma_zalloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
-					   &(dmabuf->phys), GFP_KERNEL);
+	dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
+					  &(dmabuf->phys), GFP_KERNEL);
 
 	if (!dmabuf->virt) {
 		kfree(dmabuf);
@@ -2843,9 +2916,6 @@
 
 		if (nocopydata) {
 			bpl->tus.f.bdeFlags = 0;
-			pci_dma_sync_single_for_device(phba->pcidev,
-				dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
-
 		} else {
 			memset((uint8_t *)dmp->dma.virt, 0, cnt);
 			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
@@ -2949,7 +3019,7 @@
 			cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys);
 			cmd->un.cont64[i].tus.f.bdeSize =
 				((struct lpfc_dmabufext *)mp[i])->size;
-					cmd->ulpBdeCount = ++i;
+			cmd->ulpBdeCount = ++i;
 
 			if ((--num_bde > 0) && (i < 2))
 				continue;
@@ -3390,7 +3460,7 @@
 	unsigned long flags;
 	uint8_t *pmb, *pmb_buf;
 
-	dd_data = pmboxq->context1;
+	dd_data = pmboxq->ctx_ndlp;
 
 	/*
 	 * The outgoing buffer is readily referred from the dma buffer,
@@ -3575,7 +3645,7 @@
 	struct lpfc_sli_config_mbox *sli_cfg_mbx;
 	uint8_t *pmbx;
 
-	dd_data = pmboxq->context1;
+	dd_data = pmboxq->ctx_buf;
 
 	/* Determine if job has been aborted */
 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
@@ -3962,7 +4032,7 @@
 	pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl;
 
 	/* context fields to callback function */
-	pmboxq->context1 = dd_data;
+	pmboxq->ctx_buf = dd_data;
 	dd_data->type = TYPE_MBOX;
 	dd_data->set_job = job;
 	dd_data->context_un.mbox.pmboxq = pmboxq;
@@ -4133,7 +4203,7 @@
 		pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
 
 		/* context fields to callback function */
-		pmboxq->context1 = dd_data;
+		pmboxq->ctx_buf = dd_data;
 		dd_data->type = TYPE_MBOX;
 		dd_data->set_job = job;
 		dd_data->context_un.mbox.pmboxq = pmboxq;
@@ -4478,7 +4548,7 @@
 		pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
 
 		/* context fields to callback function */
-		pmboxq->context1 = dd_data;
+		pmboxq->ctx_buf = dd_data;
 		dd_data->type = TYPE_MBOX;
 		dd_data->set_job = job;
 		dd_data->context_un.mbox.pmboxq = pmboxq;
@@ -4684,7 +4754,7 @@
 	 * Don't allow mailbox commands to be sent when blocked or when in
 	 * the middle of discovery
 	 */
-	 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
+	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
 		rc = -EAGAIN;
 		goto job_done;
 	}
@@ -4763,7 +4833,7 @@
 	if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
 		from = pmbx;
 		ext = from + sizeof(MAILBOX_t);
-		pmboxq->context2 = ext;
+		pmboxq->ctx_buf = ext;
 		pmboxq->in_ext_byte_len =
 			mbox_req->inExtWLen * sizeof(uint32_t);
 		pmboxq->out_ext_byte_len =
@@ -4891,7 +4961,7 @@
 	pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl;
 
 	/* setup context field to pass wait_queue pointer to wake function */
-	pmboxq->context1 = dd_data;
+	pmboxq->ctx_ndlp = dd_data;
 	dd_data->type = TYPE_MBOX;
 	dd_data->set_job = job;
 	dd_data->context_un.mbox.pmboxq = pmboxq;
@@ -5309,6 +5379,383 @@
 }
 
 /**
+ * lpfc_check_fwlog_support: Check FW log support on the adapter
+ * @phba: Pointer to HBA context object.
+ *
+ * Check if FW logging is supported by the adapter
+ **/
+int
+lpfc_check_fwlog_support(struct lpfc_hba *phba)
+{
+	struct lpfc_ras_fwlog *ras_fwlog = NULL;
+
+	ras_fwlog = &phba->ras_fwlog;
+
+	if (ras_fwlog->ras_hwsupport == false)
+		return -EACCES;
+	else if (ras_fwlog->ras_enabled == false)
+		return -EPERM;
+	else
+		return 0;
+}
+
+/**
+ * lpfc_bsg_get_ras_config: Get RAS configuration settings
+ * @job: fc_bsg_job to handle
+ *
+ * Get RAS configuration values set.
+ **/
+static int
+lpfc_bsg_get_ras_config(struct bsg_job *job)
+{
+	struct Scsi_Host *shost = fc_bsg_to_shost(job);
+	struct lpfc_vport *vport = shost_priv(shost);
+	struct fc_bsg_reply *bsg_reply = job->reply;
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_bsg_get_ras_config_reply *ras_reply;
+	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+	int rc = 0;
+
+	if (job->request_len <
+	    sizeof(struct fc_bsg_request) +
+	    sizeof(struct lpfc_bsg_ras_req)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+				"6192 FW_LOG request received "
+				"below minimum size\n");
+		rc = -EINVAL;
+		goto ras_job_error;
+	}
+
+	/* Check FW log status */
+	rc = lpfc_check_fwlog_support(phba);
+	if (rc)
+		goto ras_job_error;
+
+	ras_reply = (struct lpfc_bsg_get_ras_config_reply *)
+		bsg_reply->reply_data.vendor_reply.vendor_rsp;
+
+	/* Current logging state */
+	if (ras_fwlog->ras_active == true)
+		ras_reply->state = LPFC_RASLOG_STATE_RUNNING;
+	else
+		ras_reply->state = LPFC_RASLOG_STATE_STOPPED;
+
+	ras_reply->log_level = phba->ras_fwlog.fw_loglevel;
+	ras_reply->log_buff_sz = phba->cfg_ras_fwlog_buffsize;
+
+ras_job_error:
+	/* make error code available to userspace */
+	bsg_reply->result = rc;
+
+	/* complete the job back to userspace */
+	if (!rc)
+		bsg_job_done(job, bsg_reply->result,
+			     bsg_reply->reply_payload_rcv_len);
+	return rc;
+}
+
+/**
+ * lpfc_bsg_set_ras_config: Set FW logging parameters
+ * @job: fc_bsg_job to handle
+ *
+ * Set log-level parameters for FW-logging in host memory
+ **/
+static int
+lpfc_bsg_set_ras_config(struct bsg_job *job)
+{
+	struct Scsi_Host *shost = fc_bsg_to_shost(job);
+	struct lpfc_vport *vport = shost_priv(shost);
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_bsg_set_ras_config_req *ras_req;
+	struct fc_bsg_request *bsg_request = job->request;
+	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+	struct fc_bsg_reply *bsg_reply = job->reply;
+	uint8_t action = 0, log_level = 0;
+	int rc = 0, action_status = 0;
+
+	if (job->request_len <
+	    sizeof(struct fc_bsg_request) +
+	    sizeof(struct lpfc_bsg_set_ras_config_req)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+				"6182 Received RAS_LOG request "
+				"below minimum size\n");
+		rc = -EINVAL;
+		goto ras_job_error;
+	}
+
+	/* Check FW log status */
+	rc = lpfc_check_fwlog_support(phba);
+	if (rc)
+		goto ras_job_error;
+
+	ras_req = (struct lpfc_bsg_set_ras_config_req *)
+		bsg_request->rqst_data.h_vendor.vendor_cmd;
+	action = ras_req->action;
+	log_level = ras_req->log_level;
+
+	if (action == LPFC_RASACTION_STOP_LOGGING) {
+		/* Check if already disabled */
+		if (ras_fwlog->ras_active == false) {
+			rc = -ESRCH;
+			goto ras_job_error;
+		}
+
+		/* Disable logging */
+		lpfc_ras_stop_fwlog(phba);
+	} else {
+		/* action = LPFC_RASACTION_START_LOGGING */
+
+		/* Even though FW-logging is active, re-initialize
+		 * FW-logging with the new log-level. Return status
+		 * "Logging already Running" to the caller.
+		 */
+		if (ras_fwlog->ras_active)
+			action_status = -EINPROGRESS;
+
+		/* Enable logging */
+		rc = lpfc_sli4_ras_fwlog_init(phba, log_level,
+					      LPFC_RAS_ENABLE_LOGGING);
+		if (rc) {
+			rc = -EINVAL;
+			goto ras_job_error;
+		}
+
+		/* Check if FW-logging is re-initialized */
+		if (action_status == -EINPROGRESS)
+			rc = action_status;
+	}
+ras_job_error:
+	/* make error code available to userspace */
+	bsg_reply->result = rc;
+
+	/* complete the job back to userspace */
+	if (!rc)
+		bsg_job_done(job, bsg_reply->result,
+			     bsg_reply->reply_payload_rcv_len);
+
+	return rc;
+}
+
+/**
+ * lpfc_bsg_get_ras_lwpd: Get log write position data
+ * @job: fc_bsg_job to handle
+ *
+ * Get Offset/Wrap count of the log message written
+ * in host memory
+ **/
+static int
+lpfc_bsg_get_ras_lwpd(struct bsg_job *job)
+{
+	struct Scsi_Host *shost = fc_bsg_to_shost(job);
+	struct lpfc_vport *vport = shost_priv(shost);
+	struct lpfc_bsg_get_ras_lwpd *ras_reply;
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+	struct fc_bsg_reply *bsg_reply = job->reply;
+	u32 *lwpd_ptr = NULL;
+	int rc = 0;
+
+	rc = lpfc_check_fwlog_support(phba);
+	if (rc)
+		goto ras_job_error;
+
+	if (job->request_len <
+	    sizeof(struct fc_bsg_request) +
+	    sizeof(struct lpfc_bsg_ras_req)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+				"6183 Received RAS_LOG request "
+				"below minimum size\n");
+		rc = -EINVAL;
+		goto ras_job_error;
+	}
+
+	ras_reply = (struct lpfc_bsg_get_ras_lwpd *)
+		bsg_reply->reply_data.vendor_reply.vendor_rsp;
+
+	if (!ras_fwlog->lwpd.virt) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+				"6193 Restart FW Logging\n");
+		rc = -EINVAL;
+		goto ras_job_error;
+	}
+
+	/* Get lwpd offset */
+	lwpd_ptr = (uint32_t *)(ras_fwlog->lwpd.virt);
+	ras_reply->offset = be32_to_cpu(*lwpd_ptr & 0xffffffff);
+
+	/* Get wrap count */
+	ras_reply->wrap_count = be32_to_cpu(*(++lwpd_ptr) & 0xffffffff);
+
+ras_job_error:
+	/* make error code available to userspace */
+	bsg_reply->result = rc;
+
+	/* complete the job back to userspace */
+	if (!rc)
+		bsg_job_done(job, bsg_reply->result,
+			     bsg_reply->reply_payload_rcv_len);
+
+	return rc;
+}
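The two reads above treat the LWPD buffer as a pair of consecutive big-endian 32-bit words. A struct view of that implied layout, offered as an illustration rather than a driver definition:

/* Assumed layout behind the two be32_to_cpu() reads above. */
struct lpfc_ras_lwpd_layout {
	__be32 offset;		/* current firmware write offset */
	__be32 wrap_count;	/* number of times the log has wrapped */
};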
+
+/**
+ * lpfc_bsg_get_ras_fwlog: Read FW log
+ * @job: fc_bsg_job to handle
+ *
+ * Copy the FW log into the passed buffer.
+ **/
+static int
+lpfc_bsg_get_ras_fwlog(struct bsg_job *job)
+{
+	struct Scsi_Host *shost = fc_bsg_to_shost(job);
+	struct lpfc_vport *vport = shost_priv(shost);
+	struct lpfc_hba *phba = vport->phba;
+	struct fc_bsg_request *bsg_request = job->request;
+	struct fc_bsg_reply *bsg_reply = job->reply;
+	struct lpfc_bsg_get_fwlog_req *ras_req;
+	u32 rd_offset, rd_index, offset;
+	void *src, *fwlog_buff;
+	struct lpfc_ras_fwlog *ras_fwlog = NULL;
+	struct lpfc_dmabuf *dmabuf, *next;
+	int rc = 0;
+
+	ras_fwlog = &phba->ras_fwlog;
+
+	rc = lpfc_check_fwlog_support(phba);
+	if (rc)
+		goto ras_job_error;
+
+	/* Logging to be stopped before reading */
+	if (ras_fwlog->ras_active == true) {
+		rc = -EINPROGRESS;
+		goto ras_job_error;
+	}
+
+	if (job->request_len <
+	    sizeof(struct fc_bsg_request) +
+	    sizeof(struct lpfc_bsg_get_fwlog_req)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+				"6184 Received RAS_LOG request "
+				"below minimum size\n");
+		rc = -EINVAL;
+		goto ras_job_error;
+	}
+
+	ras_req = (struct lpfc_bsg_get_fwlog_req *)
+		bsg_request->rqst_data.h_vendor.vendor_cmd;
+	rd_offset = ras_req->read_offset;
+
+	/* Allocate memory to read fw log */
+	fwlog_buff = vmalloc(ras_req->read_size);
+	if (!fwlog_buff) {
+		rc = -ENOMEM;
+		goto ras_job_error;
+	}
+
+	rd_index = (rd_offset / LPFC_RAS_MAX_ENTRY_SIZE);
+	offset = (rd_offset % LPFC_RAS_MAX_ENTRY_SIZE);
+
+	list_for_each_entry_safe(dmabuf, next,
+			      &ras_fwlog->fwlog_buff_list, list) {
+
+		if (dmabuf->buffer_tag < rd_index)
+			continue;
+
+		src = dmabuf->virt + offset;
+		memcpy(fwlog_buff, src, ras_req->read_size);
+		break;
+	}
+
+	bsg_reply->reply_payload_rcv_len =
+		sg_copy_from_buffer(job->reply_payload.sg_list,
+				    job->reply_payload.sg_cnt,
+				    fwlog_buff, ras_req->read_size);
+
+	vfree(fwlog_buff);
+
+ras_job_error:
+	bsg_reply->result = rc;
+	if (!rc)
+		bsg_job_done(job, bsg_reply->result,
+			     bsg_reply->reply_payload_rcv_len);
+
+	return rc;
+}
+
+static int
+lpfc_get_trunk_info(struct bsg_job *job)
+{
+	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
+	struct lpfc_hba *phba = vport->phba;
+	struct fc_bsg_reply *bsg_reply = job->reply;
+	struct lpfc_trunk_info *event_reply;
+	int rc = 0;
+
+	if (job->request_len <
+	    sizeof(struct fc_bsg_request) + sizeof(struct get_trunk_info_req)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+				"2744 Received GET TRUNK _INFO request below "
+				"minimum size\n");
+		rc = -EINVAL;
+		goto job_error;
+	}
+
+	event_reply = (struct lpfc_trunk_info *)
+		bsg_reply->reply_data.vendor_reply.vendor_rsp;
+
+	if (job->reply_len <
+	    sizeof(struct fc_bsg_request) + sizeof(struct lpfc_trunk_info)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2728 Received GET TRUNK _INFO reply below "
+				"minimum size\n");
+		rc = -EINVAL;
+		goto job_error;
+	}
+	if (event_reply == NULL) {
+		rc = -EINVAL;
+		goto job_error;
+	}
+
+	bsg_bf_set(lpfc_trunk_info_link_status, event_reply,
+		   (phba->link_state >= LPFC_LINK_UP) ? 1 : 0);
+
+	bsg_bf_set(lpfc_trunk_info_trunk_active0, event_reply,
+		   (phba->trunk_link.link0.state == LPFC_LINK_UP) ? 1 : 0);
+
+	bsg_bf_set(lpfc_trunk_info_trunk_active1, event_reply,
+		   (phba->trunk_link.link1.state == LPFC_LINK_UP) ? 1 : 0);
+
+	bsg_bf_set(lpfc_trunk_info_trunk_active2, event_reply,
+		   (phba->trunk_link.link2.state == LPFC_LINK_UP) ? 1 : 0);
+
+	bsg_bf_set(lpfc_trunk_info_trunk_active3, event_reply,
+		   (phba->trunk_link.link3.state == LPFC_LINK_UP) ? 1 : 0);
+
+	bsg_bf_set(lpfc_trunk_info_trunk_config0, event_reply,
+		   bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba));
+
+	bsg_bf_set(lpfc_trunk_info_trunk_config1, event_reply,
+		   bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba));
+
+	bsg_bf_set(lpfc_trunk_info_trunk_config2, event_reply,
+		   bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba));
+
+	bsg_bf_set(lpfc_trunk_info_trunk_config3, event_reply,
+		   bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba));
+
+	event_reply->port_speed = phba->sli4_hba.link_state.speed / 1000;
+	event_reply->logical_speed =
+				phba->sli4_hba.link_state.logical_speed / 1000;
+job_error:
+	bsg_reply->result = rc;
+	if (!rc)
+		bsg_job_done(job, bsg_reply->result,
+			     bsg_reply->reply_payload_rcv_len);
+	return rc;
+}
+
+/**
  * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
  * @job: fc_bsg_job to handle
  **/
@@ -5355,6 +5802,21 @@
 	case LPFC_BSG_VENDOR_FORCED_LINK_SPEED:
 		rc = lpfc_forced_link_speed(job);
 		break;
+	case LPFC_BSG_VENDOR_RAS_GET_LWPD:
+		rc = lpfc_bsg_get_ras_lwpd(job);
+		break;
+	case LPFC_BSG_VENDOR_RAS_GET_FWLOG:
+		rc = lpfc_bsg_get_ras_fwlog(job);
+		break;
+	case LPFC_BSG_VENDOR_RAS_GET_CONFIG:
+		rc = lpfc_bsg_get_ras_config(job);
+		break;
+	case LPFC_BSG_VENDOR_RAS_SET_CONFIG:
+		rc = lpfc_bsg_set_ras_config(job);
+		break;
+	case LPFC_BSG_VENDOR_GET_TRUNK_INFO:
+		rc = lpfc_get_trunk_info(job);
+		break;
 	default:
 		rc = -EINVAL;
 		bsg_reply->reply_payload_rcv_len = 0;
@@ -5368,7 +5830,7 @@
 
 /**
  * lpfc_bsg_request - handle a bsg request from the FC transport
- * @job: fc_bsg_job to handle
+ * @job: bsg_job to handle
  **/
 int
 lpfc_bsg_request(struct bsg_job *job)
@@ -5402,7 +5864,7 @@
 
 /**
  * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
- * @job: fc_bsg_job that has timed out
+ * @job: bsg_job that has timed out
  *
  * This function just aborts the job's IOCB.  The aborted IOCB will return to
  * the waiting function which will handle passing the error back to userspace
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index 32347c8..d170813 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2010-2015 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -38,6 +38,11 @@
 #define LPFC_BSG_VENDOR_DIAG_MODE_END		10
 #define LPFC_BSG_VENDOR_LINK_DIAG_TEST		11
 #define LPFC_BSG_VENDOR_FORCED_LINK_SPEED	14
+#define LPFC_BSG_VENDOR_RAS_GET_LWPD		16
+#define LPFC_BSG_VENDOR_RAS_GET_FWLOG		17
+#define LPFC_BSG_VENDOR_RAS_GET_CONFIG		18
+#define LPFC_BSG_VENDOR_RAS_SET_CONFIG		19
+#define LPFC_BSG_VENDOR_GET_TRUNK_INFO		20
 
 struct set_ct_event {
 	uint32_t command;
@@ -63,6 +68,7 @@
 };
 
 
+#define DISABLE_LOOP_BACK  0x0 /* disables loop back */
 #define INTERNAL_LOOP_BACK 0x1 /* adapter short cuts the loop internally */
 #define EXTERNAL_LOOP_BACK 0x2 /* requires an external loopback plug */
 
@@ -70,6 +76,7 @@
 	uint32_t command;
 	uint32_t type;
 	uint32_t timeout;
+	uint32_t physical_link;
 };
 
 struct sli4_link_diag {
@@ -296,6 +303,75 @@
 	uint8_t supported;
 };
 
+struct lpfc_bsg_ras_req {
+	uint32_t command;
+};
+
+struct lpfc_bsg_get_fwlog_req {
+	uint32_t command;
+	uint32_t read_size;
+	uint32_t read_offset;
+};
+
+struct lpfc_bsg_get_ras_lwpd {
+	uint32_t offset;
+	uint32_t wrap_count;
+};
+
+struct lpfc_bsg_set_ras_config_req {
+	uint32_t command;
+	uint8_t action;
+#define LPFC_RASACTION_STOP_LOGGING     0x00
+#define LPFC_RASACTION_START_LOGGING    0x01
+	uint8_t log_level;
+};
+
+struct lpfc_bsg_get_ras_config_reply {
+	uint8_t state;
+#define LPFC_RASLOG_STATE_STOPPED 0x00
+#define LPFC_RASLOG_STATE_RUNNING 0x01
+	uint8_t log_level;
+	uint32_t log_buff_sz;
+};
+
+struct lpfc_trunk_info {
+	uint32_t word0;
+#define lpfc_trunk_info_link_status_SHIFT      0
+#define lpfc_trunk_info_link_status_MASK       1
+#define lpfc_trunk_info_link_status_WORD       word0
+#define lpfc_trunk_info_trunk_active0_SHIFT    8
+#define lpfc_trunk_info_trunk_active0_MASK     1
+#define lpfc_trunk_info_trunk_active0_WORD     word0
+#define lpfc_trunk_info_trunk_active1_SHIFT    9
+#define lpfc_trunk_info_trunk_active1_MASK     1
+#define lpfc_trunk_info_trunk_active1_WORD     word0
+#define lpfc_trunk_info_trunk_active2_SHIFT    10
+#define lpfc_trunk_info_trunk_active2_MASK     1
+#define lpfc_trunk_info_trunk_active2_WORD     word0
+#define lpfc_trunk_info_trunk_active3_SHIFT    11
+#define lpfc_trunk_info_trunk_active3_MASK     1
+#define lpfc_trunk_info_trunk_active3_WORD     word0
+#define lpfc_trunk_info_trunk_config0_SHIFT    12
+#define lpfc_trunk_info_trunk_config0_MASK     1
+#define lpfc_trunk_info_trunk_config0_WORD     word0
+#define lpfc_trunk_info_trunk_config1_SHIFT    13
+#define lpfc_trunk_info_trunk_config1_MASK     1
+#define lpfc_trunk_info_trunk_config1_WORD     word0
+#define lpfc_trunk_info_trunk_config2_SHIFT    14
+#define lpfc_trunk_info_trunk_config2_MASK     1
+#define lpfc_trunk_info_trunk_config2_WORD     word0
+#define lpfc_trunk_info_trunk_config3_SHIFT    15
+#define lpfc_trunk_info_trunk_config3_MASK     1
+#define lpfc_trunk_info_trunk_config3_WORD     word0
+	uint16_t    port_speed;
+	uint16_t    logical_speed;
+	uint32_t    reserved3;
+};
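Each _SHIFT/_MASK/_WORD triplet above is meant to be consumed by token-pasting accessors such as the bsg_bf_set() calls used earlier in this patch. A sketch of a setter consistent with the triplets; the driver's actual macro body is not shown here, so this is an assumption:

/* Sketch: read-modify-write of the named bit field within its word. */
#define bsg_bf_set(name, ptr, value)					\
	((ptr)->name##_WORD = (((value) & name##_MASK) << name##_SHIFT) | \
	 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT)))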
+
+struct get_trunk_info_req {
+	uint32_t command;
+};
+
 /* driver only */
 #define SLI_CONFIG_NOT_HANDLED		0
 #define SLI_CONFIG_HANDLED		1
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index bea24bc..b2ad8c7 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -74,7 +74,6 @@
 void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *);
 void lpfc_retry_pport_discovery(struct lpfc_hba *);
-void lpfc_release_rpi(struct lpfc_hba *, struct lpfc_vport *, uint16_t);
 int lpfc_init_iocb_list(struct lpfc_hba *phba, int cnt);
 void lpfc_free_iocb_list(struct lpfc_hba *phba);
 int lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
@@ -142,6 +141,7 @@
 int lpfc_issue_els_logo(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
 int lpfc_issue_els_npiv_logo(struct lpfc_vport *, struct lpfc_nodelist *);
 int lpfc_issue_els_scr(struct lpfc_vport *, uint32_t, uint8_t);
+int lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry);
 int lpfc_issue_fabric_reglogin(struct lpfc_vport *);
 int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
 int lpfc_ct_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
@@ -175,6 +175,7 @@
 void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
 			 struct lpfc_iocbq *);
 int lpfc_ct_handle_unsol_abort(struct lpfc_hba *, struct hbq_dmabuf *);
+int lpfc_issue_gidpt(struct lpfc_vport *vport);
 int lpfc_issue_gidft(struct lpfc_vport *vport);
 int lpfc_get_gidft_type(struct lpfc_vport *vport, struct lpfc_iocbq *iocbq);
 int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
@@ -199,11 +200,6 @@
 int lpfc_emptyq_wait(struct lpfc_hba *phba, struct list_head *hd,
 			spinlock_t *slock);
 
-int lpfc_fof_queue_create(struct lpfc_hba *);
-int lpfc_fof_queue_setup(struct lpfc_hba *);
-int lpfc_fof_queue_destroy(struct lpfc_hba *);
-irqreturn_t lpfc_sli4_fof_intr_handler(int, void *);
-
 int lpfc_sli_setup(struct lpfc_hba *);
 int lpfc_sli4_setup(struct lpfc_hba *phba);
 void lpfc_sli_queue_init(struct lpfc_hba *phba);
@@ -320,8 +316,8 @@
 void lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *, LPFC_MBOXQ_t *);
 int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
 			struct lpfc_iocbq *, uint32_t);
-int lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t rnum,
-			struct lpfc_iocbq *iocbq);
+int lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
+			struct lpfc_iocbq *pwqe);
 struct lpfc_sglq *__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xri);
 struct lpfc_sglq *__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba,
 					    struct lpfc_iocbq *piocbq);
@@ -330,7 +326,7 @@
 void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
 void lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba);
 void lpfc_sli_hba_iocb_abort(struct lpfc_hba *);
-void lpfc_sli_flush_fcp_rings(struct lpfc_hba *);
+void lpfc_sli_flush_io_rings(struct lpfc_hba *phba);
 int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
 			     struct lpfc_dmabuf *);
 struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *,
@@ -360,6 +356,7 @@
 struct lpfc_nodelist *lpfc_findnode_did(struct lpfc_vport *, uint32_t);
 struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *,
 					 struct lpfc_name *);
+struct lpfc_nodelist *lpfc_findnode_mapped(struct lpfc_vport *vport);
 
 int lpfc_sli_issue_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
 
@@ -380,8 +377,10 @@
 
 void lpfc_in_buf_free(struct lpfc_hba *, struct lpfc_dmabuf *);
 void lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp);
+int lpfc_link_reset(struct lpfc_vport *vport);
 
 /* Function prototypes. */
+int lpfc_check_pci_resettable(const struct lpfc_hba *phba);
 const char* lpfc_info(struct Scsi_Host *);
 int lpfc_scan_finished(struct Scsi_Host *, unsigned long);
 
@@ -434,17 +433,6 @@
 int lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *, uint16_t,
 					  uint16_t *, uint16_t *);
 
-/* externs BlockGuard */
-extern char *_dump_buf_data;
-extern unsigned long _dump_buf_data_order;
-extern char *_dump_buf_dif;
-extern unsigned long _dump_buf_dif_order;
-extern spinlock_t _dump_buf_lock;
-extern int _dump_buf_done;
-extern spinlock_t pgcnt_lock;
-extern unsigned int pgcnt;
-extern unsigned int lpfc_fcp_look_ahead;
-
 /* Interface exported by fabric iocb scheduler */
 void lpfc_fabric_abort_nport(struct lpfc_nodelist *);
 void lpfc_fabric_abort_hba(struct lpfc_hba *);
@@ -518,8 +506,13 @@
 void lpfc_sli4_node_prep(struct lpfc_hba *);
 int lpfc_sli4_els_sgl_update(struct lpfc_hba *phba);
 int lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba);
-int lpfc_sli4_scsi_sgl_update(struct lpfc_hba *phba);
-int lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba);
+int lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *sglist);
+int lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf);
+int lpfc_sli4_io_sgl_update(struct lpfc_hba *phba);
+int lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
+		struct list_head *blist, int xricnt);
+int lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc);
+void lpfc_io_free(struct lpfc_hba *phba);
 void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *);
 uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *);
 int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t);
@@ -545,7 +538,17 @@
 int lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox);
 void lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb);
 
+/* RAS Interface */
+void lpfc_sli4_ras_init(struct lpfc_hba *phba);
+void lpfc_sli4_ras_setup(struct lpfc_hba *phba);
+int  lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba, uint32_t fwlog_level,
+			 uint32_t fwlog_enable);
+void lpfc_ras_stop_fwlog(struct lpfc_hba *phba);
+int lpfc_check_fwlog_support(struct lpfc_hba *phba);
+
 /* NVME interfaces. */
+void lpfc_nvme_rescan_port(struct lpfc_vport *vport,
+			   struct lpfc_nodelist *ndlp);
 void lpfc_nvme_unregister_port(struct lpfc_vport *vport,
 			struct lpfc_nodelist *ndlp);
 int lpfc_nvme_register_port(struct lpfc_vport *vport,
@@ -559,13 +562,30 @@
 void lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba,
 			struct lpfc_sli_ring *pring, struct lpfc_iocbq *piocb);
 void lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba, uint32_t idx,
-				struct rqb_dmabuf *nvmebuf, uint64_t isr_ts);
+				struct rqb_dmabuf *nvmebuf, uint64_t isr_ts,
+				uint8_t cqflag);
 void lpfc_nvme_mod_param_dep(struct lpfc_hba *phba);
 void lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba,
 				struct lpfc_iocbq *cmdiocb,
 				struct lpfc_wcqe_complete *abts_cmpl);
+void lpfc_create_multixri_pools(struct lpfc_hba *phba);
+void lpfc_create_destroy_pools(struct lpfc_hba *phba);
+void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid);
+void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 cnt);
+void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid);
+void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid);
+void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid);
+#ifdef LPFC_MXP_STAT
+void lpfc_snapshot_mxp(struct lpfc_hba *, u32);
+#endif
+struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
+				struct lpfc_nodelist *ndlp, u32 hwqid,
+				int);
+void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *ncmd,
+			 struct lpfc_sli4_hdw_queue *qp);
 void lpfc_nvme_cmd_template(void);
 void lpfc_nvmet_cmd_template(void);
+void lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn);
 extern int lpfc_enable_nvmet_cnt;
 extern unsigned long long lpfc_enable_nvmet[];
 extern int lpfc_no_hba_reset_cnt;
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 1cbdc89..25e8670 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -445,14 +445,14 @@
 	struct lpfc_vport *vport_curr;
 	unsigned long flags;
 
-	spin_lock_irqsave(&phba->hbalock, flags);
+	spin_lock_irqsave(&phba->port_list_lock, flags);
 	list_for_each_entry(vport_curr, &phba->port_list, listentry) {
 		if ((vport_curr->fc_myDID) && (vport_curr->fc_myDID == did)) {
-			spin_unlock_irqrestore(&phba->hbalock, flags);
+			spin_unlock_irqrestore(&phba->port_list_lock, flags);
 			return vport_curr;
 		}
 	}
-	spin_unlock_irqrestore(&phba->hbalock, flags);
+	spin_unlock_irqrestore(&phba->port_list_lock, flags);
 	return NULL;
 }
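
The hbalock-to-port_list_lock change above narrows what the DID lookup must
serialize against: only the port list itself is protected, so walking it no
longer blocks unrelated HBA state updates. A toy userspace rendition of the
pattern follows; all structures, names, and DID values here are invented for
illustration and are not the driver's actual types.

#include <pthread.h>
#include <stdio.h>

struct vport {
	unsigned int did;		/* fabric-assigned port ID */
	struct vport *next;
};

static struct vport v2 = { 0x020300, NULL };
static struct vport v1 = { 0x010200, &v2 };
static struct vport *port_list = &v1;

/* Dedicated lock for the list, analogous to port_list_lock. */
static pthread_mutex_t port_list_lock = PTHREAD_MUTEX_INITIALIZER;

static struct vport *find_vport_by_did(unsigned int did)
{
	struct vport *v, *found = NULL;

	pthread_mutex_lock(&port_list_lock);	/* protects only the walk */
	for (v = port_list; v; v = v->next) {
		if (v->did == did) {
			found = v;
			break;
		}
	}
	pthread_mutex_unlock(&port_list_lock);
	return found;
}

int main(void)
{
	printf("found vport: %p\n", (void *)find_vport_by_did(0x020300));
	return 0;
}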
 
@@ -462,6 +462,7 @@
 	struct lpfc_nodelist *ndlp;
 
 	if ((vport->port_type != LPFC_NPIV_PORT) ||
+	    (fc4_type == FC_TYPE_FCP) ||
 	    !(vport->ct_flags & FC_CT_RFF_ID) || !vport->cfg_restrict_login) {
 
 		ndlp = lpfc_setup_disc_node(vport, Did);
@@ -471,11 +472,6 @@
 				"Parse GID_FTrsp: did:x%x flg:x%x x%x",
 				Did, ndlp->nlp_flag, vport->fc_flag);
 
-			/* Don't assume the rport is always the previous
-			 * FC4 type.
-			 */
-			ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
-
 			/* By default, the driver expects to support FCP FC4 */
 			if (fc4_type == FC_TYPE_FCP)
 				ndlp->nlp_fc4_type |= NLP_FC4_FCP;
@@ -485,10 +481,20 @@
 
 			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
 					 "0238 Process x%06x NameServer Rsp "
-					 "Data: x%x x%x x%x x%x\n", Did,
+					 "Data: x%x x%x x%x x%x x%x\n", Did,
 					 ndlp->nlp_flag, ndlp->nlp_fc4_type,
-					 vport->fc_flag,
+					 ndlp->nlp_state, vport->fc_flag,
 					 vport->fc_rscn_id_cnt);
+
+			/* if ndlp needs to be discovered and prior
+			 * state of ndlp hit devloss, change state to
+			 * allow rediscovery.
+			 */
+			if (ndlp->nlp_flag & NLP_NPR_2B_DISC &&
+			    ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
+				lpfc_nlp_set_state(vport, ndlp,
+						   NLP_STE_NPR_NODE);
+			}
 		} else {
 			lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
 				"Skip1 GID_FTrsp: did:x%x flg:x%x cnt:%d",
@@ -496,9 +502,9 @@
 
 			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
 					 "0239 Skip x%06x NameServer Rsp "
-					 "Data: x%x x%x\n", Did,
-					 vport->fc_flag,
-					 vport->fc_rscn_id_cnt);
+					 "Data: x%x x%x %p\n",
+					 Did, vport->fc_flag,
+					 vport->fc_rscn_id_cnt, ndlp);
 		}
 	} else {
 		if (!(vport->fc_flag & FC_RSCN_MODE) ||
@@ -545,7 +551,17 @@
 	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_nodelist *ndlp = NULL;
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	char *str;
 
+	if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_FT)
+		str = "GID_FT";
+	else
+		str = "GID_PT";
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+			 "6430 Process %s rsp for %08x type %x %s %s\n",
+			 str, Did, fc4_type,
+			 (fc4_type == FC_TYPE_FCP) ?  "FCP" : " ",
+			 (fc4_type == FC_TYPE_NVME) ?  "NVME" : " ");
 	/*
 	 * To conserve rpi's, filter out addresses for other
 	 * vports on the same physical HBAs.
@@ -746,9 +762,11 @@
 		if (CTrsp->CommandResponse.bits.CmdRsp ==
 		    cpu_to_be16(SLI_CT_RESPONSE_FS_ACC)) {
 			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
-					 "0208 NameServer Rsp Data: x%x x%x\n",
+					 "0208 NameServer Rsp Data: x%x x%x "
+					 "sz x%x\n",
 					 vport->fc_flag,
-					 CTreq->un.gid.Fc4Type);
+					 CTreq->un.gid.Fc4Type,
+					 irsp->un.genreq64.bdl.bdeSize);
 
 			lpfc_ns_rsp(vport,
 				    outp,
@@ -809,6 +827,11 @@
 		}
 		vport->gidft_inp--;
 	}
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+			 "4216 GID_FT cmpl inp %d disc %d\n",
+			 vport->gidft_inp, vport->num_disc_nodes);
+
 	/* Link up / RSCN discovery */
 	if ((vport->num_disc_nodes == 0) &&
 	    (vport->gidft_inp == 0)) {
@@ -837,6 +860,198 @@
 }
 
 static void
+lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+			struct lpfc_iocbq *rspiocb)
+{
+	struct lpfc_vport *vport = cmdiocb->vport;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	IOCB_t *irsp;
+	struct lpfc_dmabuf *outp;
+	struct lpfc_dmabuf *inp;
+	struct lpfc_sli_ct_request *CTrsp;
+	struct lpfc_sli_ct_request *CTreq;
+	struct lpfc_nodelist *ndlp;
+	int rc;
+
+	/* First save ndlp, before we overwrite it */
+	ndlp = cmdiocb->context_un.ndlp;
+
+	/* we pass cmdiocb to state machine which needs rspiocb as well */
+	cmdiocb->context_un.rsp_iocb = rspiocb;
+	inp = (struct lpfc_dmabuf *)cmdiocb->context1;
+	outp = (struct lpfc_dmabuf *)cmdiocb->context2;
+	irsp = &rspiocb->iocb;
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+			      "GID_PT cmpl:     status:x%x/x%x rtry:%d",
+			      irsp->ulpStatus, irsp->un.ulpWord[4],
+			      vport->fc_ns_retry);
+
+	/* Don't bother processing response if vport is being torn down. */
+	if (vport->load_flag & FC_UNLOADING) {
+		if (vport->fc_flag & FC_RSCN_MODE)
+			lpfc_els_flush_rscn(vport);
+		goto out;
+	}
+
+	if (lpfc_els_chk_latt(vport)) {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+				 "4108 Link event during NS query\n");
+		if (vport->fc_flag & FC_RSCN_MODE)
+			lpfc_els_flush_rscn(vport);
+		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+		goto out;
+	}
+	if (lpfc_error_lost_link(irsp)) {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+				 "4166 NS query failed due to link event\n");
+		if (vport->fc_flag & FC_RSCN_MODE)
+			lpfc_els_flush_rscn(vport);
+		goto out;
+	}
+
+	spin_lock_irq(shost->host_lock);
+	if (vport->fc_flag & FC_RSCN_DEFERRED) {
+		vport->fc_flag &= ~FC_RSCN_DEFERRED;
+		spin_unlock_irq(shost->host_lock);
+
+		/* This is a GID_PT completing so the gidft_inp counter was
+		 * incremented before the GID_PT was issued to the wire.
+		 */
+		vport->gidft_inp--;
+
+		/*
+		 * Skip processing the NS response
+		 * Re-issue the NS cmd
+		 */
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+				 "4167 Process Deferred RSCN Data: x%x x%x\n",
+				 vport->fc_flag, vport->fc_rscn_id_cnt);
+		lpfc_els_handle_rscn(vport);
+
+		goto out;
+	}
+	spin_unlock_irq(shost->host_lock);
+
+	if (irsp->ulpStatus) {
+		/* Check for retry */
+		if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
+			if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT ||
+			    (irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
+			    IOERR_NO_RESOURCES)
+				vport->fc_ns_retry++;
+
+			/* CT command is being retried */
+			vport->gidft_inp--;
+			rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_PT,
+					 vport->fc_ns_retry, GID_PT_N_PORT);
+			if (rc == 0)
+				goto out;
+		}
+		if (vport->fc_flag & FC_RSCN_MODE)
+			lpfc_els_flush_rscn(vport);
+		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "4103 GID_FT Query error: 0x%x 0x%x\n",
+				 irsp->ulpStatus, vport->fc_ns_retry);
+	} else {
+		/* Good status, continue checking */
+		CTreq = (struct lpfc_sli_ct_request *)inp->virt;
+		CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
+		if (CTrsp->CommandResponse.bits.CmdRsp ==
+		    cpu_to_be16(SLI_CT_RESPONSE_FS_ACC)) {
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+					 "4105 NameServer Rsp Data: x%x x%x\n",
+					 vport->fc_flag,
+					 CTreq->un.gid.Fc4Type);
+
+			lpfc_ns_rsp(vport,
+				    outp,
+				    CTreq->un.gid.Fc4Type,
+				    (uint32_t)(irsp->un.genreq64.bdl.bdeSize));
+		} else if (CTrsp->CommandResponse.bits.CmdRsp ==
+			   be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
+			/* NameServer Rsp Error */
+			if ((CTrsp->ReasonCode == SLI_CT_UNABLE_TO_PERFORM_REQ)
+			    && (CTrsp->Explanation == SLI_CT_NO_FC4_TYPES)) {
+				lpfc_printf_vlog(
+					vport, KERN_INFO, LOG_DISCOVERY,
+					"4106 No NameServer Entries "
+					"Data: x%x x%x x%x x%x\n",
+					CTrsp->CommandResponse.bits.CmdRsp,
+					(uint32_t)CTrsp->ReasonCode,
+					(uint32_t)CTrsp->Explanation,
+					vport->fc_flag);
+
+				lpfc_debugfs_disc_trc(
+				vport, LPFC_DISC_TRC_CT,
+				"GID_PT no entry  cmd:x%x rsn:x%x exp:x%x",
+				(uint32_t)CTrsp->CommandResponse.bits.CmdRsp,
+				(uint32_t)CTrsp->ReasonCode,
+				(uint32_t)CTrsp->Explanation);
+			} else {
+				lpfc_printf_vlog(
+					vport, KERN_INFO, LOG_DISCOVERY,
+					"4107 NameServer Rsp Error "
+					"Data: x%x x%x x%x x%x\n",
+					CTrsp->CommandResponse.bits.CmdRsp,
+					(uint32_t)CTrsp->ReasonCode,
+					(uint32_t)CTrsp->Explanation,
+					vport->fc_flag);
+
+				lpfc_debugfs_disc_trc(
+				vport, LPFC_DISC_TRC_CT,
+				"GID_PT rsp err1  cmd:x%x rsn:x%x exp:x%x",
+				(uint32_t)CTrsp->CommandResponse.bits.CmdRsp,
+				(uint32_t)CTrsp->ReasonCode,
+				(uint32_t)CTrsp->Explanation);
+			}
+		} else {
+			/* NameServer Rsp Error */
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+					 "4109 NameServer Rsp Error "
+					 "Data: x%x x%x x%x x%x\n",
+					 CTrsp->CommandResponse.bits.CmdRsp,
+					 (uint32_t)CTrsp->ReasonCode,
+					 (uint32_t)CTrsp->Explanation,
+					 vport->fc_flag);
+
+			lpfc_debugfs_disc_trc(
+				vport, LPFC_DISC_TRC_CT,
+				"GID_PT rsp err2  cmd:x%x rsn:x%x exp:x%x",
+				(uint32_t)CTrsp->CommandResponse.bits.CmdRsp,
+				(uint32_t)CTrsp->ReasonCode,
+				(uint32_t)CTrsp->Explanation);
+		}
+		vport->gidft_inp--;
+	}
+	/* Link up / RSCN discovery */
+	if ((vport->num_disc_nodes == 0) &&
+	    (vport->gidft_inp == 0)) {
+		/*
+		 * The driver has cycled through all Nports in the RSCN payload.
+		 * Complete the handling by cleaning up and marking the
+		 * current driver state.
+		 */
+		if (vport->port_state >= LPFC_DISC_AUTH) {
+			if (vport->fc_flag & FC_RSCN_MODE) {
+				lpfc_els_flush_rscn(vport);
+				spin_lock_irq(shost->host_lock);
+				vport->fc_flag |= FC_RSCN_MODE; /* RSCN still */
+				spin_unlock_irq(shost->host_lock);
+			} else {
+				lpfc_els_flush_rscn(vport);
+			}
+		}
+
+		lpfc_disc_start(vport);
+	}
+out:
+	cmdiocb->context_un.ndlp = ndlp; /* Now restore ndlp for free */
+	lpfc_ct_free_iocb(phba, cmdiocb);
+}
+
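
The error leg of lpfc_cmpl_ct_cmd_gid_pt() above retries the name-server
query up to LPFC_MAX_NS_RETRY times, but deliberately does not charge
IOERR_NO_RESOURCES rejections against the retry budget. A generic,
self-contained sketch of that policy; the status names and the flaky
issue_query() are made up for the example.

#include <stdio.h>

#define MAX_NS_RETRY 3

enum status { OK, ERR_NO_RESOURCES, ERR_OTHER };

static enum status issue_query(int attempt)
{
	/* Pretend the fabric is flaky: fail twice, then succeed. */
	return (attempt < 2) ? ERR_OTHER : OK;
}

int main(void)
{
	int retries = 0, attempt = 0;
	enum status s;

	for (;;) {
		s = issue_query(attempt++);
		if (s == OK) {
			printf("query succeeded after %d attempt(s)\n", attempt);
			return 0;
		}
		if (retries >= MAX_NS_RETRY)
			break;
		if (s != ERR_NO_RESOURCES)
			retries++;	/* resource stalls don't consume the cap */
	}
	printf("query failed; giving up\n");
	return 1;
}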
+static void
 lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 			struct lpfc_iocbq *rspiocb)
 {
@@ -862,6 +1077,13 @@
 		CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
 		fbits = CTrsp->un.gff_acc.fbits[FCP_TYPE_FEATURE_OFFSET];
 
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+				 "6431 Process GFF_ID rsp for %08x "
+				 "fbits %02x %s %s\n",
+				 did, fbits,
+				 (fbits & FC4_FEATURE_INIT) ? "Initiator" : " ",
+				 (fbits & FC4_FEATURE_TARGET) ? "Target" : " ");
+
 		if (CTrsp->CommandResponse.bits.CmdRsp ==
 		    be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) {
 			if ((fbits & FC4_FEATURE_INIT) &&
@@ -984,9 +1206,15 @@
 		CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
 		fc4_data_0 = be32_to_cpu(CTrsp->un.gft_acc.fc4_types[0]);
 		fc4_data_1 = be32_to_cpu(CTrsp->un.gft_acc.fc4_types[1]);
+
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
-				 "3062 DID x%06x GFT Wd0 x%08x Wd1 x%08x\n",
-				 did, fc4_data_0, fc4_data_1);
+				 "6432 Process GFT_ID rsp for %08x "
+				 "Data %08x %08x %s %s\n",
+				 did, fc4_data_0, fc4_data_1,
+				 (fc4_data_0 & LPFC_FC4_TYPE_BITMASK) ?
+				  "FCP" : " ",
+				 (fc4_data_1 & LPFC_FC4_TYPE_BITMASK) ?
+				  "NVME" : " ");
 
 		ndlp = lpfc_findnode_did(vport, did);
 		if (ndlp) {
@@ -999,14 +1227,34 @@
 			if (fc4_data_1 &  LPFC_FC4_TYPE_BITMASK)
 				ndlp->nlp_fc4_type |= NLP_FC4_NVME;
 			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
-					 "3064 Setting ndlp %p, DID x%06x with "
-					 "FC4 x%08x, Data: x%08x x%08x\n",
+					 "3064 Setting ndlp x%px, DID x%06x "
+					 "with FC4 x%08x, Data: x%08x x%08x "
+					 "%d\n",
 					 ndlp, did, ndlp->nlp_fc4_type,
-					 FC_TYPE_FCP, FC_TYPE_NVME);
-			ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
+					 FC_TYPE_FCP, FC_TYPE_NVME,
+					 ndlp->nlp_state);
 
-			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
-			lpfc_issue_els_prli(vport, ndlp, 0);
+			if (ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE &&
+			    ndlp->nlp_fc4_type) {
+				ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
+
+				lpfc_nlp_set_state(vport, ndlp,
+						   NLP_STE_PRLI_ISSUE);
+				lpfc_issue_els_prli(vport, ndlp, 0);
+			} else if (!ndlp->nlp_fc4_type) {
+				/* If fc4 type is still unknown, then LOGO */
+				lpfc_printf_vlog(vport, KERN_INFO,
+						 LOG_DISCOVERY,
+						 "6443 Sending LOGO ndlp x%px,"
+						 "DID x%06x with fc4_type: "
+						 "x%08x, state: %d\n",
+						 ndlp, did, ndlp->nlp_fc4_type,
+						 ndlp->nlp_state);
+				lpfc_issue_els_logo(vport, ndlp, 0);
+				ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
+				lpfc_nlp_set_state(vport, ndlp,
+						   NLP_STE_NPR_NODE);
+			}
 		}
 	} else
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
@@ -1220,7 +1468,7 @@
 	 * Name object.  NPIV is not in play so this integer
 	 * value is sufficient and unique per FC-ID.
 	 */
-	n = snprintf(symbol, size, "%d", vport->phba->brd_no);
+	n = scnprintf(symbol, size, "%d", vport->phba->brd_no);
 	return n;
 }
 
@@ -1234,26 +1482,26 @@
 
 	lpfc_decode_firmware_rev(vport->phba, fwrev, 0);
 
-	n = snprintf(symbol, size, "Emulex %s", vport->phba->ModelName);
+	n = scnprintf(symbol, size, "Emulex %s", vport->phba->ModelName);
 	if (size < n)
 		return n;
 
-	n += snprintf(symbol + n, size - n, " FV%s", fwrev);
+	n += scnprintf(symbol + n, size - n, " FV%s", fwrev);
 	if (size < n)
 		return n;
 
-	n += snprintf(symbol + n, size - n, " DV%s.",
+	n += scnprintf(symbol + n, size - n, " DV%s.",
 		      lpfc_release_version);
 	if (size < n)
 		return n;
 
-	n += snprintf(symbol + n, size - n, " HN:%s.",
+	n += scnprintf(symbol + n, size - n, " HN:%s.",
 		      init_utsname()->nodename);
 	if (size < n)
 		return n;
 
 	/* Note :- OS name is "Linux" */
-	n += snprintf(symbol + n, size - n, " OS:%s\n",
+	n += scnprintf(symbol + n, size - n, " OS:%s",
 		      init_utsname()->sysname);
 	return n;
 }
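
The snprintf()-to-scnprintf() conversions in this function (and throughout
the patch) hinge on a return-value difference: snprintf() reports the length
the output would have had, while the kernel's scnprintf() reports the bytes
actually written, so "n +=" style accumulation can never step past the
buffer. A minimal userspace sketch, with my_scnprintf() as a hypothetical
stand-in for the kernel helper (which is not available outside the kernel):

#include <stdarg.h>
#include <stdio.h>
#include <string.h>

static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int i;

	va_start(args, fmt);
	i = vsnprintf(buf, size, fmt, args);
	va_end(args);

	if (size == 0)
		return 0;
	/* Clamp to what actually landed in buf, unlike raw snprintf(). */
	return (i >= (int)size) ? (int)(size - 1) : i;
}

int main(void)
{
	char buf[16];
	int len = 0;

	/* scnprintf-style accumulation: len never exceeds the buffer.
	 * With snprintf() the second call would push len to 20, and a
	 * later "sizeof(buf) - len" would underflow to a huge size_t.
	 */
	len += my_scnprintf(buf + len, sizeof(buf) - len, "%s", "0123456789");
	len += my_scnprintf(buf + len, sizeof(buf) - len, "%s", "ABCDEFGHIJ");
	printf("len=%d buf=\"%s\"\n", len, buf);	/* len=15, safe truncation */
	return 0;
}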
@@ -1317,6 +1565,7 @@
 	struct ulp_bde64 *bpl;
 	void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
 		      struct lpfc_iocbq *) = NULL;
+	uint32_t *ptr;
 	uint32_t rsp_size = 1024;
 	size_t   size;
 	int rc = 0;
@@ -1370,6 +1619,8 @@
 	bpl->tus.f.bdeFlags = 0;
 	if (cmdcode == SLI_CTNS_GID_FT)
 		bpl->tus.f.bdeSize = GID_REQUEST_SZ;
+	else if (cmdcode == SLI_CTNS_GID_PT)
+		bpl->tus.f.bdeSize = GID_REQUEST_SZ;
 	else if (cmdcode == SLI_CTNS_GFF_ID)
 		bpl->tus.f.bdeSize = GFF_REQUEST_SZ;
 	else if (cmdcode == SLI_CTNS_GFT_ID)
@@ -1410,6 +1661,18 @@
 		rsp_size = FC_MAX_NS_RSP;
 		break;
 
+	case SLI_CTNS_GID_PT:
+		CtReq->CommandResponse.bits.CmdRsp =
+		    cpu_to_be16(SLI_CTNS_GID_PT);
+		CtReq->un.gid.PortType = context;
+
+		if (vport->port_state < LPFC_NS_QRY)
+			vport->port_state = LPFC_NS_QRY;
+		lpfc_set_disctmo(vport);
+		cmpl = lpfc_cmpl_ct_cmd_gid_pt;
+		rsp_size = FC_MAX_NS_RSP;
+		break;
+
 	case SLI_CTNS_GFF_ID:
 		CtReq->CommandResponse.bits.CmdRsp =
 			cpu_to_be16(SLI_CTNS_GFF_ID);
@@ -1431,18 +1694,28 @@
 		CtReq->un.rft.PortId = cpu_to_be32(vport->fc_myDID);
 
 		/* Register FC4 FCP type if enabled.  */
-		if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
-		    (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP))
+		if (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH ||
+		    vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)
 			CtReq->un.rft.fcpReg = 1;
 
 		/* Register NVME type if enabled.  Defined LE and swapped.
 		 * rsvd[0] is used as word1 because of the hard-coded
 		 * word0 usage in the ct_request data structure.
 		 */
-		if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
-		    (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME))
-			CtReq->un.rft.rsvd[0] = cpu_to_be32(0x00000100);
+		if (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH ||
+		    vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
+			CtReq->un.rft.rsvd[0] =
+				cpu_to_be32(LPFC_FC4_TYPE_BITMASK);
 
+		ptr = (uint32_t *)CtReq;
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+				 "6433 Issue RFT (%s %s): %08x %08x %08x %08x "
+				 "%08x %08x %08x %08x\n",
+				 CtReq->un.rft.fcpReg ? "FCP" : " ",
+				 CtReq->un.rft.rsvd[0] ? "NVME" : " ",
+				 *ptr, *(ptr + 1), *(ptr + 2), *(ptr + 3),
+				 *(ptr + 4), *(ptr + 5),
+				 *(ptr + 6), *(ptr + 7));
 		cmpl = lpfc_cmpl_ct_cmd_rft_id;
 		break;
 
@@ -1497,8 +1770,8 @@
 		 * caller can specify NVME (type x28) as well.  But only
 		 * if that FC4 type is supported.
 		 */
-		if (((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
-		     (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) &&
+		if (((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+		     (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) &&
 		    (context == FC_TYPE_NVME)) {
 			if ((vport == phba->pport) && phba->nvmet_support) {
 				CtReq->un.rff.fbits = (FC4_FEATURE_TARGET |
@@ -1509,14 +1782,22 @@
 			}
 			CtReq->un.rff.type_code = context;
 
-		} else if (((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
-			    (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) &&
+		} else if (((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+			    (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) &&
 			   (context == FC_TYPE_FCP))
 			CtReq->un.rff.type_code = context;
 
 		else
 			goto ns_cmd_free_bmpvirt;
 
+		ptr = (uint32_t *)CtReq;
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+				 "6434 Issue RFF (%s): %08x %08x %08x %08x "
+				 "%08x %08x %08x %08x\n",
+				 (context == FC_TYPE_NVME) ? "NVME" : "FCP",
+				 *ptr, *(ptr + 1), *(ptr + 2), *(ptr + 3),
+				 *(ptr + 4), *(ptr + 5),
+				 *(ptr + 6), *(ptr + 7));
 		cmpl = lpfc_cmpl_ct_cmd_rff_id;
 		break;
 	}
@@ -1762,6 +2043,9 @@
 	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
 	memset(ae, 0, 256);
 
+	/* This string MUST be consistent with other FC platforms
+	 * supported by Broadcom.
+	 */
 	strncpy(ae->un.AttrString,
 		"Emulex Corporation",
 		       sizeof(ae->un.AttrString));
@@ -2058,7 +2342,8 @@
 	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
 	memset(ae, 0, 256);
 
-	lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1);
+	strlcat(ae->un.AttrString, phba->BIOSVersion,
+		sizeof(ae->un.AttrString));
 	len = strnlen(ae->un.AttrString,
 			  sizeof(ae->un.AttrString));
 	len += (len & 3) ? (4 - (len & 3)) : 4;
@@ -2111,16 +2396,22 @@
 lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport,
 			    struct lpfc_fdmi_attr_def *ad)
 {
+	struct lpfc_hba   *phba = vport->phba;
 	struct lpfc_fdmi_attr_entry *ae;
 	uint32_t size;
 
 	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
 	memset(ae, 0, 32);
 
-	ae->un.AttrTypes[3] = 0x02; /* Type 1 - ELS */
-	ae->un.AttrTypes[2] = 0x01; /* Type 8 - FCP */
-	ae->un.AttrTypes[6] = 0x01; /* Type 40 - NVME */
-	ae->un.AttrTypes[7] = 0x01; /* Type 32 - CT */
+	ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */
+	ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */
+	ae->un.AttrTypes[7] = 0x01; /* Type 0x20 - CT */
+
+	/* Check to see if Firmware supports NVME and on physical port */
+	if ((phba->sli_rev == LPFC_SLI_REV4) && (vport == phba->pport) &&
+	    phba->sli4_hba.pc_sli4_params.nvme)
+		ae->un.AttrTypes[6] = 0x01; /* Type 0x28 - NVME */
+
 	size = FOURBYTES + 32;
 	ad->AttrLen = cpu_to_be16(size);
 	ad->AttrType = cpu_to_be16(RPRT_SUPPORTED_FC4_TYPES);
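
The AttrTypes[] indexing above is consistent with a big-endian FC-4 TYPEs
bitmap: each 32-bit word is stored big-endian, so FC-4 type T lands in word
T/32, byte 3 - ((T%32)/8) of that word, bit T%8. The small check below
reproduces the values used above (ELS=0x1, FCP=0x8, CT=0x20, NVME=0x28);
this layout is my reading of the code, not quoted from the FC-GS spec.

#include <stdio.h>

static void locate(unsigned int type)
{
	unsigned int word = type / 32;
	unsigned int byte = 4 * word + (3 - (type % 32) / 8);
	unsigned int bit  = type % 8;

	printf("type 0x%02x -> AttrTypes[%u] |= 0x%02x\n",
	       type, byte, 1u << bit);
}

int main(void)
{
	locate(0x01);	/* ELS  -> AttrTypes[3] |= 0x02 */
	locate(0x08);	/* FCP  -> AttrTypes[2] |= 0x01 */
	locate(0x20);	/* CT   -> AttrTypes[7] |= 0x01 */
	locate(0x28);	/* NVME -> AttrTypes[6] |= 0x01 */
	return 0;
}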
@@ -2139,6 +2430,8 @@
 
 	ae->un.AttrInt = 0;
 	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
+		if (phba->lmt & LMT_128Gb)
+			ae->un.AttrInt |= HBA_PORTSPEED_128GFC;
 		if (phba->lmt & LMT_64Gb)
 			ae->un.AttrInt |= HBA_PORTSPEED_64GFC;
 		if (phba->lmt & LMT_32Gb)
@@ -2215,6 +2508,9 @@
 		case LPFC_LINK_SPEED_64GHZ:
 			ae->un.AttrInt = HBA_PORTSPEED_64GFC;
 			break;
+		case LPFC_LINK_SPEED_128GHZ:
+			ae->un.AttrInt = HBA_PORTSPEED_128GFC;
+			break;
 		default:
 			ae->un.AttrInt = HBA_PORTSPEED_UNKNOWN;
 			break;
@@ -2257,7 +2553,7 @@
 	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
 
 	hsp = (struct serv_parm *)&vport->fc_sparam;
-	ae->un.AttrInt = (((uint32_t) hsp->cmn.bbRcvSizeMsb) << 8) |
+	ae->un.AttrInt = (((uint32_t) hsp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
 			  (uint32_t) hsp->cmn.bbRcvSizeLsb;
 	ae->un.AttrInt = cpu_to_be32(ae->un.AttrInt);
 	size = FOURBYTES + sizeof(uint32_t);
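
The new "& 0x0F" on bbRcvSizeMsb reflects that the BB receive data field
size is a 12-bit quantity, so the upper nibble of the MSB byte must not
leak into the computed size; the interpretation of those upper bits as
unrelated flag bits is my inference from the mask, not quoted from the
spec. A quick standalone check of the arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical values: upper nibble of the MSB holds other bits. */
	uint8_t msb = 0x88, lsb = 0x00;

	uint32_t unmasked = ((uint32_t)msb << 8) | lsb;          /* 0x8800 */
	uint32_t masked   = (((uint32_t)msb & 0x0F) << 8) | lsb; /* 0x0800 */

	printf("unmasked=0x%04x masked=0x%04x (%u bytes)\n",
	       unmasked, masked, masked);	/* masked = 2048 bytes */
	return 0;
}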
@@ -2425,9 +2721,14 @@
 	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
 	memset(ae, 0, 32);
 
-	ae->un.AttrTypes[3] = 0x02; /* Type 1 - ELS */
-	ae->un.AttrTypes[2] = 0x01; /* Type 8 - FCP */
-	ae->un.AttrTypes[7] = 0x01; /* Type 32 - CT */
+	ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */
+	ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */
+	ae->un.AttrTypes[7] = 0x01; /* Type 0x20 - CT */
+
+	/* Check to see if NVME is configured or not */
+	if (vport->phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+		ae->un.AttrTypes[6] = 0x1; /* Type 0x28 - NVME */
+
 	size = FOURBYTES + 32;
 	ad->AttrLen = cpu_to_be16(size);
 	ad->AttrType = cpu_to_be16(RPRT_ACTIVE_FC4_TYPES);
@@ -2844,6 +3145,7 @@
 	case SLI_MGMT_GHAT:
 	case SLI_MGMT_GRPL:
 		rsp_size = FC_MAX_NS_RSP;
+		/* fall through */
 	case SLI_MGMT_DHBA:
 	case SLI_MGMT_DHAT:
 		pe = (struct lpfc_fdmi_port_entry *)&CtReq->un.PortID;
@@ -2856,6 +3158,7 @@
 	case SLI_MGMT_GPAT:
 	case SLI_MGMT_GPAS:
 		rsp_size = FC_MAX_NS_RSP;
+		/* fall through */
 	case SLI_MGMT_DPRT:
 	case SLI_MGMT_DPA:
 		pe = (struct lpfc_fdmi_port_entry *)&CtReq->un.PortID;
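
The "/* fall through */" comments added above mark deliberate omissions of
"break": the GET-style cases set a larger response size and then share the
DELETE-style cases' port-ID setup, and the annotation keeps
-Wimplicit-fallthrough quiet. A compact sketch of the same shape (command
numbers and sizes are illustrative only):

#include <stdio.h>

int main(void)
{
	int cmd = 1, rsp_size = 0, port_set = 0;

	switch (cmd) {
	case 1:				/* "get" style command */
		rsp_size = 4096;
		/* fall through */
	case 2:				/* "delete" style command */
		port_set = 1;		/* shared tail: fill in the port ID */
		break;
	default:
		break;
	}
	printf("rsp_size=%d port_set=%d\n", rsp_size, port_set);
	return 0;
}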
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index ca6c398..8d34be6 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.  *
  * Copyright (C) 2007-2015 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -170,7 +170,7 @@
 		snprintf(buffer,
 			LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n",
 			dtp->seq_cnt, ms, dtp->fmt);
-		len +=  snprintf(buf+len, size-len, buffer,
+		len +=  scnprintf(buf+len, size-len, buffer,
 			dtp->data1, dtp->data2, dtp->data3);
 	}
 	for (i = 0; i < index; i++) {
@@ -181,7 +181,7 @@
 		snprintf(buffer,
 			LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n",
 			dtp->seq_cnt, ms, dtp->fmt);
-		len +=  snprintf(buf+len, size-len, buffer,
+		len +=  scnprintf(buf+len, size-len, buffer,
 			dtp->data1, dtp->data2, dtp->data3);
 	}
 
@@ -236,7 +236,7 @@
 		snprintf(buffer,
 			LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n",
 			dtp->seq_cnt, ms, dtp->fmt);
-		len +=  snprintf(buf+len, size-len, buffer,
+		len +=  scnprintf(buf+len, size-len, buffer,
 			dtp->data1, dtp->data2, dtp->data3);
 	}
 	for (i = 0; i < index; i++) {
@@ -247,7 +247,7 @@
 		snprintf(buffer,
 			LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n",
 			dtp->seq_cnt, ms, dtp->fmt);
-		len +=  snprintf(buf+len, size-len, buffer,
+		len +=  scnprintf(buf+len, size-len, buffer,
 			dtp->data1, dtp->data2, dtp->data3);
 	}
 
@@ -307,7 +307,7 @@
 
 	i = lpfc_debugfs_last_hbq;
 
-	len +=  snprintf(buf+len, size-len, "HBQ %d Info\n", i);
+	len +=  scnprintf(buf+len, size-len, "HBQ %d Info\n", i);
 
 	hbqs =  &phba->hbqs[i];
 	posted = 0;
@@ -315,21 +315,21 @@
 		posted++;
 
 	hip =  lpfc_hbq_defs[i];
-	len +=  snprintf(buf+len, size-len,
+	len +=  scnprintf(buf+len, size-len,
 		"idx:%d prof:%d rn:%d bufcnt:%d icnt:%d acnt:%d posted %d\n",
 		hip->hbq_index, hip->profile, hip->rn,
 		hip->buffer_count, hip->init_count, hip->add_count, posted);
 
 	raw_index = phba->hbq_get[i];
 	getidx = le32_to_cpu(raw_index);
-	len +=  snprintf(buf+len, size-len,
+	len +=  scnprintf(buf+len, size-len,
 		"entries:%d bufcnt:%d Put:%d nPut:%d localGet:%d hbaGet:%d\n",
 		hbqs->entry_count, hbqs->buffer_count, hbqs->hbqPutIdx,
 		hbqs->next_hbqPutIdx, hbqs->local_hbqGetIdx, getidx);
 
 	hbqe = (struct lpfc_hbq_entry *) phba->hbqs[i].hbq_virt;
 	for (j=0; j<hbqs->entry_count; j++) {
-		len +=  snprintf(buf+len, size-len,
+		len +=  scnprintf(buf+len, size-len,
 			"%03d: %08x %04x %05x ", j,
 			le32_to_cpu(hbqe->bde.addrLow),
 			le32_to_cpu(hbqe->bde.tus.w),
@@ -341,14 +341,16 @@
 		low = hbqs->hbqPutIdx - posted;
 		if (low >= 0) {
 			if ((j >= hbqs->hbqPutIdx) || (j < low)) {
-				len +=  snprintf(buf+len, size-len, "Unused\n");
+				len +=  scnprintf(buf + len, size - len,
+						"Unused\n");
 				goto skipit;
 			}
 		}
 		else {
 			if ((j >= hbqs->hbqPutIdx) &&
 				(j < (hbqs->entry_count+low))) {
-				len +=  snprintf(buf+len, size-len, "Unused\n");
+				len +=  scnprintf(buf + len, size - len,
+						"Unused\n");
 				goto skipit;
 			}
 		}
@@ -358,8 +360,8 @@
 			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
 			phys = ((uint64_t)hbq_buf->dbuf.phys & 0xffffffff);
 			if (phys == le32_to_cpu(hbqe->bde.addrLow)) {
-				len +=  snprintf(buf+len, size-len,
-					"Buf%d: %p %06x\n", i,
+				len +=  scnprintf(buf+len, size-len,
+					"Buf%d: x%px %06x\n", i,
 					hbq_buf->dbuf.virt, hbq_buf->tag);
 				found = 1;
 				break;
@@ -367,7 +369,7 @@
 			i++;
 		}
 		if (!found) {
-			len +=  snprintf(buf+len, size-len, "No DMAinfo?\n");
+			len +=  scnprintf(buf+len, size-len, "No DMAinfo?\n");
 		}
 skipit:
 		hbqe++;
@@ -378,6 +380,267 @@
 	return len;
 }
 
+static int lpfc_debugfs_last_xripool;
+
+/**
+ * lpfc_debugfs_commonxripools_data - Dump Hardware Queue info to a buffer
+ * @phba: The HBA to gather host buffer info from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine dumps the Hardware Queue info from the @phba to @buf up to
+ * @size number of bytes. A header that describes the current hdwq state will be
+ * dumped to @buf first and then info on each hdwq entry will be dumped to @buf
+ * until @size bytes have been dumped or all the hdwq info has been dumped.
+ *
+ * Notes:
+ * This routine will rotate through each configured Hardware Queue each
+ * time called.
+ *
+ * Return Value:
+ * This routine returns the number of bytes that were dumped into @buf and will
+ * not exceed @size.
+ **/
+static int
+lpfc_debugfs_commonxripools_data(struct lpfc_hba *phba, char *buf, int size)
+{
+	struct lpfc_sli4_hdw_queue *qp;
+	int len = 0;
+	int i, out;
+	unsigned long iflag;
+
+	for (i = 0; i < phba->cfg_hdw_queue; i++) {
+		if (len > (LPFC_DUMP_MULTIXRIPOOL_SIZE - 80))
+			break;
+		qp = &phba->sli4_hba.hdwq[lpfc_debugfs_last_xripool];
+
+		len += scnprintf(buf + len, size - len, "HdwQ %d Info ", i);
+		spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
+		spin_lock(&qp->io_buf_list_get_lock);
+		spin_lock(&qp->io_buf_list_put_lock);
+		out = qp->total_io_bufs - (qp->get_io_bufs + qp->put_io_bufs +
+			qp->abts_scsi_io_bufs + qp->abts_nvme_io_bufs);
+		len += scnprintf(buf + len, size - len,
+				 "tot:%d get:%d put:%d mt:%d "
+				 "ABTS scsi:%d nvme:%d Out:%d\n",
+			qp->total_io_bufs, qp->get_io_bufs, qp->put_io_bufs,
+			qp->empty_io_bufs, qp->abts_scsi_io_bufs,
+			qp->abts_nvme_io_bufs, out);
+		spin_unlock(&qp->io_buf_list_put_lock);
+		spin_unlock(&qp->io_buf_list_get_lock);
+		spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
+
+		lpfc_debugfs_last_xripool++;
+		if (lpfc_debugfs_last_xripool >= phba->cfg_hdw_queue)
+			lpfc_debugfs_last_xripool = 0;
+	}
+
+	return len;
+}
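
lpfc_debugfs_commonxripools_data() above uses a rotating static cursor so
that repeated reads of a size-bounded debugfs buffer eventually cover every
hardware queue, resuming where the previous read stopped. A minimal sketch
of that pattern (names and the line-size guard value are illustrative; the
kernel code uses scnprintf() rather than snprintf()):

#include <stdio.h>

#define NUM_QUEUES 4

static int last_queue;	/* persists across calls, like lpfc_debugfs_last_xripool */

static int dump_queues(char *buf, int size)
{
	int len = 0, i;

	for (i = 0; i < NUM_QUEUES; i++) {
		if (len > size - 32)	/* stop while a full line still fits */
			break;
		len += snprintf(buf + len, size - len,
				"queue %d stats...\n", last_queue);
		if (++last_queue >= NUM_QUEUES)
			last_queue = 0;	/* wrap so the next call resumes here */
	}
	return len;
}

int main(void)
{
	char buf[256];
	int n = dump_queues(buf, sizeof(buf));

	fwrite(buf, 1, n, stdout);
	return 0;
}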
+
+/**
+ * lpfc_debugfs_multixripools_data - Display multi-XRI pools information
+ * @phba: The HBA to gather host buffer info from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine displays current multi-XRI pools information including XRI
+ * count in public, private and txcmplq. It also displays current high and
+ * low watermark.
+ *
+ * Return Value:
+ * This routine returns the number of bytes that were dumped into @buf and will
+ * not exceed @size.
+ **/
+static int
+lpfc_debugfs_multixripools_data(struct lpfc_hba *phba, char *buf, int size)
+{
+	u32 i;
+	u32 hwq_count;
+	struct lpfc_sli4_hdw_queue *qp;
+	struct lpfc_multixri_pool *multixri_pool;
+	struct lpfc_pvt_pool *pvt_pool;
+	struct lpfc_pbl_pool *pbl_pool;
+	u32 txcmplq_cnt;
+	char tmp[LPFC_DEBUG_OUT_LINE_SZ] = {0};
+
+	if (phba->sli_rev != LPFC_SLI_REV4)
+		return 0;
+
+	if (!phba->sli4_hba.hdwq)
+		return 0;
+
+	if (!phba->cfg_xri_rebalancing) {
+		i = lpfc_debugfs_commonxripools_data(phba, buf, size);
+		return i;
+	}
+
+	/*
+	 * Pbl: Current number of free XRIs in public pool
+	 * Pvt: Current number of free XRIs in private pool
+	 * Busy: Current number of outstanding XRIs
+	 * HWM: Current high watermark
+	 * pvt_empty: Incremented by 1 when IO submission fails (no xri)
+	 * pbl_empty: Incremented by 1 when all pbl_pool are empty during
+	 *            IO submission
+	 */
+	scnprintf(tmp, sizeof(tmp),
+		  "HWQ:  Pbl  Pvt Busy  HWM |  pvt_empty  pbl_empty ");
+	if (strlcat(buf, tmp, size) >= size)
+		return strnlen(buf, size);
+
+#ifdef LPFC_MXP_STAT
+	/*
+	 * MAXH: Max high watermark seen so far
+	 * above_lmt: Incremented by 1 if xri_owned > xri_limit during
+	 *            IO submission
+	 * below_lmt: Incremented by 1 if xri_owned <= xri_limit  during
+	 *            IO submission
+	 * locPbl_hit: Incremented by 1 if successfully get a batch of XRI from
+	 *             local pbl_pool
+	 * othPbl_hit: Incremented by 1 if successfully get a batch of XRI from
+	 *             other pbl_pool
+	 */
+	scnprintf(tmp, sizeof(tmp),
+		  "MAXH  above_lmt  below_lmt locPbl_hit othPbl_hit");
+	if (strlcat(buf, tmp, size) >= size)
+		return strnlen(buf, size);
+
+	/*
+	 * sPbl: snapshot of Pbl 15 sec after stat gets cleared
+	 * sPvt: snapshot of Pvt 15 sec after stat gets cleared
+	 * sBusy: snapshot of Busy 15 sec after stat gets cleared
+	 */
+	scnprintf(tmp, sizeof(tmp),
+		  " | sPbl sPvt sBusy");
+	if (strlcat(buf, tmp, size) >= size)
+		return strnlen(buf, size);
+#endif
+
+	scnprintf(tmp, sizeof(tmp), "\n");
+	if (strlcat(buf, tmp, size) >= size)
+		return strnlen(buf, size);
+
+	hwq_count = phba->cfg_hdw_queue;
+	for (i = 0; i < hwq_count; i++) {
+		qp = &phba->sli4_hba.hdwq[i];
+		multixri_pool = qp->p_multixri_pool;
+		if (!multixri_pool)
+			continue;
+		pbl_pool = &multixri_pool->pbl_pool;
+		pvt_pool = &multixri_pool->pvt_pool;
+		txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
+
+		scnprintf(tmp, sizeof(tmp),
+			  "%03d: %4d %4d %4d %4d | %10d %10d ",
+			  i, pbl_pool->count, pvt_pool->count,
+			  txcmplq_cnt, pvt_pool->high_watermark,
+			  qp->empty_io_bufs, multixri_pool->pbl_empty_count);
+		if (strlcat(buf, tmp, size) >= size)
+			break;
+
+#ifdef LPFC_MXP_STAT
+		scnprintf(tmp, sizeof(tmp),
+			  "%4d %10d %10d %10d %10d",
+			  multixri_pool->stat_max_hwm,
+			  multixri_pool->above_limit_count,
+			  multixri_pool->below_limit_count,
+			  multixri_pool->local_pbl_hit_count,
+			  multixri_pool->other_pbl_hit_count);
+		if (strlcat(buf, tmp, size) >= size)
+			break;
+
+		scnprintf(tmp, sizeof(tmp),
+			  " | %4d %4d %5d",
+			  multixri_pool->stat_pbl_count,
+			  multixri_pool->stat_pvt_count,
+			  multixri_pool->stat_busy_count);
+		if (strlcat(buf, tmp, size) >= size)
+			break;
+#endif
+
+		scnprintf(tmp, sizeof(tmp), "\n");
+		if (strlcat(buf, tmp, size) >= size)
+			break;
+	}
+	return strnlen(buf, size);
+}
+
+
+#ifdef LPFC_HDWQ_LOCK_STAT
+static int lpfc_debugfs_last_lock;
+
+/**
+ * lpfc_debugfs_lockstat_data - Dump hardware queue lock statistics to a buffer
+ * @phba: The HBA to gather host buffer info from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine dumps the per-hardware-queue lock conflict counters from the
+ * @phba to @buf up to @size number of bytes. Info on each hdwq entry will be
+ * dumped to @buf until @size bytes have been dumped or all the hdwq info has
+ * been dumped.
+ *
+ * Notes:
+ * This routine will rotate through each configured Hardware Queue each
+ * time called.
+ *
+ * Return Value:
+ * This routine returns the number of bytes that were dumped into @buf and will
+ * not exceed @size.
+ **/
+static int
+lpfc_debugfs_lockstat_data(struct lpfc_hba *phba, char *buf, int size)
+{
+	struct lpfc_sli4_hdw_queue *qp;
+	int len = 0;
+	int i;
+
+	if (phba->sli_rev != LPFC_SLI_REV4)
+		return 0;
+
+	if (!phba->sli4_hba.hdwq)
+		return 0;
+
+	for (i = 0; i < phba->cfg_hdw_queue; i++) {
+		if (len > (LPFC_HDWQINFO_SIZE - 100))
+			break;
+		qp = &phba->sli4_hba.hdwq[lpfc_debugfs_last_lock];
+
+		len += scnprintf(buf + len, size - len, "HdwQ %03d Lock ", i);
+		if (phba->cfg_xri_rebalancing) {
+			len += scnprintf(buf + len, size - len,
+					 "get_pvt:%d mv_pvt:%d "
+					 "mv2pub:%d mv2pvt:%d "
+					 "put_pvt:%d put_pub:%d wq:%d\n",
+					 qp->lock_conflict.alloc_pvt_pool,
+					 qp->lock_conflict.mv_from_pvt_pool,
+					 qp->lock_conflict.mv_to_pub_pool,
+					 qp->lock_conflict.mv_to_pvt_pool,
+					 qp->lock_conflict.free_pvt_pool,
+					 qp->lock_conflict.free_pub_pool,
+					 qp->lock_conflict.wq_access);
+		} else {
+			len += scnprintf(buf + len, size - len,
+					 "get:%d put:%d free:%d wq:%d\n",
+					 qp->lock_conflict.alloc_xri_get,
+					 qp->lock_conflict.alloc_xri_put,
+					 qp->lock_conflict.free_xri,
+					 qp->lock_conflict.wq_access);
+		}
+
+		lpfc_debugfs_last_lock++;
+		if (lpfc_debugfs_last_lock >= phba->cfg_hdw_queue)
+			lpfc_debugfs_last_lock = 0;
+	}
+
+	return len;
+}
+#endif
+
 static int lpfc_debugfs_last_hba_slim_off;
 
 /**
@@ -413,7 +676,7 @@
 	off = 0;
 	spin_lock_irq(&phba->hbalock);
 
-	len +=  snprintf(buf+len, size-len, "HBA SLIM\n");
+	len +=  scnprintf(buf+len, size-len, "HBA SLIM\n");
 	lpfc_memcpy_from_slim(buffer,
 		phba->MBslimaddr + lpfc_debugfs_last_hba_slim_off, 1024);
 
@@ -427,7 +690,7 @@
 
 	i = 1024;
 	while (i > 0) {
-		len +=  snprintf(buf+len, size-len,
+		len +=  scnprintf(buf+len, size-len,
 		"%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
 		off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4),
 		*(ptr+5), *(ptr+6), *(ptr+7));
@@ -471,11 +734,11 @@
 	off = 0;
 	spin_lock_irq(&phba->hbalock);
 
-	len +=  snprintf(buf+len, size-len, "SLIM Mailbox\n");
+	len +=  scnprintf(buf+len, size-len, "SLIM Mailbox\n");
 	ptr = (uint32_t *)phba->slim2p.virt;
 	i = sizeof(MAILBOX_t);
 	while (i > 0) {
-		len +=  snprintf(buf+len, size-len,
+		len +=  scnprintf(buf+len, size-len,
 		"%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
 		off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4),
 		*(ptr+5), *(ptr+6), *(ptr+7));
@@ -484,11 +747,11 @@
 		off += (8 * sizeof(uint32_t));
 	}
 
-	len +=  snprintf(buf+len, size-len, "SLIM PCB\n");
+	len +=  scnprintf(buf+len, size-len, "SLIM PCB\n");
 	ptr = (uint32_t *)phba->pcb;
 	i = sizeof(PCB_t);
 	while (i > 0) {
-		len +=  snprintf(buf+len, size-len,
+		len +=  scnprintf(buf+len, size-len,
 		"%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
 		off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4),
 		*(ptr+5), *(ptr+6), *(ptr+7));
@@ -501,7 +764,7 @@
 		for (i = 0; i < 4; i++) {
 			pgpp = &phba->port_gp[i];
 			pring = &psli->sli3_ring[i];
-			len +=  snprintf(buf+len, size-len,
+			len +=  scnprintf(buf+len, size-len,
 					 "Ring %d: CMD GetInx:%d "
 					 "(Max:%d Next:%d "
 					 "Local:%d flg:x%x)  "
@@ -518,7 +781,7 @@
 		word1 = readl(phba->CAregaddr);
 		word2 = readl(phba->HSregaddr);
 		word3 = readl(phba->HCregaddr);
-		len +=  snprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x "
+		len +=  scnprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x "
 				 "HC:%08x\n", word0, word1, word2, word3);
 	}
 	spin_unlock_irq(&phba->hbalock);
@@ -550,19 +813,18 @@
 	struct lpfc_nodelist *ndlp;
 	unsigned char *statep;
 	struct nvme_fc_local_port *localport;
-	struct lpfc_nvmet_tgtport *tgtp;
 	struct nvme_fc_remote_port *nrport = NULL;
 	struct lpfc_nvme_rport *rport;
 
 	cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE);
 	outio = 0;
 
-	len += snprintf(buf+len, size-len, "\nFCP Nodelist Entries ...\n");
+	len += scnprintf(buf+len, size-len, "\nFCP Nodelist Entries ...\n");
 	spin_lock_irq(shost->host_lock);
 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
 		iocnt = 0;
 		if (!cnt) {
-			len +=  snprintf(buf+len, size-len,
+			len +=  scnprintf(buf+len, size-len,
 				"Missing Nodelist Entries\n");
 			break;
 		}
@@ -600,62 +862,63 @@
 		default:
 			statep = "UNKNOWN";
 		}
-		len += snprintf(buf+len, size-len, "%s DID:x%06x ",
+		len += scnprintf(buf+len, size-len, "%s DID:x%06x ",
 				statep, ndlp->nlp_DID);
-		len += snprintf(buf+len, size-len,
+		len += scnprintf(buf+len, size-len,
 				"WWPN x%llx ",
 				wwn_to_u64(ndlp->nlp_portname.u.wwn));
-		len += snprintf(buf+len, size-len,
+		len += scnprintf(buf+len, size-len,
 				"WWNN x%llx ",
 				wwn_to_u64(ndlp->nlp_nodename.u.wwn));
 		if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
-			len += snprintf(buf+len, size-len, "RPI:%03d ",
+			len += scnprintf(buf+len, size-len, "RPI:%03d ",
 					ndlp->nlp_rpi);
 		else
-			len += snprintf(buf+len, size-len, "RPI:none ");
-		len +=  snprintf(buf+len, size-len, "flag:x%08x ",
+			len += scnprintf(buf+len, size-len, "RPI:none ");
+		len +=  scnprintf(buf+len, size-len, "flag:x%08x ",
 			ndlp->nlp_flag);
 		if (!ndlp->nlp_type)
-			len += snprintf(buf+len, size-len, "UNKNOWN_TYPE ");
+			len += scnprintf(buf+len, size-len, "UNKNOWN_TYPE ");
 		if (ndlp->nlp_type & NLP_FC_NODE)
-			len += snprintf(buf+len, size-len, "FC_NODE ");
+			len += scnprintf(buf+len, size-len, "FC_NODE ");
 		if (ndlp->nlp_type & NLP_FABRIC) {
-			len += snprintf(buf+len, size-len, "FABRIC ");
+			len += scnprintf(buf+len, size-len, "FABRIC ");
 			iocnt = 0;
 		}
 		if (ndlp->nlp_type & NLP_FCP_TARGET)
-			len += snprintf(buf+len, size-len, "FCP_TGT sid:%d ",
+			len += scnprintf(buf+len, size-len, "FCP_TGT sid:%d ",
 				ndlp->nlp_sid);
 		if (ndlp->nlp_type & NLP_FCP_INITIATOR)
-			len += snprintf(buf+len, size-len, "FCP_INITIATOR ");
+			len += scnprintf(buf+len, size-len, "FCP_INITIATOR ");
 		if (ndlp->nlp_type & NLP_NVME_TARGET)
-			len += snprintf(buf + len,
+			len += scnprintf(buf + len,
 					size - len, "NVME_TGT sid:%d ",
 					NLP_NO_SID);
 		if (ndlp->nlp_type & NLP_NVME_INITIATOR)
-			len += snprintf(buf + len,
+			len += scnprintf(buf + len,
 					size - len, "NVME_INITIATOR ");
-		len += snprintf(buf+len, size-len, "usgmap:%x ",
+		len += scnprintf(buf+len, size-len, "usgmap:%x ",
 			ndlp->nlp_usg_map);
-		len += snprintf(buf+len, size-len, "refcnt:%x",
+		len += scnprintf(buf+len, size-len, "refcnt:%x",
 			kref_read(&ndlp->kref));
 		if (iocnt) {
 			i = atomic_read(&ndlp->cmd_pending);
-			len += snprintf(buf + len, size - len,
+			len += scnprintf(buf + len, size - len,
 					" OutIO:x%x Qdepth x%x",
 					i, ndlp->cmd_qdepth);
 			outio += i;
 		}
-		len +=  snprintf(buf+len, size-len, "\n");
+		len += scnprintf(buf + len, size - len, "defer:%x ",
+			ndlp->nlp_defer_did);
+		len +=  scnprintf(buf+len, size-len, "\n");
 	}
 	spin_unlock_irq(shost->host_lock);
 
-	len += snprintf(buf + len, size - len,
+	len += scnprintf(buf + len, size - len,
 			"\nOutstanding IO x%x\n",  outio);
 
 	if (phba->nvmet_support && phba->targetport && (vport == phba->pport)) {
-		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
-		len += snprintf(buf + len, size - len,
+		len += scnprintf(buf + len, size - len,
 				"\nNVME Targetport Entry ...\n");
 
 		/* Port state is only one of two values for now. */
@@ -663,18 +926,18 @@
 			statep = "REGISTERED";
 		else
 			statep = "INIT";
-		len += snprintf(buf + len, size - len,
+		len += scnprintf(buf + len, size - len,
 				"TGT WWNN x%llx WWPN x%llx State %s\n",
 				wwn_to_u64(vport->fc_nodename.u.wwn),
 				wwn_to_u64(vport->fc_portname.u.wwn),
 				statep);
-		len += snprintf(buf + len, size - len,
+		len += scnprintf(buf + len, size - len,
 				"    Targetport DID x%06x\n",
 				phba->targetport->port_id);
 		goto out_exit;
 	}
 
-	len += snprintf(buf + len, size - len,
+	len += scnprintf(buf + len, size - len,
 				"\nNVME Lport/Rport Entries ...\n");
 
 	localport = vport->localport;
@@ -689,11 +952,11 @@
 	else
 		statep = "UNKNOWN ";
 
-	len += snprintf(buf + len, size - len,
+	len += scnprintf(buf + len, size - len,
 			"Lport DID x%06x PortState %s\n",
 			localport->port_id, statep);
 
-	len += snprintf(buf + len, size - len, "\tRport List:\n");
+	len += scnprintf(buf + len, size - len, "\tRport List:\n");
 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
 		/* local short-hand pointer. */
 		spin_lock(&phba->hbalock);
@@ -720,32 +983,32 @@
 		}
 
 		/* Tab in to show lport ownership. */
-		len += snprintf(buf + len, size - len,
+		len += scnprintf(buf + len, size - len,
 				"\t%s Port ID:x%06x ",
 				statep, nrport->port_id);
-		len += snprintf(buf + len, size - len, "WWPN x%llx ",
+		len += scnprintf(buf + len, size - len, "WWPN x%llx ",
 				nrport->port_name);
-		len += snprintf(buf + len, size - len, "WWNN x%llx ",
+		len += scnprintf(buf + len, size - len, "WWNN x%llx ",
 				nrport->node_name);
 
 		/* An NVME rport can have multiple roles. */
 		if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR)
-			len +=  snprintf(buf + len, size - len,
+			len +=  scnprintf(buf + len, size - len,
 					 "INITIATOR ");
 		if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET)
-			len +=  snprintf(buf + len, size - len,
+			len +=  scnprintf(buf + len, size - len,
 					 "TARGET ");
 		if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY)
-			len +=  snprintf(buf + len, size - len,
+			len +=  scnprintf(buf + len, size - len,
 					 "DISCSRVC ");
 		if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR |
 					  FC_PORT_ROLE_NVME_TARGET |
 					  FC_PORT_ROLE_NVME_DISCOVERY))
-			len +=  snprintf(buf + len, size - len,
+			len +=  scnprintf(buf + len, size - len,
 					 "UNKNOWN ROLE x%x",
 					 nrport->port_role);
 		/* Terminate the string. */
-		len +=  snprintf(buf + len, size - len, "\n");
+		len +=  scnprintf(buf + len, size - len, "\n");
 	}
 
 	spin_unlock_irq(shost->host_lock);
@@ -773,46 +1036,46 @@
 	struct lpfc_nvmet_tgtport *tgtp;
 	struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
 	struct nvme_fc_local_port *localport;
-	struct lpfc_nvme_ctrl_stat *cstat;
+	struct lpfc_fc4_ctrl_stat *cstat;
 	struct lpfc_nvme_lport *lport;
 	uint64_t data1, data2, data3;
 	uint64_t tot, totin, totout;
-	int cnt, i, maxch;
+	int cnt, i;
 	int len = 0;
 
 	if (phba->nvmet_support) {
 		if (!phba->targetport)
 			return len;
 		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
-		len += snprintf(buf + len, size - len,
+		len += scnprintf(buf + len, size - len,
 				"\nNVME Targetport Statistics\n");
 
-		len += snprintf(buf + len, size - len,
+		len += scnprintf(buf + len, size - len,
 				"LS: Rcv %08x Drop %08x Abort %08x\n",
 				atomic_read(&tgtp->rcv_ls_req_in),
 				atomic_read(&tgtp->rcv_ls_req_drop),
 				atomic_read(&tgtp->xmt_ls_abort));
 		if (atomic_read(&tgtp->rcv_ls_req_in) !=
 		    atomic_read(&tgtp->rcv_ls_req_out)) {
-			len += snprintf(buf + len, size - len,
+			len += scnprintf(buf + len, size - len,
 					"Rcv LS: in %08x != out %08x\n",
 					atomic_read(&tgtp->rcv_ls_req_in),
 					atomic_read(&tgtp->rcv_ls_req_out));
 		}
 
-		len += snprintf(buf + len, size - len,
+		len += scnprintf(buf + len, size - len,
 				"LS: Xmt %08x Drop %08x Cmpl %08x\n",
 				atomic_read(&tgtp->xmt_ls_rsp),
 				atomic_read(&tgtp->xmt_ls_drop),
 				atomic_read(&tgtp->xmt_ls_rsp_cmpl));
 
-		len += snprintf(buf + len, size - len,
+		len += scnprintf(buf + len, size - len,
 				"LS: RSP Abort %08x xb %08x Err %08x\n",
 				atomic_read(&tgtp->xmt_ls_rsp_aborted),
 				atomic_read(&tgtp->xmt_ls_rsp_xb_set),
 				atomic_read(&tgtp->xmt_ls_rsp_error));
 
-		len += snprintf(buf + len, size - len,
+		len += scnprintf(buf + len, size - len,
 				"FCP: Rcv %08x Defer %08x Release %08x "
 				"Drop %08x\n",
 				atomic_read(&tgtp->rcv_fcp_cmd_in),
@@ -822,13 +1085,13 @@
 
 		if (atomic_read(&tgtp->rcv_fcp_cmd_in) !=
 		    atomic_read(&tgtp->rcv_fcp_cmd_out)) {
-			len += snprintf(buf + len, size - len,
+			len += scnprintf(buf + len, size - len,
 					"Rcv FCP: in %08x != out %08x\n",
 					atomic_read(&tgtp->rcv_fcp_cmd_in),
 					atomic_read(&tgtp->rcv_fcp_cmd_out));
 		}
 
-		len += snprintf(buf + len, size - len,
+		len += scnprintf(buf + len, size - len,
 				"FCP Rsp: read %08x readrsp %08x "
 				"write %08x rsp %08x\n",
 				atomic_read(&tgtp->xmt_fcp_read),
@@ -836,56 +1099,56 @@
 				atomic_read(&tgtp->xmt_fcp_write),
 				atomic_read(&tgtp->xmt_fcp_rsp));
 
-		len += snprintf(buf + len, size - len,
+		len += scnprintf(buf + len, size - len,
 				"FCP Rsp Cmpl: %08x err %08x drop %08x\n",
 				atomic_read(&tgtp->xmt_fcp_rsp_cmpl),
 				atomic_read(&tgtp->xmt_fcp_rsp_error),
 				atomic_read(&tgtp->xmt_fcp_rsp_drop));
 
-		len += snprintf(buf + len, size - len,
+		len += scnprintf(buf + len, size - len,
 				"FCP Rsp Abort: %08x xb %08x xricqe  %08x\n",
 				atomic_read(&tgtp->xmt_fcp_rsp_aborted),
 				atomic_read(&tgtp->xmt_fcp_rsp_xb_set),
 				atomic_read(&tgtp->xmt_fcp_xri_abort_cqe));
 
-		len += snprintf(buf + len, size - len,
+		len += scnprintf(buf + len, size - len,
 				"ABORT: Xmt %08x Cmpl %08x\n",
 				atomic_read(&tgtp->xmt_fcp_abort),
 				atomic_read(&tgtp->xmt_fcp_abort_cmpl));
 
-		len += snprintf(buf + len, size - len,
+		len += scnprintf(buf + len, size - len,
 				"ABORT: Sol %08x  Usol %08x Err %08x Cmpl %08x",
 				atomic_read(&tgtp->xmt_abort_sol),
 				atomic_read(&tgtp->xmt_abort_unsol),
 				atomic_read(&tgtp->xmt_abort_rsp),
 				atomic_read(&tgtp->xmt_abort_rsp_error));
 
-		len +=  snprintf(buf + len, size - len, "\n");
+		len +=  scnprintf(buf + len, size - len, "\n");
 
 		cnt = 0;
-		spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 		list_for_each_entry_safe(ctxp, next_ctxp,
 				&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
 				list) {
 			cnt++;
 		}
-		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 		if (cnt) {
-			len += snprintf(buf + len, size - len,
+			len += scnprintf(buf + len, size - len,
 					"ABORT: %d ctx entries\n", cnt);
-			spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+			spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 			list_for_each_entry_safe(ctxp, next_ctxp,
 				    &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
 				    list) {
 				if (len >= (size - LPFC_DEBUG_OUT_LINE_SZ))
 					break;
-				len += snprintf(buf + len, size - len,
+				len += scnprintf(buf + len, size - len,
 						"Entry: oxid %x state %x "
 						"flag %x\n",
 						ctxp->oxid, ctxp->state,
 						ctxp->flag);
 			}
-			spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+			spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 		}
 
 		/* Calculate outstanding IOs */
@@ -893,7 +1156,7 @@
 		tot += atomic_read(&tgtp->xmt_fcp_release);
 		tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
 
-		len += snprintf(buf + len, size - len,
+		len += scnprintf(buf + len, size - len,
 				"IO_CTX: %08x  WAIT: cur %08x tot %08x\n"
 				"CTX Outstanding %08llx\n",
 				phba->sli4_hba.nvmet_xri_cnt,
@@ -901,7 +1164,7 @@
 				phba->sli4_hba.nvmet_io_wait_total,
 				tot);
 	} else {
-		if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
+		if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
 			return len;
 
 		localport = vport->localport;
@@ -911,47 +1174,43 @@
 		if (!lport)
 			return len;
 
-		len += snprintf(buf + len, size - len,
-				"\nNVME Lport Statistics\n");
+		len += scnprintf(buf + len, size - len,
+				"\nNVME HDWQ Statistics\n");
 
-		len += snprintf(buf + len, size - len,
+		len += scnprintf(buf + len, size - len,
 				"LS: Xmt %016x Cmpl %016x\n",
 				atomic_read(&lport->fc4NvmeLsRequests),
 				atomic_read(&lport->fc4NvmeLsCmpls));
 
-		if (phba->cfg_nvme_io_channel < 32)
-			maxch = phba->cfg_nvme_io_channel;
-		else
-			maxch = 32;
 		totin = 0;
 		totout = 0;
-		for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
-			cstat = &lport->cstat[i];
-			tot = atomic_read(&cstat->fc4NvmeIoCmpls);
+		for (i = 0; i < phba->cfg_hdw_queue; i++) {
+			cstat = &phba->sli4_hba.hdwq[i].nvme_cstat;
+			tot = cstat->io_cmpls;
 			totin += tot;
-			data1 = atomic_read(&cstat->fc4NvmeInputRequests);
-			data2 = atomic_read(&cstat->fc4NvmeOutputRequests);
-			data3 = atomic_read(&cstat->fc4NvmeControlRequests);
+			data1 = cstat->input_requests;
+			data2 = cstat->output_requests;
+			data3 = cstat->control_requests;
 			totout += (data1 + data2 + data3);
 
 			/* Limit to 32, debugfs display buffer limitation */
 			if (i >= 32)
 				continue;
 
-			len += snprintf(buf + len, PAGE_SIZE - len,
-					"FCP (%d): Rd %016llx Wr %016llx "
+			len += scnprintf(buf + len, PAGE_SIZE - len,
+					"HDWQ (%d): Rd %016llx Wr %016llx "
 					"IO %016llx ",
 					i, data1, data2, data3);
-			len += snprintf(buf + len, PAGE_SIZE - len,
+			len += scnprintf(buf + len, PAGE_SIZE - len,
 					"Cmpl %016llx OutIO %016llx\n",
 					tot, ((data1 + data2 + data3) - tot));
 		}
-		len += snprintf(buf + len, PAGE_SIZE - len,
+		len += scnprintf(buf + len, PAGE_SIZE - len,
 				"Total FCP Cmpl %016llx Issue %016llx "
 				"OutIO %016llx\n",
 				totin, totout, totout - totin);
 
-		len += snprintf(buf + len, size - len,
+		len += scnprintf(buf + len, size - len,
 				"LS Xmt Err: Abrt %08x Err %08x  "
 				"Cmpl Err: xb %08x Err %08x\n",
 				atomic_read(&lport->xmt_ls_abort),
@@ -959,7 +1218,7 @@
 				atomic_read(&lport->cmpl_ls_xb),
 				atomic_read(&lport->cmpl_ls_err));
 
-		len += snprintf(buf + len, size - len,
+		len += scnprintf(buf + len, size - len,
 				"FCP Xmt Err: noxri %06x nondlp %06x "
 				"qdepth %06x wqerr %06x err %06x Abrt %06x\n",
 				atomic_read(&lport->xmt_fcp_noxri),
@@ -969,7 +1228,7 @@
 				atomic_read(&lport->xmt_fcp_err),
 				atomic_read(&lport->xmt_fcp_abort));
 
-		len += snprintf(buf + len, size - len,
+		len += scnprintf(buf + len, size - len,
 				"FCP Cmpl Err: xb %08x Err %08x\n",
 				atomic_read(&lport->cmpl_fcp_xb),
 				atomic_read(&lport->cmpl_fcp_err));
@@ -979,6 +1238,66 @@
 	return len;
 }
 
+/**
+ * lpfc_debugfs_scsistat_data - Dump SCSI statistics to a buffer
+ * @vport: The vport to gather target node info from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine dumps the SCSI statistics associated with @vport
+ *
+ * Return Value:
+ * This routine returns the number of bytes that were dumped into @buf and will
+ * not exceed @size.
+ **/
+static int
+lpfc_debugfs_scsistat_data(struct lpfc_vport *vport, char *buf, int size)
+{
+	int len;
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_fc4_ctrl_stat *cstat;
+	u64 data1, data2, data3;
+	u64 tot, totin, totout;
+	int i;
+	char tmp[LPFC_MAX_SCSI_INFO_TMP_LEN] = {0};
+
+	if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ||
+	    (phba->sli_rev != LPFC_SLI_REV4))
+		return 0;
+
+	scnprintf(buf, size, "SCSI HDWQ Statistics\n");
+
+	totin = 0;
+	totout = 0;
+	for (i = 0; i < phba->cfg_hdw_queue; i++) {
+		cstat = &phba->sli4_hba.hdwq[i].scsi_cstat;
+		tot = cstat->io_cmpls;
+		totin += tot;
+		data1 = cstat->input_requests;
+		data2 = cstat->output_requests;
+		data3 = cstat->control_requests;
+		totout += (data1 + data2 + data3);
+
+		scnprintf(tmp, sizeof(tmp), "HDWQ (%d): Rd %016llx Wr %016llx "
+			  "IO %016llx ", i, data1, data2, data3);
+		if (strlcat(buf, tmp, size) >= size)
+			goto buffer_done;
+
+		scnprintf(tmp, sizeof(tmp), "Cmpl %016llx OutIO %016llx\n",
+			  tot, ((data1 + data2 + data3) - tot));
+		if (strlcat(buf, tmp, size) >= size)
+			goto buffer_done;
+	}
+	scnprintf(tmp, sizeof(tmp), "Total FCP Cmpl %016llx Issue %016llx "
+		  "OutIO %016llx\n", totin, totout, totout - totin);
+	strlcat(buf, tmp, size);
+
+buffer_done:
+	len = strnlen(buf, size);
+
+	return len;
+}
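
Both lpfc_debugfs_scsistat_data() above and the multi-XRI pool dump format
each line into a small tmp[] with scnprintf() and append it with strlcat(),
bailing out the moment strlcat() reports the destination is full. Since
strlcat() is not guaranteed by ISO C, the sketch below supplies a minimal
stand-in; all names and values are illustrative.

#include <stdio.h>
#include <string.h>

static size_t my_strlcat(char *dst, const char *src, size_t size)
{
	size_t dlen = strnlen(dst, size);
	size_t slen = strlen(src);
	size_t copy;

	if (dlen == size)
		return size + slen;	/* destination already full */
	copy = slen;
	if (copy >= size - dlen)
		copy = size - dlen - 1;
	memcpy(dst + dlen, src, copy);
	dst[dlen + copy] = '\0';
	return dlen + slen;	/* total length it tried to create */
}

int main(void)
{
	char buf[64] = "HDWQ stats\n";
	char tmp[32];
	int i;

	for (i = 0; i < 8; i++) {
		snprintf(tmp, sizeof(tmp), "queue %d: io %d\n", i, i * 100);
		if (my_strlcat(buf, tmp, sizeof(buf)) >= sizeof(buf))
			break;	/* buffer full: output stays NUL-terminated */
	}
	printf("%s", buf);
	return 0;
}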
 
 /**
  * lpfc_debugfs_nvmektime_data - Dump NVME ktime statistics to a buffer
@@ -1001,58 +1320,58 @@
 
 	if (phba->nvmet_support == 0) {
 		/* NVME Initiator */
-		len += snprintf(buf + len, PAGE_SIZE - len,
+		len += scnprintf(buf + len, PAGE_SIZE - len,
 				"ktime %s: Total Samples: %lld\n",
 				(phba->ktime_on ?  "Enabled" : "Disabled"),
 				phba->ktime_data_samples);
 		if (phba->ktime_data_samples == 0)
 			return len;
 
-		len += snprintf(
+		len += scnprintf(
 			buf + len, PAGE_SIZE - len,
 			"Segment 1: Last NVME Cmd cmpl "
 			"done -to- Start of next NVME cnd (in driver)\n");
-		len += snprintf(
+		len += scnprintf(
 			buf + len, PAGE_SIZE - len,
 			"avg:%08lld min:%08lld max %08lld\n",
 			div_u64(phba->ktime_seg1_total,
 				phba->ktime_data_samples),
 			phba->ktime_seg1_min,
 			phba->ktime_seg1_max);
-		len += snprintf(
+		len += scnprintf(
 			buf + len, PAGE_SIZE - len,
 			"Segment 2: Driver start of NVME cmd "
 			"-to- Firmware WQ doorbell\n");
-		len += snprintf(
+		len += scnprintf(
 			buf + len, PAGE_SIZE - len,
 			"avg:%08lld min:%08lld max %08lld\n",
 			div_u64(phba->ktime_seg2_total,
 				phba->ktime_data_samples),
 			phba->ktime_seg2_min,
 			phba->ktime_seg2_max);
-		len += snprintf(
+		len += scnprintf(
 			buf + len, PAGE_SIZE - len,
 			"Segment 3: Firmware WQ doorbell -to- "
 			"MSI-X ISR cmpl\n");
-		len += snprintf(
+		len += scnprintf(
 			buf + len, PAGE_SIZE - len,
 			"avg:%08lld min:%08lld max %08lld\n",
 			div_u64(phba->ktime_seg3_total,
 				phba->ktime_data_samples),
 			phba->ktime_seg3_min,
 			phba->ktime_seg3_max);
-		len += snprintf(
+		len += scnprintf(
 			buf + len, PAGE_SIZE - len,
 			"Segment 4: MSI-X ISR cmpl -to- "
 			"NVME cmpl done\n");
-		len += snprintf(
+		len += scnprintf(
 			buf + len, PAGE_SIZE - len,
 			"avg:%08lld min:%08lld max %08lld\n",
 			div_u64(phba->ktime_seg4_total,
 				phba->ktime_data_samples),
 			phba->ktime_seg4_min,
 			phba->ktime_seg4_max);
-		len += snprintf(
+		len += scnprintf(
 			buf + len, PAGE_SIZE - len,
 			"Total IO avg time: %08lld\n",
 			div_u64(phba->ktime_seg1_total +
@@ -1064,7 +1383,7 @@
 	}
 
 	/* NVME Target */
-	len += snprintf(buf + len, PAGE_SIZE-len,
+	len += scnprintf(buf + len, PAGE_SIZE-len,
 			"ktime %s: Total Samples: %lld %lld\n",
 			(phba->ktime_on ? "Enabled" : "Disabled"),
 			phba->ktime_data_samples,
@@ -1072,46 +1391,46 @@
 	if (phba->ktime_data_samples == 0)
 		return len;
 
-	len += snprintf(buf + len, PAGE_SIZE-len,
+	len += scnprintf(buf + len, PAGE_SIZE-len,
 			"Segment 1: MSI-X ISR Rcv cmd -to- "
 			"cmd pass to NVME Layer\n");
-	len += snprintf(buf + len, PAGE_SIZE-len,
+	len += scnprintf(buf + len, PAGE_SIZE-len,
 			"avg:%08lld min:%08lld max %08lld\n",
 			div_u64(phba->ktime_seg1_total,
 				phba->ktime_data_samples),
 			phba->ktime_seg1_min,
 			phba->ktime_seg1_max);
-	len += snprintf(buf + len, PAGE_SIZE-len,
+	len += scnprintf(buf + len, PAGE_SIZE-len,
 			"Segment 2: cmd pass to NVME Layer- "
 			"-to- Driver rcv cmd OP (action)\n");
-	len += snprintf(buf + len, PAGE_SIZE-len,
+	len += scnprintf(buf + len, PAGE_SIZE-len,
 			"avg:%08lld min:%08lld max %08lld\n",
 			div_u64(phba->ktime_seg2_total,
 				phba->ktime_data_samples),
 			phba->ktime_seg2_min,
 			phba->ktime_seg2_max);
-	len += snprintf(buf + len, PAGE_SIZE-len,
+	len += scnprintf(buf + len, PAGE_SIZE-len,
 			"Segment 3: Driver rcv cmd OP -to- "
 			"Firmware WQ doorbell: cmd\n");
-	len += snprintf(buf + len, PAGE_SIZE-len,
+	len += scnprintf(buf + len, PAGE_SIZE-len,
 			"avg:%08lld min:%08lld max %08lld\n",
 			div_u64(phba->ktime_seg3_total,
 				phba->ktime_data_samples),
 			phba->ktime_seg3_min,
 			phba->ktime_seg3_max);
-	len += snprintf(buf + len, PAGE_SIZE-len,
+	len += scnprintf(buf + len, PAGE_SIZE-len,
 			"Segment 4: Firmware WQ doorbell: cmd "
 			"-to- MSI-X ISR for cmd cmpl\n");
-	len += snprintf(buf + len, PAGE_SIZE-len,
+	len += scnprintf(buf + len, PAGE_SIZE-len,
 			"avg:%08lld min:%08lld max %08lld\n",
 			div_u64(phba->ktime_seg4_total,
 				phba->ktime_data_samples),
 			phba->ktime_seg4_min,
 			phba->ktime_seg4_max);
-	len += snprintf(buf + len, PAGE_SIZE-len,
+	len += scnprintf(buf + len, PAGE_SIZE-len,
 			"Segment 5: MSI-X ISR for cmd cmpl "
 			"-to- NVME layer passed cmd done\n");
-	len += snprintf(buf + len, PAGE_SIZE-len,
+	len += scnprintf(buf + len, PAGE_SIZE-len,
 			"avg:%08lld min:%08lld max %08lld\n",
 			div_u64(phba->ktime_seg5_total,
 				phba->ktime_data_samples),
@@ -1119,10 +1438,10 @@
 			phba->ktime_seg5_max);
 
 	if (phba->ktime_status_samples == 0) {
-		len += snprintf(buf + len, PAGE_SIZE-len,
+		len += scnprintf(buf + len, PAGE_SIZE-len,
 				"Total: cmd received by MSI-X ISR "
 				"-to- cmd completed on wire\n");
-		len += snprintf(buf + len, PAGE_SIZE-len,
+		len += scnprintf(buf + len, PAGE_SIZE-len,
 				"avg:%08lld min:%08lld "
 				"max %08lld\n",
 				div_u64(phba->ktime_seg10_total,
@@ -1132,46 +1451,46 @@
 		return len;
 	}
 
-	len += snprintf(buf + len, PAGE_SIZE-len,
+	len += scnprintf(buf + len, PAGE_SIZE-len,
 			"Segment 6: NVME layer passed cmd done "
 			"-to- Driver rcv rsp status OP\n");
-	len += snprintf(buf + len, PAGE_SIZE-len,
+	len += scnprintf(buf + len, PAGE_SIZE-len,
 			"avg:%08lld min:%08lld max %08lld\n",
 			div_u64(phba->ktime_seg6_total,
 				phba->ktime_status_samples),
 			phba->ktime_seg6_min,
 			phba->ktime_seg6_max);
-	len += snprintf(buf + len, PAGE_SIZE-len,
+	len += scnprintf(buf + len, PAGE_SIZE-len,
 			"Segment 7: Driver rcv rsp status OP "
 			"-to- Firmware WQ doorbell: status\n");
-	len += snprintf(buf + len, PAGE_SIZE-len,
+	len += scnprintf(buf + len, PAGE_SIZE-len,
 			"avg:%08lld min:%08lld max %08lld\n",
 			div_u64(phba->ktime_seg7_total,
 				phba->ktime_status_samples),
 			phba->ktime_seg7_min,
 			phba->ktime_seg7_max);
-	len += snprintf(buf + len, PAGE_SIZE-len,
+	len += scnprintf(buf + len, PAGE_SIZE-len,
 			"Segment 8: Firmware WQ doorbell: status"
 			" -to- MSI-X ISR for status cmpl\n");
-	len += snprintf(buf + len, PAGE_SIZE-len,
+	len += scnprintf(buf + len, PAGE_SIZE-len,
 			"avg:%08lld min:%08lld max %08lld\n",
 			div_u64(phba->ktime_seg8_total,
 				phba->ktime_status_samples),
 			phba->ktime_seg8_min,
 			phba->ktime_seg8_max);
-	len += snprintf(buf + len, PAGE_SIZE-len,
+	len += scnprintf(buf + len, PAGE_SIZE-len,
 			"Segment 9: MSI-X ISR for status cmpl  "
 			"-to- NVME layer passed status done\n");
-	len += snprintf(buf + len, PAGE_SIZE-len,
+	len += scnprintf(buf + len, PAGE_SIZE-len,
 			"avg:%08lld min:%08lld max %08lld\n",
 			div_u64(phba->ktime_seg9_total,
 				phba->ktime_status_samples),
 			phba->ktime_seg9_min,
 			phba->ktime_seg9_max);
-	len += snprintf(buf + len, PAGE_SIZE-len,
+	len += scnprintf(buf + len, PAGE_SIZE-len,
 			"Total: cmd received by MSI-X ISR -to- "
 			"cmd completed on wire\n");
-	len += snprintf(buf + len, PAGE_SIZE-len,
+	len += scnprintf(buf + len, PAGE_SIZE-len,
 			"avg:%08lld min:%08lld max %08lld\n",
 			div_u64(phba->ktime_seg10_total,
 				phba->ktime_status_samples),
@@ -1206,7 +1525,7 @@
 		(phba->nvmeio_trc_size - 1);
 	skip = phba->nvmeio_trc_output_idx;
 
-	len += snprintf(buf + len, size - len,
+	len += scnprintf(buf + len, size - len,
 			"%s IO Trace %s: next_idx %d skip %d size %d\n",
 			(phba->nvmet_support ? "NVME" : "NVMET"),
 			(state ? "Enabled" : "Disabled"),
@@ -1228,18 +1547,18 @@
 		if (!dtp->fmt)
 			continue;
 
-		len +=  snprintf(buf + len, size - len, dtp->fmt,
+		len +=  scnprintf(buf + len, size - len, dtp->fmt,
 			dtp->data1, dtp->data2, dtp->data3);
 
 		if (phba->nvmeio_trc_output_idx >= phba->nvmeio_trc_size) {
 			phba->nvmeio_trc_output_idx = 0;
-			len += snprintf(buf + len, size - len,
+			len += scnprintf(buf + len, size - len,
 					"Trace Complete\n");
 			goto out;
 		}
 
 		if (len >= (size - LPFC_DEBUG_OUT_LINE_SZ)) {
-			len += snprintf(buf + len, size - len,
+			len += scnprintf(buf + len, size - len,
 					"Trace Continue (%d of %d)\n",
 					phba->nvmeio_trc_output_idx,
 					phba->nvmeio_trc_size);
@@ -1257,18 +1576,18 @@
 		if (!dtp->fmt)
 			continue;
 
-		len +=  snprintf(buf + len, size - len, dtp->fmt,
+		len +=  scnprintf(buf + len, size - len, dtp->fmt,
 			dtp->data1, dtp->data2, dtp->data3);
 
 		if (phba->nvmeio_trc_output_idx >= phba->nvmeio_trc_size) {
 			phba->nvmeio_trc_output_idx = 0;
-			len += snprintf(buf + len, size - len,
+			len += scnprintf(buf + len, size - len,
 					"Trace Complete\n");
 			goto out;
 		}
 
 		if (len >= (size - LPFC_DEBUG_OUT_LINE_SZ)) {
-			len += snprintf(buf + len, size - len,
+			len += scnprintf(buf + len, size - len,
 					"Trace Continue (%d of %d)\n",
 					phba->nvmeio_trc_output_idx,
 					phba->nvmeio_trc_size);
@@ -1276,7 +1595,7 @@
 		}
 	}
 
-	len += snprintf(buf + len, size - len,
+	len += scnprintf(buf + len, size - len,
 			"Trace Done\n");
 out:
 	return len;
@@ -1299,62 +1618,73 @@
 lpfc_debugfs_cpucheck_data(struct lpfc_vport *vport, char *buf, int size)
 {
 	struct lpfc_hba   *phba = vport->phba;
-	int i;
+	struct lpfc_sli4_hdw_queue *qp;
+	int i, j, max_cnt;
 	int len = 0;
-	uint32_t tot_xmt = 0;
-	uint32_t tot_rcv = 0;
-	uint32_t tot_cmpl = 0;
-	uint32_t tot_ccmpl = 0;
+	uint32_t tot_xmt;
+	uint32_t tot_rcv;
+	uint32_t tot_cmpl;
 
-	if (phba->nvmet_support == 0) {
-		/* NVME Initiator */
-		len += snprintf(buf + len, PAGE_SIZE - len,
-				"CPUcheck %s\n",
-				(phba->cpucheck_on & LPFC_CHECK_NVME_IO ?
-					"Enabled" : "Disabled"));
-		for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
-			if (i >= LPFC_CHECK_CPU_CNT)
-				break;
-			len += snprintf(buf + len, PAGE_SIZE - len,
-					"%02d: xmit x%08x cmpl x%08x\n",
-					i, phba->cpucheck_xmt_io[i],
-					phba->cpucheck_cmpl_io[i]);
-			tot_xmt += phba->cpucheck_xmt_io[i];
-			tot_cmpl += phba->cpucheck_cmpl_io[i];
-		}
-		len += snprintf(buf + len, PAGE_SIZE - len,
-				"tot:xmit x%08x cmpl x%08x\n",
-				tot_xmt, tot_cmpl);
-		return len;
-	}
-
-	/* NVME Target */
-	len += snprintf(buf + len, PAGE_SIZE - len,
+	len += scnprintf(buf + len, PAGE_SIZE - len,
 			"CPUcheck %s ",
-			(phba->cpucheck_on & LPFC_CHECK_NVMET_IO ?
-				"IO Enabled - " : "IO Disabled - "));
-	len += snprintf(buf + len, PAGE_SIZE - len,
-			"%s\n",
-			(phba->cpucheck_on & LPFC_CHECK_NVMET_RCV ?
-				"Rcv Enabled\n" : "Rcv Disabled\n"));
-	for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
-		if (i >= LPFC_CHECK_CPU_CNT)
-			break;
-		len += snprintf(buf + len, PAGE_SIZE - len,
-				"%02d: xmit x%08x ccmpl x%08x "
-				"cmpl x%08x rcv x%08x\n",
-				i, phba->cpucheck_xmt_io[i],
-				phba->cpucheck_ccmpl_io[i],
-				phba->cpucheck_cmpl_io[i],
-				phba->cpucheck_rcv_io[i]);
-		tot_xmt += phba->cpucheck_xmt_io[i];
-		tot_rcv += phba->cpucheck_rcv_io[i];
-		tot_cmpl += phba->cpucheck_cmpl_io[i];
-		tot_ccmpl += phba->cpucheck_ccmpl_io[i];
+			(phba->cpucheck_on & LPFC_CHECK_NVME_IO ?
+				"Enabled" : "Disabled"));
+	if (phba->nvmet_support) {
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+				"%s\n",
+				(phba->cpucheck_on & LPFC_CHECK_NVMET_RCV ?
+					"Rcv Enabled\n" : "Rcv Disabled\n"));
+	} else {
+		len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
 	}
-	len += snprintf(buf + len, PAGE_SIZE - len,
-			"tot:xmit x%08x ccmpl x%08x cmpl x%08x rcv x%08x\n",
-			tot_xmt, tot_ccmpl, tot_cmpl, tot_rcv);
+	max_cnt = size - LPFC_DEBUG_OUT_LINE_SZ;
+
+	for (i = 0; i < phba->cfg_hdw_queue; i++) {
+		qp = &phba->sli4_hba.hdwq[i];
+
+		tot_rcv = 0;
+		tot_xmt = 0;
+		tot_cmpl = 0;
+		for (j = 0; j < LPFC_CHECK_CPU_CNT; j++) {
+			tot_xmt += qp->cpucheck_xmt_io[j];
+			tot_cmpl += qp->cpucheck_cmpl_io[j];
+			if (phba->nvmet_support)
+				tot_rcv += qp->cpucheck_rcv_io[j];
+		}
+
+		/* Only display Hardware Qs with something */
+		if (!tot_xmt && !tot_cmpl && !tot_rcv)
+			continue;
+
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+				"HDWQ %03d: ", i);
+		for (j = 0; j < LPFC_CHECK_CPU_CNT; j++) {
+			/* Only display non-zero counters */
+			if (!qp->cpucheck_xmt_io[j] &&
+			    !qp->cpucheck_cmpl_io[j] &&
+			    !qp->cpucheck_rcv_io[j])
+				continue;
+			if (phba->nvmet_support) {
+				len += scnprintf(buf + len, PAGE_SIZE - len,
+						"CPU %03d: %x/%x/%x ", j,
+						qp->cpucheck_rcv_io[j],
+						qp->cpucheck_xmt_io[j],
+						qp->cpucheck_cmpl_io[j]);
+			} else {
+				len += scnprintf(buf + len, PAGE_SIZE - len,
+						"CPU %03d: %x/%x ", j,
+						qp->cpucheck_xmt_io[j],
+						qp->cpucheck_cmpl_io[j]);
+			}
+		}
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+				"Total: %x\n", tot_xmt);
+		if (len >= max_cnt) {
+			len += scnprintf(buf + len, PAGE_SIZE - len,
+					"Truncated ...\n");
+			return len;
+		}
+	}
 	return len;
 }
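+
+/*
+ * Sample cpucheck output (values illustrative): one line per hardware
+ * queue, with per-CPU xmt/cmpl pairs (rcv/xmt/cmpl triples in NVMET mode):
+ *
+ *   HDWQ 000: CPU 000: 1a/1a CPU 004: 2/2 Total: 1c
+ */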
 
@@ -1501,7 +1831,7 @@
 	int rc = -ENOMEM;
 
 	if (!lpfc_debugfs_max_disc_trc) {
-		 rc = -ENOSPC;
+		rc = -ENOSPC;
 		goto out;
 	}
 
@@ -1551,7 +1881,7 @@
 	int rc = -ENOMEM;
 
 	if (!lpfc_debugfs_max_slow_ring_trc) {
-		 rc = -ENOSPC;
+		rc = -ENOSPC;
 		goto out;
 	}
 
@@ -1620,6 +1950,135 @@
 }
 
 /**
+ * lpfc_debugfs_multixripools_open - Open the multixripool debugfs buffer
+ * @inode: The inode pointer that contains a hba pointer.
+ * @file: The file pointer to attach the log output.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It gets
+ * the hba from the i_private field in @inode, allocates the necessary buffer
+ * for the log, fills the buffer from the in-memory log for this hba, and then
+ * returns a pointer to that log in the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return a negative
+ * error value.
+ **/
+static int
+lpfc_debugfs_multixripools_open(struct inode *inode, struct file *file)
+{
+	struct lpfc_hba *phba = inode->i_private;
+	struct lpfc_debug *debug;
+	int rc = -ENOMEM;
+
+	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+	if (!debug)
+		goto out;
+
+	/* Round to page boundary */
+	debug->buffer = kzalloc(LPFC_DUMP_MULTIXRIPOOL_SIZE, GFP_KERNEL);
+	if (!debug->buffer) {
+		kfree(debug);
+		goto out;
+	}
+
+	debug->len = lpfc_debugfs_multixripools_data(
+		phba, debug->buffer, LPFC_DUMP_MULTIXRIPOOL_SIZE);
+
+	debug->i_private = inode->i_private;
+	file->private_data = debug;
+
+	rc = 0;
+out:
+	return rc;
+}
+
+#ifdef LPFC_HDWQ_LOCK_STAT
+/**
+ * lpfc_debugfs_lockstat_open - Open the lockstat debugfs buffer
+ * @inode: The inode pointer that contains a hba pointer.
+ * @file: The file pointer to attach the log output.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It gets
+ * the hba from the i_private field in @inode, allocates the necessary buffer
+ * for the log, fills the buffer from the in-memory log for this hba, and then
+ * returns a pointer to that log in the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return a negative
+ * error value.
+ **/
+static int
+lpfc_debugfs_lockstat_open(struct inode *inode, struct file *file)
+{
+	struct lpfc_hba *phba = inode->i_private;
+	struct lpfc_debug *debug;
+	int rc = -ENOMEM;
+
+	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+	if (!debug)
+		goto out;
+
+	/* Round to page boundary */
+	debug->buffer = kmalloc(LPFC_HDWQINFO_SIZE, GFP_KERNEL);
+	if (!debug->buffer) {
+		kfree(debug);
+		goto out;
+	}
+
+	debug->len = lpfc_debugfs_lockstat_data(phba, debug->buffer,
+		LPFC_HDWQINFO_SIZE);
+	file->private_data = debug;
+
+	rc = 0;
+out:
+	return rc;
+}
+
+static ssize_t
+lpfc_debugfs_lockstat_write(struct file *file, const char __user *buf,
+			    size_t nbytes, loff_t *ppos)
+{
+	struct lpfc_debug *debug = file->private_data;
+	struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+	struct lpfc_sli4_hdw_queue *qp;
+	char mybuf[64];
+	char *pbuf;
+	int i;
+
+	/* Protect copy from user */
+	if (!access_ok(buf, nbytes))
+		return -EFAULT;
+
+	memset(mybuf, 0, sizeof(mybuf));
+
+	/* Clamp to the local buffer, leaving room for the terminating NUL */
+	if (nbytes > sizeof(mybuf) - 1)
+		nbytes = sizeof(mybuf) - 1;
+
+	if (copy_from_user(mybuf, buf, nbytes))
+		return -EFAULT;
+	pbuf = &mybuf[0];
+
+	if ((strncmp(pbuf, "reset", strlen("reset")) == 0) ||
+	    (strncmp(pbuf, "zero", strlen("zero")) == 0)) {
+		for (i = 0; i < phba->cfg_hdw_queue; i++) {
+			qp = &phba->sli4_hba.hdwq[i];
+			qp->lock_conflict.alloc_xri_get = 0;
+			qp->lock_conflict.alloc_xri_put = 0;
+			qp->lock_conflict.free_xri = 0;
+			qp->lock_conflict.wq_access = 0;
+			qp->lock_conflict.alloc_pvt_pool = 0;
+			qp->lock_conflict.mv_from_pvt_pool = 0;
+			qp->lock_conflict.mv_to_pub_pool = 0;
+			qp->lock_conflict.mv_to_pvt_pool = 0;
+			qp->lock_conflict.free_pvt_pool = 0;
+			qp->lock_conflict.free_pub_pool = 0;
+		}
+	}
+	return nbytes;
+}
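+
+/*
+ * Example (paths illustrative; the per-HBA debugfs directory name depends
+ * on the instance): dump, then reset, the per-HDWQ lock contention
+ * counters from user space:
+ *
+ *   cat /sys/kernel/debug/lpfc/fn0/lockstat
+ *   echo reset > /sys/kernel/debug/lpfc/fn0/lockstat
+ */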
+#endif
+
+/**
  * lpfc_debugfs_dumpHBASlim_open - Open the Dump HBA SLIM debugfs buffer
  * @inode: The inode pointer that contains a vport pointer.
  * @file: The file pointer to attach the log output.
@@ -1703,89 +2162,6 @@
 	return rc;
 }
 
-static int
-lpfc_debugfs_dumpData_open(struct inode *inode, struct file *file)
-{
-	struct lpfc_debug *debug;
-	int rc = -ENOMEM;
-
-	if (!_dump_buf_data)
-		return -EBUSY;
-
-	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
-	if (!debug)
-		goto out;
-
-	/* Round to page boundary */
-	pr_err("9059 BLKGRD:  %s: _dump_buf_data=0x%p\n",
-			__func__, _dump_buf_data);
-	debug->buffer = _dump_buf_data;
-	if (!debug->buffer) {
-		kfree(debug);
-		goto out;
-	}
-
-	debug->len = (1 << _dump_buf_data_order) << PAGE_SHIFT;
-	file->private_data = debug;
-
-	rc = 0;
-out:
-	return rc;
-}
-
-static int
-lpfc_debugfs_dumpDif_open(struct inode *inode, struct file *file)
-{
-	struct lpfc_debug *debug;
-	int rc = -ENOMEM;
-
-	if (!_dump_buf_dif)
-		return -EBUSY;
-
-	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
-	if (!debug)
-		goto out;
-
-	/* Round to page boundary */
-	pr_err("9060 BLKGRD: %s: _dump_buf_dif=0x%p file=%pD\n",
-			__func__, _dump_buf_dif, file);
-	debug->buffer = _dump_buf_dif;
-	if (!debug->buffer) {
-		kfree(debug);
-		goto out;
-	}
-
-	debug->len = (1 << _dump_buf_dif_order) << PAGE_SHIFT;
-	file->private_data = debug;
-
-	rc = 0;
-out:
-	return rc;
-}
-
-static ssize_t
-lpfc_debugfs_dumpDataDif_write(struct file *file, const char __user *buf,
-		  size_t nbytes, loff_t *ppos)
-{
-	/*
-	 * The Data/DIF buffers only save one failing IO
-	 * The write op is used as a reset mechanism after an IO has
-	 * already been saved to the next one can be saved
-	 */
-	spin_lock(&_dump_buf_lock);
-
-	memset((void *)_dump_buf_data, 0,
-			((1 << PAGE_SHIFT) << _dump_buf_data_order));
-	memset((void *)_dump_buf_dif, 0,
-			((1 << PAGE_SHIFT) << _dump_buf_dif_order));
-
-	_dump_buf_done = 0;
-
-	spin_unlock(&_dump_buf_lock);
-
-	return nbytes;
-}
-
 static ssize_t
 lpfc_debugfs_dif_err_read(struct file *file, char __user *buf,
 	size_t nbytes, loff_t *ppos)
@@ -1797,28 +2173,29 @@
 	int cnt = 0;
 
 	if (dent == phba->debug_writeGuard)
-		cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wgrd_cnt);
+		cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wgrd_cnt);
 	else if (dent == phba->debug_writeApp)
-		cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wapp_cnt);
+		cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wapp_cnt);
 	else if (dent == phba->debug_writeRef)
-		cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wref_cnt);
+		cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wref_cnt);
 	else if (dent == phba->debug_readGuard)
-		cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rgrd_cnt);
+		cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rgrd_cnt);
 	else if (dent == phba->debug_readApp)
-		cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rapp_cnt);
+		cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rapp_cnt);
 	else if (dent == phba->debug_readRef)
-		cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rref_cnt);
+		cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rref_cnt);
 	else if (dent == phba->debug_InjErrNPortID)
-		cnt = snprintf(cbuf, 32, "0x%06x\n", phba->lpfc_injerr_nportid);
+		cnt = scnprintf(cbuf, 32, "0x%06x\n",
+				phba->lpfc_injerr_nportid);
 	else if (dent == phba->debug_InjErrWWPN) {
 		memcpy(&tmp, &phba->lpfc_injerr_wwpn, sizeof(struct lpfc_name));
 		tmp = cpu_to_be64(tmp);
-		cnt = snprintf(cbuf, 32, "0x%016llx\n", tmp);
+		cnt = scnprintf(cbuf, 32, "0x%016llx\n", tmp);
 	} else if (dent == phba->debug_InjErrLBA) {
 		if (phba->lpfc_injerr_lba == (sector_t)(-1))
-			cnt = snprintf(cbuf, 32, "off\n");
+			cnt = scnprintf(cbuf, 32, "off\n");
 		else
-			cnt = snprintf(cbuf, 32, "0x%llx\n",
+			cnt = scnprintf(cbuf, 32, "0x%llx\n",
 				 (uint64_t) phba->lpfc_injerr_lba);
 	} else
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -1997,18 +2374,76 @@
 	return 0;
 }
 
-static int
-lpfc_debugfs_dumpDataDif_release(struct inode *inode, struct file *file)
+/**
+ * lpfc_debugfs_multixripools_write - Clear multi-XRI pools statistics
+ * @file: The file pointer to write to.
+ * @buf: The buffer to copy the user data from.
+ * @nbytes: The number of bytes to get.
+ * @ppos: The position in the file to start writing at.
+ *
+ * Description:
+ * This routine clears multi-XRI pools statistics when buf contains "clear".
+ *
+ * Return Value:
+ * It returns the @nbytes passed in from debugfs user space when successful.
+ * In case of error conditions, it returns proper error code back to the user
+ * space.
+ **/
+static ssize_t
+lpfc_debugfs_multixripools_write(struct file *file, const char __user *buf,
+				 size_t nbytes, loff_t *ppos)
 {
 	struct lpfc_debug *debug = file->private_data;
+	struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+	char mybuf[64];
+	char *pbuf;
+	u32 i;
+	u32 hwq_count;
+	struct lpfc_sli4_hdw_queue *qp;
+	struct lpfc_multixri_pool *multixri_pool;
 
-	debug->buffer = NULL;
-	kfree(debug);
+	if (nbytes > 64)
+		nbytes = 64;
 
-	return 0;
+	/* Protect copy from user */
+	if (!access_ok(buf, nbytes))
+		return -EFAULT;
+
+	memset(mybuf, 0, sizeof(mybuf));
+
+	if (copy_from_user(mybuf, buf, nbytes))
+		return -EFAULT;
+	pbuf = &mybuf[0];
+
+	if ((strncmp(pbuf, "clear", strlen("clear"))) == 0) {
+		hwq_count = phba->cfg_hdw_queue;
+		for (i = 0; i < hwq_count; i++) {
+			qp = &phba->sli4_hba.hdwq[i];
+			multixri_pool = qp->p_multixri_pool;
+			if (!multixri_pool)
+				continue;
+
+			qp->empty_io_bufs = 0;
+			multixri_pool->pbl_empty_count = 0;
+#ifdef LPFC_MXP_STAT
+			multixri_pool->above_limit_count = 0;
+			multixri_pool->below_limit_count = 0;
+			multixri_pool->stat_max_hwm = 0;
+			multixri_pool->local_pbl_hit_count = 0;
+			multixri_pool->other_pbl_hit_count = 0;
+
+			multixri_pool->stat_pbl_count = 0;
+			multixri_pool->stat_pvt_count = 0;
+			multixri_pool->stat_busy_count = 0;
+			multixri_pool->stat_snapshot_taken = 0;
+#endif
+		}
+		return strlen(pbuf);
+	}
+
+	return -EINVAL;
 }
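+
+/*
+ * Example (path illustrative): clear the multi-XRI pool statistics for an
+ * HBA from user space:
+ *
+ *   echo clear > /sys/kernel/debug/lpfc/fn0/multixripools
+ */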
 
-
 static int
 lpfc_debugfs_nvmestat_open(struct inode *inode, struct file *file)
 {
@@ -2098,6 +2533,64 @@
 }
 
 static int
+lpfc_debugfs_scsistat_open(struct inode *inode, struct file *file)
+{
+	struct lpfc_vport *vport = inode->i_private;
+	struct lpfc_debug *debug;
+	int rc = -ENOMEM;
+
+	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+	if (!debug)
+		goto out;
+
+	/* Round to page boundary */
+	debug->buffer = kzalloc(LPFC_SCSISTAT_SIZE, GFP_KERNEL);
+	if (!debug->buffer) {
+		kfree(debug);
+		goto out;
+	}
+
+	debug->len = lpfc_debugfs_scsistat_data(vport, debug->buffer,
+		LPFC_SCSISTAT_SIZE);
+
+	debug->i_private = inode->i_private;
+	file->private_data = debug;
+
+	rc = 0;
+out:
+	return rc;
+}
+
+static ssize_t
+lpfc_debugfs_scsistat_write(struct file *file, const char __user *buf,
+			    size_t nbytes, loff_t *ppos)
+{
+	struct lpfc_debug *debug = file->private_data;
+	struct lpfc_vport *vport = (struct lpfc_vport *)debug->i_private;
+	struct lpfc_hba *phba = vport->phba;
+	char mybuf[6] = {0};
+	int i;
+
+	/* Protect copy from user */
+	if (!access_ok(buf, nbytes))
+		return -EFAULT;
+
+	if (copy_from_user(mybuf, buf, (nbytes >= sizeof(mybuf)) ?
+				       (sizeof(mybuf) - 1) : nbytes))
+		return -EFAULT;
+
+	if ((strncmp(&mybuf[0], "reset", strlen("reset")) == 0) ||
+	    (strncmp(&mybuf[0], "zero", strlen("zero")) == 0)) {
+		for (i = 0; i < phba->cfg_hdw_queue; i++) {
+			memset(&phba->sli4_hba.hdwq[i].scsi_cstat, 0,
+			       sizeof(phba->sli4_hba.hdwq[i].scsi_cstat));
+		}
+	}
+
+	return nbytes;
+}
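+
+/*
+ * Example (illustrative): either keyword clears the per-HDWQ SCSI I/O
+ * counters:
+ *
+ *   echo reset > scsistat
+ *   echo zero > scsistat
+ */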
+
+static int
 lpfc_debugfs_nvmektime_open(struct inode *inode, struct file *file)
 {
 	struct lpfc_vport *vport = inode->i_private;
@@ -2348,7 +2841,7 @@
 	}
 
 	debug->len = lpfc_debugfs_cpucheck_data(vport, debug->buffer,
-		LPFC_NVMEKTIME_SIZE);
+		LPFC_CPUCHECK_SIZE);
 
 	debug->i_private = inode->i_private;
 	file->private_data = debug;
@@ -2365,9 +2858,10 @@
 	struct lpfc_debug *debug = file->private_data;
 	struct lpfc_vport *vport = (struct lpfc_vport *)debug->i_private;
 	struct lpfc_hba   *phba = vport->phba;
+	struct lpfc_sli4_hdw_queue *qp;
 	char mybuf[64];
 	char *pbuf;
-	int i;
+	int i, j;
 
 	if (nbytes > 64)
 		nbytes = 64;
@@ -2382,8 +2876,18 @@
 		if (phba->nvmet_support)
 			phba->cpucheck_on |= LPFC_CHECK_NVMET_IO;
 		else
+			phba->cpucheck_on |= (LPFC_CHECK_NVME_IO |
+				LPFC_CHECK_SCSI_IO);
+		return strlen(pbuf);
+	} else if ((strncmp(pbuf, "nvme_on", sizeof("nvme_on") - 1) == 0)) {
+		if (phba->nvmet_support)
+			phba->cpucheck_on |= LPFC_CHECK_NVMET_IO;
+		else
 			phba->cpucheck_on |= LPFC_CHECK_NVME_IO;
 		return strlen(pbuf);
+	} else if ((strncmp(pbuf, "scsi_on", sizeof("scsi_on") - 1) == 0)) {
+		phba->cpucheck_on |= LPFC_CHECK_SCSI_IO;
+		return strlen(pbuf);
 	} else if ((strncmp(pbuf, "rcv",
 		   sizeof("rcv") - 1) == 0)) {
 		if (phba->nvmet_support)
@@ -2397,13 +2901,14 @@
 		return strlen(pbuf);
 	} else if ((strncmp(pbuf, "zero",
 		   sizeof("zero") - 1) == 0)) {
-		for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
-			if (i >= LPFC_CHECK_CPU_CNT)
-				break;
-			phba->cpucheck_rcv_io[i] = 0;
-			phba->cpucheck_xmt_io[i] = 0;
-			phba->cpucheck_cmpl_io[i] = 0;
-			phba->cpucheck_ccmpl_io[i] = 0;
+		for (i = 0; i < phba->cfg_hdw_queue; i++) {
+			qp = &phba->sli4_hba.hdwq[i];
+
+			for (j = 0; j < LPFC_CHECK_CPU_CNT; j++) {
+				qp->cpucheck_rcv_io[j] = 0;
+				qp->cpucheck_xmt_io[j] = 0;
+				qp->cpucheck_cmpl_io[j] = 0;
+			}
 		}
 		return strlen(pbuf);
 	}
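+	/*
+	 * Keywords handled by this writer include "on", the new "nvme_on"
+	 * and "scsi_on" selectors, "rcv", and "zero" (which clears the
+	 * per-HDWQ counters above).
+	 */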
@@ -2624,17 +3129,17 @@
 	switch (count) {
 	case SIZE_U8: /* byte (8 bits) */
 		pci_read_config_byte(pdev, where, &u8val);
-		len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
 				"%03x: %02x\n", where, u8val);
 		break;
 	case SIZE_U16: /* word (16 bits) */
 		pci_read_config_word(pdev, where, &u16val);
-		len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
 				"%03x: %04x\n", where, u16val);
 		break;
 	case SIZE_U32: /* double word (32 bits) */
 		pci_read_config_dword(pdev, where, &u32val);
-		len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
 				"%03x: %08x\n", where, u32val);
 		break;
 	case LPFC_PCI_CFG_BROWSE: /* browse all */
@@ -2654,25 +3159,25 @@
 	offset = offset_label;
 
 	/* Read PCI config space */
-	len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
 			"%03x: ", offset_label);
 	while (index > 0) {
 		pci_read_config_dword(pdev, offset, &u32val);
-		len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
 				"%08x ", u32val);
 		offset += sizeof(uint32_t);
 		if (offset >= LPFC_PCI_CFG_SIZE) {
-			len += snprintf(pbuffer+len,
+			len += scnprintf(pbuffer+len,
 					LPFC_PCI_CFG_SIZE-len, "\n");
 			break;
 		}
 		index -= sizeof(uint32_t);
 		if (!index)
-			len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+			len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
 					"\n");
 		else if (!(index % (8 * sizeof(uint32_t)))) {
 			offset_label += (8 * sizeof(uint32_t));
-			len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+			len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
 					"\n%03x: ", offset_label);
 		}
 	}
@@ -2943,7 +3448,7 @@
 	if (acc_range == SINGLE_WORD) {
 		offset_run = offset;
 		u32val = readl(mem_mapped_bar + offset_run);
-		len += snprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
 				"%05x: %08x\n", offset_run, u32val);
 	} else
 		goto baracc_browse;
@@ -2957,35 +3462,35 @@
 	offset_run = offset_label;
 
 	/* Read PCI bar memory mapped space */
-	len += snprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
 			"%05x: ", offset_label);
 	index = LPFC_PCI_BAR_RD_SIZE;
 	while (index > 0) {
 		u32val = readl(mem_mapped_bar + offset_run);
-		len += snprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
 				"%08x ", u32val);
 		offset_run += sizeof(uint32_t);
 		if (acc_range == LPFC_PCI_BAR_BROWSE) {
 			if (offset_run >= bar_size) {
-				len += snprintf(pbuffer+len,
+				len += scnprintf(pbuffer+len,
 					LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n");
 				break;
 			}
 		} else {
 			if (offset_run >= offset +
 			    (acc_range * sizeof(uint32_t))) {
-				len += snprintf(pbuffer+len,
+				len += scnprintf(pbuffer+len,
 					LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n");
 				break;
 			}
 		}
 		index -= sizeof(uint32_t);
 		if (!index)
-			len += snprintf(pbuffer+len,
+			len += scnprintf(pbuffer+len,
 					LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n");
 		else if (!(index % (8 * sizeof(uint32_t)))) {
 			offset_label += (8 * sizeof(uint32_t));
-			len += snprintf(pbuffer+len,
+			len += scnprintf(pbuffer+len,
 					LPFC_PCI_BAR_RD_BUF_SIZE-len,
 					"\n%05x: ", offset_label);
 		}
@@ -3158,19 +3663,19 @@
 	if (!qp)
 		return len;
 
-	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+	len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
 			"\t\t%s WQ info: ", wqtype);
-	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+	len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
 			"AssocCQID[%04d]: WQ-STAT[oflow:x%x posted:x%llx]\n",
 			qp->assoc_qid, qp->q_cnt_1,
 			(unsigned long long)qp->q_cnt_4);
-	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+	len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
 			"\t\tWQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
-			"HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]",
+			"HST-IDX[%04d], PRT-IDX[%04d], NTFI[%03d]",
 			qp->queue_id, qp->entry_count,
 			qp->entry_size, qp->host_index,
-			qp->hba_index, qp->entry_repost);
-	len +=  snprintf(pbuffer + len,
+			qp->hba_index, qp->notify_interval);
+	len +=  scnprintf(pbuffer + len,
 			LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n");
 	return len;
 }
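+
+/*
+ * Note the report format change: the PST (entry_repost) column becomes
+ * NTFI (notify_interval) in these queue dumps; sample line (values
+ * illustrative):
+ *
+ *   WQID[04], QE-CNT[0256], QE-SZ[0064], HST-IDX[0003], PRT-IDX[0003], NTFI[016]
+ */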
@@ -3182,16 +3687,8 @@
 	struct lpfc_queue *qp;
 	int qidx;
 
-	for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) {
-		qp = phba->sli4_hba.fcp_wq[qidx];
-		if (qp->assoc_qid != cq_id)
-			continue;
-		*len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len);
-		if (*len >= max_cnt)
-			return 1;
-	}
-	for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) {
-		qp = phba->sli4_hba.nvme_wq[qidx];
+	for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
+		qp = phba->sli4_hba.hdwq[qidx].io_wq;
 		if (qp->assoc_qid != cq_id)
 			continue;
 		*len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len);
@@ -3208,21 +3705,22 @@
 	if (!qp)
 		return len;
 
-	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+	len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
 			"\t%s CQ info: ", cqtype);
-	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+	len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
 			"AssocEQID[%02d]: CQ STAT[max:x%x relw:x%x "
 			"xabt:x%x wq:x%llx]\n",
 			qp->assoc_qid, qp->q_cnt_1, qp->q_cnt_2,
 			qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
-	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+	len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
 			"\tCQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
-			"HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]",
+			"HST-IDX[%04d], NTFI[%03d], PLMT[%03d]",
 			qp->queue_id, qp->entry_count,
 			qp->entry_size, qp->host_index,
-			qp->hba_index, qp->entry_repost);
+			qp->notify_interval, qp->max_proc_limit);
 
-	len +=  snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n");
+	len +=  scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+			"\n");
 
 	return len;
 }
@@ -3234,24 +3732,24 @@
 	if (!qp || !datqp)
 		return len;
 
-	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+	len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
 			"\t\t%s RQ info: ", rqtype);
-	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+	len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
 			"AssocCQID[%02d]: RQ-STAT[nopost:x%x nobuf:x%x "
 			"posted:x%x rcv:x%llx]\n",
 			qp->assoc_qid, qp->q_cnt_1, qp->q_cnt_2,
 			qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
-	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+	len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
 			"\t\tHQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
-			"HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]\n",
+			"HST-IDX[%04d], PRT-IDX[%04d], NTFI[%03d]\n",
 			qp->queue_id, qp->entry_count, qp->entry_size,
-			qp->host_index, qp->hba_index, qp->entry_repost);
-	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+			qp->host_index, qp->hba_index, qp->notify_interval);
+	len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
 			"\t\tDQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
-			"HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]\n",
+			"HST-IDX[%04d], PRT-IDX[%04d], NTFI[%03d]\n",
 			datqp->queue_id, datqp->entry_count,
 			datqp->entry_size, datqp->host_index,
-			datqp->hba_index, datqp->entry_repost);
+			datqp->hba_index, datqp->notify_interval);
 	return len;
 }
 
@@ -3260,45 +3758,22 @@
 		int *len, int max_cnt, int eqidx, int eq_id)
 {
 	struct lpfc_queue *qp;
-	int qidx, rc;
+	int rc;
 
-	for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) {
-		qp = phba->sli4_hba.fcp_cq[qidx];
-		if (qp->assoc_qid != eq_id)
-			continue;
+	qp = phba->sli4_hba.hdwq[eqidx].io_cq;
 
-		*len = __lpfc_idiag_print_cq(qp, "FCP", pbuffer, *len);
+	*len = __lpfc_idiag_print_cq(qp, "IO", pbuffer, *len);
 
-		/* Reset max counter */
-		qp->CQ_max_cqe = 0;
+	/* Reset max counter */
+	qp->CQ_max_cqe = 0;
 
-		if (*len >= max_cnt)
-			return 1;
+	if (*len >= max_cnt)
+		return 1;
 
-		rc = lpfc_idiag_wqs_for_cq(phba, "FCP", pbuffer, len,
-				max_cnt, qp->queue_id);
-		if (rc)
-			return 1;
-	}
-
-	for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) {
-		qp = phba->sli4_hba.nvme_cq[qidx];
-		if (qp->assoc_qid != eq_id)
-			continue;
-
-		*len = __lpfc_idiag_print_cq(qp, "NVME", pbuffer, *len);
-
-		/* Reset max counter */
-		qp->CQ_max_cqe = 0;
-
-		if (*len >= max_cnt)
-			return 1;
-
-		rc = lpfc_idiag_wqs_for_cq(phba, "NVME", pbuffer, len,
-				max_cnt, qp->queue_id);
-		if (rc)
-			return 1;
-	}
+	rc = lpfc_idiag_wqs_for_cq(phba, "IO", pbuffer, len,
+				   max_cnt, qp->queue_id);
+	if (rc)
+		return 1;
 
 	if ((eqidx < phba->cfg_nvmet_mrq) && phba->nvmet_support) {
 		/* NVMET CQset */
@@ -3331,17 +3806,19 @@
 	if (!qp)
 		return len;
 
-	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+	len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
 			"\n%s EQ info: EQ-STAT[max:x%x noE:x%x "
 			"cqe_proc:x%x eqe_proc:x%llx eqd %d]\n",
 			eqtype, qp->q_cnt_1, qp->q_cnt_2, qp->q_cnt_3,
 			(unsigned long long)qp->q_cnt_4, qp->q_mode);
-	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+	len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
 			"EQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
-			"HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]",
+			"HST-IDX[%04d], NTFI[%03d], PLMT[%03d], AFFIN[%03d]",
 			qp->queue_id, qp->entry_count, qp->entry_size,
-			qp->host_index, qp->hba_index, qp->entry_repost);
-	len +=  snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n");
+			qp->host_index, qp->notify_interval,
+			qp->max_proc_limit, qp->chann);
+	len +=  scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+			"\n");
 
 	return len;
 }
@@ -3387,24 +3864,20 @@
 	spin_lock_irq(&phba->hbalock);
 
 	/* Fast-path event queue */
-	if (phba->sli4_hba.hba_eq && phba->io_channel_irqs) {
+	if (phba->sli4_hba.hdwq && phba->cfg_hdw_queue) {
 
 		x = phba->lpfc_idiag_last_eq;
-		if (phba->cfg_fof && (x >= phba->io_channel_irqs)) {
-			phba->lpfc_idiag_last_eq = 0;
-			goto fof;
-		}
 		phba->lpfc_idiag_last_eq++;
-		if (phba->lpfc_idiag_last_eq >= phba->io_channel_irqs)
-			if (phba->cfg_fof == 0)
-				phba->lpfc_idiag_last_eq = 0;
+		if (phba->lpfc_idiag_last_eq >= phba->cfg_hdw_queue)
+			phba->lpfc_idiag_last_eq = 0;
 
-		len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
-					"EQ %d out of %d HBA EQs\n",
-					x, phba->io_channel_irqs);
+		len += scnprintf(pbuffer + len,
+				 LPFC_QUE_INFO_GET_BUF_SIZE - len,
+				 "HDWQ %d out of %d HBA HDWQs\n",
+				 x, phba->cfg_hdw_queue);
 
 		/* Fast-path EQ */
-		qp = phba->sli4_hba.hba_eq[x];
+		qp = phba->sli4_hba.hdwq[x].hba_eq;
 		if (!qp)
 			goto out;
 
@@ -3479,40 +3952,11 @@
 		goto out;
 	}
 
-fof:
-	if (phba->cfg_fof) {
-		/* FOF EQ */
-		qp = phba->sli4_hba.fof_eq;
-		len = __lpfc_idiag_print_eq(qp, "FOF", pbuffer, len);
-
-		/* Reset max counter */
-		if (qp)
-			qp->EQ_max_eqe = 0;
-
-		if (len >= max_cnt)
-			goto too_big;
-
-		/* OAS CQ */
-		qp = phba->sli4_hba.oas_cq;
-		len = __lpfc_idiag_print_cq(qp, "OAS", pbuffer, len);
-		/* Reset max counter */
-		if (qp)
-			qp->CQ_max_cqe = 0;
-		if (len >= max_cnt)
-			goto too_big;
-
-		/* OAS WQ */
-		qp = phba->sli4_hba.oas_wq;
-		len = __lpfc_idiag_print_wq(qp, "OAS", pbuffer, len);
-		if (len >= max_cnt)
-			goto too_big;
-	}
-
 	spin_unlock_irq(&phba->hbalock);
 	return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
 
 too_big:
-	len +=  snprintf(pbuffer + len,
+	len +=  scnprintf(pbuffer + len,
 		LPFC_QUE_INFO_GET_BUF_SIZE - len, "Truncated ...\n");
 out:
 	spin_unlock_irq(&phba->hbalock);
@@ -3568,22 +4012,22 @@
 		return 0;
 
 	esize = pque->entry_size;
-	len += snprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len,
 			"QE-INDEX[%04d]:\n", index);
 
 	offset = 0;
-	pentry = pque->qe[index].address;
+	pentry = lpfc_sli4_qe(pque, index);
 	while (esize > 0) {
-		len += snprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len,
 				"%08x ", *pentry);
 		pentry++;
 		offset += sizeof(uint32_t);
 		esize -= sizeof(uint32_t);
 		if (esize > 0 && !(offset % (4 * sizeof(uint32_t))))
-			len += snprintf(pbuffer+len,
+			len += scnprintf(pbuffer+len,
 					LPFC_QUE_ACC_BUF_SIZE-len, "\n");
 	}
-	len += snprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len, "\n");
+	len += scnprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len, "\n");
 
 	return len;
 }
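+
+/*
+ * Queue entries are now reached through the lpfc_sli4_qe() accessor
+ * rather than the old direct pque->qe[index].address lookup, matching
+ * the queue-structure rework elsewhere in this update.
+ */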
@@ -3725,9 +4169,9 @@
 	switch (quetp) {
 	case LPFC_IDIAG_EQ:
 		/* HBA event queue */
-		if (phba->sli4_hba.hba_eq) {
-			for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) {
-				qp = phba->sli4_hba.hba_eq[qidx];
+		if (phba->sli4_hba.hdwq) {
+			for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
+				qp = phba->sli4_hba.hdwq[qidx].hba_eq;
 				if (qp && qp->queue_id == queid) {
 					/* Sanity check */
 					rc = lpfc_idiag_que_param_check(qp,
@@ -3776,10 +4220,10 @@
 			goto pass_check;
 		}
 		/* FCP complete queue */
-		if (phba->sli4_hba.fcp_cq) {
-			for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
+		if (phba->sli4_hba.hdwq) {
+			for (qidx = 0; qidx < phba->cfg_hdw_queue;
 								qidx++) {
-				qp = phba->sli4_hba.fcp_cq[qidx];
+				qp = phba->sli4_hba.hdwq[qidx].io_cq;
 				if (qp && qp->queue_id == queid) {
 					/* Sanity check */
 					rc = lpfc_idiag_que_param_check(
@@ -3791,25 +4235,6 @@
 				}
 			}
 		}
-		/* NVME complete queue */
-		if (phba->sli4_hba.nvme_cq) {
-			qidx = 0;
-			do {
-				if (phba->sli4_hba.nvme_cq[qidx] &&
-				    phba->sli4_hba.nvme_cq[qidx]->queue_id ==
-				    queid) {
-					/* Sanity check */
-					rc = lpfc_idiag_que_param_check(
-						phba->sli4_hba.nvme_cq[qidx],
-						index, count);
-					if (rc)
-						goto error_out;
-					idiag.ptr_private =
-						phba->sli4_hba.nvme_cq[qidx];
-					goto pass_check;
-				}
-			} while (++qidx < phba->cfg_nvme_io_channel);
-		}
 		goto error_out;
 		break;
 	case LPFC_IDIAG_MQ:
@@ -3849,27 +4274,11 @@
 			idiag.ptr_private = phba->sli4_hba.nvmels_wq;
 			goto pass_check;
 		}
-		/* FCP work queue */
-		if (phba->sli4_hba.fcp_wq) {
-			for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
-								qidx++) {
-				qp = phba->sli4_hba.fcp_wq[qidx];
-				if (qp && qp->queue_id == queid) {
-					/* Sanity check */
-					rc = lpfc_idiag_que_param_check(
-						qp, index, count);
-					if (rc)
-						goto error_out;
-					idiag.ptr_private = qp;
-					goto pass_check;
-				}
-			}
-		}
-		/* NVME work queue */
-		if (phba->sli4_hba.nvme_wq) {
-			for (qidx = 0; qidx < phba->cfg_nvme_io_channel;
-								qidx++) {
-				qp = phba->sli4_hba.nvme_wq[qidx];
+
+		if (phba->sli4_hba.hdwq) {
+			/* FCP/SCSI work queue */
+			for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
+				qp = phba->sli4_hba.hdwq[qidx].io_wq;
 				if (qp && qp->queue_id == queid) {
 					/* Sanity check */
 					rc = lpfc_idiag_que_param_check(
@@ -3882,26 +4291,6 @@
 			}
 		}
 
-		/* NVME work queues */
-		if (phba->sli4_hba.nvme_wq) {
-			for (qidx = 0; qidx < phba->cfg_nvme_io_channel;
-				qidx++) {
-				if (!phba->sli4_hba.nvme_wq[qidx])
-					continue;
-				if (phba->sli4_hba.nvme_wq[qidx]->queue_id ==
-				    queid) {
-					/* Sanity check */
-					rc = lpfc_idiag_que_param_check(
-						phba->sli4_hba.nvme_wq[qidx],
-						index, count);
-					if (rc)
-						goto error_out;
-					idiag.ptr_private =
-						phba->sli4_hba.nvme_wq[qidx];
-					goto pass_check;
-				}
-			}
-		}
 		goto error_out;
 		break;
 	case LPFC_IDIAG_RQ:
@@ -3948,7 +4337,7 @@
 		pque = (struct lpfc_queue *)idiag.ptr_private;
 		if (offset > pque->entry_size/sizeof(uint32_t) - 1)
 			goto error_out;
-		pentry = pque->qe[index].address;
+		pentry = lpfc_sli4_qe(pque, index);
 		pentry += offset;
 		if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_WR)
 			*pentry = value;
@@ -3969,7 +4358,7 @@
  * lpfc_idiag_drbacc_read_reg - idiag debugfs read a doorbell register
  * @phba: The pointer to hba structure.
  * @pbuffer: The pointer to the buffer to copy the data to.
- * @len: The lenght of bytes to copied.
+ * @len: The length in bytes to be copied.
  * @drbregid: The id to doorbell registers.
  *
  * Description:
@@ -3989,27 +4378,27 @@
 
 	switch (drbregid) {
 	case LPFC_DRB_EQ:
-		len += snprintf(pbuffer + len, LPFC_DRB_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer + len, LPFC_DRB_ACC_BUF_SIZE-len,
 				"EQ-DRB-REG: 0x%08x\n",
 				readl(phba->sli4_hba.EQDBregaddr));
 		break;
 	case LPFC_DRB_CQ:
-		len += snprintf(pbuffer + len, LPFC_DRB_ACC_BUF_SIZE - len,
+		len += scnprintf(pbuffer + len, LPFC_DRB_ACC_BUF_SIZE - len,
 				"CQ-DRB-REG: 0x%08x\n",
 				readl(phba->sli4_hba.CQDBregaddr));
 		break;
 	case LPFC_DRB_MQ:
-		len += snprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
 				"MQ-DRB-REG:   0x%08x\n",
 				readl(phba->sli4_hba.MQDBregaddr));
 		break;
 	case LPFC_DRB_WQ:
-		len += snprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
 				"WQ-DRB-REG:   0x%08x\n",
 				readl(phba->sli4_hba.WQDBregaddr));
 		break;
 	case LPFC_DRB_RQ:
-		len += snprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
 				"RQ-DRB-REG:   0x%08x\n",
 				readl(phba->sli4_hba.RQDBregaddr));
 		break;
@@ -4179,7 +4568,7 @@
  * lpfc_idiag_ctlacc_read_reg - idiag debugfs read a control registers
  * @phba: The pointer to hba structure.
  * @pbuffer: The pointer to the buffer to copy the data to.
- * @len: The lenght of bytes to copied.
+ * @len: The length in bytes to be copied.
  * @drbregid: The id to doorbell registers.
  *
  * Description:
@@ -4199,37 +4588,37 @@
 
 	switch (ctlregid) {
 	case LPFC_CTL_PORT_SEM:
-		len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
 				"Port SemReg:   0x%08x\n",
 				readl(phba->sli4_hba.conf_regs_memmap_p +
 				      LPFC_CTL_PORT_SEM_OFFSET));
 		break;
 	case LPFC_CTL_PORT_STA:
-		len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
 				"Port StaReg:   0x%08x\n",
 				readl(phba->sli4_hba.conf_regs_memmap_p +
 				      LPFC_CTL_PORT_STA_OFFSET));
 		break;
 	case LPFC_CTL_PORT_CTL:
-		len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
 				"Port CtlReg:   0x%08x\n",
 				readl(phba->sli4_hba.conf_regs_memmap_p +
 				      LPFC_CTL_PORT_CTL_OFFSET));
 		break;
 	case LPFC_CTL_PORT_ER1:
-		len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
 				"Port Er1Reg:   0x%08x\n",
 				readl(phba->sli4_hba.conf_regs_memmap_p +
 				      LPFC_CTL_PORT_ER1_OFFSET));
 		break;
 	case LPFC_CTL_PORT_ER2:
-		len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
 				"Port Er2Reg:   0x%08x\n",
 				readl(phba->sli4_hba.conf_regs_memmap_p +
 				      LPFC_CTL_PORT_ER2_OFFSET));
 		break;
 	case LPFC_CTL_PDEV_CTL:
-		len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
 				"PDev CtlReg:   0x%08x\n",
 				readl(phba->sli4_hba.conf_regs_memmap_p +
 				      LPFC_CTL_PDEV_CTL_OFFSET));
@@ -4422,13 +4811,13 @@
 	mbx_dump_cnt = idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX];
 	mbx_word_cnt = idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX];
 
-	len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
 			"mbx_dump_map: 0x%08x\n", mbx_dump_map);
-	len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
 			"mbx_dump_cnt: %04d\n", mbx_dump_cnt);
-	len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
 			"mbx_word_cnt: %04d\n", mbx_word_cnt);
-	len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
 			"mbx_mbox_cmd: 0x%02x\n", mbx_mbox_cmd);
 
 	return len;
@@ -4577,35 +4966,35 @@
 {
 	uint16_t ext_cnt, ext_size;
 
-	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 			"\nAvailable Extents Information:\n");
 
-	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 			"\tPort Available VPI extents: ");
 	lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_VPI,
 				       &ext_cnt, &ext_size);
-	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 			"Count %3d, Size %3d\n", ext_cnt, ext_size);
 
-	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 			"\tPort Available VFI extents: ");
 	lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_VFI,
 				       &ext_cnt, &ext_size);
-	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 			"Count %3d, Size %3d\n", ext_cnt, ext_size);
 
-	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 			"\tPort Available RPI extents: ");
 	lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_RPI,
 				       &ext_cnt, &ext_size);
-	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 			"Count %3d, Size %3d\n", ext_cnt, ext_size);
 
-	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 			"\tPort Available XRI extents: ");
 	lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
 				       &ext_cnt, &ext_size);
-	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 			"Count %3d, Size %3d\n", ext_cnt, ext_size);
 
 	return len;
@@ -4629,55 +5018,55 @@
 	uint16_t ext_cnt, ext_size;
 	int rc;
 
-	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 			"\nAllocated Extents Information:\n");
 
-	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 			"\tHost Allocated VPI extents: ");
 	rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_VPI,
 					    &ext_cnt, &ext_size);
 	if (!rc)
-		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 				"Port %d Extent %3d, Size %3d\n",
 				phba->brd_no, ext_cnt, ext_size);
 	else
-		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 				"N/A\n");
 
-	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 			"\tHost Allocated VFI extents: ");
 	rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_VFI,
 					    &ext_cnt, &ext_size);
 	if (!rc)
-		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 				"Port %d Extent %3d, Size %3d\n",
 				phba->brd_no, ext_cnt, ext_size);
 	else
-		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 				"N/A\n");
 
-	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 			"\tHost Allocated RPI extents: ");
 	rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_RPI,
 					    &ext_cnt, &ext_size);
 	if (!rc)
-		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 				"Port %d Extent %3d, Size %3d\n",
 				phba->brd_no, ext_cnt, ext_size);
 	else
-		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 				"N/A\n");
 
-	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 			"\tHost Allocated XRI extents: ");
 	rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_XRI,
 					    &ext_cnt, &ext_size);
 	if (!rc)
-		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 				"Port %d Extent %3d, Size %3d\n",
 				phba->brd_no, ext_cnt, ext_size);
 	else
-		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 				"N/A\n");
 
 	return len;
@@ -4701,49 +5090,49 @@
 	struct lpfc_rsrc_blks *rsrc_blks;
 	int index;
 
-	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 			"\nDriver Extents Information:\n");
 
-	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 			"\tVPI extents:\n");
 	index = 0;
 	list_for_each_entry(rsrc_blks, &phba->lpfc_vpi_blk_list, list) {
-		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 				"\t\tBlock %3d: Start %4d, Count %4d\n",
 				index, rsrc_blks->rsrc_start,
 				rsrc_blks->rsrc_size);
 		index++;
 	}
-	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 			"\tVFI extents:\n");
 	index = 0;
 	list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_vfi_blk_list,
 			    list) {
-		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 				"\t\tBlock %3d: Start %4d, Count %4d\n",
 				index, rsrc_blks->rsrc_start,
 				rsrc_blks->rsrc_size);
 		index++;
 	}
 
-	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 			"\tRPI extents:\n");
 	index = 0;
 	list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_rpi_blk_list,
 			    list) {
-		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 				"\t\tBlock %3d: Start %4d, Count %4d\n",
 				index, rsrc_blks->rsrc_start,
 				rsrc_blks->rsrc_size);
 		index++;
 	}
 
-	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+	len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 			"\tXRI extents:\n");
 	index = 0;
 	list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_xri_blk_list,
 			    list) {
-		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+		len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
 				"\t\tBlock %3d: Start %4d, Count %4d\n",
 				index, rsrc_blks->rsrc_start,
 				rsrc_blks->rsrc_size);
@@ -4866,6 +5255,16 @@
 	.release =      lpfc_debugfs_release,
 };
 
+#undef lpfc_debugfs_op_multixripools
+static const struct file_operations lpfc_debugfs_op_multixripools = {
+	.owner =        THIS_MODULE,
+	.open =         lpfc_debugfs_multixripools_open,
+	.llseek =       lpfc_debugfs_lseek,
+	.read =         lpfc_debugfs_read,
+	.write =	lpfc_debugfs_multixripools_write,
+	.release =      lpfc_debugfs_release,
+};
+
 #undef lpfc_debugfs_op_hbqinfo
 static const struct file_operations lpfc_debugfs_op_hbqinfo = {
 	.owner =        THIS_MODULE,
@@ -4875,6 +5274,18 @@
 	.release =      lpfc_debugfs_release,
 };
 
+#ifdef LPFC_HDWQ_LOCK_STAT
+#undef lpfc_debugfs_op_lockstat
+static const struct file_operations lpfc_debugfs_op_lockstat = {
+	.owner =        THIS_MODULE,
+	.open =         lpfc_debugfs_lockstat_open,
+	.llseek =       lpfc_debugfs_lseek,
+	.read =         lpfc_debugfs_read,
+	.write =        lpfc_debugfs_lockstat_write,
+	.release =      lpfc_debugfs_release,
+};
+#endif
+
 #undef lpfc_debugfs_op_dumpHBASlim
 static const struct file_operations lpfc_debugfs_op_dumpHBASlim = {
 	.owner =        THIS_MODULE,
@@ -4903,6 +5314,16 @@
 	.release =      lpfc_debugfs_release,
 };
 
+#undef lpfc_debugfs_op_scsistat
+static const struct file_operations lpfc_debugfs_op_scsistat = {
+	.owner =        THIS_MODULE,
+	.open =         lpfc_debugfs_scsistat_open,
+	.llseek =       lpfc_debugfs_lseek,
+	.read =         lpfc_debugfs_read,
+	.write =	lpfc_debugfs_scsistat_write,
+	.release =      lpfc_debugfs_release,
+};
+
 #undef lpfc_debugfs_op_nvmektime
 static const struct file_operations lpfc_debugfs_op_nvmektime = {
 	.owner =        THIS_MODULE,
@@ -4933,26 +5354,6 @@
 	.release =      lpfc_debugfs_release,
 };
 
-#undef lpfc_debugfs_op_dumpData
-static const struct file_operations lpfc_debugfs_op_dumpData = {
-	.owner =        THIS_MODULE,
-	.open =         lpfc_debugfs_dumpData_open,
-	.llseek =       lpfc_debugfs_lseek,
-	.read =         lpfc_debugfs_read,
-	.write =	lpfc_debugfs_dumpDataDif_write,
-	.release =      lpfc_debugfs_dumpDataDif_release,
-};
-
-#undef lpfc_debugfs_op_dumpDif
-static const struct file_operations lpfc_debugfs_op_dumpDif = {
-	.owner =        THIS_MODULE,
-	.open =         lpfc_debugfs_dumpDif_open,
-	.llseek =       lpfc_debugfs_lseek,
-	.read =         lpfc_debugfs_read,
-	.write =	lpfc_debugfs_dumpDataDif_write,
-	.release =      lpfc_debugfs_dumpDataDif_release,
-};
-
 #undef lpfc_debugfs_op_dif_err
 static const struct file_operations lpfc_debugfs_op_dif_err = {
 	.owner =	THIS_MODULE,
@@ -5137,11 +5538,11 @@
 				if (i != 0)
 					pr_err("%s\n", line_buf);
 				len = 0;
-				len += snprintf(line_buf+len,
+				len += scnprintf(line_buf+len,
 						LPFC_MBX_ACC_LBUF_SZ-len,
 						"%03d: ", i);
 			}
-			len += snprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len,
+			len += scnprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len,
 					"%08x ", (uint32_t)*pword);
 			pword++;
 		}
@@ -5204,11 +5605,11 @@
 					pr_err("%s\n", line_buf);
 				len = 0;
 				memset(line_buf, 0, LPFC_MBX_ACC_LBUF_SZ);
-				len += snprintf(line_buf+len,
+				len += scnprintf(line_buf+len,
 						LPFC_MBX_ACC_LBUF_SZ-len,
 						"%03d: ", i);
 			}
-			len += snprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len,
+			len += scnprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len,
 					"%08x ",
 					((uint32_t)*pword) & 0xffffffff);
 			pword++;
@@ -5227,18 +5628,18 @@
 					pr_err("%s\n", line_buf);
 				len = 0;
 				memset(line_buf, 0, LPFC_MBX_ACC_LBUF_SZ);
-				len += snprintf(line_buf+len,
+				len += scnprintf(line_buf+len,
 						LPFC_MBX_ACC_LBUF_SZ-len,
 						"%03d: ", i);
 			}
 			for (j = 0; j < 4; j++) {
-				len += snprintf(line_buf+len,
+				len += scnprintf(line_buf+len,
 						LPFC_MBX_ACC_LBUF_SZ-len,
 						"%02x",
 						((uint8_t)*pbyte) & 0xff);
 				pbyte++;
 			}
-			len += snprintf(line_buf+len,
+			len += scnprintf(line_buf+len,
 					LPFC_MBX_ACC_LBUF_SZ-len, " ");
 		}
 		if ((i - 1) % 8)
@@ -5280,11 +5681,6 @@
 	if (!lpfc_debugfs_root) {
 		lpfc_debugfs_root = debugfs_create_dir("lpfc", NULL);
 		atomic_set(&lpfc_debugfs_hba_count, 0);
-		if (!lpfc_debugfs_root) {
-			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-					 "0408 Cannot create debugfs root\n");
-			goto debug_failed;
-		}
 	}
 	if (!lpfc_debugfs_start_time)
 		lpfc_debugfs_start_time = jiffies;
@@ -5295,25 +5691,42 @@
 		pport_setup = true;
 		phba->hba_debugfs_root =
 			debugfs_create_dir(name, lpfc_debugfs_root);
-		if (!phba->hba_debugfs_root) {
-			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-					 "0412 Cannot create debugfs hba\n");
-			goto debug_failed;
-		}
 		atomic_inc(&lpfc_debugfs_hba_count);
 		atomic_set(&phba->debugfs_vport_count, 0);
 
+		/* Multi-XRI pools */
+		snprintf(name, sizeof(name), "multixripools");
+		phba->debug_multixri_pools =
+			debugfs_create_file(name, S_IFREG | 0644,
+					    phba->hba_debugfs_root,
+					    phba,
+					    &lpfc_debugfs_op_multixripools);
+		if (!phba->debug_multixri_pools) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+					 "0527 Cannot create debugfs multixripools\n");
+			goto debug_failed;
+		}
+
 		/* Setup hbqinfo */
 		snprintf(name, sizeof(name), "hbqinfo");
 		phba->debug_hbqinfo =
-			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
-				 phba->hba_debugfs_root,
-				 phba, &lpfc_debugfs_op_hbqinfo);
-		if (!phba->debug_hbqinfo) {
+			debugfs_create_file(name, S_IFREG | 0644,
+					    phba->hba_debugfs_root,
+					    phba, &lpfc_debugfs_op_hbqinfo);
+
+#ifdef LPFC_HDWQ_LOCK_STAT
+		/* Setup lockstat */
+		snprintf(name, sizeof(name), "lockstat");
+		phba->debug_lockstat =
+			debugfs_create_file(name, S_IFREG | 0644,
+					    phba->hba_debugfs_root,
+					    phba, &lpfc_debugfs_op_lockstat);
+		if (!phba->debug_lockstat) {
 			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-				"0411 Cannot create debugfs hbqinfo\n");
+					 "4610 Cannot create debugfs lockstat\n");
+			goto debug_failed;
 		}
+#endif
 
 		/* Setup dumpHBASlim */
 		if (phba->sli_rev < LPFC_SLI_REV4) {
@@ -5323,12 +5736,6 @@
 					S_IFREG|S_IRUGO|S_IWUSR,
 					phba->hba_debugfs_root,
 					phba, &lpfc_debugfs_op_dumpHBASlim);
-			if (!phba->debug_dumpHBASlim) {
-				lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-						 "0413 Cannot create debugfs "
-						"dumpHBASlim\n");
-				goto debug_failed;
-			}
 		} else
 			phba->debug_dumpHBASlim = NULL;
 
@@ -5340,50 +5747,15 @@
 					S_IFREG|S_IRUGO|S_IWUSR,
 					phba->hba_debugfs_root,
 					phba, &lpfc_debugfs_op_dumpHostSlim);
-			if (!phba->debug_dumpHostSlim) {
-				lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-						 "0414 Cannot create debugfs "
-						 "dumpHostSlim\n");
-				goto debug_failed;
-			}
 		} else
 			phba->debug_dumpHostSlim = NULL;
 
-		/* Setup dumpData */
-		snprintf(name, sizeof(name), "dumpData");
-		phba->debug_dumpData =
-			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
-				 phba->hba_debugfs_root,
-				 phba, &lpfc_debugfs_op_dumpData);
-		if (!phba->debug_dumpData) {
-			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-				"0800 Cannot create debugfs dumpData\n");
-			goto debug_failed;
-		}
-
-		/* Setup dumpDif */
-		snprintf(name, sizeof(name), "dumpDif");
-		phba->debug_dumpDif =
-			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
-				 phba->hba_debugfs_root,
-				 phba, &lpfc_debugfs_op_dumpDif);
-		if (!phba->debug_dumpDif) {
-			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-				"0801 Cannot create debugfs dumpDif\n");
-			goto debug_failed;
-		}
-
 		/* Setup DIF Error Injections */
 		snprintf(name, sizeof(name), "InjErrLBA");
 		phba->debug_InjErrLBA =
 			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
 			phba->hba_debugfs_root,
 			phba, &lpfc_debugfs_op_dif_err);
-		if (!phba->debug_InjErrLBA) {
-			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-				"0807 Cannot create debugfs InjErrLBA\n");
-			goto debug_failed;
-		}
 		phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
 
 		snprintf(name, sizeof(name), "InjErrNPortID");
@@ -5391,88 +5763,48 @@
 			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
 			phba->hba_debugfs_root,
 			phba, &lpfc_debugfs_op_dif_err);
-		if (!phba->debug_InjErrNPortID) {
-			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-				"0809 Cannot create debugfs InjErrNPortID\n");
-			goto debug_failed;
-		}
 
 		snprintf(name, sizeof(name), "InjErrWWPN");
 		phba->debug_InjErrWWPN =
 			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
 			phba->hba_debugfs_root,
 			phba, &lpfc_debugfs_op_dif_err);
-		if (!phba->debug_InjErrWWPN) {
-			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-				"0810 Cannot create debugfs InjErrWWPN\n");
-			goto debug_failed;
-		}
 
 		snprintf(name, sizeof(name), "writeGuardInjErr");
 		phba->debug_writeGuard =
 			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
 			phba->hba_debugfs_root,
 			phba, &lpfc_debugfs_op_dif_err);
-		if (!phba->debug_writeGuard) {
-			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-				"0802 Cannot create debugfs writeGuard\n");
-			goto debug_failed;
-		}
 
 		snprintf(name, sizeof(name), "writeAppInjErr");
 		phba->debug_writeApp =
 			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
 			phba->hba_debugfs_root,
 			phba, &lpfc_debugfs_op_dif_err);
-		if (!phba->debug_writeApp) {
-			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-				"0803 Cannot create debugfs writeApp\n");
-			goto debug_failed;
-		}
 
 		snprintf(name, sizeof(name), "writeRefInjErr");
 		phba->debug_writeRef =
 			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
 			phba->hba_debugfs_root,
 			phba, &lpfc_debugfs_op_dif_err);
-		if (!phba->debug_writeRef) {
-			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-				"0804 Cannot create debugfs writeRef\n");
-			goto debug_failed;
-		}
 
 		snprintf(name, sizeof(name), "readGuardInjErr");
 		phba->debug_readGuard =
 			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
 			phba->hba_debugfs_root,
 			phba, &lpfc_debugfs_op_dif_err);
-		if (!phba->debug_readGuard) {
-			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-				"0808 Cannot create debugfs readGuard\n");
-			goto debug_failed;
-		}
 
 		snprintf(name, sizeof(name), "readAppInjErr");
 		phba->debug_readApp =
 			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
 			phba->hba_debugfs_root,
 			phba, &lpfc_debugfs_op_dif_err);
-		if (!phba->debug_readApp) {
-			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-				"0805 Cannot create debugfs readApp\n");
-			goto debug_failed;
-		}
 
 		snprintf(name, sizeof(name), "readRefInjErr");
 		phba->debug_readRef =
 			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
 			phba->hba_debugfs_root,
 			phba, &lpfc_debugfs_op_dif_err);
-		if (!phba->debug_readRef) {
-			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-				"0806 Cannot create debugfs readApp\n");
-			goto debug_failed;
-		}
 
 		/* Setup slow ring trace */
 		if (lpfc_debugfs_max_slow_ring_trc) {
@@ -5496,12 +5828,6 @@
 			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
 				 phba->hba_debugfs_root,
 				 phba, &lpfc_debugfs_op_slow_ring_trc);
-		if (!phba->debug_slow_ring_trc) {
-			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-					 "0415 Cannot create debugfs "
-					 "slow_ring_trace\n");
-			goto debug_failed;
-		}
 		if (!phba->slow_ring_trc) {
 			phba->slow_ring_trc = kmalloc(
 				(sizeof(struct lpfc_debugfs_trc) *
@@ -5524,11 +5850,6 @@
 			debugfs_create_file(name, 0644,
 					    phba->hba_debugfs_root,
 					    phba, &lpfc_debugfs_op_nvmeio_trc);
-		if (!phba->debug_nvmeio_trc) {
-			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-					 "0574 No create debugfs nvmeio_trc\n");
-			goto debug_failed;
-		}
 
 		atomic_set(&phba->nvmeio_trc_cnt, 0);
 		if (lpfc_debugfs_max_nvmeio_trc) {
@@ -5576,11 +5897,6 @@
 	if (!vport->vport_debugfs_root) {
 		vport->vport_debugfs_root =
 			debugfs_create_dir(name, phba->hba_debugfs_root);
-		if (!vport->vport_debugfs_root) {
-			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-					 "0417 Can't create debugfs\n");
-			goto debug_failed;
-		}
 		atomic_inc(&phba->debugfs_vport_count);
 	}
 
@@ -5617,31 +5933,26 @@
 		debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
 				 vport->vport_debugfs_root,
 				 vport, &lpfc_debugfs_op_disc_trc);
-	if (!vport->debug_disc_trc) {
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-				 "0419 Cannot create debugfs "
-				 "discovery_trace\n");
-		goto debug_failed;
-	}
 	snprintf(name, sizeof(name), "nodelist");
 	vport->debug_nodelist =
 		debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
 				 vport->vport_debugfs_root,
 				 vport, &lpfc_debugfs_op_nodelist);
-	if (!vport->debug_nodelist) {
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-				 "2985 Can't create debugfs nodelist\n");
-		goto debug_failed;
-	}
 
 	snprintf(name, sizeof(name), "nvmestat");
 	vport->debug_nvmestat =
 		debugfs_create_file(name, 0644,
 				    vport->vport_debugfs_root,
 				    vport, &lpfc_debugfs_op_nvmestat);
-	if (!vport->debug_nvmestat) {
+
+	snprintf(name, sizeof(name), "scsistat");
+	vport->debug_scsistat =
+		debugfs_create_file(name, 0644,
+				    vport->vport_debugfs_root,
+				    vport, &lpfc_debugfs_op_scsistat);
+	if (!vport->debug_scsistat) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-				 "0811 Cannot create debugfs nvmestat\n");
+				 "4611 Cannot create debugfs scsistat\n");
 		goto debug_failed;
 	}
 
@@ -5650,22 +5961,12 @@
 		debugfs_create_file(name, 0644,
 				    vport->vport_debugfs_root,
 				    vport, &lpfc_debugfs_op_nvmektime);
-	if (!vport->debug_nvmektime) {
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-				 "0815 Cannot create debugfs nvmektime\n");
-		goto debug_failed;
-	}
 
 	snprintf(name, sizeof(name), "cpucheck");
 	vport->debug_cpucheck =
 		debugfs_create_file(name, 0644,
 				    vport->vport_debugfs_root,
 				    vport, &lpfc_debugfs_op_cpucheck);
-	if (!vport->debug_cpucheck) {
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-				 "0819 Cannot create debugfs cpucheck\n");
-		goto debug_failed;
-	}
 
 	/*
 	 * The following section is for additional directories/files for the
@@ -5685,11 +5986,6 @@
 	if (!phba->idiag_root) {
 		phba->idiag_root =
 			debugfs_create_dir(name, phba->hba_debugfs_root);
-		if (!phba->idiag_root) {
-			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-					 "2922 Can't create idiag debugfs\n");
-			goto debug_failed;
-		}
 		/* Initialize iDiag data structure */
 		memset(&idiag, 0, sizeof(idiag));
 	}
@@ -5700,11 +5996,6 @@
 		phba->idiag_pci_cfg =
 			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
 				phba->idiag_root, phba, &lpfc_idiag_op_pciCfg);
-		if (!phba->idiag_pci_cfg) {
-			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-					 "2923 Can't create idiag debugfs\n");
-			goto debug_failed;
-		}
 		idiag.offset.last_rd = 0;
 	}
 
@@ -5714,11 +6005,6 @@
 		phba->idiag_bar_acc =
 			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
 				phba->idiag_root, phba, &lpfc_idiag_op_barAcc);
-		if (!phba->idiag_bar_acc) {
-			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-					"3056 Can't create idiag debugfs\n");
-			goto debug_failed;
-		}
 		idiag.offset.last_rd = 0;
 	}
 
@@ -5728,11 +6014,6 @@
 		phba->idiag_que_info =
 			debugfs_create_file(name, S_IFREG|S_IRUGO,
 			phba->idiag_root, phba, &lpfc_idiag_op_queInfo);
-		if (!phba->idiag_que_info) {
-			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-					 "2924 Can't create idiag debugfs\n");
-			goto debug_failed;
-		}
 	}
 
 	/* iDiag access PCI function queue */
@@ -5741,11 +6022,6 @@
 		phba->idiag_que_acc =
 			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
 				phba->idiag_root, phba, &lpfc_idiag_op_queAcc);
-		if (!phba->idiag_que_acc) {
-			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-					 "2926 Can't create idiag debugfs\n");
-			goto debug_failed;
-		}
 	}
 
 	/* iDiag access PCI function doorbell registers */
@@ -5754,11 +6030,6 @@
 		phba->idiag_drb_acc =
 			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
 				phba->idiag_root, phba, &lpfc_idiag_op_drbAcc);
-		if (!phba->idiag_drb_acc) {
-			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-					 "2927 Can't create idiag debugfs\n");
-			goto debug_failed;
-		}
 	}
 
 	/* iDiag access PCI function control registers */
@@ -5767,11 +6038,6 @@
 		phba->idiag_ctl_acc =
 			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
 				phba->idiag_root, phba, &lpfc_idiag_op_ctlAcc);
-		if (!phba->idiag_ctl_acc) {
-			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-					 "2981 Can't create idiag debugfs\n");
-			goto debug_failed;
-		}
 	}
 
 	/* iDiag access mbox commands */
@@ -5780,11 +6046,6 @@
 		phba->idiag_mbx_acc =
 			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
 				phba->idiag_root, phba, &lpfc_idiag_op_mbxAcc);
-		if (!phba->idiag_mbx_acc) {
-			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-					"2980 Can't create idiag debugfs\n");
-			goto debug_failed;
-		}
 	}
 
 	/* iDiag extents access commands */
@@ -5796,12 +6057,6 @@
 						    S_IFREG|S_IRUGO|S_IWUSR,
 						    phba->idiag_root, phba,
 						    &lpfc_idiag_op_extAcc);
-			if (!phba->idiag_ext_acc) {
-				lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-						"2986 Cant create "
-						"idiag debugfs\n");
-				goto debug_failed;
-			}
 		}
 	}
 
@@ -5839,6 +6094,9 @@
 	debugfs_remove(vport->debug_nvmestat); /* nvmestat */
 	vport->debug_nvmestat = NULL;
 
+	debugfs_remove(vport->debug_scsistat); /* scsistat */
+	vport->debug_scsistat = NULL;
+
 	debugfs_remove(vport->debug_nvmektime); /* nvmektime */
 	vport->debug_nvmektime = NULL;
 
@@ -5853,21 +6111,22 @@
 
 	if (atomic_read(&phba->debugfs_vport_count) == 0) {
 
+		debugfs_remove(phba->debug_multixri_pools); /* multixripools */
+		phba->debug_multixri_pools = NULL;
+
 		debugfs_remove(phba->debug_hbqinfo); /* hbqinfo */
 		phba->debug_hbqinfo = NULL;
 
+#ifdef LPFC_HDWQ_LOCK_STAT
+		debugfs_remove(phba->debug_lockstat); /* lockstat */
+		phba->debug_lockstat = NULL;
+#endif
 		debugfs_remove(phba->debug_dumpHBASlim); /* HBASlim */
 		phba->debug_dumpHBASlim = NULL;
 
 		debugfs_remove(phba->debug_dumpHostSlim); /* HostSlim */
 		phba->debug_dumpHostSlim = NULL;
 
-		debugfs_remove(phba->debug_dumpData); /* dumpData */
-		phba->debug_dumpData = NULL;
-
-		debugfs_remove(phba->debug_dumpDif); /* dumpDif */
-		phba->debug_dumpDif = NULL;
-
 		debugfs_remove(phba->debug_InjErrLBA); /* InjErrLBA */
 		phba->debug_InjErrLBA = NULL;
 
@@ -5988,11 +6247,8 @@
 	lpfc_debug_dump_wq(phba, DUMP_ELS, 0);
 	lpfc_debug_dump_wq(phba, DUMP_NVMELS, 0);
 
-	for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
-		lpfc_debug_dump_wq(phba, DUMP_FCP, idx);
-
-	for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++)
-		lpfc_debug_dump_wq(phba, DUMP_NVME, idx);
+	for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
+		lpfc_debug_dump_wq(phba, DUMP_IO, idx);
 
 	lpfc_debug_dump_hdr_rq(phba);
 	lpfc_debug_dump_dat_rq(phba);
@@ -6003,15 +6259,12 @@
 	lpfc_debug_dump_cq(phba, DUMP_ELS, 0);
 	lpfc_debug_dump_cq(phba, DUMP_NVMELS, 0);
 
-	for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
-		lpfc_debug_dump_cq(phba, DUMP_FCP, idx);
-
-	for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++)
-		lpfc_debug_dump_cq(phba, DUMP_NVME, idx);
+	for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
+		lpfc_debug_dump_cq(phba, DUMP_IO, idx);
 
 	/*
 	 * Dump Event Queues (EQs)
 	 */
-	for (idx = 0; idx < phba->io_channel_irqs; idx++)
+	for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
 		lpfc_debug_dump_hba_eq(phba, idx);
 }
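
The switch from DUMP_FCP/DUMP_NVME to a single DUMP_IO across these dump helpers reflects the series' larger rework: the separate FCP and NVMe channel arrays are replaced by one array of hardware queues, each owning a WQ/CQ/EQ set shared by both protocols. A trimmed sketch of the layout the loops above index, keeping only the fields named in this diff (the real struct lpfc_sli4_hdw_queue has many more members):

struct lpfc_queue;			/* opaque for this sketch */

struct hdwq_sketch {
	struct lpfc_queue *hba_eq;	/* event queue */
	struct lpfc_queue *io_cq;	/* completion queue, FCP + NVMe */
	struct lpfc_queue *io_wq;	/* work queue, FCP + NVMe */
};

/* One loop over cfg_hdw_queue replaces the former pair of loops
 * over cfg_fcp_io_channel and cfg_nvme_io_channel.
 */
static void dump_all_wqs(struct hdwq_sketch *hdwq, int cfg_hdw_queue,
			 void (*dump)(struct lpfc_queue *))
{
	int idx;

	for (idx = 0; idx < cfg_hdw_queue; idx++)
		dump(hdwq[idx].io_wq);
}
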
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index 30efc7b..20f2537 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2007-2011 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -50,6 +50,9 @@
 #define LPFC_CPUCHECK_SIZE 8192
 #define LPFC_NVMEIO_TRC_SIZE 8192
 
+/* scsistat output buffer size */
+#define LPFC_SCSISTAT_SIZE 8192
+
 #define LPFC_DEBUG_OUT_LINE_SZ	80
 
 /*
@@ -284,9 +287,11 @@
 
 #endif
 
+/* multixripool output buffer size */
+#define LPFC_DUMP_MULTIXRIPOOL_SIZE 8192
+
 enum {
-	DUMP_FCP,
-	DUMP_NVME,
+	DUMP_IO,
 	DUMP_MBX,
 	DUMP_ELS,
 	DUMP_NVMELS,
@@ -324,7 +329,7 @@
  * This function dumps an entry indexed by @idx from a queue specified by the
  * queue descriptor @q.
  **/
-static inline void
+static void
 lpfc_debug_dump_qe(struct lpfc_queue *q, uint32_t idx)
 {
 	char line_buf[LPFC_LBUF_SZ];
@@ -339,10 +344,10 @@
 
 	esize = q->entry_size;
 	qe_word_cnt = esize / sizeof(uint32_t);
-	pword = q->qe[idx].address;
+	pword = lpfc_sli4_qe(q, idx);
 
 	len = 0;
-	len += snprintf(line_buf+len, LPFC_LBUF_SZ-len, "QE[%04d]: ", idx);
+	len += scnprintf(line_buf+len, LPFC_LBUF_SZ-len, "QE[%04d]: ", idx);
 	if (qe_word_cnt > 8)
 		printk(KERN_ERR "%s\n", line_buf);
 
@@ -353,11 +358,11 @@
 			if (qe_word_cnt > 8) {
 				len = 0;
 				memset(line_buf, 0, LPFC_LBUF_SZ);
-				len += snprintf(line_buf+len, LPFC_LBUF_SZ-len,
+				len += scnprintf(line_buf+len, LPFC_LBUF_SZ-len,
 						"%03d: ", i);
 			}
 		}
-		len += snprintf(line_buf+len, LPFC_LBUF_SZ-len, "%08x ",
+		len += scnprintf(line_buf+len, LPFC_LBUF_SZ-len, "%08x ",
 				((uint32_t)*pword) & 0xffffffff);
 		pword++;
 	}
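
The snprintf()-to-scnprintf() conversions in this hunk are a correctness fix, not a rename: snprintf() returns the length the output would have needed, so accumulating its return into len can push line_buf+len past the end of the buffer once a line truncates, while scnprintf() returns the bytes actually stored and keeps the offset in bounds. A self-contained userspace illustration of the return-value difference (scnprintf is kernel-only, so it is emulated here):

#include <stdio.h>

/* Userspace stand-in for the kernel's scnprintf(): returns the
 * number of bytes actually written, never more than size - 1.
 */
static int my_scnprintf(char *buf, size_t size, const char *fmt,
			unsigned int v)
{
	int n = snprintf(buf, size, fmt, v);

	if (n < 0)
		return 0;
	return (size_t)n >= size ? (int)(size - 1) : n;
}

int main(void)
{
	char buf[8];
	int len;

	/* snprintf: "0x12345678" needs 10 bytes but only 7 fit;
	 * the return value is still 10, so "buf + len" would point
	 * past the end of buf on the next append.
	 */
	len = snprintf(buf, sizeof(buf), "0x%08x", 0x12345678u);
	printf("snprintf returned %d (buffer holds %zu)\n",
	       len, sizeof(buf));

	/* scnprintf-style: the offset is capped at what was stored. */
	len = my_scnprintf(buf, sizeof(buf), "0x%08x", 0x12345678u);
	printf("scnprintf-style returned %d\n", len);
	return 0;
}
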
@@ -409,12 +414,9 @@
 	struct lpfc_queue *wq;
 	char *qtypestr;
 
-	if (qtype == DUMP_FCP) {
-		wq = phba->sli4_hba.fcp_wq[wqidx];
-		qtypestr = "FCP";
-	} else if (qtype == DUMP_NVME) {
-		wq = phba->sli4_hba.nvme_wq[wqidx];
-		qtypestr = "NVME";
+	if (qtype == DUMP_IO) {
+		wq = phba->sli4_hba.hdwq[wqidx].io_wq;
+		qtypestr = "IO";
 	} else if (qtype == DUMP_MBX) {
 		wq = phba->sli4_hba.mbx_wq;
 		qtypestr = "MBX";
@@ -427,7 +429,7 @@
 	} else
 		return;
 
-	if (qtype == DUMP_FCP || qtype == DUMP_NVME)
+	if (qtype == DUMP_IO)
 		pr_err("%s WQ: WQ[Idx:%d|Qid:%d]\n",
 			qtypestr, wqidx, wq->queue_id);
 	else
@@ -453,16 +455,13 @@
 	char *qtypestr;
 	int eqidx;
 
-	/* fcp/nvme wq and cq are 1:1, thus same indexes */
+	/* io wq and cq are 1:1, thus same indexes */
+	eq = NULL;
 
-	if (qtype == DUMP_FCP) {
-		wq = phba->sli4_hba.fcp_wq[wqidx];
-		cq = phba->sli4_hba.fcp_cq[wqidx];
-		qtypestr = "FCP";
-	} else if (qtype == DUMP_NVME) {
-		wq = phba->sli4_hba.nvme_wq[wqidx];
-		cq = phba->sli4_hba.nvme_cq[wqidx];
-		qtypestr = "NVME";
+	if (qtype == DUMP_IO) {
+		wq = phba->sli4_hba.hdwq[wqidx].io_wq;
+		cq = phba->sli4_hba.hdwq[wqidx].io_cq;
+		qtypestr = "IO";
 	} else if (qtype == DUMP_MBX) {
 		wq = phba->sli4_hba.mbx_wq;
 		cq = phba->sli4_hba.mbx_cq;
@@ -478,18 +477,18 @@
 	} else
 		return;
 
-	for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++) {
-		if (cq->assoc_qid == phba->sli4_hba.hba_eq[eqidx]->queue_id)
+	for (eqidx = 0; eqidx < phba->cfg_hdw_queue; eqidx++) {
+		eq = phba->sli4_hba.hdwq[eqidx].hba_eq;
+		if (cq->assoc_qid == eq->queue_id)
 			break;
 	}
-	if (eqidx == phba->io_channel_irqs) {
+	if (eqidx == phba->cfg_hdw_queue) {
 		pr_err("Couldn't find EQ for CQ. Using EQ[0]\n");
 		eqidx = 0;
+		eq = phba->sli4_hba.hdwq[0].hba_eq;
 	}
 
-	eq = phba->sli4_hba.hba_eq[eqidx];
-
-	if (qtype == DUMP_FCP || qtype == DUMP_NVME)
+	if (qtype == DUMP_IO)
 		pr_err("%s CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]"
 			"->EQ[Idx:%d|Qid:%d]:\n",
 			qtypestr, wqidx, wq->queue_id, wqidx, cq->queue_id,
@@ -516,7 +515,7 @@
 {
 	struct lpfc_queue *qp;
 
-	qp = phba->sli4_hba.hba_eq[qidx];
+	qp = phba->sli4_hba.hdwq[qidx].hba_eq;
 
 	pr_err("EQ[Idx:%d|Qid:%d]\n", qidx, qp->queue_id);
 
@@ -564,21 +563,12 @@
 {
 	int wq_idx;
 
-	for (wq_idx = 0; wq_idx < phba->cfg_fcp_io_channel; wq_idx++)
-		if (phba->sli4_hba.fcp_wq[wq_idx]->queue_id == qid)
+	for (wq_idx = 0; wq_idx < phba->cfg_hdw_queue; wq_idx++)
+		if (phba->sli4_hba.hdwq[wq_idx].io_wq->queue_id == qid)
 			break;
-	if (wq_idx < phba->cfg_fcp_io_channel) {
-		pr_err("FCP WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
-		lpfc_debug_dump_q(phba->sli4_hba.fcp_wq[wq_idx]);
-		return;
-	}
-
-	for (wq_idx = 0; wq_idx < phba->cfg_nvme_io_channel; wq_idx++)
-		if (phba->sli4_hba.nvme_wq[wq_idx]->queue_id == qid)
-			break;
-	if (wq_idx < phba->cfg_nvme_io_channel) {
-		pr_err("NVME WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
-		lpfc_debug_dump_q(phba->sli4_hba.nvme_wq[wq_idx]);
+	if (wq_idx < phba->cfg_hdw_queue) {
+		pr_err("IO WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
+		lpfc_debug_dump_q(phba->sli4_hba.hdwq[wq_idx].io_wq);
 		return;
 	}
 
@@ -646,23 +636,13 @@
 {
 	int cq_idx;
 
-	for (cq_idx = 0; cq_idx < phba->cfg_fcp_io_channel; cq_idx++)
-		if (phba->sli4_hba.fcp_cq[cq_idx]->queue_id == qid)
+	for (cq_idx = 0; cq_idx < phba->cfg_hdw_queue; cq_idx++)
+		if (phba->sli4_hba.hdwq[cq_idx].io_cq->queue_id == qid)
 			break;
 
-	if (cq_idx < phba->cfg_fcp_io_channel) {
-		pr_err("FCP CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
-		lpfc_debug_dump_q(phba->sli4_hba.fcp_cq[cq_idx]);
-		return;
-	}
-
-	for (cq_idx = 0; cq_idx < phba->cfg_nvme_io_channel; cq_idx++)
-		if (phba->sli4_hba.nvme_cq[cq_idx]->queue_id == qid)
-			break;
-
-	if (cq_idx < phba->cfg_nvme_io_channel) {
-		pr_err("NVME CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
-		lpfc_debug_dump_q(phba->sli4_hba.nvme_cq[cq_idx]);
+	if (cq_idx < phba->cfg_hdw_queue) {
+		pr_err("IO CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
+		lpfc_debug_dump_q(phba->sli4_hba.hdwq[cq_idx].io_cq);
 		return;
 	}
 
@@ -697,13 +677,13 @@
 {
 	int eq_idx;
 
-	for (eq_idx = 0; eq_idx < phba->io_channel_irqs; eq_idx++)
-		if (phba->sli4_hba.hba_eq[eq_idx]->queue_id == qid)
+	for (eq_idx = 0; eq_idx < phba->cfg_hdw_queue; eq_idx++)
+		if (phba->sli4_hba.hdwq[eq_idx].hba_eq->queue_id == qid)
 			break;
 
-	if (eq_idx < phba->io_channel_irqs) {
+	if (eq_idx < phba->cfg_hdw_queue) {
 		printk(KERN_ERR "FCP EQ[Idx:%d|Qid:%d]\n", eq_idx, qid);
-		lpfc_debug_dump_q(phba->sli4_hba.hba_eq[eq_idx]);
+		lpfc_debug_dump_q(phba->sli4_hba.hdwq[eq_idx].hba_eq);
 		return;
 	}
 }
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 28e2b60..482e4a8 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -112,6 +112,8 @@
 	uint8_t         nlp_retry;		/* used for ELS retries */
 	uint8_t         nlp_fcp_info;	        /* class info, bits 0-3 */
 #define NLP_FCP_2_DEVICE   0x10			/* FCP-2 device */
+	u8		nlp_nvme_info;	        /* NVME NSLER Support */
+#define NLP_NVME_NSLER     0x1			/* NVME NSLER device */
 
 	uint16_t        nlp_usg_map;	/* ndlp management usage bitmap */
 #define NLP_USG_NODE_ACT_BIT	0x1	/* Indicate ndlp is actively used */
@@ -138,6 +140,7 @@
 
 	uint32_t nvme_fb_size; /* NVME target's supported byte cnt */
 #define NVME_FB_BIT_SHIFT 9    /* PRLI Rsp first burst in 512B units. */
+	uint32_t nlp_defer_did;
 };
 struct lpfc_node_rrq {
 	struct list_head list;
@@ -156,6 +159,7 @@
 /* Defines for nlp_flag (uint32) */
 #define NLP_IGNR_REG_CMPL  0x00000001 /* Rcvd rscn before we cmpl reg login */
 #define NLP_REG_LOGIN_SEND 0x00000002   /* sent reglogin to adapter */
+#define NLP_RELEASE_RPI    0x00000004   /* Release RPI to free pool */
 #define NLP_SUPPRESS_RSP   0x00000010	/* Remote NPort supports suppress rsp */
 #define NLP_PLOGI_SND      0x00000020	/* sent PLOGI request for this entry */
 #define NLP_PRLI_SND       0x00000040	/* sent PRLI request for this entry */
@@ -165,6 +169,7 @@
 #define NLP_ELS_SND_MASK   0x000007e0	/* sent ELS request for this entry */
 #define NLP_NVMET_RECOV    0x00001000   /* NVMET auditing node for recovery. */
 #define NLP_FCP_PRLI_RJT   0x00002000   /* Rport does not support FCP PRLI. */
+#define NLP_UNREG_INP      0x00008000	/* UNREG_RPI cmd is in progress */
 #define NLP_DEFER_RM       0x00010000	/* Remove this ndlp if no longer used */
 #define NLP_DELAY_TMO      0x00020000	/* delay timeout is running for node */
 #define NLP_NPR_2B_DISC    0x00040000	/* node is included in num_disc_nodes */
@@ -293,4 +298,4 @@
 #define NLP_EVT_DEVICE_RM         0xb	/* Device not found in NS / ALPAmap */
 #define NLP_EVT_DEVICE_RECOVERY   0xc	/* Device existence unknown */
 #define NLP_EVT_MAX_EVENT         0xd
-
+#define NLP_EVT_NOTHING_PENDING   0xff
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 4dda969..d530399 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -30,6 +30,8 @@
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_fc.h>
+#include <uapi/scsi/fc/fc_fs.h>
+#include <uapi/scsi/fc/fc_els.h>
 
 #include "lpfc_hw4.h"
 #include "lpfc_hw.h"
@@ -242,6 +244,8 @@
 		icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
 		if (elscmd == ELS_CMD_FLOGI)
 			icmd->ulpTimeout = FF_DEF_RATOV * 2;
+		else if (elscmd == ELS_CMD_LOGO)
+			icmd->ulpTimeout = phba->fc_ratov;
 		else
 			icmd->ulpTimeout = phba->fc_ratov * 2;
 	} else {
@@ -313,20 +317,20 @@
 		/* Xmit ELS command <elsCmd> to remote NPORT <did> */
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 				 "0116 Xmit ELS command x%x to remote "
-				 "NPORT x%x I/O tag: x%x, port state:x%x"
-				 " fc_flag:x%x\n",
+				 "NPORT x%x I/O tag: x%x, port state:x%x "
+				 "rpi x%x fc_flag:x%x\n",
 				 elscmd, did, elsiocb->iotag,
-				 vport->port_state,
+				 vport->port_state, ndlp->nlp_rpi,
 				 vport->fc_flag);
 	} else {
 		/* Xmit ELS response <elsCmd> to remote NPORT <did> */
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 				 "0117 Xmit ELS response x%x to remote "
 				 "NPORT x%x I/O tag: x%x, size: x%x "
-				 "port_state x%x fc_flag x%x\n",
+				 "port_state x%x rpi x%x fc_flag x%x\n",
 				 elscmd, ndlp->nlp_DID, elsiocb->iotag,
 				 cmdSize, vport->port_state,
-				 vport->fc_flag);
+				 ndlp->nlp_rpi, vport->fc_flag);
 	}
 	return elsiocb;
 
@@ -413,7 +417,7 @@
 	/* increment the reference count on ndlp to hold reference
 	 * for the callback routine.
 	 */
-	mbox->context2 = lpfc_nlp_get(ndlp);
+	mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
 
 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
 	if (rc == MBX_NOT_FINISHED) {
@@ -428,7 +432,7 @@
 	 * for the failed mbox command.
 	 */
 	lpfc_nlp_put(ndlp);
-	mp = (struct lpfc_dmabuf *) mbox->context1;
+	mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
 	kfree(mp);
 fail_free_mbox:
@@ -502,7 +506,7 @@
 
 	mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
 	mboxq->vport = vport;
-	mboxq->context1 = dmabuf;
+	mboxq->ctx_buf = dmabuf;
 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
 	if (rc == MBX_NOT_FINISHED) {
 		rc = -ENXIO;
@@ -1048,17 +1052,18 @@
 		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
 			goto out;
 
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
+				 "0150 FLOGI failure Status:x%x/x%x "
+				 "xri x%x TMO:x%x\n",
+				 irsp->ulpStatus, irsp->un.ulpWord[4],
+				 cmdiocb->sli4_xritag, irsp->ulpTimeout);
+
 		/* If this is not a loop open failure, bail out */
 		if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
 		      ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
 					IOERR_LOOP_OPEN_FAILURE)))
 			goto flogifail;
 
-		lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
-				 "0150 FLOGI failure Status:x%x/x%x TMO:x%x\n",
-				 irsp->ulpStatus, irsp->un.ulpWord[4],
-				 irsp->ulpTimeout);
-
 		/* FLOGI failed, so there is no fabric */
 		spin_lock_irq(shost->host_lock);
 		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
@@ -1111,7 +1116,8 @@
 	/* FLOGI completes successfully */
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 			 "0101 FLOGI completes successfully, I/O tag:x%x, "
-			 "Data: x%x x%x x%x x%x x%x x%x\n", cmdiocb->iotag,
+			 "xri x%x Data: x%x x%x x%x x%x x%x x%x\n",
+			 cmdiocb->iotag, cmdiocb->sli4_xritag,
 			 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
 			 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
 			 vport->port_state, vport->fc_flag);
@@ -1155,6 +1161,7 @@
 			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
 			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
 			spin_unlock_irq(&phba->hbalock);
+			phba->fcf.fcf_redisc_attempted = 0; /* reset */
 			goto out;
 		}
 		if (!rc) {
@@ -1169,6 +1176,7 @@
 			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
 			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
 			spin_unlock_irq(&phba->hbalock);
+			phba->fcf.fcf_redisc_attempted = 0; /* reset */
 			goto out;
 		}
 	}
@@ -1200,6 +1208,39 @@
 }
 
 /**
+ * lpfc_cmpl_els_link_down - Completion callback function for ELS command
+ *                           aborted during a link down
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ */
+static void
+lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+			struct lpfc_iocbq *rspiocb)
+{
+	IOCB_t *irsp;
+	uint32_t *pcmd;
+	uint32_t cmd;
+
+	pcmd = (uint32_t *)(((struct lpfc_dmabuf *)cmdiocb->context2)->virt);
+	cmd = *pcmd;
+	irsp = &rspiocb->iocb;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+			"6445 ELS completes after LINK_DOWN: "
+			"Status x%x/x%x cmd x%x flg x%x\n",
+			irsp->ulpStatus, irsp->un.ulpWord[4], cmd,
+			cmdiocb->iocb_flag);
+
+	if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) {
+		cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
+		atomic_dec(&phba->fabric_iocb_count);
+	}
+	lpfc_els_free_iocb(phba, cmdiocb);
+}
+
+/**
  * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport
  * @vport: pointer to a host virtual N_Port data structure.
  * @ndlp: pointer to a node-list data structure.
@@ -1229,9 +1270,10 @@
 	struct serv_parm *sp;
 	IOCB_t *icmd;
 	struct lpfc_iocbq *elsiocb;
+	struct lpfc_iocbq defer_flogi_acc;
 	uint8_t *pcmd;
 	uint16_t cmdsize;
-	uint32_t tmo;
+	uint32_t tmo, did;
 	int rc;
 
 	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
@@ -1303,6 +1345,35 @@
 		phba->sli3_options, 0, 0);
 
 	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
+
+	phba->hba_flag |= HBA_FLOGI_ISSUED;
+
+	/* Check for a deferred FLOGI ACC condition */
+	if (phba->defer_flogi_acc_flag) {
+		did = vport->fc_myDID;
+		vport->fc_myDID = Fabric_DID;
+
+		memset(&defer_flogi_acc, 0, sizeof(struct lpfc_iocbq));
+
+		defer_flogi_acc.iocb.ulpContext = phba->defer_flogi_acc_rx_id;
+		defer_flogi_acc.iocb.unsli3.rcvsli3.ox_id =
+						phba->defer_flogi_acc_ox_id;
+
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+				 "3354 Xmit deferred FLOGI ACC: rx_id: x%x,"
+				 " ox_id: x%x, hba_flag x%x\n",
+				 phba->defer_flogi_acc_rx_id,
+				 phba->defer_flogi_acc_ox_id, phba->hba_flag);
+
+		/* Send deferred FLOGI ACC */
+		lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc,
+				 ndlp, NULL);
+
+		phba->defer_flogi_acc_flag = false;
+
+		vport->fc_myDID = did;
+	}
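
This new block is the transmit half of a point-to-point work-around whose receive half appears in the lpfc_els_rcv_flogi hunk later in this patch: when the remote port's FLOGI arrives before ours has been issued, the driver stashes that exchange's rx_id/ox_id and sets defer_flogi_acc_flag instead of ACCing immediately; once our own FLOGI is on the wire, the saved IDs are replayed through a temporary iocb so the ACC still answers the original exchange. A condensed sketch of the handshake with the lpfc specifics stripped out (names are illustrative, not the driver's API):

#include <stdbool.h>
#include <stdint.h>

struct p2p_state {
	bool	 defer_acc;	/* remote FLOGI seen before ours sent */
	uint16_t rx_id, ox_id;	/* exchange IDs of the deferred FLOGI */
};

/* RX side: remote FLOGI arrives first -- stash and defer the ACC. */
static void rcv_flogi(struct p2p_state *st, uint16_t rx_id,
		      uint16_t ox_id, bool our_flogi_issued)
{
	if (!our_flogi_issued) {
		st->rx_id = rx_id;
		st->ox_id = ox_id;
		st->defer_acc = true;	/* ACC is sent later */
		return;
	}
	/* otherwise ACC immediately (not shown) */
}

/* TX side: right after issuing our own FLOGI, replay the saved
 * exchange IDs so the deferred ACC pairs with the original frame.
 */
static void issue_flogi(struct p2p_state *st)
{
	/* ... build and send our FLOGI here ... */
	if (st->defer_acc) {
		/* send ACC carrying st->rx_id / st->ox_id */
		st->defer_acc = false;
	}
}
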
+
 	if (rc == IOCB_ERROR) {
 		lpfc_els_free_iocb(phba, elsiocb);
 		return 1;
@@ -1338,6 +1409,8 @@
 			Fabric_DID);
 
 	pring = lpfc_phba_elsring(phba);
+	if (unlikely(!pring))
+		return -EIO;
 
 	/*
 	 * Check the txcmplq for an iocb that matches the nport the driver is
@@ -1531,7 +1604,9 @@
 	struct serv_parm *sp;
 	uint8_t  name[sizeof(struct lpfc_name)];
 	uint32_t rc, keepDID = 0, keep_nlp_flag = 0;
+	uint32_t keep_new_nlp_flag = 0;
 	uint16_t keep_nlp_state;
+	u32 keep_nlp_fc4_type = 0;
 	struct lpfc_nvme_rport *keep_nrport = NULL;
 	int  put_node;
 	int  put_rport;
@@ -1551,8 +1626,10 @@
 	 */
 	new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
 
+	/* return immediately if the WWPN matches ndlp */
 	if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
 		return ndlp;
+
 	if (phba->sli_rev == LPFC_SLI_REV4) {
 		active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool,
 						       GFP_KERNEL);
@@ -1561,9 +1638,13 @@
 			       phba->cfg_rrq_xri_bitmap_sz);
 	}
 
-	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
-		 "3178 PLOGI confirm: ndlp %p x%x: new_ndlp %p\n",
-		 ndlp, ndlp->nlp_DID, new_ndlp);
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE,
+			 "3178 PLOGI confirm: ndlp x%x x%x x%x: "
+			 "new_ndlp x%x x%x x%x\n",
+			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_fc4_type,
+			 (new_ndlp ? new_ndlp->nlp_DID : 0),
+			 (new_ndlp ? new_ndlp->nlp_flag : 0),
+			 (new_ndlp ? new_ndlp->nlp_fc4_type : 0));
 
 	if (!new_ndlp) {
 		rc = memcmp(&ndlp->nlp_portname, name,
@@ -1612,6 +1693,16 @@
 			       phba->cfg_rrq_xri_bitmap_sz);
 	}
 
+	/* At this point in this routine, we know new_ndlp will be
+	 * returned. However, any previous GID_FTs that were done
+	 * would have updated nlp_fc4_type in ndlp, so we must ensure
+	 * new_ndlp has the right value.
+	 */
+	if (vport->fc_flag & FC_FABRIC) {
+		keep_nlp_fc4_type = new_ndlp->nlp_fc4_type;
+		new_ndlp->nlp_fc4_type = ndlp->nlp_fc4_type;
+	}
+
 	lpfc_unreg_rpi(vport, new_ndlp);
 	new_ndlp->nlp_DID = ndlp->nlp_DID;
 	new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
@@ -1621,9 +1712,36 @@
 		       phba->cfg_rrq_xri_bitmap_sz);
 
 	spin_lock_irq(shost->host_lock);
-	keep_nlp_flag = new_ndlp->nlp_flag;
+	keep_new_nlp_flag = new_ndlp->nlp_flag;
+	keep_nlp_flag = ndlp->nlp_flag;
 	new_ndlp->nlp_flag = ndlp->nlp_flag;
-	ndlp->nlp_flag = keep_nlp_flag;
+
+	/* if new_ndlp had NLP_UNREG_INP set, keep it */
+	if (keep_new_nlp_flag & NLP_UNREG_INP)
+		new_ndlp->nlp_flag |= NLP_UNREG_INP;
+	else
+		new_ndlp->nlp_flag &= ~NLP_UNREG_INP;
+
+	/* if new_ndlp had NLP_RPI_REGISTERED set, keep it */
+	if (keep_new_nlp_flag & NLP_RPI_REGISTERED)
+		new_ndlp->nlp_flag |= NLP_RPI_REGISTERED;
+	else
+		new_ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
+
+	ndlp->nlp_flag = keep_new_nlp_flag;
+
+	/* if ndlp had NLP_UNREG_INP set, keep it */
+	if (keep_nlp_flag & NLP_UNREG_INP)
+		ndlp->nlp_flag |= NLP_UNREG_INP;
+	else
+		ndlp->nlp_flag &= ~NLP_UNREG_INP;
+
+	/* if ndlp had NLP_RPI_REGISTERED set, keep it */
+	if (keep_nlp_flag & NLP_RPI_REGISTERED)
+		ndlp->nlp_flag |= NLP_RPI_REGISTERED;
+	else
+		ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
+
 	spin_unlock_irq(shost->host_lock);
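
The flag handling above amounts to a swap with exceptions: nlp_flag is exchanged wholesale between the two nodes, but NLP_UNREG_INP and NLP_RPI_REGISTERED describe each node's own RPI state and must stay put. Reduced to its bit arithmetic, a runnable sketch (only NLP_UNREG_INP's value comes from this patch's lpfc_disc.h hunk; the NLP_RPI_REGISTERED value below is a placeholder):

#include <stdint.h>
#include <stdio.h>

#define NLP_UNREG_INP      0x00008000u	/* from lpfc_disc.h in this patch */
#define NLP_RPI_REGISTERED 0x00000001u	/* placeholder value */
#define STICKY		   (NLP_UNREG_INP | NLP_RPI_REGISTERED)

/* Swap two flag words while each side keeps its own sticky bits. */
static void swap_keep_sticky(uint32_t *a, uint32_t *b)
{
	uint32_t keep_a = *a & STICKY;
	uint32_t keep_b = *b & STICKY;
	uint32_t tmp = *a;

	*a = (*b & ~STICKY) | keep_a;
	*b = (tmp & ~STICKY) | keep_b;
}

int main(void)
{
	uint32_t ndlp = 0x00020000u | NLP_UNREG_INP;
	uint32_t new_ndlp = 0x00040000u | NLP_RPI_REGISTERED;

	swap_keep_sticky(&ndlp, &new_ndlp);
	/* Each node keeps its own sticky bit and takes the other's
	 * remaining flags: prints ndlp=0x48000 new_ndlp=0x20001.
	 */
	printf("ndlp=%#x new_ndlp=%#x\n", ndlp, new_ndlp);
	return 0;
}
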
 
 	/* Set nlp_states accordingly */
@@ -1661,7 +1779,6 @@
 		if (ndlp->nrport) {
 			ndlp->nrport = NULL;
 			lpfc_nlp_put(ndlp);
-			new_ndlp->nlp_fc4_type = ndlp->nlp_fc4_type;
 		}
 
 		/* We shall actually free the ndlp with both nlp_DID and
@@ -1674,7 +1791,10 @@
 			spin_unlock_irq(&phba->ndlp_lock);
 		}
 
-		/* Two ndlps cannot have the same did on the nodelist */
+		/* Two ndlps cannot have the same did on the nodelist.
+		 * Note: for this case, ndlp has a NULL WWPN so setting
+		 * the nlp_fc4_type isn't required.
+		 */
 		ndlp->nlp_DID = keepDID;
 		lpfc_nlp_set_state(vport, ndlp, keep_nlp_state);
 		if (phba->sli_rev == LPFC_SLI_REV4 &&
@@ -1693,8 +1813,13 @@
 
 		lpfc_unreg_rpi(vport, ndlp);
 
-		/* Two ndlps cannot have the same did */
+		/* Two ndlps cannot have the same did and the fc4
+		 * type must be transferred because the ndlp is in
+		 * flight.
+		 */
 		ndlp->nlp_DID = keepDID;
+		ndlp->nlp_fc4_type = keep_nlp_fc4_type;
+
 		if (phba->sli_rev == LPFC_SLI_REV4 &&
 		    active_rrqs_xri_bitmap)
 			memcpy(ndlp->active_rrqs_xri_bitmap,
@@ -1735,6 +1860,12 @@
 	    active_rrqs_xri_bitmap)
 		mempool_free(active_rrqs_xri_bitmap,
 			     phba->active_rrq_pool);
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE,
+			 "3173 PLOGI confirm exit: new_ndlp x%x x%x x%x\n",
+			 new_ndlp->nlp_DID, new_ndlp->nlp_flag,
+			 new_ndlp->nlp_fc4_type);
+
 	return new_ndlp;
 }
 
@@ -1866,7 +1997,7 @@
 	IOCB_t *irsp;
 	struct lpfc_nodelist *ndlp;
 	struct lpfc_dmabuf *prsp;
-	int disc, rc;
+	int disc;
 
 	/* we pass cmdiocb to state machine which needs rspiocb as well */
 	cmdiocb->context_un.rsp_iocb = rspiocb;
@@ -1895,7 +2026,6 @@
 	disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
 	ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
 	spin_unlock_irq(shost->host_lock);
-	rc   = 0;
 
 	/* PLOGI completes to NPort <nlp_DID> */
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
@@ -1934,18 +2064,16 @@
 				 ndlp->nlp_DID, irsp->ulpStatus,
 				 irsp->un.ulpWord[4]);
 		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
-		if (lpfc_error_lost_link(irsp))
-			rc = NLP_STE_FREED_NODE;
-		else
-			rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
-						     NLP_EVT_CMPL_PLOGI);
+		if (!lpfc_error_lost_link(irsp))
+			lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+						NLP_EVT_CMPL_PLOGI);
 	} else {
 		/* Good status, call state machine */
 		prsp = list_entry(((struct lpfc_dmabuf *)
 				   cmdiocb->context2)->list.next,
 				  struct lpfc_dmabuf, list);
 		ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
-		rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
 					     NLP_EVT_CMPL_PLOGI);
 	}
 
@@ -2002,8 +2130,29 @@
 	int ret;
 
 	ndlp = lpfc_findnode_did(vport, did);
-	if (ndlp && !NLP_CHK_NODE_ACT(ndlp))
-		ndlp = NULL;
+
+	if (ndlp) {
+		/* Defer the processing of the issue PLOGI until after the
+		 * outstanding UNREG_RPI mbox command completes, unless we
+		 * are going offline. This logic does not apply for Fabric DIDs
+		 */
+		if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
+		    ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
+		    !(vport->fc_flag & FC_OFFLINE_MODE)) {
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+					 "4110 Issue PLOGI x%x deferred "
+					 "on NPort x%x rpi x%x Data: x%px\n",
+					 ndlp->nlp_defer_did, ndlp->nlp_DID,
+					 ndlp->nlp_rpi, ndlp);
+
+			/* We can only defer 1st PLOGI */
+			if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING)
+				ndlp->nlp_defer_did = did;
+			return 0;
+		}
+		if (!NLP_CHK_NODE_ACT(ndlp))
+			ndlp = NULL;
+	}
 
 	/* If ndlp is not NULL, we will bump the reference count on it */
 	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
@@ -2137,7 +2286,7 @@
 		else
 			lpfc_disc_state_machine(vport, ndlp, cmdiocb,
 						NLP_EVT_CMPL_PRLI);
-	} else
+	} else {
 		/* Good status, call state machine.  However, if another
 		 * PRLI is outstanding, don't call the state machine
 		 * because final disposition to Mapped or Unmapped is
@@ -2145,6 +2294,7 @@
 		 */
 		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
 					NLP_EVT_CMPL_PRLI);
+	}
 
 out:
 	lpfc_els_free_iocb(phba, cmdiocb);
@@ -2203,7 +2353,7 @@
 	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
 	ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
 	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
-	ndlp->nlp_flag &= ~NLP_FIRSTBURST;
+	ndlp->nlp_flag &= ~(NLP_FIRSTBURST | NLP_NPR_2B_DISC);
 	ndlp->nvme_fb_size = 0;
 
  send_next_prli:
@@ -2285,6 +2435,10 @@
 		npr_nvme = (struct lpfc_nvme_prli *)pcmd;
 		bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE);
 		bf_set(prli_estabImagePair, npr_nvme, 0);  /* Should be 0 */
+		if (phba->nsler) {
+			bf_set(prli_nsler, npr_nvme, 1);
+			bf_set(prli_conf, npr_nvme, 1);
+		}
 
 		/* Only initiators request first burst. */
 		if ((phba->cfg_nvme_enable_fb) &&
@@ -2682,16 +2836,15 @@
 		goto out;
 	}
 
+	/* The LOGO will not be retried on failure.  A LOGO was
+	 * issued to the remote rport, and an ACC, an RJT, or no
+	 * answer are all acceptable.  Note the failure and move
+	 * forward with discovery.  The PLOGI will retry.
+	 */
 	if (irsp->ulpStatus) {
-		/* Check for retry */
-		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
-			/* ELS command is being retried */
-			skip_recovery = 1;
-			goto out;
-		}
 		/* LOGO failed */
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
-				 "2756 LOGO failure DID:%06X Status:x%x/x%x\n",
+				 "2756 LOGO failure, No Retry DID:%06X Status:x%x/x%x\n",
 				 ndlp->nlp_DID, irsp->ulpStatus,
 				 irsp->un.ulpWord[4]);
 		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
@@ -2711,8 +2864,8 @@
 		!(vport->fc_flag & FC_PT2PT_PLOGI)) {
 		phba->pport->fc_myDID = 0;
 
-		if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
-		    (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
+		if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+		    (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
 			if (phba->nvmet_support)
 				lpfc_nvmet_update_targetport(phba);
 			else
@@ -2737,7 +2890,8 @@
 	 * For any other port type, the rpi is unregistered as an implicit
 	 * LOGO.
 	 */
-	if ((ndlp->nlp_type & NLP_FCP_TARGET) && (skip_recovery == 0)) {
+	if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) &&
+	    skip_recovery == 0) {
 		lpfc_cancel_retry_delay_tmo(vport, ndlp);
 		spin_lock_irqsave(shost->host_lock, flags);
 		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
@@ -2770,6 +2924,8 @@
  * will be stored into the context1 field of the IOCB for the completion
  * callback function to the LOGO ELS command.
  *
+ * Callers of this routine are expected to unregister the RPI first
+ *
  * Return code
  *   0 - successfully issued logo
  *   1 - failed to issue logo
@@ -2811,22 +2967,6 @@
 		"Issue LOGO:      did:x%x",
 		ndlp->nlp_DID, 0, 0);
 
-	/*
-	 * If we are issuing a LOGO, we may try to recover the remote NPort
-	 * by issuing a PLOGI later. Even though we issue ELS cmds by the
-	 * VPI, if we have a valid RPI, and that RPI gets unreg'ed while
-	 * that ELS command is in-flight, the HBA returns a IOERR_INVALID_RPI
-	 * for that ELS cmd. To avoid this situation, lets get rid of the
-	 * RPI right now, before any ELS cmds are sent.
-	 */
-	spin_lock_irq(shost->host_lock);
-	ndlp->nlp_flag |= NLP_ISSUE_LOGO;
-	spin_unlock_irq(shost->host_lock);
-	if (lpfc_unreg_rpi(vport, ndlp)) {
-		lpfc_els_free_iocb(phba, elsiocb);
-		return 0;
-	}
-
 	phba->fc_stat.elsXmitLOGO++;
 	elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
 	spin_lock_irq(shost->host_lock);
@@ -2834,7 +2974,6 @@
 	ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
 	spin_unlock_irq(shost->host_lock);
 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
-
 	if (rc == IOCB_ERROR) {
 		spin_lock_irq(shost->host_lock);
 		ndlp->nlp_flag &= ~NLP_LOGO_SND;
@@ -2842,6 +2981,11 @@
 		lpfc_els_free_iocb(phba, elsiocb);
 		return 1;
 	}
+
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_prev_state = ndlp->nlp_state;
+	spin_unlock_irq(shost->host_lock);
+	lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
 	return 0;
 }
 
@@ -2975,6 +3119,116 @@
 }
 
 /**
+ * lpfc_issue_els_rscn - Issue an RSCN to the Fabric Controller (Fabric)
+ *   or the other nport (pt2pt).
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @retry: number of retries to the command IOCB.
+ *
+ * This routine issues an RSCN to the Fabric Controller (DID 0xFFFFFD)
+ * when connected to a fabric, or to the remote port when connected
+ * in point-to-point mode. When sent to the Fabric Controller, it will
+ * replay the RSCN to registered recipients.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the RSCN ELS command.
+ *
+ * Return code
+ *   0 - Successfully issued RSCN command
+ *   1 - Failed to issue RSCN command
+ **/
+int
+lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_iocbq *elsiocb;
+	struct lpfc_nodelist *ndlp;
+	struct {
+		struct fc_els_rscn rscn;
+		struct fc_els_rscn_page portid;
+	} *event;
+	uint32_t nportid;
+	uint16_t cmdsize = sizeof(*event);
+
+	/* Not supported for private loop */
+	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
+	    !(vport->fc_flag & FC_PUBLIC_LOOP))
+		return 1;
+
+	if (vport->fc_flag & FC_PT2PT) {
+		/* find any mapped nport - that would be the other nport */
+		ndlp = lpfc_findnode_mapped(vport);
+		if (!ndlp)
+			return 1;
+	} else {
+		nportid = FC_FID_FCTRL;
+		/* find the fabric controller node */
+		ndlp = lpfc_findnode_did(vport, nportid);
+		if (!ndlp) {
+			/* if one didn't exist, make one */
+			ndlp = lpfc_nlp_init(vport, nportid);
+			if (!ndlp)
+				return 1;
+			lpfc_enqueue_node(vport, ndlp);
+		} else if (!NLP_CHK_NODE_ACT(ndlp)) {
+			ndlp = lpfc_enable_node(vport, ndlp,
+						NLP_STE_UNUSED_NODE);
+			if (!ndlp)
+				return 1;
+		}
+	}
+
+	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
+				     ndlp->nlp_DID, ELS_CMD_RSCN_XMT);
+
+	if (!elsiocb) {
+		/* This will trigger the release of the node just
+		 * allocated
+		 */
+		lpfc_nlp_put(ndlp);
+		return 1;
+	}
+
+	event = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
+
+	event->rscn.rscn_cmd = ELS_RSCN;
+	event->rscn.rscn_page_len = sizeof(struct fc_els_rscn_page);
+	event->rscn.rscn_plen = cpu_to_be16(cmdsize);
+
+	nportid = vport->fc_myDID;
+	/* appears that page flags must be 0 for fabric to broadcast RSCN */
+	event->portid.rscn_page_flags = 0;
+	event->portid.rscn_fid[0] = (nportid & 0x00FF0000) >> 16;
+	event->portid.rscn_fid[1] = (nportid & 0x0000FF00) >> 8;
+	event->portid.rscn_fid[2] = nportid & 0x000000FF;
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+			      "Issue RSCN:       did:x%x",
+			      ndlp->nlp_DID, 0, 0);
+
+	phba->fc_stat.elsXmitRSCN++;
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
+	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+	    IOCB_ERROR) {
+		/* The additional lpfc_nlp_put will cause the following
+		 * lpfc_els_free_iocb routine to trigger the release of
+		 * the node.
+		 */
+		lpfc_nlp_put(ndlp);
+		lpfc_els_free_iocb(phba, elsiocb);
+		return 1;
+	}
+	/* This will cause the callback-function lpfc_cmpl_els_cmd to
+	 * trigger the release of node.
+	 */
+	if (!(vport->fc_flag & FC_PT2PT))
+		lpfc_nlp_put(ndlp);
+
+	return 0;
+}
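
The payload built by lpfc_issue_els_rscn() is just the fixed-size RSCN header plus one page carrying this port's 24-bit N_Port ID, split big-endian into the page's three FID bytes. The split in isolation:

#include <stdint.h>
#include <stdio.h>

/* Split a 24-bit FC address (Domain.Area.Port) into the three
 * rscn_fid bytes the same way the function above does.
 */
static void fid_split(uint32_t nportid, uint8_t fid[3])
{
	fid[0] = (nportid & 0x00FF0000) >> 16;	/* Domain */
	fid[1] = (nportid & 0x0000FF00) >> 8;	/* Area   */
	fid[2] =  nportid & 0x000000FF;		/* Port   */
}

int main(void)
{
	uint8_t fid[3];

	fid_split(0x010203, fid);		/* example N_Port ID */
	printf("%02x.%02x.%02x\n", fid[0], fid[1], fid[2]); /* 01.02.03 */
	return 0;
}
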
+
+/**
  * lpfc_issue_els_farpr - Issue a farp to an node on a vport
  * @vport: pointer to a host virtual N_Port data structure.
  * @nportid: N_Port identifier to the remote node.
@@ -3250,6 +3504,62 @@
 }
 
 /**
+ * lpfc_link_reset - Issue link reset
+ * @vport: pointer to a virtual N_Port data structure.
+ *
+ * This routine performs link reset by sending INIT_LINK mailbox command.
+ * For SLI-3 adapter, link attention interrupt is enabled before issuing
+ * INIT_LINK mailbox command.
+ *
+ * Return code
+ *   0 - Link reset initiated successfully
+ *   1 - Failed to initiate link reset
+ **/
+int
+lpfc_link_reset(struct lpfc_vport *vport)
+{
+	struct lpfc_hba *phba = vport->phba;
+	LPFC_MBOXQ_t *mbox;
+	uint32_t control;
+	int rc;
+
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+			 "2851 Attempt link reset\n");
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+				"2852 Failed to allocate mbox memory\n");
+		return 1;
+	}
+
+	/* Enable Link attention interrupts */
+	if (phba->sli_rev <= LPFC_SLI_REV3) {
+		spin_lock_irq(&phba->hbalock);
+		phba->sli.sli_flag |= LPFC_PROCESS_LA;
+		control = readl(phba->HCregaddr);
+		control |= HC_LAINT_ENA;
+		writel(control, phba->HCregaddr);
+		readl(phba->HCregaddr); /* flush */
+		spin_unlock_irq(&phba->hbalock);
+	}
+
+	lpfc_init_link(phba, mbox, phba->cfg_topology,
+		       phba->cfg_link_speed);
+	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+	mbox->vport = vport;
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+				"2853 Failed to issue INIT_LINK "
+				"mbox command, rc:x%x\n", rc);
+		mempool_free(mbox, phba->mbox_mem_pool);
+		return 1;
+	}
+
+	return 0;
+}
+
+/**
  * lpfc_els_retry - Make retry decision on an els command iocb
  * @phba: pointer to lpfc hba data structure.
  * @cmdiocb: pointer to lpfc command iocb data structure.
@@ -3285,6 +3595,7 @@
 	int logerr = 0;
 	uint32_t cmd = 0;
 	uint32_t did;
+	int link_reset = 0, rc;
 
 
 	/* Note: context2 may be 0 for internal driver abort
@@ -3366,7 +3677,6 @@
 			retry = 1;
 			break;
 
-		case IOERR_SEQUENCE_TIMEOUT:
 		case IOERR_INVALID_RPI:
 			if (cmd == ELS_CMD_PLOGI &&
 			    did == NameServer_DID) {
@@ -3377,6 +3687,18 @@
 			}
 			retry = 1;
 			break;
+
+		case IOERR_SEQUENCE_TIMEOUT:
+			if (cmd == ELS_CMD_PLOGI &&
+			    did == NameServer_DID &&
+			    (cmdiocb->retry + 1) == maxretry) {
+				/* Reset the Link */
+				link_reset = 1;
+				break;
+			}
+			retry = 1;
+			delay = 100;
+			break;
 		}
 		break;
 
@@ -3533,6 +3855,19 @@
 		break;
 	}
 
+	if (link_reset) {
+		rc = lpfc_link_reset(vport);
+		if (rc) {
+			/* Do not give up. Retry PLOGI one more time and attempt
+			 * link reset if PLOGI fails again.
+			 */
+			retry = 1;
+			delay = 100;
+			goto out_retry;
+		}
+		return 1;
+	}
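
The net effect of the IOERR_SEQUENCE_TIMEOUT rework earlier in this function plus the block above: a name-server PLOGI that times out on its final allowed retry escalates to a link reset instead of yet another retry, and a failed reset attempt falls back to one more delayed retry. The decision, condensed into a standalone sketch:

#include <stdbool.h>

enum els_action { ELS_RETRY_DELAYED, ELS_LINK_RESET };

/* Only a name-server PLOGI on its last allowed retry takes the
 * link-reset path; everything else gets one more delayed retry
 * (retry = 1, delay = 100 in the code above).
 */
static enum els_action on_seq_timeout(bool is_nameserver_plogi,
				      int retry, int maxretry)
{
	if (is_nameserver_plogi && retry + 1 == maxretry)
		return ELS_LINK_RESET;
	return ELS_RETRY_DELAYED;
}
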
+
 	if (did == FDMI_DID)
 		retry = 1;
 
@@ -3895,18 +4230,18 @@
 void
 lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
-	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
-	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
+	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
+	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
 
-	pmb->context1 = NULL;
-	pmb->context2 = NULL;
+	pmb->ctx_buf = NULL;
+	pmb->ctx_ndlp = NULL;
 
 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
 	kfree(mp);
 	mempool_free(pmb, phba->mbox_mem_pool);
 	if (ndlp) {
 		lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
-				 "0006 rpi%x DID:%x flg:%x %d map:%x %p\n",
+				 "0006 rpi%x DID:%x flg:%x %d map:%x x%px\n",
 				 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
 				 kref_read(&ndlp->kref),
 				 ndlp->nlp_usg_map, ndlp);
@@ -3975,7 +4310,7 @@
 	/* Check to see if link went down during discovery */
 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || lpfc_els_chk_latt(vport)) {
 		if (mbox) {
-			mp = (struct lpfc_dmabuf *) mbox->context1;
+			mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
 			if (mp) {
 				lpfc_mbuf_free(phba, mp->virt, mp->phys);
 				kfree(mp);
@@ -4011,6 +4346,7 @@
 		if ((rspiocb->iocb.ulpStatus == 0)
 		    && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
 			if (!lpfc_unreg_rpi(vport, ndlp) &&
+			    (!(vport->fc_flag & FC_PT2PT)) &&
 			    (ndlp->nlp_state ==  NLP_STE_PLOGI_ISSUE ||
 			     ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE)) {
 				lpfc_printf_vlog(vport, KERN_INFO,
@@ -4019,7 +4355,7 @@
 					"Data: x%x x%x x%x\n",
 					ndlp->nlp_DID, ndlp->nlp_state,
 					ndlp->nlp_rpi, ndlp->nlp_flag);
-				mp = mbox->context1;
+				mp = mbox->ctx_buf;
 				if (mp) {
 					lpfc_mbuf_free(phba, mp->virt,
 						       mp->phys);
@@ -4032,7 +4368,7 @@
 			/* Increment reference count to ndlp to hold the
 			 * reference to ndlp for the callback function.
 			 */
-			mbox->context2 = lpfc_nlp_get(ndlp);
+			mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
 			mbox->vport = vport;
 			if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) {
 				mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
@@ -4086,7 +4422,7 @@
 				}
 			}
 		}
-		mp = (struct lpfc_dmabuf *) mbox->context1;
+		mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
 		if (mp) {
 			lpfc_mbuf_free(phba, mp->virt, mp->phys);
 			kfree(mp);
@@ -4272,14 +4608,6 @@
 	default:
 		return 1;
 	}
-	/* Xmit ELS ACC response tag <ulpIoTag> */
-	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
-			 "0128 Xmit ELS ACC response tag x%x, XRI: x%x, "
-			 "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x "
-			 "fc_flag x%x\n",
-			 elsiocb->iotag, elsiocb->iocb.ulpContext,
-			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
-			 ndlp->nlp_rpi, vport->fc_flag);
 	if (ndlp->nlp_flag & NLP_LOGO_ACC) {
 		spin_lock_irq(shost->host_lock);
 		if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED ||
@@ -4448,6 +4776,15 @@
 		lpfc_els_free_iocb(phba, elsiocb);
 		return 1;
 	}
+
+	/* Xmit ELS ACC response tag <ulpIoTag> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, "
+			 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x "
+			 "RPI: x%x, fc_flag x%x\n",
+			 rc, elsiocb->iotag, elsiocb->sli4_xritag,
+			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+			 ndlp->nlp_rpi, vport->fc_flag);
 	return 0;
 }
 
@@ -5281,6 +5618,8 @@
 
 	desc->info.port_speed.speed = cpu_to_be16(rdp_speed);
 
+	if (phba->lmt & LMT_128Gb)
+		rdp_cap |= RDP_PS_128GB;
 	if (phba->lmt & LMT_64Gb)
 		rdp_cap |= RDP_PS_64GB;
 	if (phba->lmt & LMT_32Gb)
@@ -5333,16 +5672,16 @@
 	desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG);
 	if (vport->fc_flag & FC_FABRIC) {
 		memcpy(desc->port_names.wwnn, &vport->fabric_nodename,
-				sizeof(desc->port_names.wwnn));
+		       sizeof(desc->port_names.wwnn));
 
 		memcpy(desc->port_names.wwpn, &vport->fabric_portname,
-				sizeof(desc->port_names.wwpn));
+		       sizeof(desc->port_names.wwpn));
 	} else {  /* Point to Point */
 		memcpy(desc->port_names.wwnn, &ndlp->nlp_nodename,
-				sizeof(desc->port_names.wwnn));
+		       sizeof(desc->port_names.wwnn));
 
-		memcpy(desc->port_names.wwnn, &ndlp->nlp_portname,
-				sizeof(desc->port_names.wwpn));
+		memcpy(desc->port_names.wwpn, &ndlp->nlp_portname,
+		       sizeof(desc->port_names.wwpn));
 	}
 
 	desc->length = cpu_to_be32(sizeof(desc->port_names));
@@ -5499,7 +5838,7 @@
 		goto prep_mbox_fail;
 	mbox->vport = rdp_context->ndlp->vport;
 	mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0;
-	mbox->context2 = (struct lpfc_rdp_context *) rdp_context;
+	mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
 	if (rc == MBX_NOT_FINISHED)
 		goto issue_mbox_fail;
@@ -5542,7 +5881,7 @@
 	struct ls_rjt stat;
 
 	if (phba->sli_rev < LPFC_SLI_REV4 ||
-	    bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+	    bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
 						LPFC_SLI_INTF_IF_TYPE_2) {
 		rjt_err = LSRJT_UNABLE_TPC;
 		rjt_expl = LSEXP_REQ_UNSUPPORTED;
@@ -5624,10 +5963,10 @@
 	int rc;
 
 	mb = &pmb->u.mb;
-	lcb_context = (struct lpfc_lcb_context *)pmb->context1;
+	lcb_context = (struct lpfc_lcb_context *)pmb->ctx_ndlp;
 	ndlp = lcb_context->ndlp;
-	pmb->context1 = NULL;
-	pmb->context2 = NULL;
+	pmb->ctx_ndlp = NULL;
+	pmb->ctx_buf = NULL;
 
 	shdr = (union lpfc_sli4_cfg_shdr *)
 			&pmb->u.mqe.un.beacon_config.header.cfg_shdr;
@@ -5701,6 +6040,9 @@
 	stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t));
 	stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
 
+	if (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE)
+		stat->un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
+
 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
 	phba->fc_stat.elsXmitLSRJT++;
 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
@@ -5731,7 +6073,7 @@
 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
 			 LPFC_MBOX_OPCODE_SET_BEACON_CONFIG, len,
 			 LPFC_SLI4_MBX_EMBED);
-	mbox->context1 = (void *)lcb_context;
+	mbox->ctx_ndlp = (void *)lcb_context;
 	mbox->vport = phba->pport;
 	mbox->mbox_cmpl = lpfc_els_lcb_rsp;
 	bf_set(lpfc_mbx_set_beacon_port_num, &mbox->u.mqe.un.beacon_config,
@@ -6011,6 +6353,25 @@
 		if (vport->phba->nvmet_support)
 			continue;
 
+		/* If we are in the process of doing discovery on this
+		 * NPort, let it continue on its own.
+		 */
+		switch (ndlp->nlp_state) {
+		case  NLP_STE_PLOGI_ISSUE:
+		case  NLP_STE_ADISC_ISSUE:
+		case  NLP_STE_REG_LOGIN_ISSUE:
+		case  NLP_STE_PRLI_ISSUE:
+		case  NLP_STE_LOGO_ISSUE:
+			continue;
+		}
+
+		/* Check to see if we need to NVME rescan this target
+		 * remoteport.
+		 */
+		if (ndlp->nlp_fc4_type & NLP_FC4_NVME &&
+		    ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY))
+			lpfc_nvme_rescan_port(vport, ndlp);
+
 		lpfc_disc_state_machine(vport, ndlp, NULL,
 					NLP_EVT_DEVICE_RECOVERY);
 		lpfc_cancel_retry_delay_tmo(vport, ndlp);
@@ -6114,6 +6475,23 @@
 		fc_host_post_event(shost, fc_get_event_number(),
 			FCH_EVT_RSCN, lp[i]);
 
+	/* Check if RSCN is coming from a direct-connected remote NPort */
+	if (vport->fc_flag & FC_PT2PT) {
+		/* If so, just ACC it, no other action needed for now */
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+				 "2024 pt2pt RSCN %08x Data: x%x x%x\n",
+				 *lp, vport->fc_flag, payload_len);
+		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+
+		/* Check to see if we need to NVME rescan this target
+		 * remoteport.
+		 */
+		if (ndlp->nlp_fc4_type & NLP_FC4_NVME &&
+		    ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY))
+			lpfc_nvme_rescan_port(vport, ndlp);
+		return 0;
+	}
+
 	/* If we are about to begin discovery, just ACC the RSCN.
 	 * Discovery processing will satisfy it.
 	 */
@@ -6272,6 +6650,7 @@
 lpfc_els_handle_rscn(struct lpfc_vport *vport)
 {
 	struct lpfc_nodelist *ndlp;
+	struct lpfc_hba  *phba = vport->phba;
 
 	/* Ignore RSCN if the port is being torn down. */
 	if (vport->load_flag & FC_UNLOADING) {
@@ -6300,8 +6679,15 @@
 		 * flush the RSCN.  Otherwise, the outstanding requests
 		 * need to complete.
 		 */
-		if (lpfc_issue_gidft(vport) > 0)
+		if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_FT) {
+			if (lpfc_issue_gidft(vport) > 0)
+				return 1;
+		} else if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_PT) {
+			if (lpfc_issue_gidpt(vport) > 0)
+				return 1;
+		} else {
 			return 1;
+		}
 	} else {
 		/* Nameserver login in question.  Revalidate. */
 		if (ndlp) {
@@ -6455,6 +6841,11 @@
 	port_state = vport->port_state;
 	vport->fc_flag |= FC_PT2PT;
 	vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+
+	/* Acking an unsol FLOGI.  Count 1 for link bounce
+	 * work-around.
+	 */
+	vport->rcv_flogi_cnt++;
 	spin_unlock_irq(shost->host_lock);
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 			 "3311 Rcv Flogi PS x%x new PS x%x "
@@ -6472,6 +6863,25 @@
 
 	memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
 
+	/* Defer ACC response until AFTER we issue a FLOGI */
+	if (!(phba->hba_flag & HBA_FLOGI_ISSUED)) {
+		phba->defer_flogi_acc_rx_id = cmdiocb->iocb.ulpContext;
+		phba->defer_flogi_acc_ox_id =
+					cmdiocb->iocb.unsli3.rcvsli3.ox_id;
+
+		vport->fc_myDID = did;
+
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+				 "3344 Deferring FLOGI ACC: rx_id: x%x,"
+				 " ox_id: x%x, hba_flag x%x\n",
+				 phba->defer_flogi_acc_rx_id,
+				 phba->defer_flogi_acc_ox_id, phba->hba_flag);
+
+		phba->defer_flogi_acc_flag = true;
+
+		return 0;
+	}
+
 	/* Send back ACC */
 	lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, cmdiocb, ndlp, NULL);
 
@@ -6505,12 +6915,11 @@
 	uint32_t *lp;
 	RNID *rn;
 	struct ls_rjt stat;
-	uint32_t cmd;
 
 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
 	lp = (uint32_t *) pcmd->virt;
 
-	cmd = *lp++;
+	lp++;
 	rn = (RNID *) lp;
 
 	/* RNID received */
@@ -6644,11 +7053,11 @@
 
 	mb = &pmb->u.mb;
 
-	ndlp = (struct lpfc_nodelist *) pmb->context2;
-	rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff);
-	oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff);
-	pmb->context1 = NULL;
-	pmb->context2 = NULL;
+	ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+	rxid = (uint16_t)((unsigned long)(pmb->ctx_buf) & 0xffff);
+	oxid = (uint16_t)(((unsigned long)(pmb->ctx_buf) >> 16) & 0xffff);
+	pmb->ctx_buf = NULL;
+	pmb->ctx_ndlp = NULL;
 
 	if (mb->mbxStatus) {
 		mempool_free(pmb, phba->mbox_mem_pool);
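
The context1/context2-to-ctx_buf/ctx_ndlp rename in these mailbox completions keeps an old trick intact: there is no dedicated field for the two 16-bit exchange IDs the response must echo, so ox_id and rx_id are packed into the low 32 bits of the pointer-sized context slot and unpacked here. The round trip in isolation:

#include <stdint.h>
#include <stdio.h>

/* Pack/unpack two 16-bit exchange IDs through a pointer-sized
 * context slot, mirroring the mbox->ctx_buf usage above.
 */
static void *pack_xids(uint16_t ox_id, uint16_t rx_id)
{
	return (void *)(((unsigned long)ox_id << 16) | rx_id);
}

static void unpack_xids(void *ctx, uint16_t *ox_id, uint16_t *rx_id)
{
	*rx_id = (uint16_t)((unsigned long)ctx & 0xffff);
	*ox_id = (uint16_t)(((unsigned long)ctx >> 16) & 0xffff);
}

int main(void)
{
	uint16_t ox, rx;
	void *ctx = pack_xids(0x1234, 0xabcd);

	unpack_xids(ctx, &ox, &rx);
	printf("ox_id=%#x rx_id=%#x\n", ox, rx);  /* 0x1234 0xabcd */
	return 0;
}
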
@@ -6732,11 +7141,11 @@
 
 	mb = &pmb->u.mb;
 
-	ndlp = (struct lpfc_nodelist *) pmb->context2;
-	rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff);
-	oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff);
-	pmb->context1 = NULL;
-	pmb->context2 = NULL;
+	ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+	rxid = (uint16_t)((unsigned long)(pmb->ctx_buf) & 0xffff);
+	oxid = (uint16_t)(((unsigned long)(pmb->ctx_buf) >> 16) & 0xffff);
+	pmb->ctx_ndlp = NULL;
+	pmb->ctx_buf = NULL;
 
 	if (mb->mbxStatus) {
 		mempool_free(pmb, phba->mbox_mem_pool);
@@ -6827,10 +7236,10 @@
 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
 	if (mbox) {
 		lpfc_read_lnk_stat(phba, mbox);
-		mbox->context1 = (void *)((unsigned long)
+		mbox->ctx_buf = (void *)((unsigned long)
 			((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
 			cmdiocb->iocb.ulpContext)); /* rx_id */
-		mbox->context2 = lpfc_nlp_get(ndlp);
+		mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
 		mbox->vport = vport;
 		mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
 		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
@@ -6990,10 +7399,10 @@
 		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
 		if (mbox) {
 			lpfc_read_lnk_stat(phba, mbox);
-			mbox->context1 = (void *)((unsigned long)
+			mbox->ctx_buf = (void *)((unsigned long)
 				((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
 				cmdiocb->iocb.ulpContext)); /* rx_id */
-			mbox->context2 = lpfc_nlp_get(ndlp);
+			mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
 			mbox->vport = vport;
 			mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
 			if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
@@ -7099,7 +7508,10 @@
 lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq)
 {
 	struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport,
-							rrq->nlp_DID);
+						       rrq->nlp_DID);
+	if (!ndlp)
+		return 1;
+
 	if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag))
 		return lpfc_issue_els_rrq(rrq->vport, ndlp,
 					 rrq->nlp_DID, rrq);
@@ -7269,14 +7681,14 @@
 	uint32_t *lp;
 	IOCB_t *icmd;
 	FARP *fp;
-	uint32_t cmd, cnt, did;
+	uint32_t cnt, did;
 
 	icmd = &cmdiocb->iocb;
 	did = icmd->un.elsreq64.remoteID;
 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
 	lp = (uint32_t *) pcmd->virt;
 
-	cmd = *lp++;
+	lp++;
 	fp = (FARP *) lp;
 	/* FARP-REQ received from DID <did> */
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
@@ -7341,14 +7753,14 @@
 	struct lpfc_dmabuf *pcmd;
 	uint32_t *lp;
 	IOCB_t *icmd;
-	uint32_t cmd, did;
+	uint32_t did;
 
 	icmd = &cmdiocb->iocb;
 	did = icmd->un.elsreq64.remoteID;
 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
 	lp = (uint32_t *) pcmd->virt;
 
-	cmd = *lp++;
+	lp++;
 	/* FARP-RSP received from DID <did> */
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 			 "0600 FARP-RSP received from DID x%x\n", did);
@@ -7594,18 +8006,40 @@
 	if (phba->sli_rev == LPFC_SLI_REV4)
 		spin_lock(&pring->ring_lock);
 
+	/* First we need to issue aborts to outstanding cmds on txcmpl */
 	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
 		if (piocb->iocb_flag & LPFC_IO_LIBDFC)
 			continue;
 
 		if (piocb->vport != vport)
 			continue;
-		list_add_tail(&piocb->dlist, &abort_list);
+
+		/* On the ELS ring we can have ELS_REQUESTs or
+		 * GEN_REQUESTs waiting for a response.
+		 */
+		cmd = &piocb->iocb;
+		if (cmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
+			list_add_tail(&piocb->dlist, &abort_list);
+
+			/* If the link is down when flushing ELS commands
+			 * the firmware will not complete them till after
+			 * the link comes back up. This may confuse
+			 * discovery for the new link up, so we need to
+			 * change the compl routine to just clean up the iocb
+			 * and avoid any retry logic.
+			 */
+			if (phba->link_state == LPFC_LINK_DOWN)
+				piocb->iocb_cmpl = lpfc_cmpl_els_link_down;
+		}
+		if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR)
+			list_add_tail(&piocb->dlist, &abort_list);
 	}
+
 	if (phba->sli_rev == LPFC_SLI_REV4)
 		spin_unlock(&pring->ring_lock);
 	spin_unlock_irq(&phba->hbalock);
-	/* Abort each iocb on the aborted list and remove the dlist links. */
+
+	/* Abort each txcmpl iocb on aborted list and remove the dlist links. */
 	list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
 		spin_lock_irq(&phba->hbalock);
 		list_del_init(&piocb->dlist);
@@ -7621,6 +8055,9 @@
 	if (phba->sli_rev == LPFC_SLI_REV4)
 		spin_lock(&pring->ring_lock);
 
+	/* No need to abort the txq list,
+	 * just queue them up for lpfc_sli_cancel_iocbs
+	 */
 	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
 		cmd = &piocb->iocb;
 
@@ -7641,11 +8078,22 @@
 		list_del_init(&piocb->list);
 		list_add_tail(&piocb->list, &abort_list);
 	}
+
+	/* The same holds true for any FLOGI/FDISC on the fabric_iocb_list */
+	if (vport == phba->pport) {
+		list_for_each_entry_safe(piocb, tmp_iocb,
+					 &phba->fabric_iocb_list, list) {
+			cmd = &piocb->iocb;
+			list_del_init(&piocb->list);
+			list_add_tail(&piocb->list, &abort_list);
+		}
+	}
+
 	if (phba->sli_rev == LPFC_SLI_REV4)
 		spin_unlock(&pring->ring_lock);
 	spin_unlock_irq(&phba->hbalock);
 
-	/* Cancell all the IOCBs from the completions list */
+	/* Cancel all the IOCBs from the completions list */
 	lpfc_sli_cancel_iocbs(phba, &abort_list,
 			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
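The flush above follows the usual two-phase shape: collect victims onto a private list under the lock, then cancel them with the lock dropped. Generic sketch of the pattern (hypothetical request type, not lpfc symbols):

	LIST_HEAD(reap);

	spin_lock_irq(&q->lock);
	list_for_each_entry_safe(req, tmp, &q->pending, list)
		list_move_tail(&req->list, &reap);
	spin_unlock_irq(&q->lock);

	/* completions can now run without holding q->lock */
	list_for_each_entry_safe(req, tmp, &reap, list) {
		list_del_init(&req->list);
		req->complete(req, -ECANCELED);
	}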
 
@@ -7673,8 +8121,11 @@
 lpfc_els_flush_all_cmd(struct lpfc_hba  *phba)
 {
 	struct lpfc_vport *vport;
+
+	spin_lock_irq(&phba->port_list_lock);
 	list_for_each_entry(vport, &phba->port_list, listentry)
 		lpfc_els_flush_cmd(vport);
+	spin_unlock_irq(&phba->port_list_lock);
 
 	return;
 }
@@ -7849,8 +8300,9 @@
 	struct ls_rjt stat;
 	uint32_t *payload;
 	uint32_t cmd, did, newnode;
-	uint8_t rjt_exp, rjt_err = 0;
+	uint8_t rjt_exp, rjt_err = 0, init_link = 0;
 	IOCB_t *icmd = &elsiocb->iocb;
+	LPFC_MBOXQ_t *mbox;
 
 	if (!vport || !(elsiocb->context2))
 		goto dropit;
@@ -7937,9 +8389,10 @@
 			cmd, did, vport->port_state, vport->fc_flag,
 			vport->fc_myDID, vport->fc_prevDID);
 
-	/* reject till our FLOGI completes */
+	/* reject till our FLOGI completes, or a PT2PT PLOGI assigns our DID */
 	if ((vport->port_state < LPFC_FABRIC_CFG_LINK) &&
-	    (cmd != ELS_CMD_FLOGI)) {
+	    (cmd != ELS_CMD_FLOGI) &&
+	    !((cmd == ELS_CMD_PLOGI) && (vport->fc_flag & FC_PT2PT))) {
 		rjt_err = LSRJT_LOGICAL_BSY;
 		rjt_exp = LSEXP_NOTHING_MORE;
 		goto lsrjt;
@@ -7999,6 +8452,19 @@
 			did, vport->port_state, ndlp->nlp_flag);
 
 		phba->fc_stat.elsRcvFLOGI++;
+
+		/* If the driver believes fabric discovery is done and is ready,
+		 * bounce the link.  There is some discrepancy.
+		 */
+		if (vport->port_state >= LPFC_LOCAL_CFG_LINK &&
+		    vport->fc_flag & FC_PT2PT &&
+		    vport->rcv_flogi_cnt >= 1) {
+			rjt_err = LSRJT_LOGICAL_BSY;
+			rjt_exp = LSEXP_NOTHING_MORE;
+			init_link++;
+			goto lsrjt;
+		}
+
 		lpfc_els_rcv_flogi(vport, elsiocb, ndlp);
 		if (newnode)
 			lpfc_nlp_put(ndlp);
@@ -8197,6 +8663,14 @@
 		rjt_err = LSRJT_UNABLE_TPC;
 		rjt_exp = LSEXP_INVALID_OX_RX;
 		break;
+	case ELS_CMD_FPIN:
+		/*
+		 * Received FPIN from fabric - pass it to the
+		 * transport FPIN handler.
+		 */
+		fc_host_fpin_rcv(shost, elsiocb->iocb.unsli3.rcvsli3.acc_len,
+				(char *)payload);
+		break;
 	default:
 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
 			"RCV ELS cmd:     cmd:x%x did:x%x/ste:x%x",
@@ -8227,6 +8701,27 @@
 
 	lpfc_nlp_put(elsiocb->context1);
 	elsiocb->context1 = NULL;
+
+	/* Special case.  Driver received an unsolicited command that
+	 * is unsupportable given the driver's current state.  Reset the
+	 * link and start over.
+	 */
+	if (init_link) {
+		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+		if (!mbox)
+			return;
+		lpfc_linkdown(phba);
+		lpfc_init_link(phba, mbox,
+			       phba->cfg_topology,
+			       phba->cfg_link_speed);
+		mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
+		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+		mbox->vport = vport;
+		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
+		    MBX_NOT_FINISHED)
+			mempool_free(mbox, phba->mbox_mem_pool);
+	}
+
 	return;
 
 dropit:
@@ -8450,7 +8945,7 @@
 {
 	struct lpfc_vport *vport = pmb->vport;
 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
-	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
+	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
 	MAILBOX_t *mb = &pmb->u.mb;
 	int rc;
 
@@ -8497,7 +8992,7 @@
 				lpfc_nlp_put(ndlp);
 				return;
 			}
-
+			/* fall through */
 		default:
 			/* Try to recover from this error */
 			if (phba->sli_rev == LPFC_SLI_REV4)
@@ -8568,7 +9063,7 @@
 	if (mbox) {
 		lpfc_reg_vpi(vport, mbox);
 		mbox->vport = vport;
-		mbox->context2 = lpfc_nlp_get(ndlp);
+		mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
 		mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
 		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
 		    == MBX_NOT_FINISHED) {
@@ -9502,7 +9997,8 @@
 				"rport in state 0x%x\n", ndlp->nlp_state);
 		return;
 	}
-	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+	lpfc_printf_log(phba, KERN_ERR,
+			LOG_ELS | LOG_FCP_ERROR | LOG_NVME_IOERR,
 			"3094 Start rport recovery on shost id 0x%x "
 			"fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
 			"flags 0x%x\n",
@@ -9515,8 +10011,8 @@
 	 */
 	spin_lock_irqsave(shost->host_lock, flags);
 	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+	ndlp->nlp_flag |= NLP_ISSUE_LOGO;
 	spin_unlock_irqrestore(shost->host_lock, flags);
-	lpfc_issue_els_logo(vport, ndlp, 0);
-	lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
+	lpfc_unreg_rpi(vport, ndlp);
 }
 
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index eb71877..749286a 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -118,6 +118,7 @@
 	struct lpfc_work_evt *evtp;
 	int  put_node;
 	int  put_rport;
+	unsigned long iflags;
 
 	rdata = rport->dd_data;
 	ndlp = rdata->pnode;
@@ -132,7 +133,7 @@
 		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
 
 	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
-			 "3181 dev_loss_callbk x%06x, rport %p flg x%x\n",
+			 "3181 dev_loss_callbk x%06x, rport x%px flg x%x\n",
 			 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
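A note on the recurring %p -> x%px conversions in this patch: since v4.15 plain %p hashes pointer values in the kernel log, so these debug messages switch to %px (raw address) to keep node/rport pointers correlatable across messages; %ps, used further down for a function pointer, prints the symbol name instead.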
 
 	/* Don't defer this if we are in the process of deleting the vport
@@ -170,22 +171,22 @@
 	}
 
 	shost = lpfc_shost_from_vport(vport);
-	spin_lock_irq(shost->host_lock);
+	spin_lock_irqsave(shost->host_lock, iflags);
 	ndlp->nlp_flag |= NLP_IN_DEV_LOSS;
-	spin_unlock_irq(shost->host_lock);
+	spin_unlock_irqrestore(shost->host_lock, iflags);
 
 	/* We need to hold the node by incrementing the reference
 	 * count until this queued work is done
 	 */
 	evtp->evt_arg1  = lpfc_nlp_get(ndlp);
 
-	spin_lock_irq(&phba->hbalock);
+	spin_lock_irqsave(&phba->hbalock, iflags);
 	if (evtp->evt_arg1) {
 		evtp->evt = LPFC_EVT_DEV_LOSS;
 		list_add_tail(&evtp->evt_listp, &phba->work_list);
 		lpfc_worker_wake_up(phba);
 	}
-	spin_unlock_irq(&phba->hbalock);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
 
 	return;
 }
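The spin_lock_irq -> spin_lock_irqsave conversions here (and in the hunks below) matter once a path can be entered with interrupts already disabled: plain spin_unlock_irq() would unconditionally re-enable them. The saved-flags form preserves the caller's state:

	unsigned long iflags;

	spin_lock_irqsave(&lock, iflags);	/* disable IRQs, remember prior state */
	/* ... critical section ... */
	spin_unlock_irqrestore(&lock, iflags);	/* restore prior state */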
@@ -212,14 +213,15 @@
 	int  put_node;
 	int warn_on = 0;
 	int fcf_inuse = 0;
+	unsigned long iflags;
 
 	rport = ndlp->rport;
 	vport = ndlp->vport;
 	shost = lpfc_shost_from_vport(vport);
 
-	spin_lock_irq(shost->host_lock);
+	spin_lock_irqsave(shost->host_lock, iflags);
 	ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
-	spin_unlock_irq(shost->host_lock);
+	spin_unlock_irqrestore(shost->host_lock, iflags);
 
 	if (!rport)
 		return fcf_inuse;
@@ -235,7 +237,7 @@
 		ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);
 
 	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
-			 "3182 dev_loss_tmo_handler x%06x, rport %p flg x%x\n",
+			 "3182 dev_loss_tmo_handler x%06x, rport x%px flg x%x\n",
 			 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
 
 	/*
@@ -638,8 +640,6 @@
 	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
 		if (phba->hba_flag & HBA_RRQ_ACTIVE)
 			lpfc_handle_rrq_active(phba);
-		if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
-			lpfc_sli4_fcp_xri_abort_event_proc(phba);
 		if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
 			lpfc_sli4_els_xri_abort_event_proc(phba);
 		if (phba->hba_flag & ASYNC_EVENT)
@@ -859,10 +859,9 @@
 void
 lpfc_linkdown_port(struct lpfc_vport *vport)
 {
-	struct lpfc_hba  *phba = vport->phba;
 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
 
-	if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
+	if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
 		fc_host_post_event(shost, fc_get_event_number(),
 				   FCH_EVT_LINKDOWN, 0);
 
@@ -894,11 +893,21 @@
 	/* Block all SCSI stack I/Os */
 	lpfc_scsi_dev_block(phba);
 
+	phba->defer_flogi_acc_flag = false;
+
 	spin_lock_irq(&phba->hbalock);
 	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
 	spin_unlock_irq(&phba->hbalock);
 	if (phba->link_state > LPFC_LINK_DOWN) {
 		phba->link_state = LPFC_LINK_DOWN;
+		if (phba->sli4_hba.conf_trunk) {
+			phba->trunk_link.link0.state = 0;
+			phba->trunk_link.link1.state = 0;
+			phba->trunk_link.link2.state = 0;
+			phba->trunk_link.link3.state = 0;
+			phba->sli4_hba.link_state.logical_speed =
+						LPFC_LINK_SPEED_UNKNOWN;
+		}
 		spin_lock_irq(shost->host_lock);
 		phba->pport->fc_flag &= ~FC_LBIT;
 		spin_unlock_irq(shost->host_lock);
@@ -911,8 +920,8 @@
 
 			vports[i]->fc_myDID = 0;
 
-			if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
-			    (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
+			if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+			    (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
 				if (phba->nvmet_support)
 					lpfc_nvmet_update_targetport(phba);
 				else
@@ -921,7 +930,11 @@
 		}
 	}
 	lpfc_destroy_vport_work_array(phba, vports);
-	/* Clean up any firmware default rpi's */
+
+	/* Clean up any SLI3 firmware default rpi's */
+	if (phba->sli_rev > LPFC_SLI_REV3)
+		goto skip_unreg_did;
+
 	mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (mb) {
 		lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
@@ -933,6 +946,7 @@
 		}
 	}
 
+ skip_unreg_did:
 	/* Setup myDID for link up if we are in pt2pt mode */
 	if (phba->pport->fc_flag & FC_PT2PT) {
 		mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -947,6 +961,7 @@
 		}
 		spin_lock_irq(shost->host_lock);
 		phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
+		phba->pport->rcv_flogi_cnt = 0;
 		spin_unlock_irq(shost->host_lock);
 	}
 	return 0;
@@ -997,7 +1012,7 @@
 		(vport != phba->pport))
 		return;
 
-	if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
+	if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
 		fc_host_post_event(shost, fc_get_event_number(),
 				   FCH_EVT_LINKUP, 0);
 
@@ -1018,6 +1033,7 @@
 {
 	struct lpfc_vport **vports;
 	int i;
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(phba->pport);
 
 	phba->link_state = LPFC_LINK_UP;
 
@@ -1031,6 +1047,18 @@
 			lpfc_linkup_port(vports[i]);
 	lpfc_destroy_vport_work_array(phba, vports);
 
+	/* Clear the pport flogi counter in case the link down was
+	 * absorbed without an ACQE. Discovery runs in the worker
+	 * thread; the host lock below serializes the update.
+	 */
+	spin_lock_irq(shost->host_lock);
+	phba->pport->rcv_flogi_cnt = 0;
+	spin_unlock_irq(shost->host_lock);
+
+	/* reinitialize initial FLOGI flag */
+	phba->hba_flag &= ~(HBA_FLOGI_ISSUED);
+	phba->defer_flogi_acc_flag = false;
+
 	return 0;
 }
 
@@ -1992,6 +2020,26 @@
 				"failover and change port state:x%x/x%x\n",
 				phba->pport->port_state, LPFC_VPORT_UNKNOWN);
 		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
+
+		if (!phba->fcf.fcf_redisc_attempted) {
+			lpfc_unregister_fcf(phba);
+
+			rc = lpfc_sli4_redisc_fcf_table(phba);
+			if (!rc) {
+				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+						"3195 Rediscover FCF table\n");
+				phba->fcf.fcf_redisc_attempted = 1;
+				lpfc_sli4_clear_fcf_rr_bmask(phba);
+			} else {
+				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+						"3196 Rediscover FCF table "
+						"failed. Status:x%x\n", rc);
+			}
+		} else {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+					"3197 Rediscover FCF table already "
+					"attempted. No more retries\n");
+		}
 		goto stop_flogi_current_fcf;
 	} else {
 		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS,
@@ -2915,7 +2963,7 @@
 void
 lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 {
-	struct lpfc_dmabuf *dmabuf = mboxq->context1;
+	struct lpfc_dmabuf *dmabuf = mboxq->ctx_buf;
 	struct lpfc_vport *vport = mboxq->vport;
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 
@@ -3008,7 +3056,7 @@
 lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
 	MAILBOX_t *mb = &pmb->u.mb;
-	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
+	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
 	struct lpfc_vport  *vport = pmb->vport;
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 	struct serv_parm *sp = &vport->fc_sparam;
@@ -3052,7 +3100,7 @@
 	return;
 
 out:
-	pmb->context1 = NULL;
+	pmb->ctx_buf = NULL;
 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
 	kfree(mp);
 	lpfc_issue_clear_la(phba, vport);
@@ -3071,8 +3119,9 @@
 	int rc;
 	struct fcf_record *fcf_record;
 	uint32_t fc_flags = 0;
+	unsigned long iflags;
 
-	spin_lock_irq(&phba->hbalock);
+	spin_lock_irqsave(&phba->hbalock, iflags);
 	phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la);
 
 	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
@@ -3085,6 +3134,7 @@
 		case LPFC_LINK_SPEED_16GHZ:
 		case LPFC_LINK_SPEED_32GHZ:
 		case LPFC_LINK_SPEED_64GHZ:
+		case LPFC_LINK_SPEED_128GHZ:
 			break;
 		default:
 			phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN;
@@ -3168,12 +3218,12 @@
 		vport->fc_myDID = phba->fc_pref_DID;
 		fc_flags |= FC_LBIT;
 	}
-	spin_unlock_irq(&phba->hbalock);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
 
 	if (fc_flags) {
-		spin_lock_irq(shost->host_lock);
+		spin_lock_irqsave(shost->host_lock, iflags);
 		vport->fc_flag |= fc_flags;
-		spin_unlock_irq(shost->host_lock);
+		spin_unlock_irqrestore(shost->host_lock, iflags);
 	}
 
 	lpfc_linkup(phba);
@@ -3190,7 +3240,7 @@
 	sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
 	rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
 	if (rc == MBX_NOT_FINISHED) {
-		mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
+		mp = (struct lpfc_dmabuf *)sparam_mbox->ctx_buf;
 		lpfc_mbuf_free(phba, mp->virt, mp->phys);
 		kfree(mp);
 		mempool_free(sparam_mbox, phba->mbox_mem_pool);
@@ -3247,22 +3297,22 @@
 		 * The driver is expected to do FIP/FCF. Call the port
 		 * and get the FCF Table.
 		 */
-		spin_lock_irq(&phba->hbalock);
+		spin_lock_irqsave(&phba->hbalock, iflags);
 		if (phba->hba_flag & FCF_TS_INPROG) {
-			spin_unlock_irq(&phba->hbalock);
+			spin_unlock_irqrestore(&phba->hbalock, iflags);
 			return;
 		}
 		/* This is the initial FCF discovery scan */
 		phba->fcf.fcf_flag |= FCF_INIT_DISC;
-		spin_unlock_irq(&phba->hbalock);
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
 		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
 				"2778 Start FCF table scan at linkup\n");
 		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
 						     LPFC_FCOE_FCF_GET_FIRST);
 		if (rc) {
-			spin_lock_irq(&phba->hbalock);
+			spin_lock_irqsave(&phba->hbalock, iflags);
 			phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
-			spin_unlock_irq(&phba->hbalock);
+			spin_unlock_irqrestore(&phba->hbalock, iflags);
 			goto out;
 		}
 		/* Reset FCF roundrobin bmask for new discovery */
@@ -3273,7 +3323,7 @@
 out:
 	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
 	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
-			 "0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
+			 "0263 Discovery Mailbox error: state: 0x%x : x%px x%px\n",
 			 vport->port_state, sparam_mbox, cfglink_mbox);
 	lpfc_issue_clear_la(phba, vport);
 	return;
@@ -3319,8 +3369,9 @@
 	struct lpfc_mbx_read_top *la;
 	struct lpfc_sli_ring *pring;
 	MAILBOX_t *mb = &pmb->u.mb;
-	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
+	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
 	uint8_t attn_type;
+	unsigned long iflags;
 
 	/* Unblock ELS traffic */
 	pring = lpfc_phba_elsring(phba);
@@ -3342,12 +3393,12 @@
 
 	memcpy(&phba->alpa_map[0], mp->virt, 128);
 
-	spin_lock_irq(shost->host_lock);
+	spin_lock_irqsave(shost->host_lock, iflags);
 	if (bf_get(lpfc_mbx_read_top_pb, la))
 		vport->fc_flag |= FC_BYPASSED_MODE;
 	else
 		vport->fc_flag &= ~FC_BYPASSED_MODE;
-	spin_unlock_irq(shost->host_lock);
+	spin_unlock_irqrestore(shost->host_lock, iflags);
 
 	if (phba->fc_eventTag <= la->eventTag) {
 		phba->fc_stat.LinkMultiEvent++;
@@ -3358,12 +3409,12 @@
 
 	phba->fc_eventTag = la->eventTag;
 	if (phba->sli_rev < LPFC_SLI_REV4) {
-		spin_lock_irq(&phba->hbalock);
+		spin_lock_irqsave(&phba->hbalock, iflags);
 		if (bf_get(lpfc_mbx_read_top_mm, la))
 			phba->sli.sli_flag |= LPFC_MENLO_MAINT;
 		else
 			phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
-		spin_unlock_irq(&phba->hbalock);
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
 	}
 
 	phba->link_events++;
@@ -3476,15 +3527,15 @@
 lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
 	struct lpfc_vport  *vport = pmb->vport;
-	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
-	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
+	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
+	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
 
-	pmb->context1 = NULL;
-	pmb->context2 = NULL;
+	pmb->ctx_buf = NULL;
+	pmb->ctx_ndlp = NULL;
 
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
-			 "0002 rpi:%x DID:%x flg:%x %d map:%x %p\n",
+			 "0002 rpi:%x DID:%x flg:%x %d map:%x x%px\n",
 			 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
 			 kref_read(&ndlp->kref),
 			 ndlp->nlp_usg_map, ndlp);
@@ -3611,8 +3662,8 @@
 		spin_unlock_irq(shost->host_lock);
 		vport->fc_myDID = 0;
 
-		if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
-		    (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
+		if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+		    (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
 			if (phba->nvmet_support)
 				lpfc_nvmet_update_targetport(phba);
 			else
@@ -3689,8 +3740,8 @@
 	vport_buff = (uint8_t *) vport_info;
 	do {
 		/* free dma buffer from previous round */
-		if (pmb->context1) {
-			mp = (struct lpfc_dmabuf *)pmb->context1;
+		if (pmb->ctx_buf) {
+			mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
 			lpfc_mbuf_free(phba, mp->virt, mp->phys);
 			kfree(mp);
 		}
@@ -3712,7 +3763,7 @@
 
 		if (phba->sli_rev == LPFC_SLI_REV4) {
 			byte_count = pmb->u.mqe.un.mb_words[5];
-			mp = (struct lpfc_dmabuf *)pmb->context1;
+			mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
 			if (byte_count > sizeof(struct static_vport_info) -
 					offset)
 				byte_count = sizeof(struct static_vport_info)
@@ -3777,8 +3828,8 @@
 out:
 	kfree(vport_info);
 	if (mbx_wait_rc != MBX_TIMEOUT) {
-		if (pmb->context1) {
-			mp = (struct lpfc_dmabuf *)pmb->context1;
+		if (pmb->ctx_buf) {
+			mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
 			lpfc_mbuf_free(phba, mp->virt, mp->phys);
 			kfree(mp);
 		}
@@ -3799,13 +3850,13 @@
 {
 	struct lpfc_vport *vport = pmb->vport;
 	MAILBOX_t *mb = &pmb->u.mb;
-	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
+	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
 	struct lpfc_nodelist *ndlp;
 	struct Scsi_Host *shost;
 
-	ndlp = (struct lpfc_nodelist *) pmb->context2;
-	pmb->context1 = NULL;
-	pmb->context2 = NULL;
+	ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+	pmb->ctx_ndlp = NULL;
+	pmb->ctx_buf = NULL;
 
 	if (mb->mbxStatus) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
@@ -3874,11 +3925,9 @@
 int
 lpfc_issue_gidft(struct lpfc_vport *vport)
 {
-	struct lpfc_hba *phba = vport->phba;
-
 	/* Good status, issue CT Request to NameServer */
-	if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
-	    (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) {
+	if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+	    (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) {
 		if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_FCP)) {
 			/* Cannot issue NameServer FCP Query, so finish up
 			 * discovery
@@ -3893,8 +3942,8 @@
 		vport->gidft_inp++;
 	}
 
-	if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
-	    (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
+	if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+	    (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
 		if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_NVME)) {
 			/* Cannot issue NameServer NVME Query, so finish up
 			 * discovery
@@ -3913,6 +3962,35 @@
 	return vport->gidft_inp;
 }
 
+/**
+ * lpfc_issue_gidpt - issue a GID_PT for all N_Ports
+ * @vport: The virtual port for which this call is being executed.
+ *
+ * This routine will issue a GID_PT to get a list of all N_Ports.
+ *
+ * Return value:
+ *   0 - Failure to issue a GID_PT
+ *   1 - GID_PT issued
+ **/
+int
+lpfc_issue_gidpt(struct lpfc_vport *vport)
+{
+	/* Good status, issue CT Request to NameServer */
+	if (lpfc_ns_cmd(vport, SLI_CTNS_GID_PT, 0, GID_PT_N_PORT)) {
+		/* Cannot issue NameServer GID_PT query, so finish up
+		 * discovery
+		 */
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
+				 "0606 %s Port TYPE %x %s\n",
+				 "Failed to issue GID_PT to",
+				 GID_PT_N_PORT,
+				 "Finishing discovery.");
+		return 0;
+	}
+	vport->gidft_inp++;
+	return 1;
+}
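Presumably this pairs with the LPFC_NS_QUERY_GID_PT option added to lpfc_hw4.h later in this patch; a plausible call-site shape, hedged since the dispatch logic (and the cfg_ns_query vport attribute it assumes) is not part of this hunk:

	/* after an RSCN, requery the NameServer by the configured method */
	if (vport->cfg_ns_query == LPFC_NS_QUERY_GID_PT)
		lpfc_issue_gidpt(vport);
	else
		lpfc_issue_gidft(vport);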
+
 /*
  * This routine handles processing a NameServer REG_LOGIN mailbox
  * command upon completion. It is setup in the LPFC_MBOXQ
@@ -3923,12 +4001,12 @@
 lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
 	MAILBOX_t *mb = &pmb->u.mb;
-	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
-	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
+	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
+	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
 	struct lpfc_vport *vport = pmb->vport;
 
-	pmb->context1 = NULL;
-	pmb->context2 = NULL;
+	pmb->ctx_buf = NULL;
+	pmb->ctx_ndlp = NULL;
 	vport->gidft_inp = 0;
 
 	if (mb->mbxStatus) {
@@ -3969,7 +4047,7 @@
 	ndlp->nlp_type |= NLP_FABRIC;
 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
-			 "0003 rpi:%x DID:%x flg:%x %d map%x %p\n",
+			 "0003 rpi:%x DID:%x flg:%x %d map%x x%px\n",
 			 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
 			 kref_read(&ndlp->kref),
 			 ndlp->nlp_usg_map, ndlp);
@@ -3981,12 +4059,12 @@
 		lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
 		lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);
 
-		if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
-		    (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP))
+		if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+		    (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP))
 			lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, FC_TYPE_FCP);
 
-		if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
-		    (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME))
+		if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+		    (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME))
 			lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0,
 				    FC_TYPE_NVME);
 
@@ -4022,7 +4100,7 @@
 	struct fc_rport_identifiers rport_ids;
 	struct lpfc_hba  *phba = vport->phba;
 
-	if (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
+	if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
 		return;
 
 	/* Remote port has reappeared. Re-register w/ FC transport */
@@ -4074,15 +4152,21 @@
 	rdata->pnode = lpfc_nlp_get(ndlp);
 
 	if (ndlp->nlp_type & NLP_FCP_TARGET)
-		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
+		rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET;
 	if (ndlp->nlp_type & NLP_FCP_INITIATOR)
-		rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+		rport_ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
+	if (ndlp->nlp_type & NLP_NVME_INITIATOR)
+		rport_ids.roles |= FC_PORT_ROLE_NVME_INITIATOR;
+	if (ndlp->nlp_type & NLP_NVME_TARGET)
+		rport_ids.roles |= FC_PORT_ROLE_NVME_TARGET;
+	if (ndlp->nlp_type & NLP_NVME_DISCOVERY)
+		rport_ids.roles |= FC_PORT_ROLE_NVME_DISCOVERY;
 
 	if (rport_ids.roles !=  FC_RPORT_ROLE_UNKNOWN)
 		fc_remote_port_rolechg(rport, rport_ids.roles);
 
 	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
-			 "3183 rport register x%06x, rport %p role x%x\n",
+			 "3183 rport register x%06x, rport x%px role x%x\n",
 			 ndlp->nlp_DID, rport, rport_ids.roles);
 
 	if ((rport->scsi_target_id != -1) &&
@@ -4097,9 +4181,8 @@
 {
 	struct fc_rport *rport = ndlp->rport;
 	struct lpfc_vport *vport = ndlp->vport;
-	struct lpfc_hba  *phba = vport->phba;
 
-	if (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
+	if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
 		return;
 
 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
@@ -4107,7 +4190,7 @@
 		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
 
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
-			 "3184 rport unregister x%06x, rport %p\n",
+			 "3184 rport unregister x%06x, rport x%px\n",
 			 ndlp->nlp_DID, rport);
 
 	fc_remote_port_delete(rport);
@@ -4119,8 +4202,9 @@
 lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
 {
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	unsigned long iflags;
 
-	spin_lock_irq(shost->host_lock);
+	spin_lock_irqsave(shost->host_lock, iflags);
 	switch (state) {
 	case NLP_STE_UNUSED_NODE:
 		vport->fc_unused_cnt += count;
@@ -4150,7 +4234,7 @@
 			vport->fc_npr_cnt += count;
 		break;
 	}
-	spin_unlock_irq(shost->host_lock);
+	spin_unlock_irqrestore(shost->host_lock, iflags);
 }
 
 static void
@@ -4193,7 +4277,7 @@
 
 	if (new_state ==  NLP_STE_MAPPED_NODE ||
 	    new_state == NLP_STE_UNMAPPED_NODE) {
-		if (ndlp->nlp_fc4_type & NLP_FC4_FCP ||
+		if (ndlp->nlp_fc4_type ||
 		    ndlp->nlp_DID == Fabric_DID ||
 		    ndlp->nlp_DID == NameServer_DID ||
 		    ndlp->nlp_DID == FDMI_DID) {
@@ -4385,6 +4469,7 @@
 	NLP_INT_NODE_ACT(ndlp);
 	atomic_set(&ndlp->cmd_pending, 0);
 	ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
+	ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
 }
 
 struct lpfc_nodelist *
@@ -4392,18 +4477,31 @@
 		 int state)
 {
 	struct lpfc_hba *phba = vport->phba;
-	uint32_t did;
+	uint32_t did, flag;
 	unsigned long flags;
 	unsigned long *active_rrqs_xri_bitmap = NULL;
 	int rpi = LPFC_RPI_ALLOC_ERROR;
+	uint32_t defer_did = 0;
 
 	if (!ndlp)
 		return NULL;
 
 	if (phba->sli_rev == LPFC_SLI_REV4) {
-		rpi = lpfc_sli4_alloc_rpi(vport->phba);
-		if (rpi == LPFC_RPI_ALLOC_ERROR)
+		if (ndlp->nlp_rpi == LPFC_RPI_ALLOC_ERROR)
+			rpi = lpfc_sli4_alloc_rpi(vport->phba);
+		else
+			rpi = ndlp->nlp_rpi;
+
+		if (rpi == LPFC_RPI_ALLOC_ERROR) {
+			lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
+					 "0359 %s: ndlp:x%px "
+					 "usgmap:x%x refcnt:%d FAILED RPI "
+					 "ALLOC\n",
+					 __func__,
+					 (void *)ndlp, ndlp->nlp_usg_map,
+					 kref_read(&ndlp->kref));
 			return NULL;
+		}
 	}
 
 	spin_lock_irqsave(&phba->ndlp_lock, flags);
@@ -4411,9 +4509,9 @@
 	if (NLP_CHK_FREE_REQ(ndlp)) {
 		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
-				"0277 lpfc_enable_node: ndlp:x%p "
+				"0277 %s: ndlp:x%px "
 				"usgmap:x%x refcnt:%d\n",
-				(void *)ndlp, ndlp->nlp_usg_map,
+				__func__, (void *)ndlp, ndlp->nlp_usg_map,
 				kref_read(&ndlp->kref));
 		goto free_rpi;
 	}
@@ -4421,23 +4519,30 @@
 	if (NLP_CHK_NODE_ACT(ndlp)) {
 		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
-				"0278 lpfc_enable_node: ndlp:x%p "
+				"0278 %s: ndlp:x%px "
 				"usgmap:x%x refcnt:%d\n",
-				(void *)ndlp, ndlp->nlp_usg_map,
+				__func__, (void *)ndlp, ndlp->nlp_usg_map,
 				kref_read(&ndlp->kref));
 		goto free_rpi;
 	}
 
-	/* Keep the original DID */
+	/* First preserve the original DID, xri_bitmap and some flags */
 	did = ndlp->nlp_DID;
+	flag = (ndlp->nlp_flag & NLP_UNREG_INP);
+	if (flag & NLP_UNREG_INP)
+		defer_did = ndlp->nlp_defer_did;
 	if (phba->sli_rev == LPFC_SLI_REV4)
 		active_rrqs_xri_bitmap = ndlp->active_rrqs_xri_bitmap;
 
-	/* re-initialize ndlp except of ndlp linked list pointer */
+	/* Zero ndlp except of ndlp linked list pointer */
 	memset((((char *)ndlp) + sizeof (struct list_head)), 0,
 		sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
-	lpfc_initialize_node(vport, ndlp, did);
 
+	/* Next reinitialize and restore saved objects */
+	lpfc_initialize_node(vport, ndlp, did);
+	ndlp->nlp_flag |= flag;
+	if (flag & NLP_UNREG_INP)
+		ndlp->nlp_defer_did = defer_did;
 	if (phba->sli_rev == LPFC_SLI_REV4)
 		ndlp->active_rrqs_xri_bitmap = active_rrqs_xri_bitmap;
 
@@ -4446,7 +4551,7 @@
 		ndlp->nlp_rpi = rpi;
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
 				 "0008 rpi:%x DID:%x flg:%x refcnt:%d "
-				 "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID,
+				 "map:%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID,
 				 ndlp->nlp_flag,
 				 kref_read(&ndlp->kref),
 				 ndlp->nlp_usg_map, ndlp);
@@ -4455,6 +4560,14 @@
 
 	if (state != NLP_STE_UNUSED_NODE)
 		lpfc_nlp_set_state(vport, ndlp, state);
+	else
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+				 "0013 rpi:%x DID:%x flg:%x refcnt:%d "
+				 "map:%x x%px STATE=UNUSED\n",
+				 ndlp->nlp_rpi, ndlp->nlp_DID,
+				 ndlp->nlp_flag,
+				 kref_read(&ndlp->kref),
+				 ndlp->nlp_usg_map, ndlp);
 
 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
 		"node enable:       did:x%x",
@@ -4586,12 +4699,15 @@
 		case CMD_GEN_REQUEST64_CR:
 			if (iocb->context_un.ndlp == ndlp)
 				return 1;
+			/* fall through */
 		case CMD_ELS_REQUEST64_CR:
 			if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
 				return 1;
+			/* fall through */
 		case CMD_XMIT_ELS_RSP64_CX:
 			if (iocb->context1 == (uint8_t *) ndlp)
 				return 1;
+			/* fall through */
 		}
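The /* fall through */ annotations here (and the "Drop thru" rewording below) track GCC's -Wimplicit-fallthrough, which treats only specific comment spellings, or the later fallthrough pseudo-keyword, as markers of intentional fall-through.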
 	} else if (pring->ringno == LPFC_FCP_RING) {
 		/* Skip match check if waiting to relogin to FCP target */
@@ -4697,11 +4813,31 @@
 	struct lpfc_vport  *vport = pmb->vport;
 	struct lpfc_nodelist *ndlp;
 
-	ndlp = (struct lpfc_nodelist *)(pmb->context1);
+	ndlp = (struct lpfc_nodelist *)(pmb->ctx_ndlp);
 	if (!ndlp)
 		return;
 	lpfc_issue_els_logo(vport, ndlp, 0);
 	mempool_free(pmb, phba->mbox_mem_pool);
+
+	/* Check to see if there are any deferred events to process */
+	if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
+	    (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+				 "1434 UNREG cmpl deferred logo x%x "
+				 "on NPort x%x Data: x%x x%px\n",
+				 ndlp->nlp_rpi, ndlp->nlp_DID,
+				 ndlp->nlp_defer_did, ndlp);
+
+		ndlp->nlp_flag &= ~NLP_UNREG_INP;
+		ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
+		lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+	} else {
+		if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
+			lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
+			ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
+		}
+		ndlp->nlp_flag &= ~NLP_UNREG_INP;
+	}
 }
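This deferred-event branch is one half of a handshake used throughout the patch: while an UNREG_RPI is in flight (NLP_UNREG_INP), a newly learned DID is parked in nlp_defer_did instead of being acted on, and the unreg completion replays the PLOGI. Schematically (producer side paraphrased, not an exact lpfc excerpt):

	/* producer: login event arrives while an unreg is pending */
	if (ndlp->nlp_flag & NLP_UNREG_INP) {
		ndlp->nlp_defer_did = new_did;	/* park it */
		return;
	}

	/* consumer: unreg completion, as in the code above */
	if (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING) {
		ndlp->nlp_flag &= ~NLP_UNREG_INP;
		ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
		lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
	}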
 
 /*
@@ -4730,6 +4866,21 @@
 					 "did x%x\n",
 					 ndlp->nlp_rpi, ndlp->nlp_flag,
 					 ndlp->nlp_DID);
+
+		/* If there is already an UNREG in progress for this ndlp,
+		 * no need to queue up another one.
+		 */
+		if (ndlp->nlp_flag & NLP_UNREG_INP) {
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+					 "1436 unreg_rpi SKIP UNREG x%x on "
+					 "NPort x%x deferred x%x flg x%x "
+					 "Data: x%px\n",
+					 ndlp->nlp_rpi, ndlp->nlp_DID,
+					 ndlp->nlp_defer_did,
+					 ndlp->nlp_flag, ndlp);
+			goto out;
+		}
+
 		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 		if (mbox) {
 			/* SLI4 ports require the physical rpi value. */
@@ -4740,26 +4891,43 @@
 			lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
 			mbox->vport = vport;
 			if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
-				mbox->context1 = ndlp;
+				mbox->ctx_ndlp = ndlp;
 				mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
 			} else {
 				if (phba->sli_rev == LPFC_SLI_REV4 &&
 				    (!(vport->load_flag & FC_UNLOADING)) &&
 				    (bf_get(lpfc_sli_intf_if_type,
-				     &phba->sli4_hba.sli_intf) ==
+				     &phba->sli4_hba.sli_intf) >=
 				      LPFC_SLI_INTF_IF_TYPE_2) &&
 				    (kref_read(&ndlp->kref) > 0)) {
-					mbox->context1 = lpfc_nlp_get(ndlp);
+					mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
 					mbox->mbox_cmpl =
 						lpfc_sli4_unreg_rpi_cmpl_clr;
 					/*
 					 * accept PLOGIs after unreg_rpi_cmpl
 					 */
 					acc_plogi = 0;
-				} else
+				} else if (vport->load_flag & FC_UNLOADING) {
+					mbox->ctx_ndlp = NULL;
 					mbox->mbox_cmpl =
 						lpfc_sli_def_mbox_cmpl;
+				} else {
+					mbox->ctx_ndlp = ndlp;
+					mbox->mbox_cmpl =
+						lpfc_sli_def_mbox_cmpl;
+				}
 			}
+			if (((ndlp->nlp_DID & Fabric_DID_MASK) !=
+			    Fabric_DID_MASK) &&
+			    (!(vport->fc_flag & FC_OFFLINE_MODE)))
+				ndlp->nlp_flag |= NLP_UNREG_INP;
+
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+					 "1433 unreg_rpi UNREG x%x on "
+					 "NPort x%x deferred flg x%x "
+					 "Data:x%px\n",
+					 ndlp->nlp_rpi, ndlp->nlp_DID,
+					 ndlp->nlp_flag, ndlp);
 
 			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
 			if (rc == MBX_NOT_FINISHED) {
@@ -4768,7 +4936,7 @@
 			}
 		}
 		lpfc_no_rpi(phba, ndlp);
-
+out:
 		if (phba->sli_rev != LPFC_SLI_REV4)
 			ndlp->nlp_rpi = 0;
 		ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
@@ -4836,7 +5004,7 @@
 				 mbox);
 		mbox->vport = vport;
 		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-		mbox->context1 = NULL;
+		mbox->ctx_ndlp = NULL;
 		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
 		if (rc != MBX_TIMEOUT)
 			mempool_free(mbox, phba->mbox_mem_pool);
@@ -4855,13 +5023,17 @@
 	LPFC_MBOXQ_t     *mbox;
 	int rc;
 
+	/* Unreg DID is an SLI3 operation. */
+	if (phba->sli_rev > LPFC_SLI_REV3)
+		return;
+
 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (mbox) {
 		lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS,
 			       mbox);
 		mbox->vport = vport;
 		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-		mbox->context1 = NULL;
+		mbox->ctx_ndlp = NULL;
 		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
 		if (rc != MBX_TIMEOUT)
 			mempool_free(mbox, phba->mbox_mem_pool);
@@ -4894,16 +5066,16 @@
 			 ndlp->nlp_state, ndlp->nlp_rpi);
 	if (NLP_CHK_FREE_REQ(ndlp)) {
 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
-				"0280 lpfc_cleanup_node: ndlp:x%p "
+				"0280 %s: ndlp:x%px "
 				"usgmap:x%x refcnt:%d\n",
-				(void *)ndlp, ndlp->nlp_usg_map,
+				__func__, (void *)ndlp, ndlp->nlp_usg_map,
 				kref_read(&ndlp->kref));
 		lpfc_dequeue_node(vport, ndlp);
 	} else {
 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
-				"0281 lpfc_cleanup_node: ndlp:x%p "
+				"0281 %s: ndlp:x%px "
 				"usgmap:x%x refcnt:%d\n",
-				(void *)ndlp, ndlp->nlp_usg_map,
+				__func__, (void *)ndlp, ndlp->nlp_usg_map,
 				kref_read(&ndlp->kref));
 		lpfc_disable_node(vport, ndlp);
 	}
@@ -4915,8 +5087,8 @@
 	if ((mb = phba->sli.mbox_active)) {
 		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
 		   !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
-		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
-			mb->context2 = NULL;
+		   (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
+			mb->ctx_ndlp = NULL;
 			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 		}
 	}
@@ -4926,18 +5098,18 @@
 	list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
 		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) ||
 			(mb->mbox_flag & LPFC_MBX_IMED_UNREG) ||
-			(ndlp != (struct lpfc_nodelist *) mb->context2))
+			(ndlp != (struct lpfc_nodelist *)mb->ctx_ndlp))
 			continue;
 
-		mb->context2 = NULL;
+		mb->ctx_ndlp = NULL;
 		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 	}
 
 	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
 		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
 		   !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
-		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
-			mp = (struct lpfc_dmabuf *) (mb->context1);
+		    (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
+			mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
 			if (mp) {
 				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
 				kfree(mp);
@@ -4964,6 +5136,8 @@
 	list_del_init(&ndlp->els_retry_evt.evt_listp);
 	list_del_init(&ndlp->dev_loss_evt.evt_listp);
 	lpfc_cleanup_vports_rrqs(vport, ndlp);
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		ndlp->nlp_flag |= NLP_RELEASE_RPI;
 	lpfc_unreg_rpi(vport, ndlp);
 
 	return 0;
@@ -4992,7 +5166,7 @@
 		 * allocated by the firmware.
 		 */
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
-				 "0005 rpi:%x DID:%x flg:%x %d map:%x %p\n",
+				 "0005 rpi:%x DID:%x flg:%x %d map:%x x%px\n",
 				 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
 				 kref_read(&ndlp->kref),
 				 ndlp->nlp_usg_map, ndlp);
@@ -5007,7 +5181,7 @@
 				mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
 				mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
 				mbox->vport = vport;
-				mbox->context2 = ndlp;
+				mbox->ctx_ndlp = ndlp;
 				rc =lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
 				if (rc == MBX_NOT_FINISHED) {
 					mempool_free(mbox, phba->mbox_mem_pool);
@@ -5028,8 +5202,8 @@
 		 * for registered rport so need to cleanup rport
 		 */
 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
-				"0940 removed node x%p DID x%x "
-				" rport not null %p\n",
+				"0940 removed node x%px DID x%x "
+				"rport not null x%px\n",
 				ndlp, ndlp->nlp_DID, ndlp->rport);
 		rport = ndlp->rport;
 		rdata = rport->dd_data;
@@ -5103,15 +5277,15 @@
 
 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
 		if (lpfc_matchdid(vport, ndlp, did)) {
-			data1 = (((uint32_t) ndlp->nlp_state << 24) |
-				 ((uint32_t) ndlp->nlp_xri << 16) |
-				 ((uint32_t) ndlp->nlp_type << 8) |
-				 ((uint32_t) ndlp->nlp_rpi & 0xff));
+			data1 = (((uint32_t)ndlp->nlp_state << 24) |
+				 ((uint32_t)ndlp->nlp_xri << 16) |
+				 ((uint32_t)ndlp->nlp_type << 8) |
+				 ((uint32_t)ndlp->nlp_usg_map & 0xff));
 			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
 					 "0929 FIND node DID "
-					 "Data: x%p x%x x%x x%x %p\n",
+					 "Data: x%px x%x x%x x%x x%x x%px\n",
 					 ndlp, ndlp->nlp_DID,
-					 ndlp->nlp_flag, data1,
+					 ndlp->nlp_flag, data1, ndlp->nlp_rpi,
 					 ndlp->active_rrqs_xri_bitmap);
 			return ndlp;
 		}
@@ -5137,6 +5311,41 @@
 }
 
 struct lpfc_nodelist *
+lpfc_findnode_mapped(struct lpfc_vport *vport)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_nodelist *ndlp;
+	uint32_t data1;
+	unsigned long iflags;
+
+	spin_lock_irqsave(shost->host_lock, iflags);
+
+	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+		if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
+		    ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
+			data1 = (((uint32_t)ndlp->nlp_state << 24) |
+				 ((uint32_t)ndlp->nlp_xri << 16) |
+				 ((uint32_t)ndlp->nlp_type << 8) |
+				 ((uint32_t)ndlp->nlp_rpi & 0xff));
+			spin_unlock_irqrestore(shost->host_lock, iflags);
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+					 "2025 FIND node DID "
+					 "Data: x%px x%x x%x x%x x%px\n",
+					 ndlp, ndlp->nlp_DID,
+					 ndlp->nlp_flag, data1,
+					 ndlp->active_rrqs_xri_bitmap);
+			return ndlp;
+		}
+	}
+	spin_unlock_irqrestore(shost->host_lock, iflags);
+
+	/* FIND node did <did> NOT FOUND */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+			 "2026 FIND mapped did NOT FOUND.\n");
+	return NULL;
+}
+
+struct lpfc_nodelist *
 lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
 {
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
@@ -5161,8 +5370,11 @@
 		if (vport->phba->nvmet_support)
 			return NULL;
 		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
-		if (!ndlp)
+		if (!ndlp) {
+			lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
+					 "0014 Could not enable ndlp\n");
 			return NULL;
+		}
 		spin_lock_irq(shost->host_lock);
 		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
 		spin_unlock_irq(shost->host_lock);
@@ -5428,12 +5640,10 @@
 lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 {
 	LIST_HEAD(completions);
-	struct lpfc_sli *psli;
 	IOCB_t     *icmd;
 	struct lpfc_iocbq    *iocb, *next_iocb;
 	struct lpfc_sli_ring *pring;
 
-	psli = &phba->sli;
 	pring = lpfc_phba_elsring(phba);
 	if (unlikely(!pring))
 		return;
@@ -5734,7 +5944,7 @@
 
 	case LPFC_LINK_UP:
 		lpfc_issue_clear_la(phba, vport);
-		/* Drop thru */
+		/* fall through */
 	case LPFC_LINK_UNKNOWN:
 	case LPFC_WARM_START:
 	case LPFC_INIT_START:
@@ -5774,12 +5984,12 @@
 lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
 	MAILBOX_t *mb = &pmb->u.mb;
-	struct lpfc_dmabuf   *mp = (struct lpfc_dmabuf *) (pmb->context1);
-	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
+	struct lpfc_dmabuf   *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
+	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
 	struct lpfc_vport    *vport = pmb->vport;
 
-	pmb->context1 = NULL;
-	pmb->context2 = NULL;
+	pmb->ctx_buf = NULL;
+	pmb->ctx_ndlp = NULL;
 
 	if (phba->sli_rev < LPFC_SLI_REV4)
 		ndlp->nlp_rpi = mb->un.varWords[0];
@@ -5787,7 +5997,7 @@
 	ndlp->nlp_type |= NLP_FABRIC;
 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
-			 "0004 rpi:%x DID:%x flg:%x %d map:%x %p\n",
+			 "0004 rpi:%x DID:%x flg:%x %d map:%x x%px\n",
 			 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
 			 kref_read(&ndlp->kref),
 			 ndlp->nlp_usg_map, ndlp);
@@ -5841,8 +6051,8 @@
 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
 		if (filter(ndlp, param)) {
 			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
-					 "3185 FIND node filter %p DID "
-					 "ndlp %p did x%x flg x%x st x%x "
+					 "3185 FIND node filter %ps DID "
+					 "ndlp x%px did x%x flg x%x st x%x "
 					 "xri x%x type x%x rpi x%x\n",
 					 filter, ndlp, ndlp->nlp_DID,
 					 ndlp->nlp_flag, ndlp->nlp_state,
@@ -5852,7 +6062,7 @@
 		}
 	}
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
-			 "3186 FIND node filter %p NOT FOUND.\n", filter);
+			 "3186 FIND node filter %ps NOT FOUND.\n", filter);
 	return NULL;
 }
 
@@ -5892,10 +6102,11 @@
 {
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 	struct lpfc_nodelist *ndlp;
+	unsigned long flags;
 
-	spin_lock_irq(shost->host_lock);
+	spin_lock_irqsave(shost->host_lock, flags);
 	ndlp = __lpfc_findnode_rpi(vport, rpi);
-	spin_unlock_irq(shost->host_lock);
+	spin_unlock_irqrestore(shost->host_lock, flags);
 	return ndlp;
 }
 
@@ -5938,14 +6149,14 @@
 		}
 	}
 
-	spin_lock_irqsave(&phba->hbalock, flags);
+	spin_lock_irqsave(&phba->port_list_lock, flags);
 	list_for_each_entry(vport, &phba->port_list, listentry) {
 		if (vport->vpi == i) {
-			spin_unlock_irqrestore(&phba->hbalock, flags);
+			spin_unlock_irqrestore(&phba->port_list_lock, flags);
 			return vport;
 		}
 	}
-	spin_unlock_irqrestore(&phba->hbalock, flags);
+	spin_unlock_irqrestore(&phba->port_list_lock, flags);
 	return NULL;
 }
 
@@ -5976,7 +6187,7 @@
 		ndlp->nlp_rpi = rpi;
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
 				 "0007 rpi:%x DID:%x flg:%x refcnt:%d "
-				 "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID,
+				 "map:%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID,
 				 ndlp->nlp_flag,
 				 kref_read(&ndlp->kref),
 				 ndlp->nlp_usg_map, ndlp);
@@ -6014,8 +6225,9 @@
 		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
 
 	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
-			"0279 lpfc_nlp_release: ndlp:x%p did %x "
+			"0279 %s: ndlp:x%px did %x "
 			"usgmap:x%x refcnt:%d rpi:%x\n",
+			__func__,
 			(void *)ndlp, ndlp->nlp_DID, ndlp->nlp_usg_map,
 			kref_read(&ndlp->kref), ndlp->nlp_rpi);
 
@@ -6027,8 +6239,6 @@
 	spin_lock_irqsave(&phba->ndlp_lock, flags);
 	NLP_CLR_NODE_ACT(ndlp);
 	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
-	if (phba->sli_rev == LPFC_SLI_REV4)
-		lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
 
 	/* free ndlp memory for final ndlp release */
 	if (NLP_CHK_FREE_REQ(ndlp)) {
@@ -6064,9 +6274,9 @@
 		if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
 			spin_unlock_irqrestore(&phba->ndlp_lock, flags);
 			lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
-				"0276 lpfc_nlp_get: ndlp:x%p "
+				"0276 %s: ndlp:x%px "
 				"usgmap:x%x refcnt:%d\n",
-				(void *)ndlp, ndlp->nlp_usg_map,
+				__func__, (void *)ndlp, ndlp->nlp_usg_map,
 				kref_read(&ndlp->kref));
 			return NULL;
 		} else
@@ -6092,9 +6302,9 @@
 		return 1;
 
 	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
-	"node put:        did:x%x flg:x%x refcnt:x%x",
-		ndlp->nlp_DID, ndlp->nlp_flag,
-		kref_read(&ndlp->kref));
+			"node put:        did:x%x flg:x%x refcnt:x%x",
+			ndlp->nlp_DID, ndlp->nlp_flag,
+			kref_read(&ndlp->kref));
 	phba = ndlp->phba;
 	spin_lock_irqsave(&phba->ndlp_lock, flags);
 	/* Check the ndlp memory free acknowledge flag to avoid the
@@ -6104,9 +6314,9 @@
 	if (NLP_CHK_FREE_ACK(ndlp)) {
 		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
 		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
-				"0274 lpfc_nlp_put: ndlp:x%p "
+				"0274 %s: ndlp:x%px "
 				"usgmap:x%x refcnt:%d\n",
-				(void *)ndlp, ndlp->nlp_usg_map,
+				__func__, (void *)ndlp, ndlp->nlp_usg_map,
 				kref_read(&ndlp->kref));
 		return 1;
 	}
@@ -6117,9 +6327,9 @@
 	if (NLP_CHK_IACT_REQ(ndlp)) {
 		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
 		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
-				"0275 lpfc_nlp_put: ndlp:x%p "
+				"0275 %s: ndlp:x%px "
 				"usgmap:x%x refcnt:%d\n",
-				(void *)ndlp, ndlp->nlp_usg_map,
+				__func__, (void *)ndlp, ndlp->nlp_usg_map,
 				kref_read(&ndlp->kref));
 		return 1;
 	}
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 009aa0e..436cdc8 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -115,6 +115,7 @@
 		uint32_t PortID;
 		struct gid {
 			uint8_t PortType;	/* for GID_PT requests */
+#define GID_PT_N_PORT	1
 			uint8_t DomainScope;
 			uint8_t AreaScope;
 			uint8_t Fc4Type;	/* for GID_FT requests */
@@ -559,6 +560,8 @@
 #define fc_vft_hdr_hopct_WORD		word1
 };
 
+#include <uapi/scsi/fc/fc_els.h>
+
 /*
  *  Extended Link Service LS_COMMAND codes (Payload Word 0)
  */
@@ -598,10 +601,12 @@
 #define ELS_CMD_RPL       0x57000000
 #define ELS_CMD_FAN       0x60000000
 #define ELS_CMD_RSCN      0x61040000
+#define ELS_CMD_RSCN_XMT  0x61040008
 #define ELS_CMD_SCR       0x62000000
 #define ELS_CMD_RNID      0x78000000
 #define ELS_CMD_LIRR      0x7A000000
 #define ELS_CMD_LCB	  0x81000000
+#define ELS_CMD_FPIN	  0x16000000
 #else	/*  __LITTLE_ENDIAN_BITFIELD */
 #define ELS_CMD_MASK      0xffff
 #define ELS_RSP_MASK      0xff
@@ -638,10 +643,12 @@
 #define ELS_CMD_RPL       0x57
 #define ELS_CMD_FAN       0x60
 #define ELS_CMD_RSCN      0x0461
+#define ELS_CMD_RSCN_XMT  0x08000461
 #define ELS_CMD_SCR       0x62
 #define ELS_CMD_RNID      0x78
 #define ELS_CMD_LIRR      0x7A
 #define ELS_CMD_LCB	  0x81
+#define ELS_CMD_FPIN	  ELS_FPIN
 #endif
 
 /*
@@ -836,7 +843,7 @@
 	struct lpfc_name portName;
 	struct lpfc_name nodeName;
 	uint32_t DID;
-} ADISC;
+} __packed ADISC;
 
 typedef struct _FARP {		/* Structure is in Big Endian format */
 	uint32_t Mflags:8;
@@ -866,7 +873,7 @@
 	uint32_t Fdid;
 	struct lpfc_name FportName;
 	struct lpfc_name FnodeName;
-} FAN;
+} __packed FAN;
 
 typedef struct _SCR {		/* Structure is in Big Endian format */
 	uint8_t resvd1;
@@ -910,7 +917,7 @@
 	union {
 		RNID_TOP_DISC topologyDisc;	/* topology disc (0xdf) */
 	} un;
-} RNID;
+} __packed RNID;
 
 typedef struct  _RPS {		/* Structure is in Big Endian format */
 	union {
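On the __packed additions to ADISC, FAN and RNID above: these structures describe on-the-wire ELS payloads, so compiler-inserted padding would change sizeof() and break the layout. Illustrative only:

	struct ex_unpacked {
		uint8_t  code;
		uint32_t did;	/* compiler may pad 3 bytes before this */
	};			/* sizeof() typically 8 */

	struct ex_packed {
		uint8_t  code;
		uint32_t did;
	} __packed;		/* sizeof() == 5, matches the wire format */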
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 083f8c8..bd53347 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.  *
  * Copyright (C) 2009-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -186,6 +186,7 @@
 #define LPFC_CTL_PDEV_CTL_FRL_ALL	0x00
 #define LPFC_CTL_PDEV_CTL_FRL_FC_FCOE	0x10
 #define LPFC_CTL_PDEV_CTL_FRL_NIC	0x20
+#define LPFC_CTL_PDEV_CTL_DDL_RAS	0x1000000
 
 #define LPFC_FW_DUMP_REQUEST    (LPFC_CTL_PDEV_CTL_DD | LPFC_CTL_PDEV_CTL_FRST)
 
@@ -193,9 +194,13 @@
 #define LPFC_ACT_INTR_CNT	4
 
 /* Algorithms for scheduling FCP commands to WQs */
-#define	LPFC_FCP_SCHED_ROUND_ROBIN	0
+#define	LPFC_FCP_SCHED_BY_HDWQ		0
 #define	LPFC_FCP_SCHED_BY_CPU		1
 
+/* Algorithms for NameServer Query after RSCN */
+#define LPFC_NS_QUERY_GID_FT	0
+#define LPFC_NS_QUERY_GID_PT	1
+
 /* Delay Multiplier constant */
 #define LPFC_DMULT_CONST       651042
 #define LPFC_DMULT_MAX         1023
@@ -203,12 +208,18 @@
 /* Configuration of Interrupts / sec for entire HBA port */
 #define LPFC_MIN_IMAX          5000
 #define LPFC_MAX_IMAX          5000000
-#define LPFC_DEF_IMAX          150000
+#define LPFC_DEF_IMAX          0
+
+#define LPFC_IMAX_THRESHOLD    1000
+#define LPFC_MAX_AUTO_EQ_DELAY 120
+#define LPFC_EQ_DELAY_STEP     15
+#define LPFC_EQD_ISR_TRIGGER   20000
+/* 1s intervals */
+#define LPFC_EQ_DELAY_MSECS    1000
 
 #define LPFC_MIN_CPU_MAP       0
-#define LPFC_MAX_CPU_MAP       2
+#define LPFC_MAX_CPU_MAP       1
 #define LPFC_HBA_CPU_MAP       1
-#define LPFC_DRIVER_CPU_MAP    2  /* Default */
 
 /* PORT_CAPABILITIES constants. */
 #define LPFC_MAX_SUPPORTED_PAGES	8
@@ -964,6 +975,7 @@
 /* Subsystem Definitions */
 #define LPFC_MBOX_SUBSYSTEM_NA		0x0
 #define LPFC_MBOX_SUBSYSTEM_COMMON	0x1
+#define LPFC_MBOX_SUBSYSTEM_LOWLEVEL	0xB
 #define LPFC_MBOX_SUBSYSTEM_FCOE	0xC
 
 /* Device Specific Definitions */
@@ -1029,6 +1041,10 @@
 #define LPFC_MBOX_OPCODE_FCOE_SET_FCLINK_SETTINGS	0x21
 #define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE		0x22
 #define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK	0x23
+#define LPFC_MBOX_OPCODE_FCOE_FC_SET_TRUNK_MODE		0x42
+
+/* Low level Opcodes */
+#define LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION		0x37
 
 /* Mailbox command structures */
 struct eq_context {
@@ -1162,6 +1178,45 @@
 	uint32_t context[2];
 };
 
+
+
+struct lpfc_mbx_set_ras_fwlog {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word4;
+#define lpfc_fwlog_enable_SHIFT		0
+#define lpfc_fwlog_enable_MASK		0x00000001
+#define lpfc_fwlog_enable_WORD		word4
+#define lpfc_fwlog_loglvl_SHIFT		8
+#define lpfc_fwlog_loglvl_MASK		0x0000000F
+#define lpfc_fwlog_loglvl_WORD		word4
+#define lpfc_fwlog_ra_SHIFT		15
+#define lpfc_fwlog_ra_WORD		0x00000008
+#define lpfc_fwlog_buffcnt_SHIFT	16
+#define lpfc_fwlog_buffcnt_MASK		0x000000FF
+#define lpfc_fwlog_buffcnt_WORD		word4
+#define lpfc_fwlog_buffsz_SHIFT		24
+#define lpfc_fwlog_buffsz_MASK		0x000000FF
+#define lpfc_fwlog_buffsz_WORD		word4
+			uint32_t word5;
+#define lpfc_fwlog_acqe_SHIFT		0
+#define lpfc_fwlog_acqe_MASK		0x0000FFFF
+#define lpfc_fwlog_acqe_WORD		word5
+#define lpfc_fwlog_cqid_SHIFT		16
+#define lpfc_fwlog_cqid_MASK		0x0000FFFF
+#define lpfc_fwlog_cqid_WORD		word5
+#define LPFC_MAX_FWLOG_PAGE	16
+			struct dma_address lwpd;
+			struct dma_address buff_fwlog[LPFC_MAX_FWLOG_PAGE];
+		} request;
+		struct {
+			uint32_t word0;
+		} response;
+	} u;
+};
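The SHIFT/MASK/WORD triplets in this new mailbox structure follow the driver-wide convention consumed by lpfc's bf_set()/bf_get() helpers; restated here from memory of the lpfc_hw4.h definitions, so treat as a sketch (ras_fwlog names a hypothetical struct lpfc_mbx_set_ras_fwlog pointer):

	#define bf_set(name, ptr, value) \
		((ptr)->name##_WORD = (((value) & name##_MASK) << name##_SHIFT) | \
		 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT)))
	#define bf_get(name, ptr) \
		(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

	/* e.g. enable firmware logging at level 4 */
	bf_set(lpfc_fwlog_enable, &ras_fwlog->u.request, 1);
	bf_set(lpfc_fwlog_loglvl, &ras_fwlog->u.request, 4);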
+
+
 struct cq_context {
 	uint32_t word0;
 #define lpfc_cq_context_event_SHIFT	31
@@ -1839,18 +1894,19 @@
 	union {
 		struct {
 			uint32_t word0;
-#define lpfc_mbx_set_diag_lpbk_type_SHIFT	0
-#define lpfc_mbx_set_diag_lpbk_type_MASK	0x00000003
-#define lpfc_mbx_set_diag_lpbk_type_WORD	word0
-#define LPFC_DIAG_LOOPBACK_TYPE_DISABLE		0x0
-#define LPFC_DIAG_LOOPBACK_TYPE_INTERNAL	0x1
-#define LPFC_DIAG_LOOPBACK_TYPE_SERDES		0x2
-#define lpfc_mbx_set_diag_lpbk_link_num_SHIFT	16
-#define lpfc_mbx_set_diag_lpbk_link_num_MASK	0x0000003F
-#define lpfc_mbx_set_diag_lpbk_link_num_WORD	word0
-#define lpfc_mbx_set_diag_lpbk_link_type_SHIFT	22
-#define lpfc_mbx_set_diag_lpbk_link_type_MASK	0x00000003
-#define lpfc_mbx_set_diag_lpbk_link_type_WORD	word0
+#define lpfc_mbx_set_diag_lpbk_type_SHIFT		0
+#define lpfc_mbx_set_diag_lpbk_type_MASK		0x00000003
+#define lpfc_mbx_set_diag_lpbk_type_WORD		word0
+#define LPFC_DIAG_LOOPBACK_TYPE_DISABLE			0x0
+#define LPFC_DIAG_LOOPBACK_TYPE_INTERNAL		0x1
+#define LPFC_DIAG_LOOPBACK_TYPE_SERDES			0x2
+#define LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL_TRUNKED	0x3
+#define lpfc_mbx_set_diag_lpbk_link_num_SHIFT		16
+#define lpfc_mbx_set_diag_lpbk_link_num_MASK		0x0000003F
+#define lpfc_mbx_set_diag_lpbk_link_num_WORD		word0
+#define lpfc_mbx_set_diag_lpbk_link_type_SHIFT		22
+#define lpfc_mbx_set_diag_lpbk_link_type_MASK		0x00000003
+#define lpfc_mbx_set_diag_lpbk_link_type_WORD		word0
 		} req;
 		struct {
 			uint32_t word0;
@@ -1994,6 +2050,23 @@
 	uint32_t sge_len;
 };
 
+struct sli4_hybrid_sgl {
+	struct list_head list_node;
+	struct sli4_sge *dma_sgl;
+	dma_addr_t dma_phys_sgl;
+};
+
+struct fcp_cmd_rsp_buf {
+	struct list_head list_node;
+
+	/* for storing cmd/rsp dma alloc'ed virt_addr */
+	struct fcp_cmnd *fcp_cmnd;
+	struct fcp_rsp *fcp_rsp;
+
+	/* for storing this cmd/rsp's dma mapped phys addr from per CPU pool */
+	dma_addr_t fcp_cmd_rsp_dma_handle;
+};
+
 struct sli4_sge_diseed {	/* SLI-4 */
 	uint32_t ref_tag;
 	uint32_t ref_tag_tran;
@@ -2733,6 +2806,9 @@
 #define lpfc_mbx_rd_conf_lnk_ldv_SHIFT		8
 #define lpfc_mbx_rd_conf_lnk_ldv_MASK		0x00000001
 #define lpfc_mbx_rd_conf_lnk_ldv_WORD		word2
+#define lpfc_mbx_rd_conf_trunk_SHIFT		12
+#define lpfc_mbx_rd_conf_trunk_MASK		0x0000000F
+#define lpfc_mbx_rd_conf_trunk_WORD		word2
 #define lpfc_mbx_rd_conf_topology_SHIFT		24
 #define lpfc_mbx_rd_conf_topology_MASK		0x000000FF
 #define lpfc_mbx_rd_conf_topology_WORD		word2
@@ -3390,6 +3466,9 @@
 #define cfg_xib_SHIFT				4
 #define cfg_xib_MASK				0x00000001
 #define cfg_xib_WORD				word19
+#define cfg_xpsgl_SHIFT				6
+#define cfg_xpsgl_MASK				0x00000001
+#define cfg_xpsgl_WORD				word19
 #define cfg_eqdr_SHIFT				8
 #define cfg_eqdr_MASK				0x00000001
 #define cfg_eqdr_WORD				word19
@@ -3401,6 +3480,10 @@
 #define cfg_bv1s_MASK                           0x00000001
 #define cfg_bv1s_WORD                           word19
 
+#define cfg_nsler_SHIFT                         12
+#define cfg_nsler_MASK                          0x00000001
+#define cfg_nsler_WORD                          word19
+
 	uint32_t word20;
 #define cfg_max_tow_xri_SHIFT			0
 #define cfg_max_tow_xri_MASK			0x0000ffff
@@ -3468,6 +3551,15 @@
 	uint8_t  data[LPFC_HOST_OS_DRIVER_VERSION_SIZE];
 };
 
+struct lpfc_mbx_set_trunk_mode {
+	struct mbox_header header;
+	uint32_t word0;
+#define lpfc_mbx_set_trunk_mode_WORD      word0
+#define lpfc_mbx_set_trunk_mode_SHIFT     0
+#define lpfc_mbx_set_trunk_mode_MASK      0xFF
+	uint32_t word1;
+	uint32_t word2;
+};
 
 struct lpfc_mbx_get_sli4_parameters {
 	struct mbox_header header;
@@ -3789,6 +3881,9 @@
 #define lpfc_wr_object_eof_SHIFT		31
 #define lpfc_wr_object_eof_MASK			0x00000001
 #define lpfc_wr_object_eof_WORD			word4
+#define lpfc_wr_object_eas_SHIFT		29
+#define lpfc_wr_object_eas_MASK			0x00000001
+#define lpfc_wr_object_eas_WORD			word4
 #define lpfc_wr_object_write_length_SHIFT	0
 #define lpfc_wr_object_write_length_MASK	0x00FFFFFF
 #define lpfc_wr_object_write_length_WORD	word4
@@ -3799,6 +3894,15 @@
 		} request;
 		struct {
 			uint32_t actual_write_length;
+			uint32_t word5;
+#define lpfc_wr_object_change_status_SHIFT	0
+#define lpfc_wr_object_change_status_MASK	0x000000FF
+#define lpfc_wr_object_change_status_WORD	word5
+#define LPFC_CHANGE_STATUS_NO_RESET_NEEDED	0x00
+#define LPFC_CHANGE_STATUS_PHYS_DEV_RESET	0x01
+#define LPFC_CHANGE_STATUS_FW_RESET		0x02
+#define LPFC_CHANGE_STATUS_PORT_MIGRATION	0x04
+#define LPFC_CHANGE_STATUS_PCI_RESET		0x05
 		} response;
 	} u;
 };
@@ -3867,7 +3971,9 @@
 		struct lpfc_mbx_set_feature  set_feature;
 		struct lpfc_mbx_memory_dump_type3 mem_dump_type3;
 		struct lpfc_mbx_set_host_data set_host_data;
+		struct lpfc_mbx_set_trunk_mode set_trunk_mode;
 		struct lpfc_mbx_nop nop;
+		struct lpfc_mbx_set_ras_fwlog ras_fwlog;
 	} un;
 };
 
@@ -4002,6 +4108,8 @@
 	uint32_t trailer;
 };
 
+extern const char *const trunk_errmsg[];
+
 struct lpfc_acqe_fc_la {
 	uint32_t word0;
 #define lpfc_acqe_fc_la_speed_SHIFT		24
@@ -4035,6 +4143,7 @@
 #define LPFC_FC_LA_TYPE_MDS_LINK_DOWN	0x4
 #define LPFC_FC_LA_TYPE_MDS_LOOPBACK	0x5
 #define LPFC_FC_LA_TYPE_UNEXP_WWPN	0x6
+#define LPFC_FC_LA_TYPE_TRUNKING_EVENT  0x7
 #define lpfc_acqe_fc_la_port_type_SHIFT		6
 #define lpfc_acqe_fc_la_port_type_MASK		0x00000003
 #define lpfc_acqe_fc_la_port_type_WORD		word0
@@ -4043,6 +4152,32 @@
 #define lpfc_acqe_fc_la_port_number_SHIFT	0
 #define lpfc_acqe_fc_la_port_number_MASK	0x0000003F
 #define lpfc_acqe_fc_la_port_number_WORD	word0
+
+/* Attention Type is 0x07 (Trunking Event) word0 */
+#define lpfc_acqe_fc_la_trunk_link_status_port0_SHIFT	16
+#define lpfc_acqe_fc_la_trunk_link_status_port0_MASK	0x0000001
+#define lpfc_acqe_fc_la_trunk_link_status_port0_WORD	word0
+#define lpfc_acqe_fc_la_trunk_link_status_port1_SHIFT	17
+#define lpfc_acqe_fc_la_trunk_link_status_port1_MASK	0x0000001
+#define lpfc_acqe_fc_la_trunk_link_status_port1_WORD	word0
+#define lpfc_acqe_fc_la_trunk_link_status_port2_SHIFT	18
+#define lpfc_acqe_fc_la_trunk_link_status_port2_MASK	0x0000001
+#define lpfc_acqe_fc_la_trunk_link_status_port2_WORD	word0
+#define lpfc_acqe_fc_la_trunk_link_status_port3_SHIFT	19
+#define lpfc_acqe_fc_la_trunk_link_status_port3_MASK	0x0000001
+#define lpfc_acqe_fc_la_trunk_link_status_port3_WORD	word0
+#define lpfc_acqe_fc_la_trunk_config_port0_SHIFT	20
+#define lpfc_acqe_fc_la_trunk_config_port0_MASK		0x0000001
+#define lpfc_acqe_fc_la_trunk_config_port0_WORD		word0
+#define lpfc_acqe_fc_la_trunk_config_port1_SHIFT	21
+#define lpfc_acqe_fc_la_trunk_config_port1_MASK		0x0000001
+#define lpfc_acqe_fc_la_trunk_config_port1_WORD		word0
+#define lpfc_acqe_fc_la_trunk_config_port2_SHIFT	22
+#define lpfc_acqe_fc_la_trunk_config_port2_MASK		0x0000001
+#define lpfc_acqe_fc_la_trunk_config_port2_WORD		word0
+#define lpfc_acqe_fc_la_trunk_config_port3_SHIFT	23
+#define lpfc_acqe_fc_la_trunk_config_port3_MASK		0x0000001
+#define lpfc_acqe_fc_la_trunk_config_port3_WORD		word0
 	uint32_t word1;
 #define lpfc_acqe_fc_la_llink_spd_SHIFT		16
 #define lpfc_acqe_fc_la_llink_spd_MASK		0x0000FFFF
@@ -4050,6 +4185,12 @@
 #define lpfc_acqe_fc_la_fault_SHIFT		0
 #define lpfc_acqe_fc_la_fault_MASK		0x000000FF
 #define lpfc_acqe_fc_la_fault_WORD		word1
+#define lpfc_acqe_fc_la_trunk_fault_SHIFT		0
+#define lpfc_acqe_fc_la_trunk_fault_MASK		0x0000000F
+#define lpfc_acqe_fc_la_trunk_fault_WORD		word1
+#define lpfc_acqe_fc_la_trunk_linkmask_SHIFT		4
+#define lpfc_acqe_fc_la_trunk_linkmask_MASK		0x000000F
+#define lpfc_acqe_fc_la_trunk_linkmask_WORD		word1
 #define LPFC_FC_LA_FAULT_NONE		0x0
 #define LPFC_FC_LA_FAULT_LOCAL		0x1
 #define LPFC_FC_LA_FAULT_REMOTE		0x2
@@ -4197,6 +4338,12 @@
 #define wqe_rcvoxid_SHIFT     16
 #define wqe_rcvoxid_MASK      0x0000FFFF
 #define wqe_rcvoxid_WORD      word9
+#define wqe_sof_SHIFT         24
+#define wqe_sof_MASK          0x000000FF
+#define wqe_sof_WORD          word9
+#define wqe_eof_SHIFT         16
+#define wqe_eof_MASK          0x000000FF
+#define wqe_eof_WORD          word9
 	uint32_t word10;
 #define wqe_ebde_cnt_SHIFT    0
 #define wqe_ebde_cnt_MASK     0x0000000f
@@ -4478,6 +4625,7 @@
 #define prli_type_code_WORD             word1
 	uint32_t word_rsvd2;
 	uint32_t word_rsvd3;
+
 	uint32_t word4;
 #define prli_fba_SHIFT                  0
 #define prli_fba_MASK                   0x00000001
@@ -4494,6 +4642,9 @@
 #define prli_conf_SHIFT                 7
 #define prli_conf_MASK                  0x00000001
 #define prli_conf_WORD                  word4
+#define prli_nsler_SHIFT		8
+#define prli_nsler_MASK			0x00000001
+#define prli_nsler_WORD			word4
 	uint32_t word5;
 #define prli_fb_sz_SHIFT                0
 #define prli_fb_sz_MASK                 0x0000ffff
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 9acb5b4..e8813d2 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.  *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -37,7 +37,9 @@
 #include <linux/miscdevice.h>
 #include <linux/percpu.h>
 #include <linux/msi.h>
+#include <linux/irq.h>
 #include <linux/bitops.h>
+#include <linux/crash_dump.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
@@ -64,15 +66,8 @@
 #include "lpfc_version.h"
 #include "lpfc_ids.h"
 
-char *_dump_buf_data;
-unsigned long _dump_buf_data_order;
-char *_dump_buf_dif;
-unsigned long _dump_buf_dif_order;
-spinlock_t _dump_buf_lock;
-
 /* Used when mapping IRQ vectors in a driver centric manner */
-uint16_t *lpfc_used_cpu;
-uint32_t lpfc_present_cpu;
+static uint32_t lpfc_present_cpu;
 
 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
 static int lpfc_post_rcv_buf(struct lpfc_hba *);
@@ -93,6 +88,8 @@
 static void lpfc_sli4_disable_intr(struct lpfc_hba *);
 static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
 static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
+static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
+static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);
 
 static struct scsi_transport_template *lpfc_transport_template = NULL;
 static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
@@ -447,19 +444,19 @@
 				"READ_SPARM mbxStatus x%x\n",
 				mb->mbxCommand, mb->mbxStatus);
 		phba->link_state = LPFC_HBA_ERROR;
-		mp = (struct lpfc_dmabuf *) pmb->context1;
+		mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
 		mempool_free(pmb, phba->mbox_mem_pool);
 		lpfc_mbuf_free(phba, mp->virt, mp->phys);
 		kfree(mp);
 		return -EIO;
 	}
 
-	mp = (struct lpfc_dmabuf *) pmb->context1;
+	mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
 
 	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
 	kfree(mp);
-	pmb->context1 = NULL;
+	pmb->ctx_buf = NULL;
 	lpfc_update_vport_wwn(vport);
 
 	/* Update the fc_host data structures with new wwn. */
@@ -1037,14 +1034,14 @@
 static int
 lpfc_hba_down_post_s4(struct lpfc_hba *phba)
 {
-	struct lpfc_scsi_buf *psb, *psb_next;
+	struct lpfc_io_buf *psb, *psb_next;
 	struct lpfc_nvmet_rcv_ctx *ctxp, *ctxp_next;
+	struct lpfc_sli4_hdw_queue *qp;
 	LIST_HEAD(aborts);
 	LIST_HEAD(nvme_aborts);
 	LIST_HEAD(nvmet_aborts);
-	unsigned long iflag = 0;
 	struct lpfc_sglq *sglq_entry = NULL;
-	int cnt;
+	int cnt, idx;
 
 
 	lpfc_sli_hbqbuf_free_all(phba);
@@ -1071,47 +1068,39 @@
 
 
 	spin_unlock(&phba->sli4_hba.sgl_list_lock);
-	/* abts_scsi_buf_list_lock required because worker thread uses this
+
+	/* abts_xxxx_buf_list_lock required because worker thread uses this
 	 * list.
 	 */
-	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
-		spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
-		list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
+	cnt = 0;
+	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
+		qp = &phba->sli4_hba.hdwq[idx];
+
+		spin_lock(&qp->abts_io_buf_list_lock);
+		list_splice_init(&qp->lpfc_abts_io_buf_list,
 				 &aborts);
-		spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
-	}
 
-	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
-		spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
-		list_splice_init(&phba->sli4_hba.lpfc_abts_nvme_buf_list,
-				 &nvme_aborts);
-		list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
-				 &nvmet_aborts);
-		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
-	}
-
-	spin_unlock_irq(&phba->hbalock);
-
-	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
-		psb->pCmd = NULL;
-		psb->status = IOSTAT_SUCCESS;
-	}
-	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
-	list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);
-	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
-
-	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
-		cnt = 0;
-		list_for_each_entry_safe(psb, psb_next, &nvme_aborts, list) {
+		list_for_each_entry_safe(psb, psb_next, &aborts, list) {
 			psb->pCmd = NULL;
 			psb->status = IOSTAT_SUCCESS;
 			cnt++;
 		}
-		spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
-		phba->put_nvme_bufs += cnt;
-		list_splice(&nvme_aborts, &phba->lpfc_nvme_buf_list_put);
-		spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);
+		spin_lock(&qp->io_buf_list_put_lock);
+		list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
+		qp->put_io_bufs += qp->abts_scsi_io_bufs;
+		qp->put_io_bufs += qp->abts_nvme_io_bufs;
+		qp->abts_scsi_io_bufs = 0;
+		qp->abts_nvme_io_bufs = 0;
+		spin_unlock(&qp->io_buf_list_put_lock);
+		spin_unlock(&qp->abts_io_buf_list_lock);
+	}
+	spin_unlock_irq(&phba->hbalock);
 
+	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+		spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
+		list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
+				 &nvmet_aborts);
+		spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 		list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
 			ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP);
 			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
@@ -1119,7 +1108,7 @@
 	}
 
 	lpfc_sli4_free_sp_events(phba);
-	return 0;
+	return cnt;
 }
 
 /**
@@ -1239,6 +1228,108 @@
 	return;
 }
 
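+/*
+ * Delayed work that auto-tunes EQ interrupt coalescing: sample per-CPU EQ
+ * interrupt counts, convert them to a microsecond delay, reprogram any EQ
+ * whose delay changed, then requeue itself every LPFC_EQ_DELAY_MSECS.
+ */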
+static void
+lpfc_hb_eq_delay_work(struct work_struct *work)
+{
+	struct lpfc_hba *phba = container_of(to_delayed_work(work),
+					     struct lpfc_hba, eq_delay_work);
+	struct lpfc_eq_intr_info *eqi, *eqi_new;
+	struct lpfc_queue *eq, *eq_next;
+	unsigned char *eqcnt = NULL;
+	uint32_t usdelay;
+	int i;
+	bool update = false;
+
+	if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
+		return;
+
+	if (phba->link_state == LPFC_HBA_ERROR ||
+	    phba->pport->fc_flag & FC_OFFLINE_MODE)
+		goto requeue;
+
+	eqcnt = kcalloc(num_possible_cpus(), sizeof(unsigned char),
+			GFP_KERNEL);
+	if (!eqcnt)
+		goto requeue;
+
+	if (phba->cfg_irq_chann > 1) {
+		/* Loop thru all IRQ vectors */
+		for (i = 0; i < phba->cfg_irq_chann; i++) {
+			/* Get the EQ corresponding to the IRQ vector */
+			eq = phba->sli4_hba.hba_eq_hdl[i].eq;
+			if (!eq)
+				continue;
+			if (eq->q_mode) {
+				update = true;
+				break;
+			}
+			if (eqcnt[eq->last_cpu] < 2)
+				eqcnt[eq->last_cpu]++;
+		}
+	} else
+		update = true;
+
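+	/* Skip CPUs servicing fewer than two EQs; for the rest, scale the
+	 * delay with the interrupts counted since the last pass (capped at
+	 * LPFC_MAX_AUTO_EQ_DELAY) and rehome any EQ that migrated CPUs.
+	 */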
+	for_each_present_cpu(i) {
+		eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
+		if (!update && eqcnt[i] < 2) {
+			eqi->icnt = 0;
+			continue;
+		}
+
+		usdelay = (eqi->icnt / LPFC_IMAX_THRESHOLD) *
+			   LPFC_EQ_DELAY_STEP;
+		if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
+			usdelay = LPFC_MAX_AUTO_EQ_DELAY;
+
+		eqi->icnt = 0;
+
+		list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
+			if (eq->last_cpu != i) {
+				eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
+						      eq->last_cpu);
+				list_move_tail(&eq->cpu_list, &eqi_new->list);
+				continue;
+			}
+			if (usdelay != eq->q_mode)
+				lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
+							 usdelay);
+		}
+	}
+
+	kfree(eqcnt);
+
+requeue:
+	queue_delayed_work(phba->wq, &phba->eq_delay_work,
+			   msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
+}
+
+/**
+ * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * For each heartbeat, this routine applies heuristic methods to adjust
+ * XRI distribution. The goal is to fully utilize free XRIs.
+ **/
+static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
+{
+	u32 i;
+	u32 hwq_count;
+
+	hwq_count = phba->cfg_hdw_queue;
+	for (i = 0; i < hwq_count; i++) {
+		/* Adjust XRIs in private pool */
+		lpfc_adjust_pvt_pool_count(phba, i);
+
+		/* Adjust high watermark */
+		lpfc_adjust_high_watermark(phba, i);
+
+#ifdef LPFC_MXP_STAT
+		/* Snapshot pbl, pvt and busy count */
+		lpfc_snapshot_mxp(phba, i);
+#endif
+	}
+}
+
 /**
  * lpfc_hb_timeout_handler - The HBA-timer timeout handler
  * @phba: pointer to lpfc hba data structure.
@@ -1264,16 +1355,11 @@
 	int retval, i;
 	struct lpfc_sli *psli = &phba->sli;
 	LIST_HEAD(completions);
-	struct lpfc_queue *qp;
-	unsigned long time_elapsed;
-	uint32_t tick_cqe, max_cqe, val;
-	uint64_t tot, data1, data2, data3;
-	struct lpfc_nvmet_tgtport *tgtp;
-	struct lpfc_register reg_data;
-	struct nvme_fc_local_port *localport;
-	struct lpfc_nvme_lport *lport;
-	struct lpfc_nvme_ctrl_stat *cstat;
-	void __iomem *eqdreg = phba->sli4_hba.u.if_type2.EQDregaddr;
+
+	if (phba->cfg_xri_rebalancing) {
+		/* Multi-XRI pools handler */
+		lpfc_hb_mxp_handler(phba);
+	}
 
 	vports = lpfc_create_vport_work_array(phba);
 	if (vports != NULL)
@@ -1288,107 +1374,6 @@
 		(phba->pport->fc_flag & FC_OFFLINE_MODE))
 		return;
 
-	if (phba->cfg_auto_imax) {
-		if (!phba->last_eqdelay_time) {
-			phba->last_eqdelay_time = jiffies;
-			goto skip_eqdelay;
-		}
-		time_elapsed = jiffies - phba->last_eqdelay_time;
-		phba->last_eqdelay_time = jiffies;
-
-		tot = 0xffff;
-		/* Check outstanding IO count */
-		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
-			if (phba->nvmet_support) {
-				tgtp = phba->targetport->private;
-				/* Calculate outstanding IOs */
-				tot = atomic_read(&tgtp->rcv_fcp_cmd_drop);
-				tot += atomic_read(&tgtp->xmt_fcp_release);
-				tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
-			} else {
-				localport = phba->pport->localport;
-				if (!localport || !localport->private)
-					goto skip_eqdelay;
-				lport = (struct lpfc_nvme_lport *)
-					localport->private;
-				tot = 0;
-				for (i = 0;
-					i < phba->cfg_nvme_io_channel; i++) {
-					cstat = &lport->cstat[i];
-					data1 = atomic_read(
-						&cstat->fc4NvmeInputRequests);
-					data2 = atomic_read(
-						&cstat->fc4NvmeOutputRequests);
-					data3 = atomic_read(
-						&cstat->fc4NvmeControlRequests);
-					tot += (data1 + data2 + data3);
-					tot -= atomic_read(
-						&cstat->fc4NvmeIoCmpls);
-				}
-			}
-		}
-
-		/* Interrupts per sec per EQ */
-		val = phba->cfg_fcp_imax / phba->io_channel_irqs;
-		tick_cqe = val / CONFIG_HZ; /* Per tick per EQ */
-
-		/* Assume 1 CQE/ISR, calc max CQEs allowed for time duration */
-		max_cqe = time_elapsed * tick_cqe;
-
-		for (i = 0; i < phba->io_channel_irqs; i++) {
-			/* Fast-path EQ */
-			qp = phba->sli4_hba.hba_eq[i];
-			if (!qp)
-				continue;
-
-			/* Use no EQ delay if we don't have many outstanding
-			 * IOs, or if we are only processing 1 CQE/ISR or less.
-			 * Otherwise, assume we can process up to lpfc_fcp_imax
-			 * interrupts per HBA.
-			 */
-			if (tot < LPFC_NODELAY_MAX_IO ||
-			    qp->EQ_cqe_cnt <= max_cqe)
-				val = 0;
-			else
-				val = phba->cfg_fcp_imax;
-
-			if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
-				/* Use EQ Delay Register method */
-
-				/* Convert for EQ Delay register */
-				if (val) {
-					/* First, interrupts per sec per EQ */
-					val = phba->cfg_fcp_imax /
-						phba->io_channel_irqs;
-
-					/* us delay between each interrupt */
-					val = LPFC_SEC_TO_USEC / val;
-				}
-				if (val != qp->q_mode) {
-					reg_data.word0 = 0;
-					bf_set(lpfc_sliport_eqdelay_id,
-					       &reg_data, qp->queue_id);
-					bf_set(lpfc_sliport_eqdelay_delay,
-					       &reg_data, val);
-					writel(reg_data.word0, eqdreg);
-				}
-			} else {
-				/* Use mbox command method */
-				if (val != qp->q_mode)
-					lpfc_modify_hba_eq_delay(phba, i,
-								 1, val);
-			}
-
-			/*
-			 * val is cfg_fcp_imax or 0 for mbox delay or us delay
-			 * between interrupts for EQDR.
-			 */
-			qp->q_mode = val;
-			qp->EQ_cqe_cnt = 0;
-		}
-	}
-
-skip_eqdelay:
 	spin_lock_irq(&phba->pport->work_port_lock);
 
 	if (time_after(phba->last_completion_time +
@@ -1537,6 +1522,7 @@
 	spin_unlock_irq(&phba->hbalock);
 
 	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
+	lpfc_sli_flush_io_rings(phba);
 	lpfc_offline(phba);
 	lpfc_hba_down_post(phba);
 	lpfc_unblock_mgmt_io(phba);
@@ -1798,10 +1784,16 @@
 				"2887 Reset Needed: Attempting Port "
 				"Recovery...\n");
 	lpfc_offline_prep(phba, mbx_action);
+	lpfc_sli_flush_io_rings(phba);
 	lpfc_offline(phba);
 	/* release interrupt for possible resource change */
 	lpfc_sli4_disable_intr(phba);
-	lpfc_sli_brdrestart(phba);
+	rc = lpfc_sli_brdrestart(phba);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"6309 Failed to restart board\n");
+		return rc;
+	}
 	/* request and enable interrupt */
 	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
 	if (intr_mode == LPFC_INTR_ERROR) {
@@ -1843,8 +1835,12 @@
 	/* If the pci channel is offline, ignore possible errors, since
 	 * we cannot communicate with the pci card anyway.
 	 */
-	if (pci_channel_offline(phba->pcidev))
+	if (pci_channel_offline(phba->pcidev)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3166 pci channel is offline\n");
+		lpfc_sli4_offline_eratt(phba);
 		return;
+	}
 
 	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
 	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
@@ -1908,7 +1904,7 @@
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"7624 Firmware not ready: Failing UE recovery,"
 				" waited %dSec", i);
-		lpfc_sli4_offline_eratt(phba);
+		phba->link_state = LPFC_HBA_ERROR;
 		break;
 
 	case LPFC_SLI_INTF_IF_TYPE_2:
@@ -1921,6 +1917,7 @@
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"3151 PCI bus read access failure: x%x\n",
 				readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
+			lpfc_sli4_offline_eratt(phba);
 			return;
 		}
 		reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
@@ -1981,9 +1978,8 @@
 		}
 		/* fall through for not able to recover */
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"3152 Unrecoverable error, bring the port "
-				"offline\n");
-		lpfc_sli4_offline_eratt(phba);
+				"3152 Unrecoverable error\n");
+		phba->link_state = LPFC_HBA_ERROR;
 		break;
 	case LPFC_SLI_INTF_IF_TYPE_1:
 	default:
@@ -2855,7 +2851,7 @@
 						&vport->fc_nodes, nlp_listp) {
 				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
 						LOG_NODE,
-						"0282 did:x%x ndlp:x%p "
+						"0282 did:x%x ndlp:x%px "
 						"usgmap:x%x refcnt:%d\n",
 						ndlp->nlp_DID, (void *)ndlp,
 						ndlp->nlp_usg_map,
@@ -2938,7 +2934,9 @@
 void
 lpfc_stop_hba_timers(struct lpfc_hba *phba)
 {
-	lpfc_stop_vport_timers(phba->pport);
+	if (phba->pport)
+		lpfc_stop_vport_timers(phba->pport);
+	cancel_delayed_work_sync(&phba->eq_delay_work);
 	del_timer_sync(&phba->sli.mbox_tmo);
 	del_timer_sync(&phba->fabric_block_timer);
 	del_timer_sync(&phba->eratt_poll);
@@ -2955,7 +2953,7 @@
 		del_timer_sync(&phba->fcp_poll_timer);
 		break;
 	case LPFC_PCI_DEV_OC:
-		/* Stop any OneConnect device sepcific driver timers */
+		/* Stop any OneConnect device specific driver timers */
 		lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
 		break;
 	default:
@@ -3057,7 +3055,7 @@
 			ndlp->nlp_rpi = rpi;
 			lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
 					 "0009 rpi:%x DID:%x "
-					 "flg:%x map:%x %p\n", ndlp->nlp_rpi,
+					 "flg:%x map:%x x%px\n", ndlp->nlp_rpi,
 					 ndlp->nlp_DID, ndlp->nlp_flag,
 					 ndlp->nlp_usg_map, ndlp);
 		}
@@ -3066,6 +3064,245 @@
 }
 
 /**
+ * lpfc_create_expedite_pool - create expedite pool
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0
+ * to the expedite pool and marks them as expedite.
+ **/
+static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
+{
+	struct lpfc_sli4_hdw_queue *qp;
+	struct lpfc_io_buf *lpfc_ncmd;
+	struct lpfc_io_buf *lpfc_ncmd_next;
+	struct lpfc_epd_pool *epd_pool;
+	unsigned long iflag;
+
+	epd_pool = &phba->epd_pool;
+	qp = &phba->sli4_hba.hdwq[0];
+
+	spin_lock_init(&epd_pool->lock);
+	spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
+	spin_lock(&epd_pool->lock);
+	INIT_LIST_HEAD(&epd_pool->list);
+	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+				 &qp->lpfc_io_buf_list_put, list) {
+		list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
+		lpfc_ncmd->expedite = true;
+		qp->put_io_bufs--;
+		epd_pool->count++;
+		if (epd_pool->count >= XRI_BATCH)
+			break;
+	}
+	spin_unlock(&epd_pool->lock);
+	spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
+}
+
+/**
+ * lpfc_destroy_expedite_pool - destroy expedite pool
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine returns XRIs from the expedite pool to lpfc_io_buf_list_put
+ * of HWQ 0 and clears the expedite mark.
+ **/
+static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
+{
+	struct lpfc_sli4_hdw_queue *qp;
+	struct lpfc_io_buf *lpfc_ncmd;
+	struct lpfc_io_buf *lpfc_ncmd_next;
+	struct lpfc_epd_pool *epd_pool;
+	unsigned long iflag;
+
+	epd_pool = &phba->epd_pool;
+	qp = &phba->sli4_hba.hdwq[0];
+
+	spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
+	spin_lock(&epd_pool->lock);
+	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+				 &epd_pool->list, list) {
+		list_move_tail(&lpfc_ncmd->list,
+			       &qp->lpfc_io_buf_list_put);
+		lpfc_ncmd->expedite = false;
+		qp->put_io_bufs++;
+		epd_pool->count--;
+	}
+	spin_unlock(&epd_pool->lock);
+	spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
+}
+
+/**
+ * lpfc_create_multixri_pools - create multi-XRI pools
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine initializes the public and private pools per HWQ, then moves
+ * XRIs from lpfc_io_buf_list_put to the public pool. High and low watermarks
+ * are also initialized.
+ **/
+void lpfc_create_multixri_pools(struct lpfc_hba *phba)
+{
+	u32 i, j;
+	u32 hwq_count;
+	u32 count_per_hwq;
+	struct lpfc_io_buf *lpfc_ncmd;
+	struct lpfc_io_buf *lpfc_ncmd_next;
+	unsigned long iflag;
+	struct lpfc_sli4_hdw_queue *qp;
+	struct lpfc_multixri_pool *multixri_pool;
+	struct lpfc_pbl_pool *pbl_pool;
+	struct lpfc_pvt_pool *pvt_pool;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n",
+			phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu,
+			phba->sli4_hba.io_xri_cnt);
+
+	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+		lpfc_create_expedite_pool(phba);
+
+	hwq_count = phba->cfg_hdw_queue;
+	count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count;
+
+	for (i = 0; i < hwq_count; i++) {
+		multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL);
+
+		if (!multixri_pool) {
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"1238 Failed to allocate memory for "
+					"multixri_pool\n");
+
+			if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+				lpfc_destroy_expedite_pool(phba);
+
+			j = 0;
+			while (j < i) {
+				qp = &phba->sli4_hba.hdwq[j];
+				kfree(qp->p_multixri_pool);
+				j++;
+			}
+			phba->cfg_xri_rebalancing = 0;
+			return;
+		}
+
+		qp = &phba->sli4_hba.hdwq[i];
+		qp->p_multixri_pool = multixri_pool;
+
+		multixri_pool->xri_limit = count_per_hwq;
+		multixri_pool->rrb_next_hwqid = i;
+
+		/* Deal with public free xri pool */
+		pbl_pool = &multixri_pool->pbl_pool;
+		spin_lock_init(&pbl_pool->lock);
+		spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
+		spin_lock(&pbl_pool->lock);
+		INIT_LIST_HEAD(&pbl_pool->list);
+		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+					 &qp->lpfc_io_buf_list_put, list) {
+			list_move_tail(&lpfc_ncmd->list, &pbl_pool->list);
+			qp->put_io_bufs--;
+			pbl_pool->count++;
+		}
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n",
+				pbl_pool->count, i);
+		spin_unlock(&pbl_pool->lock);
+		spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
+
+		/* Deal with private free xri pool */
+		pvt_pool = &multixri_pool->pvt_pool;
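+		/* The private pool may hold up to half of this HWQ's XRI
+		 * limit; once it drains below XRI_BATCH it is refilled from
+		 * the public pool.
+		 */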
+		pvt_pool->high_watermark = multixri_pool->xri_limit / 2;
+		pvt_pool->low_watermark = XRI_BATCH;
+		spin_lock_init(&pvt_pool->lock);
+		spin_lock_irqsave(&pvt_pool->lock, iflag);
+		INIT_LIST_HEAD(&pvt_pool->list);
+		pvt_pool->count = 0;
+		spin_unlock_irqrestore(&pvt_pool->lock, iflag);
+	}
+}
+
+/**
+ * lpfc_destroy_multixri_pools - destroy multi-XRI pools
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine returns XRIs from the public and private pools back to
+ * lpfc_io_buf_list_put.
+ **/
+static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
+{
+	u32 i;
+	u32 hwq_count;
+	struct lpfc_io_buf *lpfc_ncmd;
+	struct lpfc_io_buf *lpfc_ncmd_next;
+	unsigned long iflag;
+	struct lpfc_sli4_hdw_queue *qp;
+	struct lpfc_multixri_pool *multixri_pool;
+	struct lpfc_pbl_pool *pbl_pool;
+	struct lpfc_pvt_pool *pvt_pool;
+
+	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+		lpfc_destroy_expedite_pool(phba);
+
+	if (!(phba->pport->load_flag & FC_UNLOADING))
+		lpfc_sli_flush_io_rings(phba);
+
+	hwq_count = phba->cfg_hdw_queue;
+
+	for (i = 0; i < hwq_count; i++) {
+		qp = &phba->sli4_hba.hdwq[i];
+		multixri_pool = qp->p_multixri_pool;
+		if (!multixri_pool)
+			continue;
+
+		qp->p_multixri_pool = NULL;
+
+		spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
+
+		/* Deal with public free xri pool */
+		pbl_pool = &multixri_pool->pbl_pool;
+		spin_lock(&pbl_pool->lock);
+
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n",
+				pbl_pool->count, i);
+
+		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+					 &pbl_pool->list, list) {
+			list_move_tail(&lpfc_ncmd->list,
+				       &qp->lpfc_io_buf_list_put);
+			qp->put_io_bufs++;
+			pbl_pool->count--;
+		}
+
+		INIT_LIST_HEAD(&pbl_pool->list);
+		pbl_pool->count = 0;
+
+		spin_unlock(&pbl_pool->lock);
+
+		/* Deal with private free xri pool */
+		pvt_pool = &multixri_pool->pvt_pool;
+		spin_lock(&pvt_pool->lock);
+
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n",
+				pvt_pool->count, i);
+
+		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+					 &pvt_pool->list, list) {
+			list_move_tail(&lpfc_ncmd->list,
+				       &qp->lpfc_io_buf_list_put);
+			qp->put_io_bufs++;
+			pvt_pool->count--;
+		}
+
+		INIT_LIST_HEAD(&pvt_pool->list);
+		pvt_pool->count = 0;
+
+		spin_unlock(&pvt_pool->lock);
+		spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
+
+		kfree(multixri_pool);
+	}
+}
+
+/**
  * lpfc_online - Initialize and bring a HBA online
  * @phba: pointer to lpfc hba data structure.
  *
@@ -3147,6 +3384,9 @@
 	}
 	lpfc_destroy_vport_work_array(phba, vports);
 
+	if (phba->cfg_xri_rebalancing)
+		lpfc_create_multixri_pools(phba);
+
 	lpfc_unblock_mgmt_io(phba);
 	return 0;
 }
@@ -3235,7 +3475,7 @@
 					lpfc_printf_vlog(ndlp->vport,
 							 KERN_INFO, LOG_NODE,
 							 "0011 lpfc_offline: "
-							 "ndlp:x%p did %x "
+							 "ndlp:x%px did %x "
 							 "usgmap:x%x rpi:%x\n",
 							 ndlp, ndlp->nlp_DID,
 							 ndlp->nlp_usg_map,
@@ -3305,6 +3545,9 @@
 			spin_unlock_irq(shost->host_lock);
 		}
 	lpfc_destroy_vport_work_array(phba, vports);
+
+	if (phba->cfg_xri_rebalancing)
+		lpfc_destroy_multixri_pools(phba);
 }
 
 /**
@@ -3318,7 +3561,7 @@
 static void
 lpfc_scsi_free(struct lpfc_hba *phba)
 {
-	struct lpfc_scsi_buf *sb, *sb_next;
+	struct lpfc_io_buf *sb, *sb_next;
 
 	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
 		return;
@@ -3350,50 +3593,59 @@
 	spin_unlock(&phba->scsi_buf_list_get_lock);
 	spin_unlock_irq(&phba->hbalock);
 }
+
 /**
- * lpfc_nvme_free - Free all the NVME buffers and IOCBs from driver lists
+ * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists
  * @phba: pointer to lpfc hba data structure.
  *
- * This routine is to free all the NVME buffers and IOCBs from the driver
+ * This routine is to free all the IO buffers and IOCBs from the driver
  * list back to kernel. It is called from lpfc_pci_remove_one to free
  * the internal resources before the device is removed from the system.
  **/
-static void
-lpfc_nvme_free(struct lpfc_hba *phba)
+void
+lpfc_io_free(struct lpfc_hba *phba)
 {
-	struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
+	struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
+	struct lpfc_sli4_hdw_queue *qp;
+	int idx;
 
-	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
-		return;
+	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
+		qp = &phba->sli4_hba.hdwq[idx];
+		/* Release all the IO buffers maintained by this host. */
+		spin_lock(&qp->io_buf_list_put_lock);
+		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+					 &qp->lpfc_io_buf_list_put,
+					 list) {
+			list_del(&lpfc_ncmd->list);
+			qp->put_io_bufs--;
+			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
+				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
+			if (phba->cfg_xpsgl && !phba->nvmet_support)
+				lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
+			lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
+			kfree(lpfc_ncmd);
+			qp->total_io_bufs--;
+		}
+		spin_unlock(&qp->io_buf_list_put_lock);
 
-	spin_lock_irq(&phba->hbalock);
-
-	/* Release all the lpfc_nvme_bufs maintained by this host. */
-	spin_lock(&phba->nvme_buf_list_put_lock);
-	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
-				 &phba->lpfc_nvme_buf_list_put, list) {
-		list_del(&lpfc_ncmd->list);
-		phba->put_nvme_bufs--;
-		dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
-			      lpfc_ncmd->dma_handle);
-		kfree(lpfc_ncmd);
-		phba->total_nvme_bufs--;
+		spin_lock(&qp->io_buf_list_get_lock);
+		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+					 &qp->lpfc_io_buf_list_get,
+					 list) {
+			list_del(&lpfc_ncmd->list);
+			qp->get_io_bufs--;
+			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
+				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
+			if (phba->cfg_xpsgl && !phba->nvmet_support)
+				lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
+			lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
+			kfree(lpfc_ncmd);
+			qp->total_io_bufs--;
+		}
+		spin_unlock(&qp->io_buf_list_get_lock);
 	}
-	spin_unlock(&phba->nvme_buf_list_put_lock);
-
-	spin_lock(&phba->nvme_buf_list_get_lock);
-	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
-				 &phba->lpfc_nvme_buf_list_get, list) {
-		list_del(&lpfc_ncmd->list);
-		phba->get_nvme_bufs--;
-		dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
-			      lpfc_ncmd->dma_handle);
-		kfree(lpfc_ncmd);
-		phba->total_nvme_bufs--;
-	}
-	spin_unlock(&phba->nvme_buf_list_get_lock);
-	spin_unlock_irq(&phba->hbalock);
 }
+
 /**
  * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
  * @phba: pointer to lpfc hba data structure.
@@ -3635,8 +3887,102 @@
 	return rc;
 }
 
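+/* Drain every hardware queue's get and put lists onto @cbuf, keeping the
+ * buffers sorted by XRI so they can later be block-posted as a sequential
+ * XRI range.
+ */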
+int
+lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf)
+{
+	LIST_HEAD(blist);
+	struct lpfc_sli4_hdw_queue *qp;
+	struct lpfc_io_buf *lpfc_cmd;
+	struct lpfc_io_buf *iobufp, *prev_iobufp;
+	int idx, cnt, xri, inserted;
+
+	cnt = 0;
+	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
+		qp = &phba->sli4_hba.hdwq[idx];
+		spin_lock_irq(&qp->io_buf_list_get_lock);
+		spin_lock(&qp->io_buf_list_put_lock);
+
+		/* Take everything off the get and put lists */
+		list_splice_init(&qp->lpfc_io_buf_list_get, &blist);
+		list_splice(&qp->lpfc_io_buf_list_put, &blist);
+		INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
+		INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
+		cnt += qp->get_io_bufs + qp->put_io_bufs;
+		qp->get_io_bufs = 0;
+		qp->put_io_bufs = 0;
+		qp->total_io_bufs = 0;
+		spin_unlock(&qp->io_buf_list_put_lock);
+		spin_unlock_irq(&qp->io_buf_list_get_lock);
+	}
+
+	/*
+	 * Take IO buffers off blist and put on cbuf sorted by XRI.
+	 * This is because POST_SGL takes a sequential range of XRIs
+	 * to post to the firmware.
+	 */
+	for (idx = 0; idx < cnt; idx++) {
+		list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list);
+		if (!lpfc_cmd)
+			return cnt;
+		if (idx == 0) {
+			list_add_tail(&lpfc_cmd->list, cbuf);
+			continue;
+		}
+		xri = lpfc_cmd->cur_iocbq.sli4_xritag;
+		inserted = 0;
+		prev_iobufp = NULL;
+		list_for_each_entry(iobufp, cbuf, list) {
+			if (xri < iobufp->cur_iocbq.sli4_xritag) {
+				if (prev_iobufp)
+					list_add(&lpfc_cmd->list,
+						 &prev_iobufp->list);
+				else
+					list_add(&lpfc_cmd->list, cbuf);
+				inserted = 1;
+				break;
+			}
+			prev_iobufp = iobufp;
+		}
+		if (!inserted)
+			list_add_tail(&lpfc_cmd->list, cbuf);
+	}
+	return cnt;
+}
+
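+/* Redistribute the buffers on @cbuf round-robin across the hardware
+ * queues, rebinding each buffer to its new HWQ before adding it to that
+ * queue's put list.
+ */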
+int
+lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
+{
+	struct lpfc_sli4_hdw_queue *qp;
+	struct lpfc_io_buf *lpfc_cmd;
+	int idx, cnt;
+
+	qp = phba->sli4_hba.hdwq;
+	cnt = 0;
+	while (!list_empty(cbuf)) {
+		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
+			list_remove_head(cbuf, lpfc_cmd,
+					 struct lpfc_io_buf, list);
+			if (!lpfc_cmd)
+				return cnt;
+			cnt++;
+			qp = &phba->sli4_hba.hdwq[idx];
+			lpfc_cmd->hdwq_no = idx;
+			lpfc_cmd->hdwq = qp;
+			lpfc_cmd->cur_iocbq.wqe_cmpl = NULL;
+			lpfc_cmd->cur_iocbq.iocb_cmpl = NULL;
+			spin_lock(&qp->io_buf_list_put_lock);
+			list_add_tail(&lpfc_cmd->list,
+				      &qp->lpfc_io_buf_list_put);
+			qp->put_io_bufs++;
+			qp->total_io_bufs++;
+			spin_unlock(&qp->io_buf_list_put_lock);
+		}
+	}
+	return cnt;
+}
+
 /**
- * lpfc_sli4_scsi_sgl_update - update xri-sgl sizing and mapping
+ * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping
  * @phba: pointer to lpfc hba data structure.
  *
  * This routine first calculates the sizes of the current els and allocated
@@ -3648,94 +3994,190 @@
  *   0 - successful (for now, it always returns 0)
  **/
 int
-lpfc_sli4_scsi_sgl_update(struct lpfc_hba *phba)
+lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
 {
-	struct lpfc_scsi_buf *psb, *psb_next;
-	uint16_t i, lxri, els_xri_cnt, scsi_xri_cnt;
-	LIST_HEAD(scsi_sgl_list);
-	int rc;
+	struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
+	uint16_t i, lxri, els_xri_cnt;
+	uint16_t io_xri_cnt, io_xri_max;
+	LIST_HEAD(io_sgl_list);
+	int rc, cnt;
 
 	/*
-	 * update on pci function's els xri-sgl list
+	 * update on pci function's allocated nvme xri-sgl list
 	 */
+
+	/* maximum number of xris available for nvme buffers */
 	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
-	phba->total_scsi_bufs = 0;
-
-	/*
-	 * update on pci function's allocated scsi xri-sgl list
-	 */
-	/* maximum number of xris available for scsi buffers */
-	phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri -
-				      els_xri_cnt;
-
-	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
-		return 0;
-
-	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
-		phba->sli4_hba.scsi_xri_max =  /* Split them up */
-			(phba->sli4_hba.scsi_xri_max *
-			 phba->cfg_xri_split) / 100;
-
-	spin_lock_irq(&phba->scsi_buf_list_get_lock);
-	spin_lock(&phba->scsi_buf_list_put_lock);
-	list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list);
-	list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list);
-	spin_unlock(&phba->scsi_buf_list_put_lock);
-	spin_unlock_irq(&phba->scsi_buf_list_get_lock);
+	io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
+	phba->sli4_hba.io_xri_max = io_xri_max;
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
-			"6060 Current allocated SCSI xri-sgl count:%d, "
-			"maximum  SCSI xri count:%d (split:%d)\n",
-			phba->sli4_hba.scsi_xri_cnt,
-			phba->sli4_hba.scsi_xri_max, phba->cfg_xri_split);
+			"6074 Current allocated XRI sgl count:%d, "
+			"maximum XRI count:%d\n",
+			phba->sli4_hba.io_xri_cnt,
+			phba->sli4_hba.io_xri_max);
 
-	if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
-		/* max scsi xri shrinked below the allocated scsi buffers */
-		scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt -
-					phba->sli4_hba.scsi_xri_max;
-		/* release the extra allocated scsi buffers */
-		for (i = 0; i < scsi_xri_cnt; i++) {
-			list_remove_head(&scsi_sgl_list, psb,
-					 struct lpfc_scsi_buf, list);
-			if (psb) {
+	cnt = lpfc_io_buf_flush(phba, &io_sgl_list);
+
+	if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
+		/* max nvme xri shrunk below the allocated nvme buffers */
+		io_xri_cnt = phba->sli4_hba.io_xri_cnt -
+					phba->sli4_hba.io_xri_max;
+		/* release the extra allocated nvme buffers */
+		for (i = 0; i < io_xri_cnt; i++) {
+			list_remove_head(&io_sgl_list, lpfc_ncmd,
+					 struct lpfc_io_buf, list);
+			if (lpfc_ncmd) {
 				dma_pool_free(phba->lpfc_sg_dma_buf_pool,
-					      psb->data, psb->dma_handle);
-				kfree(psb);
+					      lpfc_ncmd->data,
+					      lpfc_ncmd->dma_handle);
+				kfree(lpfc_ncmd);
 			}
 		}
-		spin_lock_irq(&phba->scsi_buf_list_get_lock);
-		phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt;
-		spin_unlock_irq(&phba->scsi_buf_list_get_lock);
+		phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
 	}
 
-	/* update xris associated to remaining allocated scsi buffers */
-	psb = NULL;
-	psb_next = NULL;
-	list_for_each_entry_safe(psb, psb_next, &scsi_sgl_list, list) {
+	/* update xris associated to remaining allocated nvme buffers */
+	lpfc_ncmd = NULL;
+	lpfc_ncmd_next = NULL;
+	phba->sli4_hba.io_xri_cnt = cnt;
+	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+				 &io_sgl_list, list) {
 		lxri = lpfc_sli4_next_xritag(phba);
 		if (lxri == NO_XRI) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-					"2560 Failed to allocate xri for "
-					"scsi buffer\n");
+					"6075 Failed to allocate xri for "
+					"nvme buffer\n");
 			rc = -ENOMEM;
 			goto out_free_mem;
 		}
-		psb->cur_iocbq.sli4_lxritag = lxri;
-		psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
+		lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
+		lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
 	}
-	spin_lock_irq(&phba->scsi_buf_list_get_lock);
-	spin_lock(&phba->scsi_buf_list_put_lock);
-	list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get);
-	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
-	spin_unlock(&phba->scsi_buf_list_put_lock);
-	spin_unlock_irq(&phba->scsi_buf_list_get_lock);
+	cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
 	return 0;
 
 out_free_mem:
-	lpfc_scsi_free(phba);
+	lpfc_io_free(phba);
 	return rc;
 }
 
+/**
+ * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec
+ * @phba: pointer to lpfc hba data structure.
+ * @num_to_alloc: The requested number of buffers to allocate.
+ *
+ * This routine allocates IO buffers for a device with the SLI-4 interface
+ * spec; each buffer contains all the information needed to initiate an I/O.
+ * After allocating up to @num_to_alloc IO buffers and putting them on a
+ * list, it posts them to the port using SGL block post.
+ *
+ * Return codes:
+ *   int - number of IO buffers that were allocated and posted.
+ *   0 = failure, less than num_to_alloc is a partial failure.
+ **/
+int
+lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
+{
+	struct lpfc_io_buf *lpfc_ncmd;
+	struct lpfc_iocbq *pwqeq;
+	uint16_t iotag, lxri = 0;
+	int bcnt, num_posted;
+	LIST_HEAD(prep_nblist);
+	LIST_HEAD(post_nblist);
+	LIST_HEAD(nvme_nblist);
+
+	phba->sli4_hba.io_xri_cnt = 0;
+	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
+		lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL);
+		if (!lpfc_ncmd)
+			break;
+		/*
+		 * Get memory from the pci pool to map the virt space to
+		 * pci bus space for an I/O. The DMA buffer includes the
+		 * number of SGE's necessary to support the sg_tablesize.
+		 */
+		lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
+						  GFP_KERNEL,
+						  &lpfc_ncmd->dma_handle);
+		if (!lpfc_ncmd->data) {
+			kfree(lpfc_ncmd);
+			break;
+		}
+
+		if (phba->cfg_xpsgl && !phba->nvmet_support) {
+			INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list);
+		} else {
+			/*
+			 * 4K Page alignment is CRITICAL to BlockGuard, double
+			 * check to be sure.
+			 */
+			if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
+			    (((unsigned long)(lpfc_ncmd->data) &
+			    (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+						"3369 Memory alignment err: "
+						"addr=%lx\n",
+						(unsigned long)lpfc_ncmd->data);
+				dma_pool_free(phba->lpfc_sg_dma_buf_pool,
+					      lpfc_ncmd->data,
+					      lpfc_ncmd->dma_handle);
+				kfree(lpfc_ncmd);
+				break;
+			}
+		}
+
+		INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list);
+
+		lxri = lpfc_sli4_next_xritag(phba);
+		if (lxri == NO_XRI) {
+			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
+				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
+			kfree(lpfc_ncmd);
+			break;
+		}
+		pwqeq = &lpfc_ncmd->cur_iocbq;
+
+		/* Allocate iotag for lpfc_ncmd->cur_iocbq. */
+		iotag = lpfc_sli_next_iotag(phba, pwqeq);
+		if (iotag == 0) {
+			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
+				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
+			kfree(lpfc_ncmd);
+			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+					"6121 Failed to allocate IOTAG for"
+					" XRI:0x%x\n", lxri);
+			lpfc_sli4_free_xri(phba, lxri);
+			break;
+		}
+		pwqeq->sli4_lxritag = lxri;
+		pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
+		pwqeq->context1 = lpfc_ncmd;
+
+		/* Initialize local short-hand pointers. */
+		lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
+		lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
+		lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
+		spin_lock_init(&lpfc_ncmd->buf_lock);
+
+		/* add the nvme buffer to a post list */
+		list_add_tail(&lpfc_ncmd->list, &post_nblist);
+		phba->sli4_hba.io_xri_cnt++;
+	}
+	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
+			"6114 Allocate %d out of %d requested new NVME "
+			"buffers\n", bcnt, num_to_alloc);
+
+	/* post the list of nvme buffer sgls to port if available */
+	if (!list_empty(&post_nblist))
+		num_posted = lpfc_sli4_post_io_sgl_list(
+				phba, &post_nblist, bcnt);
+	else
+		num_posted = 0;
+
+	return num_posted;
+}
+
 static uint64_t
 lpfc_get_wwpn(struct lpfc_hba *phba)
 {
@@ -3772,111 +4214,6 @@
 }
 
 /**
- * lpfc_sli4_nvme_sgl_update - update xri-sgl sizing and mapping
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine first calculates the sizes of the current els and allocated
- * scsi sgl lists, and then goes through all sgls to updates the physical
- * XRIs assigned due to port function reset. During port initialization, the
- * current els and allocated scsi sgl lists are 0s.
- *
- * Return codes
- *   0 - successful (for now, it always returns 0)
- **/
-int
-lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
-{
-	struct lpfc_nvme_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
-	uint16_t i, lxri, els_xri_cnt;
-	uint16_t nvme_xri_cnt, nvme_xri_max;
-	LIST_HEAD(nvme_sgl_list);
-	int rc, cnt;
-
-	phba->total_nvme_bufs = 0;
-	phba->get_nvme_bufs = 0;
-	phba->put_nvme_bufs = 0;
-
-	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
-		return 0;
-	/*
-	 * update on pci function's allocated nvme xri-sgl list
-	 */
-
-	/* maximum number of xris available for nvme buffers */
-	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
-	nvme_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
-	phba->sli4_hba.nvme_xri_max = nvme_xri_max;
-	phba->sli4_hba.nvme_xri_max -= phba->sli4_hba.scsi_xri_max;
-
-	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
-			"6074 Current allocated NVME xri-sgl count:%d, "
-			"maximum  NVME xri count:%d\n",
-			phba->sli4_hba.nvme_xri_cnt,
-			phba->sli4_hba.nvme_xri_max);
-
-	spin_lock_irq(&phba->nvme_buf_list_get_lock);
-	spin_lock(&phba->nvme_buf_list_put_lock);
-	list_splice_init(&phba->lpfc_nvme_buf_list_get, &nvme_sgl_list);
-	list_splice(&phba->lpfc_nvme_buf_list_put, &nvme_sgl_list);
-	cnt = phba->get_nvme_bufs + phba->put_nvme_bufs;
-	phba->get_nvme_bufs = 0;
-	phba->put_nvme_bufs = 0;
-	spin_unlock(&phba->nvme_buf_list_put_lock);
-	spin_unlock_irq(&phba->nvme_buf_list_get_lock);
-
-	if (phba->sli4_hba.nvme_xri_cnt > phba->sli4_hba.nvme_xri_max) {
-		/* max nvme xri shrunk below the allocated nvme buffers */
-		spin_lock_irq(&phba->nvme_buf_list_get_lock);
-		nvme_xri_cnt = phba->sli4_hba.nvme_xri_cnt -
-					phba->sli4_hba.nvme_xri_max;
-		spin_unlock_irq(&phba->nvme_buf_list_get_lock);
-		/* release the extra allocated nvme buffers */
-		for (i = 0; i < nvme_xri_cnt; i++) {
-			list_remove_head(&nvme_sgl_list, lpfc_ncmd,
-					 struct lpfc_nvme_buf, list);
-			if (lpfc_ncmd) {
-				dma_pool_free(phba->lpfc_sg_dma_buf_pool,
-					      lpfc_ncmd->data,
-					      lpfc_ncmd->dma_handle);
-				kfree(lpfc_ncmd);
-			}
-		}
-		spin_lock_irq(&phba->nvme_buf_list_get_lock);
-		phba->sli4_hba.nvme_xri_cnt -= nvme_xri_cnt;
-		spin_unlock_irq(&phba->nvme_buf_list_get_lock);
-	}
-
-	/* update xris associated to remaining allocated nvme buffers */
-	lpfc_ncmd = NULL;
-	lpfc_ncmd_next = NULL;
-	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
-				 &nvme_sgl_list, list) {
-		lxri = lpfc_sli4_next_xritag(phba);
-		if (lxri == NO_XRI) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-					"6075 Failed to allocate xri for "
-					"nvme buffer\n");
-			rc = -ENOMEM;
-			goto out_free_mem;
-		}
-		lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
-		lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
-	}
-	spin_lock_irq(&phba->nvme_buf_list_get_lock);
-	spin_lock(&phba->nvme_buf_list_put_lock);
-	list_splice_init(&nvme_sgl_list, &phba->lpfc_nvme_buf_list_get);
-	phba->get_nvme_bufs = cnt;
-	INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
-	spin_unlock(&phba->nvme_buf_list_put_lock);
-	spin_unlock_irq(&phba->nvme_buf_list_get_lock);
-	return 0;
-
-out_free_mem:
-	lpfc_nvme_free(phba);
-	return rc;
-}
-
-/**
  * lpfc_create_port - Create an FC port
  * @phba: pointer to lpfc hba data structure.
  * @instance: a unique integer ID to this FC port.
@@ -3951,17 +4288,35 @@
 	vport->fc_rscn_flush = 0;
 	lpfc_get_vport_cfgparam(vport);
 
+	/* Adjust value in vport */
+	vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type;
+
 	shost->unique_id = instance;
 	shost->max_id = LPFC_MAX_TARGET;
 	shost->max_lun = vport->cfg_max_luns;
 	shost->this_id = -1;
 	shost->max_cmd_len = 16;
-	shost->nr_hw_queues = phba->cfg_fcp_io_channel;
+
 	if (phba->sli_rev == LPFC_SLI_REV4) {
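+		/* Clamp the configured MQ threshold to the hardware queue
+		 * count, then expose at most min(2 * NUMA nodes, threshold)
+		 * hardware queues to the SCSI midlayer.
+		 */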
+		if (!phba->cfg_fcp_mq_threshold ||
+		    phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
+			phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;
+
+		shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
+					    phba->cfg_fcp_mq_threshold);
+
 		shost->dma_boundary =
 			phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
-		shost->sg_tablesize = phba->cfg_sg_seg_cnt;
-	}
+
+		if (phba->cfg_xpsgl && !phba->nvmet_support)
+			shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE;
+		else
+			shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
+	} else
+		/* SLI-3 has a limited number of hardware queues (3),
+		 * thus there is only one for FCP processing.
+		 */
+		shost->nr_hw_queues = 1;
 
 	/*
 	 * Set initial can_queue value since 0 is no longer supported and
@@ -3988,13 +4343,16 @@
 
 	timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);
 
+	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
+		lpfc_setup_bg(phba, shost);
+
 	error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
 	if (error)
 		goto out_put_shost;
 
-	spin_lock_irq(&phba->hbalock);
+	spin_lock_irq(&phba->port_list_lock);
 	list_add_tail(&vport->listentry, &phba->port_list);
-	spin_unlock_irq(&phba->hbalock);
+	spin_unlock_irq(&phba->port_list_lock);
 	return vport;
 
 out_put_shost:
@@ -4020,9 +4378,9 @@
 	fc_remove_host(shost);
 	scsi_remove_host(shost);
 
-	spin_lock_irq(&phba->hbalock);
+	spin_lock_irq(&phba->port_list_lock);
 	list_del_init(&vport->listentry);
-	spin_unlock_irq(&phba->hbalock);
+	spin_unlock_irq(&phba->port_list_lock);
 
 	lpfc_cleanup(vport);
 	return;
@@ -4106,6 +4464,32 @@
 	return stat;
 }
 
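+/*
+ * Translate the adapter's link-module type (LMT) bits into the FC
+ * transport's supported-speeds mask. Factored out of
+ * lpfc_host_attrib_init() so it can be rerun when READ_CONFIG refreshes
+ * phba->lmt.
+ */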
+static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	fc_host_supported_speeds(shost) = 0;
+	if (phba->lmt & LMT_128Gb)
+		fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
+	if (phba->lmt & LMT_64Gb)
+		fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
+	if (phba->lmt & LMT_32Gb)
+		fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
+	if (phba->lmt & LMT_16Gb)
+		fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
+	if (phba->lmt & LMT_10Gb)
+		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
+	if (phba->lmt & LMT_8Gb)
+		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
+	if (phba->lmt & LMT_4Gb)
+		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
+	if (phba->lmt & LMT_2Gb)
+		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
+	if (phba->lmt & LMT_1Gb)
+		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
+}
+
 /**
  * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
  * @shost: pointer to SCSI host data structure.
@@ -4133,23 +4517,7 @@
 	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
 				 sizeof fc_host_symbolic_name(shost));
 
-	fc_host_supported_speeds(shost) = 0;
-	if (phba->lmt & LMT_64Gb)
-		fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
-	if (phba->lmt & LMT_32Gb)
-		fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
-	if (phba->lmt & LMT_16Gb)
-		fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
-	if (phba->lmt & LMT_10Gb)
-		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
-	if (phba->lmt & LMT_8Gb)
-		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
-	if (phba->lmt & LMT_4Gb)
-		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
-	if (phba->lmt & LMT_2Gb)
-		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
-	if (phba->lmt & LMT_1Gb)
-		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
+	lpfc_host_supported_speeds_set(shost);
 
 	fc_host_maxframe_size(shost) =
 		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
@@ -4205,7 +4573,8 @@
 {
 	/* Reset some HBA SLI4 setup states */
 	lpfc_stop_hba_timers(phba);
-	phba->pport->work_port_events = 0;
+	if (phba->pport)
+		phba->pport->work_port_events = 0;
 	phba->sli4_hba.intr_enable = 0;
 }
 
@@ -4467,6 +4836,9 @@
 		case LPFC_FC_LA_SPEED_64G:
 			port_speed = 64000;
 			break;
+		case LPFC_FC_LA_SPEED_128G:
+			port_speed = 128000;
+			break;
 		default:
 			port_speed = 0;
 		}
@@ -4609,6 +4981,136 @@
 }
 
 /**
+ * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read
+ * topology.
+ * @phba: pointer to lpfc hba data structure.
+ * @speed_code: asynchronous event link speed code.
+ *
+ * This routine parses the given SLI4 async event link speed code into the
+ * corresponding Read topology link speed value.
+ *
+ * Return: link speed in terms of Read topology.
+ **/
+static uint8_t
+lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
+{
+	uint8_t port_speed;
+
+	switch (speed_code) {
+	case LPFC_FC_LA_SPEED_1G:
+		port_speed = LPFC_LINK_SPEED_1GHZ;
+		break;
+	case LPFC_FC_LA_SPEED_2G:
+		port_speed = LPFC_LINK_SPEED_2GHZ;
+		break;
+	case LPFC_FC_LA_SPEED_4G:
+		port_speed = LPFC_LINK_SPEED_4GHZ;
+		break;
+	case LPFC_FC_LA_SPEED_8G:
+		port_speed = LPFC_LINK_SPEED_8GHZ;
+		break;
+	case LPFC_FC_LA_SPEED_16G:
+		port_speed = LPFC_LINK_SPEED_16GHZ;
+		break;
+	case LPFC_FC_LA_SPEED_32G:
+		port_speed = LPFC_LINK_SPEED_32GHZ;
+		break;
+	case LPFC_FC_LA_SPEED_64G:
+		port_speed = LPFC_LINK_SPEED_64GHZ;
+		break;
+	case LPFC_FC_LA_SPEED_128G:
+		port_speed = LPFC_LINK_SPEED_128GHZ;
+		break;
+	case LPFC_FC_LA_SPEED_256G:
+		port_speed = LPFC_LINK_SPEED_256GHZ;
+		break;
+	default:
+		port_speed = 0;
+		break;
+	}
+
+	return port_speed;
+}
+
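+/* Link state string for port __idx if it is configured in the trunk */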
+#define trunk_link_status(__idx)\
+	bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
+	       ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
+		"Link up" : "Link down") : "NA"
+/* Did port __idx report an error? */
+#define trunk_port_fault(__idx)\
+	bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
+	       (port_fault & (1 << __idx) ? "YES" : "NO") : "NA"
+
+static void
+lpfc_update_trunk_link_status(struct lpfc_hba *phba,
+			      struct lpfc_acqe_fc_la *acqe_fc)
+{
+	uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc);
+	uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc);
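+	/* port_fault is a bitmask of trunk ports reporting a fault; err is
+	 * the fault code those ports share for this event.
+	 */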
+
+	phba->sli4_hba.link_state.speed =
+		lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
+				bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
+
+	phba->sli4_hba.link_state.logical_speed =
+				bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
+	/* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */
+	phba->fc_linkspeed =
+		 lpfc_async_link_speed_to_read_top(
+				phba,
+				bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
+
+	if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) {
+		phba->trunk_link.link0.state =
+			bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc)
+			? LPFC_LINK_UP : LPFC_LINK_DOWN;
+		phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0;
+	}
+	if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) {
+		phba->trunk_link.link1.state =
+			bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc)
+			? LPFC_LINK_UP : LPFC_LINK_DOWN;
+		phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0;
+	}
+	if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) {
+		phba->trunk_link.link2.state =
+			bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc)
+			? LPFC_LINK_UP : LPFC_LINK_DOWN;
+		phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0;
+	}
+	if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) {
+		phba->trunk_link.link3.state =
+			bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc)
+			? LPFC_LINK_UP : LPFC_LINK_DOWN;
+		phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0;
+	}
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+			"2910 Async FC Trunking Event - Speed:%d\n"
+			"\tLogical speed:%d "
+			"port0: %s port1: %s port2: %s port3: %s\n",
+			phba->sli4_hba.link_state.speed,
+			phba->sli4_hba.link_state.logical_speed,
+			trunk_link_status(0), trunk_link_status(1),
+			trunk_link_status(2), trunk_link_status(3));
+
+	if (port_fault)
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"3202 trunk error:0x%x (%s) seen on port0:%s "
+				/*
+				 * SLI-4: only error codes up to 0xA are
+				 * defined as of now. Print an appropriate
+				 * message in case the driver needs updating.
+				 */
+				"port1:%s port2:%s port3:%s\n", err, err > 0xA ?
+				"UNDEFINED. update driver." : trunk_errmsg[err],
+				trunk_port_fault(0), trunk_port_fault(1),
+				trunk_port_fault(2), trunk_port_fault(3));
+}
+
+/**
  * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
  * @phba: pointer to lpfc hba data structure.
  * @acqe_fc: pointer to the async fc completion queue entry.
@@ -4633,6 +5135,13 @@
 				bf_get(lpfc_trailer_type, acqe_fc));
 		return;
 	}
+
+	if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
+	    LPFC_FC_LA_TYPE_TRUNKING_EVENT) {
+		lpfc_update_trunk_link_status(phba, acqe_fc);
+		return;
+	}
+
 	/* Keep the link status for extra SLI4 state machine reference */
 	phba->sli4_hba.link_state.speed =
 			lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
@@ -4648,8 +5157,14 @@
 				bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
 	phba->sli4_hba.link_state.fault =
 				bf_get(lpfc_acqe_link_fault, acqe_fc);
-	phba->sli4_hba.link_state.logical_speed =
+
+	if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
+	    LPFC_FC_LA_TYPE_LINK_DOWN)
+		phba->sli4_hba.link_state.logical_speed = 0;
+	else if	(!phba->sli4_hba.conf_trunk)
+		phba->sli4_hba.link_state.logical_speed =
 				bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
+
 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
 			"2896 Async FC event - Speed:%dGBaud Topology:x%x "
 			"LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
@@ -4762,6 +5277,8 @@
 	struct temp_event temp_event_data;
 	struct lpfc_acqe_misconfigured_event *misconfigured;
 	struct Scsi_Host  *shost;
+	struct lpfc_vport **vports;
+	int rc, i;
 
 	evt_type = bf_get(lpfc_trailer_type, acqe_sli);
 
@@ -4887,6 +5404,25 @@
 			sprintf(message, "Unknown event status x%02x", status);
 			break;
 		}
+
+		/* Issue READ_CONFIG mbox command to refresh supported speeds */
+		rc = lpfc_sli4_read_config(phba);
+		if (rc) {
+			phba->lmt = 0;
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"3194 Unable to retrieve supported "
+					"speeds, rc = 0x%x\n", rc);
+		}
+		vports = lpfc_create_vport_work_array(phba);
+		if (vports != NULL) {
+			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
+					i++) {
+				shost = lpfc_shost_from_vport(vports[i]);
+				lpfc_host_supported_speeds_set(shost);
+			}
+		}
+		lpfc_destroy_vport_work_array(phba, vports);
+
 		phba->sli4_hba.lnk_info.optic_state = status;
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"3176 Port Name %c %s\n", port_name, message);
@@ -5044,7 +5580,7 @@
 			break;
 		}
 		/* If fast FCF failover rescan event is pending, do nothing */
-		if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
+		if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) {
 			spin_unlock_irq(&phba->hbalock);
 			break;
 		}
@@ -5625,7 +6161,10 @@
 	/* Initialize ndlp management spinlock */
 	spin_lock_init(&phba->ndlp_lock);
 
+	/* Initialize port_list spinlock */
+	spin_lock_init(&phba->port_list_lock);
 	INIT_LIST_HEAD(&phba->port_list);
+
 	INIT_LIST_HEAD(&phba->work_list);
 	init_waitqueue_head(&phba->wait_4_mlo_m_q);
 
@@ -5640,24 +6179,11 @@
 				"NVME" : " "),
 			(phba->nvmet_support ? "NVMET" : " "));
 
-	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
-		/* Initialize the scsi buffer list used by driver for scsi IO */
-		spin_lock_init(&phba->scsi_buf_list_get_lock);
-		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
-		spin_lock_init(&phba->scsi_buf_list_put_lock);
-		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
-	}
-
-	if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
-		(phba->nvmet_support == 0)) {
-		/* Initialize the NVME buffer list used by driver for NVME IO */
-		spin_lock_init(&phba->nvme_buf_list_get_lock);
-		INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_get);
-		phba->get_nvme_bufs = 0;
-		spin_lock_init(&phba->nvme_buf_list_put_lock);
-		INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
-		phba->put_nvme_bufs = 0;
-	}
+	/* Initialize the IO buffer list used by driver for SLI3 SCSI */
+	spin_lock_init(&phba->scsi_buf_list_get_lock);
+	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
+	spin_lock_init(&phba->scsi_buf_list_put_lock);
+	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
 
 	/* Initialize the fabric iocb list */
 	INIT_LIST_HEAD(&phba->fabric_iocb_list);
@@ -5681,6 +6207,8 @@
 	/* Heartbeat timer */
 	timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);
 
+	INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work);
+
 	return 0;
 }
 
@@ -5698,7 +6226,7 @@
 static int
 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
 {
-	int rc;
+	int rc, entry_sz;
 
 	/*
 	 * Initialize timers used by driver
@@ -5743,6 +6271,11 @@
 	lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
 	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
 
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		entry_sz = sizeof(struct sli4_sge);
+	else
+		entry_sz = sizeof(struct ulp_bde64);
+
 	/* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
 	if (phba->cfg_enable_bg) {
 		/*
@@ -5756,7 +6289,7 @@
 		 */
 		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
 			sizeof(struct fcp_rsp) +
-			(LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64));
+			(LPFC_MAX_SG_SEG_CNT * entry_sz);
 
 		if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
 			phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
@@ -5771,7 +6304,7 @@
 		 */
 		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
 			sizeof(struct fcp_rsp) +
-			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
+			((phba->cfg_sg_seg_cnt + 2) * entry_sz);
 
 		/* Total BDEs in BPL for scsi_sg_list */
 		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
@@ -5796,6 +6329,24 @@
 	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
 		return -ENOMEM;
 
+	phba->lpfc_sg_dma_buf_pool =
+		dma_pool_create("lpfc_sg_dma_buf_pool",
+				&phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
+				BPL_ALIGN_SZ, 0);
+
+	if (!phba->lpfc_sg_dma_buf_pool)
+		goto fail_free_mem;
+
+	phba->lpfc_cmd_rsp_buf_pool =
+			dma_pool_create("lpfc_cmd_rsp_buf_pool",
+					&phba->pcidev->dev,
+					sizeof(struct fcp_cmnd) +
+					sizeof(struct fcp_rsp),
+					BPL_ALIGN_SZ, 0);
+
+	if (!phba->lpfc_cmd_rsp_buf_pool)
+		goto fail_free_dma_buf_pool;
+
 	/*
 	 * Enable sr-iov virtual functions if supported and configured
 	 * through the module parameter.
@@ -5814,6 +6365,13 @@
 	}
 
 	return 0;
+
+fail_free_dma_buf_pool:
+	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
+	phba->lpfc_sg_dma_buf_pool = NULL;
+fail_free_mem:
+	lpfc_mem_free(phba);
+	return -ENOMEM;
 }
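+
+/*
+ * Editorial sketch of the dma_pool lifecycle the two pools above follow
+ * (the create calls are in this function; the alloc/free happen later,
+ * in the IO buffer setup and teardown paths):
+ *
+ *	struct dma_pool *pool;
+ *	dma_addr_t dma;
+ *	void *buf;
+ *
+ *	pool = dma_pool_create("name", &pdev->dev, size, align, 0);
+ *	buf = dma_pool_alloc(pool, GFP_KERNEL, &dma);
+ *	...
+ *	dma_pool_free(pool, buf, dma);
+ *	dma_pool_destroy(pool);
+ *
+ * dma_pool_destroy() is NULL-safe, so teardown paths may call it without
+ * checking the pointer first.
+ */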
 
 /**
@@ -5852,14 +6410,13 @@
 	uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
 	struct lpfc_mqe *mqe;
 	int longs;
-	int fof_vectors = 0;
 	int extra;
 	uint64_t wwn;
 	u32 if_type;
 	u32 if_fam;
 
-	phba->sli4_hba.num_online_cpu = num_online_cpus();
 	phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
+	phba->sli4_hba.num_possible_cpu = num_possible_cpus();
 	phba->sli4_hba.curr_disp_cpu = 0;
 
 	/* Get all the module params for configuring this host */
@@ -5875,6 +6432,11 @@
 	if (rc)
 		return -ENODEV;
 
+	/* Allocate all driver workqueues here */
+
+	/* The lpfc_wq workqueue for deferred irq use */
+	phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
+
 	/*
 	 * Initialize timers used by driver
 	 */
@@ -5909,82 +6471,6 @@
 	 * The WQ create will allocate the ring.
 	 */
 
-	/*
-	 * 1 for cmd, 1 for rsp, NVME adds an extra one
-	 * for boundary conditions in its max_sgl_segment template.
-	 */
-	extra = 2;
-	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
-		extra++;
-
-	/*
-	 * It doesn't matter what family our adapter is in, we are
-	 * limited to 2 Pages, 512 SGEs, for our SGL.
-	 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
-	 */
-	max_buf_size = (2 * SLI4_PAGE_SIZE);
-	if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - extra)
-		phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - extra;
-
-	/*
-	 * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
-	 * used to create the sg_dma_buf_pool must be calculated.
-	 */
-	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
-		/*
-		 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
-		 * the FCP rsp, and a SGE. Sice we have no control
-		 * over how many protection segments the SCSI Layer
-		 * will hand us (ie: there could be one for every block
-		 * in the IO), just allocate enough SGEs to accomidate
-		 * our max amount and we need to limit lpfc_sg_seg_cnt
-		 * to minimize the risk of running out.
-		 */
-		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
-				sizeof(struct fcp_rsp) + max_buf_size;
-
-		/* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
-		phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
-
-		if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF)
-			phba->cfg_sg_seg_cnt =
-				LPFC_MAX_SG_SLI4_SEG_CNT_DIF;
-	} else {
-		/*
-		 * The scsi_buf for a regular I/O holds the FCP cmnd,
-		 * the FCP rsp, a SGE for each, and a SGE for up to
-		 * cfg_sg_seg_cnt data segments.
-		 */
-		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
-				sizeof(struct fcp_rsp) +
-				((phba->cfg_sg_seg_cnt + extra) *
-				sizeof(struct sli4_sge));
-
-		/* Total SGEs for scsi_sg_list */
-		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
-
-		/*
-		 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
-		 * need to post 1 page for the SGL.
-		 */
-	}
-
-	/* Initialize the host templates with the updated values. */
-	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
-	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
-	lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
-
-	if (phba->cfg_sg_dma_buf_size  <= LPFC_MIN_SG_SLI4_BUF_SZ)
-		phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
-	else
-		phba->cfg_sg_dma_buf_size =
-			SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
-
-	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
-			"9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n",
-			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
-			phba->cfg_total_seg_cnt);
-
 	/* Initialize buffer queue management fields */
 	INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
 	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
@@ -5993,18 +6479,17 @@
 	/*
 	 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
 	 */
-	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
-		/* Initialize the Abort scsi buffer list used by driver */
-		spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
-		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
-	}
+	/* Initialize the Abort buffer list used by driver */
+	spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock);
+	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list);
 
 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
 		/* Initialize the Abort nvme buffer list used by driver */
-		spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock);
-		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
+		spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
 		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
+		spin_lock_init(&phba->sli4_hba.t_active_list_lock);
+		INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list);
 	}
 
 	/* This abort list used by worker thread */
@@ -6138,6 +6623,8 @@
 						" NVME_TARGET_FC infrastructure"
 						" is not in kernel\n");
 #endif
+				/* Not supported for NVMET */
+				phba->cfg_xri_rebalancing = 0;
 				break;
 			}
 		}
@@ -6202,22 +6689,148 @@
 		}
 	}
 
+	/*
+	 * 1 for cmd, 1 for rsp, NVME adds an extra one
+	 * for boundary conditions in its max_sgl_segment template.
+	 */
+	extra = 2;
+	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+		extra++;
+
+	/*
+	 * It doesn't matter what family our adapter is in; we are
+	 * limited to 2 pages (512 SGEs) for our SGL.
+	 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
+	 */
+	max_buf_size = (2 * SLI4_PAGE_SIZE);
+
+	/*
+	 * Since lpfc_sg_seg_cnt is a module param, the sg_dma_buf_size
+	 * used to create the sg_dma_buf_pool must be calculated.
+	 */
+	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
+		/* Both cfg_enable_bg and cfg_external_dif code paths */
+
+		/*
+		 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
+		 * the FCP rsp, and an SGE. Since we have no control
+		 * over how many protection segments the SCSI layer
+		 * will hand us (i.e. there could be one for every block
+		 * in the IO), just allocate enough SGEs to accommodate
+		 * our max amount, and limit lpfc_sg_seg_cnt to
+		 * minimize the risk of running out.
+		 */
+		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+				sizeof(struct fcp_rsp) + max_buf_size;
+
+		/* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
+		phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
+
+		/*
+		 * If supporting DIF, reduce the seg count for scsi to
+		 * allow room for the DIF sges.
+		 */
+		if (phba->cfg_enable_bg &&
+		    phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
+			phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
+		else
+			phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
+
+	} else {
+		/*
+		 * The scsi_buf for a regular I/O holds the FCP cmnd,
+		 * the FCP rsp, a SGE for each, and a SGE for up to
+		 * cfg_sg_seg_cnt data segments.
+		 */
+		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+				sizeof(struct fcp_rsp) +
+				((phba->cfg_sg_seg_cnt + extra) *
+				sizeof(struct sli4_sge));
+
+		/* Total SGEs for scsi_sg_list */
+		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
+		phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
+
+		/*
+		 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
+		 * need to post 1 page for the SGL.
+		 */
+	}
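+
+	/*
+	 * Worked example for the non-DIF branch above (editorial; the
+	 * struct sizes are illustrative, not taken from the headers):
+	 * with sizeof(struct fcp_cmnd) = 32, sizeof(struct fcp_rsp) = 24,
+	 * sizeof(struct sli4_sge) = 16, cfg_sg_seg_cnt = 64 and extra = 3
+	 * (NVME enabled):
+	 *
+	 *	cfg_sg_dma_buf_size = 32 + 24 + (64 + 3) * 16 = 1128
+	 *
+	 * which the code below then raises to the minimum SLI4 buffer
+	 * size or rounds up to a SLI4 page multiple.
+	 */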
+
+	if (phba->cfg_xpsgl && !phba->nvmet_support)
+		phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE;
+	else if (phba->cfg_sg_dma_buf_size  <= LPFC_MIN_SG_SLI4_BUF_SZ)
+		phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
+	else
+		phba->cfg_sg_dma_buf_size =
+				SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
+
+	phba->border_sge_num = phba->cfg_sg_dma_buf_size /
+			       sizeof(struct sli4_sge);
+
+	/* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */
+	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+		if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
+			lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
+					"6300 Reducing NVME sg segment "
+					"cnt to %d\n",
+					LPFC_MAX_NVME_SEG_CNT);
+			phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
+		} else
+			phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
+	}
+
+	/* Initialize the host templates with the updated values. */
+	lpfc_vport_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
+	lpfc_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
+	lpfc_template_no_hr.sg_tablesize = phba->cfg_scsi_seg_cnt;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
+			"9087 sg_seg_cnt:%d dmabuf_size:%d "
+			"total:%d scsi:%d nvme:%d\n",
+			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
+			phba->cfg_total_seg_cnt,  phba->cfg_scsi_seg_cnt,
+			phba->cfg_nvme_seg_cnt);
+
+	if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
+		i = phba->cfg_sg_dma_buf_size;
+	else
+		i = SLI4_PAGE_SIZE;
+
+	phba->lpfc_sg_dma_buf_pool =
+			dma_pool_create("lpfc_sg_dma_buf_pool",
+					&phba->pcidev->dev,
+					phba->cfg_sg_dma_buf_size,
+					i, 0);
+	if (!phba->lpfc_sg_dma_buf_pool)
+		goto out_free_bsmbx;
+
+	phba->lpfc_cmd_rsp_buf_pool =
+			dma_pool_create("lpfc_cmd_rsp_buf_pool",
+					&phba->pcidev->dev,
+					sizeof(struct fcp_cmnd) +
+					sizeof(struct fcp_rsp),
+					i, 0);
+	if (!phba->lpfc_cmd_rsp_buf_pool)
+		goto out_free_sg_dma_buf;
+
 	mempool_free(mboxq, phba->mbox_mem_pool);
 
 	/* Verify OAS is supported */
 	lpfc_sli4_oas_verify(phba);
-	if (phba->cfg_fof)
-		fof_vectors = 1;
+
+	/* Verify RAS support on adapter */
+	lpfc_sli4_ras_init(phba);
 
 	/* Verify all the SLI4 queues */
 	rc = lpfc_sli4_queue_verify(phba);
 	if (rc)
-		goto out_free_bsmbx;
+		goto out_free_cmd_rsp_buf;
 
 	/* Create driver internal CQE event pool */
 	rc = lpfc_sli4_cq_event_pool_create(phba);
 	if (rc)
-		goto out_free_bsmbx;
+		goto out_free_cmd_rsp_buf;
 
 	/* Initialize sgl lists per host */
 	lpfc_init_sgl_list(phba);
@@ -6248,9 +6861,9 @@
 		goto out_remove_rpi_hdrs;
 	}
 
-	phba->sli4_hba.hba_eq_hdl = kcalloc(fof_vectors + phba->io_channel_irqs,
-						sizeof(struct lpfc_hba_eq_hdl),
-						GFP_KERNEL);
+	phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann,
+					    sizeof(struct lpfc_hba_eq_hdl),
+					    GFP_KERNEL);
 	if (!phba->sli4_hba.hba_eq_hdl) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"2572 Failed allocate memory for "
@@ -6259,7 +6872,7 @@
 		goto out_free_fcf_rr_bmask;
 	}
 
-	phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_present_cpu,
+	phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu,
 					sizeof(struct lpfc_vector_map_info),
 					GFP_KERNEL);
 	if (!phba->sli4_hba.cpu_map) {
@@ -6269,21 +6882,14 @@
 		rc = -ENOMEM;
 		goto out_free_hba_eq_hdl;
 	}
-	if (lpfc_used_cpu == NULL) {
-		lpfc_used_cpu = kcalloc(lpfc_present_cpu, sizeof(uint16_t),
-						GFP_KERNEL);
-		if (!lpfc_used_cpu) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"3335 Failed allocate memory for msi-x "
-					"interrupt vector mapping\n");
-			kfree(phba->sli4_hba.cpu_map);
-			rc = -ENOMEM;
-			goto out_free_hba_eq_hdl;
-		}
-		for (i = 0; i < lpfc_present_cpu; i++)
-			lpfc_used_cpu[i] = LPFC_VECTOR_MAP_EMPTY;
-	}
 
+	phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info);
+	if (!phba->sli4_hba.eq_info) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3321 Failed allocation for per_cpu stats\n");
+		rc = -ENOMEM;
+		goto out_free_hba_cpu_map;
+	}
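+
+	/* Editorial sketch of how a per-CPU allocation like eq_info is
+	 * consumed on a hot path (the field name 'icnt' is hypothetical):
+	 *
+	 *	struct lpfc_eq_intr_info *eqi;
+	 *
+	 *	eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu);
+	 *	eqi->icnt++;
+	 *
+	 * The matching free_percpu() appears in the resource-unset
+	 * teardown shown later in this patch.
+	 */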
 	/*
 	 * Enable sr-iov virtual functions if supported and configured
 	 * through the module parameter.
@@ -6303,6 +6909,8 @@
 
 	return 0;
 
+out_free_hba_cpu_map:
+	kfree(phba->sli4_hba.cpu_map);
 out_free_hba_eq_hdl:
 	kfree(phba->sli4_hba.hba_eq_hdl);
 out_free_fcf_rr_bmask:
@@ -6313,6 +6921,12 @@
 	lpfc_free_active_sgl(phba);
 out_destroy_cq_event_pool:
 	lpfc_sli4_cq_event_pool_destroy(phba);
+out_free_cmd_rsp_buf:
+	dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
+	phba->lpfc_cmd_rsp_buf_pool = NULL;
+out_free_sg_dma_buf:
+	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
+	phba->lpfc_sg_dma_buf_pool = NULL;
 out_free_bsmbx:
 	lpfc_destroy_bootstrap_mbox(phba);
 out_free_mem:
@@ -6332,10 +6946,12 @@
 {
 	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
 
+	free_percpu(phba->sli4_hba.eq_info);
+
 	/* Free memory allocated for msi-x interrupt vector to CPU mapping */
 	kfree(phba->sli4_hba.cpu_map);
+	phba->sli4_hba.num_possible_cpu = 0;
 	phba->sli4_hba.num_present_cpu = 0;
-	phba->sli4_hba.num_online_cpu = 0;
 	phba->sli4_hba.curr_disp_cpu = 0;
 
 	/* Free memory allocated for fast-path work queue handles */
@@ -6437,12 +7053,6 @@
 		return error;
 	}
 
-	/* The lpfc_wq workqueue for deferred irq use, is only used for SLI4 */
-	if (phba->sli_rev == LPFC_SLI_REV4)
-		phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
-	else
-		phba->wq = NULL;
-
 	return 0;
 }
 
@@ -6673,11 +7283,8 @@
 	/* els xri-sgl book keeping */
 	phba->sli4_hba.els_xri_cnt = 0;
 
-	/* scsi xri-buffer book keeping */
-	phba->sli4_hba.scsi_xri_cnt = 0;
-
 	/* nvme xri-buffer book keeping */
-	phba->sli4_hba.nvme_xri_cnt = 0;
+	phba->sli4_hba.io_xri_cnt = 0;
 }
 
 /**
@@ -6771,9 +7378,9 @@
 	if (!dmabuf)
 		return NULL;
 
-	dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
-					   LPFC_HDR_TEMPLATE_SIZE,
-					   &dmabuf->phys, GFP_KERNEL);
+	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+					  LPFC_HDR_TEMPLATE_SIZE,
+					  &dmabuf->phys, GFP_KERNEL);
 	if (!dmabuf->virt) {
 		rpi_hdr = NULL;
 		goto err_free_dmabuf;
@@ -6891,6 +7498,9 @@
 static void
 lpfc_hba_free(struct lpfc_hba *phba)
 {
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		kfree(phba->sli4_hba.hdwq);
+
 	/* Release the driver assigned board number */
 	idr_remove(&lpfc_hba_index, phba->brd_no);
 
@@ -6926,10 +7536,6 @@
 	phba->fc_arbtov = FF_DEF_ARBTOV;
 
 	atomic_set(&phba->sdev_cnt, 0);
-	atomic_set(&phba->fc4ScsiInputRequests, 0);
-	atomic_set(&phba->fc4ScsiOutputRequests, 0);
-	atomic_set(&phba->fc4ScsiControlRequests, 0);
-	atomic_set(&phba->fc4ScsiIoCmpls, 0);
 	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
 	if (!vport)
 		return -ENODEV;
@@ -7007,7 +7613,6 @@
 	uint32_t old_mask;
 	uint32_t old_guard;
 
-	int pagecnt = 10;
 	if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
 				"1478 Registering BlockGuard with the "
@@ -7044,56 +7649,6 @@
 				"layer, Bad protection parameters: %d %d\n",
 				old_mask, old_guard);
 	}
-
-	if (!_dump_buf_data) {
-		while (pagecnt) {
-			spin_lock_init(&_dump_buf_lock);
-			_dump_buf_data =
-				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
-			if (_dump_buf_data) {
-				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-					"9043 BLKGRD: allocated %d pages for "
-				       "_dump_buf_data at 0x%p\n",
-				       (1 << pagecnt), _dump_buf_data);
-				_dump_buf_data_order = pagecnt;
-				memset(_dump_buf_data, 0,
-				       ((1 << PAGE_SHIFT) << pagecnt));
-				break;
-			} else
-				--pagecnt;
-		}
-		if (!_dump_buf_data_order)
-			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-				"9044 BLKGRD: ERROR unable to allocate "
-			       "memory for hexdump\n");
-	} else
-		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-			"9045 BLKGRD: already allocated _dump_buf_data=0x%p"
-		       "\n", _dump_buf_data);
-	if (!_dump_buf_dif) {
-		while (pagecnt) {
-			_dump_buf_dif =
-				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
-			if (_dump_buf_dif) {
-				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-					"9046 BLKGRD: allocated %d pages for "
-				       "_dump_buf_dif at 0x%p\n",
-				       (1 << pagecnt), _dump_buf_dif);
-				_dump_buf_dif_order = pagecnt;
-				memset(_dump_buf_dif, 0,
-				       ((1 << PAGE_SHIFT) << pagecnt));
-				break;
-			} else
-				--pagecnt;
-		}
-		if (!_dump_buf_dif_order)
-			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-			"9047 BLKGRD: ERROR unable to allocate "
-			       "memory for hexdump\n");
-	} else
-		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-			"9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
-		       _dump_buf_dif);
 }
 
 /**
@@ -7118,8 +7673,6 @@
 	 */
 	shost = pci_get_drvdata(phba->pcidev);
 	shost->can_queue = phba->cfg_hba_queue_depth - 10;
-	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
-		lpfc_setup_bg(phba, shost);
 
 	lpfc_host_attrib_init(shost);
 
@@ -7155,26 +7708,22 @@
 static int
 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
 {
-	struct pci_dev *pdev;
+	struct pci_dev *pdev = phba->pcidev;
 	unsigned long bar0map_len, bar2map_len;
 	int i, hbq_count;
 	void *ptr;
-	int error = -ENODEV;
+	int error;
 
-	/* Obtain PCI device reference */
-	if (!phba->pcidev)
-		return error;
-	else
-		pdev = phba->pcidev;
+	if (!pdev)
+		return -ENODEV;
 
 	/* Set the device DMA mask size */
-	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
-	 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
-		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
-		 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
-			return error;
-		}
-	}
+	error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (error)
+		error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (error)
+		return error;
+	error = -ENODEV;
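+
+	/* Editorial note: dma_set_mask_and_coherent() replaces the old
+	 * pci_set_dma_mask()/pci_set_consistent_dma_mask() pair, setting
+	 * both the streaming and coherent masks at once; trying 64-bit
+	 * first and falling back to 32-bit is the standard idiom for
+	 * devices whose bus or IOMMU may not support full 64-bit DMA.
+	 */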
 
 	/* Get the bus address of Bar0 and Bar2 and the number of bytes
 	 * required by each mapping.
@@ -7202,8 +7751,8 @@
 	}
 
 	/* Allocate memory for SLI-2 structures */
-	phba->slim2p.virt = dma_zalloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
-						&phba->slim2p.phys, GFP_KERNEL);
+	phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
+					       &phba->slim2p.phys, GFP_KERNEL);
 	if (!phba->slim2p.virt)
 		goto out_iounmap;
 
@@ -7621,8 +8170,8 @@
 	 * plus an alignment restriction of 16 bytes.
 	 */
 	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
-	dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, bmbx_size,
-					   &dmabuf->phys, GFP_KERNEL);
+	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
+					  &dmabuf->phys, GFP_KERNEL);
 	if (!dmabuf->virt) {
 		kfree(dmabuf);
 		return -ENOMEM;
@@ -7711,7 +8260,7 @@
 	struct lpfc_rsrc_desc_fcfcoe *desc;
 	char *pdesc_0;
 	uint16_t forced_link_speed;
-	uint32_t if_type;
+	uint32_t if_type, qmin;
 	int length, i, rc = 0, rc2;
 
 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -7753,14 +8302,23 @@
 			phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
 		}
 
+		phba->sli4_hba.conf_trunk =
+			bf_get(lpfc_mbx_rd_conf_trunk, rd_config);
 		phba->sli4_hba.extents_in_use =
 			bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
 		phba->sli4_hba.max_cfg_param.max_xri =
 			bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
+		/* Reduce resource usage in kdump environment */
+		if (is_kdump_kernel() &&
+		    phba->sli4_hba.max_cfg_param.max_xri > 512)
+			phba->sli4_hba.max_cfg_param.max_xri = 512;
 		phba->sli4_hba.max_cfg_param.xri_base =
 			bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
 		phba->sli4_hba.max_cfg_param.max_vpi =
 			bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
+		/* Limit the max we support */
+		if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS)
+			phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS;
 		phba->sli4_hba.max_cfg_param.vpi_base =
 			bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
 		phba->sli4_hba.max_cfg_param.max_rpi =
@@ -7811,38 +8369,39 @@
 				phba->sli4_hba.max_cfg_param.max_rq);
 
 		/*
-		 * Calculate NVME queue resources based on how
-		 * many WQ/CQs are available.
+		 * Calculate queue resources based on how
+		 * many WQ/CQ/EQs are available.
 		 */
-		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
-			length = phba->sli4_hba.max_cfg_param.max_wq;
-			if (phba->sli4_hba.max_cfg_param.max_cq <
-			    phba->sli4_hba.max_cfg_param.max_wq)
-				length = phba->sli4_hba.max_cfg_param.max_cq;
+		qmin = phba->sli4_hba.max_cfg_param.max_wq;
+		if (phba->sli4_hba.max_cfg_param.max_cq < qmin)
+			qmin = phba->sli4_hba.max_cfg_param.max_cq;
+		if (phba->sli4_hba.max_cfg_param.max_eq < qmin)
+			qmin = phba->sli4_hba.max_cfg_param.max_eq;
+		/*
+		 * What's left after this can go toward NVME / FCP.
+		 * The minus 4 accounts for ELS, NVME LS, MBOX,
+		 * plus one extra. When configured for
+		 * NVMET, FCP io channel WQs are not created.
+		 */
+		qmin -= 4;
 
-			/*
-			 * Whats left after this can go toward NVME.
-			 * The minus 6 accounts for ELS, NVME LS, MBOX
-			 * fof plus a couple extra. When configured for
-			 * NVMET, FCP io channel WQs are not created.
-			 */
-			length -= 6;
-			if (!phba->nvmet_support)
-				length -= phba->cfg_fcp_io_channel;
-
-			if (phba->cfg_nvme_io_channel > length) {
-				lpfc_printf_log(
-					phba, KERN_ERR, LOG_SLI,
-					"2005 Reducing NVME IO channel to %d: "
-					"WQ %d CQ %d NVMEIO %d FCPIO %d\n",
-					length,
+		/* Check to see if there is enough for NVME */
+		if ((phba->cfg_irq_chann > qmin) ||
+		    (phba->cfg_hdw_queue > qmin)) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"2005 Reducing Queues: "
+					"WQ %d CQ %d EQ %d: min %d: "
+					"IRQ %d HDWQ %d\n",
 					phba->sli4_hba.max_cfg_param.max_wq,
 					phba->sli4_hba.max_cfg_param.max_cq,
-					phba->cfg_nvme_io_channel,
-					phba->cfg_fcp_io_channel);
+					phba->sli4_hba.max_cfg_param.max_eq,
+					qmin, phba->cfg_irq_chann,
+					phba->cfg_hdw_queue);
 
-				phba->cfg_nvme_io_channel = length;
-			}
+			if (phba->cfg_irq_chann > qmin)
+				phba->cfg_irq_chann = qmin;
+			if (phba->cfg_hdw_queue > qmin)
+				phba->cfg_hdw_queue = qmin;
 		}
 	}
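+
+	/*
+	 * Worked example of the qmin clamp above (editorial; the counts
+	 * are illustrative): with max_wq = 128, max_cq = 120 and
+	 * max_eq = 64, qmin = min(128, 120, 64) - 4 = 60, so both
+	 * cfg_irq_chann and cfg_hdw_queue are capped at 60 if they were
+	 * configured higher.
+	 */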
 
@@ -7971,7 +8530,7 @@
 	else
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"3028 GET_FUNCTION_CONFIG: failed to find "
-				"Resrouce Descriptor:x%x\n",
+				"Resource Descriptor:x%x\n",
 				LPFC_RSRC_DESC_TYPE_FCFCOE);
 
 read_cfg_out:
@@ -8054,53 +8613,22 @@
 static int
 lpfc_sli4_queue_verify(struct lpfc_hba *phba)
 {
-	int io_channel;
-	int fof_vectors = phba->cfg_fof ? 1 : 0;
-
 	/*
 	 * Sanity check for configured queue parameters against the run-time
 	 * device parameters
 	 */
 
-	/* Sanity check on HBA EQ parameters */
-	io_channel = phba->io_channel_irqs;
-
-	if (phba->sli4_hba.num_online_cpu < io_channel) {
-		lpfc_printf_log(phba,
-				KERN_ERR, LOG_INIT,
-				"3188 Reducing IO channels to match number of "
-				"online CPUs: from %d to %d\n",
-				io_channel, phba->sli4_hba.num_online_cpu);
-		io_channel = phba->sli4_hba.num_online_cpu;
-	}
-
-	if (io_channel + fof_vectors > phba->sli4_hba.max_cfg_param.max_eq) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"2575 Reducing IO channels to match number of "
-				"available EQs: from %d to %d\n",
-				io_channel,
-				phba->sli4_hba.max_cfg_param.max_eq);
-		io_channel = phba->sli4_hba.max_cfg_param.max_eq - fof_vectors;
-	}
-
-	/* The actual number of FCP / NVME event queues adopted */
-	if (io_channel != phba->io_channel_irqs)
-		phba->io_channel_irqs = io_channel;
-	if (phba->cfg_fcp_io_channel > io_channel)
-		phba->cfg_fcp_io_channel = io_channel;
-	if (phba->cfg_nvme_io_channel > io_channel)
-		phba->cfg_nvme_io_channel = io_channel;
 	if (phba->nvmet_support) {
-		if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq)
-			phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
+		if (phba->cfg_irq_chann < phba->cfg_nvmet_mrq)
+			phba->cfg_nvmet_mrq = phba->cfg_irq_chann;
+		if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
+			phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
 	}
-	if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
-		phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
 
 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-			"2574 IO channels: irqs %d fcp %d nvme %d MRQ: %d\n",
-			phba->io_channel_irqs, phba->cfg_fcp_io_channel,
-			phba->cfg_nvme_io_channel, phba->cfg_nvmet_mrq);
+			"2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n",
+			phba->cfg_hdw_queue, phba->cfg_irq_chann,
+			phba->cfg_nvmet_mrq);
 
 	/* Get EQ depth from module parameter, fake the default for now */
 	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
@@ -8113,80 +8641,56 @@
 }
 
 static int
-lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
+lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx)
 {
 	struct lpfc_queue *qdesc;
+	u32 wqesize;
+	int cpu;
 
-	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
-				      phba->sli4_hba.cq_esize,
-				      LPFC_CQE_EXP_COUNT);
-	if (!qdesc) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"0508 Failed allocate fast-path NVME CQ (%d)\n",
-				wqidx);
-		return 1;
-	}
-	qdesc->qe_valid = 1;
-	phba->sli4_hba.nvme_cq[wqidx] = qdesc;
-
-	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
-				      LPFC_WQE128_SIZE, LPFC_WQE_EXP_COUNT);
-	if (!qdesc) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"0509 Failed allocate fast-path NVME WQ (%d)\n",
-				wqidx);
-		return 1;
-	}
-	phba->sli4_hba.nvme_wq[wqidx] = qdesc;
-	list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
-	return 0;
-}
-
-static int
-lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
-{
-	struct lpfc_queue *qdesc;
-	uint32_t wqesize;
-
-	/* Create Fast Path FCP CQs */
+	cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ);
+	/* Create Fast Path IO CQs */
 	if (phba->enab_exp_wqcq_pages)
 		/* Increase the CQ size when WQEs contain an embedded cdb */
 		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
 					      phba->sli4_hba.cq_esize,
-					      LPFC_CQE_EXP_COUNT);
+					      LPFC_CQE_EXP_COUNT, cpu);
 
 	else
 		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
 					      phba->sli4_hba.cq_esize,
-					      phba->sli4_hba.cq_ecount);
+					      phba->sli4_hba.cq_ecount, cpu);
 	if (!qdesc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-			"0499 Failed allocate fast-path FCP CQ (%d)\n", wqidx);
+			"0499 Failed allocate fast-path IO CQ (%d)\n", idx);
 		return 1;
 	}
 	qdesc->qe_valid = 1;
-	phba->sli4_hba.fcp_cq[wqidx] = qdesc;
+	qdesc->hdwq = idx;
+	qdesc->chann = cpu;
+	phba->sli4_hba.hdwq[idx].io_cq = qdesc;
 
-	/* Create Fast Path FCP WQs */
+	/* Create Fast Path IO WQs */
 	if (phba->enab_exp_wqcq_pages) {
 		/* Increase the WQ size when WQEs contain an embedded cdb */
 		wqesize = (phba->fcp_embed_io) ?
 			LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
 		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
 					      wqesize,
-					      LPFC_WQE_EXP_COUNT);
+					      LPFC_WQE_EXP_COUNT, cpu);
 	} else
 		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
 					      phba->sli4_hba.wq_esize,
-					      phba->sli4_hba.wq_ecount);
+					      phba->sli4_hba.wq_ecount, cpu);
 
 	if (!qdesc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"0503 Failed allocate fast-path FCP WQ (%d)\n",
-				wqidx);
+				"0503 Failed allocate fast-path IO WQ (%d)\n",
+				idx);
 		return 1;
 	}
-	phba->sli4_hba.fcp_wq[wqidx] = qdesc;
+	qdesc->hdwq = idx;
+	qdesc->chann = cpu;
+	phba->sli4_hba.hdwq[idx].io_wq = qdesc;
 	list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
 	return 0;
 }
@@ -8209,16 +8713,16 @@
 lpfc_sli4_queue_create(struct lpfc_hba *phba)
 {
 	struct lpfc_queue *qdesc;
-	int idx, io_channel;
+	int idx, cpu, eqcpu;
+	struct lpfc_sli4_hdw_queue *qp;
+	struct lpfc_vector_map_info *cpup;
+	struct lpfc_vector_map_info *eqcpup;
+	struct lpfc_eq_intr_info *eqi;
 
 	/*
 	 * Create HBA Record arrays.
 	 * Both NVME and FCP will share that same vectors / EQs
 	 */
-	io_channel = phba->io_channel_irqs;
-	if (!io_channel)
-		return -ERANGE;
-
 	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
 	phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
 	phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
@@ -8230,87 +8734,37 @@
 	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
 	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
 
-	phba->sli4_hba.hba_eq =  kcalloc(io_channel,
-					sizeof(struct lpfc_queue *),
-					GFP_KERNEL);
-	if (!phba->sli4_hba.hba_eq) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-			"2576 Failed allocate memory for "
-			"fast-path EQ record array\n");
-		goto out_error;
-	}
-
-	if (phba->cfg_fcp_io_channel) {
-		phba->sli4_hba.fcp_cq = kcalloc(phba->cfg_fcp_io_channel,
-						sizeof(struct lpfc_queue *),
-						GFP_KERNEL);
-		if (!phba->sli4_hba.fcp_cq) {
+	if (!phba->sli4_hba.hdwq) {
+		phba->sli4_hba.hdwq = kcalloc(
+			phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue),
+			GFP_KERNEL);
+		if (!phba->sli4_hba.hdwq) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"2577 Failed allocate memory for "
-					"fast-path CQ record array\n");
+					"6427 Failed allocate memory for "
+					"fast-path Hardware Queue array\n");
 			goto out_error;
 		}
-		phba->sli4_hba.fcp_wq = kcalloc(phba->cfg_fcp_io_channel,
-						sizeof(struct lpfc_queue *),
-						GFP_KERNEL);
-		if (!phba->sli4_hba.fcp_wq) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"2578 Failed allocate memory for "
-					"fast-path FCP WQ record array\n");
-			goto out_error;
-		}
-		/*
-		 * Since the first EQ can have multiple CQs associated with it,
-		 * this array is used to quickly see if we have a FCP fast-path
-		 * CQ match.
-		 */
-		phba->sli4_hba.fcp_cq_map = kcalloc(phba->cfg_fcp_io_channel,
-							sizeof(uint16_t),
-							GFP_KERNEL);
-		if (!phba->sli4_hba.fcp_cq_map) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"2545 Failed allocate memory for "
-					"fast-path CQ map\n");
-			goto out_error;
+		/* Prepare hardware queues to take IO buffers */
+		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
+			qp = &phba->sli4_hba.hdwq[idx];
+			spin_lock_init(&qp->io_buf_list_get_lock);
+			spin_lock_init(&qp->io_buf_list_put_lock);
+			INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
+			INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
+			qp->get_io_bufs = 0;
+			qp->put_io_bufs = 0;
+			qp->total_io_bufs = 0;
+			spin_lock_init(&qp->abts_io_buf_list_lock);
+			INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list);
+			qp->abts_scsi_io_bufs = 0;
+			qp->abts_nvme_io_bufs = 0;
+			INIT_LIST_HEAD(&qp->sgl_list);
+			INIT_LIST_HEAD(&qp->cmd_rsp_buf_list);
+			spin_lock_init(&qp->hdwq_lock);
 		}
 	}
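+
+	/* Editorial sketch of the two-list buffer scheme initialized
+	 * above: allocations pop from the 'get' list under its own lock,
+	 * frees push to the 'put' list, and only when the get list runs
+	 * dry are the two spliced, roughly:
+	 *
+	 *	spin_lock(&qp->io_buf_list_get_lock);
+	 *	if (list_empty(&qp->lpfc_io_buf_list_get)) {
+	 *		spin_lock(&qp->io_buf_list_put_lock);
+	 *		list_splice_init(&qp->lpfc_io_buf_list_put,
+	 *				 &qp->lpfc_io_buf_list_get);
+	 *		spin_unlock(&qp->io_buf_list_put_lock);
+	 *	}
+	 *	...
+	 *	spin_unlock(&qp->io_buf_list_get_lock);
+	 *
+	 * keeping allocating and freeing contexts off each other's lock
+	 * most of the time.
+	 */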
 
-	if (phba->cfg_nvme_io_channel) {
-		phba->sli4_hba.nvme_cq = kcalloc(phba->cfg_nvme_io_channel,
-						sizeof(struct lpfc_queue *),
-						GFP_KERNEL);
-		if (!phba->sli4_hba.nvme_cq) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"6077 Failed allocate memory for "
-					"fast-path CQ record array\n");
-			goto out_error;
-		}
-
-		phba->sli4_hba.nvme_wq = kcalloc(phba->cfg_nvme_io_channel,
-						sizeof(struct lpfc_queue *),
-						GFP_KERNEL);
-		if (!phba->sli4_hba.nvme_wq) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"2581 Failed allocate memory for "
-					"fast-path NVME WQ record array\n");
-			goto out_error;
-		}
-
-		/*
-		 * Since the first EQ can have multiple CQs associated with it,
-		 * this array is used to quickly see if we have a NVME fast-path
-		 * CQ match.
-		 */
-		phba->sli4_hba.nvme_cq_map = kcalloc(phba->cfg_nvme_io_channel,
-							sizeof(uint16_t),
-							GFP_KERNEL);
-		if (!phba->sli4_hba.nvme_cq_map) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"6078 Failed allocate memory for "
-					"fast-path CQ map\n");
-			goto out_error;
-		}
-
+	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
 		if (phba->nvmet_support) {
 			phba->sli4_hba.nvmet_cqset = kcalloc(
 					phba->cfg_nvmet_mrq,
@@ -8348,43 +8802,85 @@
 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
 
 	/* Create HBA Event Queues (EQs) */
-	for (idx = 0; idx < io_channel; idx++) {
-		/* Create EQs */
+	for_each_present_cpu(cpu) {
+		/* We only want to create 1 EQ per vector, even though
+		 * multiple CPUs might be using that vector, so we only
+		 * select the CPUs that are marked LPFC_CPU_FIRST_IRQ.
+		 */
+		cpup = &phba->sli4_hba.cpu_map[cpu];
+		if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
+			continue;
+
+		/* Get a ptr to the Hardware Queue associated with this CPU */
+		qp = &phba->sli4_hba.hdwq[cpup->hdwq];
+
+		/* Allocate an EQ */
 		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
 					      phba->sli4_hba.eq_esize,
-					      phba->sli4_hba.eq_ecount);
+					      phba->sli4_hba.eq_ecount, cpu);
 		if (!qdesc) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"0497 Failed allocate EQ (%d)\n", idx);
+					"0497 Failed allocate EQ (%d)\n",
+					cpup->hdwq);
 			goto out_error;
 		}
 		qdesc->qe_valid = 1;
-		phba->sli4_hba.hba_eq[idx] = qdesc;
+		qdesc->hdwq = cpup->hdwq;
+		qdesc->chann = cpu; /* First CPU this EQ is affinitized to */
+		qdesc->last_cpu = qdesc->chann;
+
+		/* Save the allocated EQ in the Hardware Queue */
+		qp->hba_eq = qdesc;
+
+		eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu);
+		list_add(&qdesc->cpu_list, &eqi->list);
 	}
 
-	/* FCP and NVME io channels are not required to be balanced */
+	/* Now populate the remaining Hardware Queues, the ones that
+	 * share an IRQ vector, with a pointer to the shared EQ.
+	 */
+	for_each_present_cpu(cpu) {
+		cpup = &phba->sli4_hba.cpu_map[cpu];
 
-	for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
-		if (lpfc_alloc_fcp_wq_cq(phba, idx))
-			goto out_error;
+		/* Check for EQ already allocated in previous loop */
+		if (cpup->flag & LPFC_CPU_FIRST_IRQ)
+			continue;
 
-	for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++)
-		if (lpfc_alloc_nvme_wq_cq(phba, idx))
+		/* Check for multiple CPUs per hdwq */
+		qp = &phba->sli4_hba.hdwq[cpup->hdwq];
+		if (qp->hba_eq)
+			continue;
+
+		/* We need to share an EQ for this hdwq */
+		eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ);
+		eqcpup = &phba->sli4_hba.cpu_map[eqcpu];
+		qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq;
+	}
+
+	/* Allocate IO Path SLI4 CQ/WQs */
+	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
+		if (lpfc_alloc_io_wq_cq(phba, idx))
 			goto out_error;
+	}
 
 	if (phba->nvmet_support) {
 		for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
+			cpu = lpfc_find_cpu_handle(phba, idx,
+						   LPFC_FIND_BY_HDWQ);
 			qdesc = lpfc_sli4_queue_alloc(phba,
 						      LPFC_DEFAULT_PAGE_SIZE,
 						      phba->sli4_hba.cq_esize,
-						      phba->sli4_hba.cq_ecount);
+						      phba->sli4_hba.cq_ecount,
+						      cpu);
 			if (!qdesc) {
 				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"3142 Failed allocate NVME "
-					"CQ Set (%d)\n", idx);
+						"3142 Failed allocate NVME "
+						"CQ Set (%d)\n", idx);
 				goto out_error;
 			}
 			qdesc->qe_valid = 1;
+			qdesc->hdwq = idx;
+			qdesc->chann = cpu;
 			phba->sli4_hba.nvmet_cqset[idx] = qdesc;
 		}
 	}
@@ -8393,10 +8889,11 @@
 	 * Create Slow Path Completion Queues (CQs)
 	 */
 
+	cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ);
 	/* Create slow-path Mailbox Command Complete Queue */
 	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
 				      phba->sli4_hba.cq_esize,
-				      phba->sli4_hba.cq_ecount);
+				      phba->sli4_hba.cq_ecount, cpu);
 	if (!qdesc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0500 Failed allocate slow-path mailbox CQ\n");
@@ -8408,13 +8905,14 @@
 	/* Create slow-path ELS Complete Queue */
 	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
 				      phba->sli4_hba.cq_esize,
-				      phba->sli4_hba.cq_ecount);
+				      phba->sli4_hba.cq_ecount, cpu);
 	if (!qdesc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0501 Failed allocate slow-path ELS CQ\n");
 		goto out_error;
 	}
 	qdesc->qe_valid = 1;
+	qdesc->chann = cpu;
 	phba->sli4_hba.els_cq = qdesc;
 
 
@@ -8426,12 +8924,13 @@
 
 	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
 				      phba->sli4_hba.mq_esize,
-				      phba->sli4_hba.mq_ecount);
+				      phba->sli4_hba.mq_ecount, cpu);
 	if (!qdesc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0505 Failed allocate slow-path MQ\n");
 		goto out_error;
 	}
+	qdesc->chann = cpu;
 	phba->sli4_hba.mbx_wq = qdesc;
 
 	/*
@@ -8441,12 +8940,13 @@
 	/* Create slow-path ELS Work Queue */
 	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
 				      phba->sli4_hba.wq_esize,
-				      phba->sli4_hba.wq_ecount);
+				      phba->sli4_hba.wq_ecount, cpu);
 	if (!qdesc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0504 Failed allocate slow-path ELS WQ\n");
 		goto out_error;
 	}
+	qdesc->chann = cpu;
 	phba->sli4_hba.els_wq = qdesc;
 	list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
 
@@ -8454,24 +8954,26 @@
 		/* Create NVME LS Complete Queue */
 		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
 					      phba->sli4_hba.cq_esize,
-					      phba->sli4_hba.cq_ecount);
+					      phba->sli4_hba.cq_ecount, cpu);
 		if (!qdesc) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 					"6079 Failed allocate NVME LS CQ\n");
 			goto out_error;
 		}
+		qdesc->chann = cpu;
 		qdesc->qe_valid = 1;
 		phba->sli4_hba.nvmels_cq = qdesc;
 
 		/* Create NVME LS Work Queue */
 		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
 					      phba->sli4_hba.wq_esize,
-					      phba->sli4_hba.wq_ecount);
+					      phba->sli4_hba.wq_ecount, cpu);
 		if (!qdesc) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 					"6080 Failed allocate NVME LS WQ\n");
 			goto out_error;
 		}
+		qdesc->chann = cpu;
 		phba->sli4_hba.nvmels_wq = qdesc;
 		list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
 	}
@@ -8483,7 +8985,7 @@
 	/* Create Receive Queue for header */
 	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
 				      phba->sli4_hba.rq_esize,
-				      phba->sli4_hba.rq_ecount);
+				      phba->sli4_hba.rq_ecount, cpu);
 	if (!qdesc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0506 Failed allocate receive HRQ\n");
@@ -8494,7 +8996,7 @@
 	/* Create Receive Queue for data */
 	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
 				      phba->sli4_hba.rq_esize,
-				      phba->sli4_hba.rq_ecount);
+				      phba->sli4_hba.rq_ecount, cpu);
 	if (!qdesc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0507 Failed allocate receive DRQ\n");
@@ -8502,24 +9004,30 @@
 	}
 	phba->sli4_hba.dat_rq = qdesc;
 
-	if (phba->nvmet_support) {
+	if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
+	    phba->nvmet_support) {
 		for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
+			cpu = lpfc_find_cpu_handle(phba, idx,
+						   LPFC_FIND_BY_HDWQ);
 			/* Create NVMET Receive Queue for header */
 			qdesc = lpfc_sli4_queue_alloc(phba,
 						      LPFC_DEFAULT_PAGE_SIZE,
 						      phba->sli4_hba.rq_esize,
-						      LPFC_NVMET_RQE_DEF_COUNT);
+						      LPFC_NVMET_RQE_DEF_COUNT,
+						      cpu);
 			if (!qdesc) {
 				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 						"3146 Failed allocate "
 						"receive HRQ\n");
 				goto out_error;
 			}
+			qdesc->hdwq = idx;
 			phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;
 
 			/* Only needed for header of RQ pair */
-			qdesc->rqbp = kzalloc(sizeof(struct lpfc_rqb),
-					      GFP_KERNEL);
+			qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp),
+						   GFP_KERNEL,
+						   cpu_to_node(cpu));
 			if (qdesc->rqbp == NULL) {
 				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 						"6131 Failed allocate "
@@ -8534,20 +9042,35 @@
 			qdesc = lpfc_sli4_queue_alloc(phba,
 						      LPFC_DEFAULT_PAGE_SIZE,
 						      phba->sli4_hba.rq_esize,
-						      LPFC_NVMET_RQE_DEF_COUNT);
+						      LPFC_NVMET_RQE_DEF_COUNT,
+						      cpu);
 			if (!qdesc) {
 				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 						"3156 Failed allocate "
 						"receive DRQ\n");
 				goto out_error;
 			}
+			qdesc->hdwq = idx;
 			phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
 		}
 	}
 
-	/* Create the Queues needed for Flash Optimized Fabric operations */
-	if (phba->cfg_fof)
-		lpfc_fof_queue_create(phba);
+	/* Clear NVME stats */
+	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
+			memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0,
+			       sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat));
+		}
+	}
+
+	/* Clear SCSI stats */
+	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
+		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
+			memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0,
+			       sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat));
+		}
+	}
+
 	return 0;
 
 out_error:
@@ -8580,11 +9103,31 @@
 }
 
 static inline void
-lpfc_sli4_release_queue_map(uint16_t **qmap)
+lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
 {
-	if (*qmap != NULL) {
-		kfree(*qmap);
-		*qmap = NULL;
+	struct lpfc_sli4_hdw_queue *hdwq;
+	struct lpfc_queue *eq;
+	uint32_t idx;
+
+	hdwq = phba->sli4_hba.hdwq;
+
+	/* Loop thru all Hardware Queues */
+	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
+		/* Free the CQ/WQ corresponding to the Hardware Queue */
+		lpfc_sli4_queue_free(hdwq[idx].io_cq);
+		lpfc_sli4_queue_free(hdwq[idx].io_wq);
+		hdwq[idx].io_cq = NULL;
+		hdwq[idx].io_wq = NULL;
+		if (phba->cfg_xpsgl && !phba->nvmet_support)
+			lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]);
+		lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]);
+	}
+	/* Loop thru all IRQ vectors */
+	for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
+		/* Free the EQ corresponding to the IRQ vector */
+		eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
+		lpfc_sli4_queue_free(eq);
+		phba->sli4_hba.hba_eq_hdl[idx].eq = NULL;
 	}
 }
 
@@ -8603,33 +9146,23 @@
 void
 lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
 {
-	if (phba->cfg_fof)
-		lpfc_fof_queue_destroy(phba);
+	/*
+	 * Set FREE_INIT before beginning to free the queues.
+	 * Wait until any users of the queues acknowledge the
+	 * release by clearing FREE_WAIT.
+	 */
+	spin_lock_irq(&phba->hbalock);
+	phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT;
+	while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) {
+		spin_unlock_irq(&phba->hbalock);
+		msleep(20);
+		spin_lock_irq(&phba->hbalock);
+	}
+	spin_unlock_irq(&phba->hbalock);
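+
+	/*
+	 * Editorial sketch of the other half of this handshake: a user
+	 * of the queues sets LPFC_QUEUE_FREE_WAIT for the duration of
+	 * its walk, bailing out early if teardown has begun, roughly:
+	 *
+	 *	spin_lock_irq(&phba->hbalock);
+	 *	if (phba->sli.sli_flag & LPFC_QUEUE_FREE_INIT) {
+	 *		spin_unlock_irq(&phba->hbalock);
+	 *		return;
+	 *	}
+	 *	phba->sli.sli_flag |= LPFC_QUEUE_FREE_WAIT;
+	 *	spin_unlock_irq(&phba->hbalock);
+	 *	... touch the queues ...
+	 *	spin_lock_irq(&phba->hbalock);
+	 *	phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_WAIT;
+	 *	spin_unlock_irq(&phba->hbalock);
+	 */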
 
 	/* Release HBA eqs */
-	lpfc_sli4_release_queues(&phba->sli4_hba.hba_eq, phba->io_channel_irqs);
-
-	/* Release FCP cqs */
-	lpfc_sli4_release_queues(&phba->sli4_hba.fcp_cq,
-				 phba->cfg_fcp_io_channel);
-
-	/* Release FCP wqs */
-	lpfc_sli4_release_queues(&phba->sli4_hba.fcp_wq,
-				 phba->cfg_fcp_io_channel);
-
-	/* Release FCP CQ mapping array */
-	lpfc_sli4_release_queue_map(&phba->sli4_hba.fcp_cq_map);
-
-	/* Release NVME cqs */
-	lpfc_sli4_release_queues(&phba->sli4_hba.nvme_cq,
-					phba->cfg_nvme_io_channel);
-
-	/* Release NVME wqs */
-	lpfc_sli4_release_queues(&phba->sli4_hba.nvme_wq,
-					phba->cfg_nvme_io_channel);
-
-	/* Release NVME CQ mapping array */
-	lpfc_sli4_release_queue_map(&phba->sli4_hba.nvme_cq_map);
+	if (phba->sli4_hba.hdwq)
+		lpfc_sli4_release_hdwq(phba);
 
 	if (phba->nvmet_support) {
 		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
@@ -8665,6 +9198,11 @@
 
 	/* Everything on this list has been freed */
 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
+
+	/* Done with freeing the queues */
+	spin_lock_irq(&phba->hbalock);
+	phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT;
+	spin_unlock_irq(&phba->hbalock);
 }
 
 int
@@ -8710,10 +9248,9 @@
 			qidx, (uint32_t)rc);
 		return rc;
 	}
-	cq->chann = qidx;
 
 	if (qtype != LPFC_MBOX) {
-		/* Setup nvme_cq_map for fast lookup */
+		/* Setup cq_map for fast lookup */
 		if (cq_map)
 			*cq_map = cq->queue_id;
 
@@ -8725,12 +9262,11 @@
 		rc = lpfc_wq_create(phba, wq, cq, qtype);
 		if (rc) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"6123 Fail setup fastpath WQ (%d), rc = 0x%x\n",
+				"4618 Fail setup fastpath WQ (%d), rc = 0x%x\n",
 				qidx, (uint32_t)rc);
 			/* no need to tear down cq - caller will do so */
 			return rc;
 		}
-		wq->chann = qidx;
 
 		/* Bind this CQ/WQ to the NVME ring */
 		pring = wq->pring;
@@ -8760,6 +9296,38 @@
 }
 
 /**
+ * lpfc_setup_cq_lookup - Setup the CQ lookup table
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine will populate the cq_lookup table by all
+ * available CQ queue_id's.
+ **/
+static void
+lpfc_setup_cq_lookup(struct lpfc_hba *phba)
+{
+	struct lpfc_queue *eq, *childq;
+	int qidx;
+
+	memset(phba->sli4_hba.cq_lookup, 0,
+	       (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1)));
+	/* Loop thru all IRQ vectors */
+	for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
+		/* Get the EQ corresponding to the IRQ vector */
+		eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
+		if (!eq)
+			continue;
+		/* Loop through all CQs associated with that EQ */
+		list_for_each_entry(childq, &eq->child_list, list) {
+			if (childq->queue_id > phba->sli4_hba.cq_max)
+				continue;
+			if (childq->subtype == LPFC_IO)
+				phba->sli4_hba.cq_lookup[childq->queue_id] =
+					childq;
+		}
+	}
+}
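+
+/*
+ * Editorial note: the payoff for this table is on the completion path,
+ * where a CQ ID from an EQE can be resolved in O(1) instead of walking
+ * the EQ's child list, roughly:
+ *
+ *	if (cqid <= phba->sli4_hba.cq_max)
+ *		cq = phba->sli4_hba.cq_lookup[cqid];
+ *
+ * with a NULL entry falling back to the slower list search.
+ */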
+
+/**
  * lpfc_sli4_queue_setup - Set up all the SLI4 queues
  * @phba: pointer to lpfc hba data structure.
  *
@@ -8776,9 +9344,11 @@
 {
 	uint32_t shdr_status, shdr_add_status;
 	union lpfc_sli4_cfg_shdr *shdr;
+	struct lpfc_vector_map_info *cpup;
+	struct lpfc_sli4_hdw_queue *qp;
 	LPFC_MBOXQ_t *mboxq;
-	int qidx;
-	uint32_t length, io_channel;
+	int qidx, cpu;
+	uint32_t length, usdelay;
 	int rc = -ENOMEM;
 
 	/* Check for dual-ULP support */
@@ -8829,89 +9399,71 @@
 	/*
 	 * Set up HBA Event Queues (EQs)
 	 */
-	io_channel = phba->io_channel_irqs;
+	qp = phba->sli4_hba.hdwq;
 
 	/* Set up HBA event queue */
-	if (io_channel && !phba->sli4_hba.hba_eq) {
+	if (!qp) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"3147 Fast-path EQs not allocated\n");
 		rc = -ENOMEM;
 		goto out_error;
 	}
-	for (qidx = 0; qidx < io_channel; qidx++) {
-		if (!phba->sli4_hba.hba_eq[qidx]) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"0522 Fast-path EQ (%d) not "
-					"allocated\n", qidx);
-			rc = -ENOMEM;
-			goto out_destroy;
+
+	/* Loop thru all IRQ vectors */
+	for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
+		/* Create HBA Event Queues (EQs) in order */
+		for_each_present_cpu(cpu) {
+			cpup = &phba->sli4_hba.cpu_map[cpu];
+
+			/* Look for the CPU that's using that vector with
+			 * LPFC_CPU_FIRST_IRQ set.
+			 */
+			if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
+				continue;
+			if (qidx != cpup->eq)
+				continue;
+
+			/* Create an EQ for that vector */
+			rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq,
+					    phba->cfg_fcp_imax);
+			if (rc) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+						"0523 Failed setup of fast-path"
+						" EQ (%d), rc = 0x%x\n",
+						cpup->eq, (uint32_t)rc);
+				goto out_destroy;
+			}
+
+			/* Save the EQ for that vector in the hba_eq_hdl */
+			phba->sli4_hba.hba_eq_hdl[cpup->eq].eq =
+				qp[cpup->hdwq].hba_eq;
+
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"2584 HBA EQ setup: queue[%d]-id=%d\n",
+					cpup->eq,
+					qp[cpup->hdwq].hba_eq->queue_id);
 		}
-		rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[qidx],
-						phba->cfg_fcp_imax);
+	}
+
+	/* Loop thru all Hardware Queues */
+	for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
+		cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ);
+		cpup = &phba->sli4_hba.cpu_map[cpu];
+
+		/* Create the CQ/WQ corresponding to the Hardware Queue */
+		rc = lpfc_create_wq_cq(phba,
+				       phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
+				       qp[qidx].io_cq,
+				       qp[qidx].io_wq,
+				       &phba->sli4_hba.hdwq[qidx].io_cq_map,
+				       qidx,
+				       LPFC_IO);
 		if (rc) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"0523 Failed setup of fast-path EQ "
-					"(%d), rc = 0x%x\n", qidx,
-					(uint32_t)rc);
-			goto out_destroy;
-		}
-		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-				"2584 HBA EQ setup: queue[%d]-id=%d\n",
-				qidx, phba->sli4_hba.hba_eq[qidx]->queue_id);
-	}
-
-	if (phba->cfg_nvme_io_channel) {
-		if (!phba->sli4_hba.nvme_cq || !phba->sli4_hba.nvme_wq) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"6084 Fast-path NVME %s array not allocated\n",
-				(phba->sli4_hba.nvme_cq) ? "CQ" : "WQ");
-			rc = -ENOMEM;
-			goto out_destroy;
-		}
-
-		for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) {
-			rc = lpfc_create_wq_cq(phba,
-					phba->sli4_hba.hba_eq[
-						qidx % io_channel],
-					phba->sli4_hba.nvme_cq[qidx],
-					phba->sli4_hba.nvme_wq[qidx],
-					&phba->sli4_hba.nvme_cq_map[qidx],
-					qidx, LPFC_NVME);
-			if (rc) {
-				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"6123 Failed to setup fastpath "
-					"NVME WQ/CQ (%d), rc = 0x%x\n",
-					qidx, (uint32_t)rc);
-				goto out_destroy;
-			}
-		}
-	}
-
-	if (phba->cfg_fcp_io_channel) {
-		/* Set up fast-path FCP Response Complete Queue */
-		if (!phba->sli4_hba.fcp_cq || !phba->sli4_hba.fcp_wq) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"3148 Fast-path FCP %s array not allocated\n",
-				phba->sli4_hba.fcp_cq ? "WQ" : "CQ");
-			rc = -ENOMEM;
-			goto out_destroy;
-		}
-
-		for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) {
-			rc = lpfc_create_wq_cq(phba,
-					phba->sli4_hba.hba_eq[
-						qidx % io_channel],
-					phba->sli4_hba.fcp_cq[qidx],
-					phba->sli4_hba.fcp_wq[qidx],
-					&phba->sli4_hba.fcp_cq_map[qidx],
-					qidx, LPFC_FCP);
-			if (rc) {
-				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 					"0535 Failed to setup fastpath "
-					"FCP WQ/CQ (%d), rc = 0x%x\n",
+					"IO WQ/CQ (%d), rc = 0x%x\n",
 					qidx, (uint32_t)rc);
-				goto out_destroy;
-			}
+			goto out_destroy;
 		}
 	}
 
@@ -8930,7 +9482,7 @@
 		goto out_destroy;
 	}
 
-	rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
+	rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
 			       phba->sli4_hba.mbx_cq,
 			       phba->sli4_hba.mbx_wq,
 			       NULL, 0, LPFC_MBOX);
@@ -8951,7 +9503,7 @@
 		if (phba->cfg_nvmet_mrq > 1) {
 			rc = lpfc_cq_create_set(phba,
 					phba->sli4_hba.nvmet_cqset,
-					phba->sli4_hba.hba_eq,
+					qp,
 					LPFC_WCQ, LPFC_NVMET);
 			if (rc) {
 				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8963,7 +9515,7 @@
 		} else {
 			/* Set up NVMET Receive Complete Queue */
 			rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
-					    phba->sli4_hba.hba_eq[0],
+					    qp[0].hba_eq,
 					    LPFC_WCQ, LPFC_NVMET);
 			if (rc) {
 				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8977,7 +9529,7 @@
 					"6090 NVMET CQ setup: cq-id=%d, "
 					"parent eq-id=%d\n",
 					phba->sli4_hba.nvmet_cqset[0]->queue_id,
-					phba->sli4_hba.hba_eq[0]->queue_id);
+					qp[0].hba_eq->queue_id);
 		}
 	}
 
@@ -8989,14 +9541,14 @@
 		rc = -ENOMEM;
 		goto out_destroy;
 	}
-	rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
-					phba->sli4_hba.els_cq,
-					phba->sli4_hba.els_wq,
-					NULL, 0, LPFC_ELS);
+	rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
+			       phba->sli4_hba.els_cq,
+			       phba->sli4_hba.els_wq,
+			       NULL, 0, LPFC_ELS);
 	if (rc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-			"0529 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
-			(uint32_t)rc);
+				"0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
+				(uint32_t)rc);
 		goto out_destroy;
 	}
 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -9004,7 +9556,7 @@
 			phba->sli4_hba.els_wq->queue_id,
 			phba->sli4_hba.els_cq->queue_id);
 
-	if (phba->cfg_nvme_io_channel) {
+	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
 		/* Set up NVME LS Complete Queue */
 		if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -9013,14 +9565,14 @@
 			rc = -ENOMEM;
 			goto out_destroy;
 		}
-		rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
-					phba->sli4_hba.nvmels_cq,
-					phba->sli4_hba.nvmels_wq,
-					NULL, 0, LPFC_NVME_LS);
+		rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
+				       phba->sli4_hba.nvmels_cq,
+				       phba->sli4_hba.nvmels_wq,
+				       NULL, 0, LPFC_NVME_LS);
 		if (rc) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"0529 Failed setup of NVVME LS WQ/CQ: "
-				"rc = 0x%x\n", (uint32_t)rc);
+					"0526 Failed setup of NVVME LS WQ/CQ: "
+					"rc = 0x%x\n", (uint32_t)rc);
 			goto out_destroy;
 		}
 
@@ -9106,20 +9658,29 @@
 			phba->sli4_hba.dat_rq->queue_id,
 			phba->sli4_hba.els_cq->queue_id);
 
-	if (phba->cfg_fof) {
-		rc = lpfc_fof_queue_setup(phba);
-		if (rc) {
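+	/* Convert cfg_fcp_imax (max interrupts per second) into the
+	 * equivalent per-EQ coalescing delay in microseconds.
+	 */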
+	if (phba->cfg_fcp_imax)
+		usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
+	else
+		usdelay = 0;
+
+	for (qidx = 0; qidx < phba->cfg_irq_chann;
+	     qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
+		lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
+					 usdelay);
+
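+	/* Build a CQ-id to queue-pointer lookup table so the fast-path
+	 * interrupt handler can resolve a completion's CQ without
+	 * walking queue lists.
+	 */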
+	if (phba->sli4_hba.cq_max) {
+		kfree(phba->sli4_hba.cq_lookup);
+		phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1),
+			sizeof(struct lpfc_queue *), GFP_KERNEL);
+		if (!phba->sli4_hba.cq_lookup) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"0549 Failed setup of FOF Queues: "
-					"rc = 0x%x\n", rc);
+					"0549 Failed setup of CQ Lookup table: "
+					"size 0x%x\n", phba->sli4_hba.cq_max);
+			rc = -ENOMEM;
 			goto out_destroy;
 		}
+		lpfc_setup_cq_lookup(phba);
 	}
-
-	for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
-		lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
-					 phba->cfg_fcp_imax);
-
 	return 0;
 
 out_destroy:
@@ -9143,12 +9704,10 @@
 void
 lpfc_sli4_queue_unset(struct lpfc_hba *phba)
 {
+	struct lpfc_sli4_hdw_queue *qp;
+	struct lpfc_queue *eq;
 	int qidx;
 
-	/* Unset the queues created for Flash Optimized Fabric operations */
-	if (phba->cfg_fof)
-		lpfc_fof_queue_destroy(phba);
-
 	/* Unset mailbox command work queue */
 	if (phba->sli4_hba.mbx_wq)
 		lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
@@ -9166,17 +9725,6 @@
 		lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
 				phba->sli4_hba.dat_rq);
 
-	/* Unset FCP work queue */
-	if (phba->sli4_hba.fcp_wq)
-		for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
-			lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[qidx]);
-
-	/* Unset NVME work queue */
-	if (phba->sli4_hba.nvme_wq) {
-		for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
-			lpfc_wq_destroy(phba, phba->sli4_hba.nvme_wq[qidx]);
-	}
-
 	/* Unset mailbox command complete queue */
 	if (phba->sli4_hba.mbx_cq)
 		lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
@@ -9189,11 +9737,6 @@
 	if (phba->sli4_hba.nvmels_cq)
 		lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);
 
-	/* Unset NVME response complete queue */
-	if (phba->sli4_hba.nvme_cq)
-		for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
-			lpfc_cq_destroy(phba, phba->sli4_hba.nvme_cq[qidx]);
-
 	if (phba->nvmet_support) {
 		/* Unset NVMET MRQ queue */
 		if (phba->sli4_hba.nvmet_mrq_hdr) {
@@ -9212,15 +9755,26 @@
 		}
 	}
 
-	/* Unset FCP response complete queue */
-	if (phba->sli4_hba.fcp_cq)
-		for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
-			lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[qidx]);
+	/* Unset fast-path SLI4 queues */
+	if (phba->sli4_hba.hdwq) {
+		/* Loop thru all Hardware Queues */
+		for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
+			/* Destroy the CQ/WQ corresponding to Hardware Queue */
+			qp = &phba->sli4_hba.hdwq[qidx];
+			lpfc_wq_destroy(phba, qp->io_wq);
+			lpfc_cq_destroy(phba, qp->io_cq);
+		}
+		/* Loop thru all IRQ vectors */
+		for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
+			/* Destroy the EQ corresponding to the IRQ vector */
+			eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
+			lpfc_eq_destroy(phba, eq);
+		}
+	}
 
-	/* Unset fast-path event queue */
-	if (phba->sli4_hba.hba_eq)
-		for (qidx = 0; qidx < phba->io_channel_irqs; qidx++)
-			lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[qidx]);
+	kfree(phba->sli4_hba.cq_lookup);
+	phba->sli4_hba.cq_lookup = NULL;
+	phba->sli4_hba.cq_max = 0;
 }
 
 /**
@@ -9536,25 +10090,20 @@
 static int
 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
 {
-	struct pci_dev *pdev;
+	struct pci_dev *pdev = phba->pcidev;
 	unsigned long bar0map_len, bar1map_len, bar2map_len;
-	int error = -ENODEV;
+	int error;
 	uint32_t if_type;
 
-	/* Obtain PCI device reference */
-	if (!phba->pcidev)
-		return error;
-	else
-		pdev = phba->pcidev;
+	if (!pdev)
+		return -ENODEV;
 
 	/* Set the device DMA mask size */
-	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
-	 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
-		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
-		 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
-			return error;
-		}
-	}
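+	/* Prefer a 64-bit DMA mask, falling back to 32-bit if unsupported */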
+	error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (error)
+		error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (error)
+		return error;
 
 	/*
 	 * The BARs and register set definitions and offset locations are
@@ -9562,7 +10111,7 @@
 	 */
 	if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
 				  &phba->sli4_hba.sli_intf.word0)) {
-		return error;
+		return -ENODEV;
 	}
 
 	/* There is no SLI3 failback for SLI4 devices. */
@@ -9572,7 +10121,7 @@
 				"2894 SLI_INTF reg contents invalid "
 				"sli_intf reg 0x%x\n",
 				phba->sli4_hba.sli_intf.word0);
-		return error;
+		return -ENODEV;
 	}
 
 	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
@@ -9596,7 +10145,7 @@
 			dev_printk(KERN_ERR, &pdev->dev,
 				   "ioremap failed for SLI4 PCI config "
 				   "registers.\n");
-			goto out;
+			return -ENODEV;
 		}
 		phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
 		/* Set up BAR0 PCI config space register memory map */
@@ -9607,7 +10156,7 @@
 		if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
 			dev_printk(KERN_ERR, &pdev->dev,
 			   "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
-			goto out;
+			return -ENODEV;
 		}
 		phba->sli4_hba.conf_regs_memmap_p =
 				ioremap(phba->pci_bar0_map, bar0map_len);
@@ -9615,7 +10164,7 @@
 			dev_printk(KERN_ERR, &pdev->dev,
 				"ioremap failed for SLI4 PCI config "
 				"registers.\n");
-				goto out;
+			return -ENODEV;
 		}
 		lpfc_sli4_bar0_register_memmap(phba, if_type);
 	}
@@ -9661,6 +10210,7 @@
 		if (!phba->sli4_hba.drbl_regs_memmap_p) {
 			dev_err(&pdev->dev,
 			   "ioremap failed for SLI4 HBA doorbell registers.\n");
+			error = -ENOMEM;
 			goto out_iounmap_conf;
 		}
 		phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
@@ -9710,6 +10260,7 @@
 		if (!phba->sli4_hba.dpp_regs_memmap_p) {
 			dev_err(&pdev->dev,
 			   "ioremap failed for SLI4 HBA dpp registers.\n");
+			error = -ENOMEM;
 			goto out_iounmap_ctrl;
 		}
 		phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
@@ -9720,13 +10271,13 @@
 	case LPFC_SLI_INTF_IF_TYPE_0:
 	case LPFC_SLI_INTF_IF_TYPE_2:
 		phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
-		phba->sli4_hba.sli4_eq_release = lpfc_sli4_eq_release;
-		phba->sli4_hba.sli4_cq_release = lpfc_sli4_cq_release;
+		phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db;
+		phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db;
 		break;
 	case LPFC_SLI_INTF_IF_TYPE_6:
 		phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
-		phba->sli4_hba.sli4_eq_release = lpfc_sli4_if6_eq_release;
-		phba->sli4_hba.sli4_cq_release = lpfc_sli4_if6_cq_release;
+		phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db;
+		phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db;
 		break;
 	default:
 		break;
@@ -9740,7 +10291,7 @@
 	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
 out_iounmap_conf:
 	iounmap(phba->sli4_hba.conf_regs_memmap_p);
-out:
+
 	return error;
 }
 
@@ -10007,58 +10558,409 @@
 }
 
 /**
+ * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue
+ * @phba: pointer to lpfc hba data structure.
+ * @id: EQ vector index or Hardware Queue index
+ * @match: LPFC_FIND_BY_EQ = match by EQ
+ *         LPFC_FIND_BY_HDWQ = match by Hardware Queue
+ * Return the CPU that matches the selection criteria, or 0 if no match
+ * is found.
+ */
+static uint16_t
+lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
+{
+	struct lpfc_vector_map_info *cpup;
+	int cpu;
+
+	/* Loop through all CPUs */
+	for_each_present_cpu(cpu) {
+		cpup = &phba->sli4_hba.cpu_map[cpu];
+
+		/* If we are matching by EQ, there may be multiple CPUs
+		 * using the same vector, so select the one with
+		 * LPFC_CPU_FIRST_IRQ set.
+		 */
+		if ((match == LPFC_FIND_BY_EQ) &&
+		    (cpup->flag & LPFC_CPU_FIRST_IRQ) &&
+		    (cpup->irq != LPFC_VECTOR_MAP_EMPTY) &&
+		    (cpup->eq == id))
+			return cpu;
+
+		/* If matching by HDWQ, select the first CPU that matches */
+		if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id))
+			return cpu;
+	}
+	return 0;
+}
+
+#ifdef CONFIG_X86
+/**
+ * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded
+ * @phba: pointer to lpfc hba data structure.
+ * @cpu: CPU map index
+ * @phys_id: CPU package physical id
+ * @core_id: CPU core id
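+ *
+ * Returns 1 if another present CPU shares this phys_id/core_id pair
+ * (a hyper-thread sibling), otherwise 0.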
+ */
+static int
+lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
+		uint16_t phys_id, uint16_t core_id)
+{
+	struct lpfc_vector_map_info *cpup;
+	int idx;
+
+	for_each_present_cpu(idx) {
+		cpup = &phba->sli4_hba.cpu_map[idx];
+		/* Does the cpup match the one we are looking for */
+		if ((cpup->phys_id == phys_id) &&
+		    (cpup->core_id == core_id) &&
+		    (cpu != idx))
+			return 1;
+	}
+	return 0;
+}
+#endif
+
+/**
  * lpfc_cpu_affinity_check - Check vector CPU affinity mappings
  * @phba: pointer to lpfc hba data structure.
  * @vectors: number of msix vectors allocated.
  *
  * The routine will figure out the CPU affinity assignment for every
- * MSI-X vector allocated for the HBA.  The hba_eq_hdl will be updated
- * with a pointer to the CPU mask that defines ALL the CPUs this vector
- * can be associated with. If the vector can be unquely associated with
- * a single CPU, that CPU will be recorded in hba_eq_hdl[index].cpu.
+ * MSI-X vector allocated for the HBA.
  * In addition, the CPU to IO channel mapping will be calculated
  * and the phba->sli4_hba.cpu_map array will reflect this.
  */
 static void
 lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
 {
+	int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;
+	int max_phys_id, min_phys_id;
+	int max_core_id, min_core_id;
 	struct lpfc_vector_map_info *cpup;
-	int index = 0;
-	int vec = 0;
-	int cpu;
+	struct lpfc_vector_map_info *new_cpup;
+	const struct cpumask *maskp;
 #ifdef CONFIG_X86
 	struct cpuinfo_x86 *cpuinfo;
 #endif
 
 	/* Init cpu_map array */
-	memset(phba->sli4_hba.cpu_map, 0xff,
-	       (sizeof(struct lpfc_vector_map_info) *
-	       phba->sli4_hba.num_present_cpu));
+	for_each_possible_cpu(cpu) {
+		cpup = &phba->sli4_hba.cpu_map[cpu];
+		cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
+		cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
+		cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
+		cpup->eq = LPFC_VECTOR_MAP_EMPTY;
+		cpup->irq = LPFC_VECTOR_MAP_EMPTY;
+		cpup->flag = 0;
+	}
+
+	max_phys_id = 0;
+	min_phys_id = LPFC_VECTOR_MAP_EMPTY;
+	max_core_id = 0;
+	min_core_id = LPFC_VECTOR_MAP_EMPTY;
 
 	/* Update CPU map with physical id and core id of each CPU */
-	cpup = phba->sli4_hba.cpu_map;
-	for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
+	for_each_present_cpu(cpu) {
+		cpup = &phba->sli4_hba.cpu_map[cpu];
 #ifdef CONFIG_X86
 		cpuinfo = &cpu_data(cpu);
 		cpup->phys_id = cpuinfo->phys_proc_id;
 		cpup->core_id = cpuinfo->cpu_core_id;
+		if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
+			cpup->flag |= LPFC_CPU_MAP_HYPER;
 #else
 		/* No distinction between CPUs for other platforms */
 		cpup->phys_id = 0;
-		cpup->core_id = 0;
+		cpup->core_id = cpu;
 #endif
-		cpup->channel_id = index;  /* For now round robin */
-		cpup->irq = pci_irq_vector(phba->pcidev, vec);
-		vec++;
-		if (vec >= vectors)
-			vec = 0;
-		index++;
-		if (index >= phba->cfg_fcp_io_channel)
-			index = 0;
-		cpup++;
-	}
-}
 
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"3328 CPU %d physid %d coreid %d flag x%x\n",
+				cpu, cpup->phys_id, cpup->core_id, cpup->flag);
+
+		if (cpup->phys_id > max_phys_id)
+			max_phys_id = cpup->phys_id;
+		if (cpup->phys_id < min_phys_id)
+			min_phys_id = cpup->phys_id;
+
+		if (cpup->core_id > max_core_id)
+			max_core_id = cpup->core_id;
+		if (cpup->core_id < min_core_id)
+			min_core_id = cpup->core_id;
+	}
+
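+	/* Reset the per-CPU EQ interrupt state (list head and count) */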
+	for_each_possible_cpu(i) {
+		struct lpfc_eq_intr_info *eqi =
+			per_cpu_ptr(phba->sli4_hba.eq_info, i);
+
+		INIT_LIST_HEAD(&eqi->list);
+		eqi->icnt = 0;
+	}
+
+	/* This loop sets up all CPUs that are affinitized with an
+	 * irq vector assigned to the driver. All affinitized CPUs
+	 * will get a link to that vector's IRQ and EQ.
+	 *
+	 * NULL affinity mask handling:
+	 * If irq count is greater than one, log an error message.
+	 * If the null mask is received for the first irq, find the
+	 * first present cpu, and assign the eq index to ensure at
+	 * least one EQ is assigned.
+	 */
+	for (idx = 0; idx <  phba->cfg_irq_chann; idx++) {
+		/* Get a CPU mask for all CPUs affinitized to this vector */
+		maskp = pci_irq_get_affinity(phba->pcidev, idx);
+		if (!maskp) {
+			if (phba->cfg_irq_chann > 1)
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+						"3329 No affinity mask found "
+						"for vector %d (%d)\n",
+						idx, phba->cfg_irq_chann);
+			if (!idx) {
+				cpu = cpumask_first(cpu_present_mask);
+				cpup = &phba->sli4_hba.cpu_map[cpu];
+				cpup->eq = idx;
+				cpup->irq = pci_irq_vector(phba->pcidev, idx);
+				cpup->flag |= LPFC_CPU_FIRST_IRQ;
+			}
+			break;
+		}
+
+		i = 0;
+		/* Loop through all CPUs associated with vector idx */
+		for_each_cpu_and(cpu, maskp, cpu_present_mask) {
+			/* Set the EQ index and IRQ for that vector */
+			cpup = &phba->sli4_hba.cpu_map[cpu];
+			cpup->eq = idx;
+			cpup->irq = pci_irq_vector(phba->pcidev, idx);
+
+			/* If this is the first CPU that's assigned to this
+			 * vector, set LPFC_CPU_FIRST_IRQ.
+			 */
+			if (!i)
+				cpup->flag |= LPFC_CPU_FIRST_IRQ;
+			i++;
+
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"3336 Set Affinity: CPU %d "
+					"irq %d eq %d flag x%x\n",
+					cpu, cpup->irq, cpup->eq, cpup->flag);
+		}
+	}
+
+	/* After looking at each irq vector assigned to this pcidev, it's
+	 * possible to see that not ALL CPUs have been accounted for.
+	 * Next we will set any unassigned (unaffinitized) cpu map
+	 * entries to an IRQ on the same phys_id.
+	 */
+	first_cpu = cpumask_first(cpu_present_mask);
+	start_cpu = first_cpu;
+
+	for_each_present_cpu(cpu) {
+		cpup = &phba->sli4_hba.cpu_map[cpu];
+
+		/* Is this CPU entry unassigned */
+		if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
+			/* Mark CPU as IRQ not assigned by the kernel */
+			cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
+
+			/* If so, find a new_cpup that's on the SAME
+			 * phys_id as cpup. start_cpu will start where we
+			 * left off so all unassigned entries don't get assigned
+			 * the IRQ of the first entry.
+			 */
+			new_cpu = start_cpu;
+			for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+				new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
+				if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
+				    (new_cpup->irq != LPFC_VECTOR_MAP_EMPTY) &&
+				    (new_cpup->phys_id == cpup->phys_id))
+					goto found_same;
+				new_cpu = cpumask_next(
+					new_cpu, cpu_present_mask);
+				if (new_cpu == nr_cpumask_bits)
+					new_cpu = first_cpu;
+			}
+			/* At this point, we leave the CPU as unassigned */
+			continue;
+found_same:
+			/* We found a matching phys_id, so copy the IRQ info */
+			cpup->eq = new_cpup->eq;
+			cpup->irq = new_cpup->irq;
+
+			/* Bump start_cpu to the next slot to minimize the
+			 * chance of having multiple unassigned CPU entries
+			 * selecting the same IRQ.
+			 */
+			start_cpu = cpumask_next(new_cpu, cpu_present_mask);
+			if (start_cpu == nr_cpumask_bits)
+				start_cpu = first_cpu;
+
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"3337 Set Affinity: CPU %d "
+					"irq %d from id %d same "
+					"phys_id (%d)\n",
+					cpu, cpup->irq, new_cpu, cpup->phys_id);
+		}
+	}
+
+	/* Set any unassigned cpu map entries to an IRQ on any phys_id */
+	start_cpu = first_cpu;
+
+	for_each_present_cpu(cpu) {
+		cpup = &phba->sli4_hba.cpu_map[cpu];
+
+		/* Is this entry unassigned */
+		if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
+			/* Mark it as IRQ not assigned by the kernel */
+			cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
+
+			/* If so, find a new_cpup that's on ANY phys_id
+			 * as the cpup. start_cpu will start where we
+			 * left off so all unassigned entries don't get
+			 * assigned the IRQ of the first entry.
+			 */
+			new_cpu = start_cpu;
+			for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+				new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
+				if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
+				    (new_cpup->irq != LPFC_VECTOR_MAP_EMPTY))
+					goto found_any;
+				new_cpu = cpumask_next(
+					new_cpu, cpu_present_mask);
+				if (new_cpu == nr_cpumask_bits)
+					new_cpu = first_cpu;
+			}
+			/* We should never leave an entry unassigned */
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"3339 Set Affinity: CPU %d "
+					"irq %d UNASSIGNED\n",
+					cpu, cpup->irq);
+			continue;
+found_any:
+			/* We found an available entry, copy the IRQ info */
+			cpup->eq = new_cpup->eq;
+			cpup->irq = new_cpup->irq;
+
+			/* Bump start_cpu to the next slot to minimize the
+			 * chance of having multiple unassigned CPU entries
+			 * selecting the same IRQ.
+			 */
+			start_cpu = cpumask_next(new_cpu, cpu_present_mask);
+			if (start_cpu == nr_cpumask_bits)
+				start_cpu = first_cpu;
+
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"3338 Set Affinity: CPU %d "
+					"irq %d from id %d (%d/%d)\n",
+					cpu, cpup->irq, new_cpu,
+					new_cpup->phys_id, new_cpup->core_id);
+		}
+	}
+
+	/* Assign hdwq indices that are unique across all cpus in the map
+	 * that are also FIRST_CPUs.
+	 */
+	idx = 0;
+	for_each_present_cpu(cpu) {
+		cpup = &phba->sli4_hba.cpu_map[cpu];
+
+		/* Only FIRST IRQs get a hdwq index assignment. */
+		if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
+			continue;
+
+		/* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */
+		cpup->hdwq = idx;
+		idx++;
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3333 Set Affinity: CPU %d (phys %d core %d): "
+				"hdwq %d eq %d irq %d flg x%x\n",
+				cpu, cpup->phys_id, cpup->core_id,
+				cpup->hdwq, cpup->eq, cpup->irq, cpup->flag);
+	}
+	/* Finally we need to associate a hdwq with each cpu_map entry.
+	 * This will be 1 to 1 - hdwq to cpu, unless there are fewer
+	 * hardware queues than CPUs. In that case we will just round-robin
+	 * the available hardware queues as they get assigned to CPUs.
+	 * The next_idx is the idx from the FIRST_CPU loop above to account
+	 * for irq_chann < hdwq.  The idx is used for round-robin assignments
+	 * and needs to start at 0.
+	 */
+	next_idx = idx;
+	start_cpu = 0;
+	idx = 0;
+	for_each_present_cpu(cpu) {
+		cpup = &phba->sli4_hba.cpu_map[cpu];
+
+		/* FIRST cpus are already mapped. */
+		if (cpup->flag & LPFC_CPU_FIRST_IRQ)
+			continue;
+
+		/* If the cfg_irq_chann < cfg_hdw_queue, set the hdwq
+		 * of the unassigned cpus to the next idx so that all
+		 * hdw queues are fully utilized.
+		 */
+		if (next_idx < phba->cfg_hdw_queue) {
+			cpup->hdwq = next_idx;
+			next_idx++;
+			continue;
+		}
+
+		/* Not a First CPU and all hdw_queues are used.  Reuse a
+		 * Hardware Queue for another CPU, so be smart about it
+		 * and pick one that has its IRQ/EQ mapped to the same phys_id
+		 * (CPU package) and core_id.
+		 */
+		new_cpu = start_cpu;
+		for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+			new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
+			if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
+			    new_cpup->phys_id == cpup->phys_id &&
+			    new_cpup->core_id == cpup->core_id) {
+				goto found_hdwq;
+			}
+			new_cpu = cpumask_next(new_cpu, cpu_present_mask);
+			if (new_cpu == nr_cpumask_bits)
+				new_cpu = first_cpu;
+		}
+
+		/* If we can't match both phys_id and core_id,
+		 * settle for just a phys_id match.
+		 */
+		new_cpu = start_cpu;
+		for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+			new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
+			if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
+			    new_cpup->phys_id == cpup->phys_id)
+				goto found_hdwq;
+
+			new_cpu = cpumask_next(new_cpu, cpu_present_mask);
+			if (new_cpu == nr_cpumask_bits)
+				new_cpu = first_cpu;
+		}
+
+		/* Otherwise just round robin on cfg_hdw_queue */
+		cpup->hdwq = idx % phba->cfg_hdw_queue;
+		idx++;
+		goto logit;
+ found_hdwq:
+		/* We found a matching entry, copy its hdwq assignment */
+		start_cpu = cpumask_next(new_cpu, cpu_present_mask);
+		if (start_cpu == nr_cpumask_bits)
+			start_cpu = first_cpu;
+		cpup->hdwq = new_cpup->hdwq;
+ logit:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3335 Set Affinity: CPU %d (phys %d core %d): "
+				"hdwq %d eq %d irq %d flg x%x\n",
+				cpu, cpup->phys_id, cpup->core_id,
+				cpup->hdwq, cpup->eq, cpup->irq, cpup->flag);
+	}
+
+	/* The cpu_map array will be used later during initialization
+	 * when EQ / CQ / WQs are allocated and configured.
+	 */
+	return;
+}
 
 /**
  * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
@@ -10078,12 +10980,10 @@
 	char *name;
 
 	/* Set up MSI-X multi-message vectors */
-	vectors = phba->io_channel_irqs;
-	if (phba->cfg_fof)
-		vectors++;
+	vectors = phba->cfg_irq_chann;
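+	/* If fewer vectors are granted, IO channels are trimmed below */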
 
 	rc = pci_alloc_irq_vectors(phba->pcidev,
-				(phba->nvmet_support) ? 1 : 2,
+				1,
 				vectors, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
 	if (rc < 0) {
 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -10101,17 +11001,10 @@
 
 		phba->sli4_hba.hba_eq_hdl[index].idx = index;
 		phba->sli4_hba.hba_eq_hdl[index].phba = phba;
-		atomic_set(&phba->sli4_hba.hba_eq_hdl[index].hba_eq_in_use, 1);
-		if (phba->cfg_fof && (index == (vectors - 1)))
-			rc = request_irq(pci_irq_vector(phba->pcidev, index),
-				 &lpfc_sli4_fof_intr_handler, 0,
-				 name,
-				 &phba->sli4_hba.hba_eq_hdl[index]);
-		else
-			rc = request_irq(pci_irq_vector(phba->pcidev, index),
-				 &lpfc_sli4_hba_intr_handler, 0,
-				 name,
-				 &phba->sli4_hba.hba_eq_hdl[index]);
+		rc = request_irq(pci_irq_vector(phba->pcidev, index),
+			 &lpfc_sli4_hba_intr_handler, 0,
+			 name,
+			 &phba->sli4_hba.hba_eq_hdl[index]);
 		if (rc) {
 			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
 					"0486 MSI-X fast-path (%d) "
@@ -10120,24 +11013,16 @@
 		}
 	}
 
-	if (phba->cfg_fof)
-		vectors--;
-
-	if (vectors != phba->io_channel_irqs) {
+	if (vectors != phba->cfg_irq_chann) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"3238 Reducing IO channels to match number of "
 				"MSI-X vectors, requested %d got %d\n",
-				phba->io_channel_irqs, vectors);
-		if (phba->cfg_fcp_io_channel > vectors)
-			phba->cfg_fcp_io_channel = vectors;
-		if (phba->cfg_nvme_io_channel > vectors)
-			phba->cfg_nvme_io_channel = vectors;
-		if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel)
-			phba->io_channel_irqs = phba->cfg_fcp_io_channel;
-		else
-			phba->io_channel_irqs = phba->cfg_nvme_io_channel;
+				phba->cfg_irq_chann, vectors);
+		if (phba->cfg_irq_chann > vectors)
+			phba->cfg_irq_chann = vectors;
+		if (phba->nvmet_support && (phba->cfg_nvmet_mrq > vectors))
+			phba->cfg_nvmet_mrq = vectors;
 	}
-	lpfc_cpu_affinity_check(phba, vectors);
 
 	return rc;
 
@@ -10159,10 +11044,10 @@
  * @phba: pointer to lpfc hba data structure.
  *
  * This routine is invoked to enable the MSI interrupt mode to device with
- * SLI-4 interface spec. The kernel function pci_enable_msi() is called
- * to enable the MSI vector. The device driver is responsible for calling
- * the request_irq() to register MSI vector with a interrupt the handler,
- * which is done in this function.
+ * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is
+ * called to enable the MSI vector. The device driver is responsible for
+ * calling request_irq() to register the MSI vector with an interrupt
+ * handler, which is done in this function.
  *
  * Return codes
  * 	0 - successful
@@ -10173,34 +11058,31 @@
 {
 	int rc, index;
 
-	rc = pci_enable_msi(phba->pcidev);
-	if (!rc)
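+	/* pci_alloc_irq_vectors() returns the number of vectors
+	 * allocated on success, or a negative errno on failure.
+	 */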
+	rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1,
+				   PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
+	if (rc > 0)
 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
 				"0487 PCI enable MSI mode success.\n");
 	else {
 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
 				"0488 PCI enable MSI mode failed (%d)\n", rc);
-		return rc;
+		return rc ? rc : -1;
 	}
 
 	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
 			 0, LPFC_DRIVER_NAME, phba);
 	if (rc) {
-		pci_disable_msi(phba->pcidev);
+		pci_free_irq_vectors(phba->pcidev);
 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
 				"0490 MSI request_irq failed (%d)\n", rc);
 		return rc;
 	}
 
-	for (index = 0; index < phba->io_channel_irqs; index++) {
+	for (index = 0; index < phba->cfg_irq_chann; index++) {
 		phba->sli4_hba.hba_eq_hdl[index].idx = index;
 		phba->sli4_hba.hba_eq_hdl[index].phba = phba;
 	}
 
-	if (phba->cfg_fof) {
-		phba->sli4_hba.hba_eq_hdl[index].idx = index;
-		phba->sli4_hba.hba_eq_hdl[index].phba = phba;
-	}
 	return 0;
 }
 
@@ -10261,17 +11143,10 @@
 			phba->intr_type = INTx;
 			intr_mode = 0;
 
-			for (idx = 0; idx < phba->io_channel_irqs; idx++) {
+			for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
 				eqhdl = &phba->sli4_hba.hba_eq_hdl[idx];
 				eqhdl->idx = idx;
 				eqhdl->phba = phba;
-				atomic_set(&eqhdl->hba_eq_in_use, 1);
-			}
-			if (phba->cfg_fof) {
-				eqhdl = &phba->sli4_hba.hba_eq_hdl[idx];
-				eqhdl->idx = idx;
-				eqhdl->phba = phba;
-				atomic_set(&eqhdl->hba_eq_in_use, 1);
 			}
 		}
 	}
@@ -10295,13 +11170,13 @@
 		int index;
 
 		/* Free up MSI-X multi-message vectors */
-		for (index = 0; index < phba->io_channel_irqs; index++)
+		for (index = 0; index < phba->cfg_irq_chann; index++) {
+			irq_set_affinity_hint(
+				pci_irq_vector(phba->pcidev, index),
+				NULL);
 			free_irq(pci_irq_vector(phba->pcidev, index),
 					&phba->sli4_hba.hba_eq_hdl[index]);
-
-		if (phba->cfg_fof)
-			free_irq(pci_irq_vector(phba->pcidev, index),
-					&phba->sli4_hba.hba_eq_hdl[index]);
+		}
 	} else {
 		free_irq(phba->pcidev->irq, phba);
 	}
@@ -10362,10 +11237,11 @@
 static void
 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
 {
+	struct lpfc_sli4_hdw_queue *qp;
+	int idx, ccnt;
 	int wait_time = 0;
-	int nvme_xri_cmpl = 1;
+	int io_xri_cmpl = 1;
 	int nvmet_xri_cmpl = 1;
-	int fcp_xri_cmpl = 1;
 	int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
 
 	/* Driver just aborted IOs during the hba_unset process.  Pause
@@ -10378,32 +11254,31 @@
 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
 		lpfc_nvme_wait_for_io_drain(phba);
 
-	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
-		fcp_xri_cmpl =
-			list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
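+	/* io_xri_cmpl stays set only if the abort list of every
+	 * hardware queue is empty.
+	 */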
+	ccnt = 0;
+	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
+		qp = &phba->sli4_hba.hdwq[idx];
+		io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list);
+		if (!io_xri_cmpl) /* if list is NOT empty */
+			ccnt++;
+	}
+	if (ccnt)
+		io_xri_cmpl = 0;
+
 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
-		nvme_xri_cmpl =
-			list_empty(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
 		nvmet_xri_cmpl =
 			list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
 	}
 
-	while (!fcp_xri_cmpl || !els_xri_cmpl || !nvme_xri_cmpl ||
-	       !nvmet_xri_cmpl) {
+	while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) {
 		if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
 			if (!nvmet_xri_cmpl)
 				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 						"6424 NVMET XRI exchange busy "
 						"wait time: %d seconds.\n",
 						wait_time/1000);
-			if (!nvme_xri_cmpl)
+			if (!io_xri_cmpl)
 				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-						"6100 NVME XRI exchange busy "
-						"wait time: %d seconds.\n",
-						wait_time/1000);
-			if (!fcp_xri_cmpl)
-				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-						"2877 FCP XRI exchange busy "
+						"6100 IO XRI exchange busy "
 						"wait time: %d seconds.\n",
 						wait_time/1000);
 			if (!els_xri_cmpl)
@@ -10417,17 +11292,22 @@
 			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
 			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
 		}
+
+		ccnt = 0;
+		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
+			qp = &phba->sli4_hba.hdwq[idx];
+			io_xri_cmpl = list_empty(
+			    &qp->lpfc_abts_io_buf_list);
+			if (!io_xri_cmpl) /* if list is NOT empty */
+				ccnt++;
+		}
+		if (ccnt)
+			io_xri_cmpl = 0;
+
 		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
-			nvme_xri_cmpl = list_empty(
-				&phba->sli4_hba.lpfc_abts_nvme_buf_list);
 			nvmet_xri_cmpl = list_empty(
 				&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
 		}
-
-		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
-			fcp_xri_cmpl = list_empty(
-				&phba->sli4_hba.lpfc_abts_scsi_buf_list);
-
 		els_xri_cmpl =
 			list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
 
@@ -10452,7 +11332,8 @@
 	struct pci_dev *pdev = phba->pcidev;
 
 	lpfc_stop_hba_timers(phba);
-	phba->sli4_hba.intr_enable = 0;
+	if (phba->pport)
+		phba->sli4_hba.intr_enable = 0;
 
 	/*
 	 * Gracefully wait out the potential current outstanding asynchronous
@@ -10496,6 +11377,9 @@
 	/* Stop kthread signal shall trigger work_done one more time */
 	kthread_stop(phba->worker_thread);
 
+	/* Disable FW logging to host memory */
+	lpfc_ras_stop_fwlog(phba);
+
 	/* Unset the queues shared with the hardware then release all
 	 * allocated resources.
 	 */
@@ -10505,8 +11389,13 @@
 	/* Reset SLI4 HBA FCoE function */
 	lpfc_pci_function_reset(phba);
 
+	/* Free RAS DMA memory */
+	if (phba->ras_fwlog.ras_enabled)
+		lpfc_sli4_ras_dma_free(phba);
+
 	/* Stop the SLI4 device port */
-	phba->pport->work_port_events = 0;
+	if (phba->pport)
+		phba->pport->work_port_events = 0;
 }
 
  /**
@@ -10656,26 +11545,56 @@
 					   mbx_sli4_parameters);
 	phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
 	phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
-	phba->nvme_support = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
-			      bf_get(cfg_xib, mbx_sli4_parameters));
 
-	if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) ||
-	    !phba->nvme_support) {
-		phba->nvme_support = 0;
-		phba->nvmet_support = 0;
-		phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF;
-		phba->cfg_nvme_io_channel = 0;
-		phba->io_channel_irqs = phba->cfg_fcp_io_channel;
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
-				"6101 Disabling NVME support: "
-				"Not supported by firmware: %d %d\n",
-				bf_get(cfg_nvme, mbx_sli4_parameters),
-				bf_get(cfg_xib, mbx_sli4_parameters));
+	/* Check for Extended Pre-Registered SGL support */
+	phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters);
 
-		/* If firmware doesn't support NVME, just use SCSI support */
-		if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
-			return -ENODEV;
-		phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
+	/* Check for firmware nvme support */
+	rc = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
+		     bf_get(cfg_xib, mbx_sli4_parameters));
+
+	if (rc) {
+		/* Save this to indicate the Firmware supports NVME */
+		sli4_params->nvme = 1;
+
+		/* Firmware NVME support, check driver FC4 NVME support */
+		if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) {
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
+					"6133 Disabling NVME support: "
+					"FC4 type not supported: x%x\n",
+					phba->cfg_enable_fc4_type);
+			goto fcponly;
+		}
+	} else {
+		/* No firmware NVME support, check driver FC4 NVME support */
+		sli4_params->nvme = 0;
+		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
+					"6101 Disabling NVME support: Not "
+					"supported by firmware (%d %d) x%x\n",
+					bf_get(cfg_nvme, mbx_sli4_parameters),
+					bf_get(cfg_xib, mbx_sli4_parameters),
+					phba->cfg_enable_fc4_type);
+fcponly:
+			phba->nvme_support = 0;
+			phba->nvmet_support = 0;
+			phba->cfg_nvmet_mrq = 0;
+			phba->cfg_nvme_seg_cnt = 0;
+
+			/* If no FC4 type support, move to just SCSI support */
+			if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
+				return -ENODEV;
+			phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
+		}
+	}
+
+	/* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
+	 * accommodate 512K and 1M IOs in a single nvme buf and supply
+	 * enough NVME LS iocb buffers for larger connectivity counts.
+	 */
+	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+		phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
+		phba->cfg_iocb_cnt = 5;
 	}
 
 	/* Only embed PBDE for if_type 6, PBDE support requires xib be set */
@@ -10741,6 +11660,15 @@
 		phba->mds_diags_support = 1;
 	else
 		phba->mds_diags_support = 0;
+
+	/*
+	 * Check if the SLI port supports NSLER
+	 */
+	if (bf_get(cfg_nsler, mbx_sli4_parameters))
+		phba->nsler = 1;
+	else
+		phba->nsler = 0;
+
 	return 0;
 }
 
@@ -10969,9 +11897,9 @@
 	kfree(phba->vpi_ids);
 
 	lpfc_stop_hba_timers(phba);
-	spin_lock_irq(&phba->hbalock);
+	spin_lock_irq(&phba->port_list_lock);
 	list_del_init(&vport->listentry);
-	spin_unlock_irq(&phba->hbalock);
+	spin_unlock_irq(&phba->port_list_lock);
 
 	lpfc_debugfs_terminate(vport);
 
@@ -10989,6 +11917,8 @@
 	 * corresponding pools here.
 	 */
 	lpfc_scsi_free(phba);
+	lpfc_free_iocb_list(phba);
+
 	lpfc_mem_free_all(phba);
 
 	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
@@ -11167,7 +12097,7 @@
 	lpfc_scsi_dev_block(phba);
 
 	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
-	lpfc_sli_flush_fcp_rings(phba);
+	lpfc_sli_flush_io_rings(phba);
 
 	/* stop all timers */
 	lpfc_stop_hba_timers(phba);
@@ -11197,7 +12127,7 @@
 	lpfc_stop_hba_timers(phba);
 
 	/* Clean up all driver's outstanding SCSI I/Os */
-	lpfc_sli_flush_fcp_rings(phba);
+	lpfc_sli_flush_io_rings(phba);
 }
 
 /**
@@ -11333,10 +12263,6 @@
 
 	/* Bring device online, it will be no-op for non-fatal error resume */
 	lpfc_online(phba);
-
-	/* Clean up Advanced Error Reporting (AER) if needed */
-	if (phba->hba_flag & HBA_AER_ENABLED)
-		pci_cleanup_aer_uncorrect_error_status(pdev);
 }
 
 /**
@@ -11618,28 +12544,11 @@
 	/* Get the default values for Model Name and Description */
 	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
 
-	/* Create SCSI host to the physical port */
-	error = lpfc_create_shost(phba);
-	if (error) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"1415 Failed to create scsi host.\n");
-		goto out_unset_driver_resource;
-	}
-
-	/* Configure sysfs attributes */
-	vport = phba->pport;
-	error = lpfc_alloc_sysfs_attr(vport);
-	if (error) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"1416 Failed to allocate sysfs attr\n");
-		goto out_destroy_shost;
-	}
-
-	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
 	/* Now, trying to enable interrupt and bring up the device */
 	cfg_mode = phba->cfg_use_msi;
 
 	/* Put device to a known state before enabling interrupt */
+	phba->pport = NULL;
 	lpfc_stop_port(phba);
 
 	/* Configure and enable interrupt */
@@ -11648,18 +12557,34 @@
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0426 Failed to enable interrupt.\n");
 		error = -ENODEV;
-		goto out_free_sysfs_attr;
+		goto out_unset_driver_resource;
 	}
 	/* Default to single EQ for non-MSI-X */
 	if (phba->intr_type != MSIX) {
-		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
-			phba->cfg_fcp_io_channel = 1;
+		phba->cfg_irq_chann = 1;
 		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
-			phba->cfg_nvme_io_channel = 1;
 			if (phba->nvmet_support)
 				phba->cfg_nvmet_mrq = 1;
 		}
-		phba->io_channel_irqs = 1;
+	}
+	lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);
+
+	/* Create SCSI host to the physical port */
+	error = lpfc_create_shost(phba);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1415 Failed to create scsi host.\n");
+		goto out_disable_intr;
+	}
+	vport = phba->pport;
+	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
+
+	/* Configure sysfs attributes */
+	error = lpfc_alloc_sysfs_attr(vport);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1416 Failed to allocate sysfs attr\n");
+		goto out_destroy_shost;
 	}
 
 	/* Set up SLI-4 HBA */
@@ -11667,7 +12592,7 @@
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"1421 Failed to set up hba\n");
 		error = -ENODEV;
-		goto out_disable_intr;
+		goto out_free_sysfs_attr;
 	}
 
 	/* Log the current active interrupt mode */
@@ -11680,19 +12605,20 @@
 	/* NVME support in FW earlier in the driver load corrects the
 	 * FC4 type making a check for nvme_support unnecessary.
 	 */
-	if ((phba->nvmet_support == 0) &&
-	    (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
-		/* Create NVME binding with nvme_fc_transport. This
-		 * ensures the vport is initialized.  If the localport
-		 * create fails, it should not unload the driver to
-		 * support field issues.
-		 */
-		error = lpfc_nvme_create_localport(vport);
-		if (error) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"6004 NVME registration failed, "
-					"error x%x\n",
-					error);
+	if (phba->nvmet_support == 0) {
+		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+			/* Create NVME binding with nvme_fc_transport. This
+			 * ensures the vport is initialized.  If the localport
+			 * create fails, it should not unload the driver to
+			 * support field issues.
+			 */
+			error = lpfc_nvme_create_localport(vport);
+			if (error) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+						"6004 NVME registration "
+						"failed, error x%x\n",
+						error);
+			}
 		}
 	}
 
@@ -11702,14 +12628,18 @@
 
 	/* Check if there are static vports to be created. */
 	lpfc_create_static_vport(phba);
+
+	/* Enable RAS FW log support */
+	lpfc_sli4_ras_setup(phba);
+
 	return 0;
 
-out_disable_intr:
-	lpfc_sli4_disable_intr(phba);
 out_free_sysfs_attr:
 	lpfc_free_sysfs_attr(vport);
 out_destroy_shost:
 	lpfc_destroy_shost(phba);
+out_disable_intr:
+	lpfc_sli4_disable_intr(phba);
 out_unset_driver_resource:
 	lpfc_unset_driver_resource_phase2(phba);
 out_unset_driver_resource_s4:
@@ -11772,25 +12702,28 @@
 	lpfc_nvmet_destroy_targetport(phba);
 	lpfc_nvme_destroy_localport(vport);
 
+	/* De-allocate multi-XRI pools */
+	if (phba->cfg_xri_rebalancing)
+		lpfc_destroy_multixri_pools(phba);
+
 	/*
 	 * Bring down the SLI Layer. This step disables all interrupts,
 	 * clears the rings, discards all mailbox commands, and resets
 	 * the HBA FCoE function.
 	 */
 	lpfc_debugfs_terminate(vport);
-	lpfc_sli4_hba_unset(phba);
 
 	lpfc_stop_hba_timers(phba);
-	spin_lock_irq(&phba->hbalock);
+	spin_lock_irq(&phba->port_list_lock);
 	list_del_init(&vport->listentry);
-	spin_unlock_irq(&phba->hbalock);
+	spin_unlock_irq(&phba->port_list_lock);
 
 	/* Perform scsi free before driver resource_unset since scsi
 	 * buffers are released to their corresponding pools here.
 	 */
-	lpfc_scsi_free(phba);
-	lpfc_nvme_free(phba);
+	lpfc_io_free(phba);
 	lpfc_free_iocb_list(phba);
+	lpfc_sli4_hba_unset(phba);
 
 	lpfc_unset_driver_resource_phase2(phba);
 	lpfc_sli4_driver_resource_unset(phba);
@@ -11966,12 +12899,8 @@
 	/* Block all SCSI devices' I/Os on the host */
 	lpfc_scsi_dev_block(phba);
 
-	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
-	lpfc_sli_flush_fcp_rings(phba);
-
-	/* Flush the outstanding NVME IOs if fc4 type enabled. */
-	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
-		lpfc_sli_flush_nvme_rings(phba);
+	/* Flush all driver's outstanding I/Os as we are to reset */
+	lpfc_sli_flush_io_rings(phba);
 
 	/* stop all timers */
 	lpfc_stop_hba_timers(phba);
@@ -12002,12 +12931,8 @@
 	/* stop all timers */
 	lpfc_stop_hba_timers(phba);
 
-	/* Clean up all driver's outstanding SCSI I/Os */
-	lpfc_sli_flush_fcp_rings(phba);
-
-	/* Flush the outstanding NVME IOs if fc4 type enabled. */
-	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
-		lpfc_sli_flush_nvme_rings(phba);
+	/* Clean up all driver's outstanding I/Os */
+	lpfc_sli_flush_io_rings(phba);
 }
 
 /**
@@ -12148,10 +13073,6 @@
 		/* Bring the device back online */
 		lpfc_online(phba);
 	}
-
-	/* Clean up Advanced Error Reporting (AER) if needed */
-	if (phba->hba_flag & HBA_AER_ENABLED)
-		pci_cleanup_aer_uncorrect_error_status(pdev);
 }
 
 /**
@@ -12412,7 +13333,7 @@
  * is destroyed.
  *
  **/
-void
+static void
 lpfc_sli4_oas_verify(struct lpfc_hba *phba)
 {
 
@@ -12432,164 +13353,30 @@
 }
 
 /**
- * lpfc_fof_queue_setup - Set up all the fof queues
+ * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter
  * @phba: pointer to lpfc hba data structure.
  *
- * This routine is invoked to set up all the fof queues for the FC HBA
- * operation.
- *
- * Return codes
- *      0 - successful
- *      -ENOMEM - No available memory
+ * This routine checks to see if RAS is supported by the adapter. Check the
+ * function through which RAS support enablement is to be done.
  **/
-int
-lpfc_fof_queue_setup(struct lpfc_hba *phba)
+void
+lpfc_sli4_ras_init(struct lpfc_hba *phba)
 {
-	struct lpfc_sli_ring *pring;
-	int rc;
-
-	rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX);
-	if (rc)
-		return -ENOMEM;
-
-	if (phba->cfg_fof) {
-
-		rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq,
-				    phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP);
-		if (rc)
-			goto out_oas_cq;
-
-		rc = lpfc_wq_create(phba, phba->sli4_hba.oas_wq,
-				    phba->sli4_hba.oas_cq, LPFC_FCP);
-		if (rc)
-			goto out_oas_wq;
-
-		/* Bind this CQ/WQ to the NVME ring */
-		pring = phba->sli4_hba.oas_wq->pring;
-		pring->sli.sli4.wqp =
-			(void *)phba->sli4_hba.oas_wq;
-		phba->sli4_hba.oas_cq->pring = pring;
-	}
-
-	return 0;
-
-out_oas_wq:
-	lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq);
-out_oas_cq:
-	lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq);
-	return rc;
-
-}
-
-/**
- * lpfc_fof_queue_create - Create all the fof queues
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to allocate all the fof queues for the FC HBA
- * operation. For each SLI4 queue type, the parameters such as queue entry
- * count (queue depth) shall be taken from the module parameter. For now,
- * we just use some constant number as place holder.
- *
- * Return codes
- *      0 - successful
- *      -ENOMEM - No availble memory
- *      -EIO - The mailbox failed to complete successfully.
- **/
-int
-lpfc_fof_queue_create(struct lpfc_hba *phba)
-{
-	struct lpfc_queue *qdesc;
-	uint32_t wqesize;
-
-	/* Create FOF EQ */
-	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
-				      phba->sli4_hba.eq_esize,
-				      phba->sli4_hba.eq_ecount);
-	if (!qdesc)
-		goto out_error;
-
-	qdesc->qe_valid = 1;
-	phba->sli4_hba.fof_eq = qdesc;
-
-	if (phba->cfg_fof) {
-
-		/* Create OAS CQ */
-		if (phba->enab_exp_wqcq_pages)
-			qdesc = lpfc_sli4_queue_alloc(phba,
-						      LPFC_EXPANDED_PAGE_SIZE,
-						      phba->sli4_hba.cq_esize,
-						      LPFC_CQE_EXP_COUNT);
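+	/* Only Lancer G6 and G7 adapters implement RAS firmware logging */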
+	switch (phba->pcidev->device) {
+	case PCI_DEVICE_ID_LANCER_G6_FC:
+	case PCI_DEVICE_ID_LANCER_G7_FC:
+		phba->ras_fwlog.ras_hwsupport = true;
+		if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
+		    phba->cfg_ras_fwlog_buffsize)
+			phba->ras_fwlog.ras_enabled = true;
 		else
-			qdesc = lpfc_sli4_queue_alloc(phba,
-						      LPFC_DEFAULT_PAGE_SIZE,
-						      phba->sli4_hba.cq_esize,
-						      phba->sli4_hba.cq_ecount);
-		if (!qdesc)
-			goto out_error;
-
-		qdesc->qe_valid = 1;
-		phba->sli4_hba.oas_cq = qdesc;
-
-		/* Create OAS WQ */
-		if (phba->enab_exp_wqcq_pages) {
-			wqesize = (phba->fcp_embed_io) ?
-				LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
-			qdesc = lpfc_sli4_queue_alloc(phba,
-						      LPFC_EXPANDED_PAGE_SIZE,
-						      wqesize,
-						      LPFC_WQE_EXP_COUNT);
-		} else
-			qdesc = lpfc_sli4_queue_alloc(phba,
-						      LPFC_DEFAULT_PAGE_SIZE,
-						      phba->sli4_hba.wq_esize,
-						      phba->sli4_hba.wq_ecount);
-
-		if (!qdesc)
-			goto out_error;
-
-		phba->sli4_hba.oas_wq = qdesc;
-		list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
-
+			phba->ras_fwlog.ras_enabled = false;
+		break;
+	default:
+		phba->ras_fwlog.ras_hwsupport = false;
 	}
-	return 0;
-
-out_error:
-	lpfc_fof_queue_destroy(phba);
-	return -ENOMEM;
 }
 
-/**
- * lpfc_fof_queue_destroy - Destroy all the fof queues
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to release all the SLI4 queues with the FC HBA
- * operation.
- *
- * Return codes
- *      0 - successful
- **/
-int
-lpfc_fof_queue_destroy(struct lpfc_hba *phba)
-{
-	/* Release FOF Event queue */
-	if (phba->sli4_hba.fof_eq != NULL) {
-		lpfc_sli4_queue_free(phba->sli4_hba.fof_eq);
-		phba->sli4_hba.fof_eq = NULL;
-	}
-
-	/* Release OAS Completion queue */
-	if (phba->sli4_hba.oas_cq != NULL) {
-		lpfc_sli4_queue_free(phba->sli4_hba.oas_cq);
-		phba->sli4_hba.oas_cq = NULL;
-	}
-
-	/* Release OAS Work queue */
-	if (phba->sli4_hba.oas_wq != NULL) {
-		lpfc_sli4_queue_free(phba->sli4_hba.oas_wq);
-		phba->sli4_hba.oas_wq = NULL;
-	}
-	return 0;
-}
 
 MODULE_DEVICE_TABLE(pci, lpfc_id_table);
 
@@ -12661,7 +13448,6 @@
 	lpfc_nvmet_cmd_template();
 
 	/* Initialize in case vector mapping is needed */
-	lpfc_used_cpu = NULL;
 	lpfc_present_cpu = num_present_cpus();
 
 	error = pci_register_driver(&lpfc_driver);
@@ -12687,20 +13473,6 @@
 	pci_unregister_driver(&lpfc_driver);
 	fc_release_transport(lpfc_transport_template);
 	fc_release_transport(lpfc_vport_transport_template);
-	if (_dump_buf_data) {
-		printk(KERN_ERR	"9062 BLKGRD: freeing %lu pages for "
-				"_dump_buf_data at 0x%p\n",
-				(1L << _dump_buf_data_order), _dump_buf_data);
-		free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
-	}
-
-	if (_dump_buf_dif) {
-		printk(KERN_ERR	"9049 BLKGRD: freeing %lu pages for "
-				"_dump_buf_dif at 0x%p\n",
-				(1L << _dump_buf_dif_order), _dump_buf_dif);
-		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
-	}
-	kfree(lpfc_used_cpu);
 	idr_destroy(&lpfc_hba_index);
 }
 
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index deb094f..8abe933 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -94,7 +94,7 @@
 	memset(mp->virt, 0, LPFC_BPL_SIZE);
 	INIT_LIST_HEAD(&mp->list);
 	/* save address for completion */
-	pmb->context1 = (uint8_t *)mp;
+	pmb->ctx_buf = (uint8_t *)mp;
 	mb->un.varWords[3] = putPaddrLow(mp->phys);
 	mb->un.varWords[4] = putPaddrHigh(mp->phys);
 	mb->un.varDmp.sli4_length = sizeof(struct static_vport_info);
@@ -139,7 +139,7 @@
 	void *ctx;
 
 	mb = &pmb->u.mb;
-	ctx = pmb->context2;
+	ctx = pmb->ctx_buf;
 
 	/* Setup to dump VPD region */
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
@@ -151,7 +151,7 @@
 	mb->un.varDmp.word_cnt = (DMP_RSP_SIZE / sizeof (uint32_t));
 	mb->un.varDmp.co = 0;
 	mb->un.varDmp.resp_offset = 0;
-	pmb->context2 = ctx;
+	pmb->ctx_buf = ctx;
 	mb->mbxOwner = OWN_HOST;
 	return;
 }
@@ -172,7 +172,7 @@
 
 	mb = &pmb->u.mb;
 	/* Save context so that we can restore after memset */
-	ctx = pmb->context2;
+	ctx = pmb->ctx_buf;
 
 	/* Setup to dump VPD region */
 	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
@@ -186,7 +186,7 @@
 	mb->un.varDmp.word_cnt = WAKE_UP_PARMS_WORD_SIZE;
 	mb->un.varDmp.co = 0;
 	mb->un.varDmp.resp_offset = 0;
-	pmb->context2 = ctx;
+	pmb->ctx_buf = ctx;
 	return;
 }
 
@@ -304,7 +304,7 @@
 	/* Save address for later completion and set the owner to host so that
 	 * the FW knows this mailbox is available for processing.
 	 */
-	pmb->context1 = (uint8_t *)mp;
+	pmb->ctx_buf = (uint8_t *)mp;
 	mb->mbxOwner = OWN_HOST;
 	return (0);
 }
@@ -513,9 +513,9 @@
 		break;
 	}
 
-	if (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
-		mb->un.varInitLnk.link_flags & FLAGS_TOPOLOGY_MODE_LOOP) {
-		/* Failover is not tried for Lancer G6 */
+	if ((phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
+	     phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC) &&
+	    mb->un.varInitLnk.link_flags & FLAGS_TOPOLOGY_MODE_LOOP) {
 		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
 		phba->cfg_topology = FLAGS_TOPOLOGY_MODE_PT_PT;
 	}
@@ -631,7 +631,7 @@
 		mb->un.varRdSparm.vpi = phba->vpi_ids[vpi];
 
 	/* save address for completion */
-	pmb->context1 = mp;
+	pmb->ctx_buf = mp;
 
 	return (0);
 }
@@ -783,7 +783,7 @@
 	memcpy(sparam, param, sizeof (struct serv_parm));
 
 	/* save address for completion */
-	pmb->context1 = (uint8_t *) mp;
+	pmb->ctx_buf = (uint8_t *)mp;
 
 	mb->mbxCommand = MBX_REG_LOGIN64;
 	mb->un.varRegLogin.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
@@ -858,7 +858,7 @@
 		mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000;
 		mbox->vport = vport;
 		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-		mbox->context1 = NULL;
+		mbox->ctx_ndlp = NULL;
 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
 		if (rc == MBX_NOT_FINISHED)
 			mempool_free(mbox, phba->mbox_mem_pool);
@@ -1827,9 +1827,9 @@
 		 * page, this is used as a priori size of SLI4_PAGE_SIZE for
 		 * the later DMA memory free.
 		 */
-		viraddr = dma_zalloc_coherent(&phba->pcidev->dev,
-					      SLI4_PAGE_SIZE, &phyaddr,
-					      GFP_KERNEL);
+		viraddr = dma_alloc_coherent(&phba->pcidev->dev,
+					     SLI4_PAGE_SIZE, &phyaddr,
+					     GFP_KERNEL);
 		/* In case of malloc fails, proceed with whatever we have */
 		if (!viraddr)
 			break;
@@ -2095,8 +2095,8 @@
 	if (phba->nvmet_support) {
 		bf_set(lpfc_mbx_rq_ftr_rq_mrqp, &mboxq->u.mqe.un.req_ftrs, 1);
 		/* iaab/iaar NOT set for now */
-		 bf_set(lpfc_mbx_rq_ftr_rq_iaab, &mboxq->u.mqe.un.req_ftrs, 0);
-		 bf_set(lpfc_mbx_rq_ftr_rq_iaar, &mboxq->u.mqe.un.req_ftrs, 0);
+		bf_set(lpfc_mbx_rq_ftr_rq_iaab, &mboxq->u.mqe.un.req_ftrs, 0);
+		bf_set(lpfc_mbx_rq_ftr_rq_iaar, &mboxq->u.mqe.un.req_ftrs, 0);
 	}
 	return;
 }
@@ -2288,7 +2288,7 @@
 	INIT_LIST_HEAD(&mp->list);
 
 	/* save address for completion */
-	mbox->context1 = (uint8_t *) mp;
+	mbox->ctx_buf = (uint8_t *)mp;
 
 	mb->mbxCommand = MBX_DUMP_MEMORY;
 	mb->un.varDmp.type = DMP_NV_PARAMS;
@@ -2305,7 +2305,7 @@
 	MAILBOX_t *mb;
 	int rc = FAILURE;
 	struct lpfc_rdp_context *rdp_context =
-			(struct lpfc_rdp_context *)(mboxq->context2);
+			(struct lpfc_rdp_context *)(mboxq->ctx_ndlp);
 
 	mb = &mboxq->u.mb;
 	if (mb->mbxStatus)
@@ -2323,9 +2323,9 @@
 static void
 lpfc_mbx_cmpl_rdp_page_a2(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
 {
-	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) mbox->context1;
+	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
 	struct lpfc_rdp_context *rdp_context =
-			(struct lpfc_rdp_context *)(mbox->context2);
+			(struct lpfc_rdp_context *)(mbox->ctx_ndlp);
 
 	if (bf_get(lpfc_mqe_status, &mbox->u.mqe))
 		goto error_mbuf_free;
@@ -2341,7 +2341,7 @@
 	lpfc_read_lnk_stat(phba, mbox);
 	mbox->vport = rdp_context->ndlp->vport;
 	mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_link_stat;
-	mbox->context2 = (struct lpfc_rdp_context *) rdp_context;
+	mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
 	if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED)
 		goto error_cmd_free;
 
@@ -2359,9 +2359,9 @@
 lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
 {
 	int rc;
-	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (mbox->context1);
+	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
 	struct lpfc_rdp_context *rdp_context =
-			(struct lpfc_rdp_context *)(mbox->context2);
+			(struct lpfc_rdp_context *)(mbox->ctx_ndlp);
 
 	if (bf_get(lpfc_mqe_status, &mbox->u.mqe))
 		goto error;
@@ -2375,7 +2375,7 @@
 	INIT_LIST_HEAD(&mp->list);
 
 	/* save address for completion */
-	mbox->context1 = mp;
+	mbox->ctx_buf = mp;
 	mbox->vport = rdp_context->ndlp->vport;
 
 	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY);
@@ -2391,7 +2391,7 @@
 	mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys);
 
 	mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a2;
-	mbox->context2 = (struct lpfc_rdp_context *) rdp_context;
+	mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
 	if (rc == MBX_NOT_FINISHED)
 		goto error;
@@ -2436,7 +2436,7 @@
 
 	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY);
 	/* save address for completion */
-	mbox->context1 = mp;
+	mbox->ctx_buf = mp;
 
 	bf_set(lpfc_mbx_memory_dump_type3_type,
 		&mbox->u.mqe.un.mem_dump_type3, DMP_LMSD);
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 9c22a2c..ae09bb8 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -72,8 +72,8 @@
  * lpfc_mem_alloc - create and allocate all PCI and memory pools
  * @phba: HBA to allocate pools for
  *
- * Description: Creates and allocates PCI pools lpfc_sg_dma_buf_pool,
- * lpfc_mbuf_pool, lpfc_hrb_pool.  Creates and allocates kmalloc-backed mempools
+ * Description: Creates and allocates PCI pools lpfc_mbuf_pool,
+ * lpfc_hrb_pool.  Creates and allocates kmalloc-backed mempools
  * for LPFC_MBOXQ_t and lpfc_nodelist.  Also allocates the VPI bitmask.
  *
  * Notes: Not interrupt-safe.  Must be called with no locks held.  If any
@@ -89,36 +89,12 @@
 	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
 	int i;
 
-	if (phba->sli_rev == LPFC_SLI_REV4) {
-		/* Calculate alignment */
-		if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
-			i = phba->cfg_sg_dma_buf_size;
-		else
-			i = SLI4_PAGE_SIZE;
-
-		phba->lpfc_sg_dma_buf_pool =
-			dma_pool_create("lpfc_sg_dma_buf_pool",
-					&phba->pcidev->dev,
-					phba->cfg_sg_dma_buf_size,
-					i, 0);
-		if (!phba->lpfc_sg_dma_buf_pool)
-			goto fail;
-
-	} else {
-		phba->lpfc_sg_dma_buf_pool =
-			dma_pool_create("lpfc_sg_dma_buf_pool",
-					&phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
-					align, 0);
-
-		if (!phba->lpfc_sg_dma_buf_pool)
-			goto fail;
-	}
 
 	phba->lpfc_mbuf_pool = dma_pool_create("lpfc_mbuf_pool", &phba->pcidev->dev,
 							LPFC_BPL_SIZE,
 							align, 0);
 	if (!phba->lpfc_mbuf_pool)
-		goto fail_free_dma_buf_pool;
+		goto fail;
 
 	pool->elements = kmalloc_array(LPFC_MBUF_POOL_SIZE,
 				       sizeof(struct lpfc_dmabuf),
@@ -208,9 +184,6 @@
  fail_free_lpfc_mbuf_pool:
 	dma_pool_destroy(phba->lpfc_mbuf_pool);
 	phba->lpfc_mbuf_pool = NULL;
- fail_free_dma_buf_pool:
-	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
-	phba->lpfc_sg_dma_buf_pool = NULL;
  fail:
 	return -ENOMEM;
 }
@@ -248,25 +221,22 @@
 
 	/* Free HBQ pools */
 	lpfc_sli_hbqbuf_free_all(phba);
-	if (phba->lpfc_nvmet_drb_pool)
-		dma_pool_destroy(phba->lpfc_nvmet_drb_pool);
+	dma_pool_destroy(phba->lpfc_nvmet_drb_pool);
 	phba->lpfc_nvmet_drb_pool = NULL;
-	if (phba->lpfc_drb_pool)
-		dma_pool_destroy(phba->lpfc_drb_pool);
+
+	dma_pool_destroy(phba->lpfc_drb_pool);
 	phba->lpfc_drb_pool = NULL;
-	if (phba->lpfc_hrb_pool)
-		dma_pool_destroy(phba->lpfc_hrb_pool);
+
+	dma_pool_destroy(phba->lpfc_hrb_pool);
 	phba->lpfc_hrb_pool = NULL;
-	if (phba->txrdy_payload_pool)
-		dma_pool_destroy(phba->txrdy_payload_pool);
+
+	dma_pool_destroy(phba->txrdy_payload_pool);
 	phba->txrdy_payload_pool = NULL;
 
-	if (phba->lpfc_hbq_pool)
-		dma_pool_destroy(phba->lpfc_hbq_pool);
+	dma_pool_destroy(phba->lpfc_hbq_pool);
 	phba->lpfc_hbq_pool = NULL;
 
-	if (phba->rrq_pool)
-		mempool_destroy(phba->rrq_pool);
+	mempool_destroy(phba->rrq_pool);
 	phba->rrq_pool = NULL;
 
 	/* Free NLP memory pool */
@@ -290,10 +260,6 @@
 	dma_pool_destroy(phba->lpfc_mbuf_pool);
 	phba->lpfc_mbuf_pool = NULL;
 
-	/* Free DMA buffer memory pool */
-	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
-	phba->lpfc_sg_dma_buf_pool = NULL;
-
 	/* Free Device Data memory pool */
 	if (phba->device_data_mem_pool) {
 		/* Ensure all objects have been returned to the pool */
@@ -330,7 +296,7 @@
 
 	/* Free memory used in mailbox queue back to mailbox memory pool */
 	list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) {
-		mp = (struct lpfc_dmabuf *) (mbox->context1);
+		mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
 		if (mp) {
 			lpfc_mbuf_free(phba, mp->virt, mp->phys);
 			kfree(mp);
@@ -340,7 +306,7 @@
 	}
 	/* Free memory used in mailbox cmpl list back to mailbox memory pool */
 	list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) {
-		mp = (struct lpfc_dmabuf *) (mbox->context1);
+		mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
 		if (mp) {
 			lpfc_mbuf_free(phba, mp->virt, mp->phys);
 			kfree(mp);
@@ -354,7 +320,7 @@
 	spin_unlock_irq(&phba->hbalock);
 	if (psli->mbox_active) {
 		mbox = psli->mbox_active;
-		mp = (struct lpfc_dmabuf *) (mbox->context1);
+		mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
 		if (mp) {
 			lpfc_mbuf_free(phba, mp->virt, mp->phys);
 			kfree(mp);
@@ -366,6 +332,13 @@
 	/* Free and destroy all the allocated memory pools */
 	lpfc_mem_free(phba);
 
+	/* Free DMA buffer memory pool */
+	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
+	phba->lpfc_sg_dma_buf_pool = NULL;
+
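+	/* Free the FCP command/response DMA buffer pool */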
+	dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
+	phba->lpfc_cmd_rsp_buf_pool = NULL;
+
 	/* Free the iocb lookup array */
 	kfree(psli->iocbq_lookup);
 	psli->iocbq_lookup = NULL;
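
Ordering matters in the relocated frees above: the sg and cmd/rsp DMA pools are destroyed only after lpfc_mem_free() and the mailbox cleanup have returned every buffer carved from them, because destroying a pool with live allocations triggers a kernel warning. A hedged sketch of the constraint (names illustrative):

#include <linux/dmapool.h>

static void ex_teardown(struct dma_pool *pool, void *vaddr, dma_addr_t daddr)
{
	/* return every outstanding buffer first ... */
	dma_pool_free(pool, vaddr, daddr);
	/* ... only then tear down the backing pool */
	dma_pool_destroy(pool);
}
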
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index bd9bce9..fc6e454 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -360,6 +360,7 @@
 	case  NLP_STE_NPR_NODE:
 		if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
 			break;
+		/* fall through */
 	case  NLP_STE_REG_LOGIN_ISSUE:
 	case  NLP_STE_PRLI_ISSUE:
 	case  NLP_STE_UNMAPPED_NODE:
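
The /* fall through */ comment added in this hunk is how kernels of this vintage annotate a deliberate missing break for GCC's -Wimplicit-fallthrough (later trees use the fallthrough; pseudo-keyword). An illustrative switch with hypothetical states:

#include <linux/types.h>

enum ex_state { EX_NPR, EX_PRLI, EX_MAPPED };

static void ex_handle_logged_in(enum ex_state s);

static void ex_dispatch(enum ex_state s, bool adisc_pending)
{
	switch (s) {
	case EX_NPR:
		if (!adisc_pending)
			break;
		/* ADISC pending: treat like the logged-in states below */
		/* fall through */
	case EX_PRLI:
	case EX_MAPPED:
		ex_handle_logged_in(s);
		break;
	}
}
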
@@ -467,7 +468,7 @@
 	 */
 	mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
 	/*
-	 * mbox->context2 = lpfc_nlp_get(ndlp) deferred until mailbox
+	 * mbox->ctx_ndlp = lpfc_nlp_get(ndlp) deferred until mailbox
 	 * command issued in lpfc_cmpl_els_acc().
 	 */
 	mbox->vport = vport;
@@ -535,8 +536,8 @@
 	struct lpfc_nodelist *ndlp;
 	uint32_t cmd;
 
-	elsiocb = (struct lpfc_iocbq *)mboxq->context1;
-	ndlp = (struct lpfc_nodelist *) mboxq->context2;
+	elsiocb = (struct lpfc_iocbq *)mboxq->ctx_buf;
+	ndlp = (struct lpfc_nodelist *)mboxq->ctx_ndlp;
 	vport = mboxq->vport;
 	cmd = elsiocb->drvrTimeout;
 
@@ -613,7 +614,7 @@
 		}
 out:
 		/* If we are authenticated, move to the proper state */
-		if (ndlp->nlp_type & NLP_FCP_TARGET)
+		if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET))
 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
 		else
 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
@@ -798,9 +799,15 @@
 			if (npr->writeXferRdyDis)
 				ndlp->nlp_flag |= NLP_FIRSTBURST;
 		}
-		if (npr->Retry)
+		if (npr->Retry && ndlp->nlp_type &
+					(NLP_FCP_INITIATOR | NLP_FCP_TARGET))
 			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
 
+		if (npr->Retry && phba->nsler &&
+		    ndlp->nlp_type & (NLP_NVME_INITIATOR | NLP_NVME_TARGET))
+			ndlp->nlp_nvme_info |= NLP_NVME_NSLER;
+
 		/* If this driver is in nvme target mode, set the ndlp's fc4
 		 * type to NVME provided the PRLI response claims NVME FC4
 		 * type.  Target mode does not issue gft_id so doesn't get
@@ -825,7 +832,7 @@
 			"rport rolechg:   role:x%x did:x%x flg:x%x",
 			roles, ndlp->nlp_DID, ndlp->nlp_flag);
 
-		if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
+		if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
 			fc_remote_port_rolechg(rport, roles);
 	}
 }
@@ -836,22 +843,27 @@
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 
 	if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
+		spin_lock_irq(shost->host_lock);
 		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+		spin_unlock_irq(shost->host_lock);
 		return 0;
 	}
 
 	if (!(vport->fc_flag & FC_PT2PT)) {
 		/* Check config parameter use-adisc or FCP-2 */
-		if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) ||
+		if (vport->cfg_use_adisc && ((vport->fc_flag & FC_RSCN_MODE) ||
 		    ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
-		     (ndlp->nlp_type & NLP_FCP_TARGET))) {
+		     (ndlp->nlp_type & NLP_FCP_TARGET)))) {
 			spin_lock_irq(shost->host_lock);
 			ndlp->nlp_flag |= NLP_NPR_ADISC;
 			spin_unlock_irq(shost->host_lock);
 			return 1;
 		}
 	}
+
+	spin_lock_irq(shost->host_lock);
 	ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+	spin_unlock_irq(shost->host_lock);
 	lpfc_unreg_rpi(vport, ndlp);
 	return 0;
 }
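
The hunk above brings the clearing of NLP_NPR_ADISC under host_lock, matching the paths that set it: every read-modify-write of a shared flag word must hold the same lock, or a concurrent |= on another CPU can be lost. A condensed sketch with a hypothetical node type:

#include <linux/spinlock.h>

#define EX_FLAG_ADISC	0x1

struct ex_node {
	spinlock_t lock;	/* guards @flags */
	unsigned int flags;
};

static void ex_clear_adisc(struct ex_node *n)
{
	spin_lock_irq(&n->lock);
	n->flags &= ~EX_FLAG_ADISC;	/* RMW under the same lock as setters */
	spin_unlock_irq(&n->lock);
}
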
@@ -865,14 +877,27 @@
  * This function will send a unreg_login mailbox command to the firmware
  * to release a rpi.
  **/
-void
-lpfc_release_rpi(struct lpfc_hba *phba,
-		struct lpfc_vport *vport,
-		uint16_t rpi)
+static void
+lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport,
+		 struct lpfc_nodelist *ndlp, uint16_t rpi)
 {
 	LPFC_MBOXQ_t *pmb;
 	int rc;
 
+	/* If there is already an UNREG in progress for this ndlp,
+	 * no need to queue up another one.
+	 */
+	if (ndlp->nlp_flag & NLP_UNREG_INP) {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+				 "1435 release_rpi SKIP UNREG x%x on "
+				 "NPort x%x deferred x%x  flg x%x "
+				 "Data: x%px\n",
+				 ndlp->nlp_rpi, ndlp->nlp_DID,
+				 ndlp->nlp_defer_did,
+				 ndlp->nlp_flag, ndlp);
+		return;
+	}
+
 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
 			GFP_KERNEL);
 	if (!pmb)
@@ -881,6 +906,18 @@
 	else {
 		lpfc_unreg_login(phba, vport->vpi, rpi, pmb);
 		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+		pmb->vport = vport;
+		pmb->ctx_ndlp = ndlp;
+
+		if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
+		    (!(vport->fc_flag & FC_OFFLINE_MODE)))
+			ndlp->nlp_flag |= NLP_UNREG_INP;
+
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+				 "1437 release_rpi UNREG x%x "
+				 "on NPort x%x flg x%x\n",
+				 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag);
+
 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
 		if (rc == MBX_NOT_FINISHED)
 			mempool_free(pmb, phba->mbox_mem_pool);
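
lpfc_release_rpi() now takes the ndlp so it can honor NLP_UNREG_INP: skip queuing a second UNREG_LOGIN while one is outstanding, and mark the new one in flight (the completion handler clears the flag). A simplified sketch of the in-progress guard, with hypothetical names and, as in the hunk, no extra locking shown:

#define EX_UNREG_INP	0x2

struct ex_rpi_node {
	unsigned int flags;
};

static int ex_submit_async_unreg(struct ex_rpi_node *n);	/* queues mbox cmd */

static int ex_queue_unreg_once(struct ex_rpi_node *n)
{
	if (n->flags & EX_UNREG_INP)	/* one already in flight: skip */
		return 0;

	n->flags |= EX_UNREG_INP;	/* cleared by the completion handler */
	return ex_submit_async_unreg(n);
}
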
@@ -901,7 +938,7 @@
 		(evt == NLP_EVT_CMPL_REG_LOGIN) &&
 		(!pmb->u.mb.mbxStatus)) {
 		rpi = pmb->u.mb.un.varWords[0];
-		lpfc_release_rpi(phba, vport, rpi);
+		lpfc_release_rpi(phba, vport, ndlp, rpi);
 	}
 	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
 			 "0271 Illegal State Transition: node x%x "
@@ -1253,7 +1290,7 @@
 			ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
 			mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
 		}
-		mbox->context2 = lpfc_nlp_get(ndlp);
+		mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
 		mbox->vport = vport;
 		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
 		    != MBX_NOT_FINISHED) {
@@ -1267,7 +1304,7 @@
 		 * command
 		 */
 		lpfc_nlp_put(ndlp);
-		mp = (struct lpfc_dmabuf *) mbox->context1;
+		mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
 		lpfc_mbuf_free(phba, mp->virt, mp->phys);
 		kfree(mp);
 		mempool_free(mbox, phba->mbox_mem_pool);
@@ -1329,7 +1366,7 @@
 	if (!(phba->pport->load_flag & FC_UNLOADING) &&
 		!mb->mbxStatus) {
 		rpi = pmb->u.mb.un.varWords[0];
-		lpfc_release_rpi(phba, vport, rpi);
+		lpfc_release_rpi(phba, vport, ndlp, rpi);
 	}
 	return ndlp->nlp_state;
 }
@@ -1630,16 +1667,17 @@
 	LPFC_MBOXQ_t	  *mb;
 	LPFC_MBOXQ_t	  *nextmb;
 	struct lpfc_dmabuf *mp;
+	struct lpfc_nodelist *ns_ndlp;
 
 	cmdiocb = (struct lpfc_iocbq *) arg;
 
 	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
 	if ((mb = phba->sli.mbox_active)) {
 		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
-		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
+		   (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
 			ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
 			lpfc_nlp_put(ndlp);
-			mb->context2 = NULL;
+			mb->ctx_ndlp = NULL;
 			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 		}
 	}
@@ -1647,8 +1685,8 @@
 	spin_lock_irq(&phba->hbalock);
 	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
 		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
-		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
-			mp = (struct lpfc_dmabuf *) (mb->context1);
+		   (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
+			mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
 			if (mp) {
 				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
 				kfree(mp);
@@ -1662,6 +1700,13 @@
 	}
 	spin_unlock_irq(&phba->hbalock);
 
+	/* software abort if any GID_FT is outstanding */
+	if (vport->cfg_enable_fc4_type != LPFC_ENABLE_FCP) {
+		ns_ndlp = lpfc_findnode_did(vport, NameServer_DID);
+		if (ns_ndlp && NLP_CHK_NODE_ACT(ns_ndlp))
+			lpfc_els_abort(phba, ns_ndlp);
+	}
+
 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
 	return ndlp->nlp_state;
 }
@@ -1702,7 +1747,6 @@
 	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
 	MAILBOX_t *mb = &pmb->u.mb;
 	uint32_t did  = mb->un.varWords[1];
-	int rc = 0;
 
 	if (mb->mbxStatus) {
 		/* RegLogin failed */
@@ -1759,8 +1803,8 @@
 			 * is configured try it.
 			 */
 			ndlp->nlp_fc4_type |= NLP_FC4_FCP;
-			if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
-			     (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
+			if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+			    (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
 				ndlp->nlp_fc4_type |= NLP_FC4_NVME;
 				/* We need to update the localport also */
 				lpfc_nvme_update_localport(vport);
@@ -1770,14 +1814,25 @@
 			ndlp->nlp_fc4_type |= NLP_FC4_FCP;
 
 		} else if (ndlp->nlp_fc4_type == 0) {
-			rc = lpfc_ns_cmd(vport, SLI_CTNS_GFT_ID,
-					 0, ndlp->nlp_DID);
-			return ndlp->nlp_state;
+			/* If we are only configured for FCP, the driver
+			 * should just issue PRLI for FCP. Otherwise issue
+			 * GFT_ID to determine if remote port supports NVME.
+			 */
+			if (vport->cfg_enable_fc4_type != LPFC_ENABLE_FCP) {
+				lpfc_ns_cmd(vport, SLI_CTNS_GFT_ID, 0,
+					    ndlp->nlp_DID);
+				return ndlp->nlp_state;
+			}
+			ndlp->nlp_fc4_type = NLP_FC4_FCP;
 		}
 
 		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
-		lpfc_issue_els_prli(vport, ndlp, 0);
+		if (lpfc_issue_els_prli(vport, ndlp, 0)) {
+			lpfc_issue_els_logo(vport, ndlp, 0);
+			ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+		}
 	} else {
 		if ((vport->fc_flag & FC_PT2PT) && phba->nvmet_support)
 			phba->targetport->port_id = vport->fc_myDID;
@@ -1975,6 +2030,11 @@
 		if (bf_get_be32(prli_init, nvpr))
 			ndlp->nlp_type |= NLP_NVME_INITIATOR;
 
+		if (phba->nsler && bf_get_be32(prli_nsler, nvpr))
+			ndlp->nlp_nvme_info |= NLP_NVME_NSLER;
+		else
+			ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER;
+
 		/* Target driver cannot solicit NVME FB. */
 		if (bf_get_be32(prli_tgt, nvpr)) {
 			/* Complete the nvme target roles.  The transport
@@ -2318,6 +2378,7 @@
 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
 	spin_lock_irq(shost->host_lock);
 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+	ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
 	spin_unlock_irq(shost->host_lock);
 	lpfc_disc_set_adisc(vport, ndlp);
 
@@ -2395,6 +2456,7 @@
 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
 	spin_lock_irq(shost->host_lock);
 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+	ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
 	spin_unlock_irq(shost->host_lock);
 	lpfc_disc_set_adisc(vport, ndlp);
 	return ndlp->nlp_state;
@@ -2652,6 +2714,7 @@
 	lpfc_cancel_retry_delay_tmo(vport, ndlp);
 	spin_lock_irq(shost->host_lock);
 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+	ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
 	spin_unlock_irq(shost->host_lock);
 	return ndlp->nlp_state;
 }
@@ -2851,17 +2914,21 @@
 	uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *,
 			 uint32_t);
 	uint32_t got_ndlp = 0;
+	uint32_t data1;
 
 	if (lpfc_nlp_get(ndlp))
 		got_ndlp = 1;
 
 	cur_state = ndlp->nlp_state;
 
+	data1 = (((uint32_t)ndlp->nlp_fc4_type << 16) |
+		((uint32_t)ndlp->nlp_type));
 	/* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
 			 "0211 DSM in event x%x on NPort x%x in "
-			 "state %d Data: x%x\n",
-			 evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag);
+			 "state %d rpi x%x Data: x%x x%x\n",
+			 evt, ndlp->nlp_DID, cur_state, ndlp->nlp_rpi,
+			 ndlp->nlp_flag, data1);
 
 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
 		 "DSM in:          evt:%d ste:%d did:x%x",
@@ -2872,9 +2939,13 @@
 
 	/* DSM out state <rc> on NPort <nlp_DID> */
 	if (got_ndlp) {
+		data1 = (((uint32_t)ndlp->nlp_fc4_type << 16) |
+			((uint32_t)ndlp->nlp_type));
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
-			 "0212 DSM out state %d on NPort x%x Data: x%x\n",
-			 rc, ndlp->nlp_DID, ndlp->nlp_flag);
+			 "0212 DSM out state %d on NPort x%x "
+			 "rpi x%x Data: x%x x%x\n",
+			 rc, ndlp->nlp_DID, ndlp->nlp_rpi, ndlp->nlp_flag,
+			 data1);
 
 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
 			"DSM out:         ste:%d did:x%x flg:x%x",
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 918ae18..a227e36 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -56,12 +56,12 @@
 
 /* NVME initiator-based functions */
 
-static struct lpfc_nvme_buf *
+static struct lpfc_io_buf *
 lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
-		  int expedite);
+		  int idx, int expedite);
 
 static void
-lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_nvme_buf *);
+lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_io_buf *);
 
 static struct nvme_fc_port_template lpfc_nvme_template;
 
@@ -229,7 +229,7 @@
 	if (qhandle == NULL)
 		return -ENOMEM;
 
-	qhandle->cpu_id = smp_processor_id();
+	qhandle->cpu_id = raw_smp_processor_id();
 	qhandle->qidx = qidx;
 	/*
 	 * NVME qidx == 0 is the admin queue, so both admin queue
@@ -239,7 +239,7 @@
 	if (qidx) {
 		str = "IO ";  /* IO queue */
 		qhandle->index = ((qidx - 1) %
-			vport->phba->cfg_nvme_io_channel);
+			lpfc_nvme_template.max_hw_queues);
 	} else {
 		str = "ADM";  /* Admin queue */
 		qhandle->index = qidx;
@@ -247,7 +247,7 @@
 
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
 			 "6073 Binding %s HdwQueue %d  (cpu %d) to "
-			 "io_channel %d qhandle %p\n", str,
+			 "hdw_queue %d qhandle x%px\n", str,
 			 qidx, qhandle->cpu_id, qhandle->index, qhandle);
 	*handle = (void *)qhandle;
 	return 0;
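
The swap to raw_smp_processor_id() above (and in the I/O paths later in this file) is needed because these callers can run preemptible, where plain smp_processor_id() splats under CONFIG_DEBUG_PREEMPT. The raw variant skips the check and is correct whenever the CPU number is only advisory, as it is for this queue-handle label:

#include <linux/smp.h>

/*
 * Advisory CPU sampling from a possibly-preemptible context: the task
 * may migrate right after this returns, so treat the value purely as a
 * hint (logging, queue placement), never as a safe per-CPU data index.
 */
static unsigned int ex_sample_cpu_hint(void)
{
	return raw_smp_processor_id();
}
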
@@ -282,7 +282,7 @@
 	vport = lport->vport;
 
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
-			"6001 ENTER.  lpfc_pnvme %p, qidx x%xi qhandle %p\n",
+			"6001 ENTER.  lpfc_pnvme x%px, qidx x%x qhandle x%px\n",
 			lport, qidx, handle);
 	kfree(handle);
 }
@@ -293,11 +293,12 @@
 	struct lpfc_nvme_lport *lport = localport->private;
 
 	lpfc_printf_vlog(lport->vport, KERN_INFO, LOG_NVME,
-			 "6173 localport %p delete complete\n",
+			 "6173 localport x%px delete complete\n",
 			 lport);
 
 	/* release any threads waiting for the unreg to complete */
-	complete(&lport->lport_unreg_done);
+	if (lport->vport->localport)
+		complete(lport->lport_unreg_cmp);
 }
 
 /* lpfc_nvme_remoteport_delete
@@ -311,7 +312,7 @@
  * Return value :
  * None
  */
-void
+static void
 lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
 {
 	struct lpfc_nvme_rport *rport = remoteport->private;
@@ -331,7 +332,7 @@
 	 * calling state machine to remove the node.
 	 */
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
-			"6146 remoteport delete of remoteport %p\n",
+			"6146 remoteport delete of remoteport x%px\n",
 			remoteport);
 	spin_lock_irq(&vport->phba->hbalock);
 
@@ -382,8 +383,8 @@
 	ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
 			 "6047 nvme cmpl Enter "
-			 "Data %p DID %x Xri: %x status %x reason x%x cmd:%p "
-			 "lsreg:%p bmp:%p ndlp:%p\n",
+			 "Data %px DID %x Xri: %x status %x reason x%x "
+			 "cmd:x%px lsreg:x%px bmp:x%px ndlp:x%px\n",
 			 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
 			 cmdwqe->sli4_xritag, status,
 			 (wcqe->parameter & 0xffff),
@@ -403,7 +404,7 @@
 	else
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
 				 "6046 nvme cmpl without done call back? "
-				 "Data %p DID %x Xri: %x status %x\n",
+				 "Data %px DID %x Xri: %x status %x\n",
 				pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
 				cmdwqe->sli4_xritag, status);
 	if (ndlp) {
@@ -435,6 +436,7 @@
 		return 1;
 
 	wqe = &genwqe->wqe;
+	/* Initialize only 64 bytes */
 	memset(wqe, 0, sizeof(union lpfc_wqe));
 
 	genwqe->context3 = (uint8_t *)bmp;
@@ -515,7 +517,8 @@
 	/* Issue GEN REQ WQE for NPORT <did> */
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 			 "6050 Issue GEN REQ WQE to NPORT x%x "
-			 "Data: x%x x%x wq:%p lsreq:%p bmp:%p xmit:%d 1st:%d\n",
+			 "Data: x%x x%x wq:x%px lsreq:x%px bmp:x%px "
+			 "xmit:%d 1st:%d\n",
 			 ndlp->nlp_DID, genwqe->iotag,
 			 vport->port_state,
 			genwqe, pnvme_lsreq, bmp, xmit_len, first_len);
@@ -528,7 +531,7 @@
 	lpfc_nvmeio_data(phba, "NVME LS  XMIT: xri x%x iotag x%x to x%06x\n",
 			 genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID);
 
-	rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, genwqe);
+	rc = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], genwqe);
 	if (rc) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
 				 "6045 Issue GEN REQ WQE to NPORT x%x "
@@ -593,7 +596,7 @@
 	ndlp = rport->ndlp;
 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
-				 "6051 Remoteport %p, rport has invalid ndlp. "
+				 "6051 Remoteport x%px, rport has invalid ndlp. "
 				 "Failing LS Req\n", pnvme_rport);
 		return -ENODEV;
 	}
@@ -645,10 +648,10 @@
 
 	/* Expand print to include key fields. */
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
-			 "6149 Issue LS Req to DID 0x%06x lport %p, rport %p "
-			 "lsreq%p rqstlen:%d rsplen:%d %pad %pad\n",
-			 ndlp->nlp_DID,
-			 pnvme_lport, pnvme_rport,
+			 "6149 Issue LS Req to DID 0x%06x lport x%px, "
+			 "rport x%px lsreq x%px rqstlen:%d rsplen:%d "
+			 "%pad %pad\n",
+			 ndlp->nlp_DID, pnvme_lport, pnvme_rport,
 			 pnvme_lsreq, pnvme_lsreq->rqstlen,
 			 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
 			 &pnvme_lsreq->rspdma);
@@ -664,8 +667,8 @@
 	if (ret != WQE_SUCCESS) {
 		atomic_inc(&lport->xmt_ls_err);
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
-				 "6052 EXIT. issue ls wqe failed lport %p, "
-				 "rport %p lsreq%p Status %x DID %x\n",
+				 "6052 EXIT. issue ls wqe failed lport x%px, "
+				 "rport x%px lsreq x%px Status %x DID %x\n",
 				 pnvme_lport, pnvme_rport, pnvme_lsreq,
 				 ret, ndlp->nlp_DID);
 		lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys);
@@ -722,7 +725,7 @@
 
 	/* Expand print to include key fields. */
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
-			 "6040 ENTER.  lport %p, rport %p lsreq %p rqstlen:%d "
+			 "6040 ENTER.  lport x%px, rport x%px lsreq x%px rqstlen:%d "
 			 "rsplen:%d %pad %pad\n",
 			 pnvme_lport, pnvme_rport,
 			 pnvme_lsreq, pnvme_lsreq->rqstlen,
@@ -760,7 +763,7 @@
 /* Fix up the existing sgls for NVME IO. */
 static inline void
 lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
-		       struct lpfc_nvme_buf *lpfc_ncmd,
+		       struct lpfc_io_buf *lpfc_ncmd,
 		       struct nvmefc_fcp_req *nCmd)
 {
 	struct lpfc_hba  *phba = vport->phba;
@@ -783,7 +786,7 @@
 	 * rather than the virtual memory to ease the restore
 	 * operation.
 	 */
-	sgl = lpfc_ncmd->nvme_sgl;
+	sgl = lpfc_ncmd->dma_sgl;
 	sgl->sge_len = cpu_to_le32(nCmd->cmdlen);
 	if (phba->cfg_nvme_embed_cmd) {
 		sgl->addr_hi = 0;
@@ -857,7 +860,7 @@
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 static void
 lpfc_nvme_ktime(struct lpfc_hba *phba,
-		struct lpfc_nvme_buf *lpfc_ncmd)
+		struct lpfc_io_buf *lpfc_ncmd)
 {
 	uint64_t seg1, seg2, seg3, seg4;
 	uint64_t segsum;
@@ -955,57 +958,54 @@
 lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
 			  struct lpfc_wcqe_complete *wcqe)
 {
-	struct lpfc_nvme_buf *lpfc_ncmd =
-		(struct lpfc_nvme_buf *)pwqeIn->context1;
+	struct lpfc_io_buf *lpfc_ncmd =
+		(struct lpfc_io_buf *)pwqeIn->context1;
 	struct lpfc_vport *vport = pwqeIn->vport;
 	struct nvmefc_fcp_req *nCmd;
 	struct nvme_fc_ersp_iu *ep;
 	struct nvme_fc_cmd_iu *cp;
-	struct lpfc_nvme_rport *rport;
 	struct lpfc_nodelist *ndlp;
 	struct lpfc_nvme_fcpreq_priv *freqpriv;
 	struct lpfc_nvme_lport *lport;
-	struct lpfc_nvme_ctrl_stat *cstat;
-	unsigned long flags;
 	uint32_t code, status, idx;
 	uint16_t cid, sqhd, data;
 	uint32_t *ptr;
 
 	/* Sanity check on return of outstanding command */
-	if (!lpfc_ncmd || !lpfc_ncmd->nvmeCmd || !lpfc_ncmd->nrport) {
-		if (!lpfc_ncmd) {
-			lpfc_printf_vlog(vport, KERN_ERR,
-					 LOG_NODE | LOG_NVME_IOERR,
-					 "6071 Null lpfc_ncmd pointer. No "
-					 "release, skip completion\n");
-			return;
-		}
+	if (!lpfc_ncmd) {
+		lpfc_printf_vlog(vport, KERN_ERR,
+				 LOG_NODE | LOG_NVME_IOERR,
+				 "6071 Null lpfc_ncmd pointer. No "
+				 "release, skip completion\n");
+		return;
+	}
 
+	/* Guard against abort handler being called at same time */
+	spin_lock(&lpfc_ncmd->buf_lock);
+
+	if (!lpfc_ncmd->nvmeCmd) {
+		spin_unlock(&lpfc_ncmd->buf_lock);
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
-				 "6066 Missing cmpl ptrs: lpfc_ncmd %p, "
-				 "nvmeCmd %p nrport %p\n",
-				 lpfc_ncmd, lpfc_ncmd->nvmeCmd,
-				 lpfc_ncmd->nrport);
+				 "6066 Missing cmpl ptrs: lpfc_ncmd x%px, "
+				 "nvmeCmd x%px\n",
+				 lpfc_ncmd, lpfc_ncmd->nvmeCmd);
 
 		/* Release the lpfc_ncmd regardless of the missing elements. */
 		lpfc_release_nvme_buf(phba, lpfc_ncmd);
 		return;
 	}
 	nCmd = lpfc_ncmd->nvmeCmd;
-	rport = lpfc_ncmd->nrport;
 	status = bf_get(lpfc_wcqe_c_status, wcqe);
 
-	if (vport->localport) {
+	idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
+	phba->sli4_hba.hdwq[idx].nvme_cstat.io_cmpls++;
+
+	if (unlikely(status && vport->localport)) {
 		lport = (struct lpfc_nvme_lport *)vport->localport->private;
 		if (lport) {
-			idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
-			cstat = &lport->cstat[idx];
-			atomic_inc(&cstat->fc4NvmeIoCmpls);
-			if (status) {
-				if (bf_get(lpfc_wcqe_c_xb, wcqe))
-					atomic_inc(&lport->cmpl_fcp_xb);
-				atomic_inc(&lport->cmpl_fcp_err);
-			}
+			if (bf_get(lpfc_wcqe_c_xb, wcqe))
+				atomic_inc(&lport->cmpl_fcp_xb);
+			atomic_inc(&lport->cmpl_fcp_err);
 		}
 	}
 
@@ -1016,18 +1016,11 @@
 	 * Catch race where our node has transitioned, but the
 	 * transport is still transitioning.
 	 */
-	ndlp = rport->ndlp;
+	ndlp = lpfc_ncmd->ndlp;
 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
-				 "6061 rport %p,  DID x%06x node not ready.\n",
-				 rport, rport->remoteport->port_id);
-
-		ndlp = lpfc_findnode_did(vport, rport->remoteport->port_id);
-		if (!ndlp) {
-			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
-					 "6062 Ignoring NVME cmpl.  No ndlp\n");
-			goto out_err;
-		}
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
+				 "6062 Ignoring NVME cmpl.  No ndlp\n");
+		goto out_err;
 	}
 
 	code = bf_get(lpfc_wcqe_c_code, wcqe);
@@ -1109,19 +1102,22 @@
 			if (lpfc_ncmd->result == IOERR_ABORT_REQUESTED)
 				lpfc_printf_vlog(vport, KERN_INFO,
 					 LOG_NVME_IOERR,
-					 "6032 Delay Aborted cmd %p "
-					 "nvme cmd %p, xri x%x, "
+					 "6032 Delay Aborted cmd x%px "
+					 "nvme cmd x%px, xri x%x, "
 					 "xb %d\n",
 					 lpfc_ncmd, nCmd,
 					 lpfc_ncmd->cur_iocbq.sli4_xritag,
 					 bf_get(lpfc_wcqe_c_xb, wcqe));
+			/* fall through */
 		default:
 out_err:
 			lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
 					 "6072 NVME Completion Error: xri %x "
-					 "status x%x result x%x placed x%x\n",
+					 "status x%x result x%x [x%x] "
+					 "placed x%x\n",
 					 lpfc_ncmd->cur_iocbq.sli4_xritag,
 					 lpfc_ncmd->status, lpfc_ncmd->result,
+					 wcqe->parameter,
 					 wcqe->total_data_placed);
 			nCmd->transferred_length = 0;
 			nCmd->rcv_rsplen = 0;
@@ -1146,14 +1142,19 @@
 		phba->ktime_last_cmd = lpfc_ncmd->ts_data_nvme;
 		lpfc_nvme_ktime(phba, lpfc_ncmd);
 	}
-	if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
-		if (lpfc_ncmd->cpu != smp_processor_id())
-			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
-					 "6701 CPU Check cmpl: "
-					 "cpu %d expect %d\n",
-					 smp_processor_id(), lpfc_ncmd->cpu);
-		if (lpfc_ncmd->cpu < LPFC_CHECK_CPU_CNT)
-			phba->cpucheck_cmpl_io[lpfc_ncmd->cpu]++;
+	if (unlikely(phba->cpucheck_on & LPFC_CHECK_NVME_IO)) {
+		uint32_t cpu;
+		idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
+		cpu = raw_smp_processor_id();
+		if (cpu < LPFC_CHECK_CPU_CNT) {
+			if (lpfc_ncmd->cpu != cpu)
+				lpfc_printf_vlog(vport,
+						 KERN_INFO, LOG_NVME_IOERR,
+						 "6701 CPU Check cmpl: "
+						 "cpu %d expect %d\n",
+						 cpu, lpfc_ncmd->cpu);
+			phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++;
+		}
 	}
 #endif
 
@@ -1164,13 +1165,11 @@
 	if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
 		freqpriv = nCmd->private;
 		freqpriv->nvme_buf = NULL;
-		nCmd->done(nCmd);
 		lpfc_ncmd->nvmeCmd = NULL;
-	}
-
-	spin_lock_irqsave(&phba->hbalock, flags);
-	lpfc_ncmd->nrport = NULL;
-	spin_unlock_irqrestore(&phba->hbalock, flags);
+		spin_unlock(&lpfc_ncmd->buf_lock);
+		nCmd->done(nCmd);
+	} else
+		spin_unlock(&lpfc_ncmd->buf_lock);
 
 	/* Call release with XB=1 to queue the IO into the abort list. */
 	lpfc_release_nvme_buf(phba, lpfc_ncmd);
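
The buf_lock taken at the top of this completion handler pairs with the same per-buffer lock taken in lpfc_nvme_fcp_abort() further below, so only one of the two paths ever acts on nvmeCmd. Note also that the lock is dropped before nCmd->done(): the upcall must never run under a driver spinlock. A condensed sketch of the pattern with hypothetical types:

#include <linux/spinlock.h>
#include <linux/types.h>

struct ex_io {
	spinlock_t buf_lock;	/* serializes completion against abort */
	void (*done)(struct ex_io *io);
	bool in_flight;
};

static void ex_complete(struct ex_io *io)
{
	spin_lock(&io->buf_lock);
	if (!io->in_flight) {		/* the abort path got here first */
		spin_unlock(&io->buf_lock);
		return;
	}
	io->in_flight = false;
	spin_unlock(&io->buf_lock);	/* drop before the upcall */
	io->done(io);
}
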
@@ -1195,9 +1194,9 @@
  **/
 static int
 lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
-		      struct lpfc_nvme_buf *lpfc_ncmd,
+		      struct lpfc_io_buf *lpfc_ncmd,
 		      struct lpfc_nodelist *pnode,
-		      struct lpfc_nvme_ctrl_stat *cstat)
+		      struct lpfc_fc4_ctrl_stat *cstat)
 {
 	struct lpfc_hba *phba = vport->phba;
 	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
@@ -1205,7 +1204,7 @@
 	union lpfc_wqe128 *wqe = &pwqeq->wqe;
 	uint32_t req_len;
 
-	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
+	if (!NLP_CHK_NODE_ACT(pnode))
 		return -EINVAL;
 
 	/*
@@ -1235,7 +1234,7 @@
 			} else {
 				wqe->fcp_iwrite.initial_xfer_len = 0;
 			}
-			atomic_inc(&cstat->fc4NvmeOutputRequests);
+			cstat->output_requests++;
 		} else {
 			/* From the iread template, initialize words 7 - 11 */
 			memcpy(&wqe->words[7],
@@ -1248,14 +1247,17 @@
 			/* Word 5 */
 			wqe->fcp_iread.rsrvd5 = 0;
 
-			atomic_inc(&cstat->fc4NvmeInputRequests);
+			cstat->input_requests++;
 		}
 	} else {
 		/* From the icmnd template, initialize words 4 - 11 */
 		memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
 		       sizeof(uint32_t) * 8);
-		atomic_inc(&cstat->fc4NvmeControlRequests);
+		cstat->control_requests++;
 	}
+
+	if (pnode->nlp_nvme_info & NLP_NVME_NSLER)
+		bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
 	/*
 	 * Finish initializing those WQE fields that are independent
 	 * of the nvme_cmnd request_buffer
@@ -1301,20 +1303,22 @@
  **/
 static int
 lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
-		      struct lpfc_nvme_buf *lpfc_ncmd)
+		      struct lpfc_io_buf *lpfc_ncmd)
 {
 	struct lpfc_hba *phba = vport->phba;
 	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
 	union lpfc_wqe128 *wqe = &lpfc_ncmd->cur_iocbq.wqe;
-	struct sli4_sge *sgl = lpfc_ncmd->nvme_sgl;
+	struct sli4_sge *sgl = lpfc_ncmd->dma_sgl;
+	struct sli4_hybrid_sgl *sgl_xtra = NULL;
 	struct scatterlist *data_sg;
 	struct sli4_sge *first_data_sgl;
 	struct ulp_bde64 *bde;
-	dma_addr_t physaddr;
+	dma_addr_t physaddr = 0;
 	uint32_t num_bde = 0;
-	uint32_t dma_len;
+	uint32_t dma_len = 0;
 	uint32_t dma_offset = 0;
-	int nseg, i;
+	int nseg, i, j;
+	bool lsp_just_set = false;
 
 	/* Fix up the command and response DMA stuff. */
 	lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd);
@@ -1351,6 +1355,9 @@
 		 */
 		nseg = nCmd->sg_cnt;
 		data_sg = nCmd->first_sgl;
+
+		/* for tracking the segment boundaries */
+		j = 2;
 		for (i = 0; i < nseg; i++) {
 			if (data_sg == NULL) {
 				lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
@@ -1359,23 +1366,76 @@
 				lpfc_ncmd->seg_cnt = 0;
 				return 1;
 			}
-			physaddr = data_sg->dma_address;
-			dma_len = data_sg->length;
-			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
-			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
-			sgl->word2 = le32_to_cpu(sgl->word2);
-			if ((num_bde + 1) == nseg)
-				bf_set(lpfc_sli4_sge_last, sgl, 1);
-			else
-				bf_set(lpfc_sli4_sge_last, sgl, 0);
-			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
-			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
-			sgl->word2 = cpu_to_le32(sgl->word2);
-			sgl->sge_len = cpu_to_le32(dma_len);
 
-			dma_offset += dma_len;
-			data_sg = sg_next(data_sg);
-			sgl++;
+			sgl->word2 = 0;
+			if ((num_bde + 1) == nseg) {
+				bf_set(lpfc_sli4_sge_last, sgl, 1);
+				bf_set(lpfc_sli4_sge_type, sgl,
+				       LPFC_SGE_TYPE_DATA);
+			} else {
+				bf_set(lpfc_sli4_sge_last, sgl, 0);
+
+				/* expand the segment */
+				if (!lsp_just_set &&
+				    !((j + 1) % phba->border_sge_num) &&
+				    ((nseg - 1) != i)) {
+					/* set LSP type */
+					bf_set(lpfc_sli4_sge_type, sgl,
+					       LPFC_SGE_TYPE_LSP);
+
+					sgl_xtra = lpfc_get_sgl_per_hdwq(
+							phba, lpfc_ncmd);
+
+					if (unlikely(!sgl_xtra)) {
+						lpfc_ncmd->seg_cnt = 0;
+						return 1;
+					}
+					sgl->addr_lo = cpu_to_le32(putPaddrLow(
+						       sgl_xtra->dma_phys_sgl));
+					sgl->addr_hi = cpu_to_le32(putPaddrHigh(
+						       sgl_xtra->dma_phys_sgl));
+
+				} else {
+					bf_set(lpfc_sli4_sge_type, sgl,
+					       LPFC_SGE_TYPE_DATA);
+				}
+			}
+
+			if (!(bf_get(lpfc_sli4_sge_type, sgl) &
+				     LPFC_SGE_TYPE_LSP)) {
+				if ((nseg - 1) == i)
+					bf_set(lpfc_sli4_sge_last, sgl, 1);
+
+				physaddr = data_sg->dma_address;
+				dma_len = data_sg->length;
+				sgl->addr_lo = cpu_to_le32(
+							 putPaddrLow(physaddr));
+				sgl->addr_hi = cpu_to_le32(
+							putPaddrHigh(physaddr));
+
+				bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
+				sgl->word2 = cpu_to_le32(sgl->word2);
+				sgl->sge_len = cpu_to_le32(dma_len);
+
+				dma_offset += dma_len;
+				data_sg = sg_next(data_sg);
+
+				sgl++;
+
+				lsp_just_set = false;
+			} else {
+				sgl->word2 = cpu_to_le32(sgl->word2);
+
+				sgl->sge_len = cpu_to_le32(
+						     phba->cfg_sg_dma_buf_size);
+
+				sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
+				i = i - 1;
+
+				lsp_just_set = true;
+			}
+
+			j++;
 		}
 		if (phba->cfg_enable_pbde) {
 			/* Use PBDE support for first SGL only, offset == 0 */
@@ -1395,6 +1455,8 @@
 		}
 
 	} else {
+		lpfc_ncmd->seg_cnt = 0;
+
 		/* For this clause to be valid, the payload_length
 		 * and sg_cnt must be zero.
 		 */
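
The rewritten loop above chains scatter/gather lists on the fly: when the embedded SGE array is about to overflow (every border_sge_num-th slot), the current slot is turned into an LSP link entry whose address points at an extension list obtained from lpfc_get_sgl_per_hdwq(), i is rewound because that slot consumed no scatterlist element, and filling resumes in the new list. A much-simplified sketch of descriptor-list chaining; the layout, names, and segment size here are hypothetical, not the SLI-4 wire format:

#include <linux/types.h>
#include <asm/byteorder.h>

#define EX_SGE_DATA	0
#define EX_SGE_LINK	1	/* entry points at the next segment */
#define EX_SGE_LAST	2
#define EX_SEG_ENTRIES	8	/* descriptors per physical segment */

struct ex_sge {
	__le64 addr;
	__le32 len;
	__le32 flags;
};

/* next_seg() returns the next extension segment and its bus address;
 * error handling is elided for brevity.
 */
static void ex_fill_chain(struct ex_sge *sge, int nseg,
			  const dma_addr_t *addr, const u32 *len,
			  struct ex_sge *(*next_seg)(dma_addr_t *busp))
{
	int slot = 0, i;

	for (i = 0; i < nseg; i++, slot++) {
		if (slot == EX_SEG_ENTRIES - 1 && i != nseg - 1) {
			/* last slot but more data: burn it as a link */
			dma_addr_t bus;
			struct ex_sge *next = next_seg(&bus);

			sge[slot].addr = cpu_to_le64(bus);
			sge[slot].len = cpu_to_le32(EX_SEG_ENTRIES *
						    sizeof(struct ex_sge));
			sge[slot].flags = cpu_to_le32(EX_SGE_LINK);
			sge = next;
			slot = 0;
		}
		sge[slot].addr = cpu_to_le64(addr[i]);
		sge[slot].len = cpu_to_le32(len[i]);
		sge[slot].flags = cpu_to_le32(i == nseg - 1 ? EX_SGE_LAST
							    : EX_SGE_DATA);
	}
}
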
@@ -1434,13 +1496,13 @@
 {
 	int ret = 0;
 	int expedite = 0;
-	int idx;
+	int idx, cpu;
 	struct lpfc_nvme_lport *lport;
-	struct lpfc_nvme_ctrl_stat *cstat;
+	struct lpfc_fc4_ctrl_stat *cstat;
 	struct lpfc_vport *vport;
 	struct lpfc_hba *phba;
 	struct lpfc_nodelist *ndlp;
-	struct lpfc_nvme_buf *lpfc_ncmd;
+	struct lpfc_io_buf *lpfc_ncmd;
 	struct lpfc_nvme_rport *rport;
 	struct lpfc_nvme_qhandle *lpfc_queue_info;
 	struct lpfc_nvme_fcpreq_priv *freqpriv;
@@ -1475,7 +1537,7 @@
 		goto out_fail;
 	}
 
-	if (vport->load_flag & FC_UNLOADING) {
+	if (unlikely(vport->load_flag & FC_UNLOADING)) {
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
 				 "6124 Fail IO, Driver unload\n");
 		atomic_inc(&lport->xmt_fcp_err);
@@ -1506,8 +1568,8 @@
 	ndlp = rport->ndlp;
 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
-				 "6053 Fail IO, ndlp not ready: rport %p "
-				  "ndlp %p, DID x%06x\n",
+				 "6053 Busy IO, ndlp not ready: rport x%px "
+				  "ndlp x%px, DID x%06x\n",
 				 rport, ndlp, pnvme_rport->port_id);
 		atomic_inc(&lport->xmt_fcp_err);
 		ret = -EBUSY;
@@ -1558,7 +1620,15 @@
 		}
 	}
 
-	lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, expedite);
+	/* Lookup Hardware Queue index based on fcp_io_sched module parameter */
+	if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
+		idx = lpfc_queue_info->index;
+	} else {
+		cpu = raw_smp_processor_id();
+		idx = phba->sli4_hba.cpu_map[cpu].hdwq;
+	}
+
+	lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, idx, expedite);
 	if (lpfc_ncmd == NULL) {
 		atomic_inc(&lport->xmt_fcp_noxri);
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
@@ -1585,9 +1655,8 @@
 	 */
 	freqpriv->nvme_buf = lpfc_ncmd;
 	lpfc_ncmd->nvmeCmd = pnvme_fcreq;
-	lpfc_ncmd->nrport = rport;
 	lpfc_ncmd->ndlp = ndlp;
-	lpfc_ncmd->start_time = jiffies;
+	lpfc_ncmd->qidx = lpfc_queue_info->qidx;
 
 	/*
 	 * Issue the IO on the WQ indicated by index in the hw_queue_handle.
@@ -1597,9 +1666,8 @@
 	 * index to use and that they have affinitized a CPU to this hardware
 	 * queue. A hardware queue maps to a driver MSI-X vector/EQ/CQ/WQ.
 	 */
-	idx = lpfc_queue_info->index;
 	lpfc_ncmd->cur_iocbq.hba_wqidx = idx;
-	cstat = &lport->cstat[idx];
+	cstat = &phba->sli4_hba.hdwq[idx].nvme_cstat;
 
 	lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp, cstat);
 	ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd);
@@ -1617,7 +1685,7 @@
 			 lpfc_ncmd->cur_iocbq.sli4_xritag,
 			 lpfc_queue_info->index, ndlp->nlp_DID);
 
-	ret = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, &lpfc_ncmd->cur_iocbq);
+	ret = lpfc_sli4_issue_wqe(phba, lpfc_ncmd->hdwq, &lpfc_ncmd->cur_iocbq);
 	if (ret) {
 		atomic_inc(&lport->xmt_fcp_wqerr);
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
@@ -1628,26 +1696,26 @@
 		goto out_free_nvme_buf;
 	}
 
+	if (phba->cfg_xri_rebalancing)
+		lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_ncmd->hdwq_no);
+
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	if (lpfc_ncmd->ts_cmd_start)
 		lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();
 
 	if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
-		lpfc_ncmd->cpu = smp_processor_id();
-		if (lpfc_ncmd->cpu != lpfc_queue_info->index) {
-			/* Check for admin queue */
-			if (lpfc_queue_info->qidx) {
+		cpu = raw_smp_processor_id();
+		if (cpu < LPFC_CHECK_CPU_CNT) {
+			lpfc_ncmd->cpu = cpu;
+			if (idx != cpu)
 				lpfc_printf_vlog(vport,
-						 KERN_ERR, LOG_NVME_IOERR,
+						 KERN_INFO, LOG_NVME_IOERR,
 						"6702 CPU Check cmd: "
 						"cpu %d wq %d\n",
 						lpfc_ncmd->cpu,
 						lpfc_queue_info->index);
-			}
-			lpfc_ncmd->cpu = lpfc_queue_info->index;
+			phba->sli4_hba.hdwq[idx].cpucheck_xmt_io[cpu]++;
 		}
-		if (lpfc_ncmd->cpu < LPFC_CHECK_CPU_CNT)
-			phba->cpucheck_xmt_io[lpfc_ncmd->cpu]++;
 	}
 #endif
 	return 0;
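
In the non-BY_HDWQ branch added above, the submitting CPU indexes phba->sli4_hba.cpu_map to find the hardware queue whose interrupt is affined to it, so submission and completion processing stay on the same CPU. A minimal sketch of that lookup, with a hypothetical map type:

#include <linux/smp.h>
#include <linux/types.h>

struct ex_cpu_map {
	u16 hdwq;	/* hardware queue whose IRQ is affined to this CPU */
};

static u16 ex_pick_hdwq(const struct ex_cpu_map *map)
{
	/* raw_: a stale answer after migration costs locality, not safety */
	return map[raw_smp_processor_id()].hdwq;
}
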
@@ -1655,11 +1723,11 @@
  out_free_nvme_buf:
 	if (lpfc_ncmd->nvmeCmd->sg_cnt) {
 		if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE)
-			atomic_dec(&cstat->fc4NvmeOutputRequests);
+			cstat->output_requests--;
 		else
-			atomic_dec(&cstat->fc4NvmeInputRequests);
+			cstat->input_requests--;
 	} else
-		atomic_dec(&cstat->fc4NvmeControlRequests);
+		cstat->control_requests--;
 	lpfc_release_nvme_buf(phba, lpfc_ncmd);
  out_fail:
 	return ret;
@@ -1719,7 +1787,7 @@
 	struct lpfc_nvme_lport *lport;
 	struct lpfc_vport *vport;
 	struct lpfc_hba *phba;
-	struct lpfc_nvme_buf *lpfc_nbuf;
+	struct lpfc_io_buf *lpfc_nbuf;
 	struct lpfc_iocbq *abts_buf;
 	struct lpfc_iocbq *nvmereq_wqe;
 	struct lpfc_nvme_fcpreq_priv *freqpriv;
@@ -1753,7 +1821,7 @@
 	/* Announce entry to new IO submit field. */
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
 			 "6002 Abort Request to rport DID x%06x "
-			 "for nvme_fc_req %p\n",
+			 "for nvme_fc_req x%px\n",
 			 pnvme_rport->port_id,
 			 pnvme_fcreq);
 
@@ -1762,7 +1830,7 @@
 	 */
 	spin_lock_irqsave(&phba->hbalock, flags);
 	/* driver queued commands are in process of being flushed */
-	if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
+	if (phba->hba_flag & HBA_IOQ_FLUSH) {
 		spin_unlock_irqrestore(&phba->hbalock, flags);
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
 				 "6139 Driver in reset cleanup - flushing "
@@ -1787,6 +1855,9 @@
 	}
 	nvmereq_wqe = &lpfc_nbuf->cur_iocbq;
 
+	/* Guard against IO completion being called at same time */
+	spin_lock(&lpfc_nbuf->buf_lock);
+
 	/*
 	 * The lpfc_nbuf and the mapped nvme_fcreq in the driver's
 	 * state must match the nvme_fcreq passed by the nvme
@@ -1795,24 +1866,22 @@
 	 * has not seen it yet.
 	 */
 	if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) {
-		spin_unlock_irqrestore(&phba->hbalock, flags);
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
 				 "6143 NVME req mismatch: "
-				 "lpfc_nbuf %p nvmeCmd %p, "
-				 "pnvme_fcreq %p.  Skipping Abort xri x%x\n",
+				 "lpfc_nbuf x%px nvmeCmd x%px, "
+				 "pnvme_fcreq x%px.  Skipping Abort xri x%x\n",
 				 lpfc_nbuf, lpfc_nbuf->nvmeCmd,
 				 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
-		return;
+		goto out_unlock;
 	}
 
 	/* Don't abort IOs no longer on the pending queue. */
 	if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
-		spin_unlock_irqrestore(&phba->hbalock, flags);
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
-				 "6142 NVME IO req %p not queued - skipping "
+				 "6142 NVME IO req x%px not queued - skipping "
 				 "abort req xri x%x\n",
 				 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
-		return;
+		goto out_unlock;
 	}
 
 	atomic_inc(&lport->xmt_fcp_abort);
@@ -1822,24 +1891,22 @@
 
 	/* Outstanding abort is in progress */
 	if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) {
-		spin_unlock_irqrestore(&phba->hbalock, flags);
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
 				 "6144 Outstanding NVME I/O Abort Request "
-				 "still pending on nvme_fcreq %p, "
-				 "lpfc_ncmd %p xri x%x\n",
+				 "still pending on nvme_fcreq x%px, "
+				 "lpfc_ncmd %px xri x%x\n",
 				 pnvme_fcreq, lpfc_nbuf,
 				 nvmereq_wqe->sli4_xritag);
-		return;
+		goto out_unlock;
 	}
 
 	abts_buf = __lpfc_sli_get_iocbq(phba);
 	if (!abts_buf) {
-		spin_unlock_irqrestore(&phba->hbalock, flags);
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
 				 "6136 No available abort wqes. Skipping "
-				 "Abts req for nvme_fcreq %p xri x%x\n",
+				 "Abts req for nvme_fcreq x%px xri x%x\n",
 				 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
-		return;
+		goto out_unlock;
 	}
 
 	/* Ready - mark outstanding as aborted by driver. */
@@ -1851,11 +1918,10 @@
 	/* WQEs are reused.  Clear stale data and set key fields to
 	 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
 	 */
-	memset(abts_wqe, 0, sizeof(union lpfc_wqe));
+	memset(abts_wqe, 0, sizeof(*abts_wqe));
 	bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
 
 	/* word 7 */
-	bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
 	bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
 	bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
 	       nvmereq_wqe->iocb.ulpClass);
@@ -1870,7 +1936,6 @@
 	       abts_buf->iotag);
 
 	/* word 10 */
-	bf_set(wqe_wqid, &abts_wqe->abort_cmd.wqe_com, nvmereq_wqe->hba_wqidx);
 	bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
 	bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
 
@@ -1884,12 +1949,13 @@
 	abts_buf->hba_wqidx = nvmereq_wqe->hba_wqidx;
 	abts_buf->vport = vport;
 	abts_buf->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
-	ret_val = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_buf);
+	ret_val = lpfc_sli4_issue_wqe(phba, lpfc_nbuf->hdwq, abts_buf);
+	spin_unlock(&lpfc_nbuf->buf_lock);
 	spin_unlock_irqrestore(&phba->hbalock, flags);
 	if (ret_val) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
 				 "6137 Failed abts issue_wqe with status x%x "
-				 "for nvme_fcreq %p.\n",
+				 "for nvme_fcreq x%px.\n",
 				 ret_val, pnvme_fcreq);
 		lpfc_sli_release_iocbq(phba, abts_buf);
 		return;
@@ -1900,6 +1966,12 @@
 			 "ox_id x%x on reqtag x%x\n",
 			 nvmereq_wqe->sli4_xritag,
 			 abts_buf->iotag);
+	return;
+
+out_unlock:
+	spin_unlock(&lpfc_nbuf->buf_lock);
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+	return;
 }
 
 /* Declare and initialize an instance of the FC NVME template. */
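
The abort rework above replaces every early return taken while both hbalock and the buffer's buf_lock are held with a single out_unlock exit, releasing the locks in reverse acquisition order. A generic sketch of the single-exit unlock pattern:

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static int ex_abort(spinlock_t *outer, spinlock_t *inner, bool valid)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(outer, flags);
	spin_lock(inner);

	if (!valid) {
		ret = -ENOENT;
		goto out_unlock;	/* every bail-out funnels here */
	}

	/* ... issue the abort while both locks are held ... */

out_unlock:
	spin_unlock(inner);			/* reverse order */
	spin_unlock_irqrestore(outer, flags);
	return ret;
}
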
@@ -1929,456 +2001,63 @@
 };
 
 /**
- * lpfc_sli4_post_nvme_sgl_block - post a block of nvme sgl list to firmware
- * @phba: pointer to lpfc hba data structure.
- * @nblist: pointer to nvme buffer list.
- * @count: number of scsi buffers on the list.
- *
- * This routine is invoked to post a block of @count scsi sgl pages from a
- * SCSI buffer list @nblist to the HBA using non-embedded mailbox command.
- * No Lock is held.
- *
- **/
-static int
-lpfc_sli4_post_nvme_sgl_block(struct lpfc_hba *phba,
-			      struct list_head *nblist,
-			      int count)
-{
-	struct lpfc_nvme_buf *lpfc_ncmd;
-	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
-	struct sgl_page_pairs *sgl_pg_pairs;
-	void *viraddr;
-	LPFC_MBOXQ_t *mbox;
-	uint32_t reqlen, alloclen, pg_pairs;
-	uint32_t mbox_tmo;
-	uint16_t xritag_start = 0;
-	int rc = 0;
-	uint32_t shdr_status, shdr_add_status;
-	dma_addr_t pdma_phys_bpl1;
-	union lpfc_sli4_cfg_shdr *shdr;
-
-	/* Calculate the requested length of the dma memory */
-	reqlen = count * sizeof(struct sgl_page_pairs) +
-		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
-	if (reqlen > SLI4_PAGE_SIZE) {
-		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
-				"6118 Block sgl registration required DMA "
-				"size (%d) great than a page\n", reqlen);
-		return -ENOMEM;
-	}
-	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-	if (!mbox) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"6119 Failed to allocate mbox cmd memory\n");
-		return -ENOMEM;
-	}
-
-	/* Allocate DMA memory and set up the non-embedded mailbox command */
-	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
-				LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
-				LPFC_SLI4_MBX_NEMBED);
-
-	if (alloclen < reqlen) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"6120 Allocated DMA memory size (%d) is "
-				"less than the requested DMA memory "
-				"size (%d)\n", alloclen, reqlen);
-		lpfc_sli4_mbox_cmd_free(phba, mbox);
-		return -ENOMEM;
-	}
-
-	/* Get the first SGE entry from the non-embedded DMA memory */
-	viraddr = mbox->sge_array->addr[0];
-
-	/* Set up the SGL pages in the non-embedded DMA pages */
-	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
-	sgl_pg_pairs = &sgl->sgl_pg_pairs;
-
-	pg_pairs = 0;
-	list_for_each_entry(lpfc_ncmd, nblist, list) {
-		/* Set up the sge entry */
-		sgl_pg_pairs->sgl_pg0_addr_lo =
-			cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
-		sgl_pg_pairs->sgl_pg0_addr_hi =
-			cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
-		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
-			pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
-						SGL_PAGE_SIZE;
-		else
-			pdma_phys_bpl1 = 0;
-		sgl_pg_pairs->sgl_pg1_addr_lo =
-			cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
-		sgl_pg_pairs->sgl_pg1_addr_hi =
-			cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
-		/* Keep the first xritag on the list */
-		if (pg_pairs == 0)
-			xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
-		sgl_pg_pairs++;
-		pg_pairs++;
-	}
-	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
-	bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
-	/* Perform endian conversion if necessary */
-	sgl->word0 = cpu_to_le32(sgl->word0);
-
-	if (!phba->sli4_hba.intr_enable)
-		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
-	else {
-		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
-		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
-	}
-	shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
-	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
-	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
-	if (rc != MBX_TIMEOUT)
-		lpfc_sli4_mbox_cmd_free(phba, mbox);
-	if (shdr_status || shdr_add_status || rc) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-				"6125 POST_SGL_BLOCK mailbox command failed "
-				"status x%x add_status x%x mbx status x%x\n",
-				shdr_status, shdr_add_status, rc);
-		rc = -ENXIO;
-	}
-	return rc;
-}
-
-/**
- * lpfc_post_nvme_sgl_list - Post blocks of nvme buffer sgls from a list
- * @phba: pointer to lpfc hba data structure.
- * @post_nblist: pointer to the nvme buffer list.
- *
- * This routine walks a list of nvme buffers that was passed in. It attempts
- * to construct blocks of nvme buffer sgls which contains contiguous xris and
- * uses the non-embedded SGL block post mailbox commands to post to the port.
- * For single NVME buffer sgl with non-contiguous xri, if any, it shall use
- * embedded SGL post mailbox command for posting. The @post_nblist passed in
- * must be local list, thus no lock is needed when manipulate the list.
- *
- * Returns: 0 = failure, non-zero number of successfully posted buffers.
- **/
-static int
-lpfc_post_nvme_sgl_list(struct lpfc_hba *phba,
-			     struct list_head *post_nblist, int sb_count)
-{
-	struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
-	int status, sgl_size;
-	int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
-	dma_addr_t pdma_phys_sgl1;
-	int last_xritag = NO_XRI;
-	int cur_xritag;
-	LIST_HEAD(prep_nblist);
-	LIST_HEAD(blck_nblist);
-	LIST_HEAD(nvme_nblist);
-
-	/* sanity check */
-	if (sb_count <= 0)
-		return -EINVAL;
-
-	sgl_size = phba->cfg_sg_dma_buf_size;
-
-	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
-		list_del_init(&lpfc_ncmd->list);
-		block_cnt++;
-		if ((last_xritag != NO_XRI) &&
-		    (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
-			/* a hole in xri block, form a sgl posting block */
-			list_splice_init(&prep_nblist, &blck_nblist);
-			post_cnt = block_cnt - 1;
-			/* prepare list for next posting block */
-			list_add_tail(&lpfc_ncmd->list, &prep_nblist);
-			block_cnt = 1;
-		} else {
-			/* prepare list for next posting block */
-			list_add_tail(&lpfc_ncmd->list, &prep_nblist);
-			/* enough sgls for non-embed sgl mbox command */
-			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
-				list_splice_init(&prep_nblist, &blck_nblist);
-				post_cnt = block_cnt;
-				block_cnt = 0;
-			}
-		}
-		num_posting++;
-		last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
-
-		/* end of repost sgl list condition for NVME buffers */
-		if (num_posting == sb_count) {
-			if (post_cnt == 0) {
-				/* last sgl posting block */
-				list_splice_init(&prep_nblist, &blck_nblist);
-				post_cnt = block_cnt;
-			} else if (block_cnt == 1) {
-				/* last single sgl with non-contiguous xri */
-				if (sgl_size > SGL_PAGE_SIZE)
-					pdma_phys_sgl1 =
-						lpfc_ncmd->dma_phys_sgl +
-						SGL_PAGE_SIZE;
-				else
-					pdma_phys_sgl1 = 0;
-				cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
-				status = lpfc_sli4_post_sgl(phba,
-						lpfc_ncmd->dma_phys_sgl,
-						pdma_phys_sgl1, cur_xritag);
-				if (status) {
-					/* failure, put on abort nvme list */
-					lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
-				} else {
-					/* success, put on NVME buffer list */
-					lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
-					lpfc_ncmd->status = IOSTAT_SUCCESS;
-					num_posted++;
-				}
-				/* success, put on NVME buffer sgl list */
-				list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
-			}
-		}
-
-		/* continue until a nembed page worth of sgls */
-		if (post_cnt == 0)
-			continue;
-
-		/* post block of NVME buffer list sgls */
-		status = lpfc_sli4_post_nvme_sgl_block(phba, &blck_nblist,
-						       post_cnt);
-
-		/* don't reset xirtag due to hole in xri block */
-		if (block_cnt == 0)
-			last_xritag = NO_XRI;
-
-		/* reset NVME buffer post count for next round of posting */
-		post_cnt = 0;
-
-		/* put posted NVME buffer-sgl posted on NVME buffer sgl list */
-		while (!list_empty(&blck_nblist)) {
-			list_remove_head(&blck_nblist, lpfc_ncmd,
-					 struct lpfc_nvme_buf, list);
-			if (status) {
-				/* failure, put on abort nvme list */
-				lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
-			} else {
-				/* success, put on NVME buffer list */
-				lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
-				lpfc_ncmd->status = IOSTAT_SUCCESS;
-				num_posted++;
-			}
-			list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
-		}
-	}
-	/* Push NVME buffers with sgl posted to the available list */
-	while (!list_empty(&nvme_nblist)) {
-		list_remove_head(&nvme_nblist, lpfc_ncmd,
-				 struct lpfc_nvme_buf, list);
-		lpfc_release_nvme_buf(phba, lpfc_ncmd);
-	}
-	return num_posted;
-}
-
-/**
- * lpfc_repost_nvme_sgl_list - Repost all the allocated nvme buffer sgls
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine walks the list of nvme buffers that have been allocated and
- * repost them to the port by using SGL block post. This is needed after a
- * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
- * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
- * to the lpfc_nvme_buf_list. If the repost fails, reject all nvme buffers.
- *
- * Returns: 0 = success, non-zero failure.
- **/
-int
-lpfc_repost_nvme_sgl_list(struct lpfc_hba *phba)
-{
-	LIST_HEAD(post_nblist);
-	int num_posted, rc = 0;
-
-	/* get all NVME buffers need to repost to a local list */
-	spin_lock_irq(&phba->nvme_buf_list_get_lock);
-	spin_lock(&phba->nvme_buf_list_put_lock);
-	list_splice_init(&phba->lpfc_nvme_buf_list_get, &post_nblist);
-	list_splice(&phba->lpfc_nvme_buf_list_put, &post_nblist);
-	phba->get_nvme_bufs = 0;
-	phba->put_nvme_bufs = 0;
-	spin_unlock(&phba->nvme_buf_list_put_lock);
-	spin_unlock_irq(&phba->nvme_buf_list_get_lock);
-
-	/* post the list of nvme buffer sgls to port if available */
-	if (!list_empty(&post_nblist)) {
-		num_posted = lpfc_post_nvme_sgl_list(phba, &post_nblist,
-						phba->sli4_hba.nvme_xri_cnt);
-		/* failed to post any nvme buffer, return error */
-		if (num_posted == 0)
-			rc = -EIO;
-	}
-	return rc;
-}
-
-/**
- * lpfc_new_nvme_buf - Scsi buffer allocator for HBA with SLI4 IF spec
- * @vport: The virtual port for which this call being executed.
- * @num_to_allocate: The requested number of buffers to allocate.
- *
- * This routine allocates nvme buffers for device with SLI-4 interface spec,
- * the nvme buffer contains all the necessary information needed to initiate
- * a NVME I/O. After allocating up to @num_to_allocate NVME buffers and put
- * them on a list, it post them to the port by using SGL block post.
- *
- * Return codes:
- *   int - number of nvme buffers that were allocated and posted.
- *   0 = failure, less than num_to_alloc is a partial failure.
- **/
-static int
-lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
-{
-	struct lpfc_hba *phba = vport->phba;
-	struct lpfc_nvme_buf *lpfc_ncmd;
-	struct lpfc_iocbq *pwqeq;
-	union lpfc_wqe128 *wqe;
-	struct sli4_sge *sgl;
-	dma_addr_t pdma_phys_sgl;
-	uint16_t iotag, lxri = 0;
-	int bcnt, num_posted, sgl_size;
-	LIST_HEAD(prep_nblist);
-	LIST_HEAD(post_nblist);
-	LIST_HEAD(nvme_nblist);
-
-	sgl_size = phba->cfg_sg_dma_buf_size;
-
-	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
-		lpfc_ncmd = kzalloc(sizeof(struct lpfc_nvme_buf), GFP_KERNEL);
-		if (!lpfc_ncmd)
-			break;
-		/*
-		 * Get memory from the pci pool to map the virt space to
-		 * pci bus space for an I/O. The DMA buffer includes the
-		 * number of SGE's necessary to support the sg_tablesize.
-		 */
-		lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
-						  GFP_KERNEL,
-						  &lpfc_ncmd->dma_handle);
-		if (!lpfc_ncmd->data) {
-			kfree(lpfc_ncmd);
-			break;
-		}
-
-		lxri = lpfc_sli4_next_xritag(phba);
-		if (lxri == NO_XRI) {
-			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
-				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
-			kfree(lpfc_ncmd);
-			break;
-		}
-		pwqeq = &(lpfc_ncmd->cur_iocbq);
-		wqe = &pwqeq->wqe;
-
-		/* Allocate iotag for lpfc_ncmd->cur_iocbq. */
-		iotag = lpfc_sli_next_iotag(phba, pwqeq);
-		if (iotag == 0) {
-			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
-				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
-			kfree(lpfc_ncmd);
-			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
-					"6121 Failed to allocated IOTAG for"
-					" XRI:0x%x\n", lxri);
-			lpfc_sli4_free_xri(phba, lxri);
-			break;
-		}
-		pwqeq->sli4_lxritag = lxri;
-		pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
-		pwqeq->iocb_flag |= LPFC_IO_NVME;
-		pwqeq->context1 = lpfc_ncmd;
-		pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl;
-
-		/* Initialize local short-hand pointers. */
-		lpfc_ncmd->nvme_sgl = lpfc_ncmd->data;
-		sgl = lpfc_ncmd->nvme_sgl;
-		pdma_phys_sgl = lpfc_ncmd->dma_handle;
-		lpfc_ncmd->dma_phys_sgl = pdma_phys_sgl;
-
-		/* Rsp SGE will be filled in when we rcv an IO
-		 * from the NVME Layer to be sent.
-		 * The cmd is going to be embedded so we need a SKIP SGE.
-		 */
-		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
-		bf_set(lpfc_sli4_sge_last, sgl, 0);
-		sgl->word2 = cpu_to_le32(sgl->word2);
-		/* Fill in word 3 / sgl_len during cmd submission */
-
-		lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
-
-		/* Initialize WQE */
-		memset(wqe, 0, sizeof(union lpfc_wqe));
-
-		/* add the nvme buffer to a post list */
-		list_add_tail(&lpfc_ncmd->list, &post_nblist);
-		spin_lock_irq(&phba->nvme_buf_list_get_lock);
-		phba->sli4_hba.nvme_xri_cnt++;
-		spin_unlock_irq(&phba->nvme_buf_list_get_lock);
-	}
-	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
-			"6114 Allocate %d out of %d requested new NVME "
-			"buffers\n", bcnt, num_to_alloc);
-
-	/* post the list of nvme buffer sgls to port if available */
-	if (!list_empty(&post_nblist))
-		num_posted = lpfc_post_nvme_sgl_list(phba,
-						     &post_nblist, bcnt);
-	else
-		num_posted = 0;
-
-	return num_posted;
-}
-
-static inline struct lpfc_nvme_buf *
-lpfc_nvme_buf(struct lpfc_hba *phba)
-{
-	struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
-
-	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
-				 &phba->lpfc_nvme_buf_list_get, list) {
-		list_del_init(&lpfc_ncmd->list);
-		phba->get_nvme_bufs--;
-		return lpfc_ncmd;
-	}
-	return NULL;
-}
-
-/**
- * lpfc_get_nvme_buf - Get a nvme buffer from lpfc_nvme_buf_list of the HBA
+ * lpfc_get_nvme_buf - Get a nvme buffer from io_buf_list of the HBA
  * @phba: The HBA for which this call is being executed.
  *
- * This routine removes a nvme buffer from head of @phba lpfc_nvme_buf_list list
+ * This routine removes an nvme buffer from the head of @hdwq io_buf_list
  * and returns it to the caller.
  *
  * Return codes:
  *   NULL - Error
  *   Pointer to lpfc_nvme_buf - Success
  **/
-static struct lpfc_nvme_buf *
+static struct lpfc_io_buf *
 lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
-		  int expedite)
+		  int idx, int expedite)
 {
-	struct lpfc_nvme_buf *lpfc_ncmd = NULL;
-	unsigned long iflag = 0;
+	struct lpfc_io_buf *lpfc_ncmd;
+	struct lpfc_sli4_hdw_queue *qp;
+	struct sli4_sge *sgl;
+	struct lpfc_iocbq *pwqeq;
+	union lpfc_wqe128 *wqe;
 
-	spin_lock_irqsave(&phba->nvme_buf_list_get_lock, iflag);
-	if (phba->get_nvme_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
-		lpfc_ncmd = lpfc_nvme_buf(phba);
-	if (!lpfc_ncmd) {
-		spin_lock(&phba->nvme_buf_list_put_lock);
-		list_splice(&phba->lpfc_nvme_buf_list_put,
-			    &phba->lpfc_nvme_buf_list_get);
-		phba->get_nvme_bufs += phba->put_nvme_bufs;
-		INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
-		phba->put_nvme_bufs = 0;
-		spin_unlock(&phba->nvme_buf_list_put_lock);
-		if (phba->get_nvme_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
-			lpfc_ncmd = lpfc_nvme_buf(phba);
-	}
-	spin_unlock_irqrestore(&phba->nvme_buf_list_get_lock, iflag);
+	lpfc_ncmd = lpfc_get_io_buf(phba, NULL, idx, expedite);
 
-	if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_ncmd) {
-		atomic_inc(&ndlp->cmd_pending);
-		lpfc_ncmd->flags |= LPFC_BUMP_QDEPTH;
+	if (lpfc_ncmd) {
+		pwqeq = &(lpfc_ncmd->cur_iocbq);
+		wqe = &pwqeq->wqe;
+
+		/* Setup key fields in buffer that may have been changed
+		 * if other protocols used this buffer.
+		 */
+		pwqeq->iocb_flag = LPFC_IO_NVME;
+		pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl;
+		lpfc_ncmd->start_time = jiffies;
+		lpfc_ncmd->flags = 0;
+
+		/* Rsp SGE will be filled in when we rcv an IO
+		 * from the NVME Layer to be sent.
+		 * The cmd is going to be embedded so we need a SKIP SGE.
+		 */
+		sgl = lpfc_ncmd->dma_sgl;
+		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
+		bf_set(lpfc_sli4_sge_last, sgl, 0);
+		sgl->word2 = cpu_to_le32(sgl->word2);
+		/* Fill in word 3 / sgl_len during cmd submission */
+
+		/* Initialize 64 bytes only */
+		memset(wqe, 0, sizeof(union lpfc_wqe));
+
+		if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
+			atomic_inc(&ndlp->cmd_pending);
+			lpfc_ncmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
+		}
+
+	} else {
+		qp = &phba->sli4_hba.hdwq[idx];
+		qp->empty_io_bufs++;
 	}
+
 	return  lpfc_ncmd;
 }
 
@@ -2388,22 +2067,23 @@
  * @lpfc_ncmd: The nvme buffer which is being released.
  *
  * This routine releases @lpfc_ncmd nvme buffer by adding it to tail of @phba
- * lpfc_nvme_buf_list list. For SLI4 XRI's are tied to the nvme buffer
+ * lpfc_io_buf_list list. For SLI4 XRI's are tied to the nvme buffer
  * and cannot be reused for at least RA_TOV amount of time if it was
  * aborted.
  **/
 static void
-lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
+lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd)
 {
+	struct lpfc_sli4_hdw_queue *qp;
 	unsigned long iflag = 0;
 
-	if ((lpfc_ncmd->flags & LPFC_BUMP_QDEPTH) && lpfc_ncmd->ndlp)
+	if ((lpfc_ncmd->flags & LPFC_SBUF_BUMP_QDEPTH) && lpfc_ncmd->ndlp)
 		atomic_dec(&lpfc_ncmd->ndlp->cmd_pending);
 
-	lpfc_ncmd->nonsg_phys = 0;
 	lpfc_ncmd->ndlp = NULL;
-	lpfc_ncmd->flags &= ~LPFC_BUMP_QDEPTH;
+	lpfc_ncmd->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
 
+	qp = lpfc_ncmd->hdwq;
 	if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) {
 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
 				"6310 XB release deferred for "
@@ -2411,20 +2091,13 @@
 				lpfc_ncmd->cur_iocbq.sli4_xritag,
 				lpfc_ncmd->cur_iocbq.iotag);
 
-		spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock,
-					iflag);
+		spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
 		list_add_tail(&lpfc_ncmd->list,
-			&phba->sli4_hba.lpfc_abts_nvme_buf_list);
-		spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock,
-					iflag);
-	} else {
-		lpfc_ncmd->nvmeCmd = NULL;
-		lpfc_ncmd->cur_iocbq.iocb_flag = LPFC_IO_NVME;
-		spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
-		list_add_tail(&lpfc_ncmd->list, &phba->lpfc_nvme_buf_list_put);
-		phba->put_nvme_bufs++;
-		spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);
-	}
+			&qp->lpfc_abts_io_buf_list);
+		qp->abts_nvme_io_bufs++;
+		spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
+	} else
+		lpfc_release_io_buf(phba, (struct lpfc_io_buf *)lpfc_ncmd, qp);
 }
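
Release is the mirror image, with the twist shown above: an exchange-busy
(XB) buffer cannot rejoin the free pool until the adapter surrenders the
XRI, so it is parked on a per-queue abort list instead. A hedged sketch,
extending the hypothetical io_pool from the previous example with an
assumed abts_list/abts_lock pair; xb_busy stands in for the
LPFC_SBUF_XBUSY test.

	static void pool_put(struct io_pool *qp, struct io_buf *buf,
			     bool xb_busy)
	{
		unsigned long flags;

		if (xb_busy) {
			/* XRI still owned by the adapter: hold the buffer
			 * until the XRI-aborted CQE releases it.
			 */
			spin_lock_irqsave(&qp->abts_lock, flags);
			list_add_tail(&buf->list, &qp->abts_list);
			spin_unlock_irqrestore(&qp->abts_lock, flags);
			return;
		}

		spin_lock_irqsave(&qp->lock, flags);
		list_add_tail(&buf->list, &qp->free_list);
		spin_unlock_irqrestore(&qp->lock, flags);
	}
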
 
 /**
@@ -2451,8 +2124,6 @@
 	struct nvme_fc_port_info nfcp_info;
 	struct nvme_fc_local_port *localport;
 	struct lpfc_nvme_lport *lport;
-	struct lpfc_nvme_ctrl_stat *cstat;
-	int len, i;
 
 	/* Initialize this localport instance.  The vport wwn usage ensures
 	 * that NPIV is accounted for.
@@ -2462,39 +2133,33 @@
 	nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
 	nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
 
-	/* Limit to LPFC_MAX_NVME_SEG_CNT.
-	 * For now need + 1 to get around NVME transport logic.
+	/* We need to tell the transport layer + 1 because it takes page
+	 * alignment into account. When space for the SGL is allocated we
+	 * allocate + 3: one for the cmd, one for the rsp, and one for this alignment.
 	 */
-	if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
-		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_INIT,
-				 "6300 Reducing sg segment cnt to %d\n",
-				 LPFC_MAX_NVME_SEG_CNT);
-		phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
-	} else {
-		phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
-	}
 	lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
-	lpfc_nvme_template.max_hw_queues = phba->cfg_nvme_io_channel;
 
-	cstat = kmalloc((sizeof(struct lpfc_nvme_ctrl_stat) *
-			phba->cfg_nvme_io_channel), GFP_KERNEL);
-	if (!cstat)
-		return -ENOMEM;
+	/* Advertise how many hw queues we support based on fcp_io_sched */
+	if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ)
+		lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue;
+	else
+		lpfc_nvme_template.max_hw_queues =
+			phba->sli4_hba.num_present_cpu;
+
+	if (!IS_ENABLED(CONFIG_NVME_FC))
+		return ret;
 
 	/* localport is allocated from the stack, but the registration
 	 * call allocates heap memory as well as the private area.
 	 */
-#if (IS_ENABLED(CONFIG_NVME_FC))
+
 	ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
 					 &vport->phba->pcidev->dev, &localport);
-#else
-	ret = -ENOMEM;
-#endif
 	if (!ret) {
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
 				 "6005 Successfully registered local "
-				 "NVME port num %d, localP %p, private %p, "
-				 "sg_seg %d\n",
+				 "NVME port num %d, localP x%px, private "
+				 "x%px, sg_seg %d\n",
 				 localport->port_num, localport,
 				 localport->private,
 				 lpfc_nvme_template.max_sgl_segments);
@@ -2503,7 +2168,6 @@
 		lport = (struct lpfc_nvme_lport *)localport->private;
 		vport->localport = localport;
 		lport->vport = vport;
-		lport->cstat = cstat;
 		vport->nvmei_support = 1;
 
 		atomic_set(&lport->xmt_fcp_noxri, 0);
@@ -2520,30 +2184,12 @@
 		atomic_set(&lport->cmpl_ls_err, 0);
 		atomic_set(&lport->fc4NvmeLsRequests, 0);
 		atomic_set(&lport->fc4NvmeLsCmpls, 0);
-
-		for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
-			cstat = &lport->cstat[i];
-			atomic_set(&cstat->fc4NvmeInputRequests, 0);
-			atomic_set(&cstat->fc4NvmeOutputRequests, 0);
-			atomic_set(&cstat->fc4NvmeControlRequests, 0);
-			atomic_set(&cstat->fc4NvmeIoCmpls, 0);
-		}
-
-		/* Don't post more new bufs if repost already recovered
-		 * the nvme sgls.
-		 */
-		if (phba->sli4_hba.nvme_xri_cnt == 0) {
-			len  = lpfc_new_nvme_buf(vport,
-						 phba->sli4_hba.nvme_xri_max);
-			vport->phba->total_nvme_bufs += len;
-		}
-	} else {
-		kfree(cstat);
 	}
 
 	return ret;
 }
 
+#if (IS_ENABLED(CONFIG_NVME_FC))
 /* lpfc_nvme_lport_unreg_wait - Wait for the host to complete an lport unreg.
  *
  * The driver has to wait for the host nvme transport to callback
@@ -2554,13 +2200,15 @@
  * An uninterruptible wait is used because of the risk of transport-to-
  * driver state mismatch.
  */
-void
+static void
 lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
-			   struct lpfc_nvme_lport *lport)
+			   struct lpfc_nvme_lport *lport,
+			   struct completion *lport_unreg_cmp)
 {
-#if (IS_ENABLED(CONFIG_NVME_FC))
 	u32 wait_tmo;
-	int ret;
+	int ret, i, pending = 0;
+	struct lpfc_sli_ring  *pring;
+	struct lpfc_hba  *phba = vport->phba;
 
 	/* Host transport has to clean up and confirm requiring an indefinite
 	 * wait. Print a message if a 10 second wait expires and renew the
@@ -2568,22 +2216,29 @@
 	 */
 	wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000);
 	while (true) {
-		ret = wait_for_completion_timeout(&lport->lport_unreg_done,
-						  wait_tmo);
+		ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo);
 		if (unlikely(!ret)) {
+			pending = 0;
+			for (i = 0; i < phba->cfg_hdw_queue; i++) {
+				pring = phba->sli4_hba.hdwq[i].io_wq->pring;
+				if (!pring)
+					continue;
+				if (pring->txcmplq_cnt)
+					pending += pring->txcmplq_cnt;
+			}
 			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
-					 "6176 Lport %p Localport %p wait "
-					 "timed out. Renewing.\n",
-					 lport, vport->localport);
+					 "6176 Lport x%px Localport x%px wait "
+					 "timed out. Pending %d. Renewing.\n",
+					 lport, vport->localport, pending);
 			continue;
 		}
 		break;
 	}
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
-			 "6177 Lport %p Localport %p Complete Success\n",
+			 "6177 Lport x%px Localport x%px Complete Success\n",
 			 lport, vport->localport);
-#endif
 }
+#endif
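
The wait above intentionally never gives up: each timeout logs the
outstanding txcmplq depth and re-arms the wait, because abandoning the
teardown would leave transport and driver state out of sync. The renewing
wait idiom in isolation (hypothetical names; note that
wait_for_completion_timeout() returns 0 on timeout):

	static void wait_for_unreg(struct completion *done)
	{
		unsigned long tmo = msecs_to_jiffies(10 * 1000); /* 10s */

		while (!wait_for_completion_timeout(done, tmo)) {
			/* Timed out: report progress, then renew the wait
			 * rather than abandoning the teardown mid-flight.
			 */
			pr_info("unregister pending, renewing wait\n");
		}
	}
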
 
 /**
  * lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport.
@@ -2601,32 +2256,30 @@
 #if (IS_ENABLED(CONFIG_NVME_FC))
 	struct nvme_fc_local_port *localport;
 	struct lpfc_nvme_lport *lport;
-	struct lpfc_nvme_ctrl_stat *cstat;
 	int ret;
+	DECLARE_COMPLETION_ONSTACK(lport_unreg_cmp);
 
 	if (vport->nvmei_support == 0)
 		return;
 
 	localport = vport->localport;
-	vport->localport = NULL;
 	lport = (struct lpfc_nvme_lport *)localport->private;
-	cstat = lport->cstat;
 
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
-			 "6011 Destroying NVME localport %p\n",
+			 "6011 Destroying NVME localport x%px\n",
 			 localport);
 
 	/* lport's rport list is clear.  Unregister
 	 * lport and release resources.
 	 */
-	init_completion(&lport->lport_unreg_done);
+	lport->lport_unreg_cmp = &lport_unreg_cmp;
 	ret = nvme_fc_unregister_localport(localport);
 
 	/* Wait for completion.  This either blocks
 	 * indefinitely or succeeds
 	 */
-	lpfc_nvme_lport_unreg_wait(vport, lport);
-	kfree(cstat);
+	lpfc_nvme_lport_unreg_wait(vport, lport, &lport_unreg_cmp);
+	vport->localport = NULL;
 
 	/* Regardless of the unregister upcall response, clear
 	 * nvmei_support.  All rports are unregistered and the
@@ -2663,12 +2316,12 @@
 	lport = (struct lpfc_nvme_lport *)localport->private;
 	if (!lport) {
 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
-				 "6171 Update NVME fail. localP %p, No lport\n",
+				 "6171 Update NVME fail. localP x%px, No lport\n",
 				 localport);
 		return;
 	}
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
-			 "6012 Update NVME lport %p did x%x\n",
+			 "6012 Update NVME lport x%px did x%x\n",
 			 localport, vport->fc_myDID);
 
 	localport->port_id = vport->fc_myDID;
@@ -2678,7 +2331,7 @@
 		localport->port_role = FC_PORT_ROLE_NVME_INITIATOR;
 
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
-			 "6030 bound lport %p to DID x%06x\n",
+			 "6030 bound lport x%px to DID x%06x\n",
 			 lport, localport->port_id);
 #endif
 }
@@ -2727,9 +2380,13 @@
 
 	spin_lock_irq(&vport->phba->hbalock);
 	oldrport = lpfc_ndlp_get_nrport(ndlp);
-	spin_unlock_irq(&vport->phba->hbalock);
-	if (!oldrport)
+	if (oldrport) {
+		prev_ndlp = oldrport->ndlp;
+		spin_unlock_irq(&vport->phba->hbalock);
+	} else {
+		spin_unlock_irq(&vport->phba->hbalock);
 		lpfc_nlp_get(ndlp);
+	}
 
 	ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
 	if (!ret) {
@@ -2748,25 +2405,34 @@
 			/* New remoteport record does not guarantee valid
 			 * host private memory area.
 			 */
-			prev_ndlp = oldrport->ndlp;
 			if (oldrport == remote_port->private) {
 				/* Same remoteport - ndlp should match.
 				 * Just reuse.
 				 */
 				lpfc_printf_vlog(ndlp->vport, KERN_INFO,
 						 LOG_NVME_DISC,
-						 "6014 Rebinding lport to "
-						 "remoteport %p wwpn 0x%llx, "
-						 "Data: x%x x%x %p %p x%x x%06x\n",
+						 "6014 Rebind lport to current "
+						 "remoteport x%px wwpn 0x%llx, "
+						 "Data: x%x x%x x%px x%px x%x "
+						 " x%06x\n",
 						 remote_port,
 						 remote_port->port_name,
 						 remote_port->port_id,
 						 remote_port->port_role,
-						 prev_ndlp,
+						 oldrport->ndlp,
 						 ndlp,
 						 ndlp->nlp_type,
 						 ndlp->nlp_DID);
-				return 0;
+
+				/* It's a complete rebind only if the driver
+				 * is registering with the same ndlp. Otherwise
+				 * the driver likely executed a node swap
+				 * prior to this registration and the ndlp to
+				 * remoteport binding needs to be redone.
+				 */
+				if (prev_ndlp == ndlp)
+					return 0;
+
 			}
 
 			/* Sever the ndlp<->rport association
@@ -2800,10 +2466,10 @@
 		spin_unlock_irq(&vport->phba->hbalock);
 		lpfc_printf_vlog(vport, KERN_INFO,
 				 LOG_NVME_DISC | LOG_NODE,
-				 "6022 Binding new rport to "
-				 "lport %p Remoteport %p rport %p WWNN 0x%llx, "
+				 "6022 Bind lport x%px to remoteport x%px "
+				 "rport x%px WWNN 0x%llx, "
 				 "Rport WWPN 0x%llx DID "
-				 "x%06x Role x%x, ndlp %p prev_ndlp %p\n",
+				 "x%06x Role x%x, ndlp %p prev_ndlp x%px\n",
 				 lport, remote_port, rport,
 				 rpinfo.node_name, rpinfo.port_name,
 				 rpinfo.port_id, rpinfo.port_role,
@@ -2822,6 +2488,53 @@
 #endif
 }
 
+/**
+ * lpfc_nvme_rescan_port - Check to see if we should rescan this remoteport
+ *
+ * If the ndlp represents an NVME Target that we are logged into,
+ * ping the NVME FC Transport layer to initiate a device rescan
+ * on this remote NPort.
+ */
+void
+lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+#if (IS_ENABLED(CONFIG_NVME_FC))
+	struct lpfc_nvme_rport *nrport;
+	struct nvme_fc_remote_port *remoteport = NULL;
+
+	spin_lock_irq(&vport->phba->hbalock);
+	nrport = lpfc_ndlp_get_nrport(ndlp);
+	if (nrport)
+		remoteport = nrport->remoteport;
+	spin_unlock_irq(&vport->phba->hbalock);
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
+			 "6170 Rescan NPort DID x%06x type x%x "
+			 "state x%x nrport x%px remoteport x%px\n",
+			 ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_state,
+			 nrport, remoteport);
+
+	if (!nrport || !remoteport)
+		goto rescan_exit;
+
+	/* Only rescan if we are an NVME target in the MAPPED state */
+	if (remoteport->port_role & FC_PORT_ROLE_NVME_DISCOVERY &&
+	    ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
+		nvme_fc_rescan_remoteport(remoteport);
+
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
+				 "6172 NVME rescanned DID x%06x "
+				 "port_state x%x\n",
+				 ndlp->nlp_DID, remoteport->port_state);
+	}
+	return;
+ rescan_exit:
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
+			 "6169 Skip NVME Rport Rescan, NVME remoteport "
+			 "unregistered\n");
+#endif
+}
+
 /* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport.
  *
  * There is no notion of Devloss or rport recovery from the current
@@ -2865,7 +2578,7 @@
 		goto input_err;
 
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
-			 "6033 Unreg nvme remoteport %p, portname x%llx, "
+			 "6033 Unreg nvme remoteport x%px, portname x%llx, "
 			 "port_id x%06x, portstate x%x port type x%x\n",
 			 remoteport, remoteport->port_name,
 			 remoteport->port_id, remoteport->port_state,
@@ -2903,7 +2616,7 @@
  input_err:
 #endif
 	lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
-			 "6168 State error: lport %p, rport%p FCID x%06x\n",
+			 "6168 State error: lport x%px, rport x%px FCID x%06x\n",
 			 vport->localport, ndlp->rport, ndlp->nlp_DID);
 }
 
@@ -2911,6 +2624,7 @@
  * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort
  * @phba: pointer to lpfc hba data structure.
  * @axri: pointer to the fcp xri abort wcqe structure.
+ * @lpfc_ncmd: The nvme job structure for the request being aborted.
  *
  * This routine is invoked by the worker thread to process a SLI4 fast-path
  * NVME aborted xri.  Aborted NVME IO commands are completed to the transport
@@ -2918,58 +2632,33 @@
  **/
 void
 lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
-			   struct sli4_wcqe_xri_aborted *axri)
+			   struct sli4_wcqe_xri_aborted *axri,
+			   struct lpfc_io_buf *lpfc_ncmd)
 {
 	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
-	struct lpfc_nvme_buf *lpfc_ncmd, *next_lpfc_ncmd;
 	struct nvmefc_fcp_req *nvme_cmd = NULL;
-	struct lpfc_nodelist *ndlp;
-	unsigned long iflag = 0;
+	struct lpfc_nodelist *ndlp = lpfc_ncmd->ndlp;
 
-	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
-		return;
-	spin_lock_irqsave(&phba->hbalock, iflag);
-	spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
-	list_for_each_entry_safe(lpfc_ncmd, next_lpfc_ncmd,
-				 &phba->sli4_hba.lpfc_abts_nvme_buf_list,
-				 list) {
-		if (lpfc_ncmd->cur_iocbq.sli4_xritag == xri) {
-			list_del_init(&lpfc_ncmd->list);
-			lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
-			lpfc_ncmd->status = IOSTAT_SUCCESS;
-			spin_unlock(
-				&phba->sli4_hba.abts_nvme_buf_list_lock);
 
-			spin_unlock_irqrestore(&phba->hbalock, iflag);
-			ndlp = lpfc_ncmd->ndlp;
-			if (ndlp)
-				lpfc_sli4_abts_err_handler(phba, ndlp, axri);
-
-			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
-					"6311 nvme_cmd %p xri x%x tag x%x "
-					"abort complete and xri released\n",
-					lpfc_ncmd->nvmeCmd, xri,
-					lpfc_ncmd->cur_iocbq.iotag);
-
-			/* Aborted NVME commands are required to not complete
-			 * before the abort exchange command fully completes.
-			 * Once completed, it is available via the put list.
-			 */
-			if (lpfc_ncmd->nvmeCmd) {
-				nvme_cmd = lpfc_ncmd->nvmeCmd;
-				nvme_cmd->done(nvme_cmd);
-				lpfc_ncmd->nvmeCmd = NULL;
-			}
-			lpfc_release_nvme_buf(phba, lpfc_ncmd);
-			return;
-		}
-	}
-	spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
-	spin_unlock_irqrestore(&phba->hbalock, iflag);
+	if (ndlp)
+		lpfc_sli4_abts_err_handler(phba, ndlp, axri);
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
-			"6312 XRI Aborted xri x%x not found\n", xri);
+			"6311 nvme_cmd %p xri x%x tag x%x abort complete and "
+			"xri released\n",
+			lpfc_ncmd->nvmeCmd, xri,
+			lpfc_ncmd->cur_iocbq.iotag);
 
+	/* Aborted NVME commands are required to not complete
+	 * before the abort exchange command fully completes.
+	 * Once completed, it is available via the put list.
+	 */
+	if (lpfc_ncmd->nvmeCmd) {
+		nvme_cmd = lpfc_ncmd->nvmeCmd;
+		nvme_cmd->done(nvme_cmd);
+		lpfc_ncmd->nvmeCmd = NULL;
+	}
+	lpfc_release_nvme_buf(phba, lpfc_ncmd);
 }
 
 /**
@@ -2988,14 +2677,16 @@
 	struct lpfc_sli_ring  *pring;
 	u32 i, wait_cnt = 0;
 
-	if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.nvme_wq)
+	if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.hdwq)
 		return;
 
-	/* Cycle through all NVME rings and make sure all outstanding
+	/* Cycle through all IO rings and make sure all outstanding
 	 * WQEs have been removed from the txcmplqs.
 	 */
-	for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
-		pring = phba->sli4_hba.nvme_wq[i]->pring;
+	for (i = 0; i < phba->cfg_hdw_queue; i++) {
+		if (!phba->sli4_hba.hdwq[i].io_wq)
+			continue;
+		pring = phba->sli4_hba.hdwq[i].io_wq->pring;
 
 		if (!pring)
 			continue;
@@ -3016,3 +2707,50 @@
 		}
 	}
 }
+
+void
+lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn)
+{
+#if (IS_ENABLED(CONFIG_NVME_FC))
+	struct lpfc_io_buf *lpfc_ncmd;
+	struct nvmefc_fcp_req *nCmd;
+	struct lpfc_nvme_fcpreq_priv *freqpriv;
+
+	if (!pwqeIn->context1) {
+		lpfc_sli_release_iocbq(phba, pwqeIn);
+		return;
+	}
+	/* For abort iocb just return, IO iocb will do a done call */
+	if (bf_get(wqe_cmnd, &pwqeIn->wqe.gen_req.wqe_com) ==
+	    CMD_ABORT_XRI_CX) {
+		lpfc_sli_release_iocbq(phba, pwqeIn);
+		return;
+	}
+	lpfc_ncmd = (struct lpfc_io_buf *)pwqeIn->context1;
+
+	spin_lock(&lpfc_ncmd->buf_lock);
+	if (!lpfc_ncmd->nvmeCmd) {
+		spin_unlock(&lpfc_ncmd->buf_lock);
+		lpfc_release_nvme_buf(phba, lpfc_ncmd);
+		return;
+	}
+
+	nCmd = lpfc_ncmd->nvmeCmd;
+	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
+			"6194 NVME Cancel xri %x\n",
+			lpfc_ncmd->cur_iocbq.sli4_xritag);
+
+	nCmd->transferred_length = 0;
+	nCmd->rcv_rsplen = 0;
+	nCmd->status = NVME_SC_INTERNAL;
+	freqpriv = nCmd->private;
+	freqpriv->nvme_buf = NULL;
+	lpfc_ncmd->nvmeCmd = NULL;
+
+	spin_unlock(&lpfc_ncmd->buf_lock);
+	nCmd->done(nCmd);
+
+	/* Call release with XB=1 to queue the IO into the abort list. */
+	lpfc_release_nvme_buf(phba, lpfc_ncmd);
+#endif
+}
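
The cancel path claims the command pointer under buf_lock so that exactly
one of cancel and normal completion invokes done(). That claim-under-lock
idiom, sketched with hypothetical types:

	struct fake_req {
		void (*done)(struct fake_req *req);
	};

	struct fake_buf {
		spinlock_t lock;
		struct fake_req *cmd;
	};

	static void cancel_one(struct fake_buf *buf)
	{
		struct fake_req *cmd;

		spin_lock(&buf->lock);
		cmd = buf->cmd;
		buf->cmd = NULL;	/* claim: one completer only */
		spin_unlock(&buf->lock);

		if (cmd)
			cmd->done(cmd);	/* complete outside the lock */
	}
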
diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h
index cfd4719..593c48f 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.h
+++ b/drivers/scsi/lpfc/lpfc_nvme.h
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.  *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -30,6 +30,9 @@
 #define LPFC_NVME_FB_SHIFT		9
 #define LPFC_NVME_MAX_FB		(1 << 20)	/* 1M */
 
+#define LPFC_MAX_NVME_INFO_TMP_LEN	100
+#define LPFC_NVME_INFO_MORE_STR		"\nCould be more info...\n"
+
 #define lpfc_ndlp_get_nrport(ndlp)					\
 	((!ndlp->nrport || (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG))	\
 	? NULL : ndlp->nrport)
@@ -40,19 +43,11 @@
 	uint32_t cpu_id;	/* current cpu id at time of create */
 };
 
-struct lpfc_nvme_ctrl_stat {
-	atomic_t fc4NvmeInputRequests;
-	atomic_t fc4NvmeOutputRequests;
-	atomic_t fc4NvmeControlRequests;
-	atomic_t fc4NvmeIoCmpls;
-};
-
 /* Declare nvme-based local and remote port definitions. */
 struct lpfc_nvme_lport {
 	struct lpfc_vport *vport;
-	struct completion lport_unreg_done;
+	struct completion *lport_unreg_cmp;
 	/* Add stats counters here */
-	struct lpfc_nvme_ctrl_stat *cstat;
 	atomic_t fc4NvmeLsRequests;
 	atomic_t fc4NvmeLsCmpls;
 	atomic_t xmt_fcp_noxri;
@@ -76,57 +71,6 @@
 	struct completion rport_unreg_done;
 };
 
-struct lpfc_nvme_buf {
-	struct list_head list;
-	struct nvmefc_fcp_req *nvmeCmd;
-	struct lpfc_nvme_rport *nrport;
-	struct lpfc_nodelist *ndlp;
-
-	uint32_t timeout;
-
-	uint16_t flags;  /* TBD convert exch_busy to flags */
-#define LPFC_SBUF_XBUSY         0x1     /* SLI4 hba reported XB on WCQE cmpl */
-#define LPFC_BUMP_QDEPTH	0x2	/* bumped queue depth counter */
-	uint16_t exch_busy;     /* SLI4 hba reported XB on complete WCQE */
-	uint16_t status;	/* From IOCB Word 7- ulpStatus */
-	uint16_t cpu;
-	uint16_t qidx;
-	uint16_t sqid;
-	uint32_t result;	/* From IOCB Word 4. */
-
-	uint32_t   seg_cnt;	/* Number of scatter-gather segments returned by
-				 * dma_map_sg.  The driver needs this for calls
-				 * to dma_unmap_sg.
-				 */
-	dma_addr_t nonsg_phys;	/* Non scatter-gather physical address. */
-
-	/*
-	 * data and dma_handle are the kernel virtual and bus address of the
-	 * dma-able buffer containing the fcp_cmd, fcp_rsp and a scatter
-	 * gather bde list that supports the sg_tablesize value.
-	 */
-	void *data;
-	dma_addr_t dma_handle;
-
-	struct sli4_sge *nvme_sgl;
-	dma_addr_t dma_phys_sgl;
-
-	/* cur_iocbq has phys of the dma-able buffer.
-	 * Iotag is in here
-	 */
-	struct lpfc_iocbq cur_iocbq;
-
-	wait_queue_head_t *waitq;
-	unsigned long start_time;
-#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-	uint64_t ts_cmd_start;
-	uint64_t ts_last_cmd;
-	uint64_t ts_cmd_wqput;
-	uint64_t ts_isr_cmpl;
-	uint64_t ts_data_nvme;
-#endif
-};
-
 struct lpfc_nvme_fcpreq_priv {
-	struct lpfc_nvme_buf *nvme_buf;
+	struct lpfc_io_buf *nvme_buf;
 };
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index b766afe..9884228 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -73,6 +73,9 @@
 					   uint32_t, uint16_t);
 static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *,
 				    struct lpfc_nvmet_rcv_ctx *);
+static void lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *);
+
+static void lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf);
 
 static union lpfc_wqe128 lpfc_tsend_cmd_template;
 static union lpfc_wqe128 lpfc_treceive_cmd_template;
@@ -217,24 +220,71 @@
 	/* Word 12, 13, 14, 15 - is zero */
 }
 
-void
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
+static struct lpfc_nvmet_rcv_ctx *
+lpfc_nvmet_get_ctx_for_xri(struct lpfc_hba *phba, u16 xri)
+{
+	struct lpfc_nvmet_rcv_ctx *ctxp;
+	unsigned long iflag;
+	bool found = false;
+
+	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
+	list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
+		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
+			continue;
+
+		found = true;
+		break;
+	}
+	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
+	if (found)
+		return ctxp;
+
+	return NULL;
+}
+
+static struct lpfc_nvmet_rcv_ctx *
+lpfc_nvmet_get_ctx_for_oxid(struct lpfc_hba *phba, u16 oxid, u32 sid)
+{
+	struct lpfc_nvmet_rcv_ctx *ctxp;
+	unsigned long iflag;
+	bool found = false;
+
+	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
+	list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
+		if (ctxp->oxid != oxid || ctxp->sid != sid)
+			continue;
+
+		found = true;
+		break;
+	}
+	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
+	if (found)
+		return ctxp;
+
+	return NULL;
+}
+#endif
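
Both helpers are linear walks of t_active_ctx_list under
t_active_list_lock; the found/break shape avoids returning the
list_for_each_entry() cursor after it has run off the list. The generic
pattern, sketched with a hypothetical entry type:

	struct entry {
		struct list_head list;
		u16 key;
	};

	static struct entry *entry_lookup(struct list_head *head,
					  spinlock_t *lock, u16 key)
	{
		struct entry *e, *found = NULL;
		unsigned long flags;

		spin_lock_irqsave(lock, flags);
		list_for_each_entry(e, head, list) {
			if (e->key != key)
				continue;
			found = e;
			break;
		}
		spin_unlock_irqrestore(lock, flags);

		/* Once the lock drops, the entry is only trustworthy if the
		 * caller holds a reference or serializes teardown, as lpfc
		 * does via the context state machine.
		 */
		return found;
	}
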
+
+static void
 lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
 {
-	unsigned long iflag;
+	lockdep_assert_held(&ctxp->ctxlock);
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
-			"6313 NVMET Defer ctx release xri x%x flg x%x\n",
+			"6313 NVMET Defer ctx release oxid x%x flg x%x\n",
 			ctxp->oxid, ctxp->flag);
 
-	spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
-	if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
-		spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock,
-				       iflag);
+	if (ctxp->flag & LPFC_NVMET_CTX_RLS)
 		return;
-	}
+
 	ctxp->flag |= LPFC_NVMET_CTX_RLS;
+	spin_lock(&phba->sli4_hba.t_active_list_lock);
+	list_del(&ctxp->list);
+	spin_unlock(&phba->sli4_hba.t_active_list_lock);
+	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 	list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
-	spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
+	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 }
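
Note the locking change here: defer_release no longer takes a lock itself
but asserts the caller's via lockdep_assert_held(), and every call site in
this patch now wraps it in ctxlock. The idiom in isolation (hypothetical
type; the assert compiles away without CONFIG_LOCKDEP):

	struct ctx_entry {
		spinlock_t lock;
		int state;
	};

	static void ctx_mark_release(struct ctx_entry *c)
	{
		lockdep_assert_held(&c->lock);	/* callers lock first */
		c->state = 1;			/* e.g. an RLS-style flag */
	}
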
 
 /**
@@ -324,8 +374,7 @@
 	struct fc_frame_header *fc_hdr;
 	struct rqb_dmabuf *nvmebuf;
 	struct lpfc_nvmet_ctx_info *infop;
-	uint32_t *payload;
-	uint32_t size, oxid, sid, rc;
+	uint32_t size, oxid, sid;
 	int cpu;
 	unsigned long iflag;
 
@@ -341,6 +390,27 @@
 				"6411 NVMET free, already free IO x%x: %d %d\n",
 				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
 	}
+
+	if (ctxp->rqb_buffer) {
+		spin_lock_irqsave(&ctxp->ctxlock, iflag);
+		nvmebuf = ctxp->rqb_buffer;
+		/* check if freed in another path whilst acquiring lock */
+		if (nvmebuf) {
+			ctxp->rqb_buffer = NULL;
+			if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) {
+				ctxp->flag &= ~LPFC_NVMET_CTX_REUSE_WQ;
+				spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+				nvmebuf->hrq->rqbp->rqb_free_buffer(phba,
+								    nvmebuf);
+			} else {
+				spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+				/* repost */
+				lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
+			}
+		} else {
+			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+		}
+	}
 	ctxp->state = LPFC_NVMET_STE_FREE;
 
 	spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
@@ -355,7 +425,6 @@
 		fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
 		oxid = be16_to_cpu(fc_hdr->fh_ox_id);
 		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
-		payload = (uint32_t *)(nvmebuf->dbuf.virt);
 		size = nvmebuf->bytes_recv;
 		sid = sli4_sid_from_fc_hdr(fc_hdr);
 
@@ -375,8 +444,9 @@
 		spin_lock_init(&ctxp->ctxlock);
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-		if (ctxp->ts_cmd_nvme) {
-			ctxp->ts_cmd_nvme = ktime_get_ns();
+		/* NOTE: isr time stamp is stale when context is re-assigned */
+		if (ctxp->ts_isr_cmd) {
+			ctxp->ts_cmd_nvme = 0;
 			ctxp->ts_nvme_data = 0;
 			ctxp->ts_data_wqput = 0;
 			ctxp->ts_isr_data = 0;
@@ -388,46 +458,28 @@
 		}
 #endif
 		atomic_inc(&tgtp->rcv_fcp_cmd_in);
-		/*
-		 * The calling sequence should be:
-		 * nvmet_fc_rcv_fcp_req->lpfc_nvmet_xmt_fcp_op/cmp- req->done
-		 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
-		 * When we return from nvmet_fc_rcv_fcp_req, all relevant info
-		 * the NVME command / FC header is stored.
-		 * A buffer has already been reposted for this IO, so just free
-		 * the nvmebuf.
-		 */
-		rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
-					  payload, size);
 
-		/* Process FCP command */
-		if (rc == 0) {
-			ctxp->rqb_buffer = NULL;
-			atomic_inc(&tgtp->rcv_fcp_cmd_out);
-			nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
-			return;
+		/* Indicate that a replacement buffer has been posted */
+		spin_lock_irqsave(&ctxp->ctxlock, iflag);
+		ctxp->flag |= LPFC_NVMET_CTX_REUSE_WQ;
+		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+
+		if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
+			atomic_inc(&tgtp->rcv_fcp_cmd_drop);
+			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+					"6181 Unable to queue deferred work "
+					"for oxid x%x. "
+					"FCP Drop IO [x%x x%x x%x]\n",
+					ctxp->oxid,
+					atomic_read(&tgtp->rcv_fcp_cmd_in),
+					atomic_read(&tgtp->rcv_fcp_cmd_out),
+					atomic_read(&tgtp->xmt_fcp_release));
+
+			spin_lock_irqsave(&ctxp->ctxlock, iflag);
+			lpfc_nvmet_defer_release(phba, ctxp);
+			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+			lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
 		}
-
-		/* Processing of FCP command is deferred */
-		if (rc == -EOVERFLOW) {
-			lpfc_nvmeio_data(phba,
-					 "NVMET RCV BUSY: xri x%x sz %d "
-					 "from %06x\n",
-					 oxid, size, sid);
-			atomic_inc(&tgtp->rcv_fcp_cmd_out);
-			return;
-		}
-		atomic_inc(&tgtp->rcv_fcp_cmd_drop);
-		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
-				"2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
-				ctxp->oxid, rc,
-				atomic_read(&tgtp->rcv_fcp_cmd_in),
-				atomic_read(&tgtp->rcv_fcp_cmd_out),
-				atomic_read(&tgtp->xmt_fcp_release));
-
-		lpfc_nvmet_defer_release(phba, ctxp);
-		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
-		nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
 		return;
 	}
 	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
@@ -436,7 +488,10 @@
 	 * Use the CPU context list, from the MRQ the IO was received on
 	 * (ctxp->idx), to save context structure.
 	 */
-	cpu = smp_processor_id();
+	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
+	list_del_init(&ctxp->list);
+	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
+	cpu = raw_smp_processor_id();
 	infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
 	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
 	list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
@@ -703,8 +758,10 @@
 		}
 
 		lpfc_printf_log(phba, KERN_INFO, logerr,
-				"6315 IO Error Cmpl xri x%x: %x/%x XBUSY:x%x\n",
-				ctxp->oxid, status, result, ctxp->flag);
+				"6315 IO Error Cmpl oxid: x%x xri: x%x %x/%x "
+				"XBUSY:x%x\n",
+				ctxp->oxid, ctxp->ctxbuf->sglq->sli4_xritag,
+				status, result, ctxp->flag);
 
 	} else {
 		rsp->fcp_error = NVME_SC_SUCCESS;
@@ -744,16 +801,6 @@
 					ktime_get_ns();
 			}
 		}
-		if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
-			id = smp_processor_id();
-			if (ctxp->cpu != id)
-				lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
-						"6703 CPU Check cmpl: "
-						"cpu %d expect %d\n",
-						id, ctxp->cpu);
-			if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
-				phba->cpucheck_cmpl_io[id]++;
-		}
 #endif
 		rsp->done(rsp);
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -771,19 +818,22 @@
 			ctxp->ts_isr_data = cmdwqe->isr_timestamp;
 			ctxp->ts_data_nvme = ktime_get_ns();
 		}
-		if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
-			id = smp_processor_id();
-			if (ctxp->cpu != id)
-				lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
-						"6704 CPU Check cmdcmpl: "
-						"cpu %d expect %d\n",
-						id, ctxp->cpu);
-			if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
-				phba->cpucheck_ccmpl_io[id]++;
-		}
 #endif
 		rsp->done(rsp);
 	}
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
+		id = raw_smp_processor_id();
+		if (id < LPFC_CHECK_CPU_CNT) {
+			if (ctxp->cpu != id)
+				lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
+						"6704 CPU Check cmdcmpl: "
+						"cpu %d expect %d\n",
+						id, ctxp->cpu);
+			phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_cmpl_io[id]++;
+		}
+	}
+#endif
 }
 
 static int
@@ -852,14 +902,13 @@
 	lpfc_nvmeio_data(phba, "NVMET LS  RESP: xri x%x wqidx x%x len x%x\n",
 			 ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen);
 
-	rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, nvmewqeq);
+	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
 	if (rc == WQE_SUCCESS) {
 		/*
 		 * Okay to repost buffer here, but wait till cmpl
 		 * before freeing ctxp and iocbq.
 		 */
 		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
-		ctxp->rqb_buffer = 0;
 		atomic_inc(&nvmep->xmt_ls_rsp);
 		return 0;
 	}
@@ -908,18 +957,22 @@
 		else
 			ctxp->ts_nvme_data = ktime_get_ns();
 	}
+
+	/* Setup the hdw queue if not already set */
+	if (!ctxp->hdwq)
+		ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];
+
 	if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
-		int id = smp_processor_id();
-		ctxp->cpu = id;
-		if (id < LPFC_CHECK_CPU_CNT)
-			phba->cpucheck_xmt_io[id]++;
-		if (rsp->hwqid != id) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
-					"6705 CPU Check OP: "
-					"cpu %d expect %d\n",
-					id, rsp->hwqid);
-			ctxp->cpu = rsp->hwqid;
+		int id = raw_smp_processor_id();
+		if (id < LPFC_CHECK_CPU_CNT) {
+			if (rsp->hwqid != id)
+				lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
+						"6705 CPU Check OP: "
+						"cpu %d expect %d\n",
+						id, rsp->hwqid);
+			phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_xmt_io[id]++;
 		}
+		ctxp->cpu = id; /* Setup cpu for cmpl check */
 	}
 #endif
 
@@ -928,7 +981,7 @@
 	    (ctxp->state == LPFC_NVMET_STE_ABORT)) {
 		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
-				"6102 IO xri x%x aborted\n",
+				"6102 IO oxid x%x aborted\n",
 				ctxp->oxid);
 		rc = -ENXIO;
 		goto aerr;
@@ -954,7 +1007,7 @@
 			 ctxp->oxid, rsp->op, rsp->rsplen);
 
 	ctxp->flag |= LPFC_NVMET_IO_INP;
-	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
+	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
 	if (rc == WQE_SUCCESS) {
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 		if (!ctxp->ts_cmd_nvme)
@@ -973,7 +1026,7 @@
 		 * WQE release CQE
 		 */
 		ctxp->flag |= LPFC_NVMET_DEFER_WQFULL;
-		wq = phba->sli4_hba.nvme_wq[rsp->hwqid];
+		wq = ctxp->hdwq->io_wq;
 		pring = wq->pring;
 		spin_lock_irqsave(&pring->ring_lock, iflags);
 		list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
@@ -1003,7 +1056,8 @@
 	struct lpfc_nvmet_tgtport *tport = targetport->private;
 
 	/* release any threads waiting for the unreg to complete */
-	complete(&tport->tport_unreg_done);
+	if (tport->phba->targetport)
+		complete(tport->tport_unreg_cmp);
 }
 
 static void
@@ -1023,8 +1077,11 @@
 	if (phba->pport->load_flag & FC_UNLOADING)
 		return;
 
+	if (!ctxp->hdwq)
+		ctxp->hdwq = &phba->sli4_hba.hdwq[0];
+
 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
-			"6103 NVMET Abort op: oxri x%x flg x%x ste %d\n",
+			"6103 NVMET Abort op: oxid x%x flg x%x ste %d\n",
 			ctxp->oxid, ctxp->flag, ctxp->state);
 
 	lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
@@ -1033,25 +1090,25 @@
 	atomic_inc(&lpfc_nvmep->xmt_fcp_abort);
 
 	spin_lock_irqsave(&ctxp->ctxlock, flags);
-	ctxp->state = LPFC_NVMET_STE_ABORT;
 
 	/* Since iaab/iaar are NOT set, we need to check
 	 * if the firmware is in process of aborting IO
 	 */
-	if (ctxp->flag & LPFC_NVMET_XBUSY) {
+	if (ctxp->flag & (LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP)) {
 		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
 		return;
 	}
 	ctxp->flag |= LPFC_NVMET_ABORT_OP;
 
 	if (ctxp->flag & LPFC_NVMET_DEFER_WQFULL) {
+		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
 		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
 						 ctxp->oxid);
-		wq = phba->sli4_hba.nvme_wq[ctxp->wqeq->hba_wqidx];
-		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
+		wq = ctxp->hdwq->io_wq;
 		lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
 		return;
 	}
+	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
 
 	/* A state of LPFC_NVMET_STE_RCV means we have just received
 	 * the NVME command and have not started processing it.
@@ -1063,7 +1120,6 @@
 	else
 		lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
 					       ctxp->oxid);
-	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
 }
 
 static void
@@ -1077,14 +1133,18 @@
 	unsigned long flags;
 	bool aborting = false;
 
-	if (ctxp->state != LPFC_NVMET_STE_DONE &&
-	    ctxp->state != LPFC_NVMET_STE_ABORT) {
+	spin_lock_irqsave(&ctxp->ctxlock, flags);
+	if (ctxp->flag & LPFC_NVMET_XBUSY)
+		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
+				"6027 NVMET release with XBUSY flag x%x"
+				" oxid x%x\n",
+				ctxp->flag, ctxp->oxid);
+	else if (ctxp->state != LPFC_NVMET_STE_DONE &&
+		 ctxp->state != LPFC_NVMET_STE_ABORT)
 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
 				"6413 NVMET release bad state %d %d oxid x%x\n",
 				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
-	}
 
-	spin_lock_irqsave(&ctxp->ctxlock, flags);
 	if ((ctxp->flag & LPFC_NVMET_ABORT_OP) ||
 	    (ctxp->flag & LPFC_NVMET_XBUSY)) {
 		aborting = true;
@@ -1097,6 +1157,7 @@
 			 ctxp->state, aborting);
 
 	atomic_inc(&lpfc_nvmep->xmt_fcp_release);
+	ctxp->flag &= ~LPFC_NVMET_TNOTIFY;
 
 	if (aborting)
 		return;
@@ -1113,13 +1174,15 @@
 		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
 	struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
 	struct lpfc_hba *phba = ctxp->phba;
+	unsigned long iflag;
 
 	lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
-			 ctxp->oxid, ctxp->size, smp_processor_id());
+			 ctxp->oxid, ctxp->size, raw_smp_processor_id());
 
 	if (!nvmebuf) {
 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
-				"6425 Defer rcv: no buffer xri x%x: "
+				"6425 Defer rcv: no buffer oxid x%x: "
 				"flg %x ste %x\n",
 				ctxp->oxid, ctxp->flag, ctxp->state);
 		return;
@@ -1131,6 +1194,25 @@
 
 	/* Free the nvmebuf since a new buffer already replaced it */
 	nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
+	spin_lock_irqsave(&ctxp->ctxlock, iflag);
+	ctxp->rqb_buffer = NULL;
+	spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+}
+
+static void
+lpfc_nvmet_discovery_event(struct nvmet_fc_target_port *tgtport)
+{
+	struct lpfc_nvmet_tgtport *tgtp;
+	struct lpfc_hba *phba;
+	uint32_t rc;
+
+	tgtp = tgtport->private;
+	phba = tgtp->phba;
+
+	rc = lpfc_issue_els_rscn(phba->pport, 0);
+	lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+			"6420 NVMET subsystem change: Notification %s\n",
+			(rc) ? "Failed" : "Sent");
 }
 
 static struct nvmet_fc_target_template lpfc_tgttemplate = {
@@ -1140,6 +1222,7 @@
 	.fcp_abort      = lpfc_nvmet_xmt_fcp_abort,
 	.fcp_req_release = lpfc_nvmet_xmt_fcp_release,
 	.defer_rcv	= lpfc_nvmet_defer_rcv,
+	.discovery_event = lpfc_nvmet_discovery_event,
 
 	.max_hw_queues  = 1,
 	.max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
@@ -1162,9 +1245,9 @@
 	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
 	list_for_each_entry_safe(ctx_buf, next_ctx_buf,
 				&infop->nvmet_ctx_list, list) {
-		spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 		list_del_init(&ctx_buf->list);
-		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 
 		__lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
 		ctx_buf->sglq->state = SGL_FREED;
@@ -1194,9 +1277,9 @@
 
 	/* Cycle through the entire CPU context list for every MRQ */
 	for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
-		for (j = 0; j < phba->sli4_hba.num_present_cpu; j++) {
+		for_each_present_cpu(j) {
+			infop = lpfc_get_ctx_list(phba, j, i);
 			__lpfc_nvmet_clean_io_for_cpu(phba, infop);
-			infop++; /* next */
 		}
 	}
 	kfree(phba->sli4_hba.nvmet_ctx_info);
@@ -1211,14 +1294,14 @@
 	union lpfc_wqe128 *wqe;
 	struct lpfc_nvmet_ctx_info *last_infop;
 	struct lpfc_nvmet_ctx_info *infop;
-	int i, j, idx;
+	int i, j, idx, cpu;
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
 			"6403 Allocate NVMET resources for %d XRIs\n",
 			phba->sli4_hba.nvmet_xri_cnt);
 
 	phba->sli4_hba.nvmet_ctx_info = kcalloc(
-		phba->sli4_hba.num_present_cpu * phba->cfg_nvmet_mrq,
+		phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq,
 		sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
 	if (!phba->sli4_hba.nvmet_ctx_info) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -1246,13 +1329,12 @@
 	 * of the IO completion. Thus a context that was allocated for MRQ A
 	 * whose IO completed on CPU B will be freed to cpuB/mrqA.
 	 */
-	infop = phba->sli4_hba.nvmet_ctx_info;
-	for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+	for_each_possible_cpu(i) {
 		for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
+			infop = lpfc_get_ctx_list(phba, i, j);
 			INIT_LIST_HEAD(&infop->nvmet_ctx_list);
 			spin_lock_init(&infop->nvmet_ctx_list_lock);
 			infop->nvmet_ctx_list_cnt = 0;
-			infop++;
 		}
 	}
 
@@ -1262,8 +1344,10 @@
 	 * MRQ 1 cycling thru CPUs 0 - X, and so on.
 	 */
 	for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
-		last_infop = lpfc_get_ctx_list(phba, 0, j);
-		for (i = phba->sli4_hba.num_present_cpu - 1;  i >= 0; i--) {
+		last_infop = lpfc_get_ctx_list(phba,
+					       cpumask_first(cpu_present_mask),
+					       j);
+		for (i = phba->sli4_hba.num_possible_cpu - 1;  i >= 0; i--) {
 			infop = lpfc_get_ctx_list(phba, i, j);
 			infop->nvmet_ctx_next_cpu = last_infop;
 			last_infop = infop;
@@ -1274,6 +1358,7 @@
 	 * received command on a per xri basis.
 	 */
 	idx = 0;
+	cpu = cpumask_first(cpu_present_mask);
 	for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
 		ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
 		if (!ctx_buf) {
@@ -1321,13 +1406,14 @@
 					"6407 Ran out of NVMET XRIs\n");
 			return -ENOMEM;
 		}
+		INIT_WORK(&ctx_buf->defer_work, lpfc_nvmet_fcp_rqst_defer_work);
 
 		/*
 		 * Add ctx to MRQidx context list. Our initial assumption
 		 * is MRQidx will be associated with CPUidx. This association
 		 * can change on the fly.
 		 */
-		infop = lpfc_get_ctx_list(phba, idx, idx);
+		infop = lpfc_get_ctx_list(phba, cpu, idx);
 		spin_lock(&infop->nvmet_ctx_list_lock);
 		list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
 		infop->nvmet_ctx_list_cnt++;
@@ -1335,19 +1421,25 @@
 
 		/* Spread ctx structures evenly across all MRQs */
 		idx++;
-		if (idx >= phba->cfg_nvmet_mrq)
+		if (idx >= phba->cfg_nvmet_mrq) {
 			idx = 0;
+			cpu = cpumask_first(cpu_present_mask);
+			continue;
+		}
+		cpu = cpumask_next(cpu, cpu_present_mask);
+		if (cpu == nr_cpu_ids)
+			cpu = cpumask_first(cpu_present_mask);
 	}
 
-	infop = phba->sli4_hba.nvmet_ctx_info;
-	for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
-		for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+	for_each_present_cpu(i) {
+		for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
+			infop = lpfc_get_ctx_list(phba, i, j);
 			lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
 					"6408 TOTAL NVMET ctx for CPU %d "
-					"MRQ %d: cnt %d nextcpu %p\n",
+					"MRQ %d: cnt %d nextcpu x%px\n",
 					i, j, infop->nvmet_ctx_list_cnt,
 					infop->nvmet_ctx_next_cpu);
-			infop++;
 		}
 	}
 	return 0;
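
Context buffers are now spread round-robin over the present-CPU mask
rather than assuming CPU ids 0..N-1 are contiguous, which matters on
systems with sparse or offline CPU numbering. The wrap-around step used
in the loop above, in isolation:

	#include <linux/cpumask.h>

	/* Advance to the next present CPU, wrapping to the first. */
	static int next_present_cpu(int cpu)
	{
		cpu = cpumask_next(cpu, cpu_present_mask);
		if (cpu >= nr_cpu_ids)		/* ran off the end */
			cpu = cpumask_first(cpu_present_mask);
		return cpu;
	}
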
@@ -1373,19 +1465,12 @@
 	pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
 	pinfo.port_id = vport->fc_myDID;
 
-	/* Limit to LPFC_MAX_NVME_SEG_CNT.
-	 * For now need + 1 to get around NVME transport logic.
+	/* We need to tell the transport layer + 1 because it takes page
+	 * alignment into account. When space for the SGL is allocated we
+	 * allocate + 3: one for the cmd, one for the rsp, and one for this alignment.
 	 */
-	if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
-		lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
-				"6400 Reducing sg segment cnt to %d\n",
-				LPFC_MAX_NVME_SEG_CNT);
-		phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
-	} else {
-		phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
-	}
 	lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
-	lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel;
+	lpfc_tgttemplate.max_hw_queues = phba->cfg_hdw_queue;
 	lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;
 
 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
@@ -1415,7 +1500,7 @@
 
 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
 				"6026 Registered NVME "
-				"targetport: %p, private %p "
+				"targetport: x%px, private x%px "
 				"portnm %llx nodenm %llx segs %d qs %d\n",
 				phba->targetport, tgtp,
 				pinfo.port_name, pinfo.node_name,
@@ -1470,7 +1555,7 @@
 		return 0;
 
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
-			 "6007 Update NVMET port %p did x%x\n",
+			 "6007 Update NVMET port x%px did x%x\n",
 			 phba->targetport, vport->fc_myDID);
 
 	phba->targetport->port_id = vport->fc_myDID;
@@ -1489,10 +1574,12 @@
 lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
 			    struct sli4_wcqe_xri_aborted *axri)
 {
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
 	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
 	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
 	struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
 	struct lpfc_nvmet_tgtport *tgtp;
+	struct nvmefc_tgt_fcp_req *req = NULL;
 	struct lpfc_nodelist *ndlp;
 	unsigned long iflag = 0;
 	int rrq_empty = 0;
@@ -1510,23 +1597,25 @@
 	}
 
 	spin_lock_irqsave(&phba->hbalock, iflag);
-	spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 	list_for_each_entry_safe(ctxp, next_ctxp,
 				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
 				 list) {
 		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
 			continue;
 
+		spin_lock(&ctxp->ctxlock);
 		/* Check if we already received a free context call
 		 * and we have completed processing an abort situation.
 		 */
 		if (ctxp->flag & LPFC_NVMET_CTX_RLS &&
 		    !(ctxp->flag & LPFC_NVMET_ABORT_OP)) {
-			list_del(&ctxp->list);
+			list_del_init(&ctxp->list);
 			released = true;
 		}
 		ctxp->flag &= ~LPFC_NVMET_XBUSY;
-		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+		spin_unlock(&ctxp->ctxlock);
+		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 
 		rrq_empty = list_empty(&phba->active_rrq_list);
 		spin_unlock_irqrestore(&phba->hbalock, iflag);
@@ -1541,7 +1630,7 @@
 		}
 
 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
-				"6318 XB aborted oxid %x flg x%x (%x)\n",
+				"6318 XB aborted oxid x%x flg x%x (%x)\n",
 				ctxp->oxid, ctxp->flag, released);
 		if (released)
 			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
@@ -1550,33 +1639,63 @@
 			lpfc_worker_wake_up(phba);
 		return;
 	}
-	spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 	spin_unlock_irqrestore(&phba->hbalock, iflag);
+
+	ctxp = lpfc_nvmet_get_ctx_for_xri(phba, xri);
+	if (ctxp) {
+		/*
+		 *  Abort already done by FW, so BA_ACC sent.
+		 *  However, the transport may be unaware.
+		 */
+		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+				"6323 NVMET Rcv ABTS xri x%x ctxp state x%x "
+				"flag x%x oxid x%x rxid x%x\n",
+				xri, ctxp->state, ctxp->flag, ctxp->oxid,
+				rxid);
+
+		spin_lock_irqsave(&ctxp->ctxlock, iflag);
+		ctxp->flag |= LPFC_NVMET_ABTS_RCV;
+		ctxp->state = LPFC_NVMET_STE_ABORT;
+		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+
+		lpfc_nvmeio_data(phba,
+				 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
+				 xri, raw_smp_processor_id(), 0);
+
+		req = &ctxp->ctx.fcp_req;
+		if (req)
+			nvmet_fc_rcv_fcp_abort(phba->targetport, req);
+	}
+#endif
 }
 
 int
 lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
 			   struct fc_frame_header *fc_hdr)
-
 {
 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
 	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
 	struct nvmefc_tgt_fcp_req *rsp;
-	uint16_t xri;
+	uint32_t sid;
+	uint16_t oxid, xri;
 	unsigned long iflag = 0;
 
-	xri = be16_to_cpu(fc_hdr->fh_ox_id);
+	sid = sli4_sid_from_fc_hdr(fc_hdr);
+	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
 
 	spin_lock_irqsave(&phba->hbalock, iflag);
-	spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 	list_for_each_entry_safe(ctxp, next_ctxp,
 				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
 				 list) {
-		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
+		if (ctxp->oxid != oxid || ctxp->sid != sid)
 			continue;
 
-		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+		xri = ctxp->ctxbuf->sglq->sli4_xritag;
+
+		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 		spin_unlock_irqrestore(&phba->hbalock, iflag);
 
 		spin_lock_irqsave(&ctxp->ctxlock, iflag);
@@ -1585,7 +1704,7 @@
 
 		lpfc_nvmeio_data(phba,
 			"NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
-			xri, smp_processor_id(), 0);
+			xri, raw_smp_processor_id(), 0);
 
 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
 				"6319 NVMET Rcv ABTS:acc xri x%x\n", xri);
@@ -1597,14 +1716,92 @@
 		lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
 		return 0;
 	}
-	spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 	spin_unlock_irqrestore(&phba->hbalock, iflag);
 
-	lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
-			 xri, smp_processor_id(), 1);
+	/* check the wait list */
+	if (phba->sli4_hba.nvmet_io_wait_cnt) {
+		struct rqb_dmabuf *nvmebuf;
+		struct fc_frame_header *fc_hdr_tmp;
+		u32 sid_tmp;
+		u16 oxid_tmp;
+		bool found = false;
+
+		spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
+
+		/* match by oxid and s_id */
+		list_for_each_entry(nvmebuf,
+				    &phba->sli4_hba.lpfc_nvmet_io_wait_list,
+				    hbuf.list) {
+			fc_hdr_tmp = (struct fc_frame_header *)
+					(nvmebuf->hbuf.virt);
+			oxid_tmp = be16_to_cpu(fc_hdr_tmp->fh_ox_id);
+			sid_tmp = sli4_sid_from_fc_hdr(fc_hdr_tmp);
+			if (oxid_tmp != oxid || sid_tmp != sid)
+				continue;
+
+			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+					"6321 NVMET Rcv ABTS oxid x%x from x%x "
+					"is waiting for a ctxp\n",
+					oxid, sid);
+
+			list_del_init(&nvmebuf->hbuf.list);
+			phba->sli4_hba.nvmet_io_wait_cnt--;
+			found = true;
+			break;
+		}
+		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
+				       iflag);
+
+		/* free buffer since already posted a new DMA buffer to RQ */
+		if (found) {
+			nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
+			/* Respond with BA_ACC accordingly */
+			lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
+			return 0;
+		}
+	}
+
+	/* check active list */
+	ctxp = lpfc_nvmet_get_ctx_for_oxid(phba, oxid, sid);
+	if (ctxp) {
+		xri = ctxp->ctxbuf->sglq->sli4_xritag;
+
+		spin_lock_irqsave(&ctxp->ctxlock, iflag);
+		ctxp->flag |= (LPFC_NVMET_ABTS_RCV | LPFC_NVMET_ABORT_OP);
+		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+
+		lpfc_nvmeio_data(phba,
+				 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
+				 xri, raw_smp_processor_id(), 0);
+
+		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+				"6322 NVMET Rcv ABTS:acc oxid x%x xri x%x "
+				"flag x%x state x%x\n",
+				ctxp->oxid, xri, ctxp->flag, ctxp->state);
+
+		if (ctxp->flag & LPFC_NVMET_TNOTIFY) {
+			/* Notify the transport */
+			nvmet_fc_rcv_fcp_abort(phba->targetport,
+					       &ctxp->ctx.fcp_req);
+		} else {
+			cancel_work_sync(&ctxp->ctxbuf->defer_work);
+			spin_lock_irqsave(&ctxp->ctxlock, iflag);
+			lpfc_nvmet_defer_release(phba, ctxp);
+			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+		}
+		lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
+					       ctxp->oxid);
+
+		lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
+		return 0;
+	}
+
+	lpfc_nvmeio_data(phba, "NVMET ABTS RCV: oxid x%x CPU %02x rjt %d\n",
+			 oxid, raw_smp_processor_id(), 1);
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
-			"6320 NVMET Rcv ABTS:rjt xri x%x\n", xri);
+			"6320 NVMET Rcv ABTS:rjt oxid x%x\n", oxid);
 
 	/* Respond with BA_RJT accordingly */
 	lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
@@ -1665,6 +1862,7 @@
 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
 	struct lpfc_sli_ring *pring;
 	struct lpfc_iocbq *nvmewqeq;
+	struct lpfc_nvmet_rcv_ctx *ctxp;
 	unsigned long iflags;
 	int rc;
 
@@ -1678,7 +1876,8 @@
 		list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
 				 list);
 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
-		rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
+		ctxp = (struct lpfc_nvmet_rcv_ctx *)nvmewqeq->context2;
+		rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
 		spin_lock_irqsave(&pring->ring_lock, iflags);
 		if (rc == -EBUSY) {
 			/* WQ was full again, so put it back on the list */
@@ -1686,6 +1885,18 @@
 			spin_unlock_irqrestore(&pring->ring_lock, iflags);
 			return;
 		}
+		if (rc == WQE_SUCCESS) {
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+			if (ctxp->ts_cmd_nvme) {
+				if (ctxp->ctx.fcp_req.op == NVMET_FCOP_RSP)
+					ctxp->ts_status_wqput = ktime_get_ns();
+				else
+					ctxp->ts_data_wqput = ktime_get_ns();
+			}
+#endif
+		} else {
+			WARN_ON(rc);
+		}
 	}
 	wq->q_flag &= ~HBA_NVMET_WQFULL;
 	spin_unlock_irqrestore(&pring->ring_lock, iflags);
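
The drain loop pops queued WQEs one at a time and, on -EBUSY, pushes the
entry back onto the head of wqfull_list and stops, so submission order is
preserved for the next flush attempt. A sketch of that requeue-and-stop
shape, with a hypothetical item type and issue() callback:

	struct wq_item {
		struct list_head list;
	};

	static void drain_wqfull(struct list_head *wqfull,
				 int (*issue)(struct wq_item *item))
	{
		struct wq_item *item;

		while (!list_empty(wqfull)) {
			item = list_first_entry(wqfull, struct wq_item, list);
			list_del(&item->list);
			if (issue(item) == -EBUSY) {
				/* Ring full again: restore head position
				 * and retry on the next flush.
				 */
				list_add(&item->list, wqfull);
				return;
			}
		}
	}
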
@@ -1700,18 +1911,23 @@
 	struct lpfc_nvmet_tgtport *tgtp;
 	struct lpfc_queue *wq;
 	uint32_t qidx;
+	DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp);
 
 	if (phba->nvmet_support == 0)
 		return;
 	if (phba->targetport) {
 		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
-		for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) {
-			wq = phba->sli4_hba.nvme_wq[qidx];
+		for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
+			wq = phba->sli4_hba.hdwq[qidx].io_wq;
 			lpfc_nvmet_wqfull_flush(phba, wq, NULL);
 		}
-		init_completion(&tgtp->tport_unreg_done);
+		tgtp->tport_unreg_cmp = &tport_unreg_cmp;
 		nvmet_fc_unregister_targetport(phba->targetport);
-		wait_for_completion_timeout(&tgtp->tport_unreg_done, 5);
+		if (!wait_for_completion_timeout(tgtp->tport_unreg_cmp,
+					msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
+			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+					"6179 Unreg targetport x%px timeout "
+					"reached.\n", phba->targetport);
 		lpfc_nvmet_cleanup_io_context(phba);
 	}
 	phba->targetport = NULL;
@@ -1781,6 +1997,7 @@
 	ctxp->state = LPFC_NVMET_STE_LS_RCV;
 	ctxp->entry_cnt = 1;
 	ctxp->rqb_buffer = (void *)nvmebuf;
+	ctxp->hdwq = &phba->sli4_hba.hdwq[0];
 
 	lpfc_nvmeio_data(phba, "NVMET LS   RCV: xri x%x sz %d from %06x\n",
 			 oxid, size, sid);
@@ -1820,6 +2037,122 @@
 #endif
 }
 
+static void
+lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
+{
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
+	struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
+	struct lpfc_hba *phba = ctxp->phba;
+	struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
+	struct lpfc_nvmet_tgtport *tgtp;
+	uint32_t *payload, qno;
+	uint32_t rc;
+	unsigned long iflags;
+
+	if (!nvmebuf) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+			"6159 process_rcv_fcp_req, nvmebuf is NULL, "
+			"oxid: x%x flg: x%x state: x%x\n",
+			ctxp->oxid, ctxp->flag, ctxp->state);
+		spin_lock_irqsave(&ctxp->ctxlock, iflags);
+		lpfc_nvmet_defer_release(phba, ctxp);
+		spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
+		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
+						 ctxp->oxid);
+		return;
+	}
+
+	if (ctxp->flag & LPFC_NVMET_ABTS_RCV) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+				"6324 IO oxid x%x aborted\n",
+				ctxp->oxid);
+		return;
+	}
+
+	payload = (uint32_t *)(nvmebuf->dbuf.virt);
+	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+	ctxp->flag |= LPFC_NVMET_TNOTIFY;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	if (ctxp->ts_isr_cmd)
+		ctxp->ts_cmd_nvme = ktime_get_ns();
+#endif
+	/*
+	 * The calling sequence should be:
+	 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
+	 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
+	 * When we return from nvmet_fc_rcv_fcp_req, all relevant info
+	 * in the NVME command / FC header is stored.
+	 * A buffer has already been reposted for this IO, so just free
+	 * the nvmebuf.
+	 */
+	rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
+				  payload, ctxp->size);
+	/* Process FCP command */
+	if (rc == 0) {
+		atomic_inc(&tgtp->rcv_fcp_cmd_out);
+		spin_lock_irqsave(&ctxp->ctxlock, iflags);
+		if ((ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) ||
+		    (nvmebuf != ctxp->rqb_buffer)) {
+			spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
+			return;
+		}
+		ctxp->rqb_buffer = NULL;
+		spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
+		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
+		return;
+	}
+
+	/* Processing of FCP command is deferred */
+	if (rc == -EOVERFLOW) {
+		lpfc_nvmeio_data(phba, "NVMET RCV BUSY: xri x%x sz %d "
+				 "from %06x\n",
+				 ctxp->oxid, ctxp->size, ctxp->sid);
+		atomic_inc(&tgtp->rcv_fcp_cmd_out);
+		atomic_inc(&tgtp->defer_fod);
+		spin_lock_irqsave(&ctxp->ctxlock, iflags);
+		if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) {
+			spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
+			return;
+		}
+		spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
+		/*
+		 * Post a replacement DMA buffer to RQ and defer
+		 * freeing rcv buffer till .defer_rcv callback
+		 */
+		qno = nvmebuf->idx;
+		lpfc_post_rq_buffer(
+			phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
+			phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
+		return;
+	}
+	ctxp->flag &= ~LPFC_NVMET_TNOTIFY;
+	atomic_inc(&tgtp->rcv_fcp_cmd_drop);
+	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+			"2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
+			ctxp->oxid, rc,
+			atomic_read(&tgtp->rcv_fcp_cmd_in),
+			atomic_read(&tgtp->rcv_fcp_cmd_out),
+			atomic_read(&tgtp->xmt_fcp_release));
+	lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
+			 ctxp->oxid, ctxp->size, ctxp->sid);
+	spin_lock_irqsave(&ctxp->ctxlock, iflags);
+	lpfc_nvmet_defer_release(phba, ctxp);
+	spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
+	lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
+#endif
+}
+
+static void
+lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *work)
+{
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
+	struct lpfc_nvmet_ctxbuf *ctx_buf =
+		container_of(work, struct lpfc_nvmet_ctxbuf, defer_work);
+
+	lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
+#endif
+}
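
Receive processing is now deferred out of the fast path through a
per-context work item: the context-post path marks LPFC_NVMET_CTX_REUSE_WQ
and queues defer_work, and the drop path fires only when queue_work()
returns false (work already pending). The skeleton of that pattern, with
a hypothetical item type:

	#include <linux/workqueue.h>

	struct rcv_item {
		struct work_struct work;
		/* ... per-command receive state ... */
	};

	static void rcv_work_fn(struct work_struct *work)
	{
		struct rcv_item *item =
			container_of(work, struct rcv_item, work);

		/* heavyweight processing runs here, in process context */
		(void)item;
	}

	static bool rcv_defer(struct workqueue_struct *wq,
			      struct rcv_item *item)
	{
		/* INIT_WORK(&item->work, rcv_work_fn) was done at alloc
		 * time, as in the allocation loop above. A false return
		 * means the work was already pending.
		 */
		return queue_work(wq, &item->work);
	}
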
+
 static struct lpfc_nvmet_ctxbuf *
 lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
 			     struct lpfc_nvmet_ctx_info *current_infop)
@@ -1844,7 +2177,7 @@
 	else
 		get_infop = current_infop->nvmet_ctx_next_cpu;
 
-	for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+	for (i = 0; i < phba->sli4_hba.num_possible_cpu; i++) {
 		if (get_infop == current_infop) {
 			get_infop = get_infop->nvmet_ctx_next_cpu;
 			continue;
@@ -1882,6 +2215,8 @@
  * @phba: pointer to lpfc hba data structure.
  * @idx: relative index of MRQ vector
  * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
+ * @isr_timestamp: in jiffies.
+ * @cqflag: cq processing information regarding workload.
  *
 * This routine is used for processing the WQE associated with an unsolicited
  * event. It first determines whether there is an existing ndlp that matches
@@ -1894,20 +2229,17 @@
 lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
 			    uint32_t idx,
 			    struct rqb_dmabuf *nvmebuf,
-			    uint64_t isr_timestamp)
+			    uint64_t isr_timestamp,
+			    uint8_t cqflag)
 {
 	struct lpfc_nvmet_rcv_ctx *ctxp;
 	struct lpfc_nvmet_tgtport *tgtp;
 	struct fc_frame_header *fc_hdr;
 	struct lpfc_nvmet_ctxbuf *ctx_buf;
 	struct lpfc_nvmet_ctx_info *current_infop;
-	uint32_t *payload;
-	uint32_t size, oxid, sid, rc, qno;
+	uint32_t size, oxid, sid, qno;
 	unsigned long iflag;
 	int current_cpu;
-#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-	uint32_t id;
-#endif
 
 	if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
 		return;
@@ -1916,11 +2248,9 @@
 	if (!nvmebuf || !phba->targetport) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
 				"6157 NVMET FCP Drop IO\n");
-		oxid = 0;
-		size = 0;
-		sid = 0;
-		ctxp = NULL;
-		goto dropit;
+		if (nvmebuf)
+			lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
+		return;
 	}
 
 	/*
@@ -1930,7 +2260,7 @@
 	 * be empty, thus it would need to be replenished with the
 	 * context list from another CPU for this MRQ.
 	 */
-	current_cpu = smp_processor_id();
+	current_cpu = raw_smp_processor_id();
 	current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
 	spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
 	if (current_infop->nvmet_ctx_list_cnt) {
@@ -1948,14 +2278,19 @@
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
-		id = smp_processor_id();
-		if (id < LPFC_CHECK_CPU_CNT)
-			phba->cpucheck_rcv_io[id]++;
+		if (current_cpu < LPFC_CHECK_CPU_CNT) {
+			if (idx != current_cpu)
+				lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
+						"6703 CPU Check rcv: "
+						"cpu %d expect %d\n",
+						current_cpu, idx);
+			phba->sli4_hba.hdwq[idx].cpucheck_rcv_io[current_cpu]++;
+		}
 	}
 #endif
 
 	lpfc_nvmeio_data(phba, "NVMET FCP  RCV: xri x%x sz %d CPU %02x\n",
-			 oxid, size, smp_processor_id());
+			 oxid, size, raw_smp_processor_id());
 
 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
 
@@ -1979,10 +2314,12 @@
 		return;
 	}
 
-	payload = (uint32_t *)(nvmebuf->dbuf.virt);
 	sid = sli4_sid_from_fc_hdr(fc_hdr);
 
 	ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
+	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
+	list_add_tail(&ctxp->list, &phba->sli4_hba.t_active_ctx_list);
+	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
 	if (ctxp->state != LPFC_NVMET_STE_FREE) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
 				"6414 NVMET Context corrupt %d %d oxid x%x\n",
@@ -2001,87 +2338,45 @@
 	ctxp->flag = 0;
 	ctxp->ctxbuf = ctx_buf;
 	ctxp->rqb_buffer = (void *)nvmebuf;
+	ctxp->hdwq = NULL;
 	spin_lock_init(&ctxp->ctxlock);
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-	if (isr_timestamp) {
+	if (isr_timestamp)
 		ctxp->ts_isr_cmd = isr_timestamp;
-		ctxp->ts_cmd_nvme = ktime_get_ns();
-		ctxp->ts_nvme_data = 0;
-		ctxp->ts_data_wqput = 0;
-		ctxp->ts_isr_data = 0;
-		ctxp->ts_data_nvme = 0;
-		ctxp->ts_nvme_status = 0;
-		ctxp->ts_status_wqput = 0;
-		ctxp->ts_isr_status = 0;
-		ctxp->ts_status_nvme = 0;
-	} else {
-		ctxp->ts_cmd_nvme = 0;
-	}
+	ctxp->ts_cmd_nvme = 0;
+	ctxp->ts_nvme_data = 0;
+	ctxp->ts_data_wqput = 0;
+	ctxp->ts_isr_data = 0;
+	ctxp->ts_data_nvme = 0;
+	ctxp->ts_nvme_status = 0;
+	ctxp->ts_status_wqput = 0;
+	ctxp->ts_isr_status = 0;
+	ctxp->ts_status_nvme = 0;
 #endif
 
 	atomic_inc(&tgtp->rcv_fcp_cmd_in);
-	/*
-	 * The calling sequence should be:
-	 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
-	 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
-	 * When we return from nvmet_fc_rcv_fcp_req, all relevant info in
-	 * the NVME command / FC header is stored, so we are free to repost
-	 * the buffer.
-	 */
-	rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
-				  payload, size);
-
-	/* Process FCP command */
-	if (rc == 0) {
-		ctxp->rqb_buffer = NULL;
-		atomic_inc(&tgtp->rcv_fcp_cmd_out);
-		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
+	/* check for cq processing load */
+	if (!cqflag) {
+		lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
 		return;
 	}
 
-	/* Processing of FCP command is deferred */
-	if (rc == -EOVERFLOW) {
-		/*
-		 * Post a brand new DMA buffer to RQ and defer
-		 * freeing rcv buffer till .defer_rcv callback
-		 */
-		qno = nvmebuf->idx;
-		lpfc_post_rq_buffer(
-			phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
-			phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
+	if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
+		atomic_inc(&tgtp->rcv_fcp_cmd_drop);
+		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+				"6325 Unable to queue work for oxid x%x. "
+				"FCP Drop IO [x%x x%x x%x]\n",
+				ctxp->oxid,
+				atomic_read(&tgtp->rcv_fcp_cmd_in),
+				atomic_read(&tgtp->rcv_fcp_cmd_out),
+				atomic_read(&tgtp->xmt_fcp_release));
 
-		lpfc_nvmeio_data(phba,
-				 "NVMET RCV BUSY: xri x%x sz %d from %06x\n",
-				 oxid, size, sid);
-		atomic_inc(&tgtp->rcv_fcp_cmd_out);
-		atomic_inc(&tgtp->defer_fod);
-		return;
-	}
-	ctxp->rqb_buffer = nvmebuf;
-
-	atomic_inc(&tgtp->rcv_fcp_cmd_drop);
-	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
-			"6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
-			ctxp->oxid, rc,
-			atomic_read(&tgtp->rcv_fcp_cmd_in),
-			atomic_read(&tgtp->rcv_fcp_cmd_out),
-			atomic_read(&tgtp->xmt_fcp_release));
-dropit:
-	lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
-			 oxid, size, sid);
-	if (oxid) {
+		spin_lock_irqsave(&ctxp->ctxlock, iflag);
 		lpfc_nvmet_defer_release(phba, ctxp);
+		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
 		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
-		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
-		return;
 	}
-
-	if (ctx_buf)
-		lpfc_nvmet_ctxbuf_post(phba, ctx_buf);
-
-	if (nvmebuf)
-		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
 }
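
The rewritten tail of lpfc_nvmet_unsol_fcp_buffer() now makes one decision: light CQ load (!cqflag) is handled inline, otherwise the work is queued, and a queue_work() failure (work already pending) becomes an explicit drop-and-abort path. A toy model of that dispatch shape, assuming nothing about the real workqueue:

```c
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins; in the driver these are queue_work() and the defer handler. */
static bool fake_queue_work(bool already_queued) { return !already_queued; }
static void process_now(int oxid) { printf("inline oxid 0x%x\n", oxid); }

/* Light CQ load is handled inline, heavy load is pushed to a workqueue,
 * and a queueing failure is treated as a drop needing explicit cleanup. */
static void rcv(int oxid, bool cq_busy, bool already_queued)
{
	if (!cq_busy) {
		process_now(oxid);
		return;
	}
	if (!fake_queue_work(already_queued))
		printf("drop oxid 0x%x: work already queued\n", oxid);
}

int main(void)
{
	rcv(0x10, false, false);  /* processed inline */
	rcv(0x11, true, false);   /* deferred to the work handler */
	rcv(0x12, true, true);    /* queue_work() failed -> drop path */
	return 0;
}
```
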
 
 /**
@@ -2118,6 +2413,8 @@
  * @phba: pointer to lpfc hba data structure.
  * @idx: relative index of MRQ vector
  * @nvmebuf: pointer to received nvme data structure.
+ * @isr_timestamp: in jiffies.
+ * @cqflag: cq processing information regarding workload.
  *
  * This routine is used to process an unsolicited event received from a SLI
  * (Service Level Interface) ring. The actual processing of the data buffer
@@ -2129,14 +2426,14 @@
 lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
 			   uint32_t idx,
 			   struct rqb_dmabuf *nvmebuf,
-			   uint64_t isr_timestamp)
+			   uint64_t isr_timestamp,
+			   uint8_t cqflag)
 {
 	if (phba->nvmet_support == 0) {
 		lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
 		return;
 	}
-	lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf,
-				    isr_timestamp);
+	lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf, isr_timestamp, cqflag);
 }
 
 /**
@@ -2603,8 +2900,7 @@
 	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
 	nvmewqe->context1 = ndlp;
 
-	for (i = 0; i < rsp->sg_cnt; i++) {
-		sgel = &rsp->sg[i];
+	for_each_sg(rsp->sg, sgel, rsp->sg_cnt, i) {
 		physaddr = sg_dma_address(sgel);
 		cnt = sg_dma_len(sgel);
 		sgl->addr_hi = putPaddrHigh(physaddr);
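
for_each_sg() walks the scatterlist by following each entry's link, whereas the removed &rsp->sg[i] indexing assumed one contiguous array, which breaks on chained scatterlists. A self-contained toy (the macro below is a simplification of the kernel walker, not its implementation):

```c
#include <stdio.h>

struct sg { unsigned long addr; unsigned len; struct sg *next; };

/* Simplified walker: advance by link, bounded by a count. The real
 * scatterlist can be chained across pages, so sg + i is not
 * guaranteed to be the i-th entry. */
#define for_each_sg(list, s, count, i) \
	for ((i) = 0, (s) = (list); (i) < (count) && (s); (i)++, (s) = (s)->next)

int main(void)
{
	struct sg b = { 0x2000, 512, NULL };
	struct sg a = { 0x1000, 256, &b };   /* a links to b, not a[1] */
	struct sg *s;
	int i;

	for_each_sg(&a, s, 2, i)
		printf("sge %d: addr 0x%lx len %u\n", i, s->addr, s->len);
	return 0;
}
```
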
@@ -2654,27 +2950,28 @@
 {
 	struct lpfc_nvmet_rcv_ctx *ctxp;
 	struct lpfc_nvmet_tgtport *tgtp;
-	uint32_t status, result;
+	uint32_t result;
 	unsigned long flags;
 	bool released = false;
 
 	ctxp = cmdwqe->context2;
-	status = bf_get(lpfc_wcqe_c_status, wcqe);
 	result = wcqe->parameter;
 
 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
 	if (ctxp->flag & LPFC_NVMET_ABORT_OP)
 		atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
 
+	spin_lock_irqsave(&ctxp->ctxlock, flags);
 	ctxp->state = LPFC_NVMET_STE_DONE;
 
 	/* Check if we already received a free context call
 	 * and we have completed processing an abort situation.
 	 */
-	spin_lock_irqsave(&ctxp->ctxlock, flags);
 	if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
 	    !(ctxp->flag & LPFC_NVMET_XBUSY)) {
-		list_del(&ctxp->list);
+		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
+		list_del_init(&ctxp->list);
+		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 		released = true;
 	}
 	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
@@ -2682,7 +2979,7 @@
 	atomic_inc(&tgtp->xmt_abort_rsp);
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
-			"6165 ABORT cmpl: xri x%x flg x%x (%d) "
+			"6165 ABORT cmpl: oxid x%x flg x%x (%d) "
 			"WCQE: %08x %08x %08x %08x\n",
 			ctxp->oxid, ctxp->flag, released,
 			wcqe->word0, wcqe->total_data_placed,
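
The completion hunks above widen the ctxlock to cover the state transition and nest the abort-buffer-list lock inside it; a fixed outer-then-inner order is what keeps the two locks deadlock-free. A userspace sketch with pthread mutexes standing in for the spinlocks:

```c
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ctxlock = PTHREAD_MUTEX_INITIALIZER;   /* outer */
static pthread_mutex_t listlock = PTHREAD_MUTEX_INITIALIZER;  /* inner */
static int state, on_list = 1;

static void complete(void)
{
	pthread_mutex_lock(&ctxlock);      /* always take the outer first */
	state = 2;                         /* state change now under the lock */
	if (on_list) {
		pthread_mutex_lock(&listlock);
		on_list = 0;               /* list_del_init() equivalent */
		pthread_mutex_unlock(&listlock);
	}
	pthread_mutex_unlock(&ctxlock);
	printf("state=%d on_list=%d\n", state, on_list);
}

int main(void) { complete(); return 0; }
```
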
@@ -2723,11 +3020,10 @@
 	struct lpfc_nvmet_rcv_ctx *ctxp;
 	struct lpfc_nvmet_tgtport *tgtp;
 	unsigned long flags;
-	uint32_t status, result;
+	uint32_t result;
 	bool released = false;
 
 	ctxp = cmdwqe->context2;
-	status = bf_get(lpfc_wcqe_c_status, wcqe);
 	result = wcqe->parameter;
 
 	if (!ctxp) {
@@ -2740,6 +3036,7 @@
 	}
 
 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+	spin_lock_irqsave(&ctxp->ctxlock, flags);
 	if (ctxp->flag & LPFC_NVMET_ABORT_OP)
 		atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
 
@@ -2754,10 +3051,11 @@
 	 * and we have completed processing an abort situation.
 	 */
 	ctxp->state = LPFC_NVMET_STE_DONE;
-	spin_lock_irqsave(&ctxp->ctxlock, flags);
 	if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
 	    !(ctxp->flag & LPFC_NVMET_XBUSY)) {
-		list_del(&ctxp->list);
+		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
+		list_del_init(&ctxp->list);
+		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 		released = true;
 	}
 	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
@@ -2765,7 +3063,7 @@
 	atomic_inc(&tgtp->xmt_abort_rsp);
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
-			"6316 ABTS cmpl xri x%x flg x%x (%x) "
+			"6316 ABTS cmpl oxid x%x flg x%x (%x) "
 			"WCQE: %08x %08x %08x %08x\n",
 			ctxp->oxid, ctxp->flag, released,
 			wcqe->word0, wcqe->total_data_placed,
@@ -2802,17 +3100,16 @@
 {
 	struct lpfc_nvmet_rcv_ctx *ctxp;
 	struct lpfc_nvmet_tgtport *tgtp;
-	uint32_t status, result;
+	uint32_t result;
 
 	ctxp = cmdwqe->context2;
-	status = bf_get(lpfc_wcqe_c_status, wcqe);
 	result = wcqe->parameter;
 
 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
 	atomic_inc(&tgtp->xmt_ls_abort_cmpl);
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
-			"6083 Abort cmpl: ctx %p WCQE:%08x %08x %08x %08x\n",
+			"6083 Abort cmpl: ctx x%px WCQE:%08x %08x %08x %08x\n",
 			ctxp, wcqe->word0, wcqe->total_data_placed,
 			result, wcqe->word3);
 
@@ -2963,12 +3260,15 @@
 				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
 
 		/* No failure to an ABTS request. */
+		spin_lock_irqsave(&ctxp->ctxlock, flags);
 		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
+		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
 		return 0;
 	}
 
 	/* Issue ABTS for this WQE based on iotag */
 	ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
+	spin_lock_irqsave(&ctxp->ctxlock, flags);
 	if (!ctxp->abort_wqeq) {
 		atomic_inc(&tgtp->xmt_abort_rsp_error);
 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
@@ -2976,11 +3276,13 @@
 				"xri: x%x\n", ctxp->oxid);
 		/* No failure to an ABTS request. */
 		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
+		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
 		return 0;
 	}
 	abts_wqeq = ctxp->abort_wqeq;
 	abts_wqe = &abts_wqeq->wqe;
 	ctxp->state = LPFC_NVMET_STE_ABORT;
+	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
 
 	/* Announce entry to new IO submit field. */
 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
@@ -2993,7 +3295,7 @@
 	 */
 	spin_lock_irqsave(&phba->hbalock, flags);
 	/* driver queued commands are in process of being flushed */
-	if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
+	if (phba->hba_flag & HBA_IOQ_FLUSH) {
 		spin_unlock_irqrestore(&phba->hbalock, flags);
 		atomic_inc(&tgtp->xmt_abort_rsp_error);
 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
@@ -3001,7 +3303,9 @@
 				"NVME Req now. hba_flag x%x oxid x%x\n",
 				phba->hba_flag, ctxp->oxid);
 		lpfc_sli_release_iocbq(phba, abts_wqeq);
+		spin_lock_irqsave(&ctxp->ctxlock, flags);
 		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
+		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
 		return 0;
 	}
 
@@ -3014,7 +3318,9 @@
 				"still pending on oxid x%x\n",
 				ctxp->oxid);
 		lpfc_sli_release_iocbq(phba, abts_wqeq);
+		spin_lock_irqsave(&ctxp->ctxlock, flags);
 		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
+		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
 		return 0;
 	}
 
@@ -3024,7 +3330,7 @@
 	/* WQEs are reused.  Clear stale data and set key fields to
 	 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
 	 */
-	memset(abts_wqe, 0, sizeof(union lpfc_wqe));
+	memset(abts_wqe, 0, sizeof(*abts_wqe));
 
 	/* word 3 */
 	bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
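
memset(abts_wqe, 0, sizeof(*abts_wqe)) derives the size from the pointee rather than naming a type, so the clear cannot drift if the pointer's type later changes. The idiom in isolation:

```c
#include <string.h>
#include <stdio.h>

struct wqe128 { unsigned int words[32]; };   /* illustrative layout */

int main(void)
{
	struct wqe128 w;

	/* Preferred: stays correct even if w's type is changed later. */
	memset(&w, 0, sizeof(w));

	/* Through a pointer, the same idiom is sizeof(*p). */
	struct wqe128 *p = &w;
	memset(p, 0, sizeof(*p));

	printf("%zu bytes cleared\n", sizeof(*p));
	return 0;
}
```
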
@@ -3058,7 +3364,10 @@
 	abts_wqeq->iocb_flag |= LPFC_IO_NVME;
 	abts_wqeq->context2 = ctxp;
 	abts_wqeq->vport = phba->pport;
-	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
+	if (!ctxp->hdwq)
+		ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
+
+	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
 	spin_unlock_irqrestore(&phba->hbalock, flags);
 	if (rc == WQE_SUCCESS) {
 		atomic_inc(&tgtp->xmt_abort_sol);
@@ -3066,7 +3375,9 @@
 	}
 
 	atomic_inc(&tgtp->xmt_abort_rsp_error);
+	spin_lock_irqsave(&ctxp->ctxlock, flags);
 	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
+	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
 	lpfc_sli_release_iocbq(phba, abts_wqeq);
 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
 			"6166 Failed ABORT issue_wqe with status x%x "
@@ -3075,7 +3386,6 @@
 	return 1;
 }
 
-
 static int
 lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
 				 struct lpfc_nvmet_rcv_ctx *ctxp,
@@ -3084,6 +3394,7 @@
 	struct lpfc_nvmet_tgtport *tgtp;
 	struct lpfc_iocbq *abts_wqeq;
 	unsigned long flags;
+	bool released = false;
 	int rc;
 
 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
@@ -3110,7 +3421,10 @@
 	abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
 	abts_wqeq->iocb_cmpl = NULL;
 	abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
-	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
+	if (!ctxp->hdwq)
+		ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
+
+	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
 	spin_unlock_irqrestore(&phba->hbalock, flags);
 	if (rc == WQE_SUCCESS) {
 		return 0;
@@ -3118,16 +3432,22 @@
 
 aerr:
 	spin_lock_irqsave(&ctxp->ctxlock, flags);
-	if (ctxp->flag & LPFC_NVMET_CTX_RLS)
-		list_del(&ctxp->list);
+	if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
+		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
+		list_del_init(&ctxp->list);
+		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
+		released = true;
+	}
 	ctxp->flag &= ~(LPFC_NVMET_ABORT_OP | LPFC_NVMET_CTX_RLS);
 	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
 
 	atomic_inc(&tgtp->xmt_abort_rsp_error);
 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
-			"6135 Failed to Issue ABTS for oxid x%x. Status x%x\n",
-			ctxp->oxid, rc);
-	lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
+			"6135 Failed to Issue ABTS for oxid x%x. Status x%x "
+			"(%x)\n",
+			ctxp->oxid, rc, released);
+	if (released)
+		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
 	return 1;
 }
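
The aerr path now records whether this CPU actually unlinked the context (released) and reposts the ctxbuf only in that case, instead of unconditionally. The control-flow shape, with hypothetical flags in place of the driver's state:

```c
#include <stdbool.h>
#include <stdio.h>

static bool rls_requested = true, linked = true;

static void abort_error_path(void)
{
	bool released = false;

	if (rls_requested && linked) {  /* we are the one unlinking it */
		linked = false;
		released = true;
	}

	/* Old code reposted unconditionally; now only the unlinker does. */
	if (released)
		printf("repost ctxbuf\n");
	else
		printf("owner elsewhere, skip repost\n");
}

int main(void) { abort_error_path(); return 0; }
```
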
 
@@ -3138,7 +3458,6 @@
 {
 	struct lpfc_nvmet_tgtport *tgtp;
 	struct lpfc_iocbq *abts_wqeq;
-	union lpfc_wqe128 *wqe_abts;
 	unsigned long flags;
 	int rc;
 
@@ -3168,7 +3487,6 @@
 		}
 	}
 	abts_wqeq = ctxp->wqeq;
-	wqe_abts = &abts_wqeq->wqe;
 
 	if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
 		rc = WQE_BUSY;
@@ -3179,7 +3497,7 @@
 	abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
 	abts_wqeq->iocb_cmpl = 0;
 	abts_wqeq->iocb_flag |=  LPFC_IO_NVME_LS;
-	rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq);
+	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
 	spin_unlock_irqrestore(&phba->hbalock, flags);
 	if (rc == WQE_SUCCESS) {
 		atomic_inc(&tgtp->xmt_abort_unsol);
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
index 1aaff63..8ff67de 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -27,14 +27,15 @@
 #define LPFC_NVMET_RQE_DEF_COUNT	2048
 #define LPFC_NVMET_SUCCESS_LEN		12
 
-#define LPFC_NVMET_MRQ_OFF		0xffff
 #define LPFC_NVMET_MRQ_AUTO		0
 #define LPFC_NVMET_MRQ_MAX		16
 
+#define LPFC_NVMET_WAIT_TMO		(5 * MSEC_PER_SEC)
+
 /* Used for NVME Target */
 struct lpfc_nvmet_tgtport {
 	struct lpfc_hba *phba;
-	struct completion tport_unreg_done;
+	struct completion *tport_unreg_cmp;
 
 	/* Stats counters - lpfc_nvmet_unsol_ls_buffer */
 	atomic_t rcv_ls_req_in;
@@ -137,9 +138,12 @@
 #define LPFC_NVMET_XBUSY		0x4  /* XB bit set on IO cmpl */
 #define LPFC_NVMET_CTX_RLS		0x8  /* ctx free requested */
 #define LPFC_NVMET_ABTS_RCV		0x10  /* ABTS received on exchange */
+#define LPFC_NVMET_CTX_REUSE_WQ		0x20  /* ctx reused via WQ */
 #define LPFC_NVMET_DEFER_WQFULL		0x40  /* Waiting on a free WQE */
+#define LPFC_NVMET_TNOTIFY		0x80  /* notify transport of abts */
 	struct rqb_dmabuf *rqb_buffer;
 	struct lpfc_nvmet_ctxbuf *ctxbuf;
+	struct lpfc_sli4_hdw_queue *hdwq;
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	uint64_t ts_isr_cmd;
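
LPFC_NVMET_CTX_REUSE_WQ and LPFC_NVMET_TNOTIFY extend the existing flag word; as the .c hunks above show, tests and updates of that word are done under ctxlock. Basic bit-flag handling of the same shape (values copied from the header, lock omitted for brevity):

```c
#include <stdio.h>

#define CTX_RLS      0x08
#define ABTS_RCV     0x10
#define CTX_REUSE_WQ 0x20   /* new: ctx reused via WQ */
#define TNOTIFY      0x80   /* new: transport was notified */

int main(void)
{
	unsigned int flag = 0;

	flag |= TNOTIFY;               /* set before handing to transport */
	if (flag & ABTS_RCV)           /* test: was an ABTS seen? */
		puts("aborted");
	flag &= ~TNOTIFY;              /* clear on the error path */
	printf("flag=0x%02x\n", flag);
	return 0;
}
```
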
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 200b5bc..6822cd9 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -53,8 +53,6 @@
 #define LPFC_RESET_WAIT  2
 #define LPFC_ABORT_WAIT  2
 
-int _dump_buf_done = 1;
-
 static char *dif_op_str[] = {
 	"PROT_NORMAL",
 	"PROT_READ_INSERT",
@@ -83,69 +81,12 @@
 }
 
 static void
-lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
+lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
 static void
-lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
+lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
 static int
 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);
 
-static void
-lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
-{
-	void *src, *dst;
-	struct scatterlist *sgde = scsi_sglist(cmnd);
-
-	if (!_dump_buf_data) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-			"9050 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
-				__func__);
-		return;
-	}
-
-
-	if (!sgde) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-			"9051 BLKGRD: ERROR: data scatterlist is null\n");
-		return;
-	}
-
-	dst = (void *) _dump_buf_data;
-	while (sgde) {
-		src = sg_virt(sgde);
-		memcpy(dst, src, sgde->length);
-		dst += sgde->length;
-		sgde = sg_next(sgde);
-	}
-}
-
-static void
-lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
-{
-	void *src, *dst;
-	struct scatterlist *sgde = scsi_prot_sglist(cmnd);
-
-	if (!_dump_buf_dif) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-			"9052 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
-				__func__);
-		return;
-	}
-
-	if (!sgde) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-			"9053 BLKGRD: ERROR: prot scatterlist is null\n");
-		return;
-	}
-
-	dst = _dump_buf_dif;
-	while (sgde) {
-		src = sg_virt(sgde);
-		memcpy(dst, src, sgde->length);
-		dst += sgde->length;
-		sgde = sg_next(sgde);
-	}
-}
-
 static inline unsigned
 lpfc_cmd_blksize(struct scsi_cmnd *sc)
 {
@@ -180,9 +121,9 @@
  **/
 static void
 lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
-				struct lpfc_scsi_buf *lpfc_cmd)
+				struct lpfc_io_buf *lpfc_cmd)
 {
-	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
+	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
 	if (sgl) {
 		sgl += 1;
 		sgl->word2 = le32_to_cpu(sgl->word2);
@@ -200,10 +141,10 @@
  * function updates the statistical data for the command completion.
  **/
 static void
-lpfc_update_stats(struct lpfc_hba *phba, struct  lpfc_scsi_buf *lpfc_cmd)
+lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
 {
-	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
-	struct lpfc_nodelist *pnode = rdata->pnode;
+	struct lpfc_rport_data *rdata;
+	struct lpfc_nodelist *pnode;
 	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
 	unsigned long flags;
 	struct Scsi_Host  *shost = cmd->device->host;
@@ -211,17 +152,19 @@
 	unsigned long latency;
 	int i;
 
-	if (cmd->result)
+	if (!vport->stat_data_enabled ||
+	    vport->stat_data_blocked ||
+	    (cmd->result))
 		return;
 
 	latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
+	rdata = lpfc_cmd->rdata;
+	pnode = rdata->pnode;
 
 	spin_lock_irqsave(shost->host_lock, flags);
-	if (!vport->stat_data_enabled ||
-		vport->stat_data_blocked ||
-		!pnode ||
-		!pnode->lat_data ||
-		(phba->bucket_type == LPFC_NO_BUCKET)) {
+	if (!pnode ||
+	    !pnode->lat_data ||
+	    (phba->bucket_type == LPFC_NO_BUCKET)) {
 		spin_unlock_irqrestore(shost->host_lock, flags);
 		return;
 	}
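
Hoisting the stat_data_enabled/stat_data_blocked tests above the host_lock acquisition means the common "stats disabled" case never touches the lock. The general fast-path shape:

```c
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;
static int stats_enabled;   /* cheap flag, readable without the lock */
static long bucket;

static void update_stats(long sample)
{
	if (!stats_enabled)     /* fast path: no lock traffic at all */
		return;

	pthread_mutex_lock(&host_lock);
	bucket += sample;       /* shared state still updated under lock */
	pthread_mutex_unlock(&host_lock);
}

int main(void)
{
	update_stats(5);        /* disabled: returns immediately */
	stats_enabled = 1;
	update_stats(5);
	printf("bucket=%ld\n", bucket);
	return 0;
}
```
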
@@ -387,12 +330,12 @@
 lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
 {
 	struct lpfc_hba *phba = vport->phba;
-	struct lpfc_scsi_buf *psb;
+	struct lpfc_io_buf *psb;
 	struct ulp_bde64 *bpl;
 	IOCB_t *iocb;
 	dma_addr_t pdma_phys_fcp_cmd;
 	dma_addr_t pdma_phys_fcp_rsp;
-	dma_addr_t pdma_phys_bpl;
+	dma_addr_t pdma_phys_sgl;
 	uint16_t iotag;
 	int bcnt, bpl_size;
 
@@ -406,7 +349,7 @@
 			 (int)sizeof(struct fcp_rsp), bpl_size);
 
 	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
-		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
+		psb = kzalloc(sizeof(struct lpfc_io_buf), GFP_KERNEL);
 		if (!psb)
 			break;
 
@@ -436,14 +379,14 @@
 
 		psb->fcp_cmnd = psb->data;
 		psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
-		psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
+		psb->dma_sgl = psb->data + sizeof(struct fcp_cmnd) +
 			sizeof(struct fcp_rsp);
 
 		/* Initialize local short-hand pointers. */
-		bpl = psb->fcp_bpl;
+		bpl = (struct ulp_bde64 *)psb->dma_sgl;
 		pdma_phys_fcp_cmd = psb->dma_handle;
 		pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
-		pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
+		pdma_phys_sgl = psb->dma_handle + sizeof(struct fcp_cmnd) +
 			sizeof(struct fcp_rsp);
 
 		/*
@@ -494,9 +437,9 @@
 			iocb->un.fcpi64.bdl.bdeSize =
 					(2 * sizeof(struct ulp_bde64));
 			iocb->un.fcpi64.bdl.addrLow =
-					putPaddrLow(pdma_phys_bpl);
+					putPaddrLow(pdma_phys_sgl);
 			iocb->un.fcpi64.bdl.addrHigh =
-					putPaddrHigh(pdma_phys_bpl);
+					putPaddrHigh(pdma_phys_sgl);
 			iocb->ulpBdeCount = 1;
 			iocb->ulpLe = 1;
 		}
@@ -504,6 +447,7 @@
 		psb->status = IOSTAT_SUCCESS;
 		/* Put it back into the SCSI buffer list */
 		psb->cur_iocbq.context1  = psb;
+		spin_lock_init(&psb->buf_lock);
 		lpfc_release_scsi_buf_s3(phba, psb);
 
 	}
@@ -522,38 +466,49 @@
 lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
 {
 	struct lpfc_hba *phba = vport->phba;
-	struct lpfc_scsi_buf *psb, *next_psb;
+	struct lpfc_io_buf *psb, *next_psb;
+	struct lpfc_sli4_hdw_queue *qp;
 	unsigned long iflag = 0;
+	int idx;
 
-	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
+	if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
 		return;
+
 	spin_lock_irqsave(&phba->hbalock, iflag);
-	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
-	list_for_each_entry_safe(psb, next_psb,
-				&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
-		if (psb->rdata && psb->rdata->pnode
-			&& psb->rdata->pnode->vport == vport)
-			psb->rdata = NULL;
+	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
+		qp = &phba->sli4_hba.hdwq[idx];
+
+		spin_lock(&qp->abts_io_buf_list_lock);
+		list_for_each_entry_safe(psb, next_psb,
+					 &qp->lpfc_abts_io_buf_list, list) {
+			if (psb->cur_iocbq.iocb_flag == LPFC_IO_NVME)
+				continue;
+
+			if (psb->rdata && psb->rdata->pnode &&
+			    psb->rdata->pnode->vport == vport)
+				psb->rdata = NULL;
+		}
+		spin_unlock(&qp->abts_io_buf_list_lock);
 	}
-	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
 	spin_unlock_irqrestore(&phba->hbalock, iflag);
 }
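
The global abts_scsi_buf_list is replaced by one abort list per hardware queue, and the vport-delete walk now locks each queue's list only while scanning it. A compact model (fixed arrays stand in for the driver's list heads):

```c
#include <pthread.h>
#include <stdio.h>

#define NUM_HDWQ 4
#define PER_Q    2

struct hdwq {
	pthread_mutex_t lock;
	int bufs[PER_Q];        /* stand-in for lpfc_abts_io_buf_list */
};

int main(void)
{
	struct hdwq q[NUM_HDWQ];
	int idx, i;

	for (idx = 0; idx < NUM_HDWQ; idx++) {
		pthread_mutex_init(&q[idx].lock, NULL);
		for (i = 0; i < PER_Q; i++)
			q[idx].bufs[i] = idx * PER_Q + i;
	}

	/* Same shape as the new vport-delete walk: per-queue lock scope. */
	for (idx = 0; idx < NUM_HDWQ; idx++) {
		pthread_mutex_lock(&q[idx].lock);
		for (i = 0; i < PER_Q; i++)
			printf("hdwq %d buf %d\n", idx, q[idx].bufs[i]);
		pthread_mutex_unlock(&q[idx].lock);
	}
	return 0;
}
```
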
 
 /**
- * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
+ * lpfc_sli4_io_xri_aborted - Fast-path process of fcp xri abort
  * @phba: pointer to lpfc hba data structure.
  * @axri: pointer to the fcp xri abort wcqe structure.
  *
  * This routine is invoked by the worker thread to process a SLI4 fast-path
- * FCP aborted xri.
+ * FCP or NVME aborted xri.
  **/
 void
-lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
-			  struct sli4_wcqe_xri_aborted *axri)
+lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
+			 struct sli4_wcqe_xri_aborted *axri, int idx)
 {
 	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
 	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
-	struct lpfc_scsi_buf *psb, *next_psb;
+	struct lpfc_io_buf *psb, *next_psb;
+	struct lpfc_sli4_hdw_queue *qp;
 	unsigned long iflag = 0;
 	struct lpfc_iocbq *iocbq;
 	int i;
@@ -563,16 +518,26 @@
 
 	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
 		return;
+
+	qp = &phba->sli4_hba.hdwq[idx];
 	spin_lock_irqsave(&phba->hbalock, iflag);
-	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+	spin_lock(&qp->abts_io_buf_list_lock);
 	list_for_each_entry_safe(psb, next_psb,
-		&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
+		&qp->lpfc_abts_io_buf_list, list) {
 		if (psb->cur_iocbq.sli4_xritag == xri) {
-			list_del(&psb->list);
+			list_del_init(&psb->list);
 			psb->exch_busy = 0;
 			psb->status = IOSTAT_SUCCESS;
-			spin_unlock(
-				&phba->sli4_hba.abts_scsi_buf_list_lock);
+			if (psb->cur_iocbq.iocb_flag == LPFC_IO_NVME) {
+				qp->abts_nvme_io_bufs--;
+				spin_unlock(&qp->abts_io_buf_list_lock);
+				spin_unlock_irqrestore(&phba->hbalock, iflag);
+				lpfc_sli4_nvme_xri_aborted(phba, axri, psb);
+				return;
+			}
+			qp->abts_scsi_io_bufs--;
+			spin_unlock(&qp->abts_io_buf_list_lock);
+
 			if (psb->rdata && psb->rdata->pnode)
 				ndlp = psb->rdata->pnode;
 			else
@@ -591,16 +556,16 @@
 			return;
 		}
 	}
-	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+	spin_unlock(&qp->abts_io_buf_list_lock);
 	for (i = 1; i <= phba->sli.last_iotag; i++) {
 		iocbq = phba->sli.iocbq_lookup[i];
 
-		if (!(iocbq->iocb_flag &  LPFC_IO_FCP) ||
-			(iocbq->iocb_flag & LPFC_IO_LIBDFC))
+		if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
+		    (iocbq->iocb_flag & LPFC_IO_LIBDFC))
 			continue;
 		if (iocbq->sli4_xritag != xri)
 			continue;
-		psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
+		psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
 		psb->exch_busy = 0;
 		spin_unlock_irqrestore(&phba->hbalock, iflag);
 		if (!list_empty(&pring->txq))
@@ -612,359 +577,6 @@
 }
 
 /**
- * lpfc_sli4_post_scsi_sgl_list - Post blocks of scsi buffer sgls from a list
- * @phba: pointer to lpfc hba data structure.
- * @post_sblist: pointer to the scsi buffer list.
- *
- * This routine walks a list of scsi buffers that was passed in. It attempts
- * to construct blocks of scsi buffer sgls which contains contiguous xris and
- * uses the non-embedded SGL block post mailbox commands to post to the port.
- * For single SCSI buffer sgl with non-contiguous xri, if any, it shall use
- * embedded SGL post mailbox command for posting. The @post_sblist passed in
- * must be local list, thus no lock is needed when manipulate the list.
- *
- * Returns: 0 = failure, non-zero number of successfully posted buffers.
- **/
-static int
-lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
-			     struct list_head *post_sblist, int sb_count)
-{
-	struct lpfc_scsi_buf *psb, *psb_next;
-	int status, sgl_size;
-	int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
-	dma_addr_t pdma_phys_bpl1;
-	int last_xritag = NO_XRI;
-	LIST_HEAD(prep_sblist);
-	LIST_HEAD(blck_sblist);
-	LIST_HEAD(scsi_sblist);
-
-	/* sanity check */
-	if (sb_count <= 0)
-		return -EINVAL;
-
-	sgl_size = phba->cfg_sg_dma_buf_size -
-		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
-
-	list_for_each_entry_safe(psb, psb_next, post_sblist, list) {
-		list_del_init(&psb->list);
-		block_cnt++;
-		if ((last_xritag != NO_XRI) &&
-		    (psb->cur_iocbq.sli4_xritag != last_xritag + 1)) {
-			/* a hole in xri block, form a sgl posting block */
-			list_splice_init(&prep_sblist, &blck_sblist);
-			post_cnt = block_cnt - 1;
-			/* prepare list for next posting block */
-			list_add_tail(&psb->list, &prep_sblist);
-			block_cnt = 1;
-		} else {
-			/* prepare list for next posting block */
-			list_add_tail(&psb->list, &prep_sblist);
-			/* enough sgls for non-embed sgl mbox command */
-			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
-				list_splice_init(&prep_sblist, &blck_sblist);
-				post_cnt = block_cnt;
-				block_cnt = 0;
-			}
-		}
-		num_posting++;
-		last_xritag = psb->cur_iocbq.sli4_xritag;
-
-		/* end of repost sgl list condition for SCSI buffers */
-		if (num_posting == sb_count) {
-			if (post_cnt == 0) {
-				/* last sgl posting block */
-				list_splice_init(&prep_sblist, &blck_sblist);
-				post_cnt = block_cnt;
-			} else if (block_cnt == 1) {
-				/* last single sgl with non-contiguous xri */
-				if (sgl_size > SGL_PAGE_SIZE)
-					pdma_phys_bpl1 = psb->dma_phys_bpl +
-								SGL_PAGE_SIZE;
-				else
-					pdma_phys_bpl1 = 0;
-				status = lpfc_sli4_post_sgl(phba,
-						psb->dma_phys_bpl,
-						pdma_phys_bpl1,
-						psb->cur_iocbq.sli4_xritag);
-				if (status) {
-					/* failure, put on abort scsi list */
-					psb->exch_busy = 1;
-				} else {
-					/* success, put on SCSI buffer list */
-					psb->exch_busy = 0;
-					psb->status = IOSTAT_SUCCESS;
-					num_posted++;
-				}
-				/* success, put on SCSI buffer sgl list */
-				list_add_tail(&psb->list, &scsi_sblist);
-			}
-		}
-
-		/* continue until a nembed page worth of sgls */
-		if (post_cnt == 0)
-			continue;
-
-		/* post block of SCSI buffer list sgls */
-		status = lpfc_sli4_post_scsi_sgl_block(phba, &blck_sblist,
-						       post_cnt);
-
-		/* don't reset xirtag due to hole in xri block */
-		if (block_cnt == 0)
-			last_xritag = NO_XRI;
-
-		/* reset SCSI buffer post count for next round of posting */
-		post_cnt = 0;
-
-		/* put posted SCSI buffer-sgl posted on SCSI buffer sgl list */
-		while (!list_empty(&blck_sblist)) {
-			list_remove_head(&blck_sblist, psb,
-					 struct lpfc_scsi_buf, list);
-			if (status) {
-				/* failure, put on abort scsi list */
-				psb->exch_busy = 1;
-			} else {
-				/* success, put on SCSI buffer list */
-				psb->exch_busy = 0;
-				psb->status = IOSTAT_SUCCESS;
-				num_posted++;
-			}
-			list_add_tail(&psb->list, &scsi_sblist);
-		}
-	}
-	/* Push SCSI buffers with sgl posted to the availble list */
-	while (!list_empty(&scsi_sblist)) {
-		list_remove_head(&scsi_sblist, psb,
-				 struct lpfc_scsi_buf, list);
-		lpfc_release_scsi_buf_s4(phba, psb);
-	}
-	return num_posted;
-}
-
-/**
- * lpfc_sli4_repost_scsi_sgl_list - Repost all the allocated scsi buffer sgls
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine walks the list of scsi buffers that have been allocated and
- * repost them to the port by using SGL block post. This is needed after a
- * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
- * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list
- * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers.
- *
- * Returns: 0 = success, non-zero failure.
- **/
-int
-lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
-{
-	LIST_HEAD(post_sblist);
-	int num_posted, rc = 0;
-
-	/* get all SCSI buffers need to repost to a local list */
-	spin_lock_irq(&phba->scsi_buf_list_get_lock);
-	spin_lock(&phba->scsi_buf_list_put_lock);
-	list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist);
-	list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist);
-	spin_unlock(&phba->scsi_buf_list_put_lock);
-	spin_unlock_irq(&phba->scsi_buf_list_get_lock);
-
-	/* post the list of scsi buffer sgls to port if available */
-	if (!list_empty(&post_sblist)) {
-		num_posted = lpfc_sli4_post_scsi_sgl_list(phba, &post_sblist,
-						phba->sli4_hba.scsi_xri_cnt);
-		/* failed to post any scsi buffer, return error */
-		if (num_posted == 0)
-			rc = -EIO;
-	}
-	return rc;
-}
-
-/**
- * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec
- * @vport: The virtual port for which this call being executed.
- * @num_to_allocate: The requested number of buffers to allocate.
- *
- * This routine allocates scsi buffers for device with SLI-4 interface spec,
- * the scsi buffer contains all the necessary information needed to initiate
- * a SCSI I/O. After allocating up to @num_to_allocate SCSI buffers and put
- * them on a list, it post them to the port by using SGL block post.
- *
- * Return codes:
- *   int - number of scsi buffers that were allocated and posted.
- *   0 = failure, less than num_to_alloc is a partial failure.
- **/
-static int
-lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
-{
-	struct lpfc_hba *phba = vport->phba;
-	struct lpfc_scsi_buf *psb;
-	struct sli4_sge *sgl;
-	IOCB_t *iocb;
-	dma_addr_t pdma_phys_fcp_cmd;
-	dma_addr_t pdma_phys_fcp_rsp;
-	dma_addr_t pdma_phys_bpl;
-	uint16_t iotag, lxri = 0;
-	int bcnt, num_posted, sgl_size;
-	LIST_HEAD(prep_sblist);
-	LIST_HEAD(post_sblist);
-	LIST_HEAD(scsi_sblist);
-
-	sgl_size = phba->cfg_sg_dma_buf_size -
-		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
-
-	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
-			 "9068 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
-			 num_to_alloc, phba->cfg_sg_dma_buf_size, sgl_size,
-			 (int)sizeof(struct fcp_cmnd),
-			 (int)sizeof(struct fcp_rsp));
-
-	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
-		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
-		if (!psb)
-			break;
-		/*
-		 * Get memory from the pci pool to map the virt space to
-		 * pci bus space for an I/O. The DMA buffer includes space
-		 * for the struct fcp_cmnd, struct fcp_rsp and the number
-		 * of bde's necessary to support the sg_tablesize.
-		 */
-		psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
-						GFP_KERNEL, &psb->dma_handle);
-		if (!psb->data) {
-			kfree(psb);
-			break;
-		}
-
-		/*
-		 * 4K Page alignment is CRITICAL to BlockGuard, double check
-		 * to be sure.
-		 */
-		if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
-		    (((unsigned long)(psb->data) &
-		    (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-					"3369 Memory alignment error "
-					"addr=%lx\n",
-					(unsigned long)psb->data);
-			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
-				      psb->data, psb->dma_handle);
-			kfree(psb);
-			break;
-		}
-
-
-		lxri = lpfc_sli4_next_xritag(phba);
-		if (lxri == NO_XRI) {
-			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
-				      psb->data, psb->dma_handle);
-			kfree(psb);
-			break;
-		}
-
-		/* Allocate iotag for psb->cur_iocbq. */
-		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
-		if (iotag == 0) {
-			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
-				      psb->data, psb->dma_handle);
-			kfree(psb);
-			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-					"3368 Failed to allocate IOTAG for"
-					" XRI:0x%x\n", lxri);
-			lpfc_sli4_free_xri(phba, lxri);
-			break;
-		}
-		psb->cur_iocbq.sli4_lxritag = lxri;
-		psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
-		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
-		psb->fcp_bpl = psb->data;
-		psb->fcp_cmnd = (psb->data + sgl_size);
-		psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
-					sizeof(struct fcp_cmnd));
-
-		/* Initialize local short-hand pointers. */
-		sgl = (struct sli4_sge *)psb->fcp_bpl;
-		pdma_phys_bpl = psb->dma_handle;
-		pdma_phys_fcp_cmd = (psb->dma_handle + sgl_size);
-		pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
-
-		/*
-		 * The first two bdes are the FCP_CMD and FCP_RSP.
-		 * The balance are sg list bdes. Initialize the
-		 * first two and leave the rest for queuecommand.
-		 */
-		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
-		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
-		sgl->word2 = le32_to_cpu(sgl->word2);
-		bf_set(lpfc_sli4_sge_last, sgl, 0);
-		sgl->word2 = cpu_to_le32(sgl->word2);
-		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
-		sgl++;
-
-		/* Setup the physical region for the FCP RSP */
-		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
-		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
-		sgl->word2 = le32_to_cpu(sgl->word2);
-		bf_set(lpfc_sli4_sge_last, sgl, 1);
-		sgl->word2 = cpu_to_le32(sgl->word2);
-		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
-
-		/*
-		 * Since the IOCB for the FCP I/O is built into this
-		 * lpfc_scsi_buf, initialize it with all known data now.
-		 */
-		iocb = &psb->cur_iocbq.iocb;
-		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
-		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
-		/* setting the BLP size to 2 * sizeof BDE may not be correct.
-		 * We are setting the bpl to point to out sgl. An sgl's
-		 * entries are 16 bytes, a bpl entries are 12 bytes.
-		 */
-		iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
-		iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
-		iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
-		iocb->ulpBdeCount = 1;
-		iocb->ulpLe = 1;
-		iocb->ulpClass = CLASS3;
-		psb->cur_iocbq.context1 = psb;
-		psb->dma_phys_bpl = pdma_phys_bpl;
-
-		/* add the scsi buffer to a post list */
-		list_add_tail(&psb->list, &post_sblist);
-		spin_lock_irq(&phba->scsi_buf_list_get_lock);
-		phba->sli4_hba.scsi_xri_cnt++;
-		spin_unlock_irq(&phba->scsi_buf_list_get_lock);
-	}
-	lpfc_printf_log(phba, KERN_INFO, LOG_BG | LOG_FCP,
-			"3021 Allocate %d out of %d requested new SCSI "
-			"buffers\n", bcnt, num_to_alloc);
-
-	/* post the list of scsi buffer sgls to port if available */
-	if (!list_empty(&post_sblist))
-		num_posted = lpfc_sli4_post_scsi_sgl_list(phba,
-							  &post_sblist, bcnt);
-	else
-		num_posted = 0;
-
-	return num_posted;
-}
-
-/**
- * lpfc_new_scsi_buf - Wrapper funciton for scsi buffer allocator
- * @vport: The virtual port for which this call being executed.
- * @num_to_allocate: The requested number of buffers to allocate.
- *
- * This routine wraps the actual SCSI buffer allocator function pointer from
- * the lpfc_hba struct.
- *
- * Return codes:
- *   int - number of scsi buffers that were allocated.
- *   0 = failure, less than num_to_alloc is a partial failure.
- **/
-static inline int
-lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
-{
-	return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc);
-}
-
-/**
  * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
  * @phba: The HBA for which this call is being executed.
  *
@@ -975,15 +587,16 @@
  *   NULL - Error
  *   Pointer to lpfc_scsi_buf - Success
  **/
-static struct lpfc_scsi_buf*
-lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+static struct lpfc_io_buf *
+lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+		     struct scsi_cmnd *cmnd)
 {
-	struct  lpfc_scsi_buf * lpfc_cmd = NULL;
+	struct lpfc_io_buf *lpfc_cmd = NULL;
 	struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
 	unsigned long iflag = 0;
 
 	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
-	list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf,
+	list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_io_buf,
 			 list);
 	if (!lpfc_cmd) {
 		spin_lock(&phba->scsi_buf_list_put_lock);
@@ -991,7 +604,7 @@
 			    &phba->lpfc_scsi_buf_list_get);
 		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
 		list_remove_head(scsi_buf_list_get, lpfc_cmd,
-				 struct lpfc_scsi_buf, list);
+				 struct lpfc_io_buf, list);
 		spin_unlock(&phba->scsi_buf_list_put_lock);
 	}
 	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
@@ -1003,54 +616,110 @@
 	return  lpfc_cmd;
 }
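
lpfc_get_scsi_buf_s3() keeps a consumer "get" list and a producer "put" list and splices only when "get" runs dry, so the two locks rarely contend. The pool logic with locking elided:

```c
#include <stdio.h>

#define POOL 4
static int get_list[POOL], get_n;   /* consumers pop here */
static int put_list[POOL], put_n;   /* frees are pushed here */

static int buf_get(void)
{
	if (!get_n) {                   /* get side empty: splice put -> get */
		while (put_n)
			get_list[get_n++] = put_list[--put_n];
	}
	return get_n ? get_list[--get_n] : -1;
}

static void buf_put(int b) { put_list[put_n++] = b; }

int main(void)
{
	buf_put(1); buf_put(2);
	printf("got %d\n", buf_get());  /* splices, then pops */
	printf("got %d\n", buf_get());
	printf("got %d\n", buf_get());  /* pool empty: -1 */
	return 0;
}
```
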
 /**
- * lpfc_get_scsi_buf_s4 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
+ * lpfc_get_scsi_buf_s4 - Get a scsi buffer from io_buf_list of the HBA
  * @phba: The HBA for which this call is being executed.
  *
- * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
+ * This routine removes a scsi buffer from the head of the @hdwq io_buf_list
  * and returns to caller.
  *
  * Return codes:
  *   NULL - Error
  *   Pointer to lpfc_scsi_buf - Success
  **/
-static struct lpfc_scsi_buf*
-lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+static struct lpfc_io_buf *
+lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+		     struct scsi_cmnd *cmnd)
 {
-	struct lpfc_scsi_buf *lpfc_cmd, *lpfc_cmd_next;
-	unsigned long iflag = 0;
-	int found = 0;
+	struct lpfc_io_buf *lpfc_cmd;
+	struct lpfc_sli4_hdw_queue *qp;
+	struct sli4_sge *sgl;
+	IOCB_t *iocb;
+	dma_addr_t pdma_phys_fcp_rsp;
+	dma_addr_t pdma_phys_fcp_cmd;
+	uint32_t cpu, idx;
+	int tag;
+	struct fcp_cmd_rsp_buf *tmp = NULL;
 
-	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
-	list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
-				 &phba->lpfc_scsi_buf_list_get, list) {
-		if (lpfc_test_rrq_active(phba, ndlp,
-					 lpfc_cmd->cur_iocbq.sli4_lxritag))
-			continue;
-		list_del_init(&lpfc_cmd->list);
-		found = 1;
-		break;
+	cpu = raw_smp_processor_id();
+	if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
+		tag = blk_mq_unique_tag(cmnd->request);
+		idx = blk_mq_unique_tag_to_hwq(tag);
+	} else {
+		idx = phba->sli4_hba.cpu_map[cpu].hdwq;
 	}
-	if (!found) {
-		spin_lock(&phba->scsi_buf_list_put_lock);
-		list_splice(&phba->lpfc_scsi_buf_list_put,
-			    &phba->lpfc_scsi_buf_list_get);
-		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
-		spin_unlock(&phba->scsi_buf_list_put_lock);
-		list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
-					 &phba->lpfc_scsi_buf_list_get, list) {
-			if (lpfc_test_rrq_active(
-				phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag))
-				continue;
-			list_del_init(&lpfc_cmd->list);
-			found = 1;
-			break;
-		}
+
+	lpfc_cmd = lpfc_get_io_buf(phba, ndlp, idx,
+				   !phba->cfg_xri_rebalancing);
+	if (!lpfc_cmd) {
+		qp = &phba->sli4_hba.hdwq[idx];
+		qp->empty_io_bufs++;
+		return NULL;
 	}
-	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
-	if (!found)
+
+	/* Setup key fields in buffer that may have been changed
+	 * if other protocols used this buffer.
+	 */
+	lpfc_cmd->cur_iocbq.iocb_flag = LPFC_IO_FCP;
+	lpfc_cmd->prot_seg_cnt = 0;
+	lpfc_cmd->seg_cnt = 0;
+	lpfc_cmd->timeout = 0;
+	lpfc_cmd->flags = 0;
+	lpfc_cmd->start_time = jiffies;
+	lpfc_cmd->waitq = NULL;
+	lpfc_cmd->cpu = cpu;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	lpfc_cmd->prot_data_type = 0;
+#endif
+	tmp = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd);
+	if (!tmp)
 		return NULL;
 
-	if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
+	lpfc_cmd->fcp_cmnd = tmp->fcp_cmnd;
+	lpfc_cmd->fcp_rsp = tmp->fcp_rsp;
+
+	/*
+	 * The first two SGEs are the FCP_CMD and FCP_RSP.
+	 * The balance are sg list bdes. Initialize the
+	 * first two and leave the rest for queuecommand.
+	 */
+	sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
+	pdma_phys_fcp_cmd = tmp->fcp_cmd_rsp_dma_handle;
+	sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
+	sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
+	sgl->word2 = le32_to_cpu(sgl->word2);
+	bf_set(lpfc_sli4_sge_last, sgl, 0);
+	sgl->word2 = cpu_to_le32(sgl->word2);
+	sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
+	sgl++;
+
+	/* Setup the physical region for the FCP RSP */
+	pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
+	sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
+	sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
+	sgl->word2 = le32_to_cpu(sgl->word2);
+	bf_set(lpfc_sli4_sge_last, sgl, 1);
+	sgl->word2 = cpu_to_le32(sgl->word2);
+	sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
+
+	/*
+	 * Since the IOCB for the FCP I/O is built into this
+	 * lpfc_io_buf, initialize it with all known data now.
+	 */
+	iocb = &lpfc_cmd->cur_iocbq.iocb;
+	iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
+	iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
+	/* setting the BLP size to 2 * sizeof BDE may not be correct.
+	 * We are setting the bpl to point to our sgl. An sgl's
+	 * entries are 16 bytes; a bpl's entries are 12 bytes.
+	 */
+	iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
+	iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
+	iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
+	iocb->ulpBdeCount = 1;
+	iocb->ulpLe = 1;
+	iocb->ulpClass = CLASS3;
+
+	if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
 		atomic_inc(&ndlp->cmd_pending);
 		lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
 	}
@@ -1067,10 +736,11 @@
  *   NULL - Error
  *   Pointer to lpfc_scsi_buf - Success
  **/
-static struct lpfc_scsi_buf*
-lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+static struct lpfc_io_buf*
+lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+		  struct scsi_cmnd *cmnd)
 {
-	return  phba->lpfc_get_scsi_buf(phba, ndlp);
+	return  phba->lpfc_get_scsi_buf(phba, ndlp, cmnd);
 }
 
 /**
@@ -1082,12 +752,11 @@
  * lpfc_scsi_buf_list list.
  **/
 static void
-lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
+lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
 {
 	unsigned long iflag = 0;
 
 	psb->seg_cnt = 0;
-	psb->nonsg_phys = 0;
 	psb->prot_seg_cnt = 0;
 
 	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
@@ -1102,34 +771,29 @@
  * @phba: The Hba for which this call is being executed.
  * @psb: The scsi buffer which is being released.
  *
- * This routine releases @psb scsi buffer by adding it to tail of @phba
- * lpfc_scsi_buf_list list. For SLI4 XRI's are tied to the scsi buffer
+ * This routine releases @psb scsi buffer by adding it to tail of @hdwq
+ * io_buf_list list. For SLI4, XRIs are tied to the scsi buffer
  * and cannot be reused for at least RA_TOV amount of time if it was
  * aborted.
  **/
 static void
-lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
+lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
 {
+	struct lpfc_sli4_hdw_queue *qp;
 	unsigned long iflag = 0;
 
 	psb->seg_cnt = 0;
-	psb->nonsg_phys = 0;
 	psb->prot_seg_cnt = 0;
 
+	qp = psb->hdwq;
 	if (psb->exch_busy) {
-		spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
-					iflag);
+		spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
 		psb->pCmd = NULL;
-		list_add_tail(&psb->list,
-			&phba->sli4_hba.lpfc_abts_scsi_buf_list);
-		spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
-					iflag);
+		list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list);
+		qp->abts_scsi_io_bufs++;
+		spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
 	} else {
-		psb->pCmd = NULL;
-		psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
-		spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
-		list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
-		spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
+		lpfc_release_io_buf(phba, (struct lpfc_io_buf *)psb, qp);
 	}
 }
 
@@ -1142,7 +806,7 @@
  * lpfc_scsi_buf_list list.
  **/
 static void
-lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
+lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
 {
 	if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp)
 		atomic_dec(&psb->ndlp->cmd_pending);
@@ -1166,12 +830,12 @@
  *   0 - Success
  **/
 static int
-lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
 {
 	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
 	struct scatterlist *sgel = NULL;
 	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
-	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
+	struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
 	struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
 	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
 	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
@@ -1206,9 +870,10 @@
 			       "dma_map_sg.  Config %d, seg_cnt %d\n",
 			       __func__, phba->cfg_sg_seg_cnt,
 			       lpfc_cmd->seg_cnt);
+			WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
 			lpfc_cmd->seg_cnt = 0;
 			scsi_dma_unmap(scsi_cmnd);
-			return 1;
+			return 2;
 		}
 
 		/*
@@ -1318,7 +983,7 @@
 		uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
 {
 	struct scatterlist *sgpe; /* s/g prot entry */
-	struct lpfc_scsi_buf *lpfc_cmd = NULL;
+	struct lpfc_io_buf *lpfc_cmd = NULL;
 	struct scsi_dif_tuple *src = NULL;
 	struct lpfc_nodelist *ndlp;
 	struct lpfc_rport_data *rdata;
@@ -1377,7 +1042,7 @@
 	if (sgpe) {
 		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
 		src += blockoff;
-		lpfc_cmd = (struct lpfc_scsi_buf *)sc->host_scribble;
+		lpfc_cmd = (struct lpfc_io_buf *)sc->host_scribble;
 	}
 
 	/* Should we change the Reference Tag */
@@ -1425,7 +1090,7 @@
 
 					break;
 				}
-				/* Drop thru */
+				/* fall through */
 			case SCSI_PROT_WRITE_INSERT:
 				/*
 				 * For WRITE_INSERT, force the error
@@ -1544,7 +1209,7 @@
 					rc = BG_ERR_TGT | BG_ERR_CHECK;
 					break;
 				}
-				/* Drop thru */
+				/* fall through */
 			case SCSI_PROT_WRITE_INSERT:
 				/*
 				 * For WRITE_INSERT, force the
@@ -1626,7 +1291,7 @@
 			switch (op) {
 			case SCSI_PROT_WRITE_PASS:
 				rc = BG_ERR_CHECK;
-				/* Drop thru */
+				/* fall through */
 
 			case SCSI_PROT_WRITE_INSERT:
 				/*
@@ -2062,7 +1727,7 @@
 
 	if (!sgpe || !sgde) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-				"9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
+				"9020 Invalid s/g entry: data=x%px prot=x%px\n",
 				sgpe, sgde);
 		return 0;
 	}
@@ -2277,7 +1942,8 @@
  **/
 static int
 lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
-		struct sli4_sge *sgl, int datasegcnt)
+		struct sli4_sge *sgl, int datasegcnt,
+		struct lpfc_io_buf *lpfc_cmd)
 {
 	struct scatterlist *sgde = NULL; /* s/g data entry */
 	struct sli4_sge_diseed *diseed = NULL;
@@ -2291,6 +1957,9 @@
 	uint32_t checking = 1;
 	uint32_t dma_len;
 	uint32_t dma_offset = 0;
+	struct sli4_hybrid_sgl *sgl_xtra = NULL;
+	int j;
+	bool lsp_just_set = false;
 
 	status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
 	if (status)
@@ -2350,23 +2019,64 @@
 	sgl++;
 
 	/* assumption: caller has already run dma_map_sg on command data */
-	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
-		physaddr = sg_dma_address(sgde);
-		dma_len = sg_dma_len(sgde);
-		sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
-		sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
-		if ((i + 1) == datasegcnt)
-			bf_set(lpfc_sli4_sge_last, sgl, 1);
-		else
-			bf_set(lpfc_sli4_sge_last, sgl, 0);
-		bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
-		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
+	sgde = scsi_sglist(sc);
+	j = 3;
+	for (i = 0; i < datasegcnt; i++) {
+		/* clear it */
+		sgl->word2 = 0;
 
-		sgl->sge_len = cpu_to_le32(dma_len);
-		dma_offset += dma_len;
+		/* do we need to expand the segment */
+		if (!lsp_just_set && !((j + 1) % phba->border_sge_num) &&
+		    ((datasegcnt - 1) != i)) {
+			/* set LSP type */
+			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);
 
-		sgl++;
-		num_sge++;
+			sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
+
+			if (unlikely(!sgl_xtra)) {
+				lpfc_cmd->seg_cnt = 0;
+				return 0;
+			}
+			sgl->addr_lo = cpu_to_le32(putPaddrLow(
+						sgl_xtra->dma_phys_sgl));
+			sgl->addr_hi = cpu_to_le32(putPaddrHigh(
+						sgl_xtra->dma_phys_sgl));
+
+		} else {
+			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
+		}
+
+		if (!(bf_get(lpfc_sli4_sge_type, sgl) & LPFC_SGE_TYPE_LSP)) {
+			if ((datasegcnt - 1) == i)
+				bf_set(lpfc_sli4_sge_last, sgl, 1);
+			physaddr = sg_dma_address(sgde);
+			dma_len = sg_dma_len(sgde);
+			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
+			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
+
+			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
+			sgl->word2 = cpu_to_le32(sgl->word2);
+			sgl->sge_len = cpu_to_le32(dma_len);
+
+			dma_offset += dma_len;
+			sgde = sg_next(sgde);
+
+			sgl++;
+			num_sge++;
+			lsp_just_set = false;
+
+		} else {
+			sgl->word2 = cpu_to_le32(sgl->word2);
+			sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);
+
+			sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
+			i = i - 1;
+
+			lsp_just_set = true;
+		}
+
+		j++;
+
 	}
 
 out:
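
When a command needs more SGEs than one SGL page holds, lpfc_bg_setup_sgl() plants an LSP (link) entry pointing at an extra page and continues there; the i = i - 1 above replays the current segment after the jump. A pointer-chasing sketch of that layout (the page size here is illustrative):

```c
#include <stdio.h>

#define PER_PAGE 4              /* entries per SGL page, illustrative */

struct sge { int is_link; long val; struct sge *next_page; };

int main(void)
{
	struct sge page2[PER_PAGE] = { 0 };
	struct sge page1[PER_PAGE] = { 0 };
	struct sge *s = page1;
	int filled = 0, seg;

	for (seg = 0; seg < 6; seg++) {
		if (filled == PER_PAGE - 1) {   /* last slot: plant a link */
			s[filled].is_link = 1;
			s[filled].next_page = page2;
			s = page2;              /* continue on the new page */
			filled = 0;             /* like the i = i - 1 replay */
		}
		s[filled].val = seg;            /* data SGE for this segment */
		filled++;
	}
	printf("page1[%d] links to page2; page2[0]=%ld\n",
	       PER_PAGE - 1, page2[0].val);
	return 0;
}
```
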
@@ -2412,7 +2122,8 @@
  **/
 static int
 lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
-		struct sli4_sge *sgl, int datacnt, int protcnt)
+		struct sli4_sge *sgl, int datacnt, int protcnt,
+		struct lpfc_io_buf *lpfc_cmd)
 {
 	struct scatterlist *sgde = NULL; /* s/g data entry */
 	struct scatterlist *sgpe = NULL; /* s/g prot entry */
@@ -2434,14 +2145,15 @@
 #endif
 	uint32_t checking = 1;
 	uint32_t dma_offset = 0;
-	int num_sge = 0;
+	int num_sge = 0, j = 2;
+	struct sli4_hybrid_sgl *sgl_xtra = NULL;
 
 	sgpe = scsi_prot_sglist(sc);
 	sgde = scsi_sglist(sc);
 
 	if (!sgpe || !sgde) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-				"9082 Invalid s/g entry: data=0x%p prot=0x%p\n",
+				"9082 Invalid s/g entry: data=x%px prot=x%px\n",
 				sgpe, sgde);
 		return 0;
 	}
@@ -2467,9 +2179,37 @@
 	split_offset = 0;
 	do {
 		/* Check to see if we ran out of space */
-		if (num_sge >= (phba->cfg_total_seg_cnt - 2))
+		if ((num_sge >= (phba->cfg_total_seg_cnt - 2)) &&
+		    !(phba->cfg_xpsgl))
 			return num_sge + 3;
 
+		/* DISEED and DIF have to be together */
+		if (!((j + 1) % phba->border_sge_num) ||
+		    !((j + 2) % phba->border_sge_num) ||
+		    !((j + 3) % phba->border_sge_num)) {
+			sgl->word2 = 0;
+
+			/* set LSP type */
+			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);
+
+			sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
+
+			if (unlikely(!sgl_xtra)) {
+				goto out;
+			} else {
+				sgl->addr_lo = cpu_to_le32(putPaddrLow(
+						sgl_xtra->dma_phys_sgl));
+				sgl->addr_hi = cpu_to_le32(putPaddrHigh(
+						       sgl_xtra->dma_phys_sgl));
+			}
+
+			sgl->word2 = cpu_to_le32(sgl->word2);
+			sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);
+
+			sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
+			j = 0;
+		}
+
 		/* setup DISEED with what we have */
 		diseed = (struct sli4_sge_diseed *) sgl;
 		memset(diseed, 0, sizeof(struct sli4_sge_diseed));
@@ -2516,7 +2256,9 @@
 
 		/* advance sgl and increment bde count */
 		num_sge++;
+
 		sgl++;
+		j++;
 
 		/* setup the first BDE that points to protection buffer */
 		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
@@ -2531,6 +2273,7 @@
 		sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
 		sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
 		sgl->word2 = cpu_to_le32(sgl->word2);
+		sgl->sge_len = 0;
 
 		protgrp_blks = protgroup_len / 8;
 		protgrp_bytes = protgrp_blks * blksize;
@@ -2551,9 +2294,14 @@
 		/* setup SGE's for data blocks associated with DIF data */
 		pgdone = 0;
 		subtotal = 0; /* total bytes processed for current prot grp */
+
+		sgl++;
+		j++;
+
 		while (!pgdone) {
 			/* Check to see if we ran out of space */
-			if (num_sge >= phba->cfg_total_seg_cnt)
+			if ((num_sge >= phba->cfg_total_seg_cnt) &&
+			    !phba->cfg_xpsgl)
 				return num_sge + 1;
 
 			if (!sgde) {
@@ -2562,60 +2310,101 @@
 						__func__);
 				return 0;
 			}
-			sgl++;
-			dataphysaddr = sg_dma_address(sgde) + split_offset;
 
-			remainder = sg_dma_len(sgde) - split_offset;
+			if (!((j + 1) % phba->border_sge_num)) {
+				sgl->word2 = 0;
 
-			if ((subtotal + remainder) <= protgrp_bytes) {
-				/* we can use this whole buffer */
-				dma_len = remainder;
-				split_offset = 0;
+				/* set LSP type */
+				bf_set(lpfc_sli4_sge_type, sgl,
+				       LPFC_SGE_TYPE_LSP);
 
-				if ((subtotal + remainder) == protgrp_bytes)
-					pgdone = 1;
+				sgl_xtra = lpfc_get_sgl_per_hdwq(phba,
+								 lpfc_cmd);
+
+				if (unlikely(!sgl_xtra)) {
+					goto out;
+				} else {
+					sgl->addr_lo = cpu_to_le32(
+					  putPaddrLow(sgl_xtra->dma_phys_sgl));
+					sgl->addr_hi = cpu_to_le32(
+					  putPaddrHigh(sgl_xtra->dma_phys_sgl));
+				}
+
+				sgl->word2 = cpu_to_le32(sgl->word2);
+				sgl->sge_len = cpu_to_le32(
+						     phba->cfg_sg_dma_buf_size);
+
+				sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
 			} else {
-				/* must split this buffer with next prot grp */
-				dma_len = protgrp_bytes - subtotal;
-				split_offset += dma_len;
+				dataphysaddr = sg_dma_address(sgde) +
+								   split_offset;
+
+				remainder = sg_dma_len(sgde) - split_offset;
+
+				if ((subtotal + remainder) <= protgrp_bytes) {
+					/* we can use this whole buffer */
+					dma_len = remainder;
+					split_offset = 0;
+
+					if ((subtotal + remainder) ==
+								  protgrp_bytes)
+						pgdone = 1;
+				} else {
+					/* must split this buffer with next
+					 * prot grp
+					 */
+					dma_len = protgrp_bytes - subtotal;
+					split_offset += dma_len;
+				}
+
+				subtotal += dma_len;
+
+				sgl->word2 = 0;
+				sgl->addr_lo = cpu_to_le32(putPaddrLow(
+								 dataphysaddr));
+				sgl->addr_hi = cpu_to_le32(putPaddrHigh(
+								 dataphysaddr));
+				bf_set(lpfc_sli4_sge_last, sgl, 0);
+				bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
+				bf_set(lpfc_sli4_sge_type, sgl,
+				       LPFC_SGE_TYPE_DATA);
+
+				sgl->sge_len = cpu_to_le32(dma_len);
+				dma_offset += dma_len;
+
+				num_sge++;
+				curr_data++;
+
+				if (split_offset) {
+					sgl++;
+					j++;
+					break;
+				}
+
+				/* Move to the next s/g segment if possible */
+				sgde = sg_next(sgde);
+
+				sgl++;
 			}
 
-			subtotal += dma_len;
-
-			sgl->addr_lo = cpu_to_le32(putPaddrLow(dataphysaddr));
-			sgl->addr_hi = cpu_to_le32(putPaddrHigh(dataphysaddr));
-			bf_set(lpfc_sli4_sge_last, sgl, 0);
-			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
-			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
-
-			sgl->sge_len = cpu_to_le32(dma_len);
-			dma_offset += dma_len;
-
-			num_sge++;
-			curr_data++;
-
-			if (split_offset)
-				break;
-
-			/* Move to the next s/g segment if possible */
-			sgde = sg_next(sgde);
+			j++;
 		}
 
 		if (protgroup_offset) {
 			/* update the reference tag */
 			reftag += protgrp_blks;
-			sgl++;
 			continue;
 		}
 
 		/* are we done ? */
 		if (curr_prot == protcnt) {
+			/* mark the last SGL */
+			sgl--;
 			bf_set(lpfc_sli4_sge_last, sgl, 1);
 			alldone = 1;
 		} else if (curr_prot < protcnt) {
 			/* advance to next prot buffer */
 			sgpe = sg_next(sgpe);
-			sgl++;
 
 			/* update the reference tag */
 			reftag += protgrp_blks;
@@ -2682,7 +2471,7 @@
  **/
 static int
 lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
-		       struct lpfc_scsi_buf *lpfc_cmd)
+		       struct lpfc_io_buf *lpfc_cmd)
 {
 	struct scsi_cmnd *sc = lpfc_cmd->pCmd;
 	int fcpdl;
@@ -2718,20 +2507,25 @@
  *
  * This is the protection/DIF aware version of
  * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
- * two functions eventually, but for now, it's here
+ * two functions eventually, but for now, it's here.
+ * RETURNS 0 - SUCCESS,
+ *         1 - Failed DMA map, retry.
+ *         2 - Invalid scsi cmd or prot-type. Do not retry.
  **/
 static int
 lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
-		struct lpfc_scsi_buf *lpfc_cmd)
+		struct lpfc_io_buf *lpfc_cmd)
 {
 	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
 	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
-	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
+	struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
 	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
 	uint32_t num_bde = 0;
 	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
 	int prot_group_type = 0;
 	int fcpdl;
+	int ret = 1;
+	struct lpfc_vport *vport = phba->pport;
 
 	/*
 	 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
@@ -2754,8 +2548,11 @@
 		lpfc_cmd->seg_cnt = datasegcnt;
 
 		/* First check if data segment count from SCSI Layer is good */
-		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
+		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
+			WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
+			ret = 2;
 			goto err;
+		}
 
 		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
 
@@ -2763,14 +2560,18 @@
 		case LPFC_PG_TYPE_NO_DIF:
 
 			/* Here we need to add a PDE5 and PDE6 to the count */
-			if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt)
+			if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt) {
+				ret = 2;
 				goto err;
+			}
 
 			num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
 					datasegcnt);
 			/* we should have 2 or more entries in buffer list */
-			if (num_bde < 2)
+			if (num_bde < 2) {
+				ret = 2;
 				goto err;
+			}
 			break;
 
 		case LPFC_PG_TYPE_DIF_BUF:
@@ -2794,15 +2595,19 @@
 			 * protection data segment.
 			 */
 			if ((lpfc_cmd->prot_seg_cnt * 4) >
-			    (phba->cfg_total_seg_cnt - 2))
+			    (phba->cfg_total_seg_cnt - 2)) {
+				ret = 2;
 				goto err;
+			}
 
 			num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
 					datasegcnt, protsegcnt);
 			/* we should have 3 or more entries in buffer list */
 			if ((num_bde < 3) ||
-			    (num_bde > phba->cfg_total_seg_cnt))
+			    (num_bde > phba->cfg_total_seg_cnt)) {
+				ret = 2;
 				goto err;
+			}
 			break;
 
 		case LPFC_PG_TYPE_INVALID:
@@ -2813,7 +2618,7 @@
 			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
 					"9022 Unexpected protection group %i\n",
 					prot_group_type);
-			return 1;
+			return 2;
 		}
 	}
 
@@ -2837,6 +2642,14 @@
 	 */
 	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
 
+	/*
+	 * For first burst, we may need to adjust the initial transfer
+	 * length for DIF.
+	 */
+	if (iocb_cmd->un.fcpi.fcpi_XRdy &&
+	    (fcpdl < vport->cfg_first_burst_size))
+		iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;
+
 	return 0;
 err:
 	if (lpfc_cmd->seg_cnt)
@@ -2855,7 +2668,7 @@
 
 	lpfc_cmd->seg_cnt = 0;
 	lpfc_cmd->prot_seg_cnt = 0;
-	return 1;
+	return ret;
 }
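
The 0/1/2 convention introduced here separates transient failures (a DMA map that may succeed on retry) from malformed requests that must not be retried; the concrete mapping lives in lpfc_queuecommand later in this patch. A minimal sketch of the caller-side dispatch, names invented:

enum prep_rc { PREP_OK = 0, PREP_RETRY = 1, PREP_FATAL = 2 };

/* 0: issue the IO; 1: requeue at the SCSI layer (host busy);
 * 2: complete the command with an error, no retry
 */
static const char *dispatch_prep(enum prep_rc rc)
{
	switch (rc) {
	case PREP_OK:
		return "issue";
	case PREP_RETRY:
		return "requeue";	/* e.g. SCSI_MLQUEUE_HOST_BUSY */
	case PREP_FATAL:
	default:
		return "fail";		/* e.g. DID_ERROR << 16 */
	}
}
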
 
 /*
@@ -2893,7 +2706,7 @@
  * what type of T10-DIF error occurred.
  */
 static void
-lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
 {
 	struct scatterlist *sgpe; /* s/g prot entry */
 	struct scatterlist *sgde; /* s/g data entry */
@@ -3078,8 +2891,8 @@
  * -1 - Internal error (bad profile, ...etc)
  */
 static int
-lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
-			struct lpfc_iocbq *pIocbOut)
+lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
+		  struct lpfc_iocbq *pIocbOut)
 {
 	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
 	struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
@@ -3088,26 +2901,6 @@
 	uint32_t bgstat = bgf->bgstat;
 	uint64_t failing_sector = 0;
 
-	spin_lock(&_dump_buf_lock);
-	if (!_dump_buf_done) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_BG,  "9070 BLKGRD: Saving"
-			" Data for %u blocks to debugfs\n",
-				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
-		lpfc_debug_save_data(phba, cmd);
-
-		/* If we have a prot sgl, save the DIF buffer */
-		if (lpfc_prot_group_type(phba, cmd) ==
-				LPFC_PG_TYPE_DIF_BUF) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9071 BLKGRD: "
-				"Saving DIF for %u blocks to debugfs\n",
-				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
-			lpfc_debug_save_dif(phba, cmd);
-		}
-
-		_dump_buf_done = 1;
-	}
-	spin_unlock(&_dump_buf_lock);
-
 	if (lpfc_bgs_get_invalid_prof(bgstat)) {
 		cmd->result = DID_ERROR << 16;
 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
@@ -3241,24 +3034,27 @@
  * field of @lpfc_cmd for device with SLI-4 interface spec.
  *
  * Return codes:
- *	1 - Error
+ *	2 - Error - Do not retry
+ *	1 - Error - Retry
  *	0 - Success
  **/
 static int
-lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
 {
 	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
 	struct scatterlist *sgel = NULL;
 	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
-	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
+	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
 	struct sli4_sge *first_data_sgl;
 	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
 	dma_addr_t physaddr;
 	uint32_t num_bde = 0;
 	uint32_t dma_len;
 	uint32_t dma_offset = 0;
-	int nseg;
+	int nseg, i, j;
 	struct ulp_bde64 *bde;
+	bool lsp_just_set = false;
+	struct sli4_hybrid_sgl *sgl_xtra = NULL;
 
 	/*
 	 * There are three possibilities here - use scatter-gather segment, use
@@ -3285,15 +3081,17 @@
 		sgl += 1;
 		first_data_sgl = sgl;
 		lpfc_cmd->seg_cnt = nseg;
-		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
+		if (!phba->cfg_xpsgl &&
+		    lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
 				" %s: Too many sg segments from "
 				"dma_map_sg.  Config %d, seg_cnt %d\n",
 				__func__, phba->cfg_sg_seg_cnt,
 			       lpfc_cmd->seg_cnt);
+			WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
 			lpfc_cmd->seg_cnt = 0;
 			scsi_dma_unmap(scsi_cmnd);
-			return 1;
+			return 2;
 		}
 
 		/*
@@ -3305,22 +3103,80 @@
 		 * the IOCB. If it can't then the BDEs get added to a BPL as it
 		 * does for SLI-2 mode.
 		 */
-		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
-			physaddr = sg_dma_address(sgel);
-			dma_len = sg_dma_len(sgel);
-			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
-			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
-			sgl->word2 = le32_to_cpu(sgl->word2);
-			if ((num_bde + 1) == nseg)
+
+		/* for tracking segment boundaries */
+		sgel = scsi_sglist(scsi_cmnd);
+		j = 2;
+		for (i = 0; i < nseg; i++) {
+			sgl->word2 = 0;
+			if ((num_bde + 1) == nseg) {
 				bf_set(lpfc_sli4_sge_last, sgl, 1);
-			else
+				bf_set(lpfc_sli4_sge_type, sgl,
+				       LPFC_SGE_TYPE_DATA);
+			} else {
 				bf_set(lpfc_sli4_sge_last, sgl, 0);
-			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
-			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
-			sgl->word2 = cpu_to_le32(sgl->word2);
-			sgl->sge_len = cpu_to_le32(dma_len);
-			dma_offset += dma_len;
-			sgl++;
+
+				/* do we need to expand the segment */
+				if (!lsp_just_set &&
+				    !((j + 1) % phba->border_sge_num) &&
+				    ((nseg - 1) != i)) {
+					/* set LSP type */
+					bf_set(lpfc_sli4_sge_type, sgl,
+					       LPFC_SGE_TYPE_LSP);
+
+					sgl_xtra = lpfc_get_sgl_per_hdwq(
+							phba, lpfc_cmd);
+
+					if (unlikely(!sgl_xtra)) {
+						lpfc_cmd->seg_cnt = 0;
+						scsi_dma_unmap(scsi_cmnd);
+						return 1;
+					}
+					sgl->addr_lo = cpu_to_le32(putPaddrLow(
+						       sgl_xtra->dma_phys_sgl));
+					sgl->addr_hi = cpu_to_le32(putPaddrHigh(
+						       sgl_xtra->dma_phys_sgl));
+
+				} else {
+					bf_set(lpfc_sli4_sge_type, sgl,
+					       LPFC_SGE_TYPE_DATA);
+				}
+			}
+
+			if (!(bf_get(lpfc_sli4_sge_type, sgl) &
+				     LPFC_SGE_TYPE_LSP)) {
+				if ((nseg - 1) == i)
+					bf_set(lpfc_sli4_sge_last, sgl, 1);
+
+				physaddr = sg_dma_address(sgel);
+				dma_len = sg_dma_len(sgel);
+				sgl->addr_lo = cpu_to_le32(putPaddrLow(
+							   physaddr));
+				sgl->addr_hi = cpu_to_le32(putPaddrHigh(
+							   physaddr));
+
+				bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
+				sgl->word2 = cpu_to_le32(sgl->word2);
+				sgl->sge_len = cpu_to_le32(dma_len);
+
+				dma_offset += dma_len;
+				sgel = sg_next(sgel);
+
+				sgl++;
+				lsp_just_set = false;
+
+			} else {
+				sgl->word2 = cpu_to_le32(sgl->word2);
+				sgl->sge_len = cpu_to_le32(
+						     phba->cfg_sg_dma_buf_size);
+
+				sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
+				i = i - 1;
+
+				lsp_just_set = true;
+			}
+
+			j++;
 		}
 		/*
 		 * Setup the first Payload BDE. For FCoE we just key off
@@ -3377,6 +3233,7 @@
 		lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *)
 			scsi_cmnd->device->hostdata)->priority;
 	}
+
 	return 0;
 }
 
@@ -3388,19 +3245,25 @@
  * This is the protection/DIF aware version of
  * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
  * two functions eventually, but for now, it's here.
+ * Return codes:
+ *	2 - Error - Do not retry
+ *	1 - Error - Retry
+ *	0 - Success
  **/
 static int
 lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
-		struct lpfc_scsi_buf *lpfc_cmd)
+		struct lpfc_io_buf *lpfc_cmd)
 {
 	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
 	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
-	struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl);
+	struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->dma_sgl);
 	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
 	uint32_t num_sge = 0;
 	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
 	int prot_group_type = 0;
 	int fcpdl;
+	int ret = 1;
+	struct lpfc_vport *vport = phba->pport;
 
 	/*
 	 * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd
@@ -3429,23 +3292,33 @@
 		lpfc_cmd->seg_cnt = datasegcnt;
 
 		/* First check if data segment count from SCSI Layer is good */
-		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
+		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt &&
+		    !phba->cfg_xpsgl) {
+			WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
+			ret = 2;
 			goto err;
+		}
 
 		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
 
 		switch (prot_group_type) {
 		case LPFC_PG_TYPE_NO_DIF:
 			/* Here we need to add a DISEED to the count */
-			if ((lpfc_cmd->seg_cnt + 1) > phba->cfg_total_seg_cnt)
+			if (((lpfc_cmd->seg_cnt + 1) >
+					phba->cfg_total_seg_cnt) &&
+			    !phba->cfg_xpsgl) {
+				ret = 2;
 				goto err;
+			}
 
 			num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
-					datasegcnt);
+					datasegcnt, lpfc_cmd);
 
 			/* we should have 2 or more entries in buffer list */
-			if (num_sge < 2)
+			if (num_sge < 2) {
+				ret = 2;
 				goto err;
+			}
 			break;
 
 		case LPFC_PG_TYPE_DIF_BUF:
@@ -3467,17 +3340,23 @@
 			 * There is a minimum of 3 SGEs used for every
 			 * protection data segment.
 			 */
-			if ((lpfc_cmd->prot_seg_cnt * 3) >
-			    (phba->cfg_total_seg_cnt - 2))
+			if (((lpfc_cmd->prot_seg_cnt * 3) >
+					(phba->cfg_total_seg_cnt - 2)) &&
+			    !phba->cfg_xpsgl) {
+				ret = 2;
 				goto err;
+			}
 
 			num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
-					datasegcnt, protsegcnt);
+					datasegcnt, protsegcnt, lpfc_cmd);
 
 			/* we should have 3 or more entries in buffer list */
-			if ((num_sge < 3) ||
-			    (num_sge > phba->cfg_total_seg_cnt))
+			if (num_sge < 3 ||
+			    (num_sge > phba->cfg_total_seg_cnt &&
+			     !phba->cfg_xpsgl)) {
+				ret = 2;
 				goto err;
+			}
 			break;
 
 		case LPFC_PG_TYPE_INVALID:
@@ -3488,7 +3367,7 @@
 			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
 					"9083 Unexpected protection group %i\n",
 					prot_group_type);
-			return 1;
+			return 2;
 		}
 	}
 
@@ -3517,6 +3396,14 @@
 	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
 
 	/*
+	 * For first burst, we may need to adjust the initial transfer
+	 * length for DIF.
+	 */
+	if (iocb_cmd->un.fcpi.fcpi_XRdy &&
+	    (fcpdl < vport->cfg_first_burst_size))
+		iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;
+
+	/*
 	 * If the OAS driver feature is enabled and the lun is enabled for
 	 * OAS, set the oas iocb related flags.
 	 */
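
The XRdy adjustment added in both prep paths caps the first-burst (write transfer-ready) length at the DIF-adjusted total length: with cfg_first_burst_size of 4096 and an fcpdl of 520 (one 512-byte block plus an 8-byte DIF tuple), XRdy drops to 520. A sketch of that clamp, names illustrative:

/* xrdy holds the configured first-burst size when first burst is
 * enabled, 0 otherwise; fcpdl is the DIF-adjusted transfer length
 */
static unsigned int clamp_first_burst(unsigned int xrdy, unsigned int fcpdl)
{
	/* e.g. xrdy = 4096, fcpdl = 520 (512B block + 8B DIF) -> 520 */
	if (xrdy && fcpdl < xrdy)
		xrdy = fcpdl;
	return xrdy;
}
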
@@ -3542,7 +3429,7 @@
 
 	lpfc_cmd->seg_cnt = 0;
 	lpfc_cmd->prot_seg_cnt = 0;
-	return 1;
+	return ret;
 }
 
 /**
@@ -3558,7 +3445,7 @@
  *	0 - Success
  **/
 static inline int
-lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
 {
 	return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
 }
@@ -3577,7 +3464,7 @@
  *	0 - Success
  **/
 static inline int
-lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
 {
 	return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
 }
@@ -3594,7 +3481,7 @@
  **/
 static void
 lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
-		struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
+		struct lpfc_io_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
 	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
 	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
 	uint32_t resp_info = fcprsp->rspStatus2;
@@ -3686,7 +3573,7 @@
  * field of @lpfc_cmd for device with SLI-3 interface spec.
  **/
 static void
-lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
+lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
 {
 	/*
 	 * There are only two special cases to consider.  (1) the scsi command
@@ -3705,7 +3592,7 @@
 /**
  * lpfc_handle_fcp_err - FCP response handler
  * @vport: The virtual port for which this call is being executed.
- * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
+ * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
  * @rsp_iocb: The response IOCB which contains FCP error.
  *
  * This routine is called to process response IOCB with status field
@@ -3713,7 +3600,7 @@
  * based upon SCSI and FCP error.
  **/
 static void
-lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
+lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
 		    struct lpfc_iocbq *rsp_iocb)
 {
 	struct lpfc_hba *phba = vport->phba;
@@ -3892,49 +3779,6 @@
 }
 
 /**
- * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
- * @phba: Pointer to HBA context object.
- *
- * This routine performs a roundrobin SCSI command to SLI4 FCP WQ index
- * distribution.  This is called by __lpfc_sli_issue_iocb_s4() with the hbalock
- * held.
- * If scsi-mq is enabled, get the default block layer mapping of software queues
- * to hardware queues. This information is saved in request tag.
- *
- * Return: index into SLI4 fast-path FCP queue index.
- **/
-int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba,
-				  struct lpfc_scsi_buf *lpfc_cmd)
-{
-	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
-	struct lpfc_vector_map_info *cpup;
-	int chann, cpu;
-	uint32_t tag;
-	uint16_t hwq;
-
-	if (cmnd && shost_use_blk_mq(cmnd->device->host)) {
-		tag = blk_mq_unique_tag(cmnd->request);
-		hwq = blk_mq_unique_tag_to_hwq(tag);
-
-		return hwq;
-	}
-
-	if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU
-	    && phba->cfg_fcp_io_channel > 1) {
-		cpu = smp_processor_id();
-		if (cpu < phba->sli4_hba.num_present_cpu) {
-			cpup = phba->sli4_hba.cpu_map;
-			cpup += cpu;
-			return cpup->channel_id;
-		}
-	}
-	chann = atomic_add_return(1, &phba->fcp_qidx);
-	chann = chann % phba->cfg_fcp_io_channel;
-	return chann;
-}
-
-
-/**
  * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
  * @phba: The Hba for which this call is being executed.
  * @pIocbIn: The command IOCBQ for the scsi cmnd.
@@ -3948,8 +3792,8 @@
 lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 			struct lpfc_iocbq *pIocbOut)
 {
-	struct lpfc_scsi_buf *lpfc_cmd =
-		(struct lpfc_scsi_buf *) pIocbIn->context1;
+	struct lpfc_io_buf *lpfc_cmd =
+		(struct lpfc_io_buf *) pIocbIn->context1;
 	struct lpfc_vport      *vport = pIocbIn->vport;
 	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
 	struct lpfc_nodelist *pnode = rdata->pnode;
@@ -3957,14 +3801,35 @@
 	unsigned long flags;
 	struct lpfc_fast_path_event *fast_path_evt;
 	struct Scsi_Host *shost;
+	int idx;
 	uint32_t logit = LOG_FCP;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	int cpu;
+#endif
 
-	atomic_inc(&phba->fc4ScsiIoCmpls);
+	/* Guard against abort handler being called at same time */
+	spin_lock(&lpfc_cmd->buf_lock);
 
 	/* Sanity check on return of outstanding command */
 	cmd = lpfc_cmd->pCmd;
-	if (!cmd)
+	if (!cmd) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				 "2621 IO completion: Not an active IO\n");
+		spin_unlock(&lpfc_cmd->buf_lock);
 		return;
+	}
+
+	idx = lpfc_cmd->cur_iocbq.hba_wqidx;
+	if (phba->sli4_hba.hdwq)
+		phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) {
+		cpu = raw_smp_processor_id();
+		if (cpu < LPFC_CHECK_CPU_CNT && phba->sli4_hba.hdwq)
+			phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++;
+	}
+#endif
 	shost = cmd->device->host;
 
 	lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
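
The completion path now takes the per-command buf_lock and re-checks pCmd, pairing with the same sequence in the reworked abort handler below, so an IO is completed or aborted exactly once without holding the host lock. A user-space model of that handshake, with a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stddef.h>

struct io_buf {
	pthread_mutex_t buf_lock;
	void *pCmd;		/* non-NULL while the IO is active */
};

/* completion side: bail out if the IO is no longer active */
static int complete_io(struct io_buf *b)
{
	pthread_mutex_lock(&b->buf_lock);
	if (!b->pCmd) {
		pthread_mutex_unlock(&b->buf_lock);
		return 0;	/* "Not an active IO" */
	}
	/* ... finish the command ... */
	b->pCmd = NULL;
	pthread_mutex_unlock(&b->buf_lock);
	return 1;
}

/* abort side: same lock, same ownership test */
static int abort_io(struct io_buf *b, void *cmd)
{
	int mine;

	pthread_mutex_lock(&b->buf_lock);
	mine = (b->pCmd == cmd);	/* already completed? nothing to do */
	pthread_mutex_unlock(&b->buf_lock);
	return mine;
}
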
@@ -4113,7 +3978,7 @@
 					lpfc_cmd->cur_iocbq.sli4_lxritag,
 					0, 0);
 			}
-		/* else: fall through */
+			/* fall through */
 		default:
 			cmd->result = DID_ERROR << 16;
 			break;
@@ -4130,7 +3995,7 @@
 		uint32_t *lp = (uint32_t *)cmd->sense_buffer;
 
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
-				 "0710 Iodone <%d/%llu> cmd %p, error "
+				 "0710 Iodone <%d/%llu> cmd x%px, error "
 				 "x%x SNS x%x x%x Data: x%x x%x\n",
 				 cmd->device->id, cmd->device->lun, cmd,
 				 cmd->result, *lp, *(lp + 3), cmd->retries,
@@ -4158,29 +4023,21 @@
 	}
 	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
 
-	/* If pCmd was set to NULL from abort path, do not call scsi_done */
-	if (xchg(&lpfc_cmd->pCmd, NULL) == NULL) {
-		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
-				 "0711 FCP cmd already NULL, sid: 0x%06x, "
-				 "did: 0x%06x, oxid: 0x%04x\n",
-				 vport->fc_myDID,
-				 (pnode) ? pnode->nlp_DID : 0,
-				 phba->sli_rev == LPFC_SLI_REV4 ?
-				 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff);
-		return;
-	}
+	lpfc_cmd->pCmd = NULL;
+	spin_unlock(&lpfc_cmd->buf_lock);
 
 	/* The sdev is not guaranteed to be valid post scsi_done upcall. */
 	cmd->scsi_done(cmd);
 
 	/*
-	 * If there is a thread waiting for command completion
+	 * If there is an abort thread waiting for command completion
 	 * wake up the thread.
 	 */
-	spin_lock_irqsave(shost->host_lock, flags);
+	spin_lock(&lpfc_cmd->buf_lock);
+	lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
 	if (lpfc_cmd->waitq)
 		wake_up(lpfc_cmd->waitq);
-	spin_unlock_irqrestore(shost->host_lock, flags);
+	spin_unlock(&lpfc_cmd->buf_lock);
 
 	lpfc_release_scsi_buf(phba, lpfc_cmd);
 }
@@ -4213,7 +4070,7 @@
  * to transfer for device with SLI3 interface spec.
  **/
 static void
-lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
+lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
 		    struct lpfc_nodelist *pnode)
 {
 	struct lpfc_hba *phba = vport->phba;
@@ -4221,7 +4078,9 @@
 	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
 	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
 	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
+	struct lpfc_sli4_hdw_queue *hdwq = NULL;
 	int datadir = scsi_cmnd->sc_data_direction;
+	int idx;
 	uint8_t *ptr;
 	bool sli4;
 	uint32_t fcpdl;
@@ -4247,6 +4106,9 @@
 
 	sli4 = (phba->sli_rev == LPFC_SLI_REV4);
 	piocbq->iocb.un.fcpi.fcpi_XRdy = 0;
+	idx = lpfc_cmd->hdwq_no;
+	if (phba->sli4_hba.hdwq)
+		hdwq = &phba->sli4_hba.hdwq[idx];
 
 	/*
 	 * There are three possibilities here - use scatter-gather segment, use
@@ -4268,19 +4130,22 @@
 						vport->cfg_first_burst_size;
 			}
 			fcp_cmnd->fcpCntl3 = WRITE_DATA;
-			atomic_inc(&phba->fc4ScsiOutputRequests);
+			if (hdwq)
+				hdwq->scsi_cstat.output_requests++;
 		} else {
 			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
 			iocb_cmd->ulpPU = PARM_READ_CHECK;
 			fcp_cmnd->fcpCntl3 = READ_DATA;
-			atomic_inc(&phba->fc4ScsiInputRequests);
+			if (hdwq)
+				hdwq->scsi_cstat.input_requests++;
 		}
 	} else {
 		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
 		iocb_cmd->un.fcpi.fcpi_parm = 0;
 		iocb_cmd->ulpPU = 0;
 		fcp_cmnd->fcpCntl3 = 0;
-		atomic_inc(&phba->fc4ScsiControlRequests);
+		if (hdwq)
+			hdwq->scsi_cstat.control_requests++;
 	}
 	if (phba->sli_rev == 3 &&
 	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
@@ -4308,7 +4173,7 @@
 /**
  * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
  * @vport: The virtual port for which this call is being executed.
- * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
+ * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
  * @lun: Logical unit number.
  * @task_mgmt_cmd: SCSI task management command.
  *
@@ -4321,7 +4186,7 @@
  **/
 static int
 lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
-			     struct lpfc_scsi_buf *lpfc_cmd,
+			     struct lpfc_io_buf *lpfc_cmd,
 			     uint64_t lun,
 			     uint8_t task_mgmt_cmd)
 {
@@ -4393,14 +4258,12 @@
 
 	switch (dev_grp) {
 	case LPFC_PCI_DEV_LP:
-		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
 		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
 		phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
 		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
 		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
 		break;
 	case LPFC_PCI_DEV_OC:
-		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
 		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
 		phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
 		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
@@ -4432,14 +4295,74 @@
 			struct lpfc_iocbq *cmdiocbq,
 			struct lpfc_iocbq *rspiocbq)
 {
-	struct lpfc_scsi_buf *lpfc_cmd =
-		(struct lpfc_scsi_buf *) cmdiocbq->context1;
+	struct lpfc_io_buf *lpfc_cmd =
+		(struct lpfc_io_buf *) cmdiocbq->context1;
 	if (lpfc_cmd)
 		lpfc_release_scsi_buf(phba, lpfc_cmd);
 	return;
 }
 
 /**
+ * lpfc_check_pci_resettable - Walks list of devices on pci_dev's bus to check
+ *                             if issuing a pci_bus_reset is possibly unsafe
+ * @phba: lpfc_hba pointer.
+ *
+ * Description:
+ * Walks the bus_list to ensure only PCI devices with Emulex
+ * vendor id, device ids that support hot reset, and only one occurrence
+ * of function 0 are present.
+ *
+ * Returns:
+ * -EBADSLT,  detected invalid device
+ *      0,    successful
+ */
+int
+lpfc_check_pci_resettable(const struct lpfc_hba *phba)
+{
+	const struct pci_dev *pdev = phba->pcidev;
+	struct pci_dev *ptr = NULL;
+	u8 counter = 0;
+
+	/* Walk the list of devices on the pci_dev's bus */
+	list_for_each_entry(ptr, &pdev->bus->devices, bus_list) {
+		/* Check for Emulex Vendor ID */
+		if (ptr->vendor != PCI_VENDOR_ID_EMULEX) {
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"8346 Non-Emulex vendor found: "
+					"0x%04x\n", ptr->vendor);
+			return -EBADSLT;
+		}
+
+		/* Check for valid Emulex Device ID */
+		switch (ptr->device) {
+		case PCI_DEVICE_ID_LANCER_FC:
+		case PCI_DEVICE_ID_LANCER_G6_FC:
+		case PCI_DEVICE_ID_LANCER_G7_FC:
+			break;
+		default:
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"8347 Invalid device found: "
+					"0x%04x\n", ptr->device);
+			return -EBADSLT;
+		}
+
+		/* Check for only one function 0 ID to ensure only one HBA on
+		 * secondary bus
+		 */
+		if (ptr->devfn == 0) {
+			if (++counter > 1) {
+				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+						"8348 More than one device on "
+						"secondary bus found\n");
+				return -EBADSLT;
+			}
+		}
+	}
+
+	return 0;
+}
+
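
The helper accepts the bus only when every device is an Emulex function and at most one of them is function 0. A freestanding model of just the counting rule, inputs invented:

/* devfn values of the devices found on the secondary bus */
static int secondary_bus_ok(const unsigned char *devfn, int n)
{
	int i, fn0 = 0;

	for (i = 0; i < n; i++)
		if (devfn[i] == 0 && ++fn0 > 1)
			return 0;	/* more than one HBA: unsafe to reset */
	return 1;
}
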
+/**
  * lpfc_info - Info entry point of scsi_host_template data structure
  * @host: The scsi host for which this call is being executed.
  *
@@ -4453,32 +4376,53 @@
 {
 	struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
-	int len, link_speed = 0;
-	static char  lpfcinfobuf[384];
+	int link_speed = 0;
+	static char lpfcinfobuf[384];
+	char tmp[384] = {0};
 
-	memset(lpfcinfobuf,0,384);
+	memset(lpfcinfobuf, 0, sizeof(lpfcinfobuf));
 	if (phba && phba->pcidev){
-		strncpy(lpfcinfobuf, phba->ModelDesc, 256);
-		len = strlen(lpfcinfobuf);
-		snprintf(lpfcinfobuf + len,
-			384-len,
-			" on PCI bus %02x device %02x irq %d",
-			phba->pcidev->bus->number,
-			phba->pcidev->devfn,
-			phba->pcidev->irq);
-		len = strlen(lpfcinfobuf);
+		/* Model Description */
+		scnprintf(tmp, sizeof(tmp), "%s", phba->ModelDesc);
+		if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
+		    sizeof(lpfcinfobuf))
+			goto buffer_done;
+
+		/* PCI Info */
+		scnprintf(tmp, sizeof(tmp),
+			  " on PCI bus %02x device %02x irq %d",
+			  phba->pcidev->bus->number, phba->pcidev->devfn,
+			  phba->pcidev->irq);
+		if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
+		    sizeof(lpfcinfobuf))
+			goto buffer_done;
+
+		/* Port Number */
 		if (phba->Port[0]) {
-			snprintf(lpfcinfobuf + len,
-				 384-len,
-				 " port %s",
-				 phba->Port);
+			scnprintf(tmp, sizeof(tmp), " port %s", phba->Port);
+			if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
+			    sizeof(lpfcinfobuf))
+				goto buffer_done;
 		}
-		len = strlen(lpfcinfobuf);
+
+		/* Link Speed */
 		link_speed = lpfc_sli_port_speed_get(phba);
-		if (link_speed != 0)
-			snprintf(lpfcinfobuf + len, 384-len,
-				 " Logical Link Speed: %d Mbps", link_speed);
+		if (link_speed != 0) {
+			scnprintf(tmp, sizeof(tmp),
+				  " Logical Link Speed: %d Mbps", link_speed);
+			if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
+			    sizeof(lpfcinfobuf))
+				goto buffer_done;
+		}
+
+		/* PCI resettable */
+		if (!lpfc_check_pci_resettable(phba)) {
+			scnprintf(tmp, sizeof(tmp), " PCI resettable");
+			strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf));
+		}
 	}
+
+buffer_done:
 	return lpfcinfobuf;
 }
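
The rewritten lpfc_info() formats each fragment into a scratch buffer and appends with strlcat(), bailing out the first time the destination would overflow. A portable model of the pattern, with snprintf() standing in for the kernel's scnprintf()/strlcat():

#include <stdio.h>
#include <string.h>

/* stand-in for strlcat(): returns the length it tried to create */
static size_t xstrlcat(char *dst, const char *src, size_t size)
{
	size_t dlen = strnlen(dst, size);

	if (dlen < size)
		snprintf(dst + dlen, size - dlen, "%s", src);
	return dlen + strlen(src);
}

/* append fragments; stop cleanly the first time the buffer would overflow */
static const char *build_info(char *buf, size_t size)
{
	char tmp[64];

	buf[0] = '\0';
	snprintf(tmp, sizeof(tmp), " on PCI bus %02x device %02x", 5, 1);
	if (xstrlcat(buf, tmp, size) >= size)
		goto done;
	snprintf(tmp, sizeof(tmp), " Logical Link Speed: %d Mbps", 16000);
	if (xstrlcat(buf, tmp, size) >= size)
		goto done;
done:
	return buf;
}
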
 
@@ -4551,9 +4495,12 @@
 	struct lpfc_hba   *phba = vport->phba;
 	struct lpfc_rport_data *rdata;
 	struct lpfc_nodelist *ndlp;
-	struct lpfc_scsi_buf *lpfc_cmd;
+	struct lpfc_io_buf *lpfc_cmd;
 	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
-	int err;
+	int err, idx;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	int cpu;
+#endif
 
 	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
 
@@ -4617,7 +4564,7 @@
 		}
 	}
 
-	lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
+	lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp, cmnd);
 	if (lpfc_cmd == NULL) {
 		lpfc_rampdown_queue_depth(phba);
 
@@ -4634,8 +4581,6 @@
 	lpfc_cmd->pCmd  = cmnd;
 	lpfc_cmd->rdata = rdata;
 	lpfc_cmd->ndlp = ndlp;
-	lpfc_cmd->timeout = 0;
-	lpfc_cmd->start_time = jiffies;
 	cmnd->host_scribble = (unsigned char *)lpfc_cmd;
 
 	if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
@@ -4665,11 +4610,25 @@
 		err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
 	}
 
-	if (err)
+	if (err == 2) {
+		cmnd->result = DID_ERROR << 16;
+		goto out_fail_command_release_buf;
+	} else if (err) {
 		goto out_host_busy_free_buf;
+	}
 
 	lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
 
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) {
+		cpu = raw_smp_processor_id();
+		if (cpu < LPFC_CHECK_CPU_CNT) {
+			struct lpfc_sli4_hdw_queue *hdwq =
+					&phba->sli4_hba.hdwq[lpfc_cmd->hdwq_no];
+			hdwq->cpucheck_xmt_io[cpu]++;
+		}
+	}
+#endif
 	err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
 				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
 	if (err) {
@@ -4690,16 +4649,6 @@
 				 (uint32_t)
 				 (cmnd->request->timeout / 1000));
 
-		switch (lpfc_cmd->fcp_cmnd->fcpCntl3) {
-		case WRITE_DATA:
-			atomic_dec(&phba->fc4ScsiOutputRequests);
-			break;
-		case READ_DATA:
-			atomic_dec(&phba->fc4ScsiInputRequests);
-			break;
-		default:
-			atomic_dec(&phba->fc4ScsiControlRequests);
-		}
 		goto out_host_busy_free_buf;
 	}
 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
@@ -4710,10 +4659,26 @@
 			lpfc_poll_rearm_timer(phba);
 	}
 
+	if (phba->cfg_xri_rebalancing)
+		lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_cmd->hdwq_no);
+
 	return 0;
 
  out_host_busy_free_buf:
+	idx = lpfc_cmd->hdwq_no;
 	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
+	if (phba->sli4_hba.hdwq) {
+		switch (lpfc_cmd->fcp_cmnd->fcpCntl3) {
+		case WRITE_DATA:
+			phba->sli4_hba.hdwq[idx].scsi_cstat.output_requests--;
+			break;
+		case READ_DATA:
+			phba->sli4_hba.hdwq[idx].scsi_cstat.input_requests--;
+			break;
+		default:
+			phba->sli4_hba.hdwq[idx].scsi_cstat.control_requests--;
+		}
+	}
 	lpfc_release_scsi_buf(phba, lpfc_cmd);
  out_host_busy:
 	return SCSI_MLQUEUE_HOST_BUSY;
@@ -4721,6 +4686,9 @@
  out_tgt_busy:
 	return SCSI_MLQUEUE_TARGET_BUSY;
 
+ out_fail_command_release_buf:
+	lpfc_release_scsi_buf(phba, lpfc_cmd);
+
  out_fail_command:
 	cmnd->scsi_done(cmnd);
 	return 0;
@@ -4745,7 +4713,7 @@
 	struct lpfc_hba   *phba = vport->phba;
 	struct lpfc_iocbq *iocb;
 	struct lpfc_iocbq *abtsiocb;
-	struct lpfc_scsi_buf *lpfc_cmd;
+	struct lpfc_io_buf *lpfc_cmd;
 	IOCB_t *cmd, *icmd;
 	int ret = SUCCESS, status = 0;
 	struct lpfc_sli_ring *pring_s4 = NULL;
@@ -4757,65 +4725,59 @@
 	if (status != 0 && status != SUCCESS)
 		return status;
 
+	lpfc_cmd = (struct lpfc_io_buf *)cmnd->host_scribble;
+	if (!lpfc_cmd)
+		return ret;
+
 	spin_lock_irqsave(&phba->hbalock, flags);
 	/* driver queued commands are in process of being flushed */
-	if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
-		spin_unlock_irqrestore(&phba->hbalock, flags);
+	if (phba->hba_flag & HBA_IOQ_FLUSH) {
 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
 			"3168 SCSI Layer abort requested I/O has been "
 			"flushed by LLD.\n");
-		return FAILED;
+		ret = FAILED;
+		goto out_unlock;
 	}
 
-	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
-	if (!lpfc_cmd || !lpfc_cmd->pCmd) {
-		spin_unlock_irqrestore(&phba->hbalock, flags);
+	/* Guard against IO completion being called at same time */
+	spin_lock(&lpfc_cmd->buf_lock);
+
+	if (!lpfc_cmd->pCmd) {
 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
 			 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
 			 "x%x ID %d LUN %llu\n",
 			 SUCCESS, cmnd->device->id, cmnd->device->lun);
-		return SUCCESS;
+		goto out_unlock_buf;
 	}
 
 	iocb = &lpfc_cmd->cur_iocbq;
 	if (phba->sli_rev == LPFC_SLI_REV4) {
-		if (!(phba->cfg_fof) ||
-		    (!(iocb->iocb_flag & LPFC_IO_FOF))) {
-			pring_s4 =
-				phba->sli4_hba.fcp_wq[iocb->hba_wqidx]->pring;
-		} else {
-			iocb->hba_wqidx = 0;
-			pring_s4 = phba->sli4_hba.oas_wq->pring;
-		}
+		pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring;
 		if (!pring_s4) {
 			ret = FAILED;
-			goto out_unlock;
+			goto out_unlock_buf;
 		}
 		spin_lock(&pring_s4->ring_lock);
 	}
 	/* the command is in process of being cancelled */
 	if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
-		if (phba->sli_rev == LPFC_SLI_REV4)
-			spin_unlock(&pring_s4->ring_lock);
-		spin_unlock_irqrestore(&phba->hbalock, flags);
 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
 			"3169 SCSI Layer abort requested I/O has been "
 			"cancelled by LLD.\n");
-		return FAILED;
+		ret = FAILED;
+		goto out_unlock_ring;
 	}
 	/*
-	 * If pCmd field of the corresponding lpfc_scsi_buf structure
+	 * If pCmd field of the corresponding lpfc_io_buf structure
 	 * points to a different SCSI command, then the driver has
 	 * already completed this command, but the midlayer did not
 	 * see the completion before the eh fired. Just return SUCCESS.
 	 */
 	if (lpfc_cmd->pCmd != cmnd) {
-		if (phba->sli_rev == LPFC_SLI_REV4)
-			spin_unlock(&pring_s4->ring_lock);
 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
 			"3170 SCSI Layer abort requested I/O has been "
 			"completed by LLD.\n");
-		goto out_unlock;
+		goto out_unlock_ring;
 	}
 
 	BUG_ON(iocb->context1 != lpfc_cmd);
@@ -4826,6 +4788,7 @@
 			 "3389 SCSI Layer I/O Abort Request is pending\n");
 		if (phba->sli_rev == LPFC_SLI_REV4)
 			spin_unlock(&pring_s4->ring_lock);
+		spin_unlock(&lpfc_cmd->buf_lock);
 		spin_unlock_irqrestore(&phba->hbalock, flags);
 		goto wait_for_cmpl;
 	}
@@ -4833,9 +4796,7 @@
 	abtsiocb = __lpfc_sli_get_iocbq(phba);
 	if (abtsiocb == NULL) {
 		ret = FAILED;
-		if (phba->sli_rev == LPFC_SLI_REV4)
-			spin_unlock(&pring_s4->ring_lock);
-		goto out_unlock;
+		goto out_unlock_ring;
 	}
 
 	/* Indicate the IO is being aborted by the driver. */
@@ -4885,24 +4846,18 @@
 	/* no longer need the lock after this point */
 	spin_unlock_irqrestore(&phba->hbalock, flags);
 
-
 	if (ret_val == IOCB_ERROR) {
-		if (phba->sli_rev == LPFC_SLI_REV4)
-			spin_lock_irqsave(&pring_s4->ring_lock, flags);
-		else
-			spin_lock_irqsave(&phba->hbalock, flags);
 		/* Indicate the IO is not being aborted by the driver. */
 		iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
 		lpfc_cmd->waitq = NULL;
-		if (phba->sli_rev == LPFC_SLI_REV4)
-			spin_unlock_irqrestore(&pring_s4->ring_lock, flags);
-		else
-			spin_unlock_irqrestore(&phba->hbalock, flags);
+		spin_unlock(&lpfc_cmd->buf_lock);
 		lpfc_sli_release_iocbq(phba, abtsiocb);
 		ret = FAILED;
 		goto out;
 	}
 
+	spin_unlock(&lpfc_cmd->buf_lock);
+
 	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
 		lpfc_sli_handle_fast_ring_event(phba,
 			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
@@ -4913,9 +4868,7 @@
 			  (lpfc_cmd->pCmd != cmnd),
 			   msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));
 
-	spin_lock_irqsave(shost->host_lock, flags);
-	lpfc_cmd->waitq = NULL;
-	spin_unlock_irqrestore(shost->host_lock, flags);
+	spin_lock(&lpfc_cmd->buf_lock);
 
 	if (lpfc_cmd->pCmd == cmnd) {
 		ret = FAILED;
@@ -4926,8 +4879,17 @@
 				 iocb->sli4_xritag, ret,
 				 cmnd->device->id, cmnd->device->lun);
 	}
+
+	lpfc_cmd->waitq = NULL;
+
+	spin_unlock(&lpfc_cmd->buf_lock);
 	goto out;
 
+out_unlock_ring:
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		spin_unlock(&pring_s4->ring_lock);
+out_unlock_buf:
+	spin_unlock(&lpfc_cmd->buf_lock);
 out_unlock:
 	spin_unlock_irqrestore(&phba->hbalock, flags);
 out:
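
The abort handler now nests hbalock -> buf_lock -> ring_lock and unwinds through fall-through labels, each releasing only the locks taken after the previous label. A freestanding model of the ladder, pthread mutexes as stand-ins for the spinlocks:

#include <pthread.h>

static pthread_mutex_t hba_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;

static int do_abort(int have_ring, int fail_early, int fail_late)
{
	int ret = 0;

	pthread_mutex_lock(&hba_lock);
	if (fail_early) {
		ret = -1;
		goto out_unlock;
	}
	pthread_mutex_lock(&buf_lock);
	if (have_ring)
		pthread_mutex_lock(&ring_lock);
	if (fail_late) {
		ret = -1;
		goto out_unlock_ring;
	}
	/* ... issue the abort ... */
out_unlock_ring:
	if (have_ring)
		pthread_mutex_unlock(&ring_lock);
	pthread_mutex_unlock(&buf_lock);
out_unlock:
	pthread_mutex_unlock(&hba_lock);
	return ret;
}
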
@@ -4965,7 +4927,7 @@
 /**
  * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed
  * @vport: The virtual port for which this call is being executed.
- * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
+ * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
  *
  * This routine checks the FCP RSP INFO to see if the tsk mgmt command succeeded
  *
@@ -4974,7 +4936,7 @@
  *   0x2002 - Success
  **/
 static int
-lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd)
+lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
 {
 	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
 	uint32_t rsp_info;
@@ -4999,7 +4961,12 @@
 				 rsp_info,
 				 rsp_len, rsp_info_code);
 
-		if ((fcprsp->rspStatus2&RSP_LEN_VALID) && (rsp_len == 8)) {
+		/* If FCP_RSP_LEN_VALID bit is one, then the FCP_RSP_LEN
+		 * field specifies the number of valid bytes of FCP_RSP_INFO.
+		 * The FCP_RSP_LEN field shall be set to 0x04 or 0x08.
+		 */
+		if ((fcprsp->rspStatus2 & RSP_LEN_VALID) &&
+		    ((rsp_len == 8) || (rsp_len == 4))) {
 			switch (rsp_info_code) {
 			case RSP_NO_FAILURE:
 				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
@@ -5049,7 +5016,7 @@
 		   uint8_t task_mgmt_cmd)
 {
 	struct lpfc_hba   *phba = vport->phba;
-	struct lpfc_scsi_buf *lpfc_cmd;
+	struct lpfc_io_buf *lpfc_cmd;
 	struct lpfc_iocbq *iocbq;
 	struct lpfc_iocbq *iocbqrsp;
 	struct lpfc_rport_data *rdata;
@@ -5062,7 +5029,7 @@
 		return FAILED;
 	pnode = rdata->pnode;
 
-	lpfc_cmd = lpfc_get_scsi_buf(phba, pnode);
+	lpfc_cmd = lpfc_get_scsi_buf(phba, pnode, NULL);
 	if (lpfc_cmd == NULL)
 		return FAILED;
 	lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
@@ -5152,7 +5119,7 @@
 	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
 	if (!rdata) {
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
-			"0797 Tgt Map rport failure: rdata x%p\n", rdata);
+			"0797 Tgt Map rport failure: rdata x%px\n", rdata);
 		return FAILED;
 	}
 	pnode = rdata->pnode;
@@ -5250,7 +5217,7 @@
 	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
 	if (!rdata || !rdata->pnode) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
-				 "0798 Device Reset rport failure: rdata x%p\n",
+				 "0798 Device Reset rdata failure: rdata x%px\n",
 				 rdata);
 		return FAILED;
 	}
@@ -5262,7 +5229,7 @@
 	status = lpfc_chk_tgt_mapped(vport, cmnd);
 	if (status == FAILED) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
-			"0721 Device Reset rport failure: rdata x%p\n", rdata);
+			"0721 Device Reset rport failure: rdata x%px\n", rdata);
 		return FAILED;
 	}
 
@@ -5319,9 +5286,10 @@
 	int status;
 
 	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
-	if (!rdata) {
+	if (!rdata || !rdata->pnode) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
-			"0799 Target Reset rport failure: rdata x%p\n", rdata);
+				 "0799 Target Reset rdata failure: rdata x%px\n",
+				 rdata);
 		return FAILED;
 	}
 	pnode = rdata->pnode;
@@ -5332,7 +5300,7 @@
 	status = lpfc_chk_tgt_mapped(vport, cmnd);
 	if (status == FAILED) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
-			"0722 Target Reset rport failure: rdata x%p\n", rdata);
+			"0722 Target Reset rport failure: rdata x%px\n", rdata);
 		if (pnode) {
 			spin_lock_irq(shost->host_lock);
 			pnode->nlp_flag &= ~NLP_NPR_ADISC;
@@ -5490,18 +5458,20 @@
 	lpfc_offline(phba);
 	rc = lpfc_sli_brdrestart(phba);
 	if (rc)
-		ret = FAILED;
+		goto error;
+
 	rc = lpfc_online(phba);
 	if (rc)
-		ret = FAILED;
+		goto error;
+
 	lpfc_unblock_mgmt_io(phba);
 
-	if (ret == FAILED) {
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
-				 "3323 Failed host reset, bring it offline\n");
-		lpfc_sli4_offline_eratt(phba);
-	}
 	return ret;
+error:
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+			 "3323 Failed host reset\n");
+	lpfc_unblock_mgmt_io(phba);
+	return FAILED;
 }
 
 /**
@@ -5570,6 +5540,12 @@
 	}
 	sdev_cnt = atomic_inc_return(&phba->sdev_cnt);
 
+	/* For SLI4, all IO buffers are pre-allocated */
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		return 0;
+
+	/* This code path is now ONLY for SLI3 adapters */
+
 	/*
 	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
 	 * available list of scsi buffers.  Don't allocate more than the
@@ -5601,7 +5577,7 @@
 				 (phba->cfg_hba_queue_depth - total));
 		num_to_alloc = phba->cfg_hba_queue_depth - total;
 	}
-	num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
+	num_allocated = lpfc_new_scsi_buf_s3(vport, num_to_alloc);
 	if (num_to_alloc != num_allocated) {
 			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
 					 "0708 Allocation request of %d "
@@ -5936,7 +5912,7 @@
 
 	/* Create an lun info structure and add to list of luns */
 	lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
-					   pri, false);
+					   pri, true);
 	if (lun_info) {
 		lun_info->oas_enabled = true;
 		lun_info->priority = pri;
@@ -6034,7 +6010,6 @@
 	.this_id		= -1,
 	.sg_tablesize		= 1,
 	.cmd_per_lun		= 1,
-	.use_clustering		= ENABLE_CLUSTERING,
 	.shost_attrs		= lpfc_hba_attrs,
 	.max_sectors		= 0xFFFF,
 	.vendor_id		= LPFC_NL_VENDOR_ID,
@@ -6059,9 +6034,8 @@
 	.this_id		= -1,
 	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
 	.cmd_per_lun		= LPFC_CMD_PER_LUN,
-	.use_clustering		= ENABLE_CLUSTERING,
 	.shost_attrs		= lpfc_hba_attrs,
-	.max_sectors		= 0xFFFF,
+	.max_sectors		= 0xFFFFFFFF,
 	.vendor_id		= LPFC_NL_VENDOR_ID,
 	.change_queue_depth	= scsi_change_queue_depth,
 	.track_queue_depth	= 1,
@@ -6086,7 +6060,6 @@
 	.this_id		= -1,
 	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
 	.cmd_per_lun		= LPFC_CMD_PER_LUN,
-	.use_clustering		= ENABLE_CLUSTERING,
 	.shost_attrs		= lpfc_hba_attrs,
 	.max_sectors		= 0xFFFF,
 	.vendor_id		= LPFC_NL_VENDOR_ID,
@@ -6111,7 +6084,6 @@
 	.this_id		= -1,
 	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
 	.cmd_per_lun		= LPFC_CMD_PER_LUN,
-	.use_clustering		= ENABLE_CLUSTERING,
 	.shost_attrs		= lpfc_vport_attrs,
 	.max_sectors		= 0xFFFF,
 	.change_queue_depth	= scsi_change_queue_depth,
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index cc99859..f76667b 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc and/or its subsidiaries.  *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -130,62 +130,6 @@
 	uint32_t cmd_count;
 };
 
-struct lpfc_scsi_buf {
-	struct list_head list;
-	struct scsi_cmnd *pCmd;
-	struct lpfc_rport_data *rdata;
-	struct lpfc_nodelist *ndlp;
-
-	uint32_t timeout;
-
-	uint16_t flags;  /* TBD convert exch_busy to flags */
-#define LPFC_SBUF_XBUSY         0x1     /* SLI4 hba reported XB on WCQE cmpl */
-#define LPFC_SBUF_BUMP_QDEPTH	0x8	/* bumped queue depth counter */
-	uint16_t exch_busy;     /* SLI4 hba reported XB on complete WCQE */
-	uint16_t status;	/* From IOCB Word 7- ulpStatus */
-	uint32_t result;	/* From IOCB Word 4. */
-
-	uint32_t   seg_cnt;	/* Number of scatter-gather segments returned by
-				 * dma_map_sg.  The driver needs this for calls
-				 * to dma_unmap_sg. */
-	uint32_t prot_seg_cnt;  /* seg_cnt's counterpart for protection data */
-
-	dma_addr_t nonsg_phys;	/* Non scatter-gather physical address. */
-
-	/*
-	 * data and dma_handle are the kernel virtual and bus address of the
-	 * dma-able buffer containing the fcp_cmd, fcp_rsp and a scatter
-	 * gather bde list that supports the sg_tablesize value.
-	 */
-	void *data;
-	dma_addr_t dma_handle;
-
-	struct fcp_cmnd *fcp_cmnd;
-	struct fcp_rsp *fcp_rsp;
-	struct ulp_bde64 *fcp_bpl;
-
-	dma_addr_t dma_phys_bpl;
-
-	/* cur_iocbq has phys of the dma-able buffer.
-	 * Iotag is in here
-	 */
-	struct lpfc_iocbq cur_iocbq;
-	uint16_t cpu;
-
-	wait_queue_head_t *waitq;
-	unsigned long start_time;
-
-#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-	/* Used to restore any changes to protection data for error injection */
-	void *prot_data_segment;
-	uint32_t prot_data;
-	uint32_t prot_data_type;
-#define	LPFC_INJERR_REFTAG	1
-#define	LPFC_INJERR_APPTAG	2
-#define	LPFC_INJERR_GUARD	3
-#endif
-};
-
 #define LPFC_SCSI_DMA_EXT_SIZE	264
 #define LPFC_BPL_SIZE		1024
 #define MDAC_DIRECT_CMD		0x22
@@ -194,7 +138,12 @@
 #define NO_MORE_OAS_LUN		-1
 #define NOT_OAS_ENABLED_LUN	NO_MORE_OAS_LUN
 
+#ifndef FC_PORTSPEED_128GBIT
+#define FC_PORTSPEED_128GBIT	0x2000
+#endif
+
 #define TXRDY_PAYLOAD_LEN	12
 
-int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba,
-				  struct lpfc_scsi_buf *lpfc_cmd);
+/* For sysfs/debugfs tmp string max len */
+#define LPFC_MAX_SCSI_INFO_TMP_LEN	79
+
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index be2bac9..614f78d 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.  *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -78,17 +78,15 @@
 				      struct hbq_dmabuf *);
 static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
 					  struct hbq_dmabuf *dmabuf);
-static int lpfc_sli4_fp_handle_cqe(struct lpfc_hba *, struct lpfc_queue *,
-				    struct lpfc_cqe *);
+static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
+				   struct lpfc_queue *cq, struct lpfc_cqe *cqe);
 static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
 				       int);
 static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
-				     struct lpfc_eqe *eqe, uint32_t qidx);
+				     struct lpfc_queue *eq,
+				     struct lpfc_eqe *eqe);
 static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
 static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
-static int lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba,
-				   struct lpfc_sli_ring *pring,
-				   struct lpfc_iocbq *cmdiocb);
 
 static IOCB_t *
 lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
@@ -110,7 +108,7 @@
  * endianness. This function can be called with or without
  * lock.
  **/
-void
+static void
 lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
 {
 	uint64_t *src = srcp;
@@ -150,7 +148,7 @@
 	/* sanity check on queue memory */
 	if (unlikely(!q))
 		return -ENOMEM;
-	temp_wqe = q->qe[q->host_index].wqe;
+	temp_wqe = lpfc_sli4_qe(q, q->host_index);
 
 	/* If the host has not yet processed the next entry then we are done */
 	idx = ((q->host_index + 1) % q->entry_count);
@@ -160,7 +158,7 @@
 	}
 	q->WQ_posted++;
 	/* set consumption flag every once in a while */
-	if (!((q->host_index + 1) % q->entry_repost))
+	if (!((q->host_index + 1) % q->notify_interval))
 		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
 	else
 		bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
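
lpfc_sli4_qe() replaces the old per-entry qe[] array lookup; one plausible shape, assuming fixed-size entries packed into an array of pages (illustrative, not the driver's definition), is sketched below. The entry_repost -> notify_interval rename in the same hunk reflects that the value is now a doorbell/notification cadence rather than a repost trigger.

struct q_model {
	unsigned char **pages;		/* page-sized entry buffers */
	unsigned int entry_size;	/* bytes per queue entry */
	unsigned int entries_per_page;
};

static inline void *q_entry(struct q_model *q, unsigned int idx)
{
	return q->pages[idx / q->entries_per_page] +
	       q->entry_size * (idx % q->entries_per_page);
}
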
@@ -270,7 +268,7 @@
 	/* sanity check on queue memory */
 	if (unlikely(!q))
 		return -ENOMEM;
-	temp_mqe = q->qe[q->host_index].mqe;
+	temp_mqe = lpfc_sli4_qe(q, q->host_index);
 
 	/* If the host has not yet processed the next entry then we are done */
 	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
@@ -325,29 +323,16 @@
 static struct lpfc_eqe *
 lpfc_sli4_eq_get(struct lpfc_queue *q)
 {
-	struct lpfc_hba *phba;
 	struct lpfc_eqe *eqe;
-	uint32_t idx;
 
 	/* sanity check on queue memory */
 	if (unlikely(!q))
 		return NULL;
-	phba = q->phba;
-	eqe = q->qe[q->hba_index].eqe;
+	eqe = lpfc_sli4_qe(q, q->host_index);
 
 	/* If the next EQE is not valid then we are done */
 	if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
 		return NULL;
-	/* If the host has not yet processed the next entry then we are done */
-	idx = ((q->hba_index + 1) % q->entry_count);
-	if (idx == q->host_index)
-		return NULL;
-
-	q->hba_index = idx;
-	/* if the index wrapped around, toggle the valid bit */
-	if (phba->sli4_hba.pc_sli4_params.eqav && !q->hba_index)
-		q->qe_valid = (q->qe_valid) ? 0 : 1;
-
 
 	/*
 	 * insert barrier for instruction interlock : data from the hardware
@@ -367,7 +352,7 @@
  * @q: The Event Queue to disable interrupts
  *
  **/
-inline void
+void
 lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
 {
 	struct lpfc_register doorbell;
@@ -386,59 +371,36 @@
  * @q: The Event Queue to disable interrupts
  *
  **/
-inline void
+void
 lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
 {
 	struct lpfc_register doorbell;
 
 	doorbell.word0 = 0;
-	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
-	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
-	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
-		(q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
-	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
+	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
 	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
 }
 
 /**
- * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
+ * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
+ * @phba: adapter with EQ
  * @q: The Event Queue that the host has completed processing for.
+ * @count: Number of elements that have been consumed
  * @arm: Indicates whether the host wants to arm this EQ.
  *
- * This routine will mark all Event Queue Entries on @q, from the last
- * known completed entry to the last entry that was processed, as completed
- * by clearing the valid bit for each completion queue entry. Then it will
- * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
- * The internal host index in the @q will be updated by this routine to indicate
- * that the host has finished processing the entries. The @arm parameter
- * indicates that the queue should be rearmed when ringing the doorbell.
- *
- * This function will return the number of EQEs that were popped.
+ * This routine will notify the HBA, by ringing the doorbell, that count
+ * number of EQEs have been processed. The @arm parameter indicates whether
+ * the queue should be rearmed when ringing the doorbell.
  **/
-uint32_t
-lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
+void
+lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
+		     uint32_t count, bool arm)
 {
-	uint32_t released = 0;
-	struct lpfc_hba *phba;
-	struct lpfc_eqe *temp_eqe;
 	struct lpfc_register doorbell;
 
 	/* sanity check on queue memory */
-	if (unlikely(!q))
-		return 0;
-	phba = q->phba;
-
-	/* while there are valid entries */
-	while (q->hba_index != q->host_index) {
-		if (!phba->sli4_hba.pc_sli4_params.eqav) {
-			temp_eqe = q->qe[q->host_index].eqe;
-			bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
-		}
-		released++;
-		q->host_index = ((q->host_index + 1) % q->entry_count);
-	}
-	if (unlikely(released == 0 && !arm))
-		return 0;
+	if (unlikely(!q || (count == 0 && !arm)))
+		return;
 
 	/* ring doorbell for number popped */
 	doorbell.word0 = 0;
@@ -446,7 +408,7 @@
 		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
 		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
 	}
-	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
+	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
 	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
 	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
 			(q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
@@ -455,60 +417,112 @@
 	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
 	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
 		readl(q->phba->sli4_hba.EQDBregaddr);
-	return released;
 }
 
 /**
- * lpfc_sli4_if6_eq_release - Indicates the host has finished processing an EQ
+ * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
+ * @phba: adapter with EQ
  * @q: The Event Queue that the host has completed processing for.
+ * @count: Number of elements that have been consumed
  * @arm: Indicates whether the host wants to arm this EQ.
  *
- * This routine will mark all Event Queue Entries on @q, from the last
- * known completed entry to the last entry that was processed, as completed
- * by clearing the valid bit for each completion queue entry. Then it will
- * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
- * The internal host index in the @q will be updated by this routine to indicate
- * that the host has finished processing the entries. The @arm parameter
- * indicates that the queue should be rearmed when ringing the doorbell.
- *
- * This function will return the number of EQEs that were popped.
+ * This routine will notify the HBA, by ringing the doorbell, that count
+ * number of EQEs have been processed. The @arm parameter indicates whether
+ * the queue should be rearmed when ringing the doorbell.
  **/
-uint32_t
-lpfc_sli4_if6_eq_release(struct lpfc_queue *q, bool arm)
+void
+lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
+			  uint32_t count, bool arm)
 {
-	uint32_t released = 0;
-	struct lpfc_hba *phba;
-	struct lpfc_eqe *temp_eqe;
 	struct lpfc_register doorbell;
 
 	/* sanity check on queue memory */
-	if (unlikely(!q))
-		return 0;
-	phba = q->phba;
-
-	/* while there are valid entries */
-	while (q->hba_index != q->host_index) {
-		if (!phba->sli4_hba.pc_sli4_params.eqav) {
-			temp_eqe = q->qe[q->host_index].eqe;
-			bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
-		}
-		released++;
-		q->host_index = ((q->host_index + 1) % q->entry_count);
-	}
-	if (unlikely(released == 0 && !arm))
-		return 0;
+	if (unlikely(!q || (count == 0 && !arm)))
+		return;
 
 	/* ring doorbell for number popped */
 	doorbell.word0 = 0;
 	if (arm)
 		bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
-	bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, released);
+	bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
 	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
 	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
 	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
 	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
 		readl(q->phba->sli4_hba.EQDBregaddr);
-	return released;
+}
+
+static void
+__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
+			struct lpfc_eqe *eqe)
+{
+	if (!phba->sli4_hba.pc_sli4_params.eqav)
+		bf_set_le32(lpfc_eqe_valid, eqe, 0);
+
+	eq->host_index = ((eq->host_index + 1) % eq->entry_count);
+
+	/* if the index wrapped around, toggle the valid bit */
+	if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
+		eq->qe_valid = (eq->qe_valid) ? 0 : 1;
+}
+
+static void
+lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
+{
+	struct lpfc_eqe *eqe;
+	uint32_t count = 0;
+
+	/* walk all the EQ entries and drop on the floor */
+	eqe = lpfc_sli4_eq_get(eq);
+	while (eqe) {
+		__lpfc_sli4_consume_eqe(phba, eq, eqe);
+		count++;
+		eqe = lpfc_sli4_eq_get(eq);
+	}
+
+	/* Clear and re-arm the EQ */
+	phba->sli4_hba.sli4_write_eq_db(phba, eq, count, LPFC_QUEUE_REARM);
+}
+
+static int
+lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq)
+{
+	struct lpfc_eqe *eqe;
+	int count = 0, consumed = 0;
+
+	if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
+		goto rearm_and_exit;
+
+	eqe = lpfc_sli4_eq_get(eq);
+	while (eqe) {
+		lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
+		__lpfc_sli4_consume_eqe(phba, eq, eqe);
+
+		consumed++;
+		if (!(++count % eq->max_proc_limit))
+			break;
+
+		if (!(count % eq->notify_interval)) {
+			phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
+							LPFC_QUEUE_NOARM);
+			consumed = 0;
+		}
+
+		eqe = lpfc_sli4_eq_get(eq);
+	}
+	eq->EQ_processed += count;
+
+	/* Track the max number of EQEs processed in 1 intr */
+	if (count > eq->EQ_max_eqe)
+		eq->EQ_max_eqe = count;
+
+	eq->queue_claimed = 0;
+
+rearm_and_exit:
+	/* Always clear and re-arm the EQ */
+	phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, LPFC_QUEUE_REARM);
+
+	return count;
 }
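
lpfc_sli4_process_eq() above combines three ideas: a cmpxchg() claim flag so only one CPU drains a given EQ at a time, a max_proc_limit budget that bounds the work done in one pass, and periodic LPFC_QUEUE_NOARM doorbells every notify_interval entries so consumed-entry credits flow back to the port before the final rearm (without them the port could see the EQ as full and stall). A condensed, self-contained sketch of the same claim/batch/rearm pattern using C11 atomics; struct ring, ring_get, ring_consume, ring_write_db and handle_entry are hypothetical stand-ins, not lpfc APIs:

	#include <stdatomic.h>
	#include <stddef.h>

	struct ring {
		atomic_int claimed;
		unsigned int notify_interval;	/* doorbell every N entries    */
		unsigned int max_proc_limit;	/* budget for one polling pass */
	};

	enum { NOARM, REARM };

	extern void *ring_get(struct ring *r);		/* next valid entry or NULL */
	extern void ring_consume(struct ring *r, void *e);
	extern void ring_write_db(struct ring *r, unsigned int n, int arm);
	extern void handle_entry(struct ring *r, void *e);

	static int process_ring(struct ring *r)
	{
		unsigned int count = 0, consumed = 0;
		int expected = 0;
		void *e;

		/* only one CPU may drain the ring at a time */
		if (!atomic_compare_exchange_strong(&r->claimed, &expected, 1))
			goto rearm_and_exit;

		while ((e = ring_get(r)) != NULL) {
			handle_entry(r, e);
			ring_consume(r, e);
			consumed++;
			if (!(++count % r->max_proc_limit))
				break;	/* bound the work done in one pass */
			if (!(count % r->notify_interval)) {
				/* return credits early without re-arming */
				ring_write_db(r, consumed, NOARM);
				consumed = 0;
			}
		}
		atomic_store(&r->claimed, 0);

	rearm_and_exit:
		/* return remaining credits and re-enable the interrupt */
		ring_write_db(r, consumed, REARM);
		return count;
	}
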
 
 /**
@@ -523,28 +537,16 @@
 static struct lpfc_cqe *
 lpfc_sli4_cq_get(struct lpfc_queue *q)
 {
-	struct lpfc_hba *phba;
 	struct lpfc_cqe *cqe;
-	uint32_t idx;
 
 	/* sanity check on queue memory */
 	if (unlikely(!q))
 		return NULL;
-	phba = q->phba;
-	cqe = q->qe[q->hba_index].cqe;
+	cqe = lpfc_sli4_qe(q, q->host_index);
 
 	/* If the next CQE is not valid then we are done */
 	if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
 		return NULL;
-	/* If the host has not yet processed the next entry then we are done */
-	idx = ((q->hba_index + 1) % q->entry_count);
-	if (idx == q->host_index)
-		return NULL;
-
-	q->hba_index = idx;
-	/* if the index wrapped around, toggle the valid bit */
-	if (phba->sli4_hba.pc_sli4_params.cqav && !q->hba_index)
-		q->qe_valid = (q->qe_valid) ? 0 : 1;
 
 	/*
 	 * insert barrier for instruction interlock : data from the hardware
@@ -558,107 +560,81 @@
 	return cqe;
 }
 
+static void
+__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
+			struct lpfc_cqe *cqe)
+{
+	if (!phba->sli4_hba.pc_sli4_params.cqav)
+		bf_set_le32(lpfc_cqe_valid, cqe, 0);
+
+	cq->host_index = ((cq->host_index + 1) % cq->entry_count);
+
+	/* if the index wrapped around, toggle the valid bit */
+	if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
+		cq->qe_valid = (cq->qe_valid) ? 0 : 1;
+}
+
 /**
- * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
+ * lpfc_sli4_write_cq_db - write CQ DB for entries consumed or arm state
+ * @phba: the adapter with the CQ
  * @q: The Completion Queue that the host has completed processing for.
+ * @count: the number of elements that were consumed
  * @arm: Indicates whether the host wants to arm this CQ.
  *
- * This routine will mark all Completion queue entries on @q, from the last
- * known completed entry to the last entry that was processed, as completed
- * by clearing the valid bit for each completion queue entry. Then it will
- * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
- * The internal host index in the @q will be updated by this routine to indicate
- * that the host has finished processing the entries. The @arm parameter
- * indicates that the queue should be rearmed when ringing the doorbell.
- *
- * This function will return the number of CQEs that were released.
+ * This routine will notify the HBA, by ringing the doorbell, that @count
+ * CQEs have been processed. The @arm parameter specifies whether the
+ * queue should be rearmed when ringing the doorbell.
  **/
-uint32_t
-lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
+void
+lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
+		     uint32_t count, bool arm)
 {
-	uint32_t released = 0;
-	struct lpfc_hba *phba;
-	struct lpfc_cqe *temp_qe;
 	struct lpfc_register doorbell;
 
 	/* sanity check on queue memory */
-	if (unlikely(!q))
-		return 0;
-	phba = q->phba;
-
-	/* while there are valid entries */
-	while (q->hba_index != q->host_index) {
-		if (!phba->sli4_hba.pc_sli4_params.cqav) {
-			temp_qe = q->qe[q->host_index].cqe;
-			bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
-		}
-		released++;
-		q->host_index = ((q->host_index + 1) % q->entry_count);
-	}
-	if (unlikely(released == 0 && !arm))
-		return 0;
+	if (unlikely(!q || (count == 0 && !arm)))
+		return;
 
 	/* ring doorbell for number popped */
 	doorbell.word0 = 0;
 	if (arm)
 		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
-	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
+	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
 	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
 	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
 			(q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
 	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
 	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
-	return released;
 }
 
 /**
- * lpfc_sli4_if6_cq_release - Indicates the host has finished processing a CQ
+ * lpfc_sli4_if6_write_cq_db - write CQ DB for entries consumed or arm state
+ * @phba: the adapter with the CQ
  * @q: The Completion Queue that the host has completed processing for.
+ * @count: the number of elements that were consumed
  * @arm: Indicates whether the host wants to arm this CQ.
  *
- * This routine will mark all Completion queue entries on @q, from the last
- * known completed entry to the last entry that was processed, as completed
- * by clearing the valid bit for each completion queue entry. Then it will
- * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
- * The internal host index in the @q will be updated by this routine to indicate
- * that the host has finished processing the entries. The @arm parameter
- * indicates that the queue should be rearmed when ringing the doorbell.
- *
- * This function will return the number of CQEs that were released.
+ * This routine will notify the HBA, by ringing the doorbell, that @count
+ * CQEs have been processed. The @arm parameter specifies whether the
+ * queue should be rearmed when ringing the doorbell.
  **/
-uint32_t
-lpfc_sli4_if6_cq_release(struct lpfc_queue *q, bool arm)
+void
+lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
+			 uint32_t count, bool arm)
 {
-	uint32_t released = 0;
-	struct lpfc_hba *phba;
-	struct lpfc_cqe *temp_qe;
 	struct lpfc_register doorbell;
 
 	/* sanity check on queue memory */
-	if (unlikely(!q))
-		return 0;
-	phba = q->phba;
-
-	/* while there are valid entries */
-	while (q->hba_index != q->host_index) {
-		if (!phba->sli4_hba.pc_sli4_params.cqav) {
-			temp_qe = q->qe[q->host_index].cqe;
-			bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
-		}
-		released++;
-		q->host_index = ((q->host_index + 1) % q->entry_count);
-	}
-	if (unlikely(released == 0 && !arm))
-		return 0;
+	if (unlikely(!q || (count == 0 && !arm)))
+		return;
 
 	/* ring doorbell for number popped */
 	doorbell.word0 = 0;
 	if (arm)
 		bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
-	bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, released);
+	bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
 	bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
 	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
-	return released;
 }
 
 /**
@@ -688,8 +664,8 @@
 		return -ENOMEM;
 	hq_put_index = hq->host_index;
 	dq_put_index = dq->host_index;
-	temp_hrqe = hq->qe[hq_put_index].rqe;
-	temp_drqe = dq->qe[dq_put_index].rqe;
+	temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
+	temp_drqe = lpfc_sli4_qe(dq, dq_put_index);
 
 	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
 		return -EINVAL;
@@ -707,15 +683,15 @@
 	hq->RQ_buf_posted++;
 
 	/* Ring The Header Receive Queue Doorbell */
-	if (!(hq->host_index % hq->entry_repost)) {
+	if (!(hq->host_index % hq->notify_interval)) {
 		doorbell.word0 = 0;
 		if (hq->db_format == LPFC_DB_RING_FORMAT) {
 			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
-			       hq->entry_repost);
+			       hq->notify_interval);
 			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
 		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
 			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
-			       hq->entry_repost);
+			       hq->notify_interval);
 			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
 			       hq->host_index);
 			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
@@ -928,10 +904,10 @@
 		mod_timer(&phba->rrq_tmr, next_time);
 	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
 		list_del(&rrq->list);
-		if (!rrq->send_rrq)
+		if (!rrq->send_rrq) {
 			/* this call will free the rrq */
-		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
-		else if (lpfc_send_rrq(phba, rrq)) {
+			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+		} else if (lpfc_send_rrq(phba, rrq)) {
 			/* if we send the rrq then the completion handler
 			*  will clear the bit in the xribitmap.
 			*/
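
The braces added in the hunk above matter more than they look: with a comment sitting between an if and its lone statement, the unbraced form is easy to misindent, and a later edit that adds a second statement either changes behavior silently or fails to compile because the else loses its if. A hypothetical sketch of the robust braced form (struct req, release_request and send_request are illustrative):

	struct req {
		int pending;
	};

	extern void release_request(struct req *r);
	extern int send_request(struct req *r);

	static void dispatch(struct req *r, int send_required)
	{
		if (!send_required) {
			/* this call frees the request */
			release_request(r);
		} else if (send_request(r)) {
			/* completion handler clears the tracking bit on success */
			r->pending = 1;
		}
	}
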
@@ -1015,21 +991,20 @@
  * @ndlp: Targets nodelist pointer for this exchange.
  * @xritag: the xri in the bitmap to test.
  *
- * This function is called with hbalock held. This function
- * returns 0 = rrq not active for this xri
- *         1 = rrq is valid for this xri.
+ * This function returns:
+ * 0 = rrq not active for this xri
+ * 1 = rrq is valid for this xri.
  **/
 int
 lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
 			uint16_t  xritag)
 {
-	lockdep_assert_held(&phba->hbalock);
 	if (!ndlp)
 		return 0;
 	if (!ndlp->active_rrqs_xri_bitmap)
 		return 0;
 	if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
-			return 1;
+		return 1;
 	else
 		return 0;
 }
@@ -1126,10 +1101,11 @@
  * @phba: Pointer to HBA context object.
  * @piocb: Pointer to the iocbq.
  *
- * This function is called with the ring lock held. This function
- * gets a new driver sglq object from the sglq list. If the
- * list is not empty then it is successful, it returns pointer to the newly
- * allocated sglq object else it returns NULL.
+ * The driver calls this function with either the nvme ls ring lock
+ * or the fc els ring lock held depending on the iocb usage.  This function
+ * gets a new driver sglq object from the sglq list. If the list is not
+ * empty, it returns a pointer to the newly allocated sglq object;
+ * otherwise it returns NULL.
  **/
 static struct lpfc_sglq *
 __lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
@@ -1137,14 +1113,20 @@
 	struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
 	struct lpfc_sglq *sglq = NULL;
 	struct lpfc_sglq *start_sglq = NULL;
-	struct lpfc_scsi_buf *lpfc_cmd;
+	struct lpfc_io_buf *lpfc_cmd;
 	struct lpfc_nodelist *ndlp;
+	struct lpfc_sli_ring *pring = NULL;
 	int found = 0;
 
-	lockdep_assert_held(&phba->hbalock);
+	if (piocbq->iocb_flag & LPFC_IO_NVME_LS)
+		pring = phba->sli4_hba.nvmels_wq->pring;
+	else
+		pring = lpfc_phba_elsring(phba);
+
+	lockdep_assert_held(&pring->ring_lock);
 
 	if (piocbq->iocb_flag &  LPFC_IO_FCP) {
-		lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
+		lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1;
 		ndlp = lpfc_cmd->rdata->pnode;
 	} else  if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
 			!(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
@@ -1409,9 +1391,12 @@
 
 	while (!list_empty(iocblist)) {
 		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
-		if (!piocb->iocb_cmpl)
-			lpfc_sli_release_iocbq(phba, piocb);
-		else {
+		if (!piocb->iocb_cmpl) {
+			if (piocb->iocb_flag & LPFC_IO_NVME)
+				lpfc_nvme_cancel_iocb(phba, piocb);
+			else
+				lpfc_sli_release_iocbq(phba, piocb);
+		} else {
 			piocb->iocb.ulpStatus = ulpstatus;
 			piocb->iocb.un.ulpWord[4] = ulpWord4;
 			(piocb->iocb_cmpl) (phba, piocb, piocb);
@@ -1584,7 +1569,8 @@
  * @pring: Pointer to driver SLI ring object.
  * @piocb: Pointer to the driver iocb object.
  *
- * This function is called with hbalock held. The function adds the
+ * The driver calls this function with the hbalock held for SLI3 ports or
+ * the ring lock held for SLI4 ports. The function adds the
  * new iocb to txcmplq of the given ring. This function always returns
  * 0. If this function is called for ELS ring, this function checks if
  * there is a vport associated with the ELS command. This function also
@@ -1594,12 +1580,16 @@
 lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 			struct lpfc_iocbq *piocb)
 {
-	lockdep_assert_held(&phba->hbalock);
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		lockdep_assert_held(&pring->ring_lock);
+	else
+		lockdep_assert_held(&phba->hbalock);
 
 	BUG_ON(!piocb);
 
 	list_add_tail(&piocb->list, &pring->txcmplq);
 	piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
+	pring->txcmplq_cnt++;
 
 	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
 	   (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
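
The conditional lockdep_assert_held() in the hunk above documents that txcmplq is protected by different locks on different hardware revisions: the single hbalock on SLI3 and the per-ring ring_lock on SLI4. A minimal kernel-style sketch of the same idea, with hypothetical driver types (struct my_hba and struct my_ring are illustrative, not lpfc structures):

	#include <linux/list.h>
	#include <linux/lockdep.h>
	#include <linux/spinlock.h>

	struct my_ring {
		spinlock_t ring_lock;
		struct list_head txcmplq;
		unsigned int txcmplq_cnt;
	};

	struct my_hba {
		spinlock_t hba_lock;
		int rev;
	};

	static void my_txcmpl_put(struct my_hba *hba, struct my_ring *ring,
				  struct list_head *entry)
	{
		/* document (and, with CONFIG_PROVE_LOCKING, verify) the contract */
		if (hba->rev >= 4)
			lockdep_assert_held(&ring->ring_lock);
		else
			lockdep_assert_held(&hba->hba_lock);

		list_add_tail(entry, &ring->txcmplq);
		ring->txcmplq_cnt++;
	}
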
@@ -2439,6 +2429,20 @@
 	return;
 }
 
+static void
+__lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+	unsigned long iflags;
+
+	if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
+		lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
+		spin_lock_irqsave(&vport->phba->ndlp_lock, iflags);
+		ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
+		ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
+		spin_unlock_irqrestore(&vport->phba->ndlp_lock, iflags);
+	}
+	ndlp->nlp_flag &= ~NLP_UNREG_INP;
+}
 
 /**
  * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
@@ -2460,7 +2464,7 @@
 	uint16_t rpi, vpi;
 	int rc;
 
-	mp = (struct lpfc_dmabuf *) (pmb->context1);
+	mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
 
 	if (mp) {
 		lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -2495,9 +2499,35 @@
 	}
 
 	if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
-		ndlp = (struct lpfc_nodelist *)pmb->context2;
+		ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
 		lpfc_nlp_put(ndlp);
-		pmb->context2 = NULL;
+		pmb->ctx_buf = NULL;
+		pmb->ctx_ndlp = NULL;
+	}
+
+	if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
+		ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+
+		/* Check to see if there are any deferred events to process */
+		if (ndlp) {
+			lpfc_printf_vlog(
+				vport,
+				KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
+				"1438 UNREG cmpl deferred mbox x%x "
+				"on NPort x%x Data: x%x x%x %px\n",
+				ndlp->nlp_rpi, ndlp->nlp_DID,
+				ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp);
+
+			if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
+			    (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
+				ndlp->nlp_flag &= ~NLP_UNREG_INP;
+				ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
+				lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+			} else {
+				__lpfc_sli_rpi_release(vport, ndlp);
+			}
+			pmb->ctx_ndlp = NULL;
+		}
 	}
 
 	/* Check security permission status on INIT_LINK mailbox command */
@@ -2531,21 +2561,46 @@
 	struct lpfc_vport  *vport = pmb->vport;
 	struct lpfc_nodelist *ndlp;
 
-	ndlp = pmb->context1;
+	ndlp = pmb->ctx_ndlp;
 	if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
 		if (phba->sli_rev == LPFC_SLI_REV4 &&
 		    (bf_get(lpfc_sli_intf_if_type,
 		     &phba->sli4_hba.sli_intf) >=
 		     LPFC_SLI_INTF_IF_TYPE_2)) {
 			if (ndlp) {
-				lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
-						 "0010 UNREG_LOGIN vpi:%x "
-						 "rpi:%x DID:%x map:%x %p\n",
-						 vport->vpi, ndlp->nlp_rpi,
-						 ndlp->nlp_DID,
-						 ndlp->nlp_usg_map, ndlp);
+				lpfc_printf_vlog(
+					vport, KERN_INFO, LOG_MBOX | LOG_SLI,
+					 "0010 UNREG_LOGIN vpi:%x "
+					 "rpi:%x DID:%x defer x%x flg x%x "
+					 "map:%x %px\n",
+					 vport->vpi, ndlp->nlp_rpi,
+					 ndlp->nlp_DID, ndlp->nlp_defer_did,
+					 ndlp->nlp_flag,
+					 ndlp->nlp_usg_map, ndlp);
 				ndlp->nlp_flag &= ~NLP_LOGO_ACC;
 				lpfc_nlp_put(ndlp);
+
+				/* Check to see if there are any deferred
+				 * events to process
+				 */
+				if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
+				    (ndlp->nlp_defer_did !=
+				    NLP_EVT_NOTHING_PENDING)) {
+					lpfc_printf_vlog(
+						vport, KERN_INFO, LOG_DISCOVERY,
+						"4111 UNREG cmpl deferred "
+						"clr x%x on "
+						"NPort x%x Data: x%x x%px\n",
+						ndlp->nlp_rpi, ndlp->nlp_DID,
+						ndlp->nlp_defer_did, ndlp);
+					ndlp->nlp_flag &= ~NLP_UNREG_INP;
+					ndlp->nlp_defer_did =
+						NLP_EVT_NOTHING_PENDING;
+					lpfc_issue_els_plogi(
+						vport, ndlp->nlp_DID, 0);
+				} else {
+					__lpfc_sli_rpi_release(vport, ndlp);
+				}
 			}
 		}
 	}
@@ -2657,7 +2712,7 @@
 
 		/* Mailbox cmd <cmd> Cmpl <cmpl> */
 		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
-				"(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
+				"(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
 				"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
 				"x%x x%x x%x\n",
 				pmb->vport ? pmb->vport->vpi : 0,
@@ -2939,8 +2994,8 @@
  *
  * This function looks up the iocb_lookup table to get the command iocb
  * corresponding to the given response iocb using the iotag of the
- * response iocb. This function is called with the hbalock held
- * for sli3 devices or the ring_lock for sli4 devices.
+ * response iocb. The driver calls this function with the hbalock held
+ * for SLI3 ports or the ring lock held for SLI4 ports.
  * This function returns the command iocb object if it finds the command
  * iocb else returns NULL.
  **/
@@ -2951,8 +3006,15 @@
 {
 	struct lpfc_iocbq *cmd_iocb = NULL;
 	uint16_t iotag;
-	lockdep_assert_held(&phba->hbalock);
+	spinlock_t *temp_lock = NULL;
+	unsigned long iflag = 0;
 
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		temp_lock = &pring->ring_lock;
+	else
+		temp_lock = &phba->hbalock;
+
+	spin_lock_irqsave(temp_lock, iflag);
 	iotag = prspiocb->iocb.ulpIoTag;
 
 	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
@@ -2961,10 +3023,13 @@
 			/* remove from txcmpl queue list */
 			list_del_init(&cmd_iocb->list);
 			cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
+			pring->txcmplq_cnt--;
+			spin_unlock_irqrestore(temp_lock, iflag);
 			return cmd_iocb;
 		}
 	}
 
+	spin_unlock_irqrestore(temp_lock, iflag);
 	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 			"0317 iotag x%x is out of "
 			"range: max iotag x%x wd0 x%x\n",
@@ -2980,8 +3045,8 @@
  * @iotag: IOCB tag.
  *
  * This function looks up the iocb_lookup table to get the command iocb
- * corresponding to the given iotag. This function is called with the
- * hbalock held.
+ * corresponding to the given iotag. The driver calls this function with
+ * the ring lock held because this function is an SLI4 port only helper.
  * This function returns the command iocb object if it finds the command
  * iocb else returns NULL.
  **/
@@ -2990,18 +3055,28 @@
 			     struct lpfc_sli_ring *pring, uint16_t iotag)
 {
 	struct lpfc_iocbq *cmd_iocb = NULL;
+	spinlock_t *temp_lock = NULL;
+	unsigned long iflag = 0;
 
-	lockdep_assert_held(&phba->hbalock);
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		temp_lock = &pring->ring_lock;
+	else
+		temp_lock = &phba->hbalock;
+
+	spin_lock_irqsave(temp_lock, iflag);
 	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
 		cmd_iocb = phba->sli.iocbq_lookup[iotag];
 		if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
 			/* remove from txcmpl queue list */
 			list_del_init(&cmd_iocb->list);
 			cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
+			pring->txcmplq_cnt--;
+			spin_unlock_irqrestore(temp_lock, iflag);
 			return cmd_iocb;
 		}
 	}
 
+	spin_unlock_irqrestore(temp_lock, iflag);
 	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 			"0372 iotag x%x lookup error: max iotag (x%x) "
 			"iocb_flag x%x\n",
@@ -3035,17 +3110,7 @@
 	int rc = 1;
 	unsigned long iflag;
 
-	/* Based on the iotag field, get the cmd IOCB from the txcmplq */
-	if (phba->sli_rev == LPFC_SLI_REV4)
-		spin_lock_irqsave(&pring->ring_lock, iflag);
-	else
-		spin_lock_irqsave(&phba->hbalock, iflag);
 	cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
-	if (phba->sli_rev == LPFC_SLI_REV4)
-		spin_unlock_irqrestore(&pring->ring_lock, iflag);
-	else
-		spin_unlock_irqrestore(&phba->hbalock, iflag);
-
 	if (cmdiocbp) {
 		if (cmdiocbp->iocb_cmpl) {
 			/*
@@ -3376,8 +3441,10 @@
 				break;
 			}
 
+			spin_unlock_irqrestore(&phba->hbalock, iflag);
 			cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
 							 &rspiocbq);
+			spin_lock_irqsave(&phba->hbalock, iflag);
 			if (unlikely(!cmdiocbq))
 				break;
 			if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
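
Since the lookup helper now acquires the lock itself, callers that already hold the same lock, as in the fast-path loop above, must drop it around the call: Linux spinlocks are not recursive, so re-taking one deadlocks. Anything derived from the protected state before the unlock has to be revalidated afterwards. A hedged sketch of the caller side (process_response, lookup_and_unlink and complete_cmd are hypothetical):

	#include <linux/spinlock.h>

	struct cmd;
	extern struct cmd *lookup_and_unlink(spinlock_t *lock, unsigned int tag);
	extern void complete_cmd(struct cmd *c);

	static void process_response(spinlock_t *lk, unsigned int tag)
	{
		struct cmd *c;
		unsigned long flags;

		spin_lock_irqsave(lk, flags);
		/* ... response-ring bookkeeping done under the lock ... */

		/* the helper takes lk itself, so drop it across the call */
		spin_unlock_irqrestore(lk, flags);
		c = lookup_and_unlink(lk, tag);
		spin_lock_irqsave(lk, flags);

		/* anything cached from the ring is stale now; revalidate it */
		spin_unlock_irqrestore(lk, flags);

		if (c)
			complete_cmd(c);
	}
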
@@ -3571,9 +3638,12 @@
 
 		case LPFC_ABORT_IOCB:
 			cmdiocbp = NULL;
-			if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
+			if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) {
+				spin_unlock_irqrestore(&phba->hbalock, iflag);
 				cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
 								 saveq);
+				spin_lock_irqsave(&phba->hbalock, iflag);
+			}
 			if (cmdiocbp) {
 				/* Call the specified completion routine */
 				if (cmdiocbp->iocb_cmpl) {
@@ -3889,33 +3959,6 @@
 }
 
 /**
- * lpfc_sli_abort_wqe_ring - Abort all iocbs in the ring
- * @phba: Pointer to HBA context object.
- * @pring: Pointer to driver SLI ring object.
- *
- * This function aborts all iocbs in the given ring and frees all the iocb
- * objects in txq. This function issues an abort iocb for all the iocb commands
- * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before
- * the return of this function. The caller is not required to hold any locks.
- **/
-void
-lpfc_sli_abort_wqe_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
-{
-	LIST_HEAD(completions);
-	struct lpfc_iocbq *iocb, *next_iocb;
-
-	if (pring->ringno == LPFC_ELS_RING)
-		lpfc_fabric_abort_hba(phba);
-
-	spin_lock_irq(&phba->hbalock);
-	/* Next issue ABTS for everything on the txcmplq */
-	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
-		lpfc_sli4_abort_nvme_io(phba, pring, iocb);
-	spin_unlock_irq(&phba->hbalock);
-}
-
-
-/**
  * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
  * @phba: Pointer to HBA context object.
  * @pring: Pointer to driver SLI ring object.
@@ -3934,8 +3977,8 @@
 
 	/* Look on all the FCP Rings for the iotag */
 	if (phba->sli_rev >= LPFC_SLI_REV4) {
-		for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
-			pring = phba->sli4_hba.fcp_wq[i]->pring;
+		for (i = 0; i < phba->cfg_hdw_queue; i++) {
+			pring = phba->sli4_hba.hdwq[i].io_wq->pring;
 			lpfc_sli_abort_iocb_ring(phba, pring);
 		}
 	} else {
@@ -3945,43 +3988,17 @@
 }
 
 /**
- * lpfc_sli_abort_nvme_rings - Abort all wqes in all NVME rings
+ * lpfc_sli_flush_io_rings - flush all iocbs in the IO ring
  * @phba: Pointer to HBA context object.
  *
- * This function aborts all wqes in NVME rings. This function issues an
- * abort wqe for all the outstanding IO commands in txcmplq. The iocbs in
- * the txcmplq is not guaranteed to complete before the return of this
- * function. The caller is not required to hold any locks.
- **/
-void
-lpfc_sli_abort_nvme_rings(struct lpfc_hba *phba)
-{
-	struct lpfc_sli_ring  *pring;
-	uint32_t i;
-
-	if (phba->sli_rev < LPFC_SLI_REV4)
-		return;
-
-	/* Abort all IO on each NVME ring. */
-	for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
-		pring = phba->sli4_hba.nvme_wq[i]->pring;
-		lpfc_sli_abort_wqe_ring(phba, pring);
-	}
-}
-
-
-/**
- * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
- * @phba: Pointer to HBA context object.
- *
- * This function flushes all iocbs in the fcp ring and frees all the iocb
+ * This function flushes all iocbs in the IO ring and frees all the iocb
  * objects in txq and txcmplq. This function will not issue abort iocbs
  * for all the iocb commands in txcmplq, they will just be returned with
  * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
  * slot has been permanently disabled.
  **/
 void
-lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
+lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
 {
 	LIST_HEAD(txq);
 	LIST_HEAD(txcmplq);
@@ -3992,13 +4009,13 @@
 
 	spin_lock_irq(&phba->hbalock);
 	/* Indicate the I/O queues are flushed */
-	phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
+	phba->hba_flag |= HBA_IOQ_FLUSH;
 	spin_unlock_irq(&phba->hbalock);
 
 	/* Look on all the FCP Rings for the iotag */
 	if (phba->sli_rev >= LPFC_SLI_REV4) {
-		for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
-			pring = phba->sli4_hba.fcp_wq[i]->pring;
+		for (i = 0; i < phba->cfg_hdw_queue; i++) {
+			pring = phba->sli4_hba.hdwq[i].io_wq->pring;
 
 			spin_lock_irq(&pring->ring_lock);
 			/* Retrieve everything on txq */
@@ -4046,55 +4063,6 @@
 }
 
 /**
- * lpfc_sli_flush_nvme_rings - flush all wqes in the nvme rings
- * @phba: Pointer to HBA context object.
- *
- * This function flushes all wqes in the nvme rings and frees all resources
- * in the txcmplq. This function does not issue abort wqes for the IO
- * commands in txcmplq, they will just be returned with
- * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
- * slot has been permanently disabled.
- **/
-void
-lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba)
-{
-	LIST_HEAD(txcmplq);
-	struct lpfc_sli_ring  *pring;
-	uint32_t i;
-	struct lpfc_iocbq *piocb, *next_iocb;
-
-	if (phba->sli_rev < LPFC_SLI_REV4)
-		return;
-
-	/* Hint to other driver operations that a flush is in progress. */
-	spin_lock_irq(&phba->hbalock);
-	phba->hba_flag |= HBA_NVME_IOQ_FLUSH;
-	spin_unlock_irq(&phba->hbalock);
-
-	/* Cycle through all NVME rings and complete each IO with
-	 * a local driver reason code.  This is a flush so no
-	 * abort exchange to FW.
-	 */
-	for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
-		pring = phba->sli4_hba.nvme_wq[i]->pring;
-
-		spin_lock_irq(&pring->ring_lock);
-		list_for_each_entry_safe(piocb, next_iocb,
-					 &pring->txcmplq, list)
-			piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
-		/* Retrieve everything on the txcmplq */
-		list_splice_init(&pring->txcmplq, &txcmplq);
-		pring->txcmplq_cnt = 0;
-		spin_unlock_irq(&pring->ring_lock);
-
-		/* Flush the txcmpq &&&PAE */
-		lpfc_sli_cancel_iocbs(phba, &txcmplq,
-				      IOSTAT_LOCAL_REJECT,
-				      IOERR_SLI_DOWN);
-	}
-}
-
-/**
  * lpfc_sli_brdready_s3 - Check for sli3 host ready status
  * @phba: Pointer to HBA context object.
  * @mask: Bit mask to be checked.
@@ -4452,7 +4420,9 @@
 	}
 
 	/* Turn off parity checking and serr during the physical reset */
-	pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
+	if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
+		return -EIO;
+
 	pci_write_config_word(phba->pcidev, PCI_COMMAND,
 			      (cfg_value &
 			       ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
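
The new return-value check above reflects that config-space reads can fail (for example when the device has dropped off the bus or EEH has isolated the slot); on failure the read data is undefined, typically all ones, and masking bits out of it before writing it back would set every other bit in PCI_COMMAND. A minimal sketch of the checked read-modify-write, assuming a hypothetical helper name:

	#include <linux/pci.h>

	static int disable_parity_serr(struct pci_dev *pdev, u16 *saved)
	{
		u16 cmd;

		if (pci_read_config_word(pdev, PCI_COMMAND, &cmd))
			return -EIO;	/* never write back garbage data */

		*saved = cmd;		/* caller restores this after reset */
		pci_write_config_word(pdev, PCI_COMMAND,
				      cmd & ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR));
		return 0;
	}
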
@@ -4492,7 +4462,7 @@
  * checking during resets the device. The caller is not required to hold
  * any locks.
  *
- * This function returns 0 always.
+ * This function returns 0 on success else returns negative error code.
  **/
 int
 lpfc_sli4_brdreset(struct lpfc_hba *phba)
@@ -4529,7 +4499,12 @@
 			"0389 Performing PCI function reset!\n");
 
 	/* Turn off parity checking and serr during the physical reset */
-	pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
+	if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"3205 PCI read Config failed\n");
+		return -EIO;
+	}
+
 	pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
 			      ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
 
@@ -4644,6 +4619,10 @@
 	hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
 
 	rc = lpfc_sli4_brdreset(phba);
+	if (rc) {
+		phba->link_state = LPFC_HBA_ERROR;
+		goto hba_down_queue;
+	}
 
 	spin_lock_irq(&phba->hbalock);
 	phba->pport->stopped = 0;
@@ -4658,6 +4637,7 @@
 	if (hba_aer_enabled)
 		pci_disable_pcie_error_reporting(phba->pcidev);
 
+hba_down_queue:
 	lpfc_hba_down_post(phba);
 	lpfc_sli4_queue_destroy(phba);
 
@@ -5232,7 +5212,7 @@
 		goto out_free_mboxq;
 	}
 
-	mp = (struct lpfc_dmabuf *) mboxq->context1;
+	mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
@@ -5313,8 +5293,8 @@
 	 * mailbox command.
 	 */
 	dma_size = *vpd_size;
-	dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, dma_size,
-					   &dmabuf->phys, GFP_KERNEL);
+	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
+					  &dmabuf->phys, GFP_KERNEL);
 	if (!dmabuf->virt) {
 		kfree(dmabuf);
 		return -ENOMEM;
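
The dma_zalloc_coherent() call above becomes dma_alloc_coherent() because, since the v5.0 dma-mapping rework, dma_alloc_coherent() is documented to return zeroed memory itself, so the _zalloc variant was removed tree-wide. A short sketch of the resulting idiom (alloc_dma_buf is a hypothetical wrapper):

	#include <linux/dma-mapping.h>

	static void *alloc_dma_buf(struct device *dev, size_t size, dma_addr_t *dma)
	{
		void *virt = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);

		if (!virt)
			return NULL;	/* caller maps failure to -ENOMEM */
		/* no memset(virt, 0, size) needed: the API guarantees zeroing */
		return virt;
	}
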
@@ -5358,7 +5338,7 @@
 }
 
 /**
- * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
+ * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
  * @phba: pointer to lpfc hba data structure.
  *
  * This routine retrieves the SLI4 device controller attributes of the
  * controller this PCI function is attached to.
  *
  * Return codes
  *      0 - successful
- *      otherwise - failed to retrieve physical port name
+ *      otherwise - failed to retrieve controller attributes
  **/
 static int
-lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
+lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
 {
 	LPFC_MBOXQ_t *mboxq;
 	struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
 	struct lpfc_controller_attribute *cntl_attr;
-	struct lpfc_mbx_get_port_name *get_port_name;
 	void *virtaddr = NULL;
 	uint32_t alloclen, reqlen;
 	uint32_t shdr_status, shdr_add_status;
 	union lpfc_sli4_cfg_shdr *shdr;
-	char cport_name = 0;
 	int rc;
 
-	/* We assume nothing at this point */
-	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
-	phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
-
 	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (!mboxq)
 		return -ENOMEM;
-	/* obtain link type and link number via READ_CONFIG */
-	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
-	lpfc_sli4_read_config(phba);
-	if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
-		goto retrieve_ppname;
 
-	/* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
+	/* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */
 	reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
 	alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
 			LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
 			LPFC_SLI4_MBX_NEMBED);
+
 	if (alloclen < reqlen) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"3084 Allocated DMA memory size (%d) is "
@@ -5425,16 +5395,71 @@
 		rc = -ENXIO;
 		goto out_free_mboxq;
 	}
+
 	cntl_attr = &mbx_cntl_attr->cntl_attr;
 	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
 	phba->sli4_hba.lnk_info.lnk_tp =
 		bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
 	phba->sli4_hba.lnk_info.lnk_no =
 		bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
+
+	memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
+	strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
+		sizeof(phba->BIOSVersion));
+
 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
-			"3086 lnk_type:%d, lnk_numb:%d\n",
+			"3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s\n",
 			phba->sli4_hba.lnk_info.lnk_tp,
-			phba->sli4_hba.lnk_info.lnk_no);
+			phba->sli4_hba.lnk_info.lnk_no,
+			phba->BIOSVersion);
+out_free_mboxq:
+	if (rc != MBX_TIMEOUT) {
+		if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
+			lpfc_sli4_mbox_cmd_free(phba, mboxq);
+		else
+			mempool_free(mboxq, phba->mbox_mem_pool);
+	}
+	return rc;
+}
+
+/**
+ * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine retrieves the SLI4 device physical port name of the port
+ * this PCI function is attached to.
+ *
+ * Return codes
+ *      0 - successful
+ *      otherwise - failed to retrieve physical port name
+ **/
+static int
+lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
+{
+	LPFC_MBOXQ_t *mboxq;
+	struct lpfc_mbx_get_port_name *get_port_name;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+	char cport_name = 0;
+	int rc;
+
+	/* We assume nothing at this point */
+	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
+	phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
+
+	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq)
+		return -ENOMEM;
+	/* obtain link type and link number via READ_CONFIG */
+	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
+	lpfc_sli4_read_config(phba);
+	if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
+		goto retrieve_ppname;
+
+	/* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
+	rc = lpfc_sli4_get_ctl_attr(phba);
+	if (rc)
+		goto out_free_mboxq;
 
 retrieve_ppname:
 	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
@@ -5515,41 +5540,40 @@
 {
 	int qidx;
 	struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
+	struct lpfc_sli4_hdw_queue *qp;
+	struct lpfc_queue *eq;
 
-	sli4_hba->sli4_cq_release(sli4_hba->mbx_cq, LPFC_QUEUE_REARM);
-	sli4_hba->sli4_cq_release(sli4_hba->els_cq, LPFC_QUEUE_REARM);
+	sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
+	sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
 	if (sli4_hba->nvmels_cq)
-		sli4_hba->sli4_cq_release(sli4_hba->nvmels_cq,
+		sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
+					   LPFC_QUEUE_REARM);
+
+	if (sli4_hba->hdwq) {
+		/* Loop thru all Hardware Queues */
+		for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
+			qp = &sli4_hba->hdwq[qidx];
+			/* ARM the corresponding CQ */
+			sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0,
 						LPFC_QUEUE_REARM);
+		}
 
-	if (sli4_hba->fcp_cq)
-		for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
-			sli4_hba->sli4_cq_release(sli4_hba->fcp_cq[qidx],
-						LPFC_QUEUE_REARM);
-
-	if (sli4_hba->nvme_cq)
-		for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
-			sli4_hba->sli4_cq_release(sli4_hba->nvme_cq[qidx],
-						LPFC_QUEUE_REARM);
-
-	if (phba->cfg_fof)
-		sli4_hba->sli4_cq_release(sli4_hba->oas_cq, LPFC_QUEUE_REARM);
-
-	if (sli4_hba->hba_eq)
-		for (qidx = 0; qidx < phba->io_channel_irqs; qidx++)
-			sli4_hba->sli4_eq_release(sli4_hba->hba_eq[qidx],
-							LPFC_QUEUE_REARM);
-
-	if (phba->nvmet_support) {
-		for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
-			sli4_hba->sli4_cq_release(
-				sli4_hba->nvmet_cqset[qidx],
-				LPFC_QUEUE_REARM);
+		/* Loop thru all IRQ vectors */
+		for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
+			eq = sli4_hba->hba_eq_hdl[qidx].eq;
+			/* ARM the corresponding EQ */
+			sli4_hba->sli4_write_eq_db(phba, eq,
+						   0, LPFC_QUEUE_REARM);
 		}
 	}
 
-	if (phba->cfg_fof)
-		sli4_hba->sli4_eq_release(sli4_hba->fof_eq, LPFC_QUEUE_REARM);
+	if (phba->nvmet_support) {
+		for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
+			sli4_hba->sli4_write_cq_db(phba,
+				sli4_hba->nvmet_cqset[qidx], 0,
+				LPFC_QUEUE_REARM);
+		}
+	}
 }
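
The reworked arming path above walks one CQ per hardware queue and one EQ per IRQ vector instead of the separate FCP/NVME/OAS arrays; note that a doorbell write with a count of 0 and LPFC_QUEUE_REARM only re-enables the interrupt, returning no completion credits. A compact sketch of the unified walk (struct q, write_cq_db, write_eq_db and rearm_all are hypothetical):

	struct q;
	extern void write_cq_db(struct q *cq, unsigned int count, int arm);
	extern void write_eq_db(struct q *eq, unsigned int count, int arm);

	#define REARM	1

	static void rearm_all(struct q **io_cqs, int ncq, struct q **eqs, int neq)
	{
		int i;

		/* count 0 + REARM: re-enable the interrupt, return no credits */
		for (i = 0; i < ncq; i++)
			write_cq_db(io_cqs[i], 0, REARM);
		for (i = 0; i < neq; i++)
			write_eq_db(eqs[i], 0, REARM);
	}
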
 
 /**
@@ -5978,11 +6002,8 @@
 		list_add_tail(&rsrc_blks->list, ext_blk_list);
 		rsrc_start = rsrc_id;
 		if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
-			phba->sli4_hba.scsi_xri_start = rsrc_start +
+			phba->sli4_hba.io_xri_start = rsrc_start +
 				lpfc_sli4_get_iocb_cnt(phba);
-			phba->sli4_hba.nvme_xri_start =
-				phba->sli4_hba.scsi_xri_start +
-				phba->sli4_hba.scsi_xri_max;
 		}
 
 		while (rsrc_id < (rsrc_start + rsrc_size)) {
@@ -6152,6 +6173,290 @@
 }
 
 /**
+ * lpfc_ras_stop_fwlog - Disable FW logging by the adapter
+ * @phba: Pointer to HBA context object.
+ *
+ * Disable FW logging into host memory on the adapter. This must be done
+ * before reading the logs from host memory.
+ **/
+void
+lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
+{
+	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+
+	ras_fwlog->ras_active = false;
+
+	/* Disable FW logging to host memory */
+	writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
+	       phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
+}
+
+/**
+ * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called to free memory allocated for RAS FW logging
+ * support in the driver.
+ **/
+void
+lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
+{
+	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+	struct lpfc_dmabuf *dmabuf, *next;
+
+	if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
+		list_for_each_entry_safe(dmabuf, next,
+				    &ras_fwlog->fwlog_buff_list,
+				    list) {
+			list_del(&dmabuf->list);
+			dma_free_coherent(&phba->pcidev->dev,
+					  LPFC_RAS_MAX_ENTRY_SIZE,
+					  dmabuf->virt, dmabuf->phys);
+			kfree(dmabuf);
+		}
+	}
+
+	if (ras_fwlog->lwpd.virt) {
+		dma_free_coherent(&phba->pcidev->dev,
+				  sizeof(uint32_t) * 2,
+				  ras_fwlog->lwpd.virt,
+				  ras_fwlog->lwpd.phys);
+		ras_fwlog->lwpd.virt = NULL;
+	}
+
+	ras_fwlog->ras_active = false;
+}
+
+/**
+ * lpfc_sli4_ras_dma_alloc - Allocate DMA memory for FW logging support
+ * @phba: Pointer to HBA context object.
+ * @fwlog_buff_count: Count of buffers to be created.
+ *
+ * This routine allocates DMA memory for the Log Write Position Data (LWPD)
+ * and for the buffers posted to the adapter to receive the FW log.
+ * The buffer count is calculated from the module param ras_fwlog_buffsize;
+ * the size of each buffer posted to the FW is 64K.
+ **/
+static int
+lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
+			uint32_t fwlog_buff_count)
+{
+	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+	struct lpfc_dmabuf *dmabuf;
+	int rc = 0, i = 0;
+
+	/* Initialize List */
+	INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
+
+	/* Allocate memory for the LWPD */
+	ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
+					    sizeof(uint32_t) * 2,
+					    &ras_fwlog->lwpd.phys,
+					    GFP_KERNEL);
+	if (!ras_fwlog->lwpd.virt) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"6185 LWPD Memory Alloc Failed\n");
+
+		return -ENOMEM;
+	}
+
+	ras_fwlog->fw_buffcount = fwlog_buff_count;
+	for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
+		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
+				 GFP_KERNEL);
+		if (!dmabuf) {
+			rc = -ENOMEM;
+			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+					"6186 Memory Alloc failed FW logging\n");
+			goto free_mem;
+		}
+
+		dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+						  LPFC_RAS_MAX_ENTRY_SIZE,
+						  &dmabuf->phys, GFP_KERNEL);
+		if (!dmabuf->virt) {
+			kfree(dmabuf);
+			rc = -ENOMEM;
+			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+					"6187 DMA Alloc Failed FW logging\n");
+			goto free_mem;
+		}
+		dmabuf->buffer_tag = i;
+		list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
+	}
+
+free_mem:
+	if (rc)
+		lpfc_sli4_ras_dma_free(phba);
+
+	return rc;
+}
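
The allocation loop above relies on a single teardown routine for partial failure: whenever any step fails, it calls lpfc_sli4_ras_dma_free(), which only walks whatever was actually linked into fwlog_buff_list, so the allocator can bail out at any point without bespoke unwind code. A self-contained userspace sketch of the same pattern (struct buf, alloc_all and free_all are hypothetical):

	#include <stdlib.h>

	struct buf {
		struct buf *next;
		void *mem;
	};

	/* safe on a partially built list: frees only what was linked in */
	static void free_all(struct buf **head)
	{
		struct buf *b;

		while ((b = *head) != NULL) {
			*head = b->next;
			free(b->mem);
			free(b);
		}
	}

	static int alloc_all(struct buf **head, int n, size_t sz)
	{
		int i;

		for (i = 0; i < n; i++) {
			struct buf *b = calloc(1, sizeof(*b));

			if (!b || !(b->mem = malloc(sz))) {
				free(b);	/* free(NULL) is a no-op */
				free_all(head);	/* one teardown for any failure */
				return -1;
			}
			b->next = *head;
			*head = b;
		}
		return 0;
	}
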
+
+/**
+ * lpfc_sli4_ras_mbox_cmpl - Completion handler for RAS MBX command
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * Completion handler for driver's RAS MBX command to the device.
+ **/
+static void
+lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	MAILBOX_t *mb;
+	union lpfc_sli4_cfg_shdr *shdr;
+	uint32_t shdr_status, shdr_add_status;
+	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+
+	mb = &pmb->u.mb;
+
+	shdr = (union lpfc_sli4_cfg_shdr *)
+		&pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+
+	if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+				"6188 FW LOG mailbox "
+				"completed with status x%x add_status x%x,"
+				" mbx status x%x\n",
+				shdr_status, shdr_add_status, mb->mbxStatus);
+
+		ras_fwlog->ras_hwsupport = false;
+		goto disable_ras;
+	}
+
+	ras_fwlog->ras_active = true;
+	mempool_free(pmb, phba->mbox_mem_pool);
+
+	return;
+
+disable_ras:
+	/* Free RAS DMA memory */
+	lpfc_sli4_ras_dma_free(phba);
+	mempool_free(pmb, phba->mbox_mem_pool);
+}
+
+/**
+ * lpfc_sli4_ras_fwlog_init - Initialize memory and post RAS MBX command
+ * @phba: pointer to lpfc hba data structure.
+ * @fwlog_level: Logging verbosity level.
+ * @fwlog_enable: Enable/Disable logging.
+ *
+ * Initialize memory and post mailbox command to enable FW logging in host
+ * memory.
+ **/
+int
+lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
+			 uint32_t fwlog_level,
+			 uint32_t fwlog_enable)
+{
+	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+	struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
+	struct lpfc_dmabuf *dmabuf;
+	LPFC_MBOXQ_t *mbox;
+	uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
+	int rc = 0;
+
+	fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
+			  phba->cfg_ras_fwlog_buffsize);
+	fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
+
+	/*
+	 * If re-enabling FW logging support, use the earlier allocated
+	 * DMA buffers while posting the MBX command.
+	 */
+	if (!ras_fwlog->lwpd.virt) {
+		rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
+		if (rc) {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+					"6189 FW Log Memory Allocation Failed\n");
+			return rc;
+		}
+	}
+
+	/* Setup Mailbox command */
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"6190 RAS MBX Alloc Failed\n");
+		rc = -ENOMEM;
+		goto mem_free;
+	}
+
+	ras_fwlog->fw_loglevel = fwlog_level;
+	len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
+		sizeof(struct lpfc_sli4_cfg_mhdr));
+
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
+			 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
+			 len, LPFC_SLI4_MBX_EMBED);
+
+	mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
+	bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
+	       fwlog_enable);
+	bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
+	       ras_fwlog->fw_loglevel);
+	bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
+	       ras_fwlog->fw_buffcount);
+	bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
+	       LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);
+
+	/* Update DMA buffer address */
+	list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
+		memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
+
+		mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
+			putPaddrLow(dmabuf->phys);
+
+		mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
+			putPaddrHigh(dmabuf->phys);
+	}
+
+	/* Update LWPD address */
+	mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
+	mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
+
+	mbox->vport = phba->pport;
+	mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
+
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+
+	if (rc == MBX_NOT_FINISHED) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"6191 FW-Log Mailbox failed. "
+				"status %d mbxStatus: x%x\n", rc,
+				bf_get(lpfc_mqe_status, &mbox->u.mqe));
+		mempool_free(mbox, phba->mbox_mem_pool);
+		rc = -EIO;
+		goto mem_free;
+	} else
+		rc = 0;
+mem_free:
+	if (rc)
+		lpfc_sli4_ras_dma_free(phba);
+
+	return rc;
+}
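
A worked example of the buffer sizing computed at the top of lpfc_sli4_ras_fwlog_init(), assuming the usual definitions of LPFC_RAS_MIN_BUFF_POST_SIZE (256K) and LPFC_RAS_MAX_ENTRY_SIZE (64K); check the lpfc headers before relying on these exact values:

	#include <stdio.h>

	#define MIN_BUFF_POST_SIZE	(256 * 1024)	/* assumed value */
	#define MAX_ENTRY_SIZE		(64 * 1024)	/* assumed value */

	int main(void)
	{
		unsigned int cfg_ras_fwlog_buffsize = 2;	/* module parameter */
		unsigned int buffsize = MIN_BUFF_POST_SIZE * cfg_ras_fwlog_buffsize;
		unsigned int entries = buffsize / MAX_ENTRY_SIZE;

		/* cfg=2 -> 512K of log space posted as 8 buffers of 64K */
		printf("%u bytes in %u buffers\n", buffsize, entries);
		return 0;
	}
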
+
+/**
+ * lpfc_sli4_ras_setup - Check if RAS supported on the adapter
+ * @phba: Pointer to HBA context object.
+ *
+ * Check if RAS is supported on the adapter and initialize it.
+ **/
+void
+lpfc_sli4_ras_setup(struct lpfc_hba *phba)
+{
+	/* Check RAS FW Log needs to be enabled or not */
+	if (lpfc_check_fwlog_support(phba))
+		return;
+
+	lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
+				 LPFC_RAS_ENABLE_LOGGING);
+}
+
+/**
  * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
  * @phba: Pointer to HBA context object.
  *
@@ -6723,7 +7028,39 @@
 	return total_cnt;
 }
 
-void
+/**
+ * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine walks the list of nvme buffers that have been allocated and
+ * reposts them to the port by using SGL block post. This is needed after a
+ * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
+ * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
+ * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers.
+ *
+ * Returns: 0 = success, non-zero failure.
+ **/
+static int
+lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
+{
+	LIST_HEAD(post_nblist);
+	int num_posted, rc = 0;
+
+	/* get all NVME buffers that need to be reposted onto a local list */
+	lpfc_io_buf_flush(phba, &post_nblist);
+
+	/* post the list of nvme buffer sgls to port if available */
+	if (!list_empty(&post_nblist)) {
+		num_posted = lpfc_sli4_post_io_sgl_list(
+			phba, &post_nblist, phba->sli4_hba.io_xri_cnt);
+		/* failed to post any nvme buffer, return error */
+		if (num_posted == 0)
+			rc = -EIO;
+	}
+	return rc;
+}
+
+static void
 lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
 {
 	uint32_t len;
@@ -6811,7 +7148,7 @@
 int
 lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 {
-	int rc, i, cnt;
+	int rc, i, cnt, len;
 	LPFC_MBOXQ_t *mboxq;
 	struct lpfc_mqe *mqe;
 	uint8_t *vpd;
@@ -6874,7 +7211,7 @@
 	else
 		phba->hba_flag &= ~HBA_FIP_SUPPORT;
 
-	phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH;
+	phba->hba_flag &= ~HBA_IOQ_FLUSH;
 
 	if (phba->sli_rev != LPFC_SLI_REV4) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
@@ -6906,6 +7243,12 @@
 				"3080 Successful retrieving SLI4 device "
 				"physical port name: %s.\n", phba->Port);
 
+	rc = lpfc_sli4_get_ctl_attr(phba);
+	if (!rc)
+		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+				"8351 Successful retrieving SLI4 device "
+				"CTL ATTR\n");
+
 	/*
 	 * Evaluate the read rev and vpd data. Populate the driver
 	 * state with the results. If this routine fails, the failure
@@ -7087,7 +7430,7 @@
 
 	mboxq->vport = vport;
 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
-	mp = (struct lpfc_dmabuf *) mboxq->context1;
+	mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
 	if (rc == MBX_SUCCESS) {
 		memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
 		rc = 0;
@@ -7099,7 +7442,7 @@
 	 */
 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
 	kfree(mp);
-	mboxq->context1 = NULL;
+	mboxq->ctx_buf = NULL;
 	if (unlikely(rc)) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
 				"0382 READ_SPARAM command failed "
@@ -7184,17 +7527,8 @@
 		/* We need 1 iocbq for every SGL, for IO processing */
 		cnt += phba->sli4_hba.nvmet_xri_cnt;
 	} else {
-		/* update host scsi xri-sgl sizes and mappings */
-		rc = lpfc_sli4_scsi_sgl_update(phba);
-		if (unlikely(rc)) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
-					"6309 Failed to update scsi-sgl size "
-					"and mapping: %d\n", rc);
-			goto out_destroy_queue;
-		}
-
-		/* update host nvme xri-sgl sizes and mappings */
-		rc = lpfc_sli4_nvme_sgl_update(phba);
+		/* update host common xri-sgl sizes and mappings */
+		rc = lpfc_sli4_io_sgl_update(phba);
 		if (unlikely(rc)) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
 					"6082 Failed to update nvme-sgl size "
@@ -7202,6 +7536,17 @@
 			goto out_destroy_queue;
 		}
 
+		/* register the allocated common sgl pool to the port */
+		rc = lpfc_sli4_repost_io_sgl_list(phba);
+		if (unlikely(rc)) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+					"6116 Error %d during nvme sgl post "
+					"operation\n", rc);
+			/* Some NVME buffers were moved to abort nvme list */
+			/* A pci function reset will repost them */
+			rc = -ENODEV;
+			goto out_destroy_queue;
+		}
 		cnt = phba->cfg_iocb_cnt * 1024;
 	}
 
@@ -7238,36 +7583,6 @@
 		}
 	}
 
-	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
-		/* register the allocated scsi sgl pool to the port */
-		rc = lpfc_sli4_repost_scsi_sgl_list(phba);
-		if (unlikely(rc)) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
-					"0383 Error %d during scsi sgl post "
-					"operation\n", rc);
-			/* Some Scsi buffers were moved to abort scsi list */
-			/* A pci function reset will repost them */
-			rc = -ENODEV;
-			goto out_destroy_queue;
-		}
-	}
-
-	if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
-	    (phba->nvmet_support == 0)) {
-
-		/* register the allocated nvme sgl pool to the port */
-		rc = lpfc_repost_nvme_sgl_list(phba);
-		if (unlikely(rc)) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
-					"6116 Error %d during nvme sgl post "
-					"operation\n", rc);
-			/* Some NVME buffers were moved to abort nvme list */
-			/* A pci function reset will repost them */
-			rc = -ENODEV;
-			goto out_destroy_queue;
-		}
-	}
-
 	/* Post the rpi header region to the device. */
 	rc = lpfc_sli4_post_all_rpi_hdrs(phba);
 	if (unlikely(rc)) {
@@ -7317,11 +7632,24 @@
 		lpfc_sli_read_link_ste(phba);
 	}
 
-	/* Arm the CQs and then EQs on device */
-	lpfc_sli4_arm_cqeq_intr(phba);
+	/* Don't post more new bufs if repost already recovered
+	 * the nvme sgls.
+	 */
+	if (phba->nvmet_support == 0) {
+		if (phba->sli4_hba.io_xri_cnt == 0) {
+			len = lpfc_new_io_buf(
+					      phba, phba->sli4_hba.io_xri_max);
+			if (len == 0) {
+				rc = -ENOMEM;
+				goto out_unset_queue;
+			}
 
-	/* Indicate device interrupt mode */
-	phba->sli4_hba.intr_enable = 1;
+			if (phba->cfg_xri_rebalancing)
+				lpfc_create_multixri_pools(phba);
+		}
+	} else {
+		phba->cfg_xri_rebalancing = 0;
+	}
 
 	/* Allow asynchronous mailbox command to go through */
 	spin_lock_irq(&phba->hbalock);
@@ -7345,6 +7673,11 @@
 	phba->hb_outstanding = 0;
 	phba->last_completion_time = jiffies;
 
+	/* start eq_delay heartbeat */
+	if (phba->cfg_auto_imax)
+		queue_delayed_work(phba->wq, &phba->eq_delay_work,
+				   msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
+
 	/* Start error attention (ERATT) polling timer */
 	mod_timer(&phba->eratt_poll,
 		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
@@ -7374,7 +7707,24 @@
 	 */
 	spin_lock_irq(&phba->hbalock);
 	phba->link_state = LPFC_LINK_DOWN;
+
+	/* Check if physical ports are trunked */
+	if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
+		phba->trunk_link.link0.state = LPFC_LINK_DOWN;
+	if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
+		phba->trunk_link.link1.state = LPFC_LINK_DOWN;
+	if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
+		phba->trunk_link.link2.state = LPFC_LINK_DOWN;
+	if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
+		phba->trunk_link.link3.state = LPFC_LINK_DOWN;
 	spin_unlock_irq(&phba->hbalock);
+
+	/* Arm the CQs and then EQs on device */
+	lpfc_sli4_arm_cqeq_intr(phba);
+
+	/* Indicate device interrupt mode */
+	phba->sli4_hba.intr_enable = 1;
+
 	if (!(phba->hba_flag & HBA_FCOE_MODE) &&
 	    (phba->hba_flag & LINK_DISABLED)) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
@@ -7385,18 +7735,21 @@
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
 					"3104 Adapter failed to issue "
 					"DOWN_LINK mbox cmd, rc:x%x\n", rc);
-			goto out_unset_queue;
+			goto out_io_buff_free;
 		}
 	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
 		/* don't perform init_link on SLI4 FC port loopback test */
 		if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
 			rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
 			if (rc)
-				goto out_unset_queue;
+				goto out_io_buff_free;
 		}
 	}
 	mempool_free(mboxq, phba->mbox_mem_pool);
 	return rc;
+out_io_buff_free:
+	/* Free allocated IO Buffers */
+	lpfc_io_free(phba);
 out_unset_queue:
 	/* Unset all the queues set up in this routine when error out */
 	lpfc_sli4_queue_unset(phba);
@@ -7466,8 +7819,9 @@
 	mcq = phba->sli4_hba.mbx_cq;
 	idx = mcq->hba_index;
 	qe_valid = mcq->qe_valid;
-	while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe) == qe_valid) {
-		mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe;
+	while (bf_get_le32(lpfc_cqe_valid,
+	       (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
+		mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
 		if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
 		    (!bf_get_le32(lpfc_trailer_async, mcqe))) {
 			pending_completions = true;
@@ -7496,27 +7850,28 @@
  * and will process all the completions associated with the eq for the
  * mailbox completion queue.
  **/
-bool
+static bool
 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
 {
 	struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
 	uint32_t eqidx;
 	struct lpfc_queue *fpeq = NULL;
-	struct lpfc_eqe *eqe;
+	struct lpfc_queue *eq;
 	bool mbox_pending;
 
 	if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
 		return false;
 
-	/* Find the eq associated with the mcq */
-
-	if (sli4_hba->hba_eq)
-		for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++)
-			if (sli4_hba->hba_eq[eqidx]->queue_id ==
-			    sli4_hba->mbx_cq->assoc_qid) {
-				fpeq = sli4_hba->hba_eq[eqidx];
+	/* Find the EQ associated with the mbox CQ */
+	if (sli4_hba->hdwq) {
+		for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
+			eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
+			if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
+				fpeq = eq;
 				break;
 			}
+		}
+	}
 	if (!fpeq)
 		return false;
 
@@ -7536,14 +7891,11 @@
 	 */
 
 	if (mbox_pending)
-		while ((eqe = lpfc_sli4_eq_get(fpeq))) {
-			lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx);
-			fpeq->EQ_processed++;
-		}
-
-	/* Always clear and re-arm the EQ */
-
-	sli4_hba->sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
+		/* process and rearm the EQ */
+		lpfc_sli4_process_eq(phba, fpeq);
+	else
+		/* Always clear and re-arm the EQ */
+		sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
 
 	return mbox_pending;
 
@@ -7588,7 +7940,7 @@
 
 	/* Mbox cmd <mbxCommand> timeout */
 	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
-			"0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
+			"0310 Mailbox command x%x timeout Data: x%x x%x x%px\n",
 			mb->mbxCommand,
 			phba->pport->port_state,
 			phba->sli.sli_flag,
@@ -7858,10 +8210,10 @@
 		}
 
 		/* Copy the mailbox extension data */
-		if (pmbox->in_ext_byte_len && pmbox->context2) {
-			lpfc_sli_pcimem_bcopy(pmbox->context2,
-				(uint8_t *)phba->mbox_ext,
-				pmbox->in_ext_byte_len);
+		if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
+			lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
+					      (uint8_t *)phba->mbox_ext,
+					      pmbox->in_ext_byte_len);
 		}
 		/* Copy command data to host SLIM area */
 		lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
@@ -7872,10 +8224,10 @@
 				= MAILBOX_HBA_EXT_OFFSET;
 
 		/* Copy the mailbox extension data */
-		if (pmbox->in_ext_byte_len && pmbox->context2)
+		if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
 			lpfc_memcpy_to_slim(phba->MBslimaddr +
 				MAILBOX_HBA_EXT_OFFSET,
-				pmbox->context2, pmbox->in_ext_byte_len);
+				pmbox->ctx_buf, pmbox->in_ext_byte_len);
 
 		if (mbx->mbxCommand == MBX_CONFIG_PORT)
 			/* copy command data into host mbox for cmpl */
@@ -7998,9 +8350,9 @@
 			lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
 						MAILBOX_CMD_SIZE);
 			/* Copy the mailbox extension data */
-			if (pmbox->out_ext_byte_len && pmbox->context2) {
+			if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
 				lpfc_sli_pcimem_bcopy(phba->mbox_ext,
-						      pmbox->context2,
+						      pmbox->ctx_buf,
 						      pmbox->out_ext_byte_len);
 			}
 		} else {
@@ -8008,8 +8360,9 @@
 			lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
 						MAILBOX_CMD_SIZE);
 			/* Copy the mailbox extension data */
-			if (pmbox->out_ext_byte_len && pmbox->context2) {
-				lpfc_memcpy_from_slim(pmbox->context2,
+			if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
+				lpfc_memcpy_from_slim(
+					pmbox->ctx_buf,
 					phba->MBslimaddr +
 					MAILBOX_HBA_EXT_OFFSET,
 					pmbox->out_ext_byte_len);
@@ -8149,7 +8502,7 @@
 		bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
 		db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
 		if (!db_ready)
-			msleep(2);
+			mdelay(2);
 
 		if (time_after(jiffies, timeout))
 			return MBXERR_ERROR;
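
The msleep(2) to mdelay(2) switch above is about context, not speed: this bootstrap-mailbox poll can run where sleeping is not allowed, and msleep() both sleeps and rounds the delay up to jiffies granularity, whereas mdelay() busy-waits and is atomic-safe. A minimal sketch of the same polling shape (wait_ready and the ready-bit mask are hypothetical):

	#include <linux/delay.h>
	#include <linux/errno.h>
	#include <linux/io.h>
	#include <linux/jiffies.h>

	#define READY_BIT	0x1	/* hypothetical ready bit */

	static int wait_ready(void __iomem *reg, unsigned long timeout_ms)
	{
		unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

		while (!(readl(reg) & READY_BIT)) {
			if (time_after(jiffies, timeout))
				return -ETIMEDOUT;
			mdelay(2);	/* busy-wait: safe in atomic context */
		}
		return 0;
	}
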
@@ -8212,7 +8565,6 @@
 	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
 	if (rc)
 		goto exit;
-
 	/*
 	 * Initialize the bootstrap memory region to avoid stale data areas
 	 * in the mailbox post.  Then copy the caller's mailbox contents to
@@ -8949,11 +9301,9 @@
 		memset(wqe, 0, sizeof(union lpfc_wqe128));
 	/* Some of the fields are in the right position already */
 	memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
-	if (iocbq->iocb.ulpCommand != CMD_SEND_FRAME) {
-		/* The ct field has moved so reset */
-		wqe->generic.wqe_com.word7 = 0;
-		wqe->generic.wqe_com.word10 = 0;
-	}
+	/* The ct field has moved so reset */
+	wqe->generic.wqe_com.word7 = 0;
+	wqe->generic.wqe_com.word10 = 0;
 
 	abort_tag = (uint32_t) iocbq->iotag;
 	xritag = iocbq->sli4_xritag;
@@ -9023,6 +9373,7 @@
 		if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
 			if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
 				*pcmd == ELS_CMD_SCR ||
+				*pcmd == ELS_CMD_RSCN_XMT ||
 				*pcmd == ELS_CMD_FDISC ||
 				*pcmd == ELS_CMD_LOGO ||
 				*pcmd == ELS_CMD_PLOGI)) {
@@ -9062,6 +9413,7 @@
 		cmnd = CMD_XMIT_SEQUENCE64_CR;
 		if (phba->link_flag & LS_LOOPBACK_MODE)
 			bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
+		/* fall through */
 	case CMD_XMIT_SEQUENCE64_CR:
 		/* word3 iocb=io_tag32 wqe=reserved */
 		wqe->xmit_sequence.rsvd3 = 0;
@@ -9130,7 +9482,7 @@
 			bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
 
 		if (phba->fcp_embed_io) {
-			struct lpfc_scsi_buf *lpfc_cmd;
+			struct lpfc_io_buf *lpfc_cmd;
 			struct sli4_sge *sgl;
 			struct fcp_cmnd *fcp_cmnd;
 			uint32_t *ptr;
@@ -9138,7 +9490,7 @@
 			/* 128 byte wqe support here */
 
 			lpfc_cmd = iocbq->context1;
-			sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
+			sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
 			fcp_cmnd = lpfc_cmd->fcp_cmnd;
 
 			/* Word 0-2 - FCP_CMND */
@@ -9194,7 +9546,7 @@
 			bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
 
 		if (phba->fcp_embed_io) {
-			struct lpfc_scsi_buf *lpfc_cmd;
+			struct lpfc_io_buf *lpfc_cmd;
 			struct sli4_sge *sgl;
 			struct fcp_cmnd *fcp_cmnd;
 			uint32_t *ptr;
@@ -9202,7 +9554,7 @@
 			/* 128 byte wqe support here */
 
 			lpfc_cmd = iocbq->context1;
-			sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
+			sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
 			fcp_cmnd = lpfc_cmd->fcp_cmnd;
 
 			/* Word 0-2 - FCP_CMND */
@@ -9251,7 +9603,7 @@
 		/* Note, word 10 is already initialized to 0 */
 
 		if (phba->fcp_embed_io) {
-			struct lpfc_scsi_buf *lpfc_cmd;
+			struct lpfc_io_buf *lpfc_cmd;
 			struct sli4_sge *sgl;
 			struct fcp_cmnd *fcp_cmnd;
 			uint32_t *ptr;
@@ -9259,7 +9611,7 @@
 			/* 128 byte wqe support here */
 
 			lpfc_cmd = iocbq->context1;
-			sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
+			sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
 			fcp_cmnd = lpfc_cmd->fcp_cmnd;
 
 			/* Word 0-2 - FCP_CMND */
@@ -9410,7 +9762,7 @@
 		 * we re-construct this WQE here based on information in
 		 * iocbq from scratch.
 		 */
-		memset(wqe, 0, sizeof(union lpfc_wqe));
+		memset(wqe, 0, sizeof(*wqe));
 		/* OX_ID is invariable to who sent ABTS to CT exchange */
 		bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
 		       bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
@@ -9457,6 +9809,15 @@
 
 		break;
 	case CMD_SEND_FRAME:
+		bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_SEND_FRAME);
+		bf_set(wqe_sof, &wqe->generic.wqe_com, 0x2E); /* SOF byte */
+		bf_set(wqe_eof, &wqe->generic.wqe_com, 0x41); /* EOF byte */
+		bf_set(wqe_lenloc, &wqe->generic.wqe_com, 1);
+		bf_set(wqe_xbl, &wqe->generic.wqe_com, 1);
+		bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
+		bf_set(wqe_xc, &wqe->generic.wqe_com, 1);
+		bf_set(wqe_cmd_type, &wqe->generic.wqe_com, 0xA);
+		bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
 		bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
 		bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
 		return 0;
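
The CMD_SEND_FRAME case above now programs every common-WQE field explicitly through the driver's bf_set() helpers rather than inheriting word7/word10 from the copied IOCB (which the earlier hunk stopped special-casing). The sketch below illustrates the mask/shift read-modify-write such bitfield macros perform; the wqe_sof shift, mask, and word placement are assumptions for illustration, not the lpfc_hw4.h definitions:

#include <stdint.h>
#include <stdio.h>

struct wqe_common { uint32_t word7; };

/* Assumed layout: SOF byte in bits 31:24 of word7 (illustrative only). */
#define wqe_sof_SHIFT	24
#define wqe_sof_MASK	0xFFu
#define wqe_sof_WORD	word7

/* Read-modify-write of one named bitfield, in the bf_set() style. */
#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = (((value) & name##_MASK) << name##_SHIFT) | \
			      ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT)))

int main(void)
{
	struct wqe_common com = { 0 };

	bf_set(wqe_sof, &com, 0x2E);		/* SOF byte, as in the hunk above */
	printf("word7 = 0x%08x\n", com.word7);	/* prints 0x2e000000 */
	return 0;
}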
@@ -9518,10 +9879,7 @@
 	/* Get the WQ */
 	if ((piocb->iocb_flag & LPFC_IO_FCP) ||
 	    (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
-		if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS)))
-			wq = phba->sli4_hba.fcp_wq[piocb->hba_wqidx];
-		else
-			wq = phba->sli4_hba.oas_wq;
+		wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
 	} else {
 		wq = phba->sli4_hba.els_wq;
 	}
@@ -9533,7 +9891,7 @@
 	 * The WQE can be either 64 or 128 bytes,
 	 */
 
-	lockdep_assert_held(&phba->hbalock);
+	lockdep_assert_held(&pring->ring_lock);
 
 	if (piocb->sli4_xritag == NO_XRI) {
 		if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
@@ -9655,29 +10013,20 @@
 struct lpfc_sli_ring *
 lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
 {
+	struct lpfc_io_buf *lpfc_cmd;
+
 	if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
-		if (!(phba->cfg_fof) ||
-		    (!(piocb->iocb_flag & LPFC_IO_FOF))) {
-			if (unlikely(!phba->sli4_hba.fcp_wq))
-				return NULL;
-			/*
-			 * for abort iocb hba_wqidx should already
-			 * be setup based on what work queue we used.
-			 */
-			if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
-				piocb->hba_wqidx =
-					lpfc_sli4_scmd_to_wqidx_distr(phba,
-							      piocb->context1);
-				piocb->hba_wqidx = piocb->hba_wqidx %
-					phba->cfg_fcp_io_channel;
-			}
-			return phba->sli4_hba.fcp_wq[piocb->hba_wqidx]->pring;
-		} else {
-			if (unlikely(!phba->sli4_hba.oas_wq))
-				return NULL;
-			piocb->hba_wqidx = 0;
-			return phba->sli4_hba.oas_wq->pring;
+		if (unlikely(!phba->sli4_hba.hdwq))
+			return NULL;
+		/*
+		 * for abort iocb hba_wqidx should already
+		 * be setup based on what work queue we used.
+		 */
+		if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
+			lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
+			piocb->hba_wqidx = lpfc_cmd->hdwq_no;
 		}
+		return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
 	} else {
 		if (unlikely(!phba->sli4_hba.els_wq))
 			return NULL;
@@ -9703,12 +10052,9 @@
 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
 		    struct lpfc_iocbq *piocb, uint32_t flag)
 {
-	struct lpfc_hba_eq_hdl *hba_eq_hdl;
 	struct lpfc_sli_ring *pring;
-	struct lpfc_queue *fpeq;
-	struct lpfc_eqe *eqe;
 	unsigned long iflags;
-	int rc, idx;
+	int rc;
 
 	if (phba->sli_rev == LPFC_SLI_REV4) {
 		pring = lpfc_sli4_calc_ring(phba, piocb);
@@ -9718,34 +10064,6 @@
 		spin_lock_irqsave(&pring->ring_lock, iflags);
 		rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
-
-		if (lpfc_fcp_look_ahead && (piocb->iocb_flag &  LPFC_IO_FCP)) {
-			idx = piocb->hba_wqidx;
-			hba_eq_hdl = &phba->sli4_hba.hba_eq_hdl[idx];
-
-			if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use)) {
-
-				/* Get associated EQ with this index */
-				fpeq = phba->sli4_hba.hba_eq[idx];
-
-				/* Turn off interrupts from this EQ */
-				phba->sli4_hba.sli4_eq_clr_intr(fpeq);
-
-				/*
-				 * Process all the events on FCP EQ
-				 */
-				while ((eqe = lpfc_sli4_eq_get(fpeq))) {
-					lpfc_sli4_hba_handle_eqe(phba,
-						eqe, idx);
-					fpeq->EQ_processed++;
-				}
-
-				/* Always clear and re-arm the EQ */
-				phba->sli4_hba.sli4_eq_release(fpeq,
-					LPFC_QUEUE_REARM);
-			}
-			atomic_inc(&hba_eq_hdl->hba_eq_in_use);
-		}
 	} else {
 		/* For now, SLI2/3 will still use hbalock */
 		spin_lock_irqsave(&phba->hbalock, iflags);
@@ -10160,19 +10478,11 @@
 	INIT_LIST_HEAD(&psli->mboxq);
 	INIT_LIST_HEAD(&psli->mboxq_cmpl);
 	/* Initialize list headers for txq and txcmplq as double linked lists */
-	for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
-		pring = phba->sli4_hba.fcp_wq[i]->pring;
+	for (i = 0; i < phba->cfg_hdw_queue; i++) {
+		pring = phba->sli4_hba.hdwq[i].io_wq->pring;
 		pring->flag = 0;
 		pring->ringno = LPFC_FCP_RING;
-		INIT_LIST_HEAD(&pring->txq);
-		INIT_LIST_HEAD(&pring->txcmplq);
-		INIT_LIST_HEAD(&pring->iocb_continueq);
-		spin_lock_init(&pring->ring_lock);
-	}
-	for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
-		pring = phba->sli4_hba.nvme_wq[i]->pring;
-		pring->flag = 0;
-		pring->ringno = LPFC_FCP_RING;
+		pring->txcmplq_cnt = 0;
 		INIT_LIST_HEAD(&pring->txq);
 		INIT_LIST_HEAD(&pring->txcmplq);
 		INIT_LIST_HEAD(&pring->iocb_continueq);
@@ -10181,25 +10491,17 @@
 	pring = phba->sli4_hba.els_wq->pring;
 	pring->flag = 0;
 	pring->ringno = LPFC_ELS_RING;
+	pring->txcmplq_cnt = 0;
 	INIT_LIST_HEAD(&pring->txq);
 	INIT_LIST_HEAD(&pring->txcmplq);
 	INIT_LIST_HEAD(&pring->iocb_continueq);
 	spin_lock_init(&pring->ring_lock);
 
-	if (phba->cfg_nvme_io_channel) {
+	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
 		pring = phba->sli4_hba.nvmels_wq->pring;
 		pring->flag = 0;
 		pring->ringno = LPFC_ELS_RING;
-		INIT_LIST_HEAD(&pring->txq);
-		INIT_LIST_HEAD(&pring->txcmplq);
-		INIT_LIST_HEAD(&pring->iocb_continueq);
-		spin_lock_init(&pring->ring_lock);
-	}
-
-	if (phba->cfg_fof) {
-		pring = phba->sli4_hba.oas_wq->pring;
-		pring->flag = 0;
-		pring->ringno = LPFC_FCP_RING;
+		pring->txcmplq_cnt = 0;
 		INIT_LIST_HEAD(&pring->txq);
 		INIT_LIST_HEAD(&pring->txcmplq);
 		INIT_LIST_HEAD(&pring->iocb_continueq);
@@ -10272,8 +10574,12 @@
 	LPFC_MBOXQ_t *pmb;
 	unsigned long iflag;
 
+	/* Disable softirqs, including timers from obtaining phba->hbalock */
+	local_bh_disable();
+
 	/* Flush all the mailbox commands in the mbox system */
 	spin_lock_irqsave(&phba->hbalock, iflag);
+
 	/* The pending mailbox command queue */
 	list_splice_init(&phba->sli.mboxq, &completions);
 	/* The outstanding active mailbox command */
@@ -10286,6 +10592,9 @@
 	list_splice_init(&phba->sli.mboxq_cmpl, &completions);
 	spin_unlock_irqrestore(&phba->hbalock, iflag);
 
+	/* Enable softirqs again, done with phba->hbalock */
+	local_bh_enable();
+
 	/* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
 	while (!list_empty(&completions)) {
 		list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
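
The local_bh_disable()/local_bh_enable() pair added above brackets the hbalock critical section so softirq-run timers cannot contend for the same lock while the mailbox queues are spliced. A rough userspace analogue of the bracketing order, with a pthread mutex standing in for phba->hbalock (the disable/enable calls have no userspace equivalent and appear only as comments):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hbalock = PTHREAD_MUTEX_INITIALIZER;

static void flush_mbox_queues(void)
{
	/* local_bh_disable();  keep softirq timers off this CPU first */
	pthread_mutex_lock(&hbalock);
	/* splice mboxq and mboxq_cmpl onto a private completions list */
	pthread_mutex_unlock(&hbalock);
	/* local_bh_enable();   only after the lock is dropped */
	printf("mailbox queues spliced; completions run outside the lock\n");
}

int main(void)
{
	flush_mbox_queues();
	return 0;
}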
@@ -10425,6 +10734,9 @@
 
 	lpfc_hba_down_prep(phba);
 
+	/* Disable softirqs, including timers from obtaining phba->hbalock */
+	local_bh_disable();
+
 	lpfc_fabric_abort_hba(phba);
 
 	spin_lock_irqsave(&phba->hbalock, flags);
@@ -10449,9 +10761,9 @@
 			pring = qp->pring;
 			if (!pring)
 				continue;
-			spin_lock_irq(&pring->ring_lock);
+			spin_lock(&pring->ring_lock);
 			list_splice_init(&pring->txq, &completions);
-			spin_unlock_irq(&pring->ring_lock);
+			spin_unlock(&pring->ring_lock);
 			if (pring == phba->sli4_hba.els_wq->pring) {
 				pring->flag |= LPFC_DEFERRED_RING_EVENT;
 				/* Set the lpfc data pending flag */
@@ -10478,6 +10790,9 @@
 		kfree(buf_ptr);
 	}
 
+	/* Enable softirqs again, done with phba->hbalock */
+	local_bh_enable();
+
 	/* Return any active mbox cmds */
 	del_timer_sync(&psli->mbox_tmo);
 
@@ -10629,7 +10944,7 @@
 	spin_unlock_irq(&phba->hbalock);
 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 			"0402 Cannot find virtual addr for buffer tag on "
-			"ring %d Data x%lx x%p x%p x%x\n",
+			"ring %d Data x%lx x%px x%px x%x\n",
 			pring->ringno, (unsigned long) tag,
 			slp->next, slp->prev, pring->postbufq_cnt);
 
@@ -10673,7 +10988,7 @@
 	spin_unlock_irq(&phba->hbalock);
 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 			"0410 Cannot find virtual addr for mapped buf on "
-			"ring %d Data x%llx x%p x%p x%x\n",
+			"ring %d Data x%llx x%px x%px x%x\n",
 			pring->ringno, (unsigned long long)phys,
 			slp->next, slp->prev, pring->postbufq_cnt);
 	return NULL;
@@ -10728,13 +11043,16 @@
 			abort_iocb = phba->sli.iocbq_lookup[abort_context];
 
 		lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
-				"0327 Cannot abort els iocb %p "
+				"0327 Cannot abort els iocb x%px "
 				"with tag %x context %x, abort status %x, "
 				"abort code %x\n",
 				abort_iocb, abort_iotag, abort_context,
 				irsp->ulpStatus, irsp->un.ulpWord[4]);
 
 		spin_unlock_irq(&phba->hbalock);
+		if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
+		    irsp->un.ulpWord[4] == IOERR_SLI_ABORTED)
+			lpfc_sli_release_iocbq(phba, abort_iocb);
 	}
 release_iocb:
 	lpfc_sli_release_iocbq(phba, cmdiocb);
@@ -10949,108 +11267,6 @@
 }
 
 /**
- * lpfc_sli4_abort_nvme_io - Issue abort for a command iocb
- * @phba: Pointer to HBA context object.
- * @pring: Pointer to driver SLI ring object.
- * @cmdiocb: Pointer to driver command iocb object.
- *
- * This function issues an abort iocb for the provided command iocb down to
- * the port. Other than the case the outstanding command iocb is an abort
- * request, this function issues abort out unconditionally. This function is
- * called with hbalock held. The function returns 0 when it fails due to
- * memory allocation failure or when the command iocb is an abort request.
- **/
-static int
-lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
-			struct lpfc_iocbq *cmdiocb)
-{
-	struct lpfc_vport *vport = cmdiocb->vport;
-	struct lpfc_iocbq *abtsiocbp;
-	union lpfc_wqe128 *abts_wqe;
-	int retval;
-
-	/*
-	 * There are certain command types we don't want to abort.  And we
-	 * don't want to abort commands that are already in the process of
-	 * being aborted.
-	 */
-	if (cmdiocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
-	    cmdiocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN ||
-	    (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
-		return 0;
-
-	/* issue ABTS for this io based on iotag */
-	abtsiocbp = __lpfc_sli_get_iocbq(phba);
-	if (abtsiocbp == NULL)
-		return 0;
-
-	/* This signals the response to set the correct status
-	 * before calling the completion handler
-	 */
-	cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
-
-	/* Complete prepping the abort wqe and issue to the FW. */
-	abts_wqe = &abtsiocbp->wqe;
-	bf_set(abort_cmd_ia, &abts_wqe->abort_cmd, 0);
-	bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
-
-	/* Explicitly set reserved fields to zero.*/
-	abts_wqe->abort_cmd.rsrvd4 = 0;
-	abts_wqe->abort_cmd.rsrvd5 = 0;
-
-	/* WQE Common - word 6.  Context is XRI tag.  Set 0. */
-	bf_set(wqe_xri_tag, &abts_wqe->abort_cmd.wqe_com, 0);
-	bf_set(wqe_ctxt_tag, &abts_wqe->abort_cmd.wqe_com, 0);
-
-	/* word 7 */
-	bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
-	bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
-	bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
-	       cmdiocb->iocb.ulpClass);
-
-	/* word 8 - tell the FW to abort the IO associated with this
-	 * outstanding exchange ID.
-	 */
-	abts_wqe->abort_cmd.wqe_com.abort_tag = cmdiocb->sli4_xritag;
-
-	/* word 9 - this is the iotag for the abts_wqe completion. */
-	bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
-	       abtsiocbp->iotag);
-
-	/* word 10 */
-	bf_set(wqe_wqid, &abts_wqe->abort_cmd.wqe_com, cmdiocb->hba_wqidx);
-	bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
-	bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
-
-	/* word 11 */
-	bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
-	bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
-	bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
-
-	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
-	abtsiocbp->iocb_flag |= LPFC_IO_NVME;
-	abtsiocbp->vport = vport;
-	abtsiocbp->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
-	retval = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abtsiocbp);
-	if (retval) {
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
-				 "6147 Failed abts issue_wqe with status x%x "
-				 "for oxid x%x\n",
-				 retval, cmdiocb->sli4_xritag);
-		lpfc_sli_release_iocbq(phba, abtsiocbp);
-		return retval;
-	}
-
-	lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
-			 "6148 Drv Abort NVME Request Issued for "
-			 "ox_id x%x on reqtag x%x\n",
-			 cmdiocb->sli4_xritag,
-			 abtsiocbp->iotag);
-
-	return retval;
-}
-
-/**
  * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
  * @phba: pointer to lpfc HBA data structure.
  *
@@ -11106,7 +11322,7 @@
 			   uint16_t tgt_id, uint64_t lun_id,
 			   lpfc_ctx_cmd ctx_cmd)
 {
-	struct lpfc_scsi_buf *lpfc_cmd;
+	struct lpfc_io_buf *lpfc_cmd;
 	int rc = 1;
 
 	if (iocbq->vport != vport)
@@ -11116,7 +11332,7 @@
 	    !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ))
 		return rc;
 
-	lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
+	lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
 
 	if (lpfc_cmd->pCmd == NULL)
 		return rc;
@@ -11245,7 +11461,7 @@
 	int i;
 
 	/* all I/Os are in process of being flushed */
-	if (phba->hba_flag & HBA_FCP_IOQ_FLUSH)
+	if (phba->hba_flag & HBA_IOQ_FLUSH)
 		return errcnt;
 
 	for (i = 1; i <= phba->sli.last_iotag; i++) {
@@ -11343,19 +11559,19 @@
 			uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
 {
 	struct lpfc_hba *phba = vport->phba;
-	struct lpfc_scsi_buf *lpfc_cmd;
+	struct lpfc_io_buf *lpfc_cmd;
 	struct lpfc_iocbq *abtsiocbq;
 	struct lpfc_nodelist *ndlp;
 	struct lpfc_iocbq *iocbq;
 	IOCB_t *icmd;
 	int sum, i, ret_val;
 	unsigned long iflags;
-	struct lpfc_sli_ring *pring_s4;
+	struct lpfc_sli_ring *pring_s4 = NULL;
 
 	spin_lock_irqsave(&phba->hbalock, iflags);
 
 	/* all I/Os are in process of being flushed */
-	if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
+	if (phba->hba_flag & HBA_IOQ_FLUSH) {
 		spin_unlock_irqrestore(&phba->hbalock, iflags);
 		return 0;
 	}
@@ -11368,17 +11584,46 @@
 					       cmd) != 0)
 			continue;
 
+		/* Guard against IO completion being called at same time */
+		lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
+		spin_lock(&lpfc_cmd->buf_lock);
+
+		if (!lpfc_cmd->pCmd) {
+			spin_unlock(&lpfc_cmd->buf_lock);
+			continue;
+		}
+
+		if (phba->sli_rev == LPFC_SLI_REV4) {
+			pring_s4 =
+			    phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring;
+			if (!pring_s4) {
+				spin_unlock(&lpfc_cmd->buf_lock);
+				continue;
+			}
+			/* Note: both hbalock and ring_lock must be set here */
+			spin_lock(&pring_s4->ring_lock);
+		}
+
 		/*
 		 * If the iocbq is already being aborted, don't take a second
 		 * action, but do count it.
 		 */
-		if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
+		if ((iocbq->iocb_flag & LPFC_DRIVER_ABORTED) ||
+		    !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
+			if (phba->sli_rev == LPFC_SLI_REV4)
+				spin_unlock(&pring_s4->ring_lock);
+			spin_unlock(&lpfc_cmd->buf_lock);
 			continue;
+		}
 
 		/* issue ABTS for this IOCB based on iotag */
 		abtsiocbq = __lpfc_sli_get_iocbq(phba);
-		if (abtsiocbq == NULL)
+		if (!abtsiocbq) {
+			if (phba->sli_rev == LPFC_SLI_REV4)
+				spin_unlock(&pring_s4->ring_lock);
+			spin_unlock(&lpfc_cmd->buf_lock);
 			continue;
+		}
 
 		icmd = &iocbq->iocb;
 		abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
@@ -11399,7 +11644,6 @@
 		if (iocbq->iocb_flag & LPFC_IO_FOF)
 			abtsiocbq->iocb_flag |= LPFC_IO_FOF;
 
-		lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
 		ndlp = lpfc_cmd->rdata->pnode;
 
 		if (lpfc_is_link_up(phba) &&
@@ -11418,11 +11662,6 @@
 		iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
 
 		if (phba->sli_rev == LPFC_SLI_REV4) {
-			pring_s4 = lpfc_sli4_calc_ring(phba, abtsiocbq);
-			if (!pring_s4)
-				continue;
-			/* Note: both hbalock and ring_lock must be set here */
-			spin_lock(&pring_s4->ring_lock);
 			ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
 							abtsiocbq, 0);
 			spin_unlock(&pring_s4->ring_lock);
@@ -11431,6 +11670,7 @@
 							abtsiocbq, 0);
 		}
 
+		spin_unlock(&lpfc_cmd->buf_lock);
 
 		if (ret_val == IOCB_ERROR)
 			__lpfc_sli_release_iocbq(phba, abtsiocbq);
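
In the hunk above, the abort scan now takes the per-command buf_lock before the SLI4 ring_lock so a concurrently completing I/O cannot free the buffer mid-abort, and every early continue unwinds both locks in reverse order. A compact sketch of that acquire/early-exit discipline (two pthread mutexes stand in for the driver locks):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t buf_lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns false on the early-exit paths; both locks unwind in reverse. */
static bool try_abort_one(bool already_aborted)
{
	pthread_mutex_lock(&buf_lock);		/* pin the command first */
	pthread_mutex_lock(&ring_lock);		/* then the SLI4 ring */

	if (already_aborted) {
		pthread_mutex_unlock(&ring_lock);
		pthread_mutex_unlock(&buf_lock);
		return false;			/* counted, no second abort */
	}

	/* ...issue the ABTS under both locks... */
	pthread_mutex_unlock(&ring_lock);
	pthread_mutex_unlock(&buf_lock);
	return true;
}

int main(void)
{
	printf("aborted: %d\n", try_abort_one(false));
	return 0;
}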
@@ -11465,7 +11705,7 @@
 {
 	wait_queue_head_t *pdone_q;
 	unsigned long iflags;
-	struct lpfc_scsi_buf *lpfc_cmd;
+	struct lpfc_io_buf *lpfc_cmd;
 
 	spin_lock_irqsave(&phba->hbalock, iflags);
 	if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
@@ -11494,7 +11734,7 @@
 	/* Set the exchange busy flag for task management commands */
 	if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
 		!(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
-		lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
+		lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
 			cur_iocbq);
 		lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
 	}
@@ -11781,6 +12021,9 @@
 	}
 	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
 
+	/* Disable softirqs, including timers from obtaining phba->hbalock */
+	local_bh_disable();
+
 	spin_lock_irq(&phba->hbalock);
 	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
 
@@ -11794,6 +12037,9 @@
 						1000) + jiffies;
 		spin_unlock_irq(&phba->hbalock);
 
+		/* Enable softirqs again, done with phba->hbalock */
+		local_bh_enable();
+
 		while (phba->sli.mbox_active) {
 			/* Check active mailbox complete status every 2ms */
 			msleep(2);
@@ -11803,9 +12049,13 @@
 				 */
 				break;
 		}
-	} else
+	} else {
 		spin_unlock_irq(&phba->hbalock);
 
+		/* Enable softirqs again, done with phba->hbalock */
+		local_bh_enable();
+	}
+
 	lpfc_sli_mbox_sys_flush(phba);
 }
 
@@ -12261,10 +12511,10 @@
 					lpfc_sli_pcimem_bcopy(mbox, pmbox,
 							MAILBOX_CMD_SIZE);
 					if (pmb->out_ext_byte_len &&
-						pmb->context2)
+						pmb->ctx_buf)
 						lpfc_sli_pcimem_bcopy(
 						phba->mbox_ext,
-						pmb->context2,
+						pmb->ctx_buf,
 						pmb->out_ext_byte_len);
 				}
 				if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
@@ -12279,9 +12529,9 @@
 
 					if (!pmbox->mbxStatus) {
 						mp = (struct lpfc_dmabuf *)
-							(pmb->context1);
+							(pmb->ctx_buf);
 						ndlp = (struct lpfc_nodelist *)
-							pmb->context2;
+							pmb->ctx_ndlp;
 
 						/* Reg_LOGIN of dflt RPI was
 						 * successful. new lets get
@@ -12294,8 +12544,8 @@
 							pmb);
 						pmb->mbox_cmpl =
 							lpfc_mbx_cmpl_dflt_rpi;
-						pmb->context1 = mp;
-						pmb->context2 = ndlp;
+						pmb->ctx_buf = mp;
+						pmb->ctx_ndlp = ndlp;
 						pmb->vport = vport;
 						rc = lpfc_sli_issue_mbox(phba,
 								pmb,
@@ -12558,35 +12808,6 @@
 }  /* lpfc_sli_intr_handler */
 
 /**
- * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked by the worker thread to process all the pending
- * SLI4 FCP abort XRI events.
- **/
-void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
-{
-	struct lpfc_cq_event *cq_event;
-
-	/* First, declare the fcp xri abort event has been handled */
-	spin_lock_irq(&phba->hbalock);
-	phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
-	spin_unlock_irq(&phba->hbalock);
-	/* Now, handle all the fcp xri abort events */
-	while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
-		/* Get the first event from the head of the event queue */
-		spin_lock_irq(&phba->hbalock);
-		list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
-				 cq_event, struct lpfc_cq_event, list);
-		spin_unlock_irq(&phba->hbalock);
-		/* Notify aborted XRI for FCP work queue */
-		lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
-		/* Free the event processed back to the free pool */
-		lpfc_sli4_cq_event_release(phba, cq_event);
-	}
-}
-
-/**
  * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
  * @phba: pointer to lpfc hba data structure.
  *
@@ -12756,13 +12977,11 @@
 		return NULL;
 
 	wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
-	spin_lock_irqsave(&pring->ring_lock, iflags);
 	pring->stats.iocb_event++;
 	/* Look up the ELS command IOCB and create pseudo response IOCB */
 	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
 				bf_get(lpfc_wcqe_c_request_tag, wcqe));
 	if (unlikely(!cmdiocbq)) {
-		spin_unlock_irqrestore(&pring->ring_lock, iflags);
 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
 				"0386 ELS complete with no corresponding "
 				"cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
@@ -12772,6 +12991,7 @@
 		return NULL;
 	}
 
+	spin_lock_irqsave(&pring->ring_lock, iflags);
 	/* Put the iocb back on the txcmplq */
 	lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
 	spin_unlock_irqrestore(&pring->ring_lock, iflags);
@@ -12901,16 +13121,16 @@
 				      mcqe_status,
 				      pmbox->un.varWords[0], 0);
 		if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
-			mp = (struct lpfc_dmabuf *)(pmb->context1);
-			ndlp = (struct lpfc_nodelist *)pmb->context2;
+			mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
+			ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
 			/* Reg_LOGIN of dflt RPI was successful. Now lets get
 			 * RID of the PPI using the same mbox buffer.
 			 */
 			lpfc_unreg_login(phba, vport->vpi,
 					 pmbox->un.varWords[0], pmb);
 			pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
-			pmb->context1 = mp;
-			pmb->context2 = ndlp;
+			pmb->ctx_buf = mp;
+			pmb->ctx_ndlp = ndlp;
 			pmb->vport = vport;
 			rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
 			if (rc != MBX_BUSY)
@@ -12959,11 +13179,14 @@
  * Return: true if work posted to worker thread, otherwise false.
  **/
 static bool
-lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
+lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
+			 struct lpfc_cqe *cqe)
 {
 	struct lpfc_mcqe mcqe;
 	bool workposted;
 
+	cq->CQ_mbox++;
+
 	/* Copy the mailbox MCQE and convert endian order as needed */
 	lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
 
@@ -13081,18 +13304,14 @@
 	unsigned long iflags;
 
 	switch (cq->subtype) {
-	case LPFC_FCP:
-		cq_event = lpfc_cq_event_setup(
-			phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
-		if (!cq_event)
-			return false;
-		spin_lock_irqsave(&phba->hbalock, iflags);
-		list_add_tail(&cq_event->list,
-			      &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
-		/* Set the fcp xri abort event flag */
-		phba->hba_flag |= FCP_XRI_ABORT_EVENT;
-		spin_unlock_irqrestore(&phba->hbalock, iflags);
-		workposted = true;
+	case LPFC_IO:
+		lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq);
+		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+			/* Notify aborted XRI for NVME work queue */
+			if (phba->nvmet_support)
+				lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
+		}
+		workposted = false;
 		break;
 	case LPFC_NVME_LS: /* NVME LS uses ELS resources */
 	case LPFC_ELS:
@@ -13100,6 +13319,7 @@
 			phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
 		if (!cq_event)
 			return false;
+		cq_event->hdwq = cq->hdwq;
 		spin_lock_irqsave(&phba->hbalock, iflags);
 		list_add_tail(&cq_event->list,
 			      &phba->sli4_hba.sp_els_xri_aborted_work_queue);
@@ -13108,15 +13328,6 @@
 		spin_unlock_irqrestore(&phba->hbalock, iflags);
 		workposted = true;
 		break;
-	case LPFC_NVME:
-		/* Notify aborted XRI for NVME work queue */
-		if (phba->nvmet_support)
-			lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
-		else
-			lpfc_sli4_nvme_xri_aborted(phba, wcqe);
-
-		workposted = false;
-		break;
 	default:
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"0603 Invalid CQ subtype %d: "
@@ -13129,6 +13340,8 @@
 	return workposted;
 }
 
+#define FC_RCTL_MDS_DIAGS	0xF4
+
 /**
  * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
  * @phba: Pointer to HBA context object.
@@ -13166,6 +13379,7 @@
 	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"2537 Receive Frame Truncated!!\n");
+		/* fall through */
 	case FC_STATUS_RQ_SUCCESS:
 		spin_lock_irqsave(&phba->hbalock, iflags);
 		lpfc_sli4_rq_release(hrq, drq);
@@ -13179,10 +13393,17 @@
 		hrq->RQ_buf_posted--;
 		memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
 
-		/* If a NVME LS event (type 0x28), treat it as Fast path */
 		fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
 
-		/* save off the frame for the word thread to process */
+		if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
+		    fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
+			spin_unlock_irqrestore(&phba->hbalock, iflags);
+			/* Handle MDS Loopback frames */
+			lpfc_sli4_handle_mds_loopback(phba->pport, dma_buf);
+			break;
+		}
+
+		/* save off the frame for the work thread to process */
 		list_add_tail(&dma_buf->cq_event.list,
 			      &phba->sli4_hba.sp_queue_event);
 		/* Frame received */
@@ -13221,7 +13442,7 @@
  * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
  * @phba: Pointer to HBA context object.
  * @cq: Pointer to the completion queue.
- * @wcqe: Pointer to a completion queue entry.
+ * @cqe: Pointer to a completion queue entry.
  *
  * This routine process a slow-path work-queue or receive queue completion queue
  * entry.
@@ -13313,68 +13534,135 @@
 	/* Save EQ associated with this CQ */
 	cq->assoc_qp = speq;
 
-	if (!queue_work(phba->wq, &cq->spwork))
+	if (!queue_work_on(cq->chann, phba->wq, &cq->spwork))
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"0390 Cannot schedule soft IRQ "
 				"for CQ eqcqid=%d, cqid=%d on CPU %d\n",
-				cqid, cq->queue_id, smp_processor_id());
+				cqid, cq->queue_id, raw_smp_processor_id());
+}
+
+/**
+ * __lpfc_sli4_process_cq - Process elements of a CQ
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to CQ to be processed
+ * @handler: Routine to process each cqe
+ * @delay: Pointer to usdelay to set in case of rescheduling of the handler
+ *
+ * This routine processes completion queue entries in a CQ. While a valid
+ * queue element is found, the handler is called. During processing checks
+ * are made for periodic doorbell writes to let the hardware know of
+ * element consumption.
+ *
+ * If the max limit on cqes to process is hit, or there are no more valid
+ * entries, the loop stops. If we processed a sufficient number of elements,
+ * meaning there is sufficient load, rather than rearming and generating
+ * another interrupt, a cq rescheduling delay will be set. A delay of 0
+ * indicates no rescheduling.
+ *
+ * Returns True if work scheduled, False otherwise.
+ **/
+static bool
+__lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
+	bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
+			struct lpfc_cqe *), unsigned long *delay)
+{
+	struct lpfc_cqe *cqe;
+	bool workposted = false;
+	int count = 0, consumed = 0;
+	bool arm = true;
+
+	/* default - no reschedule */
+	*delay = 0;
+
+	if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
+		goto rearm_and_exit;
+
+	/* Process all the entries to the CQ */
+	cq->q_flag = 0;
+	cqe = lpfc_sli4_cq_get(cq);
+	while (cqe) {
+		workposted |= handler(phba, cq, cqe);
+		__lpfc_sli4_consume_cqe(phba, cq, cqe);
+
+		consumed++;
+		if (!(++count % cq->max_proc_limit))
+			break;
+
+		if (!(count % cq->notify_interval)) {
+			phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
+						LPFC_QUEUE_NOARM);
+			consumed = 0;
+		}
+
+		if (count == LPFC_NVMET_CQ_NOTIFY)
+			cq->q_flag |= HBA_NVMET_CQ_NOTIFY;
+
+		cqe = lpfc_sli4_cq_get(cq);
+	}
+	if (count >= phba->cfg_cq_poll_threshold) {
+		*delay = 1;
+		arm = false;
+	}
+
+	/* Track the max number of CQEs processed in 1 EQ */
+	if (count > cq->CQ_max_cqe)
+		cq->CQ_max_cqe = count;
+
+	cq->assoc_qp->EQ_cqe_cnt += count;
+
+	/* Catch the no cq entry condition */
+	if (unlikely(count == 0))
+		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+				"0369 No entry from completion queue "
+				"qid=%d\n", cq->queue_id);
+
+	cq->queue_claimed = 0;
+
+rearm_and_exit:
+	phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
+			arm ?  LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);
+
+	return workposted;
 }
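
Two mechanisms in the new __lpfc_sli4_process_cq() are worth noting: the cmpxchg() on queue_claimed guarantees that a single context drains the CQ at a time, and a doorbell write every notify_interval entries releases consumed CQEs back to the hardware before the loop ends. A self-contained C11 sketch of both, with atomic_compare_exchange_strong() standing in for cmpxchg() and an arbitrary interval:

#include <stdatomic.h>
#include <stdio.h>

#define NOTIFY_INTERVAL 4	/* arbitrary stand-in for cq->notify_interval */

static atomic_int queue_claimed;

static void process_cq(int pending)
{
	int expected = 0, count = 0, consumed = 0;

	/* Single-claimer guard: lose the race -> just ring the doorbell. */
	if (!atomic_compare_exchange_strong(&queue_claimed, &expected, 1))
		goto rearm_and_exit;

	while (count < pending) {
		/* handler(cqe) would run here */
		consumed++;
		if (!(++count % NOTIFY_INTERVAL)) {
			printf("doorbell: release %d CQEs, no rearm\n", consumed);
			consumed = 0;
		}
	}
	atomic_store(&queue_claimed, 0);

rearm_and_exit:
	printf("doorbell: release %d CQEs, rearm\n", consumed);
}

int main(void)
{
	process_cq(10);
	return 0;
}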
 
 /**
  * lpfc_sli4_sp_process_cq - Process a slow-path event queue entry
- * @phba: Pointer to HBA context object.
+ * @cq: pointer to CQ to process
  *
- * This routine process a event queue entry from the slow-path event queue.
- * It will check the MajorCode and MinorCode to determine this is for a
- * completion event on a completion queue, if not, an error shall be logged
- * and just return. Otherwise, it will get to the corresponding completion
- * queue and process all the entries on that completion queue, rearm the
- * completion queue, and then return.
+ * This routine calls the cq processing routine with a handler specific
+ * to the type of queue bound to it.
  *
+ * The CQ routine returns two values: the first is the calling status,
+ * which indicates whether work was queued to the background discovery
+ * thread. If true, the routine should wakeup the discovery thread;
+ * the second is the delay parameter. If non-zero, rather than rearming
+ * the CQ and yet another interrupt, the CQ handler should be queued so
+ * that it is processed in a subsequent polling action. The value of
+ * the delay indicates when to reschedule it.
  **/
 static void
-lpfc_sli4_sp_process_cq(struct work_struct *work)
+__lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
 {
-	struct lpfc_queue *cq =
-		container_of(work, struct lpfc_queue, spwork);
 	struct lpfc_hba *phba = cq->phba;
-	struct lpfc_cqe *cqe;
+	unsigned long delay;
 	bool workposted = false;
-	int ccount = 0;
 
-	/* Process all the entries to the CQ */
+	/* Process and rearm the CQ */
 	switch (cq->type) {
 	case LPFC_MCQ:
-		while ((cqe = lpfc_sli4_cq_get(cq))) {
-			workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
-			if (!(++ccount % cq->entry_repost))
-				break;
-			cq->CQ_mbox++;
-		}
+		workposted |= __lpfc_sli4_process_cq(phba, cq,
+						lpfc_sli4_sp_handle_mcqe,
+						&delay);
 		break;
 	case LPFC_WCQ:
-		while ((cqe = lpfc_sli4_cq_get(cq))) {
-			if (cq->subtype == LPFC_FCP ||
-			    cq->subtype == LPFC_NVME) {
-#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-				if (phba->ktime_on)
-					cq->isr_timestamp = ktime_get_ns();
-				else
-					cq->isr_timestamp = 0;
-#endif
-				workposted |= lpfc_sli4_fp_handle_cqe(phba, cq,
-								       cqe);
-			} else {
-				workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
-								      cqe);
-			}
-			if (!(++ccount % cq->entry_repost))
-				break;
-		}
-
-		/* Track the max number of CQEs processed in 1 EQ */
-		if (ccount > cq->CQ_max_cqe)
-			cq->CQ_max_cqe = ccount;
+		if (cq->subtype == LPFC_IO)
+			workposted |= __lpfc_sli4_process_cq(phba, cq,
+						lpfc_sli4_fp_handle_cqe,
+						&delay);
+		else
+			workposted |= __lpfc_sli4_process_cq(phba, cq,
+						lpfc_sli4_sp_handle_cqe,
+						&delay);
 		break;
 	default:
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -13383,14 +13671,14 @@
 		return;
 	}
 
-	/* Catch the no cq entry condition, log an error */
-	if (unlikely(ccount == 0))
-		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-				"0371 No entry from the CQ: identifier "
-				"(x%x), type (%d)\n", cq->queue_id, cq->type);
-
-	/* In any case, flash and re-arm the RCQ */
-	phba->sli4_hba.sli4_cq_release(cq, LPFC_QUEUE_REARM);
+	if (delay) {
+		if (!queue_delayed_work_on(cq->chann, phba->wq,
+					   &cq->sched_spwork, delay))
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0394 Cannot schedule soft IRQ "
+				"for cqid=%d on CPU %d\n",
+				cq->queue_id, cq->chann);
+	}
 
 	/* wake up worker thread if there are works to be done */
 	if (workposted)
@@ -13398,6 +13686,36 @@
 }
 
 /**
+ * lpfc_sli4_sp_process_cq - slow-path work handler when started by
+ *   interrupt
+ * @work: pointer to work element
+ *
+ * translates from the work handler and calls the slow-path handler.
+ **/
+static void
+lpfc_sli4_sp_process_cq(struct work_struct *work)
+{
+	struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);
+
+	__lpfc_sli4_sp_process_cq(cq);
+}
+
+/**
+ * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer
+ * @work: pointer to work element
+ *
+ * translates from the work handler and calls the slow-path handler.
+ **/
+static void
+lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
+{
+	struct lpfc_queue *cq = container_of(to_delayed_work(work),
+					struct lpfc_queue, sched_spwork);
+
+	__lpfc_sli4_sp_process_cq(cq);
+}
+
+/**
  * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
  * @phba: Pointer to HBA context object.
  * @cq: Pointer to associated CQ
@@ -13438,9 +13756,9 @@
 	/* Look up the FCP command IOCB and create pseudo response IOCB */
 	spin_lock_irqsave(&pring->ring_lock, iflags);
 	pring->stats.iocb_event++;
+	spin_unlock_irqrestore(&pring->ring_lock, iflags);
 	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
 				bf_get(lpfc_wcqe_c_request_tag, wcqe));
-	spin_unlock_irqrestore(&pring->ring_lock, iflags);
 	if (unlikely(!cmdiocbq)) {
 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
 				"0374 FCP complete with no corresponding "
@@ -13568,7 +13886,7 @@
 	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"6126 Receive Frame Truncated!!\n");
-		/* Drop thru */
+		/* fall through */
 	case FC_STATUS_RQ_SUCCESS:
 		spin_lock_irqsave(&phba->hbalock, iflags);
 		lpfc_sli4_rq_release(hrq, drq);
@@ -13594,14 +13912,14 @@
 			goto drop;
 
 		if (fc_hdr->fh_type == FC_TYPE_FCP) {
-			dma_buf->bytes_recv = bf_get(lpfc_rcqe_length,  rcqe);
+			dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
 			lpfc_nvmet_unsol_fcp_event(
-				phba, idx, dma_buf,
-				cq->isr_timestamp);
+				phba, idx, dma_buf, cq->isr_timestamp,
+				cq->q_flag & HBA_NVMET_CQ_NOTIFY);
 			return false;
 		}
 drop:
-		lpfc_in_buf_free(phba, &dma_buf->dbuf);
+		lpfc_rq_buf_free(phba, &dma_buf->hbuf);
 		break;
 	case FC_STATUS_INSUFF_BUF_FRM_DISC:
 		if (phba->nvmet_support) {
@@ -13628,13 +13946,16 @@
 
 /**
  * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
+ * @phba: adapter with cq
  * @cq: Pointer to the completion queue.
  * @cqe: Pointer to fast-path completion queue entry.
  *
  * This routine processes a fast-path work queue completion entry from a
  * fast-path event queue for FCP command response completion.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
  **/
-static int
+static bool
 lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
 			 struct lpfc_cqe *cqe)
 {
@@ -13651,10 +13972,7 @@
 		cq->CQ_wq++;
 		/* Process the WQ complete event */
 		phba->last_completion_time = jiffies;
-		if ((cq->subtype == LPFC_FCP) || (cq->subtype == LPFC_NVME))
-			lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
-				(struct lpfc_wcqe_complete *)&wcqe);
-		if (cq->subtype == LPFC_NVME_LS)
+		if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS)
 			lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
 				(struct lpfc_wcqe_complete *)&wcqe);
 		break;
@@ -13701,10 +14019,11 @@
  * completion queue, and then return.
  **/
 static void
-lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
-			uint32_t qidx)
+lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
+			 struct lpfc_eqe *eqe)
 {
 	struct lpfc_queue *cq = NULL;
+	uint32_t qidx = eq->hdwq;
 	uint16_t cqid, id;
 
 	if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
@@ -13719,6 +14038,14 @@
 	/* Get the reference to the corresponding CQ */
 	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
 
+	/* Use the fast lookup method first */
+	if (cqid <= phba->sli4_hba.cq_max) {
+		cq = phba->sli4_hba.cq_lookup[cqid];
+		if (cq)
+			goto work_cq;
+	}
+
+	/* Next check for NVMET completion */
 	if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
 		id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
 		if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
@@ -13728,20 +14055,6 @@
 		}
 	}
 
-	if (phba->sli4_hba.nvme_cq_map &&
-	    (cqid == phba->sli4_hba.nvme_cq_map[qidx])) {
-		/* Process NVME / NVMET command completion */
-		cq = phba->sli4_hba.nvme_cq[qidx];
-		goto  process_cq;
-	}
-
-	if (phba->sli4_hba.fcp_cq_map &&
-	    (cqid == phba->sli4_hba.fcp_cq_map[qidx])) {
-		/* Process FCP command completion */
-		cq = phba->sli4_hba.fcp_cq[qidx];
-		goto  process_cq;
-	}
-
 	if (phba->sli4_hba.nvmels_cq &&
 	    (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
 		/* Process NVME unsol rcv */
@@ -13750,7 +14063,8 @@
 
 	/* Otherwise this is a Slow path event */
 	if (cq == NULL) {
-		lpfc_sli4_sp_handle_eqe(phba, eqe, phba->sli4_hba.hba_eq[qidx]);
+		lpfc_sli4_sp_handle_eqe(phba, eqe,
+					phba->sli4_hba.hdwq[qidx].hba_eq);
 		return;
 	}
 
@@ -13763,230 +14077,88 @@
 		return;
 	}
 
-	/* Save EQ associated with this CQ */
-	cq->assoc_qp = phba->sli4_hba.hba_eq[qidx];
-
-	if (!queue_work(phba->wq, &cq->irqwork))
+work_cq:
+#if defined(CONFIG_SCSI_LPFC_DEBUG_FS)
+	if (phba->ktime_on)
+		cq->isr_timestamp = ktime_get_ns();
+	else
+		cq->isr_timestamp = 0;
+#endif
+	if (!queue_work_on(cq->chann, phba->wq, &cq->irqwork))
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"0363 Cannot schedule soft IRQ "
 				"for CQ eqcqid=%d, cqid=%d on CPU %d\n",
-				cqid, cq->queue_id, smp_processor_id());
+				cqid, cq->queue_id, raw_smp_processor_id());
 }
 
 /**
- * lpfc_sli4_hba_process_cq - Process a fast-path event queue entry
- * @phba: Pointer to HBA context object.
- * @eqe: Pointer to fast-path event queue entry.
+ * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry
+ * @cq: Pointer to CQ to be processed
  *
- * This routine process a event queue entry from the fast-path event queue.
- * It will check the MajorCode and MinorCode to determine this is for a
- * completion event on a completion queue, if not, an error shall be logged
- * and just return. Otherwise, it will get to the corresponding completion
- * queue and process all the entries on the completion queue, rearm the
- * completion queue, and then return.
+ * This routine calls the cq processing routine with the handler for
+ * fast path CQEs.
+ *
+ * The CQ routine returns two values: the first is the calling status,
+ * which indicates whether work was queued to the background discovery
+ * thread. If true, the routine should wakeup the discovery thread;
+ * the second is the delay parameter. If non-zero, rather than rearming
+ * the CQ and yet another interrupt, the CQ handler should be queued so
+ * that it is processed in a subsequent polling action. The value of
+ * the delay indicates when to reschedule it.
  **/
 static void
-lpfc_sli4_hba_process_cq(struct work_struct *work)
+__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq)
 {
-	struct lpfc_queue *cq =
-		container_of(work, struct lpfc_queue, irqwork);
 	struct lpfc_hba *phba = cq->phba;
-	struct lpfc_cqe *cqe;
+	unsigned long delay;
 	bool workposted = false;
-	int ccount = 0;
 
-	/* Process all the entries to the CQ */
-	while ((cqe = lpfc_sli4_cq_get(cq))) {
-#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-		if (phba->ktime_on)
-			cq->isr_timestamp = ktime_get_ns();
-		else
-			cq->isr_timestamp = 0;
-#endif
-		workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
-		if (!(++ccount % cq->entry_repost))
-			break;
+	/* process and rearm the CQ */
+	workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
+					     &delay);
+
+	if (delay) {
+		if (!queue_delayed_work_on(cq->chann, phba->wq,
+					   &cq->sched_irqwork, delay))
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0367 Cannot schedule soft IRQ "
+				"for cqid=%d on CPU %d\n",
+				cq->queue_id, cq->chann);
 	}
 
-	/* Track the max number of CQEs processed in 1 EQ */
-	if (ccount > cq->CQ_max_cqe)
-		cq->CQ_max_cqe = ccount;
-	cq->assoc_qp->EQ_cqe_cnt += ccount;
-
-	/* Catch the no cq entry condition */
-	if (unlikely(ccount == 0))
-		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-				"0369 No entry from fast-path completion "
-				"queue fcpcqid=%d\n", cq->queue_id);
-
-	/* In any case, flash and re-arm the CQ */
-	phba->sli4_hba.sli4_cq_release(cq, LPFC_QUEUE_REARM);
-
 	/* wake up worker thread if there are works to be done */
 	if (workposted)
 		lpfc_worker_wake_up(phba);
 }
 
-static void
-lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
-{
-	struct lpfc_eqe *eqe;
-
-	/* walk all the EQ entries and drop on the floor */
-	while ((eqe = lpfc_sli4_eq_get(eq)))
-		;
-
-	/* Clear and re-arm the EQ */
-	phba->sli4_hba.sli4_eq_release(eq, LPFC_QUEUE_REARM);
-}
-
-
 /**
- * lpfc_sli4_fof_handle_eqe - Process a Flash Optimized Fabric event queue
- *			     entry
- * @phba: Pointer to HBA context object.
- * @eqe: Pointer to fast-path event queue entry.
+ * lpfc_sli4_hba_process_cq - fast-path work handler when started by
+ *   interrupt
+ * @work: pointer to work element
  *
- * This routine process a event queue entry from the Flash Optimized Fabric
- * event queue.  It will check the MajorCode and MinorCode to determine this
- * is for a completion event on a completion queue, if not, an error shall be
- * logged and just return. Otherwise, it will get to the corresponding
- * completion queue and process all the entries on the completion queue, rearm
- * the completion queue, and then return.
+ * translates from the work handler and calls the fast-path handler.
  **/
 static void
-lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
+lpfc_sli4_hba_process_cq(struct work_struct *work)
 {
-	struct lpfc_queue *cq;
-	uint16_t cqid;
+	struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
 
-	if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-				"9147 Not a valid completion "
-				"event: majorcode=x%x, minorcode=x%x\n",
-				bf_get_le32(lpfc_eqe_major_code, eqe),
-				bf_get_le32(lpfc_eqe_minor_code, eqe));
-		return;
-	}
-
-	/* Get the reference to the corresponding CQ */
-	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
-
-	/* Next check for OAS */
-	cq = phba->sli4_hba.oas_cq;
-	if (unlikely(!cq)) {
-		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
-			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-					"9148 OAS completion queue "
-					"does not exist\n");
-		return;
-	}
-
-	if (unlikely(cqid != cq->queue_id)) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-				"9149 Miss-matched fast-path compl "
-				"queue id: eqcqid=%d, fcpcqid=%d\n",
-				cqid, cq->queue_id);
-		return;
-	}
-
-	/* Save EQ associated with this CQ */
-	cq->assoc_qp = phba->sli4_hba.fof_eq;
-
-	/* CQ work will be processed on CPU affinitized to this IRQ */
-	if (!queue_work(phba->wq, &cq->irqwork))
-		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-				"0367 Cannot schedule soft IRQ "
-				"for CQ eqcqid=%d, cqid=%d on CPU %d\n",
-				cqid, cq->queue_id, smp_processor_id());
+	__lpfc_sli4_hba_process_cq(cq);
 }
 
 /**
- * lpfc_sli4_fof_intr_handler - HBA interrupt handler to SLI-4 device
- * @irq: Interrupt number.
- * @dev_id: The device context pointer.
+ * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
+ * @work: pointer to work element
  *
- * This function is directly called from the PCI layer as an interrupt
- * service routine when device with SLI-4 interface spec is enabled with
- * MSI-X multi-message interrupt mode and there is a Flash Optimized Fabric
- * IOCB ring event in the HBA. However, when the device is enabled with either
- * MSI or Pin-IRQ interrupt mode, this function is called as part of the
- * device-level interrupt handler. When the PCI slot is in error recovery
- * or the HBA is undergoing initialization, the interrupt handler will not
- * process the interrupt. The Flash Optimized Fabric ring event are handled in
- * the intrrupt context. This function is called without any lock held.
- * It gets the hbalock to access and update SLI data structures. Note that,
- * the EQ to CQ are one-to-one map such that the EQ index is
- * equal to that of CQ index.
- *
- * This function returns IRQ_HANDLED when interrupt is handled else it
- * returns IRQ_NONE.
+ * translates from the work handler and calls the fast-path handler.
  **/
-irqreturn_t
-lpfc_sli4_fof_intr_handler(int irq, void *dev_id)
+static void
+lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
 {
-	struct lpfc_hba *phba;
-	struct lpfc_hba_eq_hdl *hba_eq_hdl;
-	struct lpfc_queue *eq;
-	struct lpfc_eqe *eqe;
-	unsigned long iflag;
-	int ecount = 0;
+	struct lpfc_queue *cq = container_of(to_delayed_work(work),
+					struct lpfc_queue, sched_irqwork);
 
-	/* Get the driver's phba structure from the dev_id */
-	hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
-	phba = hba_eq_hdl->phba;
-
-	if (unlikely(!phba))
-		return IRQ_NONE;
-
-	/* Get to the EQ struct associated with this vector */
-	eq = phba->sli4_hba.fof_eq;
-	if (unlikely(!eq))
-		return IRQ_NONE;
-
-	/* Check device state for handling interrupt */
-	if (unlikely(lpfc_intr_state_check(phba))) {
-		/* Check again for link_state with lock held */
-		spin_lock_irqsave(&phba->hbalock, iflag);
-		if (phba->link_state < LPFC_LINK_DOWN)
-			/* Flush, clear interrupt, and rearm the EQ */
-			lpfc_sli4_eq_flush(phba, eq);
-		spin_unlock_irqrestore(&phba->hbalock, iflag);
-		return IRQ_NONE;
-	}
-
-	/*
-	 * Process all the event on FCP fast-path EQ
-	 */
-	while ((eqe = lpfc_sli4_eq_get(eq))) {
-		lpfc_sli4_fof_handle_eqe(phba, eqe);
-		if (!(++ecount % eq->entry_repost))
-			break;
-		eq->EQ_processed++;
-	}
-
-	/* Track the max number of EQEs processed in 1 intr */
-	if (ecount > eq->EQ_max_eqe)
-		eq->EQ_max_eqe = ecount;
-
-
-	if (unlikely(ecount == 0)) {
-		eq->EQ_no_entry++;
-
-		if (phba->intr_type == MSIX)
-			/* MSI-X treated interrupt served as no EQ share INT */
-			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
-					"9145 MSI-X interrupt with no EQE\n");
-		else {
-			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-					"9146 ISR interrupt with no EQE\n");
-			/* Non MSI-X treated on interrupt as EQ share INT */
-			return IRQ_NONE;
-		}
-	}
-	/* Always clear and re-arm the fast-path EQ */
-	phba->sli4_hba.sli4_eq_release(eq, LPFC_QUEUE_REARM);
-	return IRQ_HANDLED;
+	__lpfc_sli4_hba_process_cq(cq);
 }
 
 /**
@@ -14021,10 +14193,11 @@
 	struct lpfc_hba *phba;
 	struct lpfc_hba_eq_hdl *hba_eq_hdl;
 	struct lpfc_queue *fpeq;
-	struct lpfc_eqe *eqe;
 	unsigned long iflag;
 	int ecount = 0;
 	int hba_eqidx;
+	struct lpfc_eq_intr_info *eqi;
+	uint32_t icnt;
 
 	/* Get the driver's phba structure from the dev_id */
 	hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
@@ -14033,23 +14206,14 @@
 
 	if (unlikely(!phba))
 		return IRQ_NONE;
-	if (unlikely(!phba->sli4_hba.hba_eq))
+	if (unlikely(!phba->sli4_hba.hdwq))
 		return IRQ_NONE;
 
 	/* Get to the EQ struct associated with this vector */
-	fpeq = phba->sli4_hba.hba_eq[hba_eqidx];
+	fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
 	if (unlikely(!fpeq))
 		return IRQ_NONE;
 
-	if (lpfc_fcp_look_ahead) {
-		if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use))
-			phba->sli4_hba.sli4_eq_clr_intr(fpeq);
-		else {
-			atomic_inc(&hba_eq_hdl->hba_eq_in_use);
-			return IRQ_NONE;
-		}
-	}
-
 	/* Check device state for handling interrupt */
 	if (unlikely(lpfc_intr_state_check(phba))) {
 		/* Check again for link_state with lock held */
@@ -14058,36 +14222,25 @@
 			/* Flush, clear interrupt, and rearm the EQ */
 			lpfc_sli4_eq_flush(phba, fpeq);
 		spin_unlock_irqrestore(&phba->hbalock, iflag);
-		if (lpfc_fcp_look_ahead)
-			atomic_inc(&hba_eq_hdl->hba_eq_in_use);
 		return IRQ_NONE;
 	}
 
-	/*
-	 * Process all the event on FCP fast-path EQ
-	 */
-	while ((eqe = lpfc_sli4_eq_get(fpeq))) {
-		lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx);
-		if (!(++ecount % fpeq->entry_repost))
-			break;
-		fpeq->EQ_processed++;
-	}
+	eqi = phba->sli4_hba.eq_info;
+	icnt = this_cpu_inc_return(eqi->icnt);
+	fpeq->last_cpu = raw_smp_processor_id();
 
-	/* Track the max number of EQEs processed in 1 intr */
-	if (ecount > fpeq->EQ_max_eqe)
-		fpeq->EQ_max_eqe = ecount;
+	if (icnt > LPFC_EQD_ISR_TRIGGER &&
+	    phba->cfg_irq_chann == 1 &&
+	    phba->cfg_auto_imax &&
+	    fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
+	    phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
+		lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
 
-	/* Always clear and re-arm the fast-path EQ */
-	phba->sli4_hba.sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
+	/* process and rearm the EQ */
+	ecount = lpfc_sli4_process_eq(phba, fpeq);
 
 	if (unlikely(ecount == 0)) {
 		fpeq->EQ_no_entry++;
-
-		if (lpfc_fcp_look_ahead) {
-			atomic_inc(&hba_eq_hdl->hba_eq_in_use);
-			return IRQ_NONE;
-		}
-
 		if (phba->intr_type == MSIX)
 			/* MSI-X treated interrupt served as no EQ share INT */
 			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
@@ -14097,9 +14250,6 @@
 			return IRQ_NONE;
 	}
 
-	if (lpfc_fcp_look_ahead)
-		atomic_inc(&hba_eq_hdl->hba_eq_in_use);
-
 	return IRQ_HANDLED;
 } /* lpfc_sli4_fp_intr_handler */
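
The reworked handler above keeps a per-CPU interrupt counter (this_cpu_inc_return()) and, once it passes LPFC_EQD_ISR_TRIGGER under the stated conditions, pins the EQ at its maximum coalescing delay. A small sketch of that rate-triggered escalation; _Thread_local approximates a per-CPU variable, and both constants are assumed values, not the driver's:

#include <stdint.h>
#include <stdio.h>

#define ISR_TRIGGER	8	/* assumed stand-in for LPFC_EQD_ISR_TRIGGER */
#define MAX_EQ_DELAY	255	/* assumed stand-in for LPFC_MAX_AUTO_EQ_DELAY */

static _Thread_local uint32_t icnt;	/* per-CPU counter analogue */
static uint32_t q_mode;			/* current EQ delay setting */

static void eq_interrupt(void)
{
	if (++icnt > ISR_TRIGGER && q_mode != MAX_EQ_DELAY) {
		q_mode = MAX_EQ_DELAY;	/* escalate coalescing once hot */
		printf("EQ delay raised to %u after %u interrupts\n",
		       q_mode, icnt);
	}
}

int main(void)
{
	for (int i = 0; i < 10; i++)
		eq_interrupt();
	return 0;
}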
 
@@ -14137,20 +14287,13 @@
 	/*
 	 * Invoke fast-path host attention interrupt handling as appropriate.
 	 */
-	for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) {
+	for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
 		hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
 					&phba->sli4_hba.hba_eq_hdl[qidx]);
 		if (hba_irq_rc == IRQ_HANDLED)
 			hba_handled |= true;
 	}
 
-	if (phba->cfg_fof) {
-		hba_irq_rc = lpfc_sli4_fof_intr_handler(irq,
-					&phba->sli4_hba.hba_eq_hdl[qidx]);
-		if (hba_irq_rc == IRQ_HANDLED)
-			hba_handled |= true;
-	}
-
 	return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
 } /* lpfc_sli4_intr_handler */
 
@@ -14170,6 +14313,9 @@
 	if (!queue)
 		return;
 
+	if (!list_empty(&queue->wq_list))
+		list_del(&queue->wq_list);
+
 	while (!list_empty(&queue->page_list)) {
 		list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
 				 list);
@@ -14182,8 +14328,8 @@
 		kfree(queue->rqbp);
 	}
 
-	if (!list_empty(&queue->wq_list))
-		list_del(&queue->wq_list);
+	if (!list_empty(&queue->cpu_list))
+		list_del(&queue->cpu_list);
 
 	kfree(queue);
 	return;
@@ -14195,6 +14341,7 @@
  * @page_size: The size of a queue page
  * @entry_size: The size of each queue entry for this queue.
  * @entry_count: The number of entries that this queue will handle.
+ * @cpu: The cpu that will primarily utilize this queue.
  *
  * This function allocates a queue structure and the DMAable memory used for
  * the host resident queue. This function must be called before creating the
@@ -14202,67 +14349,68 @@
  **/
 struct lpfc_queue *
 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
-		      uint32_t entry_size, uint32_t entry_count)
+		      uint32_t entry_size, uint32_t entry_count, int cpu)
 {
 	struct lpfc_queue *queue;
 	struct lpfc_dmabuf *dmabuf;
-	int x, total_qe_count;
-	void *dma_pointer;
 	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+	uint16_t x, pgcnt;
 
 	if (!phba->sli4_hba.pc_sli4_params.supported)
 		hw_page_size = page_size;
 
-	queue = kzalloc(sizeof(struct lpfc_queue) +
-			(sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
-	if (!queue)
-		return NULL;
-	queue->page_count = (ALIGN(entry_size * entry_count,
-			hw_page_size))/hw_page_size;
+	pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;
 
 	/* If needed, Adjust page count to match the max the adapter supports */
-	if (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt)
-		queue->page_count = phba->sli4_hba.pc_sli4_params.wqpcnt;
+	if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
+		pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;
+
+	queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
+			     GFP_KERNEL, cpu_to_node(cpu));
+	if (!queue)
+		return NULL;
 
 	INIT_LIST_HEAD(&queue->list);
 	INIT_LIST_HEAD(&queue->wq_list);
 	INIT_LIST_HEAD(&queue->wqfull_list);
 	INIT_LIST_HEAD(&queue->page_list);
 	INIT_LIST_HEAD(&queue->child_list);
+	INIT_LIST_HEAD(&queue->cpu_list);
 
 	/* Set queue parameters now.  If the system cannot provide memory
 	 * resources, the free routine needs to know what was allocated.
 	 */
+	queue->page_count = pgcnt;
+	queue->q_pgs = (void **)&queue[1];
+	queue->entry_cnt_per_pg = hw_page_size / entry_size;
 	queue->entry_size = entry_size;
 	queue->entry_count = entry_count;
 	queue->page_size = hw_page_size;
 	queue->phba = phba;
 
-	for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
-		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	for (x = 0; x < queue->page_count; x++) {
+		dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
+				      dev_to_node(&phba->pcidev->dev));
 		if (!dmabuf)
 			goto out_fail;
-		dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
-						   hw_page_size, &dmabuf->phys,
-						   GFP_KERNEL);
+		dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+						  hw_page_size, &dmabuf->phys,
+						  GFP_KERNEL);
 		if (!dmabuf->virt) {
 			kfree(dmabuf);
 			goto out_fail;
 		}
 		dmabuf->buffer_tag = x;
 		list_add_tail(&dmabuf->list, &queue->page_list);
-		/* initialize queue's entry array */
-		dma_pointer = dmabuf->virt;
-		for (; total_qe_count < entry_count &&
-		     dma_pointer < (hw_page_size + dmabuf->virt);
-		     total_qe_count++, dma_pointer += entry_size) {
-			queue->qe[total_qe_count].address = dma_pointer;
-		}
+		/* use lpfc_sli4_qe to index a particular entry in this page */
+		queue->q_pgs[x] = dmabuf->virt;
 	}
 	INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
 	INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
+	INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
+	INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);
 
-	/* entry_repost will be set during q creation */
+	/* notify_interval will be set during q creation */
 
 	return queue;
 out_fail:
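
With q_pgs now pointing at whole DMA pages instead of a per-entry address array, a queue entry is located arithmetically from its index. A sketch of that two-level lookup, in the spirit of the lpfc_sli4_qe() helper the comment above references (the helper body shown is an assumption about the real inline):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct queue {
	void		**q_pgs;		/* one pointer per DMA page */
	uint32_t	entry_size;		/* bytes per queue entry */
	uint32_t	entry_cnt_per_pg;	/* page_size / entry_size */
};

/* Index -> (page, offset) lookup, mirroring the new q_pgs layout. */
static void *queue_entry(struct queue *q, uint16_t idx)
{
	return (uint8_t *)q->q_pgs[idx / q->entry_cnt_per_pg] +
	       (size_t)q->entry_size * (idx % q->entry_cnt_per_pg);
}

int main(void)
{
	void *pages[2] = { calloc(1, 4096), calloc(1, 4096) };
	struct queue q = { pages, 64, 4096 / 64 };

	/* entry 70 lives 6 entries (384 bytes) into the second page */
	printf("delta = %td bytes\n",
	       (uint8_t *)queue_entry(&q, 70) - (uint8_t *)pages[1]);
	free(pages[0]);
	free(pages[1]);
	return 0;
}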
@@ -14299,43 +14447,75 @@
 }
 
 /**
- * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on FCP EQs
- * @phba: HBA structure that indicates port to create a queue on.
- * @startq: The starting FCP EQ to modify
+ * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
+ * @phba: HBA structure that EQs are on.
+ * @startq: The starting EQ index to modify
+ * @numq: The number of EQs (consecutive indexes) to modify
+ * @usdelay: amount of delay
  *
- * This function sends an MODIFY_EQ_DELAY mailbox command to the HBA.
- * The command allows up to LPFC_MAX_EQ_DELAY_EQID_CNT EQ ID's to be
- * updated in one mailbox command.
+ * This function revises the EQ delay on 1 or more EQs. The EQ delay
+ * is set either by writing to a register (if supported by the SLI Port)
+ * or by mailbox command. The mailbox command allows several EQs to be
+ * updated at once.
  *
- * The @phba struct is used to send mailbox command to HBA. The @startq
- * is used to get the starting FCP EQ to change.
- * This function is asynchronous and will wait for the mailbox
- * command to finish before continuing.
+ * The @phba struct is used to send a mailbox command to HBA. The @startq
+ * is used to get the starting EQ index to change. The @numq value is
+ * used to specify how many consecutive EQ indexes, starting at EQ index,
+ * are to be changed. This function is asynchronous and will wait for any
+ * mailbox commands to finish before returning.
  *
- * On success this function will return a zero. If unable to allocate enough
- * memory this function will return -ENOMEM. If the queue create mailbox command
- * fails this function will return -ENXIO.
+ * On success this function will return a zero. If unable to allocate
+ * enough memory this function will return -ENOMEM. If a mailbox command
+ * fails this function will return -ENXIO. Note: on ENXIO, some EQs may
+ * have had their delay multiplier changed.
  **/
-int
+void
 lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
-			 uint32_t numq, uint32_t imax)
+			 uint32_t numq, uint32_t usdelay)
 {
 	struct lpfc_mbx_modify_eq_delay *eq_delay;
 	LPFC_MBOXQ_t *mbox;
 	struct lpfc_queue *eq;
-	int cnt, rc, length, status = 0;
+	int cnt = 0, rc, length;
 	uint32_t shdr_status, shdr_add_status;
-	uint32_t result, val;
+	uint32_t dmult;
 	int qidx;
 	union lpfc_sli4_cfg_shdr *shdr;
-	uint16_t dmult;
 
-	if (startq >= phba->io_channel_irqs)
-		return 0;
+	if (startq >= phba->cfg_irq_chann)
+		return;
+
+	if (usdelay > 0xFFFF) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
+				"6429 usdelay %d too large. Scaled down to "
+				"0xFFFF.\n", usdelay);
+		usdelay = 0xFFFF;
+	}
+
+	/* set values by EQ_DELAY register if supported */
+	if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
+		for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
+			eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
+			if (!eq)
+				continue;
+
+			lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);
+
+			if (++cnt >= numq)
+				break;
+		}
+		return;
+	}
+
+	/* Otherwise, set values by mailbox cmd */
 
 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-	if (!mbox)
-		return -ENOMEM;
+	if (!mbox) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_FCP | LOG_NVME,
+				"6428 Failed allocating mailbox cmd buffer."
+				" EQ delay was not set.\n");
+		return;
+	}
 	length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
 		  sizeof(struct lpfc_sli4_cfg_mhdr));
 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
@@ -14344,52 +14524,30 @@
 	eq_delay = &mbox->u.mqe.un.eq_delay;
 
 	/* Calculate delay multiplier from the requested delay (usdelay) */
-	result = imax / phba->io_channel_irqs;
-	if (result > LPFC_DMULT_CONST || result == 0)
-		dmult = 0;
-	else
-		dmult = LPFC_DMULT_CONST/result - 1;
+	dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
+	if (dmult)
+		dmult--;
 	if (dmult > LPFC_DMULT_MAX)
 		dmult = LPFC_DMULT_MAX;
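+
+	/*
+	 * Illustrative arithmetic (a sketch, with LPFC_SEC_TO_USEC assumed
+	 * to be 1000000 as defined for this driver): usdelay = 100 gives
+	 * dmult = (100 * LPFC_DMULT_CONST) / 1000000, decremented by one and
+	 * clamped to LPFC_DMULT_MAX, so larger usdelay values map linearly
+	 * onto larger delay multipliers until the clamp is reached.
+	 */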
 
-	cnt = 0;
-	for (qidx = startq; qidx < phba->io_channel_irqs; qidx++) {
-		eq = phba->sli4_hba.hba_eq[qidx];
+	for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
+		eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
 		if (!eq)
 			continue;
-		eq->q_mode = imax;
+		eq->q_mode = usdelay;
 		eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
 		eq_delay->u.request.eq[cnt].phase = 0;
 		eq_delay->u.request.eq[cnt].delay_multi = dmult;
-		cnt++;
 
-		/* q_mode is only used for auto_imax */
-		if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
-			/* Use EQ Delay Register method for q_mode */
-
-			/* Convert for EQ Delay register */
-			val =  phba->cfg_fcp_imax;
-			if (val) {
-				/* First, interrupts per sec per EQ */
-				val = phba->cfg_fcp_imax /
-					phba->io_channel_irqs;
-
-				/* us delay between each interrupt */
-				val = LPFC_SEC_TO_USEC / val;
-			}
-			eq->q_mode = val;
-		} else {
-			eq->q_mode = imax;
-		}
-
-		if (cnt >= numq)
+		if (++cnt >= numq)
 			break;
 	}
 	eq_delay->u.request.num_eq = cnt;
 
 	mbox->vport = phba->pport;
 	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-	mbox->context1 = NULL;
+	mbox->ctx_buf = NULL;
+	mbox->ctx_ndlp = NULL;
 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
 	shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
@@ -14399,10 +14557,9 @@
 				"2512 MODIFY_EQ_DELAY mailbox failed with "
 				"status x%x add_status x%x, mbx status x%x\n",
 				shdr_status, shdr_add_status, rc);
-		status = -ENXIO;
 	}
 	mempool_free(mbox, phba->mbox_mem_pool);
-	return status;
+	return;
 }
 
 /**
@@ -14476,9 +14633,11 @@
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"0360 Unsupported EQ count. (%d)\n",
 				eq->entry_count);
-		if (eq->entry_count < 256)
-			return -EINVAL;
-		/* otherwise default to smallest count (drop through) */
+		if (eq->entry_count < 256) {
+			status = -EINVAL;
+			goto out;
+		}
+		/* fall through - otherwise default to smallest count */
 	case 256:
 		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
 		       LPFC_EQ_CNT_256);
@@ -14509,7 +14668,8 @@
 	}
 	mbox->vport = phba->pport;
 	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-	mbox->context1 = NULL;
+	mbox->ctx_buf = NULL;
+	mbox->ctx_ndlp = NULL;
 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
@@ -14526,9 +14686,9 @@
 	if (eq->queue_id == 0xFFFF)
 		status = -ENXIO;
 	eq->host_index = 0;
-	eq->hba_index = 0;
-	eq->entry_repost = LPFC_EQ_REPOST;
-
+	eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
+	eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
+out:
 	mempool_free(mbox, phba->mbox_mem_pool);
 	return status;
 }
@@ -14564,13 +14724,10 @@
 	int rc, length, status = 0;
 	uint32_t shdr_status, shdr_add_status;
 	union lpfc_sli4_cfg_shdr *shdr;
-	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
 
 	/* sanity check on queue memory */
 	if (!cq || !eq)
 		return -ENODEV;
-	if (!phba->sli4_hba.pc_sli4_params.supported)
-		hw_page_size = cq->page_size;
 
 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (!mbox)
@@ -14611,7 +14768,7 @@
 			       LPFC_CQ_CNT_WORD7);
 			break;
 		}
-		/* Fall Thru */
+		/* fall through */
 	default:
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"0361 Unsupported CQ count: "
@@ -14622,7 +14779,7 @@
 			status = -EINVAL;
 			goto out;
 		}
-		/* otherwise default to smallest count (drop through) */
+		/* fall through - otherwise default to smallest count */
 	case 256:
 		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
 		       LPFC_CQ_CNT_256);
@@ -14668,10 +14825,13 @@
 	cq->subtype = subtype;
 	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
 	cq->assoc_qid = eq->queue_id;
+	cq->assoc_qp = eq;
 	cq->host_index = 0;
-	cq->hba_index = 0;
-	cq->entry_repost = LPFC_CQ_REPOST;
+	cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
+	cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);
 
+	if (cq->queue_id > phba->sli4_hba.cq_max)
+		phba->sli4_hba.cq_max = cq->queue_id;
 out:
 	mempool_free(mbox, phba->mbox_mem_pool);
 	return status;
@@ -14681,7 +14841,7 @@
  * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
  * @phba: HBA structure that indicates port to create a queue on.
  * @cqp: The queue structure array to use to create the completion queues.
- * @eqp: The event queue array to bind these completion queues to.
+ * @hdwq: The hardware queue array with the EQ to bind completion queues to.
  *
 * This function creates a set of completion queues to support MRQ
  * as detailed in @cqp, on a port,
@@ -14701,7 +14861,8 @@
  **/
 int
 lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
-		   struct lpfc_queue **eqp, uint32_t type, uint32_t subtype)
+		   struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
+		   uint32_t subtype)
 {
 	struct lpfc_queue *cq;
 	struct lpfc_queue *eq;
@@ -14716,7 +14877,7 @@
 
 	/* sanity check on queue memory */
 	numcq = phba->cfg_nvmet_mrq;
-	if (!cqp || !eqp || !numcq)
+	if (!cqp || !hdwq || !numcq)
 		return -ENODEV;
 
 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -14743,7 +14904,7 @@
 
 	for (idx = 0; idx < numcq; idx++) {
 		cq = cqp[idx];
-		eq = eqp[idx];
+		eq = hdwq[idx].hba_eq;
 		if (!cq || !eq) {
 			status = -ENOMEM;
 			goto out;
@@ -14782,7 +14943,7 @@
 					       LPFC_CQ_CNT_WORD7);
 					break;
 				}
-				/* Fall Thru */
+				/* fall through */
 			default:
 				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 						"3118 Bad CQ count. (%d)\n",
@@ -14791,7 +14952,7 @@
 					status = -EINVAL;
 					goto out;
 				}
-				/* otherwise default to smallest (drop thru) */
+				/* fall through - otherwise default to smallest */
 			case 256:
 				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
 				       &cq_set->u.request, LPFC_CQ_CNT_256);
@@ -14876,9 +15037,11 @@
 		cq->type = type;
 		cq->subtype = subtype;
 		cq->assoc_qid = eq->queue_id;
+		cq->assoc_qp = eq;
 		cq->host_index = 0;
-		cq->hba_index = 0;
-		cq->entry_repost = LPFC_CQ_REPOST;
+		cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
+		cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
+					 cq->entry_count);
 		cq->chann = idx;
 
 		rc = 0;
@@ -14916,6 +15079,8 @@
 	for (idx = 0; idx < numcq; idx++) {
 		cq = cqp[idx];
 		cq->queue_id = rc + idx;
+		if (cq->queue_id > phba->sli4_hba.cq_max)
+			phba->sli4_hba.cq_max = cq->queue_id;
 	}
 
 out:
@@ -15063,7 +15228,7 @@
 			status = -EINVAL;
 			goto out;
 		}
-		/* otherwise default to smallest count (drop through) */
+		/* fall through - otherwise default to smallest count */
 	case 16:
 		bf_set(lpfc_mq_context_ring_size,
 		       &mq_create_ext->u.request.context,
@@ -15128,7 +15293,6 @@
 	mq->subtype = subtype;
 	mq->host_index = 0;
 	mq->hba_index = 0;
-	mq->entry_repost = LPFC_MQ_REPOST;
 
 	/* link the mq onto the parent cq child list */
 	list_add_tail(&mq->list, &cq->child_list);
@@ -15394,7 +15558,7 @@
 	wq->subtype = subtype;
 	wq->host_index = 0;
 	wq->hba_index = 0;
-	wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL;
+	wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL;
 
 	/* link the wq onto the parent cq child list */
 	list_add_tail(&wq->list, &cq->child_list);
@@ -15482,7 +15646,7 @@
 				status = -EINVAL;
 				goto out;
 			}
-			/* otherwise default to smallest count (drop through) */
+			/* fall through - otherwise default to smallest count */
 		case 512:
 			bf_set(lpfc_rq_context_rqe_count,
 			       &rq_create->u.request.context,
@@ -15588,7 +15752,7 @@
 	hrq->subtype = subtype;
 	hrq->host_index = 0;
 	hrq->hba_index = 0;
-	hrq->entry_repost = LPFC_RQ_REPOST;
+	hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
 
 	/* now create the data queue */
 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
@@ -15619,7 +15783,7 @@
 				status = -EINVAL;
 				goto out;
 			}
-			/* otherwise default to smallest count (drop through) */
+			/* fall through - otherwise default to smallest count */
 		case 512:
 			bf_set(lpfc_rq_context_rqe_count,
 			       &rq_create->u.request.context,
@@ -15681,7 +15845,7 @@
 	drq->subtype = subtype;
 	drq->host_index = 0;
 	drq->hba_index = 0;
-	drq->entry_repost = LPFC_RQ_REPOST;
+	drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
 
 	/* link the header and data RQs onto the parent cq child list */
 	list_add_tail(&hrq->list, &cq->child_list);
@@ -15839,7 +16003,7 @@
 		hrq->subtype = subtype;
 		hrq->host_index = 0;
 		hrq->hba_index = 0;
-		hrq->entry_repost = LPFC_RQ_REPOST;
+		hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
 
 		drq->db_format = LPFC_DB_RING_FORMAT;
 		drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
@@ -15848,7 +16012,7 @@
 		drq->subtype = subtype;
 		drq->host_index = 0;
 		drq->hba_index = 0;
-		drq->entry_repost = LPFC_RQ_REPOST;
+		drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
 
 		list_add_tail(&hrq->list, &cq->child_list);
 		list_add_tail(&drq->list, &cq->child_list);
@@ -15908,6 +16072,7 @@
 	/* sanity check on queue memory */
 	if (!eq)
 		return -ENODEV;
+
 	mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
 	if (!mbox)
 		return -ENOMEM;
@@ -16457,22 +16622,21 @@
 }
 
 /**
- * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware
+ * lpfc_sli4_post_io_sgl_block - post a block of nvme buffer sgls to firmware
  * @phba: pointer to lpfc hba data structure.
- * @sblist: pointer to scsi buffer list.
+ * @nblist: pointer to nvme buffer list.
+ * @count: number of nvme buffers on the list.
  *
 * This routine is invoked to post a block of @count nvme sgl pages from an
- * SCSI buffer list @sblist to the HBA using non-embedded mailbox command.
+ * nvme buffer list @nblist to the HBA using a non-embedded mailbox command.
  * No Lock is held.
  *
  **/
-int
-lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
-			      struct list_head *sblist,
-			      int count)
+static int
+lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
+			    int count)
 {
-	struct lpfc_scsi_buf *psb;
+	struct lpfc_io_buf *lpfc_ncmd;
 	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
 	struct sgl_page_pairs *sgl_pg_pairs;
 	void *viraddr;
@@ -16490,25 +16654,25 @@
 		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
 	if (reqlen > SLI4_PAGE_SIZE) {
 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
-				"0217 Block sgl registration required DMA "
+				"6118 Block sgl registration required DMA "
 				"size (%d) great than a page\n", reqlen);
 		return -ENOMEM;
 	}
 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (!mbox) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"0283 Failed to allocate mbox cmd memory\n");
+				"6119 Failed to allocate mbox cmd memory\n");
 		return -ENOMEM;
 	}
 
 	/* Allocate DMA memory and set up the non-embedded mailbox command */
 	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
-				LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
-				LPFC_SLI4_MBX_NEMBED);
+				    LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
+				    reqlen, LPFC_SLI4_MBX_NEMBED);
 
 	if (alloclen < reqlen) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"2561 Allocated DMA memory size (%d) is "
+				"6120 Allocated DMA memory size (%d) is "
 				"less than the requested DMA memory "
 				"size (%d)\n", alloclen, reqlen);
 		lpfc_sli4_mbox_cmd_free(phba, mbox);
@@ -16523,14 +16687,15 @@
 	sgl_pg_pairs = &sgl->sgl_pg_pairs;
 
 	pg_pairs = 0;
-	list_for_each_entry(psb, sblist, list) {
+	list_for_each_entry(lpfc_ncmd, nblist, list) {
 		/* Set up the sge entry */
 		sgl_pg_pairs->sgl_pg0_addr_lo =
-			cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
+			cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
 		sgl_pg_pairs->sgl_pg0_addr_hi =
-			cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
+			cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
 		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
-			pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
+			pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
+						SGL_PAGE_SIZE;
 		else
 			pdma_phys_bpl1 = 0;
 		sgl_pg_pairs->sgl_pg1_addr_lo =
@@ -16539,7 +16704,7 @@
 			cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
 		/* Keep the first xritag on the list */
 		if (pg_pairs == 0)
-			xritag_start = psb->cur_iocbq.sli4_xritag;
+			xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
 		sgl_pg_pairs++;
 		pg_pairs++;
 	}
@@ -16548,20 +16713,20 @@
 	/* Perform endian conversion if necessary */
 	sgl->word0 = cpu_to_le32(sgl->word0);
 
-	if (!phba->sli4_hba.intr_enable)
+	if (!phba->sli4_hba.intr_enable) {
 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
-	else {
+	} else {
 		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
 		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
 	}
-	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
+	shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
 	if (rc != MBX_TIMEOUT)
 		lpfc_sli4_mbox_cmd_free(phba, mbox);
 	if (shdr_status || shdr_add_status || rc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-				"2564 POST_SGL_BLOCK mailbox command failed "
+				"6125 POST_SGL_BLOCK mailbox command failed "
 				"status x%x add_status x%x mbx status x%x\n",
 				shdr_status, shdr_add_status, rc);
 		rc = -ENXIO;
@@ -16570,6 +16735,134 @@
 }
 
 /**
+ * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list
+ * @phba: pointer to lpfc hba data structure.
+ * @post_nblist: pointer to the nvme buffer list.
+ *
+ * This routine walks a list of nvme buffers that was passed in. It attempts
+ * to construct blocks of nvme buffer sgls which contains contiguous xris and
+ * uses the non-embedded SGL block post mailbox commands to post to the port.
+ * For a single NVME buffer sgl with a non-contiguous xri, if any, it uses
+ * the embedded SGL post mailbox command for posting. The @post_nblist
+ * passed in must be a local list, so no lock is needed when manipulating
+ * the list.
+ *
+ * Returns: 0 = failure, non-zero = number of successfully posted buffers.
+ **/
+int
+lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
+			   struct list_head *post_nblist, int sb_count)
+{
+	struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
+	int status, sgl_size;
+	int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
+	dma_addr_t pdma_phys_sgl1;
+	int last_xritag = NO_XRI;
+	int cur_xritag;
+	LIST_HEAD(prep_nblist);
+	LIST_HEAD(blck_nblist);
+	LIST_HEAD(nvme_nblist);
+
+	/* sanity check */
+	if (sb_count <= 0)
+		return -EINVAL;
+
+	sgl_size = phba->cfg_sg_dma_buf_size;
+	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
+		list_del_init(&lpfc_ncmd->list);
+		block_cnt++;
+		if ((last_xritag != NO_XRI) &&
+		    (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
+			/* a hole in xri block, form a sgl posting block */
+			list_splice_init(&prep_nblist, &blck_nblist);
+			post_cnt = block_cnt - 1;
+			/* prepare list for next posting block */
+			list_add_tail(&lpfc_ncmd->list, &prep_nblist);
+			block_cnt = 1;
+		} else {
+			/* prepare list for next posting block */
+			list_add_tail(&lpfc_ncmd->list, &prep_nblist);
+			/* enough sgls for non-embed sgl mbox command */
+			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
+				list_splice_init(&prep_nblist, &blck_nblist);
+				post_cnt = block_cnt;
+				block_cnt = 0;
+			}
+		}
+		num_posting++;
+		last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
+
+		/* end of repost sgl list condition for NVME buffers */
+		if (num_posting == sb_count) {
+			if (post_cnt == 0) {
+				/* last sgl posting block */
+				list_splice_init(&prep_nblist, &blck_nblist);
+				post_cnt = block_cnt;
+			} else if (block_cnt == 1) {
+				/* last single sgl with non-contiguous xri */
+				if (sgl_size > SGL_PAGE_SIZE)
+					pdma_phys_sgl1 =
+						lpfc_ncmd->dma_phys_sgl +
+						SGL_PAGE_SIZE;
+				else
+					pdma_phys_sgl1 = 0;
+				cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
+				status = lpfc_sli4_post_sgl(
+						phba, lpfc_ncmd->dma_phys_sgl,
+						pdma_phys_sgl1, cur_xritag);
+				if (status) {
+					/* Post error.  Buffer unavailable. */
+					lpfc_ncmd->flags |=
+						LPFC_SBUF_NOT_POSTED;
+				} else {
+					/* Post success. Buffer available. */
+					lpfc_ncmd->flags &=
+						~LPFC_SBUF_NOT_POSTED;
+					lpfc_ncmd->status = IOSTAT_SUCCESS;
+					num_posted++;
+				}
+				/* success, put on NVME buffer sgl list */
+				list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
+			}
+		}
+
+		/* continue until a nembed page worth of sgls */
+		if (post_cnt == 0)
+			continue;
+
+		/* post block of NVME buffer list sgls */
+		status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist,
+						     post_cnt);
+
+		/* don't reset xritag due to hole in xri block */
+		if (block_cnt == 0)
+			last_xritag = NO_XRI;
+
+		/* reset NVME buffer post count for next round of posting */
+		post_cnt = 0;
+
+		/* put sgl-posted NVME buffers on the NVME buffer sgl list */
+		while (!list_empty(&blck_nblist)) {
+			list_remove_head(&blck_nblist, lpfc_ncmd,
+					 struct lpfc_io_buf, list);
+			if (status) {
+				/* Post error.  Mark buffer unavailable. */
+				lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED;
+			} else {
+				/* Post success, Mark buffer available. */
+				lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED;
+				lpfc_ncmd->status = IOSTAT_SUCCESS;
+				num_posted++;
+			}
+			list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
+		}
+	}
+	/* Push NVME buffers with sgl posted to the available list */
+	lpfc_io_buf_replenish(phba, &nvme_nblist);
+
+	return num_posted;
+}
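+
+/*
+ * Illustrative call pattern for the routine above (a sketch; the
+ * collection step and the variable names are assumptions, not driver
+ * API):
+ *
+ *	LIST_HEAD(post_nblist);
+ *	int cnt = 0;
+ *
+ *	(add freshly allocated lpfc_io_buf entries to the local list,
+ *	 bumping cnt for each)
+ *
+ *	num_posted = lpfc_sli4_post_io_sgl_list(phba, &post_nblist, cnt);
+ *
+ * Buffers that failed posting are flagged LPFC_SBUF_NOT_POSTED and are
+ * still returned through lpfc_io_buf_replenish().
+ */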
+
+/**
  * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
  * @phba: pointer to lpfc_hba struct that the frame was received on
  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
@@ -16626,15 +16919,12 @@
 		goto drop;
 	}
 
-#define FC_TYPE_VENDOR_UNIQUE	0xFF
-
 	switch (fc_hdr->fh_type) {
 	case FC_TYPE_BLS:
 	case FC_TYPE_ELS:
 	case FC_TYPE_FCP:
 	case FC_TYPE_CT:
 	case FC_TYPE_NVME:
-	case FC_TYPE_VENDOR_UNIQUE:
 		break;
 	case FC_TYPE_IP:
 	case FC_TYPE_ILS:
@@ -17118,7 +17408,6 @@
 	icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
 	ctiocb->context1 = lpfc_nlp_get(ndlp);
 
-	ctiocb->iocb_cmpl = NULL;
 	ctiocb->vport = phba->pport;
 	ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
 	ctiocb->sli4_lxritag = NO_XRI;
@@ -17464,6 +17753,7 @@
 		dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
 	kfree(pcmd);
 	lpfc_sli_release_iocbq(phba, cmdiocb);
+	lpfc_drain_txq(phba);
 }
 
 static void
@@ -17477,14 +17767,23 @@
 	struct lpfc_dmabuf *pcmd = NULL;
 	uint32_t frame_len;
 	int rc;
+	unsigned long iflags;
 
 	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
 	frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
 
 	/* Send the received frame back */
 	iocbq = lpfc_sli_get_iocbq(phba);
-	if (!iocbq)
-		goto exit;
+	if (!iocbq) {
+		/* Queue cq event and wakeup worker thread to process it */
+		spin_lock_irqsave(&phba->hbalock, iflags);
+		list_add_tail(&dmabuf->cq_event.list,
+			      &phba->sli4_hba.sp_queue_event);
+		phba->hba_flag |= HBA_SP_QUEUE_EVT;
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		lpfc_worker_wake_up(phba);
+		return;
+	}
 
 	/* Allocate buffer for command payload */
 	pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
@@ -17569,6 +17868,14 @@
 	/* Process each received buffer */
 	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
 
+	if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
+	    fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
+		vport = phba->pport;
+		/* Handle MDS Loopback frames */
+		lpfc_sli4_handle_mds_loopback(vport, dmabuf);
+		return;
+	}
+
 	/* check to see if this a valid type of frame */
 	if (lpfc_fc_frame_check(phba, fc_hdr)) {
 		lpfc_in_buf_free(phba, &dmabuf->dbuf);
@@ -17585,6 +17892,10 @@
 
 	if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
 		vport = phba->pport;
+		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+				"2023 MDS Loopback %d bytes\n",
+				bf_get(lpfc_rcqe_length,
+				       &dmabuf->cq_event.cqe.rcqe_cmpl));
 		/* Handle MDS Loopback frames */
 		lpfc_sli4_handle_mds_loopback(vport, dmabuf);
 		return;
@@ -17873,6 +18184,10 @@
 	if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
 		phba->sli4_hba.rpi_count--;
 		phba->sli4_hba.max_cfg_param.rpi_used--;
+	} else {
+		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+				"2016 rpi %x not inuse\n",
+				rpi);
 	}
 }
 
@@ -17930,8 +18245,8 @@
 	lpfc_resume_rpi(mboxq, ndlp);
 	if (cmpl) {
 		mboxq->mbox_cmpl = cmpl;
-		mboxq->context1 = arg;
-		mboxq->context2 = ndlp;
+		mboxq->ctx_buf = arg;
+		mboxq->ctx_ndlp = ndlp;
 	} else
 		mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 	mboxq->vport = ndlp->vport;
@@ -18434,15 +18749,8 @@
 			goto initial_priority;
 		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
 				"2844 No roundrobin failover FCF available\n");
-		if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
-			return LPFC_FCOE_FCF_NEXT_NONE;
-		else {
-			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
-				"3063 Only FCF available idx %d, flag %x\n",
-				next_fcf_index,
-			phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag);
-			return next_fcf_index;
-		}
+
+		return LPFC_FCOE_FCF_NEXT_NONE;
 	}
 
 	if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
@@ -18749,7 +19057,7 @@
 	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
 		goto out;
 	mqe = &mboxq->u.mqe;
-	mp = (struct lpfc_dmabuf *) mboxq->context1;
+	mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 	if (rc)
 		goto out;
@@ -18894,11 +19202,11 @@
 	struct lpfc_mbx_wr_object *wr_object;
 	LPFC_MBOXQ_t *mbox;
 	int rc = 0, i = 0;
-	uint32_t shdr_status, shdr_add_status;
+	uint32_t shdr_status, shdr_add_status, shdr_change_status;
 	uint32_t mbox_tmo;
-	union lpfc_sli4_cfg_shdr *shdr;
 	struct lpfc_dmabuf *dmabuf;
 	uint32_t written = 0;
+	bool check_change_status = false;
 
 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (!mbox)
@@ -18926,6 +19234,8 @@
 				(size - written);
 			written += (size - written);
 			bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
+			bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
+			check_change_status = true;
 		} else {
 			wr_object->u.request.bde[i].tus.f.bdeSize =
 				SLI4_PAGE_SIZE;
@@ -18942,9 +19252,39 @@
 		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
 	}
 	/* The IOCTL status is embedded in the mailbox subheader. */
-	shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
-	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
-	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	shdr_status = bf_get(lpfc_mbox_hdr_status,
+			     &wr_object->header.cfg_shdr.response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+				 &wr_object->header.cfg_shdr.response);
+	if (check_change_status) {
+		shdr_change_status = bf_get(lpfc_wr_object_change_status,
+					    &wr_object->u.response);
+		switch (shdr_change_status) {
+		case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"3198 Firmware write complete: System "
+					"reboot required to instantiate\n");
+			break;
+		case (LPFC_CHANGE_STATUS_FW_RESET):
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"3199 Firmware write complete: Firmware"
+					" reset required to instantiate\n");
+			break;
+		case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"3200 Firmware write complete: Port "
+					"Migration or PCI Reset required to "
+					"instantiate\n");
+			break;
+		case (LPFC_CHANGE_STATUS_PCI_RESET):
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"3201 Firmware write complete: PCI "
+					"Reset required to instantiate\n");
+			break;
+		default:
+			break;
+		}
+	}
 	if (rc != MBX_TIMEOUT)
 		mempool_free(mbox, phba->mbox_mem_pool);
 	if (shdr_status || shdr_add_status || rc) {
@@ -19000,7 +19340,7 @@
 			(mb->u.mb.mbxCommand == MBX_REG_VPI))
 			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
-			act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2;
+			act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
 			/* Put reference count for delayed processing */
 			act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
 			/* Unregister the RPI when mailbox complete */
@@ -19025,7 +19365,7 @@
 
 			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 			if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
-				ndlp = (struct lpfc_nodelist *)mb->context2;
+				ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
 				/* Unregister the RPI when mailbox complete */
 				mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
 				restart_loop = 1;
@@ -19045,13 +19385,14 @@
 	while (!list_empty(&mbox_cmd_list)) {
 		list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
 		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
-			mp = (struct lpfc_dmabuf *) (mb->context1);
+			mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
 			if (mp) {
 				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
 				kfree(mp);
 			}
-			ndlp = (struct lpfc_nodelist *) mb->context2;
-			mb->context2 = NULL;
+			mb->ctx_buf = NULL;
+			ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
+			mb->ctx_ndlp = NULL;
 			if (ndlp) {
 				spin_lock(shost->host_lock);
 				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
@@ -19097,7 +19438,7 @@
 
 	if (phba->link_flag & LS_MDS_LOOPBACK) {
 		/* MDS WQE are posted only to first WQ*/
-		wq = phba->sli4_hba.fcp_wq[0];
+		wq = phba->sli4_hba.hdwq[0].io_wq;
 		if (unlikely(!wq))
 			return 0;
 		pring = wq->pring;
@@ -19305,7 +19646,7 @@
  * @pwqe: Pointer to command WQE.
  **/
 int
-lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
+lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
 		    struct lpfc_iocbq *pwqe)
 {
 	union lpfc_wqe128 *wqe = &pwqe->wqe;
@@ -19319,7 +19660,8 @@
 	/* NVME_LS and NVME_LS ABTS requests. */
 	if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
 		pring =  phba->sli4_hba.nvmels_wq->pring;
-		spin_lock_irqsave(&pring->ring_lock, iflags);
+		lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
+					  qp, wq_access);
 		sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
 		if (!sglq) {
 			spin_unlock_irqrestore(&pring->ring_lock, iflags);
@@ -19347,12 +19689,13 @@
 	/* NVME_FCREQ and NVME_ABTS requests */
 	if (pwqe->iocb_flag & LPFC_IO_NVME) {
 		/* Get the IO distribution (hba_wqidx) for WQ assignment. */
-		pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring;
+		wq = qp->io_wq;
+		pring = wq->pring;
 
-		spin_lock_irqsave(&pring->ring_lock, iflags);
-		wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
-		bf_set(wqe_cqid, &wqe->generic.wqe_com,
-		      phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
+		bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
+
+		lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
+					  qp, wq_access);
 		ret = lpfc_sli4_wq_put(wq, wqe);
 		if (ret) {
 			spin_unlock_irqrestore(&pring->ring_lock, iflags);
@@ -19366,9 +19709,9 @@
 	/* NVMET requests */
 	if (pwqe->iocb_flag & LPFC_IO_NVMET) {
 		/* Get the IO distribution (hba_wqidx) for WQ assignment. */
-		pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring;
+		wq = qp->io_wq;
+		pring = wq->pring;
 
-		spin_lock_irqsave(&pring->ring_lock, iflags);
 		ctxp = pwqe->context2;
 		sglq = ctxp->ctxbuf->sglq;
 		if (pwqe->sli4_xritag ==  NO_XRI) {
@@ -19377,9 +19720,10 @@
 		}
 		bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
 		       pwqe->sli4_xritag);
-		wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
-		bf_set(wqe_cqid, &wqe->generic.wqe_com,
-		      phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
+		bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
+
+		lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
+					  qp, wq_access);
 		ret = lpfc_sli4_wq_put(wq, wqe);
 		if (ret) {
 			spin_unlock_irqrestore(&pring->ring_lock, iflags);
@@ -19391,3 +19735,931 @@
 	}
 	return WQE_ERROR;
 }
+
+#ifdef LPFC_MXP_STAT
+/**
+ * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count
+ * @phba: pointer to lpfc hba data structure.
+ * @hwqid: belong to which HWQ.
+ *
+ * The purpose of this routine is to take a snapshot of pbl, pvt and busy count
+ * 15 seconds after a test case is running.
+ *
+ * The user should call lpfc_debugfs_multixripools_write before running a test
+ * case to clear stat_snapshot_taken. Then the user starts a test case. While
+ * the test case is running, stat_snapshot_taken is incremented by 1 every time
+ * this routine is called from the heartbeat timer. When stat_snapshot_taken is
+ * equal to LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken.
+ **/
+void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
+{
+	struct lpfc_sli4_hdw_queue *qp;
+	struct lpfc_multixri_pool *multixri_pool;
+	struct lpfc_pvt_pool *pvt_pool;
+	struct lpfc_pbl_pool *pbl_pool;
+	u32 txcmplq_cnt;
+
+	qp = &phba->sli4_hba.hdwq[hwqid];
+	multixri_pool = qp->p_multixri_pool;
+	if (!multixri_pool)
+		return;
+
+	if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
+		pvt_pool = &qp->p_multixri_pool->pvt_pool;
+		pbl_pool = &qp->p_multixri_pool->pbl_pool;
+		txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
+
+		multixri_pool->stat_pbl_count = pbl_pool->count;
+		multixri_pool->stat_pvt_count = pvt_pool->count;
+		multixri_pool->stat_busy_count = txcmplq_cnt;
+	}
+
+	multixri_pool->stat_snapshot_taken++;
+}
+#endif
+
+/**
+ * lpfc_adjust_pvt_pool_count - Adjust private pool count
+ * @phba: pointer to lpfc hba data structure.
+ * @hwqid: index of the HWQ to adjust.
+ *
+ * This routine moves some XRIs from private to public pool when private pool
+ * is not busy.
+ **/
+void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid)
+{
+	struct lpfc_multixri_pool *multixri_pool;
+	u32 io_req_count;
+	u32 prev_io_req_count;
+
+	multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
+	if (!multixri_pool)
+		return;
+	io_req_count = multixri_pool->io_req_count;
+	prev_io_req_count = multixri_pool->prev_io_req_count;
+
+	if (prev_io_req_count != io_req_count) {
+		/* Private pool is busy */
+		multixri_pool->prev_io_req_count = io_req_count;
+	} else {
+		/* Private pool is not busy.
+		 * Move XRIs from private to public pool.
+		 */
+		lpfc_move_xri_pvt_to_pbl(phba, hwqid);
+	}
+}
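+
+/*
+ * Timing note (illustrative): because the heartbeat timer fires
+ * periodically, an io_req_count that is unchanged between two ticks means
+ * no IO used this HWQ in that interval; that is the idleness signal that
+ * triggers lpfc_move_xri_pvt_to_pbl() above.
+ */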
+
+/**
+ * lpfc_adjust_high_watermark - Adjust high watermark
+ * @phba: pointer to lpfc hba data structure.
+ * @hwqid: index of the HWQ to adjust.
+ *
+ * This routine sets the high watermark to the number of outstanding XRIs,
+ * while making sure the new value is between xri_limit/2 and xri_limit.
+ **/
+void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
+{
+	u32 new_watermark;
+	u32 watermark_max;
+	u32 watermark_min;
+	u32 xri_limit;
+	u32 txcmplq_cnt;
+	u32 abts_io_bufs;
+	struct lpfc_multixri_pool *multixri_pool;
+	struct lpfc_sli4_hdw_queue *qp;
+
+	qp = &phba->sli4_hba.hdwq[hwqid];
+	multixri_pool = qp->p_multixri_pool;
+	if (!multixri_pool)
+		return;
+	xri_limit = multixri_pool->xri_limit;
+
+	watermark_max = xri_limit;
+	watermark_min = xri_limit / 2;
+
+	txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
+	abts_io_bufs = qp->abts_scsi_io_bufs;
+	abts_io_bufs += qp->abts_nvme_io_bufs;
+
+	new_watermark = txcmplq_cnt + abts_io_bufs;
+	new_watermark = min(watermark_max, new_watermark);
+	new_watermark = max(watermark_min, new_watermark);
+	multixri_pool->pvt_pool.high_watermark = new_watermark;
+
+#ifdef LPFC_MXP_STAT
+	multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
+					  new_watermark);
+#endif
+}
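+
+/*
+ * Worked example (illustrative numbers): with xri_limit = 512 the
+ * watermark is clamped to the range [256, 512]. If txcmplq_cnt +
+ * abts_io_bufs is 100, high_watermark becomes 256; if it is 600, it
+ * becomes 512.
+ */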
+
+/**
+ * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
+ * @phba: pointer to lpfc hba data structure.
+ * @hwqid: index of the HWQ whose pvt_pool is drained.
+ *
+ * This routine is called from the heartbeat timer when pvt_pool is idle.
+ * All free XRIs are moved from the private to the public pool on hwqid in
+ * 2 steps. The first step moves (all - low_watermark) XRIs.
+ * The second step moves the rest of the XRIs.
+ **/
+void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
+{
+	struct lpfc_pbl_pool *pbl_pool;
+	struct lpfc_pvt_pool *pvt_pool;
+	struct lpfc_sli4_hdw_queue *qp;
+	struct lpfc_io_buf *lpfc_ncmd;
+	struct lpfc_io_buf *lpfc_ncmd_next;
+	unsigned long iflag;
+	struct list_head tmp_list;
+	u32 tmp_count;
+
+	qp = &phba->sli4_hba.hdwq[hwqid];
+	pbl_pool = &qp->p_multixri_pool->pbl_pool;
+	pvt_pool = &qp->p_multixri_pool->pvt_pool;
+	tmp_count = 0;
+
+	lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool);
+	lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool);
+
+	if (pvt_pool->count > pvt_pool->low_watermark) {
+		/* Step 1: move (all - low_watermark) from pvt_pool
+		 * to pbl_pool
+		 */
+
+		/* Move low watermark of bufs from pvt_pool to tmp_list */
+		INIT_LIST_HEAD(&tmp_list);
+		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+					 &pvt_pool->list, list) {
+			list_move_tail(&lpfc_ncmd->list, &tmp_list);
+			tmp_count++;
+			if (tmp_count >= pvt_pool->low_watermark)
+				break;
+		}
+
+		/* Move all bufs from pvt_pool to pbl_pool */
+		list_splice_init(&pvt_pool->list, &pbl_pool->list);
+
+		/* Move all bufs from tmp_list to pvt_pool */
+		list_splice(&tmp_list, &pvt_pool->list);
+
+		pbl_pool->count += (pvt_pool->count - tmp_count);
+		pvt_pool->count = tmp_count;
+	} else {
+		/* Step 2: move the rest from pvt_pool to pbl_pool */
+		list_splice_init(&pvt_pool->list, &pbl_pool->list);
+		pbl_pool->count += pvt_pool->count;
+		pvt_pool->count = 0;
+	}
+
+	spin_unlock(&pvt_pool->lock);
+	spin_unlock_irqrestore(&pbl_pool->lock, iflag);
+}
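+
+/*
+ * Worked example (illustrative numbers): with pvt_pool->count = 100 and
+ * low_watermark = 16, step 1 keeps the first 16 bufs in pvt_pool and
+ * moves the other 84 to pbl_pool. A later idle pass then takes the else
+ * branch and drains the remaining 16 as well.
+ */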
+
+/**
+ * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
+ * @phba: pointer to lpfc hba data structure
+ * @pbl_pool: specified public free XRI pool
+ * @pvt_pool: specified private free XRI pool
+ * @count: number of XRIs to move
+ *
+ * This routine tries to move some free common bufs from the specified pbl_pool
+ * to the specified pvt_pool. It might move fewer than count XRIs if there are
+ * not enough in the public pool.
+ *
+ * Return:
+ *   true - if XRIs are successfully moved from the specified pbl_pool to the
+ *          specified pvt_pool
+ *   false - if the specified pbl_pool is empty or locked by someone else
+ **/
+static bool
+_lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
+			  struct lpfc_pbl_pool *pbl_pool,
+			  struct lpfc_pvt_pool *pvt_pool, u32 count)
+{
+	struct lpfc_io_buf *lpfc_ncmd;
+	struct lpfc_io_buf *lpfc_ncmd_next;
+	unsigned long iflag;
+	int ret;
+
+	ret = spin_trylock_irqsave(&pbl_pool->lock, iflag);
+	if (ret) {
+		if (pbl_pool->count) {
+			/* Move a batch of XRIs from public to private pool */
+			lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool);
+			list_for_each_entry_safe(lpfc_ncmd,
+						 lpfc_ncmd_next,
+						 &pbl_pool->list,
+						 list) {
+				list_move_tail(&lpfc_ncmd->list,
+					       &pvt_pool->list);
+				pvt_pool->count++;
+				pbl_pool->count--;
+				count--;
+				if (count == 0)
+					break;
+			}
+
+			spin_unlock(&pvt_pool->lock);
+			spin_unlock_irqrestore(&pbl_pool->lock, iflag);
+			return true;
+		}
+		spin_unlock_irqrestore(&pbl_pool->lock, iflag);
+	}
+
+	return false;
+}
+
+/**
+ * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
+ * @phba: pointer to lpfc hba data structure.
+ * @hwqid: index of the HWQ to replenish.
+ * @count: number of XRIs to move
+ *
+ * This routine tries to find some free common bufs in one of the public pools
+ * using a round-robin method. The search always starts from the local hwqid,
+ * then continues from the HWQ found last time (rrb_next_hwqid). Once a
+ * non-empty public pool is found, a batch of free common bufs is moved to the
+ * private pool on hwqid. It might move fewer than count XRIs if the public
+ * pools do not have enough.
+ **/
+void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
+{
+	struct lpfc_multixri_pool *multixri_pool;
+	struct lpfc_multixri_pool *next_multixri_pool;
+	struct lpfc_pvt_pool *pvt_pool;
+	struct lpfc_pbl_pool *pbl_pool;
+	struct lpfc_sli4_hdw_queue *qp;
+	u32 next_hwqid;
+	u32 hwq_count;
+	int ret;
+
+	qp = &phba->sli4_hba.hdwq[hwqid];
+	multixri_pool = qp->p_multixri_pool;
+	pvt_pool = &multixri_pool->pvt_pool;
+	pbl_pool = &multixri_pool->pbl_pool;
+
+	/* Check if local pbl_pool is available */
+	ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
+	if (ret) {
+#ifdef LPFC_MXP_STAT
+		multixri_pool->local_pbl_hit_count++;
+#endif
+		return;
+	}
+
+	hwq_count = phba->cfg_hdw_queue;
+
+	/* Get the next hwqid which was found last time */
+	next_hwqid = multixri_pool->rrb_next_hwqid;
+
+	do {
+		/* Go to next hwq */
+		next_hwqid = (next_hwqid + 1) % hwq_count;
+
+		next_multixri_pool =
+			phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
+		pbl_pool = &next_multixri_pool->pbl_pool;
+
+		/* Check if the public free xri pool is available */
+		ret = _lpfc_move_xri_pbl_to_pvt(
+			phba, qp, pbl_pool, pvt_pool, count);
+
+		/* Exit while-loop if success or all hwqids are checked */
+	} while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);
+
+	/* Starting point for the next time */
+	multixri_pool->rrb_next_hwqid = next_hwqid;
+
+	if (!ret) {
+		/* stats: all public pools are empty */
+		multixri_pool->pbl_empty_count++;
+	}
+
+#ifdef LPFC_MXP_STAT
+	if (ret) {
+		if (next_hwqid == hwqid)
+			multixri_pool->local_pbl_hit_count++;
+		else
+			multixri_pool->other_pbl_hit_count++;
+	}
+#endif
+}
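+
+/*
+ * Illustrative search order (assumed values): with cfg_hdw_queue = 4,
+ * hwqid = 1 and rrb_next_hwqid = 2, the local pbl_pool on HWQ 1 is tried
+ * first, then HWQs 3, 0, 1 and 2 in turn until a batch is found or the
+ * loop wraps back to rrb_next_hwqid.
+ */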
+
+/**
+ * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark
+ * @phba: pointer to lpfc hba data structure.
+ * @hwqid: index of the HWQ to replenish.
+ *
+ * This routine gets a batch of XRIs from pbl_pool if the pvt_pool count is
+ * below the low watermark.
+ **/
+void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
+{
+	struct lpfc_multixri_pool *multixri_pool;
+	struct lpfc_pvt_pool *pvt_pool;
+
+	multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
+	pvt_pool = &multixri_pool->pvt_pool;
+
+	if (pvt_pool->count < pvt_pool->low_watermark)
+		lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
+}
+
+/**
+ * lpfc_release_io_buf - Return one IO buf back to free pool
+ * @phba: pointer to lpfc hba data structure.
+ * @lpfc_ncmd: IO buf to be returned.
+ * @qp: pointer to the HWQ the buffer belongs to.
+ *
+ * This routine returns one IO buf back to free pool. If this is an urgent IO,
+ * the IO buf is returned to expedite pool. If cfg_xri_rebalancing==1,
+ * the IO buf is returned to pbl_pool or pvt_pool based on watermark and
+ * xri_limit.  If cfg_xri_rebalancing==0, the IO buf is returned to
+ * lpfc_io_buf_list_put.
+ **/
+void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
+			 struct lpfc_sli4_hdw_queue *qp)
+{
+	unsigned long iflag;
+	struct lpfc_pbl_pool *pbl_pool;
+	struct lpfc_pvt_pool *pvt_pool;
+	struct lpfc_epd_pool *epd_pool;
+	u32 txcmplq_cnt;
+	u32 xri_owned;
+	u32 xri_limit;
+	u32 abts_io_bufs;
+
+	/* MUST zero fields if buffer is reused by another protocol */
+	lpfc_ncmd->nvmeCmd = NULL;
+	lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
+	lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL;
+
+	if (phba->cfg_xri_rebalancing) {
+		if (lpfc_ncmd->expedite) {
+			/* Return to expedite pool */
+			epd_pool = &phba->epd_pool;
+			spin_lock_irqsave(&epd_pool->lock, iflag);
+			list_add_tail(&lpfc_ncmd->list, &epd_pool->list);
+			epd_pool->count++;
+			spin_unlock_irqrestore(&epd_pool->lock, iflag);
+			return;
+		}
+
+		/* Avoid invalid access if an IO sneaks in and is being rejected
+		 * just _after_ xri pools are destroyed in lpfc_offline.
+		 * Nothing much can be done at this point.
+		 */
+		if (!qp->p_multixri_pool)
+			return;
+
+		pbl_pool = &qp->p_multixri_pool->pbl_pool;
+		pvt_pool = &qp->p_multixri_pool->pvt_pool;
+
+		txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
+		abts_io_bufs = qp->abts_scsi_io_bufs;
+		abts_io_bufs += qp->abts_nvme_io_bufs;
+
+		xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
+		xri_limit = qp->p_multixri_pool->xri_limit;
+
+#ifdef LPFC_MXP_STAT
+		if (xri_owned <= xri_limit)
+			qp->p_multixri_pool->below_limit_count++;
+		else
+			qp->p_multixri_pool->above_limit_count++;
+#endif
+
+		/* XRI goes to either public or private free xri pool
+		 *     based on watermark and xri_limit
+		 */
+		if ((pvt_pool->count < pvt_pool->low_watermark) ||
+		    (xri_owned < xri_limit &&
+		     pvt_pool->count < pvt_pool->high_watermark)) {
+			lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag,
+						  qp, free_pvt_pool);
+			list_add_tail(&lpfc_ncmd->list,
+				      &pvt_pool->list);
+			pvt_pool->count++;
+			spin_unlock_irqrestore(&pvt_pool->lock, iflag);
+		} else {
+			lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag,
+						  qp, free_pub_pool);
+			list_add_tail(&lpfc_ncmd->list,
+				      &pbl_pool->list);
+			pbl_pool->count++;
+			spin_unlock_irqrestore(&pbl_pool->lock, iflag);
+		}
+	} else {
+		lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag,
+					  qp, free_xri);
+		list_add_tail(&lpfc_ncmd->list,
+			      &qp->lpfc_io_buf_list_put);
+		qp->put_io_bufs++;
+		spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
+				       iflag);
+	}
+
+	if (phba->cfg_xpsgl && !phba->nvmet_support &&
+	    !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
+		lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
+
+	if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
+		lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
+}
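+
+/*
+ * Worked example for the rebalancing path above (illustrative numbers):
+ * with low_watermark = 16, high_watermark = 64 and xri_limit = 128, a
+ * released XRI goes to pvt_pool when pvt_pool->count < 16, or when
+ * xri_owned < 128 and pvt_pool->count < 64; in every other case it goes
+ * to pbl_pool.
+ */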
+
+/**
+ * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
+ * @phba: pointer to lpfc hba data structure.
+ * @pvt_pool: pointer to private pool data structure.
+ * @ndlp: pointer to lpfc nodelist data structure.
+ *
+ * This routine tries to get one free IO buf from private pool.
+ *
+ * Return:
+ *   pointer to one free IO buf - if private pool is not empty
+ *   NULL - if private pool is empty
+ **/
+static struct lpfc_io_buf *
+lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
+				  struct lpfc_sli4_hdw_queue *qp,
+				  struct lpfc_pvt_pool *pvt_pool,
+				  struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_io_buf *lpfc_ncmd;
+	struct lpfc_io_buf *lpfc_ncmd_next;
+	unsigned long iflag;
+
+	lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
+	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+				 &pvt_pool->list, list) {
+		if (lpfc_test_rrq_active(
+			phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
+			continue;
+		list_del(&lpfc_ncmd->list);
+		pvt_pool->count--;
+		spin_unlock_irqrestore(&pvt_pool->lock, iflag);
+		return lpfc_ncmd;
+	}
+	spin_unlock_irqrestore(&pvt_pool->lock, iflag);
+
+	return NULL;
+}
+
+/**
+ * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine tries to get one free IO buf from expedite pool.
+ *
+ * Return:
+ *   pointer to one free IO buf - if expedite pool is not empty
+ *   NULL - if expedite pool is empty
+ **/
+static struct lpfc_io_buf *
+lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
+{
+	struct lpfc_io_buf *lpfc_ncmd;
+	struct lpfc_io_buf *lpfc_ncmd_next;
+	unsigned long iflag;
+	struct lpfc_epd_pool *epd_pool;
+
+	epd_pool = &phba->epd_pool;
+	lpfc_ncmd = NULL;
+
+	spin_lock_irqsave(&epd_pool->lock, iflag);
+	if (epd_pool->count > 0) {
+		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+					 &epd_pool->list, list) {
+			list_del(&lpfc_ncmd->list);
+			epd_pool->count--;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&epd_pool->lock, iflag);
+
+	return lpfc_ncmd;
+}
+
+/**
+ * lpfc_get_io_buf_from_multixri_pools - Get one free IO buf
+ * @phba: pointer to lpfc hba data structure.
+ * @ndlp: pointer to lpfc nodelist data structure.
+ * @hwqid: index of the HWQ to allocate from
+ * @expedite: 1 means this request is urgent.
+ *
+ * This routine will do the following actions and then return a pointer to
+ * one free IO buf.
+ *
+ * 1. If the private free xri pool is empty, move some XRIs from the public
+ *    to the private pool.
+ * 2. Get one XRI from private free xri pool.
+ * 3. If we fail to get one from pvt_pool and this is an expedite request,
+ *    get one free xri from expedite pool.
+ *
+ * Note: ndlp is only used on SCSI side for RRQ testing.
+ *       The caller should pass NULL for ndlp on NVME side.
+ *
+ * Return:
+ *   pointer to one free IO buf - if one could be obtained
+ *   NULL - if neither the private pool nor, for expedite requests, the
+ *          expedite pool has a free buffer
+ **/
+static struct lpfc_io_buf *
+lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
+				    struct lpfc_nodelist *ndlp,
+				    int hwqid, int expedite)
+{
+	struct lpfc_sli4_hdw_queue *qp;
+	struct lpfc_multixri_pool *multixri_pool;
+	struct lpfc_pvt_pool *pvt_pool;
+	struct lpfc_io_buf *lpfc_ncmd;
+
+	qp = &phba->sli4_hba.hdwq[hwqid];
+	lpfc_ncmd = NULL;
+	multixri_pool = qp->p_multixri_pool;
+	pvt_pool = &multixri_pool->pvt_pool;
+	multixri_pool->io_req_count++;
+
+	/* If pvt_pool is empty, move some XRIs from public to private pool */
+	if (pvt_pool->count == 0)
+		lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
+
+	/* Get one XRI from private free xri pool */
+	lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);
+
+	if (lpfc_ncmd) {
+		lpfc_ncmd->hdwq = qp;
+		lpfc_ncmd->hdwq_no = hwqid;
+	} else if (expedite) {
+		/* If we fail to get one from pvt_pool and this is an expedite
+		 * request, get one free xri from expedite pool.
+		 */
+		lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
+	}
+
+	return lpfc_ncmd;
+}
+
+static inline struct lpfc_io_buf *
+lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
+{
+	struct lpfc_sli4_hdw_queue *qp;
+	struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;
+
+	qp = &phba->sli4_hba.hdwq[idx];
+	list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
+				 &qp->lpfc_io_buf_list_get, list) {
+		if (lpfc_test_rrq_active(phba, ndlp,
+					 lpfc_cmd->cur_iocbq.sli4_lxritag))
+			continue;
+
+		if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
+			continue;
+
+		list_del_init(&lpfc_cmd->list);
+		qp->get_io_bufs--;
+		lpfc_cmd->hdwq = qp;
+		lpfc_cmd->hdwq_no = idx;
+		return lpfc_cmd;
+	}
+	return NULL;
+}
+
+/**
+ * lpfc_get_io_buf - Get one IO buffer from free pool
+ * @phba: The HBA for which this call is being executed.
+ * @ndlp: pointer to lpfc nodelist data structure.
+ * @hwqid: index of the HWQ to allocate from
+ * @expedite: 1 means this request is urgent.
+ *
+ * This routine gets one IO buffer from the free pool. If cfg_xri_rebalancing==1,
+ * it removes an IO buffer from the multiXRI pools. If cfg_xri_rebalancing==0,
+ * it removes an IO buffer from the head of the @hdwq io_buf_list and returns
+ * it to the caller.
+ *
+ * Note: ndlp is only used on SCSI side for RRQ testing.
+ *       The caller should pass NULL for ndlp on NVME side.
+ *
+ * Return codes:
+ *   NULL - Error
+ *   Pointer to lpfc_io_buf - Success
+ **/
+struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
+				    struct lpfc_nodelist *ndlp,
+				    u32 hwqid, int expedite)
+{
+	struct lpfc_sli4_hdw_queue *qp;
+	unsigned long iflag;
+	struct lpfc_io_buf *lpfc_cmd;
+
+	qp = &phba->sli4_hba.hdwq[hwqid];
+	lpfc_cmd = NULL;
+
+	if (phba->cfg_xri_rebalancing)
+		lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
+			phba, ndlp, hwqid, expedite);
+	else {
+		lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
+					  qp, alloc_xri_get);
+		if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
+			lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
+		if (!lpfc_cmd) {
+			lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
+					  qp, alloc_xri_put);
+			list_splice(&qp->lpfc_io_buf_list_put,
+				    &qp->lpfc_io_buf_list_get);
+			qp->get_io_bufs += qp->put_io_bufs;
+			INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
+			qp->put_io_bufs = 0;
+			spin_unlock(&qp->io_buf_list_put_lock);
+			if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
+			    expedite)
+				lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
+		}
+		spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
+	}
+
+	return lpfc_cmd;
+}
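+
+/*
+ * Typical pairing (a sketch; error handling elided):
+ *
+ *	lpfc_cmd = lpfc_get_io_buf(phba, ndlp, hwqid, 0);
+ *	if (lpfc_cmd) {
+ *		(build and issue the IO via lpfc_cmd->cur_iocbq)
+ *		lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
+ *	}
+ *
+ * On the NVME side ndlp is passed as NULL; RRQ testing applies only on
+ * the SCSI side.
+ */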
+
+/**
+ * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
+ * @phba: The HBA for which this call is being executed.
+ * @lpfc_buf: IO buf structure to append the SGL chunk
+ *
+ * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool,
+ * and will allocate an SGL chunk if the pool is empty.
+ *
+ * Return codes:
+ *   NULL - Error
+ *   Pointer to sli4_hybrid_sgl - Success
+ **/
+struct sli4_hybrid_sgl *
+lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
+{
+	struct sli4_hybrid_sgl *list_entry = NULL;
+	struct sli4_hybrid_sgl *tmp = NULL;
+	struct sli4_hybrid_sgl *allocated_sgl = NULL;
+	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
+	struct list_head *buf_list = &hdwq->sgl_list;
+
+	spin_lock_irq(&hdwq->hdwq_lock);
+
+	if (likely(!list_empty(buf_list))) {
+		/* break off 1 chunk from the sgl_list */
+		list_for_each_entry_safe(list_entry, tmp,
+					 buf_list, list_node) {
+			list_move_tail(&list_entry->list_node,
+				       &lpfc_buf->dma_sgl_xtra_list);
+			break;
+		}
+	} else {
+		/* allocate more */
+		spin_unlock_irq(&hdwq->hdwq_lock);
+		tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
+				   cpu_to_node(smp_processor_id()));
+		if (!tmp) {
+			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+					"8353 error kmalloc memory for HDWQ "
+					"%d %s\n",
+					lpfc_buf->hdwq_no, __func__);
+			return NULL;
+		}
+
+		tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
+					      GFP_ATOMIC, &tmp->dma_phys_sgl);
+		if (!tmp->dma_sgl) {
+			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+					"8354 error pool_alloc memory for HDWQ "
+					"%d %s\n",
+					lpfc_buf->hdwq_no, __func__);
+			kfree(tmp);
+			return NULL;
+		}
+
+		spin_lock_irq(&hdwq->hdwq_lock);
+		list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
+	}
+
+	allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
+					struct sli4_hybrid_sgl,
+					list_node);
+
+	spin_unlock_irq(&hdwq->hdwq_lock);
+
+	return allocated_sgl;
+}
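+
+/*
+ * Typical pairing (a sketch): a caller that outgrows the embedded SGL
+ * chains extra chunks, then returns them all on IO completion:
+ *
+ *	sgl = lpfc_get_sgl_per_hdwq(phba, lpfc_buf);
+ *	if (sgl)
+ *		(use sgl->dma_sgl / sgl->dma_phys_sgl for the extra SGEs)
+ *	...
+ *	lpfc_put_sgl_per_hdwq(phba, lpfc_buf);
+ */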
+
+/**
+ * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool
+ * @phba: The HBA for which this call is being executed.
+ * @lpfc_buf: IO buf structure with the SGL chunk
+ *
+ * This routine puts one SGL chunk buffer into hdwq's SGL chunk pool.
+ *
+ * Return codes:
+ *   0 - Success
+ *   -EINVAL - Error
+ **/
+int
+lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
+{
+	int rc = 0;
+	struct sli4_hybrid_sgl *list_entry = NULL;
+	struct sli4_hybrid_sgl *tmp = NULL;
+	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
+	struct list_head *buf_list = &hdwq->sgl_list;
+
+	spin_lock_irq(&hdwq->hdwq_lock);
+
+	if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
+		list_for_each_entry_safe(list_entry, tmp,
+					 &lpfc_buf->dma_sgl_xtra_list,
+					 list_node) {
+			list_move_tail(&list_entry->list_node,
+				       buf_list);
+		}
+	} else {
+		rc = -EINVAL;
+	}
+
+	spin_unlock_irq(&hdwq->hdwq_lock);
+	return rc;
+}
+
+/**
+ * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool
+ * @phba: phba object
+ * @hdwq: hdwq to cleanup sgl buff resources on
+ *
+ * This routine frees all SGL chunks of hdwq SGL chunk pool.
+ *
+ * Return codes:
+ *   None
+ **/
+void
+lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
+		       struct lpfc_sli4_hdw_queue *hdwq)
+{
+	struct list_head *buf_list = &hdwq->sgl_list;
+	struct sli4_hybrid_sgl *list_entry = NULL;
+	struct sli4_hybrid_sgl *tmp = NULL;
+
+	spin_lock_irq(&hdwq->hdwq_lock);
+
+	/* Free sgl pool */
+	list_for_each_entry_safe(list_entry, tmp,
+				 buf_list, list_node) {
+		dma_pool_free(phba->lpfc_sg_dma_buf_pool,
+			      list_entry->dma_sgl,
+			      list_entry->dma_phys_sgl);
+		list_del(&list_entry->list_node);
+		kfree(list_entry);
+	}
+
+	spin_unlock_irq(&hdwq->hdwq_lock);
+}
+
+/**
+ * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq
+ * @phba: The HBA for which this call is being executed.
+ * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer
+ *
+ * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool,
+ * and will allocate a CMD/RSP buffer if the pool is empty.
+ *
+ * Return codes:
+ *   NULL - Error
+ *   Pointer to fcp_cmd_rsp_buf - Success
+ **/
+struct fcp_cmd_rsp_buf *
+lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
+			      struct lpfc_io_buf *lpfc_buf)
+{
+	struct fcp_cmd_rsp_buf *list_entry = NULL;
+	struct fcp_cmd_rsp_buf *tmp = NULL;
+	struct fcp_cmd_rsp_buf *allocated_buf = NULL;
+	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
+	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
+
+	spin_lock_irq(&hdwq->hdwq_lock);
+
+	if (likely(!list_empty(buf_list))) {
+		/* break off 1 chunk from the list */
+		list_for_each_entry_safe(list_entry, tmp,
+					 buf_list,
+					 list_node) {
+			list_move_tail(&list_entry->list_node,
+				       &lpfc_buf->dma_cmd_rsp_list);
+			break;
+		}
+	} else {
+		/* allocate more */
+		spin_unlock_irq(&hdwq->hdwq_lock);
+		tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
+				   cpu_to_node(smp_processor_id()));
+		if (!tmp) {
+			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+					"8355 error kmalloc memory for HDWQ "
+					"%d %s\n",
+					lpfc_buf->hdwq_no, __func__);
+			return NULL;
+		}
+
+		tmp->fcp_cmnd = dma_pool_alloc(phba->lpfc_cmd_rsp_buf_pool,
+						GFP_ATOMIC,
+						&tmp->fcp_cmd_rsp_dma_handle);
+
+		if (!tmp->fcp_cmnd) {
+			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+					"8356 error pool_alloc memory for HDWQ "
+					"%d %s\n",
+					lpfc_buf->hdwq_no, __func__);
+			kfree(tmp);
+			return NULL;
+		}
+
+		tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
+				sizeof(struct fcp_cmnd));
+
+		spin_lock_irq(&hdwq->hdwq_lock);
+		list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
+	}
+
+	allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
+					struct fcp_cmd_rsp_buf,
+					list_node);
+
+	spin_unlock_irq(&hdwq->hdwq_lock);
+
+	return allocated_buf;
+}
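+
+/*
+ * Layout note: fcp_cmnd and fcp_rsp share a single DMA allocation; the
+ * response sits immediately after the command, i.e.
+ * fcp_rsp == (struct fcp_rsp *)((uint8_t *)fcp_cmnd +
+ * sizeof(struct fcp_cmnd)), so one dma handle (fcp_cmd_rsp_dma_handle)
+ * covers both.
+ */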
+
+/**
+ * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool
+ * @phba: The HBA for which this call is being executed.
+ * @lpfc_buf: IO buf structure with the CMD/RSP buf
+ *
+ * This routine puts one CMD/RSP buffer back into the hdwq's CMD/RSP pool.
+ *
+ * Return codes:
+ *   0 - Success
+ *   -EINVAL - Error
+ **/
+int
+lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
+			      struct lpfc_io_buf *lpfc_buf)
+{
+	int rc = 0;
+	struct fcp_cmd_rsp_buf *list_entry = NULL;
+	struct fcp_cmd_rsp_buf *tmp = NULL;
+	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
+	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
+
+	spin_lock_irq(&hdwq->hdwq_lock);
+
+	if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
+		list_for_each_entry_safe(list_entry, tmp,
+					 &lpfc_buf->dma_cmd_rsp_list,
+					 list_node) {
+			list_move_tail(&list_entry->list_node,
+				       buf_list);
+		}
+	} else {
+		rc = -EINVAL;
+	}
+
+	spin_unlock_irq(&hdwq->hdwq_lock);
+	return rc;
+}
+
+/**
+ * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool
+ * @phba: phba object
+ * @hdwq: hdwq to cleanup cmd rsp buff resources on
+ *
+ * This routine frees all CMD/RSP buffers of hdwq's CMD/RSP buf pool.
+ *
+ * Return codes:
+ *   None
+ **/
+void
+lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
+			       struct lpfc_sli4_hdw_queue *hdwq)
+{
+	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
+	struct fcp_cmd_rsp_buf *list_entry = NULL;
+	struct fcp_cmd_rsp_buf *tmp = NULL;
+
+	spin_lock_irq(&hdwq->hdwq_lock);
+
+	/* Free cmd_rsp buf pool */
+	list_for_each_entry_safe(list_entry, tmp,
+				 buf_list,
+				 list_node) {
+		dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
+			      list_entry->fcp_cmnd,
+			      list_entry->fcp_cmd_rsp_dma_handle);
+		list_del(&list_entry->list_node);
+		kfree(list_entry);
+	}
+
+	spin_unlock_irq(&hdwq->hdwq_lock);
+}
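
Taken together, the three routines above implement a per-hardware-queue free-list cache in front of a dma_pool: the get path tries the hdwq cache first and only falls back to an atomic allocation performed with the lock dropped, the put path recycles chunks back to the cache, and the free path drains it at teardown. A minimal sketch of the same pattern; the names (my_buf, my_cache_get, my_cache_put) are hypothetical and error reporting is trimmed:

	struct my_buf {
		struct list_head node;
		void *cpu_addr;
		dma_addr_t dma_addr;
	};

	static struct my_buf *my_cache_get(struct dma_pool *pool,
					   struct list_head *cache,
					   spinlock_t *lock)
	{
		struct my_buf *buf;

		spin_lock_irq(lock);
		buf = list_first_entry_or_null(cache, struct my_buf, node);
		if (buf)
			list_del(&buf->node);	/* fast path: reuse a chunk */
		spin_unlock_irq(lock);
		if (buf)
			return buf;

		/* slow path: allocate with the lock dropped, as the driver does */
		buf = kmalloc(sizeof(*buf), GFP_ATOMIC);
		if (!buf)
			return NULL;
		buf->cpu_addr = dma_pool_alloc(pool, GFP_ATOMIC, &buf->dma_addr);
		if (!buf->cpu_addr) {
			kfree(buf);
			return NULL;
		}
		return buf;
	}

	static void my_cache_put(struct my_buf *buf, struct list_head *cache,
				 spinlock_t *lock)
	{
		spin_lock_irq(lock);
		list_add_tail(&buf->node, cache);	/* recycle, don't free */
		spin_unlock_irq(lock);
	}

The driver adds one refinement: a live chunk is parked on the IO buffer's dma_cmd_rsp_list (or dma_sgl_xtra_list for SGLs), so a single put call hands back everything a command borrowed.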
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 34b7ab6..37fbcb4 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -20,6 +20,10 @@
  * included with this package.                                     *
  *******************************************************************/
 
+#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_SCSI_LPFC_DEBUG_FS)
+#define CONFIG_SCSI_LPFC_DEBUG_FS
+#endif
+
 /* forward declaration for LPFC_IOCB_t's use */
 struct lpfc_hba;
 struct lpfc_vport;
@@ -33,6 +37,7 @@
 
 struct lpfc_cq_event {
 	struct list_head list;
+	uint16_t hdwq;
 	union {
 		struct lpfc_mcqe		mcqe_cmpl;
 		struct lpfc_acqe_link		acqe_link;
@@ -144,9 +149,9 @@
 		MAILBOX_t mb;		/* Mailbox cmd */
 		struct lpfc_mqe mqe;
 	} u;
-	struct lpfc_vport *vport;/* virtual port pointer */
-	void *context1;		/* caller context information */
-	void *context2;		/* caller context information */
+	struct lpfc_vport *vport; /* virtual port pointer */
+	void *ctx_ndlp;		  /* caller ndlp information */
+	void *ctx_buf;		  /* caller buffer information */
 	void *context3;
 
 	void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *);
@@ -322,6 +327,10 @@
 #define LPFC_SLI_ASYNC_MBX_BLK    0x2000 /* Async mailbox is blocked */
 #define LPFC_SLI_SUPPRESS_RSP     0x4000 /* Suppress RSP feature is supported */
 #define LPFC_SLI_USE_EQDR         0x8000 /* EQ Delay Register is supported */
+#define LPFC_QUEUE_FREE_INIT	  0x10000 /* Queue freeing is in progress */
+#define LPFC_QUEUE_FREE_WAIT	  0x20000 /* Hold Queue free as it is being
+					   * used outside worker thread
+					   */
 
 	struct lpfc_sli_ring *sli3_ring;
 
@@ -351,3 +360,93 @@
 #define LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO	300
 /* Timeout for other flash-based outstanding mbox command (Seconds) */
 #define LPFC_MBOX_TMO_FLASH_CMD			300
+
+struct lpfc_io_buf {
+	/* Common fields */
+	struct list_head list;
+	void *data;
+
+	dma_addr_t dma_handle;
+	dma_addr_t dma_phys_sgl;
+
+	struct sli4_sge *dma_sgl; /* initial segment chunk */
+
+	/* linked list of extra sli4_hybrid_sge */
+	struct list_head dma_sgl_xtra_list;
+
+	/* list head for fcp_cmd_rsp buf */
+	struct list_head dma_cmd_rsp_list;
+
+	struct lpfc_iocbq cur_iocbq;
+	struct lpfc_sli4_hdw_queue *hdwq;
+	uint16_t hdwq_no;
+	uint16_t cpu;
+
+	struct lpfc_nodelist *ndlp;
+	uint32_t timeout;
+	uint16_t flags;  /* TBD convert exch_busy to flags */
+#define LPFC_SBUF_XBUSY		0x1	/* SLI4 hba reported XB on WCQE cmpl */
+#define LPFC_SBUF_BUMP_QDEPTH	0x2	/* bumped queue depth counter */
+					/* External DIF device IO conversions */
+#define LPFC_SBUF_NORMAL_DIF	0x4	/* normal mode to insert/strip */
+#define LPFC_SBUF_PASS_DIF	0x8	/* insert/strip mode to passthru */
+#define LPFC_SBUF_NOT_POSTED    0x10    /* SGL failed post to FW. */
+	uint16_t exch_busy;     /* SLI4 hba reported XB on complete WCQE */
+	uint16_t status;	/* From IOCB Word 7- ulpStatus */
+	uint32_t result;	/* From IOCB Word 4. */
+
+	uint32_t   seg_cnt;	/* Number of scatter-gather segments returned by
+				 * dma_map_sg.  The driver needs this for calls
+				 * to dma_unmap_sg.
+				 */
+	unsigned long start_time;
+	spinlock_t buf_lock;	/* lock used in case of simultaneous abort */
+	bool expedite;		/* this is an expedite io_buf */
+
+	union {
+		/* SCSI specific fields */
+		struct {
+			struct scsi_cmnd *pCmd;
+			struct lpfc_rport_data *rdata;
+			uint32_t prot_seg_cnt;  /* seg_cnt's counterpart for
+						 * protection data
+						 */
+
+			/*
+			 * data and dma_handle are the kernel virtual and bus
+			 * address of the dma-able buffer containing the
+			 * fcp_cmd, fcp_rsp and a scatter gather bde list that
+			 * supports the sg_tablesize value.
+			 */
+			struct fcp_cmnd *fcp_cmnd;
+			struct fcp_rsp *fcp_rsp;
+
+			wait_queue_head_t *waitq;
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+			/* Used to restore any changes to protection data for
+			 * error injection
+			 */
+			void *prot_data_segment;
+			uint32_t prot_data;
+			uint32_t prot_data_type;
+#define	LPFC_INJERR_REFTAG	1
+#define	LPFC_INJERR_APPTAG	2
+#define	LPFC_INJERR_GUARD	3
+#endif
+		};
+
+		/* NVME specific fields */
+		struct {
+			struct nvmefc_fcp_req *nvmeCmd;
+			uint16_t qidx;
+		};
+	};
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	uint64_t ts_cmd_start;
+	uint64_t ts_last_cmd;
+	uint64_t ts_cmd_wqput;
+	uint64_t ts_isr_cmpl;
+	uint64_t ts_data_nvme;
+#endif
+};
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 399c001..0d4882a 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2009-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -20,6 +20,10 @@
  * included with this package.                                     *
  *******************************************************************/
 
+#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_SCSI_LPFC_DEBUG_FS)
+#define CONFIG_SCSI_LPFC_DEBUG_FS
+#endif
+
 #define LPFC_ACTIVE_MBOX_WAIT_CNT               100
 #define LPFC_XRI_EXCH_BUSY_WAIT_TMO		10000
 #define LPFC_XRI_EXCH_BUSY_WAIT_T1   		10
@@ -36,14 +40,14 @@
 #define LPFC_NEMBED_MBOX_SGL_CNT		254
 
 /* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */
-#define LPFC_HBA_IO_CHAN_MIN	0
-#define LPFC_HBA_IO_CHAN_MAX	32
-#define LPFC_FCP_IO_CHAN_DEF	4
-#define LPFC_NVME_IO_CHAN_DEF	0
+#define LPFC_HBA_HDWQ_MIN	0
+#define LPFC_HBA_HDWQ_MAX	128
+#define LPFC_HBA_HDWQ_DEF	0
 
-/* Number of channels used for Flash Optimized Fabric (FOF) operations */
-
-#define LPFC_FOF_IO_CHAN_NUM       1
+/* FCP MQ queue count limiting */
+#define LPFC_FCP_MQ_THRESHOLD_MIN	0
+#define LPFC_FCP_MQ_THRESHOLD_MAX	256
+#define LPFC_FCP_MQ_THRESHOLD_DEF	8
 
 /*
  * Provide the default FCF Record attributes used by the driver
@@ -107,29 +111,13 @@
 enum lpfc_sli4_queue_subtype {
 	LPFC_NONE,
 	LPFC_MBOX,
-	LPFC_FCP,
+	LPFC_IO,
 	LPFC_ELS,
-	LPFC_NVME,
 	LPFC_NVMET,
 	LPFC_NVME_LS,
 	LPFC_USOL
 };
 
-union sli4_qe {
-	void *address;
-	struct lpfc_eqe *eqe;
-	struct lpfc_cqe *cqe;
-	struct lpfc_mcqe *mcqe;
-	struct lpfc_wcqe_complete *wcqe_complete;
-	struct lpfc_wcqe_release *wcqe_release;
-	struct sli4_wcqe_xri_aborted *wcqe_xri_aborted;
-	struct lpfc_rcqe_complete *rcqe_complete;
-	struct lpfc_mqe *mqe;
-	union  lpfc_wqe *wqe;
-	union  lpfc_wqe128 *wqe128;
-	struct lpfc_rqe *rqe;
-};
-
 /* RQ buffer list */
 struct lpfc_rqb {
 	uint16_t entry_count;	  /* Current number of RQ slots */
@@ -152,33 +140,66 @@
 	struct list_head child_list;
 	struct list_head page_list;
 	struct list_head sgl_list;
+	struct list_head cpu_list;
 	uint32_t entry_count;	/* Number of entries to support on the queue */
 	uint32_t entry_size;	/* Size of each queue entry. */
-	uint32_t entry_repost;	/* Count of entries before doorbell is rung */
-#define LPFC_EQ_REPOST		8
-#define LPFC_MQ_REPOST		8
-#define LPFC_CQ_REPOST		64
-#define LPFC_RQ_REPOST		64
-#define LPFC_RELEASE_NOTIFICATION_INTERVAL	32  /* For WQs */
+	uint32_t entry_cnt_per_pg;
+	uint32_t notify_interval; /* Queue Notification Interval
+				   * For chip->host queues (EQ, CQ, RQ):
+				   *  specifies the interval (number of
+				   *  entries) where the doorbell is rung to
+				   *  notify the chip of entry consumption.
+				   * For host->chip queues (WQ):
+				   *  specifies the interval (number of
+				   *  entries) where consumption CQE is
+				   *  requested to indicate WQ entries
+				   *  consumed by the chip.
+				   * Not used on an MQ.
+				   */
+#define LPFC_EQ_NOTIFY_INTRVL	16
+#define LPFC_CQ_NOTIFY_INTRVL	16
+#define LPFC_WQ_NOTIFY_INTRVL	16
+#define LPFC_RQ_NOTIFY_INTRVL	16
+	uint32_t max_proc_limit; /* Queue Processing Limit
+				  * For chip->host queues (EQ, CQ):
+				  *  specifies the maximum number of
+				  *  entries to be consumed in one
+				  *  processing iteration sequence. Queue
+				  *  will be rearmed after each iteration.
+				  * Not used on an MQ, RQ or WQ.
+				  */
+#define LPFC_EQ_MAX_PROC_LIMIT		256
+#define LPFC_CQ_MIN_PROC_LIMIT		64
+#define LPFC_CQ_MAX_PROC_LIMIT		LPFC_CQE_EXP_COUNT	// 4096
+#define LPFC_CQ_DEF_MAX_PROC_LIMIT	LPFC_CQE_DEF_COUNT	// 1024
+#define LPFC_CQ_MIN_THRESHOLD_TO_POLL	64
+#define LPFC_CQ_MAX_THRESHOLD_TO_POLL	LPFC_CQ_DEF_MAX_PROC_LIMIT
+#define LPFC_CQ_DEF_THRESHOLD_TO_POLL	LPFC_CQ_DEF_MAX_PROC_LIMIT
+	uint32_t queue_claimed; /* indicates queue is being processed */
 	uint32_t queue_id;	/* Queue ID assigned by the hardware */
 	uint32_t assoc_qid;     /* Queue ID associated with, for CQ/WQ/MQ */
 	uint32_t host_index;	/* The host's index for putting or getting */
 	uint32_t hba_index;	/* The last known hba index for get or put */
+	uint32_t q_mode;
 
 	struct lpfc_sli_ring *pring; /* ptr to io ring associated with q */
 	struct lpfc_rqb *rqbp;	/* ptr to RQ buffers */
 
-	uint32_t q_mode;
 	uint16_t page_count;	/* Number of pages allocated for this queue */
 	uint16_t page_size;	/* size of page allocated for this queue */
 #define LPFC_EXPANDED_PAGE_SIZE	16384
 #define LPFC_DEFAULT_PAGE_SIZE	4096
-	uint16_t chann;		/* IO channel this queue is associated with */
+	uint16_t chann;		/* Hardware Queue association WQ/CQ */
+				/* CPU affinity for EQ */
+#define LPFC_FIND_BY_EQ		0
+#define LPFC_FIND_BY_HDWQ	1
 	uint8_t db_format;
 #define LPFC_DB_RING_FORMAT	0x01
 #define LPFC_DB_LIST_FORMAT	0x02
 	uint8_t q_flag;
 #define HBA_NVMET_WQFULL	0x1 /* We hit WQ Full condition for NVMET */
+#define HBA_NVMET_CQ_NOTIFY	0x1 /* LPFC_NVMET_CQ_NOTIFY CQEs this EQE */
+#define LPFC_NVMET_CQ_NOTIFY	4
 	void __iomem *db_regaddr;
 	uint16_t dpp_enable;
 	uint16_t dpp_id;
@@ -212,23 +233,27 @@
 #define	RQ_buf_posted		q_cnt_3
 #define	RQ_rcv_buf		q_cnt_4
 
-	struct work_struct irqwork;
-	struct work_struct spwork;
+	struct work_struct	irqwork;
+	struct work_struct	spwork;
+	struct delayed_work	sched_irqwork;
+	struct delayed_work	sched_spwork;
 
 	uint64_t isr_timestamp;
+	uint16_t hdwq;
+	uint16_t last_cpu;	/* most recent cpu */
 	uint8_t	qe_valid;
 	struct lpfc_queue *assoc_qp;
-	union sli4_qe qe[1];	/* array to index entries (must be last) */
+	void **q_pgs;	/* array to index entries per page */
 };
 
 struct lpfc_sli4_link {
-	uint16_t speed;
+	uint32_t speed;
 	uint8_t duplex;
 	uint8_t status;
 	uint8_t type;
 	uint8_t number;
 	uint8_t fault;
-	uint16_t logical_speed;
+	uint32_t logical_speed;
 	uint16_t topology;
 };
 
@@ -279,6 +304,7 @@
 #define FCF_REDISC_EVT	0x100 /* FCF rediscovery event to worker thread */
 #define FCF_REDISC_FOV	0x200 /* Post FCF rediscovery fast failover */
 #define FCF_REDISC_PROG (FCF_REDISC_PEND | FCF_REDISC_EVT)
+	uint16_t fcf_redisc_attempted;
 	uint32_t addr_mode;
 	uint32_t eligible_fcf_cnt;
 	struct lpfc_fcf_rec current_rec;
@@ -427,11 +453,7 @@
 	uint32_t idx;
 	char handler_name[LPFC_SLI4_HANDLER_NAME_SZ];
 	struct lpfc_hba *phba;
-	atomic_t hba_eq_in_use;
-	struct cpumask *cpumask;
-	/* CPU affinitsed to or 0xffffffff if multiple */
-	uint32_t cpu;
-#define LPFC_MULTI_CPU_AFFINITY 0xffffffff
+	struct lpfc_queue *eq;
 };
 
 /*BB Credit recovery value*/
@@ -494,6 +516,7 @@
 #define LPFC_WQ_SZ64_SUPPORT	1
 #define LPFC_WQ_SZ128_SUPPORT	2
 	uint8_t wqpcnt;
+	uint8_t nvme;
 };
 
 #define LPFC_CQ_4K_PAGE_SZ	0x1
@@ -511,8 +534,9 @@
 #define LPFC_LNK_DAT_INVAL	0
 #define LPFC_LNK_DAT_VAL	1
 	uint8_t lnk_tp;
-#define LPFC_LNK_GE	0x0 /* FCoE */
-#define LPFC_LNK_FC	0x1 /* FC   */
+#define LPFC_LNK_GE		0x0 /* FCoE */
+#define LPFC_LNK_FC		0x1 /* FC */
+#define LPFC_LNK_FC_TRUNKED	0x2 /* FC_Trunked */
 	uint8_t lnk_no;
 	uint8_t optic_state;
 };
@@ -525,11 +549,170 @@
 	uint16_t	phys_id;
 	uint16_t	core_id;
 	uint16_t	irq;
-	uint16_t	channel_id;
+	uint16_t	eq;
+	uint16_t	hdwq;
+	uint16_t	flag;
+#define LPFC_CPU_MAP_HYPER	0x1
+#define LPFC_CPU_MAP_UNASSIGN	0x2
+#define LPFC_CPU_FIRST_IRQ	0x4
 };
 #define LPFC_VECTOR_MAP_EMPTY	0xffff
 
+/* Multi-XRI pool */
+#define XRI_BATCH               8
+
+struct lpfc_pbl_pool {
+	struct list_head list;
+	u32 count;
+	spinlock_t lock;	/* lock for pbl_pool */
+};
+
+struct lpfc_pvt_pool {
+	u32 low_watermark;
+	u32 high_watermark;
+
+	struct list_head list;
+	u32 count;
+	spinlock_t lock;	/* lock for pvt_pool */
+};
+
+struct lpfc_multixri_pool {
+	u32 xri_limit;
+
+	/* Starting point when searching a pbl_pool with round-robin method */
+	u32 rrb_next_hwqid;
+
+	/* Used by lpfc_adjust_pvt_pool_count.
+	 * io_req_count is incremented by 1 during IO submission. The heartbeat
+	 * handler uses these two variables to determine if pvt_pool is idle or
+	 * busy.
+	 */
+	u32 prev_io_req_count;
+	u32 io_req_count;
+
+	/* statistics */
+	u32 pbl_empty_count;
+#ifdef LPFC_MXP_STAT
+	u32 above_limit_count;
+	u32 below_limit_count;
+	u32 local_pbl_hit_count;
+	u32 other_pbl_hit_count;
+	u32 stat_max_hwm;
+
+#define LPFC_MXP_SNAPSHOT_TAKEN 3 /* snapshot is taken at the 3rd heartbeat */
+	u32 stat_pbl_count;
+	u32 stat_pvt_count;
+	u32 stat_busy_count;
+	u32 stat_snapshot_taken;
+#endif
+
+	/* TODO: Separate pvt_pool into get and put list */
+	struct lpfc_pbl_pool pbl_pool;   /* Public free XRI pool */
+	struct lpfc_pvt_pool pvt_pool;   /* Private free XRI pool */
+};
+
+struct lpfc_fc4_ctrl_stat {
+	u32 input_requests;
+	u32 output_requests;
+	u32 control_requests;
+	u32 io_cmpls;
+};
+
+#ifdef LPFC_HDWQ_LOCK_STAT
+struct lpfc_lock_stat {
+	uint32_t alloc_xri_get;
+	uint32_t alloc_xri_put;
+	uint32_t free_xri;
+	uint32_t wq_access;
+	uint32_t alloc_pvt_pool;
+	uint32_t mv_from_pvt_pool;
+	uint32_t mv_to_pub_pool;
+	uint32_t mv_to_pvt_pool;
+	uint32_t free_pub_pool;
+	uint32_t free_pvt_pool;
+};
+#endif
+
+struct lpfc_eq_intr_info {
+	struct list_head list;
+	uint32_t icnt;
+};
+
 /* SLI4 HBA data structure entries */
+struct lpfc_sli4_hdw_queue {
+	/* Pointers to the constructed SLI4 queues */
+	struct lpfc_queue *hba_eq;  /* Event queues for HBA */
+	struct lpfc_queue *io_cq;   /* Fast-path FCP & NVME compl queue */
+	struct lpfc_queue *io_wq;   /* Fast-path FCP & NVME work queue */
+	uint16_t io_cq_map;
+
+	/* Keep track of IO buffers for this hardware queue */
+	spinlock_t io_buf_list_get_lock;  /* Common buf alloc list lock */
+	struct list_head lpfc_io_buf_list_get;
+	spinlock_t io_buf_list_put_lock;  /* Common buf free list lock */
+	struct list_head lpfc_io_buf_list_put;
+	spinlock_t abts_io_buf_list_lock; /* list of aborted IOs */
+	struct list_head lpfc_abts_io_buf_list;
+	uint32_t total_io_bufs;
+	uint32_t get_io_bufs;
+	uint32_t put_io_bufs;
+	uint32_t empty_io_bufs;
+	uint32_t abts_scsi_io_bufs;
+	uint32_t abts_nvme_io_bufs;
+
+	/* Multi-XRI pool per HWQ */
+	struct lpfc_multixri_pool *p_multixri_pool;
+
+	/* FC-4 Stats counters */
+	struct lpfc_fc4_ctrl_stat nvme_cstat;
+	struct lpfc_fc4_ctrl_stat scsi_cstat;
+#ifdef LPFC_HDWQ_LOCK_STAT
+	struct lpfc_lock_stat lock_conflict;
+#endif
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+#define LPFC_CHECK_CPU_CNT    128
+	uint32_t cpucheck_rcv_io[LPFC_CHECK_CPU_CNT];
+	uint32_t cpucheck_xmt_io[LPFC_CHECK_CPU_CNT];
+	uint32_t cpucheck_cmpl_io[LPFC_CHECK_CPU_CNT];
+#endif
+
+	/* Per HDWQ pool resources */
+	struct list_head sgl_list;
+	struct list_head cmd_rsp_buf_list;
+
+	/* Lock for syncing Per HDWQ pool resources */
+	spinlock_t hdwq_lock;
+};
+
+#ifdef LPFC_HDWQ_LOCK_STAT
+/* compile time trylock stats */
+#define lpfc_qp_spin_lock_irqsave(lock, flag, qp, lstat) \
+	{ \
+	int only_once = 1; \
+	while (spin_trylock_irqsave(lock, flag) == 0) { \
+		if (only_once) { \
+			only_once = 0; \
+			qp->lock_conflict.lstat++; \
+		} \
+	} \
+	}
+#define lpfc_qp_spin_lock(lock, qp, lstat) \
+	{ \
+	int only_once = 1; \
+	while (spin_trylock(lock) == 0) { \
+		if (only_once) { \
+			only_once = 0; \
+			qp->lock_conflict.lstat++; \
+		} \
+	} \
+	}
+#else
+#define lpfc_qp_spin_lock_irqsave(lock, flag, qp, lstat) \
+	spin_lock_irqsave(lock, flag)
+#define lpfc_qp_spin_lock(lock, qp, lstat) spin_lock(lock)
+#endif
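+
+/*
+ * Usage sketch (hypothetical call site): the wrappers spin on trylock and
+ * charge at most one conflict per contended acquisition to the named
+ * lock_conflict counter, e.g.:
+ *
+ *	lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
+ *				  qp, alloc_xri_get);
+ *	...critical section...
+ *	spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
+ *
+ * With LPFC_HDWQ_LOCK_STAT unset they compile down to the plain spinlock
+ * calls, so the accounting costs nothing in production builds.
+ */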
+
 struct lpfc_sli4_hba {
 	void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
 					   * config space registers
@@ -598,21 +781,19 @@
 	struct lpfc_hba_eq_hdl *hba_eq_hdl; /* HBA per-WQ handle */
 
 	void (*sli4_eq_clr_intr)(struct lpfc_queue *q);
-	uint32_t (*sli4_eq_release)(struct lpfc_queue *q, bool arm);
-	uint32_t (*sli4_cq_release)(struct lpfc_queue *q, bool arm);
+	void (*sli4_write_eq_db)(struct lpfc_hba *phba, struct lpfc_queue *eq,
+				uint32_t count, bool arm);
+	void (*sli4_write_cq_db)(struct lpfc_hba *phba, struct lpfc_queue *cq,
+				uint32_t count, bool arm);
 
 	/* Pointers to the constructed SLI4 queues */
-	struct lpfc_queue **hba_eq;  /* Event queues for HBA */
-	struct lpfc_queue **fcp_cq;  /* Fast-path FCP compl queue */
-	struct lpfc_queue **nvme_cq; /* Fast-path NVME compl queue */
+	struct lpfc_sli4_hdw_queue *hdwq;
+	struct list_head lpfc_wq_list;
+
+	/* Pointers to the constructed SLI4 queues for NVMET */
 	struct lpfc_queue **nvmet_cqset; /* Fast-path NVMET CQ Set queues */
 	struct lpfc_queue **nvmet_mrq_hdr; /* Fast-path NVMET hdr MRQs */
 	struct lpfc_queue **nvmet_mrq_data; /* Fast-path NVMET data MRQs */
-	struct lpfc_queue **fcp_wq;  /* Fast-path FCP work queue */
-	struct lpfc_queue **nvme_wq; /* Fast-path NVME work queue */
-	uint16_t *fcp_cq_map;
-	uint16_t *nvme_cq_map;
-	struct list_head lpfc_wq_list;
 
 	struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
 	struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
@@ -630,13 +811,7 @@
 	uint32_t ulp0_mode;	/* ULP0 protocol mode */
 	uint32_t ulp1_mode;	/* ULP1 protocol mode */
 
-	struct lpfc_queue *fof_eq; /* Flash Optimized Fabric Event queue */
-
 	/* Optimized Access Storage specific queues/structures */
-
-	struct lpfc_queue *oas_cq; /* OAS completion queue */
-	struct lpfc_queue *oas_wq; /* OAS Work queue */
-	struct lpfc_sli_ring *oas_ring;
 	uint64_t oas_next_lun;
 	uint8_t oas_next_tgt_wwpn[8];
 	uint8_t oas_next_vpt_wwpn[8];
@@ -662,22 +837,24 @@
 	uint16_t rpi_hdrs_in_use; /* must post rpi hdrs if set. */
 	uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */
 	uint16_t next_rpi;
-	uint16_t nvme_xri_max;
-	uint16_t nvme_xri_cnt;
-	uint16_t nvme_xri_start;
-	uint16_t scsi_xri_max;
-	uint16_t scsi_xri_cnt;
-	uint16_t scsi_xri_start;
+	uint16_t io_xri_max;
+	uint16_t io_xri_cnt;
+	uint16_t io_xri_start;
 	uint16_t els_xri_cnt;
 	uint16_t nvmet_xri_cnt;
 	uint16_t nvmet_io_wait_cnt;
 	uint16_t nvmet_io_wait_total;
+	uint16_t cq_max;
+	struct lpfc_queue **cq_lookup;
 	struct list_head lpfc_els_sgl_list;
 	struct list_head lpfc_abts_els_sgl_list;
+	spinlock_t abts_io_buf_list_lock; /* list of aborted IOs */
+	struct list_head lpfc_abts_io_buf_list;
 	struct list_head lpfc_nvmet_sgl_list;
+	spinlock_t abts_nvmet_buf_list_lock; /* list of aborted NVMET IOs */
 	struct list_head lpfc_abts_nvmet_ctx_list;
-	struct list_head lpfc_abts_scsi_buf_list;
-	struct list_head lpfc_abts_nvme_buf_list;
+	spinlock_t t_active_list_lock; /* list of active NVMET IOs */
+	struct list_head t_active_ctx_list;
 	struct list_head lpfc_nvmet_io_wait_list;
 	struct lpfc_nvmet_ctx_info *nvmet_ctx_info;
 	struct lpfc_sglq **lpfc_sglq_active_list;
@@ -706,17 +883,41 @@
 #define LPFC_SLI4_PPNAME_NON	0
 #define LPFC_SLI4_PPNAME_GET	1
 	struct lpfc_iov iov;
-	spinlock_t abts_nvme_buf_list_lock; /* list of aborted SCSI IOs */
-	spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
 	spinlock_t sgl_list_lock; /* list of aborted els IOs */
 	spinlock_t nvmet_io_wait_lock; /* IOs waiting for ctx resources */
 	uint32_t physical_port;
 
 	/* CPU to vector mapping information */
 	struct lpfc_vector_map_info *cpu_map;
-	uint16_t num_online_cpu;
+	uint16_t num_possible_cpu;
 	uint16_t num_present_cpu;
 	uint16_t curr_disp_cpu;
+	struct lpfc_eq_intr_info __percpu *eq_info;
+	uint32_t conf_trunk;
+#define lpfc_conf_trunk_port0_WORD	conf_trunk
+#define lpfc_conf_trunk_port0_SHIFT	0
+#define lpfc_conf_trunk_port0_MASK	0x1
+#define lpfc_conf_trunk_port1_WORD	conf_trunk
+#define lpfc_conf_trunk_port1_SHIFT	1
+#define lpfc_conf_trunk_port1_MASK	0x1
+#define lpfc_conf_trunk_port2_WORD	conf_trunk
+#define lpfc_conf_trunk_port2_SHIFT	2
+#define lpfc_conf_trunk_port2_MASK	0x1
+#define lpfc_conf_trunk_port3_WORD	conf_trunk
+#define lpfc_conf_trunk_port3_SHIFT	3
+#define lpfc_conf_trunk_port3_MASK	0x1
+#define lpfc_conf_trunk_port0_nd_WORD	conf_trunk
+#define lpfc_conf_trunk_port0_nd_SHIFT	4
+#define lpfc_conf_trunk_port0_nd_MASK	0x1
+#define lpfc_conf_trunk_port1_nd_WORD	conf_trunk
+#define lpfc_conf_trunk_port1_nd_SHIFT	5
+#define lpfc_conf_trunk_port1_nd_MASK	0x1
+#define lpfc_conf_trunk_port2_nd_WORD	conf_trunk
+#define lpfc_conf_trunk_port2_nd_SHIFT	6
+#define lpfc_conf_trunk_port2_nd_MASK	0x1
+#define lpfc_conf_trunk_port3_nd_WORD	conf_trunk
+#define lpfc_conf_trunk_port3_nd_SHIFT	7
+#define lpfc_conf_trunk_port3_nd_MASK	0x1
 };
 
 enum lpfc_sge_type {
@@ -800,16 +1001,18 @@
 			       uint16_t);
 
 void lpfc_sli4_hba_reset(struct lpfc_hba *);
-struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
-					 uint32_t, uint32_t);
+struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *phba,
+					 uint32_t page_size,
+					 uint32_t entry_size,
+					 uint32_t entry_count, int cpu);
 void lpfc_sli4_queue_free(struct lpfc_queue *);
 int lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t);
-int lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
-			     uint32_t numq, uint32_t imax);
+void lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
+			     uint32_t numq, uint32_t usdelay);
 int lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
 			struct lpfc_queue *, uint32_t, uint32_t);
 int lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
-			struct lpfc_queue **eqp, uint32_t type,
+			struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
 			uint32_t subtype);
 int32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
 		       struct lpfc_queue *, uint32_t);
@@ -829,12 +1032,10 @@
 int lpfc_sli4_queue_setup(struct lpfc_hba *);
 void lpfc_sli4_queue_unset(struct lpfc_hba *);
 int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
-int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);
-int lpfc_repost_nvme_sgl_list(struct lpfc_hba *phba);
+int lpfc_repost_io_sgl_list(struct lpfc_hba *phba);
 uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
 void lpfc_sli4_free_xri(struct lpfc_hba *, int);
 int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
-int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int);
 struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
 struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
 void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
@@ -853,10 +1054,11 @@
 			void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *);
 void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
-void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
-			       struct sli4_wcqe_xri_aborted *);
 void lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
-				struct sli4_wcqe_xri_aborted *axri);
+				struct sli4_wcqe_xri_aborted *axri,
+				struct lpfc_io_buf *lpfc_ncmd);
+void lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
+			      struct sli4_wcqe_xri_aborted *axri, int idx);
 void lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
 				 struct sli4_wcqe_xri_aborted *axri);
 void lpfc_sli4_els_xri_aborted(struct lpfc_hba *,
@@ -869,12 +1071,16 @@
 int lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *);
 int lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba);
 int lpfc_sli4_init_vpi(struct lpfc_vport *);
-inline void lpfc_sli4_eq_clr_intr(struct lpfc_queue *);
-uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool);
-uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool);
-inline void lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q);
-uint32_t lpfc_sli4_if6_cq_release(struct lpfc_queue *q, bool arm);
-uint32_t lpfc_sli4_if6_eq_release(struct lpfc_queue *q, bool arm);
+void lpfc_sli4_eq_clr_intr(struct lpfc_queue *);
+void lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
+			   uint32_t count, bool arm);
+void lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
+			   uint32_t count, bool arm);
+void lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q);
+void lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
+			       uint32_t count, bool arm);
+void lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
+			       uint32_t count, bool arm);
 void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t);
 int lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *, uint16_t);
 int lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *, uint16_t);
@@ -886,3 +1092,20 @@
 int lpfc_sli4_post_status_check(struct lpfc_hba *);
 uint8_t lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
 uint8_t lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_sli4_ras_dma_free(struct lpfc_hba *phba);
+struct sli4_hybrid_sgl *lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba,
+					      struct lpfc_io_buf *buf);
+struct fcp_cmd_rsp_buf *lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
+						      struct lpfc_io_buf *buf);
+int lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *buf);
+int lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
+				  struct lpfc_io_buf *buf);
+void lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
+			    struct lpfc_sli4_hdw_queue *hdwq);
+void lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
+				    struct lpfc_sli4_hdw_queue *hdwq);
+static inline void *lpfc_sli4_qe(struct lpfc_queue *q, uint16_t idx)
+{
+	return q->q_pgs[idx / q->entry_cnt_per_pg] +
+		(q->entry_size * (idx % q->entry_cnt_per_pg));
+}
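
The lpfc_sli4_qe() inline above replaces indexing into the old trailing qe[1] flexible array: entries now live behind a page table (q_pgs), so entry idx sits on page idx / entry_cnt_per_pg at byte offset entry_size * (idx % entry_cnt_per_pg). A worked example with assumed, not driver-mandated, sizes:

	/* Assume 4096-byte pages and 64-byte CQEs, so entry_cnt_per_pg = 64.
	 * Then for idx = 130:
	 *   page   = 130 / 64        = 2
	 *   offset = 64 * (130 % 64) = 64 * 2 = 128
	 * and lpfc_sli4_qe(q, 130) == q->q_pgs[2] + 128.
	 */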
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 5012495..b8aae31 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -20,7 +20,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "12.0.0.6"
+#define LPFC_DRIVER_VERSION "12.4.0.0"
 #define LPFC_DRIVER_NAME		"lpfc"
 
 /* Used for SLI 2/3 */
@@ -32,6 +32,6 @@
 
 #define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
 		LPFC_DRIVER_VERSION
-#define LPFC_COPYRIGHT "Copyright (C) 2017-2018 Broadcom. All Rights " \
+#define LPFC_COPYRIGHT "Copyright (C) 2017-2019 Broadcom. All Rights " \
 		"Reserved. The term \"Broadcom\" refers to Broadcom Inc. " \
 		"and/or its subsidiaries."
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 1ff0f7d..b766463 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -138,8 +138,8 @@
 	 * Grab buffer pointer and clear context1 so we can use
 	 * lpfc_sli_issue_box_wait
 	 */
-	mp = (struct lpfc_dmabuf *) pmb->context1;
-	pmb->context1 = NULL;
+	mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
+	pmb->ctx_buf = NULL;
 
 	pmb->vport = vport;
 	rc = lpfc_sli_issue_mbox_wait(phba, pmb, phba->fc_ratov * 2);
@@ -207,7 +207,7 @@
 	struct lpfc_vport *vport;
 	unsigned long flags;
 
-	spin_lock_irqsave(&phba->hbalock, flags);
+	spin_lock_irqsave(&phba->port_list_lock, flags);
 	list_for_each_entry(vport, &phba->port_list, listentry) {
 		if (vport == new_vport)
 			continue;
@@ -215,11 +215,11 @@
 		if (memcmp(&vport->fc_sparam.portName,
 			   &new_vport->fc_sparam.portName,
 			   sizeof(struct lpfc_name)) == 0) {
-			spin_unlock_irqrestore(&phba->hbalock, flags);
+			spin_unlock_irqrestore(&phba->port_list_lock, flags);
 			return 0;
 		}
 	}
-	spin_unlock_irqrestore(&phba->hbalock, flags);
+	spin_unlock_irqrestore(&phba->port_list_lock, flags);
 	return 1;
 }
 
@@ -313,11 +313,11 @@
 		goto error_out;
 	}
 
-	/* NPIV is not supported if HBA has NVME enabled */
-	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+	/* NPIV is not supported if HBA has NVME Target enabled */
+	if (phba->nvmet_support) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
 				"3189 Create VPORT failed: "
-				"NPIV is not supported on NVME\n");
+				"NPIV is not supported on NVME Target\n");
 		rc = VPORT_INVAL;
 		goto error_out;
 	}
@@ -403,6 +403,9 @@
 	/* Set the DFT_LUN_Q_DEPTH accordingly */
 	vport->cfg_lun_queue_depth  = phba->pport->cfg_lun_queue_depth;
 
+	/* Only the physical port can support NVME for now */
+	vport->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
+
 	*(struct lpfc_vport **)fc_vport->dd_data = vport;
 	vport->fc_vport = fc_vport;
 
@@ -415,22 +418,6 @@
 		vport->fdmi_port_mask = phba->pport->fdmi_port_mask;
 	}
 
-	if ((phba->nvmet_support == 0) &&
-	    ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
-	     (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME))) {
-		/* Create NVME binding with nvme_fc_transport. This
-		 * ensures the vport is initialized.
-		 */
-		rc = lpfc_nvme_create_localport(vport);
-		if (rc) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"6003 %s status x%x\n",
-					"NVME registration failed, ",
-					rc);
-			goto error_out;
-		}
-	}
-
 	/*
 	 * In SLI4, the vpi must be activated before it can be used
 	 * by the port.
@@ -540,9 +527,11 @@
 	 * scsi_host_put() to release the vport.
 	 */
 	lpfc_mbx_unreg_vpi(vport);
-	spin_lock_irq(shost->host_lock);
-	vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
-	spin_unlock_irq(shost->host_lock);
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		spin_lock_irq(shost->host_lock);
+		vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+		spin_unlock_irq(shost->host_lock);
+	}
 
 	lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
 	lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
@@ -825,9 +814,9 @@
 
 	lpfc_free_vpi(phba, vport->vpi);
 	vport->work_port_events = 0;
-	spin_lock_irq(&phba->hbalock);
+	spin_lock_irq(&phba->port_list_lock);
 	list_del_init(&vport->listentry);
-	spin_unlock_irq(&phba->hbalock);
+	spin_unlock_irq(&phba->port_list_lock);
 	lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
 			 "1828 Vport Deleted.\n");
 	scsi_host_put(shost);
@@ -844,7 +833,7 @@
 			 GFP_KERNEL);
 	if (vports == NULL)
 		return NULL;
-	spin_lock_irq(&phba->hbalock);
+	spin_lock_irq(&phba->port_list_lock);
 	list_for_each_entry(port_iterator, &phba->port_list, listentry) {
 		if (port_iterator->load_flag & FC_UNLOADING)
 			continue;
@@ -856,7 +845,7 @@
 		}
 		vports[index++] = port_iterator;
 	}
-	spin_unlock_irq(&phba->hbalock);
+	spin_unlock_irq(&phba->port_list_lock);
 	return vports;
 }
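
A pattern worth noting in lpfc_create_vport_work_array() above: the port list is snapshotted under the new dedicated port_list_lock, narrowing what the heavily used hbalock has to cover, and each vport is pinned with a scsi_host reference before the lock is dropped so the snapshot stays valid afterwards. Condensed (lpfc_destroy_vport_work_array() later drops the references):

	spin_lock_irq(&phba->port_list_lock);
	list_for_each_entry(vport, &phba->port_list, listentry) {
		if (vport->load_flag & FC_UNLOADING)
			continue;
		if (!scsi_host_get(lpfc_shost_from_vport(vport)))
			continue;		/* host already being torn down */
		vports[index++] = vport;	/* reference pins the vport */
	}
	spin_unlock_irq(&phba->port_list_lock);
	/* callers may now walk vports[] without holding any lock */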
 
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c
index 177701d..35d3e32 100644
--- a/drivers/scsi/mac53c94.c
+++ b/drivers/scsi/mac53c94.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * SCSI low-level driver for the 53c94 SCSI bus adaptor found
  * on Power Macintosh computers, controlling the external SCSI chain.
@@ -403,7 +404,7 @@
 	.can_queue	= 1,
 	.this_id	= 7,
 	.sg_tablesize	= SG_ALL,
-	.use_clustering	= DISABLE_CLUSTERING,
+	.max_segment_size = 65535,
 };
 
 static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *match)
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
index 71879f2..1c78bc1 100644
--- a/drivers/scsi/mac_esp.c
+++ b/drivers/scsi/mac_esp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /* mac_esp.c: ESP front-end for Macintosh Quadra systems.
  *
  * Adapted from jazz_esp.c and the old mac_esp.c.
@@ -52,14 +53,12 @@
 	struct esp *esp;
 	void __iomem *pdma_regs;
 	void __iomem *pdma_io;
-	int error;
 };
 static struct esp *esp_chips[2];
 static DEFINE_SPINLOCK(esp_chips_lock);
 
 #define MAC_ESP_GET_PRIV(esp) ((struct mac_esp_priv *) \
-			       platform_get_drvdata((struct platform_device *) \
-						    (esp->dev)))
+			       dev_get_drvdata((esp)->dev))
 
 static inline void mac_esp_write8(struct esp *esp, u8 val, unsigned long reg)
 {
@@ -71,38 +70,6 @@
 	return nubus_readb(esp->regs + reg * 16);
 }
 
-/* For pseudo DMA and PIO we need the virtual address
- * so this address mapping is the identity mapping.
- */
-
-static dma_addr_t mac_esp_map_single(struct esp *esp, void *buf,
-				     size_t sz, int dir)
-{
-	return (dma_addr_t)buf;
-}
-
-static int mac_esp_map_sg(struct esp *esp, struct scatterlist *sg,
-			  int num_sg, int dir)
-{
-	int i;
-
-	for (i = 0; i < num_sg; i++)
-		sg[i].dma_address = (u32)sg_virt(&sg[i]);
-	return num_sg;
-}
-
-static void mac_esp_unmap_single(struct esp *esp, dma_addr_t addr,
-				 size_t sz, int dir)
-{
-	/* Nothing to do. */
-}
-
-static void mac_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
-			     int num_sg, int dir)
-{
-	/* Nothing to do. */
-}
-
 static void mac_esp_reset_dma(struct esp *esp)
 {
 	/* Nothing to do. */
@@ -120,12 +87,11 @@
 
 static int mac_esp_dma_error(struct esp *esp)
 {
-	return MAC_ESP_GET_PRIV(esp)->error;
+	return esp->send_cmd_error;
 }
 
 static inline int mac_esp_wait_for_empty_fifo(struct esp *esp)
 {
-	struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
 	int i = 500000;
 
 	do {
@@ -140,7 +106,7 @@
 
 	printk(KERN_ERR PFX "FIFO is not empty (sreg %02x)\n",
 	       esp_read8(ESP_STATUS));
-	mep->error = 1;
+	esp->send_cmd_error = 1;
 	return 1;
 }
 
@@ -166,7 +132,7 @@
 
 	printk(KERN_ERR PFX "PDMA timeout (sreg %02x)\n",
 	       esp_read8(ESP_STATUS));
-	mep->error = 1;
+	esp->send_cmd_error = 1;
 	return 1;
 }
 
@@ -233,7 +199,7 @@
 {
 	struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
 
-	mep->error = 0;
+	esp->send_cmd_error = 0;
 
 	if (!write)
 		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
@@ -271,166 +237,6 @@
 	} while (esp_count);
 }
 
-/*
- * Programmed IO routines follow.
- */
-
-static inline unsigned int mac_esp_wait_for_fifo(struct esp *esp)
-{
-	int i = 500000;
-
-	do {
-		unsigned int fbytes = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
-
-		if (fbytes)
-			return fbytes;
-
-		udelay(2);
-	} while (--i);
-
-	printk(KERN_ERR PFX "FIFO is empty (sreg %02x)\n",
-	       esp_read8(ESP_STATUS));
-	return 0;
-}
-
-static inline int mac_esp_wait_for_intr(struct esp *esp)
-{
-	struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
-	int i = 500000;
-
-	do {
-		esp->sreg = esp_read8(ESP_STATUS);
-		if (esp->sreg & ESP_STAT_INTR)
-			return 0;
-
-		udelay(2);
-	} while (--i);
-
-	printk(KERN_ERR PFX "IRQ timeout (sreg %02x)\n", esp->sreg);
-	mep->error = 1;
-	return 1;
-}
-
-#define MAC_ESP_PIO_LOOP(operands, reg1) \
-	asm volatile ( \
-	     "1:     moveb " operands " \n" \
-	     "       subqw #1,%1        \n" \
-	     "       jbne 1b            \n" \
-	     : "+a" (addr), "+r" (reg1) \
-	     : "a" (fifo))
-
-#define MAC_ESP_PIO_FILL(operands, reg1) \
-	asm volatile ( \
-	     "       moveb " operands " \n" \
-	     "       moveb " operands " \n" \
-	     "       moveb " operands " \n" \
-	     "       moveb " operands " \n" \
-	     "       moveb " operands " \n" \
-	     "       moveb " operands " \n" \
-	     "       moveb " operands " \n" \
-	     "       moveb " operands " \n" \
-	     "       moveb " operands " \n" \
-	     "       moveb " operands " \n" \
-	     "       moveb " operands " \n" \
-	     "       moveb " operands " \n" \
-	     "       moveb " operands " \n" \
-	     "       moveb " operands " \n" \
-	     "       moveb " operands " \n" \
-	     "       moveb " operands " \n" \
-	     "       subqw #8,%1        \n" \
-	     "       subqw #8,%1        \n" \
-	     : "+a" (addr), "+r" (reg1) \
-	     : "a" (fifo))
-
-#define MAC_ESP_FIFO_SIZE 16
-
-static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
-				 u32 dma_count, int write, u8 cmd)
-{
-	struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
-	u8 __iomem *fifo = esp->regs + ESP_FDATA * 16;
-	u8 phase = esp->sreg & ESP_STAT_PMASK;
-
-	cmd &= ~ESP_CMD_DMA;
-	mep->error = 0;
-
-	if (write) {
-		u8 *dst = (u8 *)addr;
-		u8 mask = ~(phase == ESP_MIP ? ESP_INTR_FDONE : ESP_INTR_BSERV);
-
-		scsi_esp_cmd(esp, cmd);
-
-		while (1) {
-			if (!mac_esp_wait_for_fifo(esp))
-				break;
-
-			*dst++ = esp_read8(ESP_FDATA);
-			--esp_count;
-
-			if (!esp_count)
-				break;
-
-			if (mac_esp_wait_for_intr(esp))
-				break;
-
-			if ((esp->sreg & ESP_STAT_PMASK) != phase)
-				break;
-
-			esp->ireg = esp_read8(ESP_INTRPT);
-			if (esp->ireg & mask) {
-				mep->error = 1;
-				break;
-			}
-
-			if (phase == ESP_MIP)
-				scsi_esp_cmd(esp, ESP_CMD_MOK);
-
-			scsi_esp_cmd(esp, ESP_CMD_TI);
-		}
-	} else {
-		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
-
-		if (esp_count >= MAC_ESP_FIFO_SIZE)
-			MAC_ESP_PIO_FILL("%0@+,%2@", esp_count);
-		else
-			MAC_ESP_PIO_LOOP("%0@+,%2@", esp_count);
-
-		scsi_esp_cmd(esp, cmd);
-
-		while (esp_count) {
-			unsigned int n;
-
-			if (mac_esp_wait_for_intr(esp))
-				break;
-
-			if ((esp->sreg & ESP_STAT_PMASK) != phase)
-				break;
-
-			esp->ireg = esp_read8(ESP_INTRPT);
-			if (esp->ireg & ~ESP_INTR_BSERV) {
-				mep->error = 1;
-				break;
-			}
-
-			n = MAC_ESP_FIFO_SIZE -
-			    (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES);
-			if (n > esp_count)
-				n = esp_count;
-
-			if (n == MAC_ESP_FIFO_SIZE) {
-				MAC_ESP_PIO_FILL("%0@+,%2@", esp_count);
-			} else {
-				esp_count -= n;
-				MAC_ESP_PIO_LOOP("%0@+,%2@", n);
-			}
-
-			scsi_esp_cmd(esp, ESP_CMD_TI);
-		}
-	}
-
-	esp->send_cmd_residual = esp_count;
-}
-
 static int mac_esp_irq_pending(struct esp *esp)
 {
 	if (esp_read8(ESP_STATUS) & ESP_STAT_INTR)
@@ -472,10 +278,6 @@
 static struct esp_driver_ops mac_esp_ops = {
 	.esp_write8       = mac_esp_write8,
 	.esp_read8        = mac_esp_read8,
-	.map_single       = mac_esp_map_single,
-	.map_sg           = mac_esp_map_sg,
-	.unmap_single     = mac_esp_unmap_single,
-	.unmap_sg         = mac_esp_unmap_sg,
 	.irq_pending      = mac_esp_irq_pending,
 	.dma_length_limit = mac_esp_dma_length_limit,
 	.reset_dma        = mac_esp_reset_dma,
@@ -506,11 +308,11 @@
 		goto fail;
 
 	host->max_id = 8;
-	host->use_clustering = DISABLE_CLUSTERING;
+	host->dma_boundary = PAGE_SIZE - 1;
 	esp = shost_priv(host);
 
 	esp->host = host;
-	esp->dev = dev;
+	esp->dev = &dev->dev;
 
 	esp->command_block = kzalloc(16, GFP_KERNEL);
 	if (!esp->command_block)
@@ -553,14 +355,16 @@
 		mep->pdma_regs = NULL;
 		break;
 	}
+	esp->fifo_reg = esp->regs + ESP_FDATA * 16;
 
 	esp->ops = &mac_esp_ops;
+	esp->flags = ESP_FLAG_NO_DMA_MAP;
 	if (mep->pdma_io == NULL) {
 		printk(KERN_INFO PFX "using PIO for controller %d\n", dev->id);
 		esp_write8(0, ESP_TCLOW);
 		esp_write8(0, ESP_TCMED);
-		esp->flags = ESP_FLAG_DISABLE_SYNC;
-		mac_esp_ops.send_dma_cmd = mac_esp_send_pio_cmd;
+		esp->flags |= ESP_FLAG_DISABLE_SYNC;
+		mac_esp_ops.send_dma_cmd = esp_send_pio_cmd;
 	} else {
 		printk(KERN_INFO PFX "using PDMA for controller %d\n", dev->id);
 	}
@@ -579,7 +383,7 @@
 	esp_chips[dev->id] = esp;
 	spin_unlock(&esp_chips_lock);
 
-	err = scsi_esp_register(esp, &dev->dev);
+	err = scsi_esp_register(esp);
 	if (err)
 		goto fail_free_irq;
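
Two details tie the mac_esp changes together. Setting ESP_FLAG_NO_DMA_MAP tells the esp_scsi core not to DMA-map commands at all (PDMA and PIO operate on virtual addresses), which is why the identity-mapping map_single/map_sg stubs could simply be deleted. The driver-private PIO engine and error flag likewise moved into the shared core (esp_send_pio_cmd(), esp->send_cmd_error), leaving only the bounded busy-wait idiom behind. A sketch of that idiom, with a hypothetical helper name:

	/* Hypothetical condensation of mac_esp_wait_for_empty_fifo() */
	static int mac_esp_wait_clear(struct esp *esp, u8 reg, u8 mask)
	{
		int i = 500000;		/* hard ceiling, ~1 s at udelay(2) */

		do {
			if (!(esp_read8(reg) & mask))
				return 0;
			udelay(2);
		} while (--i);

		esp->send_cmd_error = 1;	/* surfaced via .dma_error */
		return 1;
	}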
 
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index dd60573..9c55662 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -1,8 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Generic Macintosh NCR5380 driver
  *
  * Copyright 1998, Michael Schmitz <mschmitz@lbl.gov>
  *
+ * Copyright 2019 Finn Thain
+ *
  * derived in part from:
  */
 /*
@@ -11,6 +14,7 @@
  * Copyright 1995, Russell King
  */
 
+#include <linux/delay.h>
 #include <linux/types.h>
 #include <linux/module.h>
 #include <linux/ioport.h>
@@ -21,6 +25,7 @@
 
 #include <asm/hwtest.h>
 #include <asm/io.h>
+#include <asm/macintosh.h>
 #include <asm/macints.h>
 #include <asm/setup.h>
 
@@ -52,7 +57,7 @@
 module_param(setup_cmd_per_lun, int, 0);
 static int setup_sg_tablesize = -1;
 module_param(setup_sg_tablesize, int, 0);
-static int setup_use_pdma = -1;
+static int setup_use_pdma = 512;
 module_param(setup_use_pdma, int, 0);
 static int setup_hostid = -1;
 module_param(setup_hostid, int, 0);
@@ -89,223 +94,318 @@
 __setup("mac5380=", mac_scsi_setup);
 #endif /* !MODULE */
 
-/* Pseudo DMA asm originally by Ove Edlund */
+/*
+ * According to "Inside Macintosh: Devices", Mac OS requires disk drivers to
+ * specify the number of bytes between the delays expected from a SCSI target.
+ * This allows the operating system to "prevent bus errors when a target fails
+ * to deliver the next byte within the processor bus error timeout period."
+ * Linux SCSI drivers lack knowledge of the timing behaviour of SCSI targets
+ * so bus errors are unavoidable.
+ *
+ * If a MOVE.B instruction faults, we assume that zero bytes were transferred
+ * and simply retry. That assumption probably depends on target behaviour but
+ * seems to hold up okay. The NOP provides synchronization: without it the
+ * fault can sometimes occur after the program counter has moved past the
+ * offending instruction. Post-increment addressing can't be used.
+ */
 
-#define CP_IO_TO_MEM(s,d,n)				\
-__asm__ __volatile__					\
-    ("    cmp.w  #4,%2\n"				\
-     "    bls    8f\n"					\
-     "    move.w %1,%%d0\n"				\
-     "    neg.b  %%d0\n"				\
-     "    and.w  #3,%%d0\n"				\
-     "    sub.w  %%d0,%2\n"				\
-     "    bra    2f\n"					\
-     " 1: move.b (%0),(%1)+\n"				\
-     " 2: dbf    %%d0,1b\n"				\
-     "    move.w %2,%%d0\n"				\
-     "    lsr.w  #5,%%d0\n"				\
-     "    bra    4f\n"					\
-     " 3: move.l (%0),(%1)+\n"				\
-     "31: move.l (%0),(%1)+\n"				\
-     "32: move.l (%0),(%1)+\n"				\
-     "33: move.l (%0),(%1)+\n"				\
-     "34: move.l (%0),(%1)+\n"				\
-     "35: move.l (%0),(%1)+\n"				\
-     "36: move.l (%0),(%1)+\n"				\
-     "37: move.l (%0),(%1)+\n"				\
-     " 4: dbf    %%d0,3b\n"				\
-     "    move.w %2,%%d0\n"				\
-     "    lsr.w  #2,%%d0\n"				\
-     "    and.w  #7,%%d0\n"				\
-     "    bra    6f\n"					\
-     " 5: move.l (%0),(%1)+\n"				\
-     " 6: dbf    %%d0,5b\n"				\
-     "    and.w  #3,%2\n"				\
-     "    bra    8f\n"					\
-     " 7: move.b (%0),(%1)+\n"				\
-     " 8: dbf    %2,7b\n"				\
-     "    moveq.l #0, %2\n"				\
-     " 9: \n"						\
-     ".section .fixup,\"ax\"\n"				\
-     "    .even\n"					\
-     "91: moveq.l #1, %2\n"				\
-     "    jra 9b\n"					\
-     "94: moveq.l #4, %2\n"				\
-     "    jra 9b\n"					\
-     ".previous\n"					\
-     ".section __ex_table,\"a\"\n"			\
-     "   .align 4\n"					\
-     "   .long  1b,91b\n"				\
-     "   .long  3b,94b\n"				\
-     "   .long 31b,94b\n"				\
-     "   .long 32b,94b\n"				\
-     "   .long 33b,94b\n"				\
-     "   .long 34b,94b\n"				\
-     "   .long 35b,94b\n"				\
-     "   .long 36b,94b\n"				\
-     "   .long 37b,94b\n"				\
-     "   .long  5b,94b\n"				\
-     "   .long  7b,91b\n"				\
-     ".previous"					\
-     : "=a"(s), "=a"(d), "=d"(n)			\
-     : "0"(s), "1"(d), "2"(n)				\
-     : "d0")
+#define MOVE_BYTE(operands) \
+	asm volatile ( \
+		"1:     moveb " operands "     \n" \
+		"11:    nop                    \n" \
+		"       addq #1,%0             \n" \
+		"       subq #1,%1             \n" \
+		"40:                           \n" \
+		"                              \n" \
+		".section .fixup,\"ax\"        \n" \
+		".even                         \n" \
+		"90:    movel #1, %2           \n" \
+		"       jra 40b                \n" \
+		".previous                     \n" \
+		"                              \n" \
+		".section __ex_table,\"a\"     \n" \
+		".align  4                     \n" \
+		".long   1b,90b                \n" \
+		".long  11b,90b                \n" \
+		".previous                     \n" \
+		: "+a" (addr), "+r" (n), "+r" (result) : "a" (io))
+
+/*
+ * If a MOVE.W (or MOVE.L) instruction faults, it cannot be retried because
+ * the residual byte count would be uncertain. In that situation the MOVE_WORD
+ * macro clears n in the fixup section to abort the transfer.
+ */
+
+#define MOVE_WORD(operands) \
+	asm volatile ( \
+		"1:     movew " operands "     \n" \
+		"11:    nop                    \n" \
+		"       subq #2,%1             \n" \
+		"40:                           \n" \
+		"                              \n" \
+		".section .fixup,\"ax\"        \n" \
+		".even                         \n" \
+		"90:    movel #0, %1           \n" \
+		"       movel #2, %2           \n" \
+		"       jra 40b                \n" \
+		".previous                     \n" \
+		"                              \n" \
+		".section __ex_table,\"a\"     \n" \
+		".align  4                     \n" \
+		".long   1b,90b                \n" \
+		".long  11b,90b                \n" \
+		".previous                     \n" \
+		: "+a" (addr), "+r" (n), "+r" (result) : "a" (io))
+
+#define MOVE_16_WORDS(operands) \
+	asm volatile ( \
+		"1:     movew " operands "     \n" \
+		"2:     movew " operands "     \n" \
+		"3:     movew " operands "     \n" \
+		"4:     movew " operands "     \n" \
+		"5:     movew " operands "     \n" \
+		"6:     movew " operands "     \n" \
+		"7:     movew " operands "     \n" \
+		"8:     movew " operands "     \n" \
+		"9:     movew " operands "     \n" \
+		"10:    movew " operands "     \n" \
+		"11:    movew " operands "     \n" \
+		"12:    movew " operands "     \n" \
+		"13:    movew " operands "     \n" \
+		"14:    movew " operands "     \n" \
+		"15:    movew " operands "     \n" \
+		"16:    movew " operands "     \n" \
+		"17:    nop                    \n" \
+		"       subl  #32,%1           \n" \
+		"40:                           \n" \
+		"                              \n" \
+		".section .fixup,\"ax\"        \n" \
+		".even                         \n" \
+		"90:    movel #0, %1           \n" \
+		"       movel #2, %2           \n" \
+		"       jra 40b                \n" \
+		".previous                     \n" \
+		"                              \n" \
+		".section __ex_table,\"a\"     \n" \
+		".align  4                     \n" \
+		".long   1b,90b                \n" \
+		".long   2b,90b                \n" \
+		".long   3b,90b                \n" \
+		".long   4b,90b                \n" \
+		".long   5b,90b                \n" \
+		".long   6b,90b                \n" \
+		".long   7b,90b                \n" \
+		".long   8b,90b                \n" \
+		".long   9b,90b                \n" \
+		".long  10b,90b                \n" \
+		".long  11b,90b                \n" \
+		".long  12b,90b                \n" \
+		".long  13b,90b                \n" \
+		".long  14b,90b                \n" \
+		".long  15b,90b                \n" \
+		".long  16b,90b                \n" \
+		".long  17b,90b                \n" \
+		".previous                     \n" \
+		: "+a" (addr), "+r" (n), "+r" (result) : "a" (io))
+
+#define MAC_PDMA_DELAY		32
+
+static inline int mac_pdma_recv(void __iomem *io, unsigned char *start, int n)
+{
+	unsigned char *addr = start;
+	int result = 0;
+
+	if (n >= 1) {
+		MOVE_BYTE("%3@,%0@");
+		if (result)
+			goto out;
+	}
+	if (n >= 1 && ((unsigned long)addr & 1)) {
+		MOVE_BYTE("%3@,%0@");
+		if (result)
+			goto out;
+	}
+	while (n >= 32)
+		MOVE_16_WORDS("%3@,%0@+");
+	while (n >= 2)
+		MOVE_WORD("%3@,%0@+");
+	if (result)
+		return start - addr; /* Negated to indicate uncertain length */
+	if (n == 1)
+		MOVE_BYTE("%3@,%0@");
+out:
+	return addr - start;
+}
+
+static inline int mac_pdma_send(unsigned char *start, void __iomem *io, int n)
+{
+	unsigned char *addr = start;
+	int result = 0;
+
+	if (n >= 1) {
+		MOVE_BYTE("%0@,%3@");
+		if (result)
+			goto out;
+	}
+	if (n >= 1 && ((unsigned long)addr & 1)) {
+		MOVE_BYTE("%0@,%3@");
+		if (result)
+			goto out;
+	}
+	while (n >= 32)
+		MOVE_16_WORDS("%0@+,%3@");
+	while (n >= 2)
+		MOVE_WORD("%0@+,%3@");
+	if (result)
+		return start - addr; /* Negated to indicate uncertain length */
+	if (n == 1)
+		MOVE_BYTE("%0@,%3@");
+out:
+	return addr - start;
+}
+
+/* The "SCSI DMA" chip on the IIfx implements this register. */
+#define CTRL_REG                0x8
+#define CTRL_INTERRUPTS_ENABLE  BIT(1)
+#define CTRL_HANDSHAKE_MODE     BIT(3)
+
+static inline void write_ctrl_reg(struct NCR5380_hostdata *hostdata, u32 value)
+{
+	out_be32(hostdata->io + (CTRL_REG << 4), value);
+}
 
 static inline int macscsi_pread(struct NCR5380_hostdata *hostdata,
                                 unsigned char *dst, int len)
 {
 	u8 __iomem *s = hostdata->pdma_io + (INPUT_DATA_REG << 4);
 	unsigned char *d = dst;
-	int n = len;
-	int transferred;
+	int result = 0;
+
+	hostdata->pdma_residual = len;
 
 	while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
 	                              BASR_DRQ | BASR_PHASE_MATCH,
 	                              BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) {
-		CP_IO_TO_MEM(s, d, n);
+		int bytes;
 
-		transferred = d - dst - n;
-		hostdata->pdma_residual = len - transferred;
+		if (macintosh_config->ident == MAC_MODEL_IIFX)
+			write_ctrl_reg(hostdata, CTRL_HANDSHAKE_MODE |
+			                         CTRL_INTERRUPTS_ENABLE);
 
-		/* No bus error. */
-		if (n == 0)
-			return 0;
+		bytes = mac_pdma_recv(s, d, min(hostdata->pdma_residual, 512));
 
-		/* Target changed phase early? */
+		if (bytes > 0) {
+			d += bytes;
+			hostdata->pdma_residual -= bytes;
+		}
+
+		if (hostdata->pdma_residual == 0)
+			goto out;
+
 		if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
-		                           BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, HZ / 64) < 0)
-			scmd_printk(KERN_ERR, hostdata->connected,
+		                           BUS_AND_STATUS_REG, BASR_ACK,
+		                           BASR_ACK, HZ / 64) < 0)
+			scmd_printk(KERN_DEBUG, hostdata->connected,
 			            "%s: !REQ and !ACK\n", __func__);
 		if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))
-			return 0;
+			goto out;
+
+		if (bytes == 0)
+			udelay(MAC_PDMA_DELAY);
+
+		if (bytes >= 0)
+			continue;
 
 		dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
-		         "%s: bus error (%d/%d)\n", __func__, transferred, len);
+		         "%s: bus error (%d/%d)\n", __func__, d - dst, len);
 		NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
-		d = dst + transferred;
-		n = len - transferred;
+		result = -1;
+		goto out;
 	}
 
 	scmd_printk(KERN_ERR, hostdata->connected,
 	            "%s: phase mismatch or !DRQ\n", __func__);
 	NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
-	return -1;
+	result = -1;
+out:
+	if (macintosh_config->ident == MAC_MODEL_IIFX)
+		write_ctrl_reg(hostdata, CTRL_INTERRUPTS_ENABLE);
+	return result;
 }
 
-
-#define CP_MEM_TO_IO(s,d,n)				\
-__asm__ __volatile__					\
-    ("    cmp.w  #4,%2\n"				\
-     "    bls    8f\n"					\
-     "    move.w %0,%%d0\n"				\
-     "    neg.b  %%d0\n"				\
-     "    and.w  #3,%%d0\n"				\
-     "    sub.w  %%d0,%2\n"				\
-     "    bra    2f\n"					\
-     " 1: move.b (%0)+,(%1)\n"				\
-     " 2: dbf    %%d0,1b\n"				\
-     "    move.w %2,%%d0\n"				\
-     "    lsr.w  #5,%%d0\n"				\
-     "    bra    4f\n"					\
-     " 3: move.l (%0)+,(%1)\n"				\
-     "31: move.l (%0)+,(%1)\n"				\
-     "32: move.l (%0)+,(%1)\n"				\
-     "33: move.l (%0)+,(%1)\n"				\
-     "34: move.l (%0)+,(%1)\n"				\
-     "35: move.l (%0)+,(%1)\n"				\
-     "36: move.l (%0)+,(%1)\n"				\
-     "37: move.l (%0)+,(%1)\n"				\
-     " 4: dbf    %%d0,3b\n"				\
-     "    move.w %2,%%d0\n"				\
-     "    lsr.w  #2,%%d0\n"				\
-     "    and.w  #7,%%d0\n"				\
-     "    bra    6f\n"					\
-     " 5: move.l (%0)+,(%1)\n"				\
-     " 6: dbf    %%d0,5b\n"				\
-     "    and.w  #3,%2\n"				\
-     "    bra    8f\n"					\
-     " 7: move.b (%0)+,(%1)\n"				\
-     " 8: dbf    %2,7b\n"				\
-     "    moveq.l #0, %2\n"				\
-     " 9: \n"						\
-     ".section .fixup,\"ax\"\n"				\
-     "    .even\n"					\
-     "91: moveq.l #1, %2\n"				\
-     "    jra 9b\n"					\
-     "94: moveq.l #4, %2\n"				\
-     "    jra 9b\n"					\
-     ".previous\n"					\
-     ".section __ex_table,\"a\"\n"			\
-     "   .align 4\n"					\
-     "   .long  1b,91b\n"				\
-     "   .long  3b,94b\n"				\
-     "   .long 31b,94b\n"				\
-     "   .long 32b,94b\n"				\
-     "   .long 33b,94b\n"				\
-     "   .long 34b,94b\n"				\
-     "   .long 35b,94b\n"				\
-     "   .long 36b,94b\n"				\
-     "   .long 37b,94b\n"				\
-     "   .long  5b,94b\n"				\
-     "   .long  7b,91b\n"				\
-     ".previous"					\
-     : "=a"(s), "=a"(d), "=d"(n)			\
-     : "0"(s), "1"(d), "2"(n)				\
-     : "d0")
-
 static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata,
                                  unsigned char *src, int len)
 {
 	unsigned char *s = src;
 	u8 __iomem *d = hostdata->pdma_io + (OUTPUT_DATA_REG << 4);
-	int n = len;
-	int transferred;
+	int result = 0;
+
+	hostdata->pdma_residual = len;
 
 	while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
 	                              BASR_DRQ | BASR_PHASE_MATCH,
 	                              BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) {
-		CP_MEM_TO_IO(s, d, n);
+		int bytes;
 
-		transferred = s - src - n;
-		hostdata->pdma_residual = len - transferred;
+		if (macintosh_config->ident == MAC_MODEL_IIFX)
+			write_ctrl_reg(hostdata, CTRL_HANDSHAKE_MODE |
+			                         CTRL_INTERRUPTS_ENABLE);
 
-		/* Target changed phase early? */
-		if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
-		                           BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, HZ / 64) < 0)
-			scmd_printk(KERN_ERR, hostdata->connected,
-			            "%s: !REQ and !ACK\n", __func__);
-		if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))
-			return 0;
+		bytes = mac_pdma_send(s, d, min(hostdata->pdma_residual, 512));
 
-		/* No bus error. */
-		if (n == 0) {
-			if (NCR5380_poll_politely(hostdata, TARGET_COMMAND_REG,
-			                          TCR_LAST_BYTE_SENT,
-			                          TCR_LAST_BYTE_SENT, HZ / 64) < 0)
-				scmd_printk(KERN_ERR, hostdata->connected,
-				            "%s: Last Byte Sent timeout\n", __func__);
-			return 0;
+		if (bytes > 0) {
+			s += bytes;
+			hostdata->pdma_residual -= bytes;
 		}
 
+		if (hostdata->pdma_residual == 0) {
+			if (NCR5380_poll_politely(hostdata, TARGET_COMMAND_REG,
+			                          TCR_LAST_BYTE_SENT,
+			                          TCR_LAST_BYTE_SENT,
+			                          HZ / 64) < 0) {
+				scmd_printk(KERN_ERR, hostdata->connected,
+				            "%s: Last Byte Sent timeout\n", __func__);
+				result = -1;
+			}
+			goto out;
+		}
+
+		if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
+		                           BUS_AND_STATUS_REG, BASR_ACK,
+		                           BASR_ACK, HZ / 64) < 0)
+			scmd_printk(KERN_DEBUG, hostdata->connected,
+			            "%s: !REQ and !ACK\n", __func__);
+		if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))
+			goto out;
+
+		if (bytes == 0)
+			udelay(MAC_PDMA_DELAY);
+
+		if (bytes >= 0)
+			continue;
+
 		dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
-		         "%s: bus error (%d/%d)\n", __func__, transferred, len);
+		         "%s: bus error (%d/%d)\n", __func__, s - src, len);
 		NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
-		s = src + transferred;
-		n = len - transferred;
+		result = -1;
+		goto out;
 	}
 
 	scmd_printk(KERN_ERR, hostdata->connected,
 	            "%s: phase mismatch or !DRQ\n", __func__);
 	NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
-
-	return -1;
+	result = -1;
+out:
+	if (macintosh_config->ident == MAC_MODEL_IIFX)
+		write_ctrl_reg(hostdata, CTRL_INTERRUPTS_ENABLE);
+	return result;
 }
 
 static int macscsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
                                 struct scsi_cmnd *cmd)
 {
 	if (hostdata->flags & FLAG_NO_PSEUDO_DMA ||
-	    cmd->SCp.this_residual < 16)
+	    cmd->SCp.this_residual < setup_use_pdma)
 		return 0;
 
 	return cmd->SCp.this_residual;
@@ -333,7 +433,7 @@
 	.this_id		= 7,
 	.sg_tablesize		= 1,
 	.cmd_per_lun		= 2,
-	.use_clustering		= DISABLE_CLUSTERING,
+	.dma_boundary		= PAGE_SIZE - 1,
 	.cmd_size		= NCR5380_CMD_SIZE,
 	.max_sectors		= 128,
 };
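
The rewritten pwrite loop above caps each PDMA burst at 512 bytes and re-polls DRQ between bursts. A minimal sketch of that chunking policy, assuming a hypothetical send_chunk() helper with the contract the hunk implies for mac_pdma_send(): returns bytes moved (> 0), 0 on a stall, negative on a bus fault.

/* Sketch only; send_chunk() is a hypothetical stand-in for
 * mac_pdma_send() under the assumptions stated above.
 */
static int send_all(unsigned char *buf, int len,
		    int (*send_chunk)(unsigned char *dst, int n))
{
	int residual = len;

	while (residual > 0) {
		/* never hand the chip more than 512 bytes per burst */
		int bytes = send_chunk(buf, residual < 512 ? residual : 512);

		if (bytes < 0)
			return -1;	/* bus fault: abort the transfer */
		if (bytes == 0)
			continue;	/* stall: delay, then re-poll DRQ */
		buf += bytes;
		residual -= bytes;
	}
	return 0;	/* drained: now wait for TCR_LAST_BYTE_SENT */
}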
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 8c71541..ff6d4aa 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  *
  *			Linux MegaRAID device driver
  *
  * Copyright (c) 2002  LSI Logic Corporation.
  *
- *	   This program is free software; you can redistribute it and/or
- *	   modify it under the terms of the GNU General Public License
- *	   as published by the Free Software Foundation; either version
- *	   2 of the License, or (at your option) any later version.
- *
  * Copyright (c) 2002  Red Hat, Inc. All rights reserved.
  *	  - fixes
  *	  - speed-ups (list handling fixes, issued_list, optimizations.)
@@ -28,7 +24,6 @@
  * This driver is supported by LSI Logic, with assistance from Red Hat, Dell,
  * and others. Please send updates to the mailing list
  * linux-scsi@vger.kernel.org .
- *
  */
 
 #include <linux/mm.h>
@@ -4148,7 +4143,6 @@
 	.this_id			= DEFAULT_INITIATOR_ID,
 	.sg_tablesize			= MAX_SGLIST,
 	.cmd_per_lun			= DEF_CMD_PER_LUN,
-	.use_clustering			= ENABLE_CLUSTERING,
 	.eh_abort_handler		= megaraid_abort,
 	.eh_device_reset_handler	= megaraid_reset,
 	.eh_bus_reset_handler		= megaraid_reset,
@@ -4189,11 +4183,11 @@
 		 */
 		if (pdev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ &&
 		    pdev->subsystem_device == 0xC000)
-		   	return -ENODEV;
+			goto out_disable_device;
 		/* Now check the magic signature byte */
 		pci_read_config_word(pdev, PCI_CONF_AMISIG, &magic);
 		if (magic != HBA_SIGNATURE_471 && magic != HBA_SIGNATURE)
-			return -ENODEV;
+			goto out_disable_device;
 		/* Ok it is probably a megaraid */
 	}
 
diff --git a/drivers/scsi/megaraid/Kconfig.megaraid b/drivers/scsi/megaraid/Kconfig.megaraid
index 17419e3..2adc2af 100644
--- a/drivers/scsi/megaraid/Kconfig.megaraid
+++ b/drivers/scsi/megaraid/Kconfig.megaraid
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 config MEGARAID_NEWGEN
 	bool "LSI Logic New Generation RAID Device Drivers"
 	depends on PCI && SCSI
@@ -78,6 +79,7 @@
 config MEGARAID_SAS
 	tristate "LSI Logic MegaRAID SAS RAID Module"
 	depends on PCI && SCSI
+	select IRQ_POLL
 	help
 	Module for LSI Logic's SAS based RAID controllers.
 	To compile this driver as a module, choose 'm' here.
diff --git a/drivers/scsi/megaraid/Makefile b/drivers/scsi/megaraid/Makefile
index 6e74d21..12177e4 100644
--- a/drivers/scsi/megaraid/Makefile
+++ b/drivers/scsi/megaraid/Makefile
@@ -3,4 +3,4 @@
 obj-$(CONFIG_MEGARAID_MAILBOX)	+= megaraid_mbox.o
 obj-$(CONFIG_MEGARAID_SAS)	+= megaraid_sas.o
 megaraid_sas-objs := megaraid_sas_base.o megaraid_sas_fusion.o \
-	megaraid_sas_fp.o
+	megaraid_sas_fp.o megaraid_sas_debugfs.o
diff --git a/drivers/scsi/megaraid/mbox_defs.h b/drivers/scsi/megaraid/mbox_defs.h
index e01c6f7..01a1bfb 100644
--- a/drivers/scsi/megaraid/mbox_defs.h
+++ b/drivers/scsi/megaraid/mbox_defs.h
@@ -1,16 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  *
  *			Linux MegaRAID Unified device driver
  *
  * Copyright (c) 2003-2004  LSI Logic Corporation.
  *
- *	   This program is free software; you can redistribute it and/or
- *	   modify it under the terms of the GNU General Public License
- *	   as published by the Free Software Foundation; either version
- *	   2 of the License, or (at your option) any later version.
- *
  * FILE		: mbox_defs.h
- *
  */
 #ifndef _MRAID_MBOX_DEFS_H_
 #define _MRAID_MBOX_DEFS_H_
diff --git a/drivers/scsi/megaraid/mega_common.h b/drivers/scsi/megaraid/mega_common.h
index 1d037ed..3a7596e 100644
--- a/drivers/scsi/megaraid/mega_common.h
+++ b/drivers/scsi/megaraid/mega_common.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  *
  *			Linux MegaRAID device driver
  *
  * Copyright (c) 2003-2004  LSI Logic Corporation.
  *
- *	   This program is free software; you can redistribute it and/or
- *	   modify it under the terms of the GNU General Public License
- *	   as published by the Free Software Foundation; either version
- *	   2 of the License, or (at your option) any later version.
- *
  * FILE		: mega_common.h
  *
- * Libaray of common routine used by all low-level megaraid drivers
+ * Library of common routines used by all low-level megaraid drivers
diff --git a/drivers/scsi/megaraid/megaraid_ioctl.h b/drivers/scsi/megaraid/megaraid_ioctl.h
index eedcbde..ae9c2ff 100644
--- a/drivers/scsi/megaraid/megaraid_ioctl.h
+++ b/drivers/scsi/megaraid/megaraid_ioctl.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  *
  *			Linux MegaRAID device driver
  *
  * Copyright (c) 2003-2004  LSI Logic Corporation.
  *
- *	   This program is free software; you can redistribute it and/or
- *	   modify it under the terms of the GNU General Public License
- *	   as published by the Free Software Foundation; either version
- *	   2 of the License, or (at your option) any later version.
- *
  * FILE		: megaraid_ioctl.h
  *
  * Definitions to interface with user level applications
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 530358c..f6ac819 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  *
  *			Linux MegaRAID device driver
  *
  * Copyright (c) 2003-2004  LSI Logic Corporation.
  *
- *	   This program is free software; you can redistribute it and/or
- *	   modify it under the terms of the GNU General Public License
- *	   as published by the Free Software Foundation; either version
- *	   2 of the License, or (at your option) any later version.
- *
  * FILE		: megaraid_mbox.c
  * Version	: v2.20.5.1 (Nov 16 2006)
  *
@@ -38,7 +34,6 @@
  * Dell PERC 4e/DC			1000	0408	1028	0002
  * Dell PERC 4e/SC			1000	0408	1028	0001
  *
- *
  * LSI MegaRAID SCSI 320-0		1000	1960	1000	A520
  * LSI MegaRAID SCSI 320-1		1000	1960	1000	0520
  * LSI MegaRAID SCSI 320-2		1000	1960	1000	0518
@@ -202,13 +197,6 @@
 MODULE_PARM_DESC(debug_level, "Debug level for driver (default=0)");
 
 /*
- * ### global data ###
- */
-static uint8_t megaraid_mbox_version[8] =
-	{ 0x02, 0x20, 0x04, 0x06, 3, 7, 20, 5 };
-
-
-/*
  * PCI table for all supported controllers.
  */
 static struct pci_device_id pci_id_table_g[] =  {
@@ -343,7 +331,6 @@
 	.eh_abort_handler		= megaraid_abort_handler,
 	.eh_host_reset_handler		= megaraid_reset_handler,
 	.change_queue_depth		= scsi_change_queue_depth,
-	.use_clustering			= ENABLE_CLUSTERING,
 	.no_write_same			= 1,
 	.sdev_attrs			= megaraid_sdev_attrs,
 	.shost_attrs			= megaraid_shost_attrs,
@@ -457,10 +444,9 @@
 
 	// Setup the default DMA mask. This would be changed later on
 	// depending on hardware capabilities
-	if (pci_set_dma_mask(adapter->pdev, DMA_BIT_MASK(32)) != 0) {
-
+	if (dma_set_mask(&adapter->pdev->dev, DMA_BIT_MASK(32))) {
 		con_log(CL_ANN, (KERN_WARNING
-			"megaraid: pci_set_dma_mask failed:%d\n", __LINE__));
+			"megaraid: dma_set_mask failed:%d\n", __LINE__));
 
 		goto out_free_adapter;
 	}
@@ -484,7 +470,7 @@
 	// Start the mailbox based controller
 	if (megaraid_init_mbox(adapter) != 0) {
 		con_log(CL_ANN, (KERN_WARNING
-			"megaraid: maibox adapter did not initialize\n"));
+			"megaraid: mailbox adapter did not initialize\n"));
 
 		goto out_free_adapter;
 	}
@@ -878,11 +864,12 @@
 		adapter->pdev->device == PCI_DEVICE_ID_PERC4_DI_EVERGLADES) ||
 		(adapter->pdev->vendor == PCI_VENDOR_ID_DELL &&
 		adapter->pdev->device == PCI_DEVICE_ID_PERC4E_DI_KOBUK)) {
-		if (pci_set_dma_mask(adapter->pdev, DMA_BIT_MASK(64))) {
+		if (dma_set_mask(&adapter->pdev->dev, DMA_BIT_MASK(64))) {
 			con_log(CL_ANN, (KERN_WARNING
 				"megaraid: DMA mask for 64-bit failed\n"));
 
-			if (pci_set_dma_mask (adapter->pdev, DMA_BIT_MASK(32))) {
+			if (dma_set_mask(&adapter->pdev->dev,
+						DMA_BIT_MASK(32))) {
 				con_log(CL_ANN, (KERN_WARNING
 					"megaraid: 32-bit DMA mask failed\n"));
 				goto out_free_sysfs_res;
@@ -950,7 +937,7 @@
  * megaraid_alloc_cmd_packets - allocate shared mailbox
  * @adapter		: soft state of the raid controller
  *
- * Allocate and align the shared mailbox. This maibox is used to issue
+ * Allocate and align the shared mailbox. This mailbox is used to issue
  * all the commands. For IO based controllers, the mailbox is also registered
  * with the FW. Allocate memory for all commands as well.
  * This is our big allocator.
@@ -975,9 +962,10 @@
 	 * Allocate the common 16-byte aligned memory for the handshake
 	 * mailbox.
 	 */
-	raid_dev->una_mbox64 = pci_zalloc_consistent(adapter->pdev,
-						     sizeof(mbox64_t),
-						     &raid_dev->una_mbox64_dma);
+	raid_dev->una_mbox64 = dma_alloc_coherent(&adapter->pdev->dev,
+						  sizeof(mbox64_t),
+						  &raid_dev->una_mbox64_dma,
+						  GFP_KERNEL);
 
 	if (!raid_dev->una_mbox64) {
 		con_log(CL_ANN, (KERN_WARNING
@@ -1003,8 +991,8 @@
 			align;
 
 	// Allocate memory for commands issued internally
-	adapter->ibuf = pci_zalloc_consistent(pdev, MBOX_IBUF_SIZE,
-					      &adapter->ibuf_dma_h);
+	adapter->ibuf = dma_alloc_coherent(&pdev->dev, MBOX_IBUF_SIZE,
+					   &adapter->ibuf_dma_h, GFP_KERNEL);
 	if (!adapter->ibuf) {
 
 		con_log(CL_ANN, (KERN_WARNING
@@ -1082,7 +1070,7 @@
 
 		scb->scp		= NULL;
 		scb->state		= SCB_FREE;
-		scb->dma_direction	= PCI_DMA_NONE;
+		scb->dma_direction	= DMA_NONE;
 		scb->dma_type		= MRAID_DMA_NONE;
 		scb->dev_channel	= -1;
 		scb->dev_target		= -1;
@@ -1098,10 +1086,10 @@
 out_free_scb_list:
 	kfree(adapter->kscb_list);
 out_free_ibuf:
-	pci_free_consistent(pdev, MBOX_IBUF_SIZE, (void *)adapter->ibuf,
+	dma_free_coherent(&pdev->dev, MBOX_IBUF_SIZE, (void *)adapter->ibuf,
 		adapter->ibuf_dma_h);
 out_free_common_mbox:
-	pci_free_consistent(adapter->pdev, sizeof(mbox64_t),
+	dma_free_coherent(&adapter->pdev->dev, sizeof(mbox64_t),
 		(caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma);
 
 	return -1;
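
The allocator changes in this file follow one mechanical pattern: the legacy PCI DMA wrappers are replaced by the generic DMA API. Since the generic API returns zeroed memory on modern kernels, former pci_zalloc_consistent() callers need no extra memset. For reference:

/* given: struct pci_dev *pdev; size_t sz; dma_addr_t dma_h; void *buf; */

/* before (legacy wrappers, removed upstream): */
buf = pci_alloc_consistent(pdev, sz, &dma_h);
pci_free_consistent(pdev, sz, buf, dma_h);

/* after (generic DMA API, as used throughout this patch): */
buf = dma_alloc_coherent(&pdev->dev, sz, &dma_h, GFP_KERNEL);
dma_free_coherent(&pdev->dev, sz, buf, dma_h);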
@@ -1123,10 +1111,10 @@
 
 	kfree(adapter->kscb_list);
 
-	pci_free_consistent(adapter->pdev, MBOX_IBUF_SIZE,
+	dma_free_coherent(&adapter->pdev->dev, MBOX_IBUF_SIZE,
 		(void *)adapter->ibuf, adapter->ibuf_dma_h);
 
-	pci_free_consistent(adapter->pdev, sizeof(mbox64_t),
+	dma_free_coherent(&adapter->pdev->dev, sizeof(mbox64_t),
 		(caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma);
 	return;
 }
@@ -1250,8 +1238,7 @@
 		dma_pool_free(raid_dev->sg_pool_handle, sg_pci_blk[i].vaddr,
 			sg_pci_blk[i].dma_addr);
 	}
-	if (raid_dev->sg_pool_handle)
-		dma_pool_destroy(raid_dev->sg_pool_handle);
+	dma_pool_destroy(raid_dev->sg_pool_handle);
 
 
 	epthru_pci_blk = raid_dev->epthru_pool;
@@ -1259,8 +1246,7 @@
 		dma_pool_free(raid_dev->epthru_pool_handle,
 			epthru_pci_blk[i].vaddr, epthru_pci_blk[i].dma_addr);
 	}
-	if (raid_dev->epthru_pool_handle)
-		dma_pool_destroy(raid_dev->epthru_pool_handle);
+	dma_pool_destroy(raid_dev->epthru_pool_handle);
 
 
 	mbox_pci_blk = raid_dev->mbox_pool;
@@ -1268,8 +1254,7 @@
 		dma_pool_free(raid_dev->mbox_pool_handle,
 			mbox_pci_blk[i].vaddr, mbox_pci_blk[i].dma_addr);
 	}
-	if (raid_dev->mbox_pool_handle)
-		dma_pool_destroy(raid_dev->mbox_pool_handle);
+	dma_pool_destroy(raid_dev->mbox_pool_handle);
 
 	return;
 }
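
The NULL checks dropped here are redundant: dma_pool_destroy(), like kfree(), is a no-op when passed NULL, so

if (raid_dev->mbox_pool_handle)
	dma_pool_destroy(raid_dev->mbox_pool_handle);

collapses to the bare call with identical behavior.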
@@ -1428,12 +1413,6 @@
 
 	adapter->outstanding_cmds++;
 
-	if (scb->dma_direction == PCI_DMA_TODEVICE)
-		pci_dma_sync_sg_for_device(adapter->pdev,
-					   scsi_sglist(scb->scp),
-					   scsi_sg_count(scb->scp),
-					   PCI_DMA_TODEVICE);
-
 	mbox->busy	= 1;	// Set busy
 	mbox->poll	= 0;
 	mbox->ack	= 0;
@@ -2181,31 +2160,6 @@
 
 
 /**
- * megaraid_mbox_sync_scb - sync kernel buffers
- * @adapter	: controller's soft state
- * @scb		: pointer to the resource packet
- *
- * DMA sync if required.
- */
-static void
-megaraid_mbox_sync_scb(adapter_t *adapter, scb_t *scb)
-{
-	mbox_ccb_t	*ccb;
-
-	ccb	= (mbox_ccb_t *)scb->ccb;
-
-	if (scb->dma_direction == PCI_DMA_FROMDEVICE)
-		pci_dma_sync_sg_for_cpu(adapter->pdev,
-					scsi_sglist(scb->scp),
-					scsi_sg_count(scb->scp),
-					PCI_DMA_FROMDEVICE);
-
-	scsi_dma_unmap(scb->scp);
-	return;
-}
-
-
-/**
  * megaraid_mbox_dpc - the tasklet to complete the commands from completed list
  * @devp	: pointer to HBA soft state
  *
@@ -2403,9 +2357,7 @@
 			megaraid_mbox_display_scb(adapter, scb);
 		}
 
-		// Free our internal resources and call the mid-layer callback
-		// routine
-		megaraid_mbox_sync_scb(adapter, scb);
+		scsi_dma_unmap(scp);
 
 		// remove from local clist
 		list_del_init(&scb->list);
@@ -2577,7 +2529,6 @@
 	uint8_t		raw_mbox[sizeof(mbox_t)];
 	int		rval;
 	int		recovery_window;
-	int		recovering;
 	int		i;
 	uioc_t		*kioc;
 
@@ -2591,7 +2542,6 @@
 		return FAILED;
 	}
 
-
 	// Under exceptional conditions, FW can take up to 3 minutes to
 	// complete command processing. Wait for additional 2 minutes for the
 	// pending commands counter to go down to 0. If it doesn't, let the
@@ -2640,8 +2590,6 @@
 
 	recovery_window = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT;
 
-	recovering = adapter->outstanding_cmds;
-
 	for (i = 0; i < recovery_window; i++) {
 
 		megaraid_ack_sequence(adapter);
@@ -2725,13 +2673,10 @@
 mbox_post_sync_cmd(adapter_t *adapter, uint8_t raw_mbox[])
 {
 	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);
-	mbox64_t	*mbox64;
 	mbox_t		*mbox;
 	uint8_t		status;
 	int		i;
 
-
-	mbox64	= raid_dev->mbox64;
 	mbox	= raid_dev->mbox;
 
 	/*
@@ -2948,9 +2893,8 @@
 	 * Issue an ENQUIRY3 command to find out certain adapter parameters,
 	 * e.g., max channels, max commands etc.
 	 */
-	pinfo = pci_zalloc_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
-				      &pinfo_dma_h);
-
+	pinfo = dma_alloc_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t),
+				   &pinfo_dma_h, GFP_KERNEL);
 	if (pinfo == NULL) {
 		con_log(CL_ANN, (KERN_WARNING
 			"megaraid: out of memory, %s %d\n", __func__,
@@ -2971,7 +2915,7 @@
 
 		con_log(CL_ANN, (KERN_WARNING "megaraid: Inquiry3 failed\n"));
 
-		pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
+		dma_free_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t),
 			pinfo, pinfo_dma_h);
 
 		return -1;
@@ -3002,7 +2946,7 @@
 		con_log(CL_ANN, (KERN_WARNING
 			"megaraid: product info failed\n"));
 
-		pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
+		dma_free_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t),
 			pinfo, pinfo_dma_h);
 
 		return -1;
@@ -3038,7 +2982,7 @@
 		"megaraid: fw version:[%s] bios version:[%s]\n",
 		adapter->fw_version, adapter->bios_version));
 
-	pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t), pinfo,
+	dma_free_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t), pinfo,
 			pinfo_dma_h);
 
 	return 0;
@@ -3135,7 +3079,6 @@
 static int
 megaraid_mbox_support_random_del(adapter_t *adapter)
 {
-	mbox_t		*mbox;
 	uint8_t		raw_mbox[sizeof(mbox_t)];
 	int		rval;
 
@@ -3157,8 +3100,6 @@
 		return 0;
 	}
 
-	mbox = (mbox_t *)raw_mbox;
-
 	memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
 
 	raw_mbox[0] = FC_DEL_LOGDRV;
@@ -3263,12 +3204,8 @@
 static void
 megaraid_mbox_flush_cache(adapter_t *adapter)
 {
-	mbox_t	*mbox;
 	uint8_t	raw_mbox[sizeof(mbox_t)];
 
-
-	mbox = (mbox_t *)raw_mbox;
-
 	memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
 
 	raw_mbox[0] = FLUSH_ADAPTER;
@@ -3299,7 +3236,6 @@
 	mbox_t	*mbox;
 	uint8_t	raw_mbox[sizeof(mbox_t)];
 	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);
-	mbox64_t *mbox64;
 	int	status = 0;
 	int i;
 	uint32_t dword;
@@ -3310,7 +3246,6 @@
 
 	raw_mbox[0] = 0xFF;
 
-	mbox64	= raid_dev->mbox64;
 	mbox	= raid_dev->mbox;
 
 	/* Wait until mailbox is free */
@@ -3515,7 +3450,7 @@
 
 		scb->scp		= NULL;
 		scb->state		= SCB_FREE;
-		scb->dma_direction	= PCI_DMA_NONE;
+		scb->dma_direction	= DMA_NONE;
 		scb->dma_type		= MRAID_DMA_NONE;
 		scb->dev_channel	= -1;
 		scb->dev_target		= -1;
@@ -3653,7 +3588,7 @@
 
 	scb->state		= SCB_ACTIVE;
 	scb->dma_type		= MRAID_DMA_NONE;
-	scb->dma_direction	= PCI_DMA_NONE;
+	scb->dma_direction	= DMA_NONE;
 
 	ccb		= (mbox_ccb_t *)scb->ccb;
 	mbox64		= (mbox64_t *)(unsigned long)kioc->cmdbuf;
@@ -3794,10 +3729,6 @@
 static int
 gather_hbainfo(adapter_t *adapter, mraid_hba_info_t *hinfo)
 {
-	uint8_t	dmajor;
-
-	dmajor			= megaraid_mbox_version[0];
-
 	hinfo->pci_vendor_id	= adapter->pdev->vendor;
 	hinfo->pci_device_id	= adapter->pdev->device;
 	hinfo->subsys_vendor_id	= adapter->pdev->subsystem_vendor;
@@ -3843,8 +3774,8 @@
 
 	raid_dev->sysfs_mbox64 = kmalloc(sizeof(mbox64_t), GFP_KERNEL);
 
-	raid_dev->sysfs_buffer = pci_alloc_consistent(adapter->pdev,
-			PAGE_SIZE, &raid_dev->sysfs_buffer_dma);
+	raid_dev->sysfs_buffer = dma_alloc_coherent(&adapter->pdev->dev,
+			PAGE_SIZE, &raid_dev->sysfs_buffer_dma, GFP_KERNEL);
 
 	if (!raid_dev->sysfs_uioc || !raid_dev->sysfs_mbox64 ||
 		!raid_dev->sysfs_buffer) {
@@ -3881,7 +3812,7 @@
 	kfree(raid_dev->sysfs_mbox64);
 
 	if (raid_dev->sysfs_buffer) {
-		pci_free_consistent(adapter->pdev, PAGE_SIZE,
+		dma_free_coherent(&adapter->pdev->dev, PAGE_SIZE,
 			raid_dev->sysfs_buffer, raid_dev->sysfs_buffer_dma);
 	}
 }
diff --git a/drivers/scsi/megaraid/megaraid_mbox.h b/drivers/scsi/megaraid/megaraid_mbox.h
index c1d86d9..3e4347c 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.h
+++ b/drivers/scsi/megaraid/megaraid_mbox.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  *
  *			Linux MegaRAID device driver
  *
  * Copyright (c) 2003-2004  LSI Logic Corporation.
  *
- *	   This program is free software; you can redistribute it and/or
- *	   modify it under the terms of the GNU General Public License
- *	   as published by the Free Software Foundation; either version
- *	   2 of the License, or (at your option) any later version.
- *
  * FILE		: megaraid_mbox.h
  */
 
@@ -117,7 +113,7 @@
  * @raw_mbox		: raw mailbox pointer
  * @mbox		: mailbox
  * @mbox64		: extended mailbox
- * @mbox_dma_h		: maibox dma address
+ * @mbox_dma_h		: mailbox dma address
  * @sgl64		: 64-bit scatter-gather list
  * @sgl32		: 32-bit scatter-gather list
  * @sgl_dma_h		: dma handle for the scatter-gather list
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index 8428247..59cca89 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  *
  *			Linux MegaRAID device driver
  *
  * Copyright (c) 2003-2004  LSI Logic Corporation.
  *
- *	   This program is free software; you can redistribute it and/or
- *	   modify it under the terms of the GNU General Public License
- *	   as published by the Free Software Foundation; either version
- *	   2 of the License, or (at your option) any later version.
- *
  * FILE		: megaraid_mm.c
  * Version	: v2.20.2.7 (Jul 16 2006)
  *
@@ -1017,8 +1013,7 @@
 	kfree(adapter->kioc_list);
 	kfree(adapter->mbox_list);
 
-	if (adapter->pthru_dma_pool)
-		dma_pool_destroy(adapter->pthru_dma_pool);
+	dma_pool_destroy(adapter->pthru_dma_pool);
 
 	kfree(adapter);
 
diff --git a/drivers/scsi/megaraid/megaraid_mm.h b/drivers/scsi/megaraid/megaraid_mm.h
index a30e725..bf40115 100644
--- a/drivers/scsi/megaraid/megaraid_mm.h
+++ b/drivers/scsi/megaraid/megaraid_mm.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  *
  *			Linux MegaRAID device driver
  *
  * Copyright (c) 2003-2004  LSI Logic Corporation.
  *
- *	   This program is free software; you can redistribute it and/or
- *	   modify it under the terms of the GNU General Public License
- *	   as published by the Free Software Foundation; either version
- *	   2 of the License, or (at your option) any later version.
- *
  * FILE		: megaraid_mm.h
  */
 
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 67d356d..a6e788c 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -1,32 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  *  Linux MegaRAID driver for SAS based RAID controllers
  *
  *  Copyright (c) 2003-2013  LSI Corporation
- *  Copyright (c) 2013-2014  Avago Technologies
- *
- *  This program is free software; you can redistribute it and/or
- *  modify it under the terms of the GNU General Public License
- *  as published by the Free Software Foundation; either version 2
- *  of the License, or (at your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *  Copyright (c) 2013-2016  Avago Technologies
+ *  Copyright (c) 2016-2018  Broadcom Inc.
  *
  *  FILE: megaraid_sas.h
  *
- *  Authors: Avago Technologies
- *           Kashyap Desai <kashyap.desai@avagotech.com>
- *           Sumit Saxena <sumit.saxena@avagotech.com>
+ *  Authors: Broadcom Inc.
+ *           Kashyap Desai <kashyap.desai@broadcom.com>
+ *           Sumit Saxena <sumit.saxena@broadcom.com>
  *
- *  Send feedback to: megaraidlinux.pdl@avagotech.com
- *
- *  Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
- *  San Jose, California 95131
+ *  Send feedback to: megaraidlinux.pdl@broadcom.com
  */
 
 #ifndef LSI_MEGARAID_SAS_H
@@ -35,8 +21,8 @@
 /*
  * MegaRAID SAS Driver meta data
  */
-#define MEGASAS_VERSION				"07.706.03.00-rc1"
-#define MEGASAS_RELDATE				"May 21, 2018"
+#define MEGASAS_VERSION				"07.710.50.00-rc1"
+#define MEGASAS_RELDATE				"June 28, 2019"
 
 /*
  * Device IDs
@@ -62,6 +48,14 @@
 #define PCI_DEVICE_ID_LSI_TOMCAT		    0x0017
 #define PCI_DEVICE_ID_LSI_VENTURA_4PORT		0x001B
 #define PCI_DEVICE_ID_LSI_CRUSADER_4PORT	0x001C
+#define PCI_DEVICE_ID_LSI_AERO_10E1		0x10e1
+#define PCI_DEVICE_ID_LSI_AERO_10E2		0x10e2
+#define PCI_DEVICE_ID_LSI_AERO_10E5		0x10e5
+#define PCI_DEVICE_ID_LSI_AERO_10E6		0x10e6
+#define PCI_DEVICE_ID_LSI_AERO_10E0		0x10e0
+#define PCI_DEVICE_ID_LSI_AERO_10E3		0x10e3
+#define PCI_DEVICE_ID_LSI_AERO_10E4		0x10e4
+#define PCI_DEVICE_ID_LSI_AERO_10E7		0x10e7
 
 /*
  * Intel HBA SSDIDs
@@ -133,6 +127,8 @@
 #define MFI_RESET_ADAPTER			0x00000002
 #define MEGAMFI_FRAME_SIZE			64
 
+#define MFI_STATE_FAULT_CODE			0x0FFF0000
+#define MFI_STATE_FAULT_SUBCODE			0x0000FF00
 /*
  * During FW init, clear pending cmds & reset state using inbound_msg_0
  *
@@ -142,6 +138,7 @@
  * CLR_HANDSHAKE: FW is waiting for HANDSHAKE from BIOS or Driver
  * HOTPLUG	: Resume from Hotplug
  * MFI_STOP_ADP	: Send signal to FW to stop processing
+ * MFI_ADP_TRIGGER_SNAP_DUMP: Inform firmware to initiate snap dump
  */
 #define WRITE_SEQUENCE_OFFSET		(0x0000000FC) /* I20 */
 #define HOST_DIAGNOSTIC_OFFSET		(0x000000F8)  /* I20 */
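
The MFI_STATE_FAULT_CODE/SUBCODE masks added earlier in this header carve two fields out of the fault-state scratch-pad value; a plausible decode, with the shifts following directly from the mask positions:

/* given: struct megasas_instance *instance; */
u32 fw_state = instance->instancet->read_fw_status_reg(instance);

if ((fw_state & MFI_STATE_MASK) == MFI_STATE_FAULT)
	dev_warn(&instance->pdev->dev,
		 "FW fault: code 0x%x, subcode 0x%x\n",
		 (fw_state & MFI_STATE_FAULT_CODE) >> 16,
		 (fw_state & MFI_STATE_FAULT_SUBCODE) >> 8);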
@@ -158,6 +155,7 @@
 #define MFI_RESET_FLAGS				MFI_INIT_READY| \
 						MFI_INIT_MFIMODE| \
 						MFI_INIT_ABORT
+#define MFI_ADP_TRIGGER_SNAP_DUMP		0x00000100
 #define MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE    (0x01)
 
 /*
@@ -198,6 +196,7 @@
 	MFI_CMD_SMP		= 0x7,
 	MFI_CMD_STP		= 0x8,
 	MFI_CMD_NVME		= 0x9,
+	MFI_CMD_TOOLBOX		= 0xa,
 	MFI_CMD_OP_COUNT,
 	MFI_CMD_INVALID		= 0xff
 };
@@ -786,6 +785,38 @@
 	u8	targetId[MAX_LOGICAL_DRIVES_EXT];
 };
 
+struct MR_HOST_DEVICE_LIST_ENTRY {
+	struct {
+		union {
+			struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+				u8 reserved:7;
+				u8 is_sys_pd:1;
+#else
+				u8 is_sys_pd:1;
+				u8 reserved:7;
+#endif
+			} bits;
+			u8 byte;
+		} u;
+	} flags;
+	u8 scsi_type;
+	__le16 target_id;
+	u8 reserved[4];
+	__le64 sas_addr[2];
+} __packed;
+
+struct MR_HOST_DEVICE_LIST {
+	__le32			size;
+	__le32			count;
+	__le32			reserved[2];
+	struct MR_HOST_DEVICE_LIST_ENTRY	host_device_list[1];
+} __packed;
+
+#define HOST_DEVICE_LIST_SZ (sizeof(struct MR_HOST_DEVICE_LIST) +	       \
+			      (sizeof(struct MR_HOST_DEVICE_LIST_ENTRY) *      \
+			      (MEGASAS_MAX_PD + MAX_LOGICAL_DRIVES_EXT - 1)))
+
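
The "- 1" in HOST_DEVICE_LIST_SZ exists because struct MR_HOST_DEVICE_LIST already embeds one entry, so a worst case of MEGASAS_MAX_PD + MAX_LOGICAL_DRIVES_EXT devices needs only that many entries in total. A sketch of the intended allocation (hypothetical call site):

/* given: struct megasas_instance *instance; */
struct MR_HOST_DEVICE_LIST *list;
dma_addr_t list_h;

/* header + worst-case entry count; entry [0] lives in the header */
list = dma_alloc_coherent(&instance->pdev->dev, HOST_DEVICE_LIST_SZ,
			  &list_h, GFP_KERNEL);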
 
 /*
  * SAS controller properties
@@ -860,8 +891,26 @@
 		u32     reserved:18;
 #endif
 	} OnOffProperties;
-	u8 autoSnapVDSpace;
-	u8 viewSpace;
+
+	union {
+		u8 autoSnapVDSpace;
+		u8 viewSpace;
+		struct {
+#if   defined(__BIG_ENDIAN_BITFIELD)
+			u16 reserved3:9;
+			u16 enable_fw_dev_list:1;
+			u16 reserved2:1;
+			u16 enable_snap_dump:1;
+			u16 reserved1:4;
+#else
+			u16 reserved1:4;
+			u16 enable_snap_dump:1;
+			u16 reserved2:1;
+			u16 enable_fw_dev_list:1;
+			u16 reserved3:9;
+#endif
+		} on_off_properties2;
+	};
 	__le16 spinDownTime;
 	u8  reserved[24];
 } __packed;
@@ -1407,7 +1456,39 @@
 
 	u8 reserved6[64];
 
-	u32 rsvdForAdptOp[64];
+	struct {
+	#if defined(__BIG_ENDIAN_BITFIELD)
+		u32 reserved:19;
+		u32 support_pci_lane_margining: 1;
+		u32 support_psoc_update:1;
+		u32 support_force_personality_change:1;
+		u32 support_fde_type_mix:1;
+		u32 support_snap_dump:1;
+		u32 support_nvme_tm:1;
+		u32 support_oce_only:1;
+		u32 support_ext_mfg_vpd:1;
+		u32 support_pcie:1;
+		u32 support_cvhealth_info:1;
+		u32 support_profile_change:2;
+		u32 mr_config_ext2_supported:1;
+	#else
+		u32 mr_config_ext2_supported:1;
+		u32 support_profile_change:2;
+		u32 support_cvhealth_info:1;
+		u32 support_pcie:1;
+		u32 support_ext_mfg_vpd:1;
+		u32 support_oce_only:1;
+		u32 support_nvme_tm:1;
+		u32 support_snap_dump:1;
+		u32 support_fde_type_mix:1;
+		u32 support_force_personality_change:1;
+		u32 support_psoc_update:1;
+		u32 support_pci_lane_margining: 1;
+		u32 reserved:19;
+	#endif
+	} adapter_operations5;
+
+	u32 rsvdForAdptOp[63];
 
 	u8 reserved7[3];
 
@@ -1441,7 +1522,9 @@
 #define MEGASAS_FW_BUSY				1
 
 /* Driver's internal Logging levels*/
-#define OCR_LOGS    (1 << 0)
+#define OCR_DEBUG    (1 << 0)
+#define TM_DEBUG     (1 << 1)
+#define LD_PD_DEBUG    (1 << 2)
 
 #define SCAN_PD_CHANNEL	0x1
 #define SCAN_VD_CHANNEL	0x2
@@ -1485,7 +1568,6 @@
 #define MEGASAS_IOCTL_CMD			0
 #define MEGASAS_DEFAULT_CMD_TIMEOUT		90
 #define MEGASAS_THROTTLE_QUEUE_DEPTH		16
-#define MEGASAS_BLOCKED_CMD_TIMEOUT		60
 #define MEGASAS_DEFAULT_TM_TIMEOUT		50
 /*
  * FW reports the maximum of number of commands that it can accept (maximum
@@ -1518,6 +1600,7 @@
 #define MFI_IO_TIMEOUT_SECS			180
 #define MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF	(5 * HZ)
 #define MEGASAS_OCR_SETTLE_TIME_VF		(1000 * 30)
+#define MEGASAS_SRIOV_MAX_RESET_TRIES_VF	1
 #define MEGASAS_ROUTINE_WAIT_TIME_VF		300
 #define MFI_REPLY_1078_MESSAGE_INTERRUPT	0x80000000
 #define MFI_REPLY_GEN2_MESSAGE_INTERRUPT	0x00000001
@@ -1542,13 +1625,21 @@
 
 #define MR_CAN_HANDLE_SYNC_CACHE_OFFSET		0X01000000
 
+#define MR_ATOMIC_DESCRIPTOR_SUPPORT_OFFSET	(1 << 24)
+
 #define MR_CAN_HANDLE_64_BIT_DMA_OFFSET		(1 << 25)
+#define MR_INTR_COALESCING_SUPPORT_OFFSET	(1 << 26)
+
+#define MEGASAS_WATCHDOG_THREAD_INTERVAL	1000
+#define MEGASAS_WAIT_FOR_NEXT_DMA_MSECS		20
+#define MEGASAS_WATCHDOG_WAIT_COUNT		50
 
 enum MR_ADAPTER_TYPE {
 	MFI_SERIES = 1,
 	THUNDERBOLT_SERIES = 2,
 	INVADER_SERIES = 3,
 	VENTURA_SERIES = 4,
+	AERO_SERIES = 5,
 };
 
 /*
@@ -1588,11 +1679,10 @@
 
 	u32 	reserved_3[3];			/*00A4h*/
 
-	u32 	outbound_scratch_pad ;		/*00B0h*/
-	u32	outbound_scratch_pad_2;         /*00B4h*/
-	u32	outbound_scratch_pad_3;         /*00B8h*/
-	u32	outbound_scratch_pad_4;         /*00BCh*/
-
+	u32	outbound_scratch_pad_0;		/*00B0h*/
+	u32	outbound_scratch_pad_1;         /*00B4h*/
+	u32	outbound_scratch_pad_2;         /*00B8h*/
+	u32	outbound_scratch_pad_3;         /*00BCh*/
 
 	u32 	inbound_low_queue_port ;	/*00C0h*/
 
@@ -1664,7 +1754,8 @@
 typedef union _MFI_CAPABILITIES {
 	struct {
 #if   defined(__BIG_ENDIAN_BITFIELD)
-	u32     reserved:17;
+	u32     reserved:16;
+	u32	support_fw_exposed_dev_list:1;
 	u32	support_nvme_passthru:1;
 	u32     support_64bit_mode:1;
 	u32 support_pd_map_target_id:1;
@@ -1696,7 +1787,8 @@
 	u32	support_pd_map_target_id:1;
 	u32     support_64bit_mode:1;
 	u32	support_nvme_passthru:1;
-	u32     reserved:17;
+	u32	support_fw_exposed_dev_list:1;
+	u32     reserved:16;
 #endif
 	} mfi_capabilities;
 	__le32		reg;
@@ -1715,7 +1807,7 @@
 	__le32 pad_0;		/*0Ch */
 
 	__le16 flags;		/*10h */
-	__le16 reserved_3;		/*12h */
+	__le16 replyqueue_mask;		/*12h */
 	__le32 data_xfer_len;	/*14h */
 
 	__le32 queue_info_new_phys_addr_lo;	/*18h */
@@ -2113,6 +2205,10 @@
 struct megasas_irq_context {
 	struct megasas_instance *instance;
 	u32 MSIxIndex;
+	u32 os_irq;
+	struct irq_poll irqpoll;
+	bool irq_poll_scheduled;
+	bool irq_line_enable;
 };
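
The new irq_poll members back a hybrid interrupt/polled completion scheme (hence the "select IRQ_POLL" added to Kconfig.megaraid). A sketch of the assumed wiring, using the megasas_irqpoll() handler declared later in this patch and the threshold_reply_count member added further down as the budget:

/* given: struct megasas_irq_context *irq_ctx;
 *        struct megasas_instance *instance;
 */

/* setup: one irq_poll instance per reply queue */
irq_poll_init(&irq_ctx->irqpoll, instance->threshold_reply_count,
	      megasas_irqpoll);

/* hard-irq path: hand a hot queue over to polled completion */
irq_ctx->irq_poll_scheduled = true;
irq_ctx->irq_line_enable = true;
disable_irq_nosync(irq_ctx->os_irq);
irq_poll_sched(&irq_ctx->irqpoll);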
 
 struct MR_DRV_SYSTEM_INFO {
@@ -2143,6 +2239,23 @@
 #define MR_DEFAULT_NVME_MDTS_KB		128
 #define MR_NVME_PAGE_SIZE_MASK		0x000000FF
 
+/*Aero performance parameters*/
+#define MR_HIGH_IOPS_QUEUE_COUNT	8
+#define MR_DEVICE_HIGH_IOPS_DEPTH	8
+#define MR_HIGH_IOPS_BATCH_COUNT	16
+
+enum MR_PERF_MODE {
+	MR_BALANCED_PERF_MODE		= 0,
+	MR_IOPS_PERF_MODE		= 1,
+	MR_LATENCY_PERF_MODE		= 2,
+};
+
+#define MEGASAS_PERF_MODE_2STR(mode) \
+		((mode) == MR_BALANCED_PERF_MODE ? "Balanced" : \
+		 (mode) == MR_IOPS_PERF_MODE ? "IOPS" : \
+		 (mode) == MR_LATENCY_PERF_MODE ? "Latency" : \
+		 "Unknown")
+
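
MEGASAS_PERF_MODE_2STR exists only for logging; a plausible call site:

dev_info(&instance->pdev->dev, "Performance mode: %s\n",
	 MEGASAS_PERF_MODE_2STR(instance->perf_mode));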
 struct megasas_instance {
 
 	unsigned int *reply_map;
@@ -2181,6 +2294,12 @@
 	struct MR_LD_TARGETID_LIST *ld_targetid_list_buf;
 	dma_addr_t ld_targetid_list_buf_h;
 
+	struct MR_HOST_DEVICE_LIST *host_device_list_buf;
+	dma_addr_t host_device_list_buf_h;
+
+	struct MR_SNAPDUMP_PROPERTIES *snapdump_prop;
+	dma_addr_t snapdump_prop_h;
+
 	void *crash_buf[MAX_CRASH_DUMP_SIZE];
 	unsigned int    fw_crash_buffer_size;
 	unsigned int    fw_crash_state;
@@ -2193,6 +2312,7 @@
 	u32 secure_jbod_support;
 	u32 support_morethan256jbod; /* FW support for more than 256 PD/JBOD */
 	bool use_seqnum_jbod_fp;   /* Added for PD sequence */
+	bool smp_affinity_enable;
 	spinlock_t crashdump_lock;
 
 	struct megasas_register_set __iomem *reg_set;
@@ -2210,6 +2330,7 @@
 	u16 ldio_threshold;
 	u16 cur_can_queue;
 	u32 max_sectors_per_req;
+	bool msix_load_balance;
 	struct megasas_aen_event *ev;
 
 	struct megasas_cmd **cmd_list;
@@ -2237,20 +2358,20 @@
 	struct pci_dev *pdev;
 	u32 unique_id;
 	u32 fw_support_ieee;
+	u32 threshold_reply_count;
 
 	atomic_t fw_outstanding;
 	atomic_t ldio_outstanding;
 	atomic_t fw_reset_no_pci_access;
-	atomic_t ieee_sgl;
-	atomic_t prp_sgl;
-	atomic_t sge_holes_type1;
-	atomic_t sge_holes_type2;
-	atomic_t sge_holes_type3;
+	atomic64_t total_io_count;
+	atomic64_t high_iops_outstanding;
 
 	struct megasas_instance_template *instancet;
 	struct tasklet_struct isr_tasklet;
 	struct work_struct work_init;
-	struct work_struct crash_init;
+	struct delayed_work fw_fault_work;
+	struct workqueue_struct *fw_fault_work_q;
+	char fault_handler_work_q_name[48];
 
 	u8 flag;
 	u8 unload;
@@ -2308,9 +2429,22 @@
 	u8 adapter_type;
 	bool consistent_mask_64bit;
 	bool support_nvme_passthru;
+	bool enable_sdev_max_qd;
 	u8 task_abort_tmo;
 	u8 max_reset_tmo;
+	u8 snapdump_wait_time;
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *debugfs_root;
+	struct dentry *raidmap_dump;
+#endif
+	u8 enable_fw_dev_list;
+	bool atomic_desc_support;
+	bool support_seqnum_jbod_fp;
+	bool support_pci_lane_margining;
+	u8  low_latency_index_start;
+	int perf_mode;
 };
+
 struct MR_LD_VF_MAP {
 	u32 size;
 	union MR_LD_REF ref;
@@ -2386,9 +2520,9 @@
 	void (*enable_intr)(struct megasas_instance *);
 	void (*disable_intr)(struct megasas_instance *);
 
-	int (*clear_intr)(struct megasas_register_set __iomem *);
+	int (*clear_intr)(struct megasas_instance *);
 
-	u32 (*read_fw_status_reg)(struct megasas_register_set __iomem *);
+	u32 (*read_fw_status_reg)(struct megasas_instance *);
 	int (*adp_reset)(struct megasas_instance *, \
 		struct megasas_register_set __iomem *);
 	int (*check_reset)(struct megasas_instance *, \
@@ -2535,11 +2669,11 @@
 					   bool is_target_prop);
 int megasas_get_target_prop(struct megasas_instance *instance,
 			    struct scsi_device *sdev);
+void megasas_get_snapdump_properties(struct megasas_instance *instance);
 
 int megasas_set_crash_dump_params(struct megasas_instance *instance,
 	u8 crash_buf_state);
 void megasas_free_host_crash_buffer(struct megasas_instance *instance);
-void megasas_fusion_crash_dump_wq(struct work_struct *work);
 
 void megasas_return_cmd_fusion(struct megasas_instance *instance,
 	struct megasas_cmd_fusion *cmd);
@@ -2560,7 +2694,15 @@
 u32 mega_mod64(u64 dividend, u32 divisor);
 int megasas_alloc_fusion_context(struct megasas_instance *instance);
 void megasas_free_fusion_context(struct megasas_instance *instance);
+int megasas_fusion_start_watchdog(struct megasas_instance *instance);
+void megasas_fusion_stop_watchdog(struct megasas_instance *instance);
+
 void megasas_set_dma_settings(struct megasas_instance *instance,
 			      struct megasas_dcmd_frame *dcmd,
 			      dma_addr_t dma_addr, u32 dma_len);
+int megasas_adp_reset_wait_for_ready(struct megasas_instance *instance,
+				     bool do_adp_reset,
+				     int ocr_context);
+int megasas_irqpoll(struct irq_poll *irqpoll, int budget);
+void megasas_dump_fusion_io(struct scsi_cmnd *scmd);
 #endif				/*LSI_MEGARAID_SAS_H */
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index f6de752..42cf38c 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1,34 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  *  Linux MegaRAID driver for SAS based RAID controllers
  *
  *  Copyright (c) 2003-2013  LSI Corporation
- *  Copyright (c) 2013-2014  Avago Technologies
+ *  Copyright (c) 2013-2016  Avago Technologies
+ *  Copyright (c) 2016-2018  Broadcom Inc.
  *
- *  This program is free software; you can redistribute it and/or
- *  modify it under the terms of the GNU General Public License
- *  as published by the Free Software Foundation; either version 2
- *  of the License, or (at your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- *  Authors: Avago Technologies
+ *  Authors: Broadcom Inc.
  *           Sreenivas Bagalkote
  *           Sumant Patro
  *           Bo Yang
  *           Adam Radford
- *           Kashyap Desai <kashyap.desai@avagotech.com>
- *           Sumit Saxena <sumit.saxena@avagotech.com>
+ *           Kashyap Desai <kashyap.desai@broadcom.com>
+ *           Sumit Saxena <sumit.saxena@broadcom.com>
  *
- *  Send feedback to: megaraidlinux.pdl@avagotech.com
- *
- *  Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
- *  San Jose, California 95131
+ *  Send feedback to: megaraidlinux.pdl@broadcom.com
  */
 
 #include <linux/kernel.h>
@@ -50,12 +36,14 @@
 #include <linux/mutex.h>
 #include <linux/poll.h>
 #include <linux/vmalloc.h>
+#include <linux/irq_poll.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_tcq.h>
+#include <scsi/scsi_dbg.h>
 #include "megaraid_sas_fusion.h"
 #include "megaraid_sas.h"
 
@@ -64,52 +52,71 @@
  * Will be set in megasas_init_mfi if user does not provide
  */
 static unsigned int max_sectors;
-module_param_named(max_sectors, max_sectors, int, 0);
+module_param_named(max_sectors, max_sectors, int, 0444);
 MODULE_PARM_DESC(max_sectors,
 	"Maximum number of sectors per IO command");
 
 static int msix_disable;
-module_param(msix_disable, int, S_IRUGO);
+module_param(msix_disable, int, 0444);
 MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");
 
 static unsigned int msix_vectors;
-module_param(msix_vectors, int, S_IRUGO);
+module_param(msix_vectors, int, 0444);
 MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");
 
 static int allow_vf_ioctls;
-module_param(allow_vf_ioctls, int, S_IRUGO);
+module_param(allow_vf_ioctls, int, 0444);
 MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");
 
 static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
-module_param(throttlequeuedepth, int, S_IRUGO);
+module_param(throttlequeuedepth, int, 0444);
 MODULE_PARM_DESC(throttlequeuedepth,
 	"Adapter queue depth when throttled due to I/O timeout. Default: 16");
 
 unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
-module_param(resetwaittime, int, S_IRUGO);
-MODULE_PARM_DESC(resetwaittime, "Wait time in seconds after I/O timeout "
-		 "before resetting adapter. Default: 180");
+module_param(resetwaittime, int, 0444);
+MODULE_PARM_DESC(resetwaittime, "Wait time (1-180s) after I/O timeout before resetting adapter. Default: 180s");
 
 int smp_affinity_enable = 1;
-module_param(smp_affinity_enable, int, S_IRUGO);
+module_param(smp_affinity_enable, int, 0444);
 MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");
 
 int rdpq_enable = 1;
-module_param(rdpq_enable, int, S_IRUGO);
-MODULE_PARM_DESC(rdpq_enable, " Allocate reply queue in chunks for large queue depth enable/disable Default: disable(0)");
+module_param(rdpq_enable, int, 0444);
+MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable Default: enable(1)");
 
 unsigned int dual_qdepth_disable;
-module_param(dual_qdepth_disable, int, S_IRUGO);
+module_param(dual_qdepth_disable, int, 0444);
 MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");
 
 unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
-module_param(scmd_timeout, int, S_IRUGO);
+module_param(scmd_timeout, int, 0444);
 MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer.");
 
+int perf_mode = -1;
+module_param(perf_mode, int, 0444);
+MODULE_PARM_DESC(perf_mode, "Performance mode (only for Aero adapters), options:\n\t\t"
+		"0 - balanced: High iops and low latency queues are allocated &\n\t\t"
+		"interrupt coalescing is enabled only on high iops queues\n\t\t"
+		"1 - iops: High iops queues are not allocated &\n\t\t"
+		"interrupt coalescing is enabled on all queues\n\t\t"
+		"2 - latency: High iops queues are not allocated &\n\t\t"
+		"interrupt coalescing is disabled on all queues\n\t\t"
+		"default mode is 'balanced'"
+		);
+
+int event_log_level = MFI_EVT_CLASS_CRITICAL;
+module_param(event_log_level, int, 0644);
+MODULE_PARM_DESC(event_log_level, "Asynchronous event logging level - range is: -2(CLASS_DEBUG) to 4(CLASS_DEAD), Default: 2(CLASS_CRITICAL)");
+
+unsigned int enable_sdev_max_qd;
+module_param(enable_sdev_max_qd, int, 0444);
+MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. Default: 0");
+
 MODULE_LICENSE("GPL");
 MODULE_VERSION(MEGASAS_VERSION);
-MODULE_AUTHOR("megaraidlinux.pdl@avagotech.com");
-MODULE_DESCRIPTION("Avago MegaRAID SAS Driver");
+MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com");
+MODULE_DESCRIPTION("Broadcom MegaRAID SAS Driver");
 
 int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
 static int megasas_get_pd_list(struct megasas_instance *instance);
@@ -165,6 +172,14 @@
 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)},
 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)},
 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER_4PORT)},
+	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E1)},
+	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E2)},
+	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E5)},
+	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E6)},
+	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E0)},
+	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E3)},
+	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E4)},
+	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E7)},
 	{}
 };
 
@@ -181,15 +196,22 @@
 u32 megasas_dbg_lvl;
 static u32 support_device_change;
 static bool support_nvme_encapsulation;
+static bool support_pci_lane_margining;
 
 /* define lock for aen poll */
 spinlock_t poll_aen_lock;
 
+extern struct dentry *megasas_debugfs_root;
+extern void megasas_init_debugfs(void);
+extern void megasas_exit_debugfs(void);
+extern void megasas_setup_debugfs(struct megasas_instance *instance);
+extern void megasas_destroy_debugfs(struct megasas_instance *instance);
+
 void
 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
 		     u8 alt_status);
 static u32
-megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs);
+megasas_read_fw_status_reg_gen2(struct megasas_instance *instance);
 static int
 megasas_adp_reset_gen2(struct megasas_instance *instance,
 		       struct megasas_register_set __iomem *reg_set);
@@ -219,6 +241,28 @@
 static inline void
 megasas_init_ctrl_params(struct megasas_instance *instance);
 
+u32 megasas_readl(struct megasas_instance *instance,
+		  const volatile void __iomem *addr)
+{
+	u32 i = 0, ret_val;
+	/*
+	 * Due to a hardware erratum in Aero controllers, reads to certain
+	 * Fusion registers could intermittently return all zeroes.
+	 * This behavior is transient in nature and subsequent reads will
+	 * return a valid value. As a workaround in the driver, retry the
+	 * readl up to three times until a non-zero value is read.
+	 */
+	if (instance->adapter_type == AERO_SERIES) {
+		do {
+			ret_val = readl(addr);
+			i++;
+		} while (ret_val == 0 && i < 3);
+		return ret_val;
+	} else {
+		return readl(addr);
+	}
+}
+
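
Call sites simply substitute megasas_readl() for readl() on the errata-affected registers, e.g. (illustrative):

/* given: struct megasas_instance *instance; */
u32 fw_state = megasas_readl(instance,
			     &instance->reg_set->outbound_scratch_pad_0) &
	       MFI_STATE_MASK;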
 /**
  * megasas_set_dma_settings -	Populate DMA address, length and flags for DCMDs
  * @instance:			Adapter soft state
@@ -244,7 +288,7 @@
 	}
 }
 
-void
+static void
 megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
 {
 	instance->instancet->fire_cmd(instance,
@@ -368,7 +412,13 @@
 	union megasas_evt_class_locale class_locale;
 	class_locale.word = le32_to_cpu(evt_detail->cl.word);
 
-	if (class_locale.members.class >= MFI_EVT_CLASS_CRITICAL)
+	if ((event_log_level < MFI_EVT_CLASS_DEBUG) ||
+	    (event_log_level > MFI_EVT_CLASS_DEAD)) {
+		printk(KERN_WARNING "megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n");
+		event_log_level = MFI_EVT_CLASS_CRITICAL;
+	}
+
+	if (class_locale.members.class >= event_log_level)
 		dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n",
 			le32_to_cpu(evt_detail->seq_num),
 			format_timestamp(le32_to_cpu(evt_detail->time_stamp)),
@@ -419,19 +469,21 @@
  * @regs:			MFI register set
  */
 static u32
-megasas_read_fw_status_reg_xscale(struct megasas_register_set __iomem * regs)
+megasas_read_fw_status_reg_xscale(struct megasas_instance *instance)
 {
-	return readl(&(regs)->outbound_msg_0);
+	return readl(&instance->reg_set->outbound_msg_0);
 }
 /**
  * megasas_clear_interrupt_xscale -	Check & clear interrupt
  * @regs:				MFI register set
  */
 static int
-megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs)
+megasas_clear_intr_xscale(struct megasas_instance *instance)
 {
 	u32 status;
 	u32 mfiStatus = 0;
+	struct megasas_register_set __iomem *regs;
+	regs = instance->reg_set;
 
 	/*
 	 * Check if it is our interrupt
@@ -596,9 +648,9 @@
  * @regs:			MFI register set
  */
 static u32
-megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs)
+megasas_read_fw_status_reg_ppc(struct megasas_instance *instance)
 {
-	return readl(&(regs)->outbound_scratch_pad);
+	return readl(&instance->reg_set->outbound_scratch_pad_0);
 }
 
 /**
@@ -606,9 +658,11 @@
  * @regs:				MFI register set
  */
 static int
-megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
+megasas_clear_intr_ppc(struct megasas_instance *instance)
 {
 	u32 status, mfiStatus = 0;
+	struct megasas_register_set __iomem *regs;
+	regs = instance->reg_set;
 
 	/*
 	 * Check if it is our interrupt
@@ -721,9 +775,9 @@
  * @regs:			MFI register set
  */
 static u32
-megasas_read_fw_status_reg_skinny(struct megasas_register_set __iomem *regs)
+megasas_read_fw_status_reg_skinny(struct megasas_instance *instance)
 {
-	return readl(&(regs)->outbound_scratch_pad);
+	return readl(&instance->reg_set->outbound_scratch_pad_0);
 }
 
 /**
@@ -731,10 +785,12 @@
  * @regs:				MFI register set
  */
 static int
-megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
+megasas_clear_intr_skinny(struct megasas_instance *instance)
 {
 	u32 status;
 	u32 mfiStatus = 0;
+	struct megasas_register_set __iomem *regs;
+	regs = instance->reg_set;
 
 	/*
 	 * Check if it is our interrupt
@@ -748,7 +804,7 @@
 	/*
 	 * Check if it is our interrupt
 	 */
-	if ((megasas_read_fw_status_reg_skinny(regs) & MFI_STATE_MASK) ==
+	if ((megasas_read_fw_status_reg_skinny(instance) & MFI_STATE_MASK) ==
 	    MFI_STATE_FAULT) {
 		mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
 	} else
@@ -786,7 +842,6 @@
 	       &(regs)->inbound_high_queue_port);
 	writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
 	       &(regs)->inbound_low_queue_port);
-	mmiowb();
 	spin_unlock_irqrestore(&instance->hba_lock, flags);
 }
 
@@ -866,9 +921,9 @@
  * @regs:                      MFI register set
  */
 static u32
-megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs)
+megasas_read_fw_status_reg_gen2(struct megasas_instance *instance)
 {
-	return readl(&(regs)->outbound_scratch_pad);
+	return readl(&instance->reg_set->outbound_scratch_pad_0);
 }
 
 /**
@@ -876,10 +931,12 @@
  * @regs:                              MFI register set
  */
 static int
-megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs)
+megasas_clear_intr_gen2(struct megasas_instance *instance)
 {
 	u32 status;
 	u32 mfiStatus = 0;
+	struct megasas_register_set __iomem *regs;
+	regs = instance->reg_set;
 
 	/*
 	 * Check if it is our interrupt
@@ -1080,8 +1137,9 @@
 		ret = wait_event_timeout(instance->int_cmd_wait_q,
 				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
 		if (!ret) {
-			dev_err(&instance->pdev->dev, "Failed from %s %d DCMD Timed out\n",
-				__func__, __LINE__);
+			dev_err(&instance->pdev->dev,
+				"DCMD(opcode: 0x%x) is timed out, func:%s\n",
+				"DCMD (opcode: 0x%x) timed out, func: %s\n",
 			return DCMD_TIMEOUT;
 		}
 	} else
@@ -1110,6 +1168,7 @@
 	struct megasas_cmd *cmd;
 	struct megasas_abort_frame *abort_fr;
 	int ret = 0;
+	u32 opcode;
 
 	cmd = megasas_get_cmd(instance);
 
@@ -1145,8 +1204,10 @@
 		ret = wait_event_timeout(instance->abort_cmd_wait_q,
 				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
 		if (!ret) {
-			dev_err(&instance->pdev->dev, "Failed from %s %d Abort Timed out\n",
-				__func__, __LINE__);
+			opcode = cmd_to_abort->frame->dcmd.opcode;
+			dev_err(&instance->pdev->dev,
+				"Abort (of DCMD opcode: 0x%x) timed out, func: %s\n",
+				opcode, __func__);
 			return DCMD_TIMEOUT;
 		}
 	} else
@@ -1330,11 +1391,11 @@
 	device_id = MEGASAS_DEV_INDEX(scp);
 	pthru = (struct megasas_pthru_frame *)cmd->frame;
 
-	if (scp->sc_data_direction == PCI_DMA_TODEVICE)
+	if (scp->sc_data_direction == DMA_TO_DEVICE)
 		flags = MFI_FRAME_DIR_WRITE;
-	else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+	else if (scp->sc_data_direction == DMA_FROM_DEVICE)
 		flags = MFI_FRAME_DIR_READ;
-	else if (scp->sc_data_direction == PCI_DMA_NONE)
+	else if (scp->sc_data_direction == DMA_NONE)
 		flags = MFI_FRAME_DIR_NONE;
 
 	if (instance->flag_ieee == 1) {
@@ -1428,9 +1489,9 @@
 	device_id = MEGASAS_DEV_INDEX(scp);
 	ldio = (struct megasas_io_frame *)cmd->frame;
 
-	if (scp->sc_data_direction == PCI_DMA_TODEVICE)
+	if (scp->sc_data_direction == DMA_TO_DEVICE)
 		flags = MFI_FRAME_DIR_WRITE;
-	else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+	else if (scp->sc_data_direction == DMA_FROM_DEVICE)
 		flags = MFI_FRAME_DIR_READ;
 
 	if (instance->flag_ieee == 1) {
@@ -1884,26 +1945,19 @@
 	blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1);
 }
 
-
 /*
- * megasas_set_static_target_properties -
- * Device property set by driver are static and it is not required to be
- * updated after OCR.
- *
- * set io timeout
- * set device queue depth
- * set nvme device properties. see - megasas_set_nvme_device_properties
+ * megasas_set_fw_assisted_qd -
+ * set device queue depth to can_queue
+ * set device queue depth to fw assisted qd
  *
  * @sdev:				scsi device
  * @is_target_prop			true, if fw provided target properties.
  */
-static void megasas_set_static_target_properties(struct scsi_device *sdev,
+static void megasas_set_fw_assisted_qd(struct scsi_device *sdev,
 						 bool is_target_prop)
 {
-	u16	target_index = 0;
 	u8 interface_type;
 	u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN;
-	u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB;
 	u32 tgt_device_qd;
 	struct megasas_instance *instance;
 	struct MR_PRIV_DEVICE *mr_device_priv_data;
@@ -1912,13 +1966,6 @@
 	mr_device_priv_data = sdev->hostdata;
 	interface_type  = mr_device_priv_data->interface_type;
 
-	/*
-	 * The RAID firmware may require extended timeouts.
-	 */
-	blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ);
-
-	target_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
-
 	switch (interface_type) {
 	case SAS_PD:
 		device_qd = MEGASAS_SAS_QD;
@@ -1936,18 +1983,49 @@
 		if (tgt_device_qd &&
 		    (tgt_device_qd <= instance->host->can_queue))
 			device_qd = tgt_device_qd;
-
-		/* max_io_size_kb will be set to non zero for
-		 * nvme based vd and syspd.
-		 */
-		max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb);
 	}
 
+	if (instance->enable_sdev_max_qd && interface_type != UNKNOWN_DRIVE)
+		device_qd = instance->host->can_queue;
+
+	scsi_change_queue_depth(sdev, device_qd);
+}
+
+/*
+ * megasas_set_static_target_properties -
+ * Device property set by driver are static and it is not required to be
+ * updated after OCR.
+ *
+ * set io timeout
+ * set device queue depth
+ * set nvme device properties. see - megasas_set_nvme_device_properties
+ *
+ * @sdev:				scsi device
+ * @is_target_prop			true, if fw provided target properties.
+ */
+static void megasas_set_static_target_properties(struct scsi_device *sdev,
+						 bool is_target_prop)
+{
+	u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB;
+	struct megasas_instance *instance;
+
+	instance = megasas_lookup_instance(sdev->host->host_no);
+
+	/*
+	 * The RAID firmware may require extended timeouts.
+	 */
+	blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ);
+
+	/* max_io_size_kb will be set to non zero for
+	 * nvme based vd and syspd.
+	 */
+	if (is_target_prop)
+		max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb);
+
 	if (instance->nvme_page_size && max_io_size_kb)
 		megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10));
 
-	scsi_change_queue_depth(sdev, device_qd);
-
+	megasas_set_fw_assisted_qd(sdev, is_target_prop);
 }
 
 
@@ -2079,9 +2157,11 @@
 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
 		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
 		(instance->adapter_type != MFI_SERIES)) {
-		writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
-		/* Flush */
-		readl(&instance->reg_set->doorbell);
+		if (!instance->requestorId) {
+			writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
+			/* Flush */
+			readl(&instance->reg_set->doorbell);
+		}
 		if (instance->requestorId && instance->peerIsPresent)
 			memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
 	} else {
@@ -2191,7 +2271,7 @@
 static void
 process_fw_state_change_wq(struct work_struct *work);
 
-void megasas_do_ocr(struct megasas_instance *instance)
+static void megasas_do_ocr(struct megasas_instance *instance)
 {
 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
 	(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
@@ -2240,9 +2320,9 @@
 			       sizeof(struct MR_LD_VF_AFFILIATION_111));
 	else {
 		new_affiliation_111 =
-			pci_zalloc_consistent(instance->pdev,
-					      sizeof(struct MR_LD_VF_AFFILIATION_111),
-					      &new_affiliation_111_h);
+			dma_alloc_coherent(&instance->pdev->dev,
+					   sizeof(struct MR_LD_VF_AFFILIATION_111),
+					   &new_affiliation_111_h, GFP_KERNEL);
 		if (!new_affiliation_111) {
 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
 			       "memory for new affiliation for scsi%d\n",
@@ -2302,7 +2382,7 @@
 	}
 out:
 	if (new_affiliation_111) {
-		pci_free_consistent(instance->pdev,
+		dma_free_coherent(&instance->pdev->dev,
 				    sizeof(struct MR_LD_VF_AFFILIATION_111),
 				    new_affiliation_111,
 				    new_affiliation_111_h);
@@ -2347,10 +2427,9 @@
 		       sizeof(struct MR_LD_VF_AFFILIATION));
 	else {
 		new_affiliation =
-			pci_zalloc_consistent(instance->pdev,
-					      (MAX_LOGICAL_DRIVES + 1) *
-					      sizeof(struct MR_LD_VF_AFFILIATION),
-					      &new_affiliation_h);
+			dma_alloc_coherent(&instance->pdev->dev,
+					   (MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION),
+					   &new_affiliation_h, GFP_KERNEL);
 		if (!new_affiliation) {
 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
 			       "memory for new affiliation for scsi%d\n",
@@ -2470,7 +2549,7 @@
 	}
 
 	if (new_affiliation)
-		pci_free_consistent(instance->pdev,
+		dma_free_coherent(&instance->pdev->dev,
 				    (MAX_LOGICAL_DRIVES + 1) *
 				    sizeof(struct MR_LD_VF_AFFILIATION),
 				    new_affiliation, new_affiliation_h);
@@ -2513,9 +2592,10 @@
 
 	if (initial) {
 		instance->hb_host_mem =
-			pci_zalloc_consistent(instance->pdev,
-					      sizeof(struct MR_CTRL_HB_HOST_MEM),
-					      &instance->hb_host_mem_h);
+			dma_alloc_coherent(&instance->pdev->dev,
+					   sizeof(struct MR_CTRL_HB_HOST_MEM),
+					   &instance->hb_host_mem_h,
+					   GFP_KERNEL);
 		if (!instance->hb_host_mem) {
 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate"
 			       " memory for heartbeat host memory for scsi%d\n",
@@ -2682,7 +2762,7 @@
 
 	i = 0;
 	outstanding = atomic_read(&instance->fw_outstanding);
-	fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
+	fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK;
 
 	if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
 		goto no_outstanding;
@@ -2692,7 +2772,7 @@
 	do {
 		if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) {
 			dev_info(&instance->pdev->dev,
-				"%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, oustanding 0x%x\n",
+				"%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, outstanding 0x%x\n",
 				__func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding));
 			if (i == 3)
 				goto kill_hba_and_failed;
@@ -2711,7 +2791,7 @@
 
 			outstanding = atomic_read(&instance->fw_outstanding);
 
-			fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
+			fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK;
 			if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
 				goto no_outstanding;
 		}
@@ -2802,21 +2882,108 @@
 }
 
 /**
- * megasas_dump_frame -	This function will dump MPT/MFI frame
+ * megasas_dump -	This function will print a hexdump of the provided buffer.
+ * @buf:		Buffer to be dumped
+ * @sz:			Size in bytes
+ * @format:		Number of 32 bit words printed per line, i.e.
+ *			format=n causes 'n' 32 bit words to be dumped in
+ *			a single line.
  */
-static inline void
-megasas_dump_frame(void *mpi_request, int sz)
+inline void
+megasas_dump(void *buf, int sz, int format)
 {
 	int i;
-	__le32 *mfp = (__le32 *)mpi_request;
+	__le32 *buf_loc = (__le32 *)buf;
 
-	printk(KERN_INFO "IO request frame:\n\t");
-	for (i = 0; i < sz / sizeof(__le32); i++) {
-		if (i && ((i % 8) == 0))
-			printk("\n\t");
-		printk("%08x ", le32_to_cpu(mfp[i]));
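+	/* print 'format' dwords per line, prefixed with the byte offset */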
+	for (i = 0; i < (sz / sizeof(__le32)); i++) {
+		if ((i % format) == 0) {
+			if (i != 0)
+				printk(KERN_CONT "\n");
+			printk(KERN_CONT "%08x: ", (i * 4));
+		}
+		printk(KERN_CONT "%08x ", le32_to_cpu(buf_loc[i]));
 	}
-	printk("\n");
+	printk(KERN_CONT "\n");
+}
+
+/**
+ * megasas_dump_reg_set -	This function will print hexdump of register set
+ * @reg_set:			Register set to be dumped
+ */
+inline void
+megasas_dump_reg_set(void __iomem *reg_set)
+{
+	unsigned int i, sz = 256;
+	u32 __iomem *reg = (u32 __iomem *)reg_set;
+
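+	/* dump the first 256 bytes of the register set, one dword per line */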
+	for (i = 0; i < (sz / sizeof(u32)); i++)
+		printk("%08x: %08x\n", (i * 4), readl(&reg[i]));
+}
+
+/**
+ * megasas_dump_fusion_io -	This function will print key details
+ *				of SCSI IO
+ * @scmd:			SCSI command pointer of SCSI IO
+ */
+void
+megasas_dump_fusion_io(struct scsi_cmnd *scmd)
+{
+	struct megasas_cmd_fusion *cmd;
+	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+	struct megasas_instance *instance;
+
+	cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr;
+	instance = (struct megasas_instance *)scmd->device->host->hostdata;
+
+	scmd_printk(KERN_INFO, scmd,
+		    "scmd: (0x%p)  retries: 0x%x  allowed: 0x%x\n",
+		    scmd, scmd->retries, scmd->allowed);
+	scsi_print_command(scmd);
+
+	if (cmd) {
+		req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;
+		scmd_printk(KERN_INFO, scmd, "Request descriptor details:\n");
+		scmd_printk(KERN_INFO, scmd,
+			    "RequestFlags:0x%x  MSIxIndex:0x%x  SMID:0x%x  LMID:0x%x  DevHandle:0x%x\n",
+			    req_desc->SCSIIO.RequestFlags,
+			    req_desc->SCSIIO.MSIxIndex, req_desc->SCSIIO.SMID,
+			    req_desc->SCSIIO.LMID, req_desc->SCSIIO.DevHandle);
+
+		printk(KERN_INFO "IO request frame:\n");
+		megasas_dump(cmd->io_request,
+			     MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE, 8);
+		printk(KERN_INFO "Chain frame:\n");
+		megasas_dump(cmd->sg_frame,
+			     instance->max_chain_frame_sz, 8);
+	}
+}
+
+/*
+ * megasas_dump_sys_regs - This function will dump system registers through
+ *			    sysfs.
+ * @reg_set:		    Pointer to System register set.
+ * @buf:		    Buffer to which output is to be written.
+ * @return:		    Number of bytes written to buffer.
+ */
+static inline ssize_t
+megasas_dump_sys_regs(void __iomem *reg_set, char *buf)
+{
+	unsigned int i, sz = 256;
+	int bytes_wrote = 0;
+	char *loc = (char *)buf;
+	u32 __iomem *reg = (u32 __iomem *)reg_set;
+
+	for (i = 0; i < sz / sizeof(u32); i++) {
+		bytes_wrote += snprintf(loc + bytes_wrote, PAGE_SIZE - bytes_wrote,
+					"%08x: %08x\n", (i * 4),
+					readl(&reg[i]));
+	}
+	return bytes_wrote;
 }
 
 /**
@@ -2830,24 +2997,20 @@
 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
 
 	scmd_printk(KERN_INFO, scmd,
-		"Controller reset is requested due to IO timeout\n"
-		"SCSI command pointer: (%p)\t SCSI host state: %d\t"
-		" SCSI host busy: %d\t FW outstanding: %d\n",
-		scmd, scmd->device->host->shost_state,
+		"OCR is requested due to IO timeout!!\n");
+
+	scmd_printk(KERN_INFO, scmd,
+		"SCSI host state: %d  SCSI host busy: %d  FW outstanding: %d\n",
+		scmd->device->host->shost_state,
 		scsi_host_busy(scmd->device->host),
 		atomic_read(&instance->fw_outstanding));
-
 	/*
 	 * First wait for all commands to complete
 	 */
 	if (instance->adapter_type == MFI_SERIES) {
 		ret = megasas_generic_reset(scmd);
 	} else {
-		struct megasas_cmd_fusion *cmd;
-		cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr;
-		if (cmd)
-			megasas_dump_frame(cmd->io_request,
-				MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
+		megasas_dump_fusion_io(scmd);
 		ret = megasas_reset_fusion(scmd->device->host,
 				SCSIIO_TIMEOUT_OCR);
 	}
@@ -2997,7 +3160,7 @@
 }
 
 static ssize_t
-megasas_fw_crash_buffer_store(struct device *cdev,
+fw_crash_buffer_store(struct device *cdev,
 	struct device_attribute *attr, const char *buf, size_t count)
 {
 	struct Scsi_Host *shost = class_to_shost(cdev);
@@ -3016,15 +3179,15 @@
 }
 
 static ssize_t
-megasas_fw_crash_buffer_show(struct device *cdev,
+fw_crash_buffer_show(struct device *cdev,
 	struct device_attribute *attr, char *buf)
 {
 	struct Scsi_Host *shost = class_to_shost(cdev);
 	struct megasas_instance *instance =
 		(struct megasas_instance *) shost->hostdata;
 	u32 size;
-	unsigned long buff_addr;
 	unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
+	unsigned long chunk_left_bytes;
 	unsigned long src_addr;
 	unsigned long flags;
 	u32 buff_offset;
@@ -3040,8 +3203,6 @@
 		return -EINVAL;
 	}
 
-	buff_addr = (unsigned long) buf;
-
 	if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) {
 		dev_err(&instance->pdev->dev,
 			"Firmware crash dump offset is out of range\n");
@@ -3050,6 +3211,8 @@
 	}
 
 	size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
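+	/* do not let the copy cross the current DMA chunk boundary */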
+	chunk_left_bytes = dmachunk - (buff_offset % dmachunk);
+	size = (size > chunk_left_bytes) ? chunk_left_bytes : size;
 	size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
 
 	src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
@@ -3061,7 +3224,7 @@
 }
 
 static ssize_t
-megasas_fw_crash_buffer_size_show(struct device *cdev,
+fw_crash_buffer_size_show(struct device *cdev,
 	struct device_attribute *attr, char *buf)
 {
 	struct Scsi_Host *shost = class_to_shost(cdev);
@@ -3073,7 +3236,7 @@
 }
 
 static ssize_t
-megasas_fw_crash_state_store(struct device *cdev,
+fw_crash_state_store(struct device *cdev,
 	struct device_attribute *attr, const char *buf, size_t count)
 {
 	struct Scsi_Host *shost = class_to_shost(cdev);
@@ -3108,7 +3271,7 @@
 }
 
 static ssize_t
-megasas_fw_crash_state_show(struct device *cdev,
+fw_crash_state_show(struct device *cdev,
 	struct device_attribute *attr, char *buf)
 {
 	struct Scsi_Host *shost = class_to_shost(cdev);
@@ -3119,14 +3282,14 @@
 }
 
 static ssize_t
-megasas_page_size_show(struct device *cdev,
+page_size_show(struct device *cdev,
 	struct device_attribute *attr, char *buf)
 {
 	return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1);
 }
 
 static ssize_t
-megasas_ldio_outstanding_show(struct device *cdev, struct device_attribute *attr,
+ldio_outstanding_show(struct device *cdev, struct device_attribute *attr,
 	char *buf)
 {
 	struct Scsi_Host *shost = class_to_shost(cdev);
@@ -3136,7 +3299,7 @@
 }
 
 static ssize_t
-megasas_fw_cmds_outstanding_show(struct device *cdev,
+fw_cmds_outstanding_show(struct device *cdev,
 				 struct device_attribute *attr, char *buf)
 {
 	struct Scsi_Host *shost = class_to_shost(cdev);
@@ -3145,26 +3308,91 @@
 	return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->fw_outstanding));
 }
 
-static DEVICE_ATTR(fw_crash_buffer, S_IRUGO | S_IWUSR,
-	megasas_fw_crash_buffer_show, megasas_fw_crash_buffer_store);
-static DEVICE_ATTR(fw_crash_buffer_size, S_IRUGO,
-	megasas_fw_crash_buffer_size_show, NULL);
-static DEVICE_ATTR(fw_crash_state, S_IRUGO | S_IWUSR,
-	megasas_fw_crash_state_show, megasas_fw_crash_state_store);
-static DEVICE_ATTR(page_size, S_IRUGO,
-	megasas_page_size_show, NULL);
-static DEVICE_ATTR(ldio_outstanding, S_IRUGO,
-	megasas_ldio_outstanding_show, NULL);
-static DEVICE_ATTR(fw_cmds_outstanding, S_IRUGO,
-	megasas_fw_cmds_outstanding_show, NULL);
+static ssize_t
+enable_sdev_max_qd_show(struct device *cdev,
+	struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
 
-struct device_attribute *megaraid_host_attrs[] = {
+	return snprintf(buf, PAGE_SIZE, "%d\n", instance->enable_sdev_max_qd);
+}
+
+static ssize_t
+enable_sdev_max_qd_store(struct device *cdev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
+	u32 val = 0;
+	bool is_target_prop;
+	int ret_target_prop = DCMD_FAILED;
+	struct scsi_device *sdev;
+
+	if (kstrtou32(buf, 0, &val) != 0) {
+		pr_err("megasas: could not set enable_sdev_max_qd\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&instance->reset_mutex);
+	if (val)
+		instance->enable_sdev_max_qd = true;
+	else
+		instance->enable_sdev_max_qd = false;
+
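+	/* re-apply firmware-assisted queue depth settings to all devices */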
+	shost_for_each_device(sdev, shost) {
+		ret_target_prop = megasas_get_target_prop(instance, sdev);
+		is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
+		megasas_set_fw_assisted_qd(sdev, is_target_prop);
+	}
+	mutex_unlock(&instance->reset_mutex);
+
+	return strlen(buf);
+}
+
+static ssize_t
+dump_system_regs_show(struct device *cdev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct megasas_instance *instance =
+			(struct megasas_instance *)shost->hostdata;
+
+	return megasas_dump_sys_regs(instance->reg_set, buf);
+}
+
+static ssize_t
+raid_map_id_show(struct device *cdev, struct device_attribute *attr,
+			  char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct megasas_instance *instance =
+			(struct megasas_instance *)shost->hostdata;
+
+	return snprintf(buf, PAGE_SIZE, "%ld\n",
+			(unsigned long)instance->map_id);
+}
+
+static DEVICE_ATTR_RW(fw_crash_buffer);
+static DEVICE_ATTR_RO(fw_crash_buffer_size);
+static DEVICE_ATTR_RW(fw_crash_state);
+static DEVICE_ATTR_RO(page_size);
+static DEVICE_ATTR_RO(ldio_outstanding);
+static DEVICE_ATTR_RO(fw_cmds_outstanding);
+static DEVICE_ATTR_RW(enable_sdev_max_qd);
+static DEVICE_ATTR_RO(dump_system_regs);
+static DEVICE_ATTR_RO(raid_map_id);
+
+static struct device_attribute *megaraid_host_attrs[] = {
 	&dev_attr_fw_crash_buffer_size,
 	&dev_attr_fw_crash_buffer,
 	&dev_attr_fw_crash_state,
 	&dev_attr_page_size,
 	&dev_attr_ldio_outstanding,
 	&dev_attr_fw_cmds_outstanding,
+	&dev_attr_enable_sdev_max_qd,
+	&dev_attr_dump_system_regs,
+	&dev_attr_raid_map_id,
 	NULL,
 };
 
@@ -3186,8 +3414,8 @@
 	.eh_timed_out = megasas_reset_timer,
 	.shost_attrs = megaraid_host_attrs,
 	.bios_param = megasas_bios_param,
-	.use_clustering = ENABLE_CLUSTERING,
 	.change_queue_depth = scsi_change_queue_depth,
+	.max_segment_size = 0xffffffff,
 	.no_write_same = 1,
 };
 
@@ -3278,6 +3506,7 @@
 			megasas_complete_int_cmd(instance, cmd);
 			break;
 		}
+		/* fall through */
 
 	case MFI_CMD_LD_READ:
 	case MFI_CMD_LD_WRITE:
@@ -3348,6 +3577,7 @@
 	case MFI_CMD_SMP:
 	case MFI_CMD_STP:
 	case MFI_CMD_NVME:
+	case MFI_CMD_TOOLBOX:
 		megasas_complete_int_cmd(instance, cmd);
 		break;
 
@@ -3665,9 +3895,8 @@
 		return IRQ_HANDLED;
 	}
 
-	if ((mfiStatus = instance->instancet->clear_intr(
-						instance->reg_set)
-						) == 0) {
+	mfiStatus = instance->instancet->clear_intr(instance);
+	if (mfiStatus == 0) {
 		/* Hardware may not set outbound_intr_status in MSI-X mode */
 		if (!instance->msix_vectors)
 			return IRQ_NONE;
@@ -3677,7 +3906,7 @@
 
 	if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) {
 		fw_state = instance->instancet->read_fw_status_reg(
-				instance->reg_set) & MFI_STATE_MASK;
+				instance) & MFI_STATE_MASK;
 
 		if (fw_state != MFI_STATE_FAULT) {
 			dev_notice(&instance->pdev->dev, "fw state:%x\n",
@@ -3757,10 +3986,9 @@
 	int i;
 	u8 max_wait;
 	u32 fw_state;
-	u32 cur_state;
 	u32 abs_state, curr_abs_state;
 
-	abs_state = instance->instancet->read_fw_status_reg(instance->reg_set);
+	abs_state = instance->instancet->read_fw_status_reg(instance);
 	fw_state = abs_state & MFI_STATE_MASK;
 
 	if (fw_state != MFI_STATE_READY)
@@ -3772,13 +4000,18 @@
 		switch (fw_state) {
 
 		case MFI_STATE_FAULT:
-			dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW in FAULT state!!\n");
+			dev_printk(KERN_ERR, &instance->pdev->dev,
+				   "FW in FAULT state, Fault code:0x%x subcode:0x%x func:%s\n",
+				   abs_state & MFI_STATE_FAULT_CODE,
+				   abs_state & MFI_STATE_FAULT_SUBCODE, __func__);
 			if (ocr) {
 				max_wait = MEGASAS_RESET_WAIT_TIME;
-				cur_state = MFI_STATE_FAULT;
 				break;
-			} else
+			} else {
+				dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
+				megasas_dump_reg_set(instance->reg_set);
 				return -ENODEV;
+			}
 
 		case MFI_STATE_WAIT_HANDSHAKE:
 			/*
@@ -3798,7 +4031,6 @@
 					&instance->reg_set->inbound_doorbell);
 
 			max_wait = MEGASAS_RESET_WAIT_TIME;
-			cur_state = MFI_STATE_WAIT_HANDSHAKE;
 			break;
 
 		case MFI_STATE_BOOT_MESSAGE_PENDING:
@@ -3814,7 +4046,6 @@
 					&instance->reg_set->inbound_doorbell);
 
 			max_wait = MEGASAS_RESET_WAIT_TIME;
-			cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
 			break;
 
 		case MFI_STATE_OPERATIONAL:
@@ -3832,7 +4063,8 @@
 
 				if (instance->adapter_type != MFI_SERIES) {
 					for (i = 0; i < (10 * 1000); i += 20) {
-						if (readl(
+						if (megasas_readl(
+							    instance,
 							    &instance->
 							    reg_set->
 							    doorbell) & 1)
@@ -3846,7 +4078,6 @@
 					&instance->reg_set->inbound_doorbell);
 
 			max_wait = MEGASAS_RESET_WAIT_TIME;
-			cur_state = MFI_STATE_OPERATIONAL;
 			break;
 
 		case MFI_STATE_UNDEFINED:
@@ -3854,49 +4085,45 @@
 			 * This state should not last for more than 2 seconds
 			 */
 			max_wait = MEGASAS_RESET_WAIT_TIME;
-			cur_state = MFI_STATE_UNDEFINED;
 			break;
 
 		case MFI_STATE_BB_INIT:
 			max_wait = MEGASAS_RESET_WAIT_TIME;
-			cur_state = MFI_STATE_BB_INIT;
 			break;
 
 		case MFI_STATE_FW_INIT:
 			max_wait = MEGASAS_RESET_WAIT_TIME;
-			cur_state = MFI_STATE_FW_INIT;
 			break;
 
 		case MFI_STATE_FW_INIT_2:
 			max_wait = MEGASAS_RESET_WAIT_TIME;
-			cur_state = MFI_STATE_FW_INIT_2;
 			break;
 
 		case MFI_STATE_DEVICE_SCAN:
 			max_wait = MEGASAS_RESET_WAIT_TIME;
-			cur_state = MFI_STATE_DEVICE_SCAN;
 			break;
 
 		case MFI_STATE_FLUSH_CACHE:
 			max_wait = MEGASAS_RESET_WAIT_TIME;
-			cur_state = MFI_STATE_FLUSH_CACHE;
 			break;
 
 		default:
 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "Unknown state 0x%x\n",
 			       fw_state);
+			dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
+			megasas_dump_reg_set(instance->reg_set);
 			return -ENODEV;
 		}
 
 		/*
 		 * The cur_state should not last for more than max_wait secs
 		 */
-		for (i = 0; i < (max_wait * 1000); i++) {
+		for (i = 0; i < max_wait * 50; i++) {
 			curr_abs_state = instance->instancet->
-				read_fw_status_reg(instance->reg_set);
+				read_fw_status_reg(instance);
 
 			if (abs_state == curr_abs_state) {
-				msleep(1);
+				msleep(20);
 			} else
 				break;
 		}
@@ -3907,6 +4134,8 @@
 		if (curr_abs_state == abs_state) {
 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed "
 			       "in %d secs\n", fw_state, max_wait);
+			dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
+			megasas_dump_reg_set(instance->reg_set);
 			return -ENODEV;
 		}
 
@@ -3970,23 +4199,12 @@
 {
 	int i;
 	u16 max_cmd;
-	u32 sge_sz;
 	u32 frame_count;
 	struct megasas_cmd *cmd;
 
 	max_cmd = instance->max_mfi_cmds;
 
 	/*
-	 * Size of our frame is 64 bytes for MFI frame, followed by max SG
-	 * elements and finally SCSI_SENSE_BUFFERSIZE bytes for sense buffer
-	 */
-	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
-	    sizeof(struct megasas_sge32);
-
-	if (instance->flag_ieee)
-		sge_sz = sizeof(struct megasas_sge_skinny);
-
-	/*
 	 * For MFI controllers.
 	 * max_num_sge = 60
 	 * max_sge_sz  = 16 byte (sizeof megasas_sge_skinny)
@@ -4155,6 +4373,7 @@
 	if (megasas_create_frame_pool(instance)) {
 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
 		megasas_free_cmds(instance);
+		return -ENOMEM;
 	}
 
 	return 0;
@@ -4234,8 +4453,10 @@
 		switch (dcmd_timeout_ocr_possible(instance)) {
 		case INITIATE_OCR:
 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
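+			/* drop reset_mutex so the OCR path can take it; re-acquired below */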
+			mutex_unlock(&instance->reset_mutex);
 			megasas_reset_fusion(instance->host,
 				MFI_IO_TIMEOUT_OCR);
+			mutex_lock(&instance->reset_mutex);
 			break;
 		case KILL_ADAPTER:
 			megaraid_sas_kill_hba(instance);
@@ -4271,7 +4492,6 @@
 	struct megasas_dcmd_frame *dcmd;
 	struct MR_PD_LIST *ci;
 	struct MR_PD_ADDRESS *pd_addr;
-	dma_addr_t ci_h = 0;
 
 	if (instance->pd_list_not_supported) {
 		dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
@@ -4280,7 +4500,6 @@
 	}
 
 	ci = instance->pd_list_buf;
-	ci_h = instance->pd_list_buf_h;
 
 	cmd = megasas_get_cmd(instance);
 
@@ -4353,6 +4572,9 @@
 
 	case DCMD_SUCCESS:
 		pd_addr = ci->addr;
+		if (megasas_dbg_lvl & LD_PD_DEBUG)
+			dev_info(&instance->pdev->dev, "%s, sysPD count: 0x%x\n",
+				 __func__, le32_to_cpu(ci->count));
 
 		if ((le32_to_cpu(ci->count) >
 			(MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL)))
@@ -4368,6 +4590,11 @@
 					pd_addr->scsiDevType;
 			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState	=
 					MR_PD_STATE_SYSTEM;
+			if (megasas_dbg_lvl & LD_PD_DEBUG)
+				dev_info(&instance->pdev->dev,
+					 "PD%d: targetID: 0x%03x deviceType:0x%x\n",
+					 pd_index, le16_to_cpu(pd_addr->deviceId),
+					 pd_addr->scsiDevType);
 			pd_addr++;
 		}
 
@@ -4471,6 +4698,10 @@
 		break;
 
 	case DCMD_SUCCESS:
+		if (megasas_dbg_lvl & LD_PD_DEBUG)
+			dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n",
+				 __func__, ld_count);
+
 		if (ld_count > instance->fw_supported_vd_count)
 			break;
 
@@ -4480,6 +4711,10 @@
 			if (ci->ldList[ld_index].state != 0) {
 				ids = ci->ldList[ld_index].ref.targetId;
 				instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId;
+				if (megasas_dbg_lvl & LD_PD_DEBUG)
+					dev_info(&instance->pdev->dev,
+						 "LD%d: targetID: 0x%03x\n",
+						 ld_index, ids);
 			}
 		}
 
@@ -4583,6 +4818,10 @@
 	case DCMD_SUCCESS:
 		tgtid_count = le32_to_cpu(ci->count);
 
+		if (megasas_dbg_lvl & LD_PD_DEBUG)
+			dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n",
+				 __func__, tgtid_count);
+
 		if ((tgtid_count > (instance->fw_supported_vd_count)))
 			break;
 
@@ -4590,6 +4829,9 @@
 		for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
 			ids = ci->targetId[ld_index];
 			instance->ld_ids[ids] = ci->targetId[ld_index];
+			if (megasas_dbg_lvl & LD_PD_DEBUG)
+				dev_info(&instance->pdev->dev, "LD%d: targetID: 0x%03x\n",
+					 ld_index, ci->targetId[ld_index]);
 		}
 
 		break;
@@ -4601,6 +4843,140 @@
 	return ret;
 }
 
+/**
+ * megasas_host_device_list_query -	Issue MR_DCMD_CTRL_DEVICE_LIST_GET
+ * @instance:			Adapter soft state
+ * @is_probe:			Driver probe check
+ *
+ * dcmd.opcode            - MR_DCMD_CTRL_DEVICE_LIST_GET
+ * dcmd.mbox              - reserved
+ * dcmd.sge IN            - ptr to return MR_HOST_DEVICE_LIST structure
+ * Desc:    This DCMD will return the combined device list
+ * Status:  MFI_STAT_OK - List returned successfully
+ *          MFI_STAT_INVALID_CMD - Firmware support for the feature has been
+ *                                 disabled
+ * Return:			0 if DCMD succeeded
+ *				 non-zero if failed
+ */
+static int
+megasas_host_device_list_query(struct megasas_instance *instance,
+			       bool is_probe)
+{
+	int ret, i, target_id;
+	struct megasas_cmd *cmd;
+	struct megasas_dcmd_frame *dcmd;
+	struct MR_HOST_DEVICE_LIST *ci;
+	u32 count;
+	dma_addr_t ci_h;
+
+	ci = instance->host_device_list_buf;
+	ci_h = instance->host_device_list_buf_h;
+
+	cmd = megasas_get_cmd(instance);
+
+	if (!cmd) {
+		dev_warn(&instance->pdev->dev,
+			 "%s: failed to get cmd\n",
+			 __func__);
+		return -ENOMEM;
+	}
+
+	dcmd = &cmd->frame->dcmd;
+
+	memset(ci, 0, sizeof(*ci));
+	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
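+	/* mbox.b[0] is 0 for the initial probe-time query, 1 afterwards */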
+	dcmd->mbox.b[0] = is_probe ? 0 : 1;
+	dcmd->cmd = MFI_CMD_DCMD;
+	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
+	dcmd->sge_count = 1;
+	dcmd->flags = MFI_FRAME_DIR_READ;
+	dcmd->timeout = 0;
+	dcmd->pad_0 = 0;
+	dcmd->data_xfer_len = cpu_to_le32(HOST_DEVICE_LIST_SZ);
+	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_DEVICE_LIST_GET);
+
+	megasas_set_dma_settings(instance, dcmd, ci_h, HOST_DEVICE_LIST_SZ);
+
+	if (!instance->mask_interrupts) {
+		ret = megasas_issue_blocked_cmd(instance, cmd,
+						MFI_IO_TIMEOUT_SECS);
+	} else {
+		ret = megasas_issue_polled(instance, cmd);
+		cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+	}
+
+	switch (ret) {
+	case DCMD_SUCCESS:
+		/* Fill the internal pd_list and ld_ids array based on
+		 * targetIds returned by FW
+		 */
+		count = le32_to_cpu(ci->count);
+
+		if (count > (MEGASAS_MAX_PD + MAX_LOGICAL_DRIVES_EXT))
+			break;
+
+		if (megasas_dbg_lvl & LD_PD_DEBUG)
+			dev_info(&instance->pdev->dev, "%s, Device count: 0x%x\n",
+				 __func__, count);
+
+		memset(instance->local_pd_list, 0,
+		       MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
+		memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
+		for (i = 0; i < count; i++) {
+			target_id = le16_to_cpu(ci->host_device_list[i].target_id);
+			if (ci->host_device_list[i].flags.u.bits.is_sys_pd) {
+				instance->local_pd_list[target_id].tid = target_id;
+				instance->local_pd_list[target_id].driveType =
+						ci->host_device_list[i].scsi_type;
+				instance->local_pd_list[target_id].driveState =
+						MR_PD_STATE_SYSTEM;
+				if (megasas_dbg_lvl & LD_PD_DEBUG)
+					dev_info(&instance->pdev->dev,
+						 "Device %d: PD targetID: 0x%03x deviceType:0x%x\n",
+						 i, target_id, ci->host_device_list[i].scsi_type);
+			} else {
+				instance->ld_ids[target_id] = target_id;
+				if (megasas_dbg_lvl & LD_PD_DEBUG)
+					dev_info(&instance->pdev->dev,
+						 "Device %d: LD targetID: 0x%03x\n",
+						 i, target_id);
+			}
+		}
+
+		memcpy(instance->pd_list, instance->local_pd_list,
+		       sizeof(instance->pd_list));
+		break;
+
+	case DCMD_TIMEOUT:
+		switch (dcmd_timeout_ocr_possible(instance)) {
+		case INITIATE_OCR:
+			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+			mutex_unlock(&instance->reset_mutex);
+			megasas_reset_fusion(instance->host,
+				MFI_IO_TIMEOUT_OCR);
+			mutex_lock(&instance->reset_mutex);
+			break;
+		case KILL_ADAPTER:
+			megaraid_sas_kill_hba(instance);
+			break;
+		case IGNORE_TIMEOUT:
+			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
+				 __func__, __LINE__);
+			break;
+		}
+		break;
+	case DCMD_FAILED:
+		dev_err(&instance->pdev->dev,
+			"%s: MR_DCMD_CTRL_DEVICE_LIST_GET failed\n",
+			__func__);
+		break;
+	}
+
+	if (ret != DCMD_TIMEOUT)
+		megasas_return_cmd(instance, cmd);
+
+	return ret;
+}
+
 /*
  * megasas_update_ext_vd_details : Update details w.r.t Extended VD
  * instance			 : Controller's instance
@@ -4634,9 +5010,9 @@
 	}
 
 	dev_info(&instance->pdev->dev,
-		"firmware type\t: %s\n",
-		instance->supportmax256vd ? "Extended VD(240 VD)firmware" :
-		"Legacy(64 VD) firmware");
+		"FW provided supportMaxExtLDs: %d\tmax_lds: %d\n",
+		instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs ? 1 : 0,
+		instance->ctrl_info_buf->max_lds);
 
 	if (instance->max_raid_mapsize) {
 		ventura_map_sz = instance->max_raid_mapsize *
@@ -4661,6 +5037,89 @@
 	fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL);
 }
 
+/*
+ * dcmd.opcode                - MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES
+ * dcmd.hdr.length            - number of bytes to read
+ * dcmd.sge                   - Ptr to MR_SNAPDUMP_PROPERTIES
+ * Desc:			 Fill in snapdump properties
+ * Status:			 MFI_STAT_OK - Command successful
+ */
+void megasas_get_snapdump_properties(struct megasas_instance *instance)
+{
+	int ret = 0;
+	struct megasas_cmd *cmd;
+	struct megasas_dcmd_frame *dcmd;
+	struct MR_SNAPDUMP_PROPERTIES *ci;
+	dma_addr_t ci_h = 0;
+
+	ci = instance->snapdump_prop;
+	ci_h = instance->snapdump_prop_h;
+
+	if (!ci)
+		return;
+
+	cmd = megasas_get_cmd(instance);
+
+	if (!cmd) {
+		dev_dbg(&instance->pdev->dev, "Failed to get a free cmd\n");
+		return;
+	}
+
+	dcmd = &cmd->frame->dcmd;
+
+	memset(ci, 0, sizeof(*ci));
+	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+	dcmd->cmd = MFI_CMD_DCMD;
+	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
+	dcmd->sge_count = 1;
+	dcmd->flags = MFI_FRAME_DIR_READ;
+	dcmd->timeout = 0;
+	dcmd->pad_0 = 0;
+	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_SNAPDUMP_PROPERTIES));
+	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES);
+
+	megasas_set_dma_settings(instance, dcmd, ci_h,
+				 sizeof(struct MR_SNAPDUMP_PROPERTIES));
+
+	if (!instance->mask_interrupts) {
+		ret = megasas_issue_blocked_cmd(instance, cmd,
+						MFI_IO_TIMEOUT_SECS);
+	} else {
+		ret = megasas_issue_polled(instance, cmd);
+		cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+	}
+
+	switch (ret) {
+	case DCMD_SUCCESS:
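+		/* cap the FW-suggested wait time at the driver maximum */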
+		instance->snapdump_wait_time =
+			min_t(u8, ci->trigger_min_num_sec_before_ocr,
+				MEGASAS_MAX_SNAP_DUMP_WAIT_TIME);
+		break;
+
+	case DCMD_TIMEOUT:
+		switch (dcmd_timeout_ocr_possible(instance)) {
+		case INITIATE_OCR:
+			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+			mutex_unlock(&instance->reset_mutex);
+			megasas_reset_fusion(instance->host,
+				MFI_IO_TIMEOUT_OCR);
+			mutex_lock(&instance->reset_mutex);
+			break;
+		case KILL_ADAPTER:
+			megaraid_sas_kill_hba(instance);
+			break;
+		case IGNORE_TIMEOUT:
+			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
+				__func__, __LINE__);
+			break;
+		}
+	}
+
+	if (ret != DCMD_TIMEOUT)
+		megasas_return_cmd(instance, cmd);
+}
+
 /**
  * megasas_get_controller_info -	Returns FW's controller structure
  * @instance:				Adapter soft state
@@ -4720,9 +5179,11 @@
 		 * CPU endianness format.
 		 */
 		le32_to_cpus((u32 *)&ci->properties.OnOffProperties);
+		le16_to_cpus((u16 *)&ci->properties.on_off_properties2);
 		le32_to_cpus((u32 *)&ci->adapterOperations2);
 		le32_to_cpus((u32 *)&ci->adapterOperations3);
 		le16_to_cpus((u16 *)&ci->adapter_operations4);
+		le32_to_cpus((u32 *)&ci->adapter_operations5);
 
 		/* Update the latest Ext VD info.
 		 * From Init path, store current firmware details.
@@ -4730,17 +5191,27 @@
 		 * in case of Firmware upgrade without system reboot.
 		 */
 		megasas_update_ext_vd_details(instance);
-		instance->use_seqnum_jbod_fp =
+		instance->support_seqnum_jbod_fp =
 			ci->adapterOperations3.useSeqNumJbodFP;
 		instance->support_morethan256jbod =
 			ci->adapter_operations4.support_pd_map_target_id;
 		instance->support_nvme_passthru =
 			ci->adapter_operations4.support_nvme_passthru;
+		instance->support_pci_lane_margining =
+			ci->adapter_operations5.support_pci_lane_margining;
 		instance->task_abort_tmo = ci->TaskAbortTO;
 		instance->max_reset_tmo = ci->MaxResetTO;
 
 		/*Check whether controller is iMR or MR */
 		instance->is_imr = (ci->memory_size ? 0 : 1);
+
+		instance->snapdump_wait_time =
+			(ci->properties.on_off_properties2.enable_snap_dump ?
+			 MEGASAS_DEFAULT_SNAP_DUMP_WAIT_TIME : 0);
+
+		instance->enable_fw_dev_list =
+			ci->properties.on_off_properties2.enable_fw_dev_list;
+
 		dev_info(&instance->pdev->dev,
 			"controller type\t: %s(%dMB)\n",
 			instance->is_imr ? "iMR" : "MR",
@@ -4759,6 +5230,10 @@
 		dev_info(&instance->pdev->dev,
 			 "FW provided TM TaskAbort/Reset timeout\t: %d secs/%d secs\n",
 			 instance->task_abort_tmo, instance->max_reset_tmo);
+		dev_info(&instance->pdev->dev, "JBOD sequence map support\t: %s\n",
+			 instance->support_seqnum_jbod_fp ? "Yes" : "No");
+		dev_info(&instance->pdev->dev, "PCI Lane Margining support\t: %s\n",
+			 instance->support_pci_lane_margining ? "Yes" : "No");
 
 		break;
 
@@ -4766,8 +5241,10 @@
 		switch (dcmd_timeout_ocr_possible(instance)) {
 		case INITIATE_OCR:
 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+			mutex_unlock(&instance->reset_mutex);
 			megasas_reset_fusion(instance->host,
 				MFI_IO_TIMEOUT_OCR);
+			mutex_lock(&instance->reset_mutex);
 			break;
 		case KILL_ADAPTER:
 			megaraid_sas_kill_hba(instance);
@@ -4942,16 +5419,13 @@
 static u32
 megasas_init_adapter_mfi(struct megasas_instance *instance)
 {
-	struct megasas_register_set __iomem *reg_set;
 	u32 context_sz;
 	u32 reply_q_sz;
 
-	reg_set = instance->reg_set;
-
 	/*
 	 * Get various operational parameters from status register
 	 */
-	instance->max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
+	instance->max_fw_cmds = instance->instancet->read_fw_status_reg(instance) & 0x00FFFF;
 	/*
 	 * Reduce the max supported cmds by 1. This is to ensure that the
 	 * reply_q_sz (1 more than the max cmd that driver may send)
@@ -4959,7 +5433,7 @@
 	 */
 	instance->max_fw_cmds = instance->max_fw_cmds-1;
 	instance->max_mfi_cmds = instance->max_fw_cmds;
-	instance->max_num_sge = (instance->instancet->read_fw_status_reg(reg_set) & 0xFF0000) >>
+	instance->max_num_sge = (instance->instancet->read_fw_status_reg(instance) & 0xFF0000) >>
 					0x10;
 	/*
 	 * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands
@@ -4995,9 +5469,8 @@
 	context_sz = sizeof(u32);
 	reply_q_sz = context_sz * (instance->max_fw_cmds + 1);
 
-	instance->reply_queue = pci_alloc_consistent(instance->pdev,
-						     reply_q_sz,
-						     &instance->reply_queue_h);
+	instance->reply_queue = dma_alloc_coherent(&instance->pdev->dev,
+			reply_q_sz, &instance->reply_queue_h, GFP_KERNEL);
 
 	if (!instance->reply_queue) {
 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n");
@@ -5016,7 +5489,7 @@
 
 	instance->fw_support_ieee = 0;
 	instance->fw_support_ieee =
-		(instance->instancet->read_fw_status_reg(reg_set) &
+		(instance->instancet->read_fw_status_reg(instance) &
 		0x04000000);
 
 	dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d",
@@ -5029,7 +5502,7 @@
 
 fail_fw_init:
 
-	pci_free_consistent(instance->pdev, reply_q_sz,
+	dma_free_coherent(&instance->pdev->dev, reply_q_sz,
 			    instance->reply_queue, instance->reply_queue_h);
 fail_reply_queue:
 	megasas_free_cmds(instance);
@@ -5038,6 +5511,25 @@
 	return 1;
 }
 
+static
+void megasas_setup_irq_poll(struct megasas_instance *instance)
+{
+	struct megasas_irq_context *irq_ctx;
+	u32 count, i;
+
+	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+
+	/* Initialize IRQ poll */
+	for (i = 0; i < count; i++) {
+		irq_ctx = &instance->irq_context[i];
+		irq_ctx->os_irq = pci_irq_vector(instance->pdev, i);
+		irq_ctx->irq_poll_scheduled = false;
+		irq_poll_init(&irq_ctx->irqpoll,
+			      instance->threshold_reply_count,
+			      megasas_irqpoll);
+	}
+}
+
 /*
  * megasas_setup_irqs_ioapic -		register legacy interrupts.
  * @instance:				Adapter soft state
@@ -5062,6 +5554,8 @@
 				__func__, __LINE__);
 		return -1;
 	}
+	instance->perf_mode = MR_LATENCY_PERF_MODE;
+	instance->low_latency_index_start = 0;
 	return 0;
 }
 
@@ -5096,6 +5590,7 @@
 					 &instance->irq_context[j]);
 			/* Retry irq register for IO_APIC*/
 			instance->msix_vectors = 0;
+			instance->msix_load_balance = false;
 			if (is_probe) {
 				pci_free_irq_vectors(instance->pdev);
 				return megasas_setup_irqs_ioapic(instance);
@@ -5104,6 +5599,7 @@
 			}
 		}
 	}
+
 	return 0;
 }
 
@@ -5116,6 +5612,16 @@
 megasas_destroy_irqs(struct megasas_instance *instance) {
 
 	int i;
+	int count;
+	struct megasas_irq_context *irq_ctx;
+
+	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+	if (instance->adapter_type != MFI_SERIES) {
+		for (i = 0; i < count; i++) {
+			irq_ctx = &instance->irq_context[i];
+			irq_poll_disable(&irq_ctx->irqpoll);
+		}
+	}
 
 	if (instance->msix_vectors)
 		for (i = 0; i < instance->msix_vectors; i++) {
@@ -5144,10 +5650,12 @@
 	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
 		(sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
 
+	instance->use_seqnum_jbod_fp =
+		instance->support_seqnum_jbod_fp;
 	if (reset_devices || !fusion ||
-		!instance->ctrl_info_buf->adapterOperations3.useSeqNumJbodFP) {
+		!instance->support_seqnum_jbod_fp) {
 		dev_info(&instance->pdev->dev,
-			"Jbod map is not supported %s %d\n",
+			"JBOD sequence map is disabled %s %d\n",
 			__func__, __LINE__);
 		instance->use_seqnum_jbod_fp = false;
 		return;
@@ -5186,9 +5694,11 @@
 static void megasas_setup_reply_map(struct megasas_instance *instance)
 {
 	const struct cpumask *mask;
-	unsigned int queue, cpu;
+	unsigned int queue, cpu, low_latency_index_start;
 
-	for (queue = 0; queue < instance->msix_vectors; queue++) {
+	low_latency_index_start = instance->low_latency_index_start;
+
+	for (queue = low_latency_index_start; queue < instance->msix_vectors; queue++) {
 		mask = pci_irq_get_affinity(instance->pdev, queue);
 		if (!mask)
 			goto fallback;
@@ -5199,8 +5709,131 @@
 	return;
 
 fallback:
-	for_each_possible_cpu(cpu)
-		instance->reply_map[cpu] = cpu % instance->msix_vectors;
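+	/* no affinity mask available: spread CPUs round-robin over IO queues */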
+	queue = low_latency_index_start;
+	for_each_possible_cpu(cpu) {
+		instance->reply_map[cpu] = queue;
+		if (queue == (instance->msix_vectors - 1))
+			queue = low_latency_index_start;
+		else
+			queue++;
+	}
+}
+
+/**
+ * megasas_get_device_list -	Get the PD and LD device list from FW.
+ * @instance:			Adapter soft state
+ * @return:			Success or failure
+ *
+ * Issue DCMDs to Firmware to get the PD and LD list.
+ * Based on FW support, the driver sends either the HOST_DEVICE_LIST DCMD
+ * or the PD_LIST/LD_LIST_QUERY DCMD combination to get the device list.
+ */
+static
+int megasas_get_device_list(struct megasas_instance *instance)
+{
+	memset(instance->pd_list, 0,
+	       (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
+	memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
+
+	if (instance->enable_fw_dev_list) {
+		if (megasas_host_device_list_query(instance, true))
+			return FAILED;
+	} else {
+		if (megasas_get_pd_list(instance) < 0) {
+			dev_err(&instance->pdev->dev, "failed to get PD list\n");
+			return FAILED;
+		}
+
+		if (megasas_ld_list_query(instance,
+					  MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) {
+			dev_err(&instance->pdev->dev, "failed to get LD list\n");
+			return FAILED;
+		}
+	}
+
+	return SUCCESS;
+}
+
+/**
+ * megasas_set_high_iops_queue_affinity_hint -	Set affinity hint for high IOPS queues
+ * @instance:					Adapter soft state
+ * return:					void
+ */
+static inline void
+megasas_set_high_iops_queue_affinity_hint(struct megasas_instance *instance)
+{
+	int i;
+	int local_numa_node;
+
+	if (instance->perf_mode == MR_BALANCED_PERF_MODE) {
+		local_numa_node = dev_to_node(&instance->pdev->dev);
+
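+		/* hint high IOPS vectors to CPUs on the controller's local NUMA node */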
+		for (i = 0; i < instance->low_latency_index_start; i++)
+			irq_set_affinity_hint(pci_irq_vector(instance->pdev, i),
+				cpumask_of_node(local_numa_node));
+	}
+}
+
+static int
+__megasas_alloc_irq_vectors(struct megasas_instance *instance)
+{
+	int i, irq_flags;
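+	/* keep the first low_latency_index_start vectors out of affinity spreading */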
+	struct irq_affinity desc = { .pre_vectors = instance->low_latency_index_start };
+	struct irq_affinity *descp = &desc;
+
+	irq_flags = PCI_IRQ_MSIX;
+
+	if (instance->smp_affinity_enable)
+		irq_flags |= PCI_IRQ_AFFINITY;
+	else
+		descp = NULL;
+
+	i = pci_alloc_irq_vectors_affinity(instance->pdev,
+		instance->low_latency_index_start,
+		instance->msix_vectors, irq_flags, descp);
+
+	return i;
+}
+
+/**
+ * megasas_alloc_irq_vectors -	Allocate IRQ vectors/enable MSI-x vectors
+ * @instance:			Adapter soft state
+ * return:			void
+ */
+static void
+megasas_alloc_irq_vectors(struct megasas_instance *instance)
+{
+	int i;
+	unsigned int num_msix_req;
+
+	i = __megasas_alloc_irq_vectors(instance);
+
+	if ((instance->perf_mode == MR_BALANCED_PERF_MODE) &&
+	    (i != instance->msix_vectors)) {
+		if (instance->msix_vectors)
+			pci_free_irq_vectors(instance->pdev);
+		/* Disable Balanced IOPS mode and try realloc vectors */
+		instance->perf_mode = MR_LATENCY_PERF_MODE;
+		instance->low_latency_index_start = 1;
+		num_msix_req = num_online_cpus() + instance->low_latency_index_start;
+
+		instance->msix_vectors = min(num_msix_req,
+				instance->msix_vectors);
+
+		i = __megasas_alloc_irq_vectors(instance);
+
+	}
+
+	dev_info(&instance->pdev->dev,
+		"requested/available msix %d/%d\n", instance->msix_vectors, i);
+
+	if (i > 0)
+		instance->msix_vectors = i;
+	else
+		instance->msix_vectors = 0;
+
+	if (instance->smp_affinity_enable)
+		megasas_set_high_iops_queue_affinity_hint(instance);
 }
 
 /**
@@ -5214,14 +5847,17 @@
 {
 	u32 max_sectors_1;
 	u32 max_sectors_2, tmp_sectors, msix_enable;
-	u32 scratch_pad_2, scratch_pad_3, scratch_pad_4;
+	u32 scratch_pad_1, scratch_pad_2, scratch_pad_3, status_reg;
 	resource_size_t base_addr;
-	struct megasas_register_set __iomem *reg_set;
+	void *base_addr_phys;
 	struct megasas_ctrl_info *ctrl_info = NULL;
 	unsigned long bar_list;
-	int i, j, loop, fw_msix_count = 0;
+	int i, j, loop;
 	struct IOV_111 *iovPtr;
 	struct fusion_context *fusion;
+	bool intr_coalescing;
+	unsigned int num_msix_req;
+	u16 lnksta, speed;
 
 	fusion = instance->ctrl_context;
 
@@ -5242,7 +5878,10 @@
 		goto fail_ioremap;
 	}
 
-	reg_set = instance->reg_set;
+	base_addr_phys = &base_addr;
+	dev_printk(KERN_DEBUG, &instance->pdev->dev,
+		   "BAR:0x%lx  BAR's base_addr(phys):%pa  mapped virt_addr:0x%p\n",
+		   instance->bar, base_addr_phys, instance->reg_set);
 
 	if (instance->adapter_type != MFI_SERIES)
 		instance->instancet = &megasas_instance_template_fusion;
@@ -5270,19 +5909,35 @@
 	}
 
 	if (megasas_transition_to_ready(instance, 0)) {
-		atomic_set(&instance->fw_reset_no_pci_access, 1);
-		instance->instancet->adp_reset
-			(instance, instance->reg_set);
-		atomic_set(&instance->fw_reset_no_pci_access, 0);
 		dev_info(&instance->pdev->dev,
-			"FW restarted successfully from %s!\n",
-			__func__);
+			 "Failed to transition controller to ready from %s!\n",
+			 __func__);
+		if (instance->adapter_type != MFI_SERIES) {
+			status_reg = instance->instancet->read_fw_status_reg(
+					instance);
+			if (status_reg & MFI_RESET_ADAPTER) {
+				if (megasas_adp_reset_wait_for_ready
+					(instance, true, 0) == FAILED)
+					goto fail_ready_state;
+			} else {
+				goto fail_ready_state;
+			}
+		} else {
+			atomic_set(&instance->fw_reset_no_pci_access, 1);
+			instance->instancet->adp_reset
+				(instance, instance->reg_set);
+			atomic_set(&instance->fw_reset_no_pci_access, 0);
 
-		/*waitting for about 30 second before retry*/
-		ssleep(30);
+			/* wait about 30 seconds before retrying */
+			ssleep(30);
 
-		if (megasas_transition_to_ready(instance, 0))
-			goto fail_ready_state;
+			if (megasas_transition_to_ready(instance, 0))
+				goto fail_ready_state;
+		}
+
+		dev_info(&instance->pdev->dev,
+			 "FW restarted successfully from %s!\n",
+			 __func__);
 	}
 
 	megasas_init_ctrl_params(instance);
@@ -5298,40 +5953,76 @@
 
 	fusion = instance->ctrl_context;
 
-	if (instance->adapter_type == VENTURA_SERIES) {
-		scratch_pad_3 =
-			readl(&instance->reg_set->outbound_scratch_pad_3);
-		instance->max_raid_mapsize = ((scratch_pad_3 >>
+	if (instance->adapter_type >= VENTURA_SERIES) {
+		scratch_pad_2 =
+			megasas_readl(instance,
+				      &instance->reg_set->outbound_scratch_pad_2);
+		instance->max_raid_mapsize = ((scratch_pad_2 >>
 			MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
 			MR_MAX_RAID_MAP_SIZE_MASK);
 	}
 
+	instance->enable_sdev_max_qd = enable_sdev_max_qd;
+
+	switch (instance->adapter_type) {
+	case VENTURA_SERIES:
+		fusion->pcie_bw_limitation = true;
+		break;
+	case AERO_SERIES:
+		fusion->r56_div_offload = true;
+		break;
+	default:
+		break;
+	}
+
 	/* Check if MSI-X is supported while in ready state */
-	msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
+	msix_enable = (instance->instancet->read_fw_status_reg(instance) &
 		       0x4000000) >> 0x1a;
 	if (msix_enable && !msix_disable) {
-		int irq_flags = PCI_IRQ_MSIX;
 
-		scratch_pad_2 = readl
-			(&instance->reg_set->outbound_scratch_pad_2);
+		scratch_pad_1 = megasas_readl
+			(instance, &instance->reg_set->outbound_scratch_pad_1);
 		/* Check max MSI-X vectors */
 		if (fusion) {
 			if (instance->adapter_type == THUNDERBOLT_SERIES) {
 				/* Thunderbolt Series*/
-				instance->msix_vectors = (scratch_pad_2
+				instance->msix_vectors = (scratch_pad_1
 					& MR_MAX_REPLY_QUEUES_OFFSET) + 1;
-				fw_msix_count = instance->msix_vectors;
-			} else { /* Invader series supports more than 8 MSI-x vectors*/
-				instance->msix_vectors = ((scratch_pad_2
+			} else {
+				instance->msix_vectors = ((scratch_pad_1
 					& MR_MAX_REPLY_QUEUES_EXT_OFFSET)
 					>> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
-				if (instance->msix_vectors > 16)
-					instance->msix_combined = true;
+
+				/*
+				 * For Invader series, > 8 MSI-x vectors
+				 * supported by FW/HW implies combined
+				 * reply queue mode is enabled.
+				 * For Ventura series, > 16 MSI-x vectors
+				 * supported by FW/HW implies combined
+				 * reply queue mode is enabled.
+				 */
+				switch (instance->adapter_type) {
+				case INVADER_SERIES:
+					if (instance->msix_vectors > 8)
+						instance->msix_combined = true;
+					break;
+				case AERO_SERIES:
+				case VENTURA_SERIES:
+					if (instance->msix_vectors > 16)
+						instance->msix_combined = true;
+					break;
+				}
 
 				if (rdpq_enable)
-					instance->is_rdpq = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ?
+					instance->is_rdpq = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ?
 								1 : 0;
-				fw_msix_count = instance->msix_vectors;
+
+				if (instance->adapter_type >= INVADER_SERIES &&
+				    !instance->msix_combined) {
+					instance->msix_load_balance = true;
+					instance->smp_affinity_enable = false;
+				}
+
 				/* Save 1-15 reply post index address to local memory
 				 * Index 0 is already saved from reg offset
 				 * MPI2_REPLY_POST_HOST_INDEX_OFFSET
@@ -5344,22 +6035,91 @@
 						+ (loop * 0x10));
 				}
 			}
+
+			dev_info(&instance->pdev->dev,
+				 "firmware supports msix\t: (%d)",
+				 instance->msix_vectors);
 			if (msix_vectors)
 				instance->msix_vectors = min(msix_vectors,
 					instance->msix_vectors);
 		} else /* MFI adapters */
 			instance->msix_vectors = 1;
-		/* Don't bother allocating more MSI-X vectors than cpus */
-		instance->msix_vectors = min(instance->msix_vectors,
-					     (unsigned int)num_online_cpus());
-		if (smp_affinity_enable)
-			irq_flags |= PCI_IRQ_AFFINITY;
-		i = pci_alloc_irq_vectors(instance->pdev, 1,
-					  instance->msix_vectors, irq_flags);
-		if (i > 0)
-			instance->msix_vectors = i;
+
+
+		/*
+		 * For Aero (if some conditions are met), the driver will
+		 * configure a few additional reply queues with interrupt
+		 * coalescing enabled. These interrupt-coalesced queues are
+		 * called High IOPS queues, and the remaining reply queues
+		 * (based on the number of logical CPUs) are termed
+		 * Low latency queues.
+		 *
+		 * Total number of reply queues = High IOPS queues + Low latency queues
+		 *
+		 * For the rest of the fusion adapters, one additional reply
+		 * queue is reserved for management commands and the remaining
+		 * reply queues (based on the number of logical CPUs) are used
+		 * for IOs and referred to as IO queues.
+		 * Total number of reply queues = 1 + IO queues
+		 *
+		 * MFI adapters support a single MSI-x vector, so a single
+		 * reply queue serves both IO and management commands.
+		 */
+
+		intr_coalescing = (scratch_pad_1 & MR_INTR_COALESCING_SUPPORT_OFFSET) ?
+								true : false;
+		if (intr_coalescing &&
+			(num_online_cpus() >= MR_HIGH_IOPS_QUEUE_COUNT) &&
+			(instance->msix_vectors == MEGASAS_MAX_MSIX_QUEUES))
+			instance->perf_mode = MR_BALANCED_PERF_MODE;
 		else
-			instance->msix_vectors = 0;
+			instance->perf_mode = MR_LATENCY_PERF_MODE;
+
+
+		if (instance->adapter_type == AERO_SERIES) {
+			pcie_capability_read_word(instance->pdev, PCI_EXP_LNKSTA, &lnksta);
+			speed = lnksta & PCI_EXP_LNKSTA_CLS;
+
+			/*
+			 * For Aero, if the PCIe link speed is < 16 GT/s, the driver
+			 * should operate in latency perf mode and enable the R1 PCI
+			 * bandwidth algorithm.
+			 */
+			if (speed < 0x4) {
+				instance->perf_mode = MR_LATENCY_PERF_MODE;
+				fusion->pcie_bw_limitation = true;
+			}
+
+			/*
+			 * Performance mode settings provided through the perf_mode
+			 * module parameter take effect only when:
+			 * 1. The adapter is from the Aero family, and
+			 * 2. the user sets perf_mode in the range 0-2.
+			 */
+			if ((perf_mode >= MR_BALANCED_PERF_MODE) &&
+				(perf_mode <= MR_LATENCY_PERF_MODE))
+				instance->perf_mode = perf_mode;
+			/*
+			 * If intr coalescing is not supported by controller FW, then IOPS
+			 * and Balanced modes are not feasible.
+			 */
+			if (!intr_coalescing)
+				instance->perf_mode = MR_LATENCY_PERF_MODE;
+
+		}
+
+		if (instance->perf_mode == MR_BALANCED_PERF_MODE)
+			instance->low_latency_index_start =
+				MR_HIGH_IOPS_QUEUE_COUNT;
+		else
+			instance->low_latency_index_start = 1;
+
+		num_msix_req = num_online_cpus() + instance->low_latency_index_start;
+
+		instance->msix_vectors = min(num_msix_req,
+				instance->msix_vectors);
+
+		megasas_alloc_irq_vectors(instance);
+		if (!instance->msix_vectors)
+			instance->msix_load_balance = false;
 	}
 	/*
 	 * MSI-X host index 0 is common for all adapter.
@@ -5378,14 +6138,12 @@
 	if (!instance->msix_vectors) {
 		i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
 		if (i < 0)
-			goto fail_setup_irqs;
+			goto fail_init_adapter;
 	}
 
 	megasas_setup_reply_map(instance);
 
 	dev_info(&instance->pdev->dev,
-		"firmware supports msix\t: (%d)", fw_msix_count);
-	dev_info(&instance->pdev->dev,
 		"current msix/online cpus\t: (%d/%d)\n",
 		instance->msix_vectors, (unsigned int)num_online_cpus());
 	dev_info(&instance->pdev->dev,
@@ -5404,13 +6162,14 @@
 	if (instance->instancet->init_adapter(instance))
 		goto fail_init_adapter;
 
-	if (instance->adapter_type == VENTURA_SERIES) {
-		scratch_pad_4 =
-			readl(&instance->reg_set->outbound_scratch_pad_4);
-		if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >=
+	if (instance->adapter_type >= VENTURA_SERIES) {
+		scratch_pad_3 =
+			megasas_readl(instance,
+				      &instance->reg_set->outbound_scratch_pad_3);
+		if ((scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK) >=
 			MR_DEFAULT_NVME_PAGE_SHIFT)
 			instance->nvme_page_size =
-				(1 << (scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK));
+				(1 << (scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK));
 
 		dev_info(&instance->pdev->dev,
 			 "NVME page size\t: (%d)\n", instance->nvme_page_size);
@@ -5421,26 +6180,24 @@
 		megasas_setup_irqs_ioapic(instance))
 		goto fail_init_adapter;
 
+	if (instance->adapter_type != MFI_SERIES)
+		megasas_setup_irq_poll(instance);
+
 	instance->instancet->enable_intr(instance);
 
 	dev_info(&instance->pdev->dev, "INIT adapter done\n");
 
 	megasas_setup_jbod_map(instance);
 
-	/** for passthrough
-	 * the following function will get the PD LIST.
-	 */
-	memset(instance->pd_list, 0,
-		(MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
-	if (megasas_get_pd_list(instance) < 0) {
-		dev_err(&instance->pdev->dev, "failed to get PD list\n");
+	if (megasas_get_device_list(instance) != SUCCESS) {
+		dev_err(&instance->pdev->dev,
+			"%s: megasas_get_device_list failed\n",
+			__func__);
 		goto fail_get_ld_pd_list;
 	}
 
-	memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
-
 	/* stream detection initialization */
-	if (instance->adapter_type == VENTURA_SERIES) {
+	if (instance->adapter_type >= VENTURA_SERIES) {
 		fusion->stream_detect_by_ld =
 			kcalloc(MAX_LOGICAL_DRIVES_EXT,
 				sizeof(struct LD_STREAM_DETECT *),
@@ -5468,10 +6225,6 @@
 		}
 	}
 
-	if (megasas_ld_list_query(instance,
-				  MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
-		goto fail_get_ld_pd_list;
-
 	/*
 	 * Compute the max allowed sectors per IO: The controller info has two
 	 * limits on max sectors. Driver should use the minimum of these two.
@@ -5533,13 +6286,18 @@
 
 	else {
 		if (instance->crash_dump_buf)
-			pci_free_consistent(instance->pdev,
+			dma_free_coherent(&instance->pdev->dev,
 				CRASH_DMA_BUF_SIZE,
 				instance->crash_dump_buf,
 				instance->crash_dump_h);
 		instance->crash_dump_buf = NULL;
 	}
 
+	if (instance->snapdump_wait_time) {
+		megasas_get_snapdump_properties(instance);
+		dev_info(&instance->pdev->dev, "Snap dump wait time\t: %d\n",
+			 instance->snapdump_wait_time);
+	}
 
 	dev_info(&instance->pdev->dev,
 		"pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n",
@@ -5551,9 +6309,8 @@
 		instance->UnevenSpanSupport ? "yes" : "no");
 	dev_info(&instance->pdev->dev, "firmware crash dump	: %s\n",
 		instance->crash_dump_drv_support ? "yes" : "no");
-	dev_info(&instance->pdev->dev, "jbod sync map		: %s\n",
-		instance->use_seqnum_jbod_fp ? "yes" : "no");
-
+	dev_info(&instance->pdev->dev, "JBOD sequence map	: %s\n",
+		instance->use_seqnum_jbod_fp ? "enabled" : "disabled");
 
 	instance->max_sectors_per_req = instance->max_num_sge *
 						SGE_BUFFER_SIZE / 512;
@@ -5577,19 +6334,32 @@
 
 	/* Launch SR-IOV heartbeat timer */
 	if (instance->requestorId) {
-		if (!megasas_sriov_start_heartbeat(instance, 1))
+		if (!megasas_sriov_start_heartbeat(instance, 1)) {
 			megasas_start_timer(instance);
-		else
+		} else {
 			instance->skip_heartbeat_timer_del = 1;
+			goto fail_get_ld_pd_list;
+		}
 	}
 
+	/*
+	 * Create and start watchdog thread which will monitor
+	 * controller state every 1 sec and trigger OCR when
+	 * it enters fault state
+	 */
+	if (instance->adapter_type != MFI_SERIES)
+		if (megasas_fusion_start_watchdog(instance) != SUCCESS)
+			goto fail_start_watchdog;
+
 	return 0;
 
+fail_start_watchdog:
+	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
+		del_timer_sync(&instance->sriov_heartbeat_timer);
 fail_get_ld_pd_list:
 	instance->instancet->disable_intr(instance);
-fail_init_adapter:
 	megasas_destroy_irqs(instance);
-fail_setup_irqs:
+fail_init_adapter:
 	if (instance->msix_vectors)
 		pci_free_irq_vectors(instance->pdev);
 	instance->msix_vectors = 0;
@@ -5616,7 +6386,7 @@
 	u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1);
 
 	if (instance->reply_queue)
-		pci_free_consistent(instance->pdev, reply_q_sz,
+		dma_free_coherent(&instance->pdev->dev, reply_q_sz,
 			    instance->reply_queue, instance->reply_queue_h);
 
 	megasas_free_cmds(instance);
@@ -5655,10 +6425,9 @@
 	}
 
 	dcmd = &cmd->frame->dcmd;
-	el_info = pci_zalloc_consistent(instance->pdev,
-					sizeof(struct megasas_evt_log_info),
-					&el_info_h);
-
+	el_info = dma_alloc_coherent(&instance->pdev->dev,
+				     sizeof(struct megasas_evt_log_info),
+				     &el_info_h, GFP_KERNEL);
 	if (!el_info) {
 		megasas_return_cmd(instance, cmd);
 		return -ENOMEM;
@@ -5695,8 +6464,9 @@
 	eli->boot_seq_num = el_info->boot_seq_num;
 
 dcmd_failed:
-	pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
-			    el_info, el_info_h);
+	dma_free_coherent(&instance->pdev->dev,
+			sizeof(struct megasas_evt_log_info),
+			el_info, el_info_h);
 
 	megasas_return_cmd(instance, cmd);
 
@@ -5861,7 +6631,8 @@
 	int ret;
 	struct megasas_cmd *cmd;
 	struct megasas_dcmd_frame *dcmd;
-	u16 targetId = (sdev->channel % 2) + sdev->id;
+	u16 targetId = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) +
+			sdev->id;
 
 	cmd = megasas_get_cmd(instance);
 
@@ -5903,8 +6674,10 @@
 		switch (dcmd_timeout_ocr_possible(instance)) {
 		case INITIATE_OCR:
 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+			mutex_unlock(&instance->reset_mutex);
 			megasas_reset_fusion(instance->host,
 					     MFI_IO_TIMEOUT_OCR);
+			mutex_lock(&instance->reset_mutex);
 			break;
 		case KILL_ADAPTER:
 			megaraid_sas_kill_hba(instance);
@@ -6023,13 +6796,13 @@
  * @instance:		Adapter soft state
  * Description:
  *
- * For Ventura, driver/FW will operate in 64bit DMA addresses.
+ * For Ventura, driver/FW will operate in 63bit DMA addresses.
  *
  * For invader-
  *	By default, driver/FW will operate in 32bit DMA addresses
  *	for consistent DMA mapping but if 32 bit consistent
- *	DMA mask fails, driver will try with 64 bit consistent
- *	mask provided FW is true 64bit DMA capable
+ *	DMA mask fails, the driver will try a 63 bit consistent
+ *	mask, provided the FW is truly 63 bit DMA capable
  *
  * For older controllers(Thunderbolt and MFI based adapters)-
  *	driver/FW will operate in 32 bit consistent DMA addresses.
@@ -6039,31 +6812,31 @@
 {
 	u64 consistent_mask;
 	struct pci_dev *pdev;
-	u32 scratch_pad_2;
+	u32 scratch_pad_1;
 
 	pdev = instance->pdev;
-	consistent_mask = (instance->adapter_type == VENTURA_SERIES) ?
-				DMA_BIT_MASK(64) : DMA_BIT_MASK(32);
+	consistent_mask = (instance->adapter_type >= VENTURA_SERIES) ?
+				DMA_BIT_MASK(63) : DMA_BIT_MASK(32);
 
 	if (IS_DMA64) {
-		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(63)) &&
 		    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
 			goto fail_set_dma_mask;
 
-		if ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) &&
+		if ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) &&
 		    (dma_set_coherent_mask(&pdev->dev, consistent_mask) &&
 		     dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) {
 			/*
 			 * If 32 bit DMA mask fails, then try for 64 bit mask
 			 * for FW capable of handling 64 bit DMA.
 			 */
-			scratch_pad_2 = readl
-				(&instance->reg_set->outbound_scratch_pad_2);
+			scratch_pad_1 = megasas_readl
+				(instance, &instance->reg_set->outbound_scratch_pad_1);
 
-			if (!(scratch_pad_2 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET))
+			if (!(scratch_pad_1 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET))
 				goto fail_set_dma_mask;
 			else if (dma_set_mask_and_coherent(&pdev->dev,
-							   DMA_BIT_MASK(64)))
+							   DMA_BIT_MASK(63)))
 				goto fail_set_dma_mask;
 		}
 	} else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
@@ -6075,8 +6848,8 @@
 		instance->consistent_mask_64bit = true;
 
 	dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n",
-		 ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) ? "64" : "32"),
-		 (instance->consistent_mask_64bit ? "64" : "32"));
+		 ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) ? "63" : "32"),
+		 (instance->consistent_mask_64bit ? "63" : "32"));
 
 	return 0;
 
@@ -6089,12 +6862,14 @@
 /*
  * megasas_set_adapter_type -	Set adapter type.
  *				Supported controllers can be divided in
- *				4 categories-  enum MR_ADAPTER_TYPE {
- *							MFI_SERIES = 1,
- *							THUNDERBOLT_SERIES = 2,
- *							INVADER_SERIES = 3,
- *							VENTURA_SERIES = 4,
- *						};
+ *				different categories-
+ *					enum MR_ADAPTER_TYPE {
+ *						MFI_SERIES = 1,
+ *						THUNDERBOLT_SERIES = 2,
+ *						INVADER_SERIES = 3,
+ *						VENTURA_SERIES = 4,
+ *						AERO_SERIES = 5,
+ *					};
  * @instance:			Adapter soft state
  * return:			void
  */
@@ -6105,6 +6880,12 @@
 		instance->adapter_type = MFI_SERIES;
 	} else {
 		switch (instance->pdev->device) {
+		case PCI_DEVICE_ID_LSI_AERO_10E1:
+		case PCI_DEVICE_ID_LSI_AERO_10E2:
+		case PCI_DEVICE_ID_LSI_AERO_10E5:
+		case PCI_DEVICE_ID_LSI_AERO_10E6:
+			instance->adapter_type = AERO_SERIES;
+			break;
 		case PCI_DEVICE_ID_LSI_VENTURA:
 		case PCI_DEVICE_ID_LSI_CRUSADER:
 		case PCI_DEVICE_ID_LSI_HARPOON:
@@ -6134,10 +6915,10 @@
 
 static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance)
 {
-	instance->producer = pci_alloc_consistent(instance->pdev, sizeof(u32),
-						  &instance->producer_h);
-	instance->consumer = pci_alloc_consistent(instance->pdev, sizeof(u32),
-						  &instance->consumer_h);
+	instance->producer = dma_alloc_coherent(&instance->pdev->dev,
+			sizeof(u32), &instance->producer_h, GFP_KERNEL);
+	instance->consumer = dma_alloc_coherent(&instance->pdev->dev,
+			sizeof(u32), &instance->consumer_h, GFP_KERNEL);
 
 	if (!instance->producer || !instance->consumer) {
 		dev_err(&instance->pdev->dev,
@@ -6172,6 +6953,7 @@
 		if (megasas_alloc_mfi_ctrl_mem(instance))
 			goto fail;
 		break;
+	case AERO_SERIES:
 	case VENTURA_SERIES:
 	case THUNDERBOLT_SERIES:
 	case INVADER_SERIES:
@@ -6199,11 +6981,11 @@
 	kfree(instance->reply_map);
 	if (instance->adapter_type == MFI_SERIES) {
 		if (instance->producer)
-			pci_free_consistent(instance->pdev, sizeof(u32),
+			dma_free_coherent(&instance->pdev->dev, sizeof(u32),
 					    instance->producer,
 					    instance->producer_h);
 		if (instance->consumer)
-			pci_free_consistent(instance->pdev, sizeof(u32),
+			dma_free_coherent(&instance->pdev->dev, sizeof(u32),
 					    instance->consumer,
 					    instance->consumer_h);
 	} else {
@@ -6224,10 +7006,9 @@
 	struct pci_dev *pdev = instance->pdev;
 	struct fusion_context *fusion = instance->ctrl_context;
 
-	instance->evt_detail =
-		pci_alloc_consistent(pdev,
-				     sizeof(struct megasas_evt_detail),
-				     &instance->evt_detail_h);
+	instance->evt_detail = dma_alloc_coherent(&pdev->dev,
+			sizeof(struct megasas_evt_detail),
+			&instance->evt_detail_h, GFP_KERNEL);
 
 	if (!instance->evt_detail) {
 		dev_err(&instance->pdev->dev,
@@ -6247,12 +7028,32 @@
 				"Failed to allocate PD list buffer\n");
 			return -ENOMEM;
 		}
+
+		instance->snapdump_prop = dma_alloc_coherent(&pdev->dev,
+				sizeof(struct MR_SNAPDUMP_PROPERTIES),
+				&instance->snapdump_prop_h, GFP_KERNEL);
+
+		if (!instance->snapdump_prop)
+			dev_err(&pdev->dev,
+				"Failed to allocate snapdump properties buffer\n");
+
+		instance->host_device_list_buf = dma_alloc_coherent(&pdev->dev,
+							HOST_DEVICE_LIST_SZ,
+							&instance->host_device_list_buf_h,
+							GFP_KERNEL);
+
+		if (!instance->host_device_list_buf) {
+			dev_err(&pdev->dev,
+				"Failed to allocate targetid list buffer\n");
+			return -ENOMEM;
+		}
+
 	}
 
 	instance->pd_list_buf =
-		pci_alloc_consistent(pdev,
+		dma_alloc_coherent(&pdev->dev,
 				     MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
-				     &instance->pd_list_buf_h);
+				     &instance->pd_list_buf_h, GFP_KERNEL);
 
 	if (!instance->pd_list_buf) {
 		dev_err(&pdev->dev, "Failed to allocate PD list buffer\n");
@@ -6260,9 +7061,9 @@
 	}
 
 	instance->ctrl_info_buf =
-		pci_alloc_consistent(pdev,
+		dma_alloc_coherent(&pdev->dev,
 				     sizeof(struct megasas_ctrl_info),
-				     &instance->ctrl_info_buf_h);
+				     &instance->ctrl_info_buf_h, GFP_KERNEL);
 
 	if (!instance->ctrl_info_buf) {
 		dev_err(&pdev->dev,
@@ -6271,9 +7072,9 @@
 	}
 
 	instance->ld_list_buf =
-		pci_alloc_consistent(pdev,
+		dma_alloc_coherent(&pdev->dev,
 				     sizeof(struct MR_LD_LIST),
-				     &instance->ld_list_buf_h);
+				     &instance->ld_list_buf_h, GFP_KERNEL);
 
 	if (!instance->ld_list_buf) {
 		dev_err(&pdev->dev, "Failed to allocate LD list buffer\n");
@@ -6281,9 +7082,9 @@
 	}
 
 	instance->ld_targetid_list_buf =
-		pci_alloc_consistent(pdev,
-				     sizeof(struct MR_LD_TARGETID_LIST),
-				     &instance->ld_targetid_list_buf_h);
+		dma_alloc_coherent(&pdev->dev,
+				sizeof(struct MR_LD_TARGETID_LIST),
+				&instance->ld_targetid_list_buf_h, GFP_KERNEL);
 
 	if (!instance->ld_targetid_list_buf) {
 		dev_err(&pdev->dev,
@@ -6293,21 +7094,20 @@
 
 	if (!reset_devices) {
 		instance->system_info_buf =
-			pci_alloc_consistent(pdev,
-					     sizeof(struct MR_DRV_SYSTEM_INFO),
-					     &instance->system_info_h);
+			dma_alloc_coherent(&pdev->dev,
+					sizeof(struct MR_DRV_SYSTEM_INFO),
+					&instance->system_info_h, GFP_KERNEL);
 		instance->pd_info =
-			pci_alloc_consistent(pdev,
-					     sizeof(struct MR_PD_INFO),
-					     &instance->pd_info_h);
+			dma_alloc_coherent(&pdev->dev,
+					sizeof(struct MR_PD_INFO),
+					&instance->pd_info_h, GFP_KERNEL);
 		instance->tgt_prop =
-			pci_alloc_consistent(pdev,
-					     sizeof(struct MR_TARGET_PROPERTIES),
-					     &instance->tgt_prop_h);
+			dma_alloc_coherent(&pdev->dev,
+					sizeof(struct MR_TARGET_PROPERTIES),
+					&instance->tgt_prop_h, GFP_KERNEL);
 		instance->crash_dump_buf =
-			pci_alloc_consistent(pdev,
-					     CRASH_DMA_BUF_SIZE,
-					     &instance->crash_dump_h);
+			dma_alloc_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE,
+					&instance->crash_dump_h, GFP_KERNEL);
 
 		if (!instance->system_info_buf)
 			dev_err(&instance->pdev->dev,
@@ -6343,7 +7143,7 @@
 	struct fusion_context *fusion = instance->ctrl_context;
 
 	if (instance->evt_detail)
-		pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
+		dma_free_coherent(&pdev->dev, sizeof(struct megasas_evt_detail),
 				    instance->evt_detail,
 				    instance->evt_detail_h);
 
@@ -6354,43 +7154,56 @@
 				  fusion->ioc_init_request_phys);
 
 	if (instance->pd_list_buf)
-		pci_free_consistent(pdev,
+		dma_free_coherent(&pdev->dev,
 				    MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
 				    instance->pd_list_buf,
 				    instance->pd_list_buf_h);
 
 	if (instance->ld_list_buf)
-		pci_free_consistent(pdev, sizeof(struct MR_LD_LIST),
+		dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_LIST),
 				    instance->ld_list_buf,
 				    instance->ld_list_buf_h);
 
 	if (instance->ld_targetid_list_buf)
-		pci_free_consistent(pdev, sizeof(struct MR_LD_TARGETID_LIST),
+		dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_TARGETID_LIST),
 				    instance->ld_targetid_list_buf,
 				    instance->ld_targetid_list_buf_h);
 
 	if (instance->ctrl_info_buf)
-		pci_free_consistent(pdev, sizeof(struct megasas_ctrl_info),
+		dma_free_coherent(&pdev->dev, sizeof(struct megasas_ctrl_info),
 				    instance->ctrl_info_buf,
 				    instance->ctrl_info_buf_h);
 
 	if (instance->system_info_buf)
-		pci_free_consistent(pdev, sizeof(struct MR_DRV_SYSTEM_INFO),
+		dma_free_coherent(&pdev->dev, sizeof(struct MR_DRV_SYSTEM_INFO),
 				    instance->system_info_buf,
 				    instance->system_info_h);
 
 	if (instance->pd_info)
-		pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
+		dma_free_coherent(&pdev->dev, sizeof(struct MR_PD_INFO),
 				    instance->pd_info, instance->pd_info_h);
 
 	if (instance->tgt_prop)
-		pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
+		dma_free_coherent(&pdev->dev, sizeof(struct MR_TARGET_PROPERTIES),
 				    instance->tgt_prop, instance->tgt_prop_h);
 
 	if (instance->crash_dump_buf)
-		pci_free_consistent(pdev, CRASH_DMA_BUF_SIZE,
+		dma_free_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE,
 				    instance->crash_dump_buf,
 				    instance->crash_dump_h);
+
+	if (instance->snapdump_prop)
+		dma_free_coherent(&pdev->dev,
+				  sizeof(struct MR_SNAPDUMP_PROPERTIES),
+				  instance->snapdump_prop,
+				  instance->snapdump_prop_h);
+
+	if (instance->host_device_list_buf)
+		dma_free_coherent(&pdev->dev,
+				  HOST_DEVICE_LIST_SZ,
+				  instance->host_device_list_buf,
+				  instance->host_device_list_buf_h);
+
 }
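Most of the allocation hunks in this patch are the same mechanical conversion from the deprecated PCI DMA wrappers to the generic DMA API; a minimal sketch of the pattern, where 'len' and the handle are illustrative:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Illustrative conversion; not taken verbatim from the driver. */
static void *example_alloc(struct pci_dev *pdev, size_t len,
			   dma_addr_t *handle)
{
	/* Old: buf = pci_alloc_consistent(pdev, len, handle);
	 *      (a wrapper that implied GFP_ATOMIC)
	 * New: allocate against the underlying struct device with an
	 *      explicit allocation context.
	 */
	return dma_alloc_coherent(&pdev->dev, len, handle, GFP_KERNEL);
}

The matching frees move from pci_free_consistent(pdev, ...) to dma_free_coherent(&pdev->dev, ...) with identical size/virt/handle arguments.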
 
 /*
@@ -6414,6 +7227,7 @@
 	INIT_LIST_HEAD(&instance->internal_reset_pending_q);
 
 	atomic_set(&instance->fw_outstanding, 0);
+	atomic64_set(&instance->total_io_count, 0);
 
 	init_waitqueue_head(&instance->int_cmd_wait_q);
 	init_waitqueue_head(&instance->abort_cmd_wait_q);
@@ -6436,13 +7250,13 @@
 	instance->last_time = 0;
 	instance->disableOnlineCtrlReset = 1;
 	instance->UnevenSpanSupport = 0;
+	instance->smp_affinity_enable = smp_affinity_enable ? true : false;
+	instance->msix_load_balance = false;
 
-	if (instance->adapter_type != MFI_SERIES) {
+	if (instance->adapter_type != MFI_SERIES)
 		INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
-		INIT_WORK(&instance->crash_init, megasas_fusion_crash_dump_wq);
-	} else {
+	else
 		INIT_WORK(&instance->work_init, process_fw_state_change_wq);
-	}
 }
 
 /**
@@ -6458,6 +7272,19 @@
 	struct megasas_instance *instance;
 	u16 control = 0;
 
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_LSI_AERO_10E0:
+	case PCI_DEVICE_ID_LSI_AERO_10E3:
+	case PCI_DEVICE_ID_LSI_AERO_10E4:
+	case PCI_DEVICE_ID_LSI_AERO_10E7:
+		dev_err(&pdev->dev, "Adapter is in non secure mode\n");
+		return 1;
+	case PCI_DEVICE_ID_LSI_AERO_10E1:
+	case PCI_DEVICE_ID_LSI_AERO_10E5:
+		dev_info(&pdev->dev, "Adapter is in configurable secure mode\n");
+		break;
+	}
+
 	/* Reset MSI-X in the kdump kernel */
 	if (reset_devices) {
 		pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
@@ -6516,17 +7343,20 @@
 	if (instance->requestorId) {
 		if (instance->PlasmaFW111) {
 			instance->vf_affiliation_111 =
-				pci_alloc_consistent(pdev, sizeof(struct MR_LD_VF_AFFILIATION_111),
-						     &instance->vf_affiliation_111_h);
+				dma_alloc_coherent(&pdev->dev,
+					sizeof(struct MR_LD_VF_AFFILIATION_111),
+					&instance->vf_affiliation_111_h,
+					GFP_KERNEL);
 			if (!instance->vf_affiliation_111)
 				dev_warn(&pdev->dev, "Can't allocate "
 				       "memory for VF affiliation buffer\n");
 		} else {
 			instance->vf_affiliation =
-				pci_alloc_consistent(pdev,
-						     (MAX_LOGICAL_DRIVES + 1) *
-						     sizeof(struct MR_LD_VF_AFFILIATION),
-						     &instance->vf_affiliation_h);
+				dma_alloc_coherent(&pdev->dev,
+					(MAX_LOGICAL_DRIVES + 1) *
+					sizeof(struct MR_LD_VF_AFFILIATION),
+					&instance->vf_affiliation_h,
+					GFP_KERNEL);
 			if (!instance->vf_affiliation)
 				dev_warn(&pdev->dev, "Can't allocate "
 				       "memory for VF affiliation buffer\n");
@@ -6556,7 +7386,9 @@
 	/*
 	 * Trigger SCSI to scan our drives
 	 */
-	scsi_scan_host(host);
+	if (!instance->enable_fw_dev_list ||
+	    (instance->host_device_list_buf->count > 0))
+		scsi_scan_host(host);
 
 	/*
 	 * Initiate AEN (Asynchronous Event Notification)
@@ -6566,6 +7398,8 @@
 		goto fail_start_aen;
 	}
 
+	megasas_setup_debugfs(instance);
+
 	/* Get current SR-IOV LD/VF affiliation */
 	if (instance->requestorId)
 		megasas_get_ld_vf_affiliation(instance, 1);
@@ -6697,17 +7531,25 @@
 static int
 megasas_suspend(struct pci_dev *pdev, pm_message_t state)
 {
-	struct Scsi_Host *host;
 	struct megasas_instance *instance;
 
 	instance = pci_get_drvdata(pdev);
-	host = instance->host;
+
+	if (!instance)
+		return 0;
+
 	instance->unload = 1;
 
+	dev_info(&pdev->dev, "%s is called\n", __func__);
+
 	/* Shutdown SR-IOV heartbeat timer */
 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
 		del_timer_sync(&instance->sriov_heartbeat_timer);
 
+	/* Stop the FW fault detection watchdog */
+	if (instance->adapter_type != MFI_SERIES)
+		megasas_fusion_stop_watchdog(instance);
+
 	megasas_flush_cache(instance);
 	megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
 
@@ -6749,11 +7591,16 @@
 	int irq_flags = PCI_IRQ_LEGACY;
 
 	instance = pci_get_drvdata(pdev);
+
+	if (!instance)
+		return 0;
+
 	host = instance->host;
 	pci_set_power_state(pdev, PCI_D0);
 	pci_enable_wake(pdev, PCI_D0, 0);
 	pci_restore_state(pdev);
 
+	dev_info(&pdev->dev, "%s is called\n", __func__);
 	/*
 	 * PCI prepping: enable device set bus mastering and dma mask
 	 */
@@ -6785,7 +7632,7 @@
 	/* Now re-enable MSI-X */
 	if (instance->msix_vectors) {
 		irq_flags = PCI_IRQ_MSIX;
-		if (smp_affinity_enable)
+		if (instance->smp_affinity_enable)
 			irq_flags |= PCI_IRQ_AFFINITY;
 	}
 	rval = pci_alloc_irq_vectors(instance->pdev, 1,
@@ -6823,6 +7670,9 @@
 			megasas_setup_irqs_ioapic(instance))
 		goto fail_init_mfi;
 
+	if (instance->adapter_type != MFI_SERIES)
+		megasas_setup_irq_poll(instance);
+
 	/* Re-launch SR-IOV heartbeat timer */
 	if (instance->requestorId) {
 		if (!megasas_sriov_start_heartbeat(instance, 0))
@@ -6843,8 +7693,16 @@
 	if (megasas_start_aen(instance))
 		dev_err(&instance->pdev->dev, "Start AEN failed\n");
 
+	/* Re-launch FW fault watchdog */
+	if (instance->adapter_type != MFI_SERIES)
+		if (megasas_fusion_start_watchdog(instance) != SUCCESS)
+			goto fail_start_watchdog;
+
 	return 0;
 
+fail_start_watchdog:
+	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
+		del_timer_sync(&instance->sriov_heartbeat_timer);
 fail_init_mfi:
 	megasas_free_ctrl_dma_buffers(instance);
 	megasas_free_ctrl_mem(instance);
@@ -6905,6 +7763,10 @@
 	u32 pd_seq_map_sz;
 
 	instance = pci_get_drvdata(pdev);
+
+	if (!instance)
+		return;
+
 	host = instance->host;
 	fusion = instance->ctrl_context;
 
@@ -6912,6 +7774,10 @@
 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
 		del_timer_sync(&instance->sriov_heartbeat_timer);
 
+	/* Stop the FW fault detection watchdog */
+	if (instance->adapter_type != MFI_SERIES)
+		megasas_fusion_stop_watchdog(instance);
+
 	if (instance->fw_crash_state != UNAVAILABLE)
 		megasas_free_host_crash_buffer(instance);
 	scsi_remove_host(instance->host);
@@ -6956,7 +7822,7 @@
 	if (instance->msix_vectors)
 		pci_free_irq_vectors(instance->pdev);
 
-	if (instance->adapter_type == VENTURA_SERIES) {
+	if (instance->adapter_type >= VENTURA_SERIES) {
 		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
 			kfree(fusion->stream_detect_by_ld[i]);
 		kfree(fusion->stream_detect_by_ld);
@@ -6994,19 +7860,19 @@
 	}
 
 	if (instance->vf_affiliation)
-		pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) *
+		dma_free_coherent(&pdev->dev, (MAX_LOGICAL_DRIVES + 1) *
 				    sizeof(struct MR_LD_VF_AFFILIATION),
 				    instance->vf_affiliation,
 				    instance->vf_affiliation_h);
 
 	if (instance->vf_affiliation_111)
-		pci_free_consistent(pdev,
+		dma_free_coherent(&pdev->dev,
 				    sizeof(struct MR_LD_VF_AFFILIATION_111),
 				    instance->vf_affiliation_111,
 				    instance->vf_affiliation_111_h);
 
 	if (instance->hb_host_mem)
-		pci_free_consistent(pdev, sizeof(struct MR_CTRL_HB_HOST_MEM),
+		dma_free_coherent(&pdev->dev, sizeof(struct MR_CTRL_HB_HOST_MEM),
 				    instance->hb_host_mem,
 				    instance->hb_host_mem_h);
 
@@ -7014,6 +7880,8 @@
 
 	megasas_free_ctrl_mem(instance);
 
+	megasas_destroy_debugfs(instance);
+
 	scsi_host_put(host);
 
 	pci_disable_device(pdev);
@@ -7027,6 +7895,9 @@
 {
 	struct megasas_instance *instance = pci_get_drvdata(pdev);
 
+	if (!instance)
+		return;
+
 	instance->unload = 1;
 
 	if (megasas_wait_for_adapter_operational(instance))
@@ -7172,7 +8043,9 @@
 
 	if ((ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) ||
 	    ((ioc->frame.hdr.cmd == MFI_CMD_NVME) &&
-	    !instance->support_nvme_passthru)) {
+	    !instance->support_nvme_passthru) ||
+	    ((ioc->frame.hdr.cmd == MFI_CMD_TOOLBOX) &&
+	    !instance->support_pci_lane_margining)) {
 		dev_err(&instance->pdev->dev,
 			"Received invalid ioctl command 0x%x\n",
 			ioc->frame.hdr.cmd);
@@ -7208,10 +8081,13 @@
 		opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
 
 	if (opcode == MR_DCMD_CTRL_SHUTDOWN) {
+		mutex_lock(&instance->reset_mutex);
 		if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) {
 			megasas_return_cmd(instance, cmd);
+			mutex_unlock(&instance->reset_mutex);
 			return -1;
 		}
+		mutex_unlock(&instance->reset_mutex);
 	}
 
 	if (opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) {
@@ -7254,7 +8130,7 @@
 
 		/*
 		 * We don't change the dma_coherent_mask, so
-		 * pci_alloc_consistent only returns 32bit addresses
+		 * dma_alloc_coherent only returns 32bit addresses
 		 */
 		if (instance->consistent_mask_64bit) {
 			kern_sge64[i].phys_addr = cpu_to_le64(buf_handle);
@@ -7653,6 +8529,14 @@
 
 static DRIVER_ATTR_RO(support_nvme_encapsulation);
 
+static ssize_t
+support_pci_lane_margining_show(struct device_driver *dd, char *buf)
+{
+	return sprintf(buf, "%u\n", support_pci_lane_margining);
+}
+
+static DRIVER_ATTR_RO(support_pci_lane_margining);
+
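For context, DRIVER_ATTR_RO() is the stock helper from <linux/device.h>; roughly, the line above expands to the following (modulo the exact macro chain), producing a read-only attribute that should surface as /sys/bus/pci/drivers/megaraid_sas/support_pci_lane_margining:

static struct driver_attribute driver_attr_support_pci_lane_margining = {
	.attr = { .name = "support_pci_lane_margining", .mode = 0444 },
	.show = support_pci_lane_margining_show,
};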
 static inline void megasas_remove_scsi_device(struct scsi_device *sdev)
 {
 	sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n");
@@ -7660,102 +8544,103 @@
 	scsi_device_put(sdev);
 }
 
-static void
-megasas_aen_polling(struct work_struct *work)
+/**
+ * megasas_update_device_list -	Update the PD and LD device list from FW
+ *				after an AEN event notification
+ * @instance:			Adapter soft state
+ * @event_type:			Indicates type of event (PD or LD event)
+ *
+ * @return:			Success or failure
+ *
+ * Issue DCMDs to Firmware to update the internal device list in driver.
+ * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination
+ * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list.
+ */
+static
+int megasas_update_device_list(struct megasas_instance *instance,
+			       int event_type)
 {
-	struct megasas_aen_event *ev =
-		container_of(work, struct megasas_aen_event, hotplug_work.work);
-	struct megasas_instance *instance = ev->instance;
-	union megasas_evt_class_locale class_locale;
-	struct  Scsi_Host *host;
-	struct  scsi_device *sdev1;
-	u16     pd_index = 0;
-	u16	ld_index = 0;
-	int     i, j, doscan = 0;
-	u32 seq_num, wait_time = MEGASAS_RESET_WAIT_TIME;
-	int error;
-	u8  dcmd_ret = DCMD_SUCCESS;
+	int dcmd_ret = DCMD_SUCCESS;
 
-	if (!instance) {
-		printk(KERN_ERR "invalid instance!\n");
-		kfree(ev);
-		return;
-	}
-
-	/* Adjust event workqueue thread wait time for VF mode */
-	if (instance->requestorId)
-		wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
-
-	/* Don't run the event workqueue thread if OCR is running */
-	mutex_lock(&instance->reset_mutex);
-
-	instance->ev = NULL;
-	host = instance->host;
-	if (instance->evt_detail) {
-		megasas_decode_evt(instance);
-
-		switch (le32_to_cpu(instance->evt_detail->code)) {
-
-		case MR_EVT_PD_INSERTED:
-		case MR_EVT_PD_REMOVED:
-			dcmd_ret = megasas_get_pd_list(instance);
-			if (dcmd_ret == DCMD_SUCCESS)
-				doscan = SCAN_PD_CHANNEL;
-			break;
-
-		case MR_EVT_LD_OFFLINE:
-		case MR_EVT_CFG_CLEARED:
-		case MR_EVT_LD_DELETED:
-		case MR_EVT_LD_CREATED:
-			if (!instance->requestorId ||
-				(instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0)))
-				dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
-
-			if (dcmd_ret == DCMD_SUCCESS)
-				doscan = SCAN_VD_CHANNEL;
-
-			break;
-
-		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
-		case MR_EVT_FOREIGN_CFG_IMPORTED:
-		case MR_EVT_LD_STATE_CHANGE:
-			dcmd_ret = megasas_get_pd_list(instance);
-
-			if (dcmd_ret != DCMD_SUCCESS)
-				break;
-
-			if (!instance->requestorId ||
-				(instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0)))
-				dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
-
-			if (dcmd_ret != DCMD_SUCCESS)
-				break;
-
-			doscan = SCAN_VD_CHANNEL | SCAN_PD_CHANNEL;
-			dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
-				instance->host->host_no);
-			break;
-
-		case MR_EVT_CTRL_PROP_CHANGED:
-				dcmd_ret = megasas_get_ctrl_info(instance);
-				break;
-		default:
-			doscan = 0;
-			break;
-		}
+	if (instance->enable_fw_dev_list) {
+		dcmd_ret = megasas_host_device_list_query(instance, false);
+		if (dcmd_ret != DCMD_SUCCESS)
+			goto out;
 	} else {
-		dev_err(&instance->pdev->dev, "invalid evt_detail!\n");
-		mutex_unlock(&instance->reset_mutex);
-		kfree(ev);
-		return;
+		if (event_type & SCAN_PD_CHANNEL) {
+			dcmd_ret = megasas_get_pd_list(instance);
+
+			if (dcmd_ret != DCMD_SUCCESS)
+				goto out;
+		}
+
+		if (event_type & SCAN_VD_CHANNEL) {
+			if (!instance->requestorId ||
+			    (instance->requestorId &&
+			     megasas_get_ld_vf_affiliation(instance, 0))) {
+				dcmd_ret = megasas_ld_list_query(instance,
+						MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
+				if (dcmd_ret != DCMD_SUCCESS)
+					goto out;
+			}
+		}
 	}
 
-	mutex_unlock(&instance->reset_mutex);
+out:
+	return dcmd_ret;
+}
 
-	if (doscan & SCAN_PD_CHANNEL) {
+/**
+ * megasas_add_remove_devices -	Add/remove devices to SCSI mid-layer
+ *				after an AEN event notification
+ * @instance:			Adapter soft state
+ * @scan_type:			Indicates type of devices (PD/LD) to add
+ * Return:			void
+ */
+static
+void megasas_add_remove_devices(struct megasas_instance *instance,
+				int scan_type)
+{
+	int i, j;
+	u16 pd_index = 0;
+	u16 ld_index = 0;
+	u16 channel = 0, id = 0;
+	struct Scsi_Host *host;
+	struct scsi_device *sdev1;
+	struct MR_HOST_DEVICE_LIST *targetid_list = NULL;
+	struct MR_HOST_DEVICE_LIST_ENTRY *targetid_entry = NULL;
+
+	host = instance->host;
+
+	if (instance->enable_fw_dev_list) {
+		targetid_list = instance->host_device_list_buf;
+		for (i = 0; i < targetid_list->count; i++) {
+			targetid_entry = &targetid_list->host_device_list[i];
+			if (targetid_entry->flags.u.bits.is_sys_pd) {
+				channel = le16_to_cpu(targetid_entry->target_id) /
+						MEGASAS_MAX_DEV_PER_CHANNEL;
+				id = le16_to_cpu(targetid_entry->target_id) %
+						MEGASAS_MAX_DEV_PER_CHANNEL;
+			} else {
+				channel = MEGASAS_MAX_PD_CHANNELS +
+					  (le16_to_cpu(targetid_entry->target_id) /
+					   MEGASAS_MAX_DEV_PER_CHANNEL);
+				id = le16_to_cpu(targetid_entry->target_id) %
+						MEGASAS_MAX_DEV_PER_CHANNEL;
+			}
+			sdev1 = scsi_device_lookup(host, channel, id, 0);
+			if (!sdev1) {
+				scsi_add_device(host, channel, id, 0);
+			} else {
+				scsi_device_put(sdev1);
+			}
+		}
+	}
+
+	if (scan_type & SCAN_PD_CHANNEL) {
 		for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
 			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
-				pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
+				pd_index = i * MEGASAS_MAX_DEV_PER_CHANNEL + j;
 				sdev1 = scsi_device_lookup(host, i, j, 0);
 				if (instance->pd_list[pd_index].driveState ==
 							MR_PD_STATE_SYSTEM) {
@@ -7771,11 +8656,12 @@
 		}
 	}
 
-	if (doscan & SCAN_VD_CHANNEL) {
+	if (scan_type & SCAN_VD_CHANNEL) {
 		for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
 			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
 				ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
-				sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+				sdev1 = scsi_device_lookup(host,
+						MEGASAS_MAX_PD_CHANNELS + i, j, 0);
 				if (instance->ld_ids[ld_index] != 0xff) {
 					if (!sdev1)
 						scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
@@ -7789,6 +8675,84 @@
 		}
 	}
 
+}
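A sketch of the target_id mapping above with a worked example; the channel constants are quoted from megaraid_sas.h as assumptions (2 PD channels, 128 devices per channel):

#include <linux/types.h>

#define EX_MAX_PD_CHANNELS	2	/* assumed MEGASAS_MAX_PD_CHANNELS */
#define EX_MAX_DEV_PER_CHANNEL	128	/* assumed MEGASAS_MAX_DEV_PER_CHANNEL */

/* is_sys_pd, target_id 130 -> channel 1, id 2;
 * logical drive, target_id 5 -> channel 2 + 0 = 2, id 5.
 */
static void example_map_target_id(u16 target_id, bool is_sys_pd,
				  u16 *channel, u16 *id)
{
	*id = target_id % EX_MAX_DEV_PER_CHANNEL;
	if (is_sys_pd)
		*channel = target_id / EX_MAX_DEV_PER_CHANNEL;
	else	/* LDs sit on the channels above the PD channels */
		*channel = EX_MAX_PD_CHANNELS +
			   target_id / EX_MAX_DEV_PER_CHANNEL;
}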
+
+static void
+megasas_aen_polling(struct work_struct *work)
+{
+	struct megasas_aen_event *ev =
+		container_of(work, struct megasas_aen_event, hotplug_work.work);
+	struct megasas_instance *instance = ev->instance;
+	union megasas_evt_class_locale class_locale;
+	int event_type = 0;
+	u32 seq_num;
+	int error;
+	u8  dcmd_ret = DCMD_SUCCESS;
+
+	if (!instance) {
+		printk(KERN_ERR "invalid instance!\n");
+		kfree(ev);
+		return;
+	}
+
+	/* Don't run the event workqueue thread if OCR is running */
+	mutex_lock(&instance->reset_mutex);
+
+	instance->ev = NULL;
+	if (instance->evt_detail) {
+		megasas_decode_evt(instance);
+
+		switch (le32_to_cpu(instance->evt_detail->code)) {
+
+		case MR_EVT_PD_INSERTED:
+		case MR_EVT_PD_REMOVED:
+			event_type = SCAN_PD_CHANNEL;
+			break;
+
+		case MR_EVT_LD_OFFLINE:
+		case MR_EVT_CFG_CLEARED:
+		case MR_EVT_LD_DELETED:
+		case MR_EVT_LD_CREATED:
+			event_type = SCAN_VD_CHANNEL;
+			break;
+
+		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
+		case MR_EVT_FOREIGN_CFG_IMPORTED:
+		case MR_EVT_LD_STATE_CHANGE:
+			event_type = SCAN_PD_CHANNEL | SCAN_VD_CHANNEL;
+			dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
+				instance->host->host_no);
+			break;
+
+		case MR_EVT_CTRL_PROP_CHANGED:
+			dcmd_ret = megasas_get_ctrl_info(instance);
+			if (dcmd_ret == DCMD_SUCCESS &&
+			    instance->snapdump_wait_time) {
+				megasas_get_snapdump_properties(instance);
+				dev_info(&instance->pdev->dev,
+					 "Snap dump wait time\t: %d\n",
+					 instance->snapdump_wait_time);
+			}
+			break;
+		default:
+			event_type = 0;
+			break;
+		}
+	} else {
+		dev_err(&instance->pdev->dev, "invalid evt_detail!\n");
+		mutex_unlock(&instance->reset_mutex);
+		kfree(ev);
+		return;
+	}
+
+	if (event_type)
+		dcmd_ret = megasas_update_device_list(instance, event_type);
+
+	mutex_unlock(&instance->reset_mutex);
+
+	if (event_type && dcmd_ret == DCMD_SUCCESS)
+		megasas_add_remove_devices(instance, event_type);
+
 	if (dcmd_ret == DCMD_SUCCESS)
 		seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
 	else
@@ -7842,6 +8806,7 @@
 	support_poll_for_event = 2;
 	support_device_change = 1;
 	support_nvme_encapsulation = true;
+	support_pci_lane_margining = true;
 
 	memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
 
@@ -7857,6 +8822,8 @@
 
 	megasas_mgmt_majorno = rval;
 
+	megasas_init_debugfs();
+
 	/*
 	 * Register ourselves as PCI hotplug module
 	 */
@@ -7867,6 +8834,12 @@
 		goto err_pcidrv;
 	}
 
+	if ((event_log_level < MFI_EVT_CLASS_DEBUG) ||
+	    (event_log_level > MFI_EVT_CLASS_DEAD)) {
+		pr_warn("megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n");
+		event_log_level = MFI_EVT_CLASS_CRITICAL;
+	}
+
 	rval = driver_create_file(&megasas_pci_driver.driver,
 				  &driver_attr_version);
 	if (rval)
@@ -7896,8 +8869,17 @@
 	if (rval)
 		goto err_dcf_support_nvme_encapsulation;
 
+	rval = driver_create_file(&megasas_pci_driver.driver,
+				  &driver_attr_support_pci_lane_margining);
+	if (rval)
+		goto err_dcf_support_pci_lane_margining;
+
 	return rval;
 
+err_dcf_support_pci_lane_margining:
+	driver_remove_file(&megasas_pci_driver.driver,
+			   &driver_attr_support_nvme_encapsulation);
+
 err_dcf_support_nvme_encapsulation:
 	driver_remove_file(&megasas_pci_driver.driver,
 			   &driver_attr_support_device_change);
@@ -7916,6 +8898,7 @@
 err_dcf_attr_ver:
 	pci_unregister_driver(&megasas_pci_driver);
 err_pcidrv:
+	megasas_exit_debugfs();
 	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
 	return rval;
 }
@@ -7936,8 +8919,11 @@
 	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
 	driver_remove_file(&megasas_pci_driver.driver,
 			   &driver_attr_support_nvme_encapsulation);
+	driver_remove_file(&megasas_pci_driver.driver,
+			   &driver_attr_support_pci_lane_margining);
 
 	pci_unregister_driver(&megasas_pci_driver);
+	megasas_exit_debugfs();
 	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
 }
 
diff --git a/drivers/scsi/megaraid/megaraid_sas_debugfs.c b/drivers/scsi/megaraid/megaraid_sas_debugfs.c
new file mode 100644
index 0000000..c697607
--- /dev/null
+++ b/drivers/scsi/megaraid/megaraid_sas_debugfs.c
@@ -0,0 +1,179 @@
+/*
+ *  Linux MegaRAID driver for SAS based RAID controllers
+ *
+ *  Copyright (c) 2003-2018  LSI Corporation.
+ *  Copyright (c) 2003-2018  Avago Technologies.
+ *  Copyright (c) 2003-2018  Broadcom Inc.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version 2
+ *  of the License, or (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ *  Authors: Broadcom Inc.
+ *           Kashyap Desai <kashyap.desai@broadcom.com>
+ *           Sumit Saxena <sumit.saxena@broadcom.com>
+ *           Shivasharan S <shivasharan.srikanteshwara@broadcom.com>
+ *
+ *  Send feedback to: megaraidlinux.pdl@broadcom.com
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/compat.h>
+#include <linux/irq_poll.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+#include "megaraid_sas_fusion.h"
+#include "megaraid_sas.h"
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+
+struct dentry *megasas_debugfs_root;
+
+static ssize_t
+megasas_debugfs_read(struct file *filp, char __user *ubuf, size_t cnt,
+		      loff_t *ppos)
+{
+	struct megasas_debugfs_buffer *debug = filp->private_data;
+
+	if (!debug || !debug->buf)
+		return 0;
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, debug->buf, debug->len);
+}
+
+static int
+megasas_debugfs_raidmap_open(struct inode *inode, struct file *file)
+{
+	struct megasas_instance *instance = inode->i_private;
+	struct megasas_debugfs_buffer *debug;
+	struct fusion_context *fusion;
+
+	fusion = instance->ctrl_context;
+
+	debug = kzalloc(sizeof(struct megasas_debugfs_buffer), GFP_KERNEL);
+	if (!debug)
+		return -ENOMEM;
+
+	debug->buf = (void *)fusion->ld_drv_map[(instance->map_id & 1)];
+	debug->len = fusion->drv_map_sz;
+	file->private_data = debug;
+
+	return 0;
+}
+
+static int
+megasas_debugfs_release(struct inode *inode, struct file *file)
+{
+	struct megasas_debugfs_buffer *debug = file->private_data;
+
+	if (!debug)
+		return 0;
+
+	file->private_data = NULL;
+	kfree(debug);
+	return 0;
+}
+
+static const struct file_operations megasas_debugfs_raidmap_fops = {
+	.owner		= THIS_MODULE,
+	.open           = megasas_debugfs_raidmap_open,
+	.read           = megasas_debugfs_read,
+	.release        = megasas_debugfs_release,
+};
+
+/*
+ * megasas_init_debugfs :	Create debugfs root for megaraid_sas driver
+ */
+void megasas_init_debugfs(void)
+{
+	megasas_debugfs_root = debugfs_create_dir("megaraid_sas", NULL);
+	if (!megasas_debugfs_root)
+		pr_info("Cannot create debugfs root\n");
+}
+
+/*
+ * megasas_exit_debugfs :	Remove debugfs root for megaraid_sas driver
+ */
+void megasas_exit_debugfs(void)
+{
+	debugfs_remove_recursive(megasas_debugfs_root);
+}
+
+/*
+ * megasas_setup_debugfs :	Setup debugfs per Fusion adapter
+ * @instance:				Soft instance of adapter
+ */
+void
+megasas_setup_debugfs(struct megasas_instance *instance)
+{
+	char name[64];
+	struct fusion_context *fusion;
+
+	fusion = instance->ctrl_context;
+
+	if (fusion) {
+		snprintf(name, sizeof(name),
+			 "scsi_host%d", instance->host->host_no);
+		if (!instance->debugfs_root) {
+			instance->debugfs_root =
+				debugfs_create_dir(name, megasas_debugfs_root);
+			if (!instance->debugfs_root) {
+				dev_err(&instance->pdev->dev,
+					"Cannot create per adapter debugfs directory\n");
+				return;
+			}
+		}
+
+		snprintf(name, sizeof(name), "raidmap_dump");
+		instance->raidmap_dump =
+			debugfs_create_file(name, S_IRUGO,
+					    instance->debugfs_root, instance,
+					    &megasas_debugfs_raidmap_fops);
+		if (!instance->raidmap_dump) {
+			dev_err(&instance->pdev->dev,
+				"Cannot create raidmap debugfs file\n");
+			debugfs_remove(instance->debugfs_root);
+			return;
+		}
+	}
+
+}
+
+/*
+ * megasas_destroy_debugfs :	Destroy debugfs per Fusion adapter
+ * @instance:					Soft instance of adapter
+ */
+void megasas_destroy_debugfs(struct megasas_instance *instance)
+{
+	debugfs_remove_recursive(instance->debugfs_root);
+}
+
+#else
+void megasas_init_debugfs(void)
+{
+}
+void megasas_exit_debugfs(void)
+{
+}
+void megasas_setup_debugfs(struct megasas_instance *instance)
+{
+}
+void megasas_destroy_debugfs(struct megasas_instance *instance)
+{
+}
+#endif /*CONFIG_DEBUG_FS*/
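Usage note: given the names created above, the active RAID map for host N should be readable from userspace at /sys/kernel/debug/megaraid_sas/scsi_hostN/raidmap_dump, assuming debugfs is mounted at the conventional /sys/kernel/debug. Each open records a pointer to the currently active ld_drv_map copy, and reads return up to fusion->drv_map_sz bytes of it via simple_read_from_buffer().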
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 59ecbb3..50b8c1b 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -1,35 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  *  Linux MegaRAID driver for SAS based RAID controllers
  *
  *  Copyright (c) 2009-2013  LSI Corporation
- *  Copyright (c) 2013-2014  Avago Technologies
- *
- *  This program is free software; you can redistribute it and/or
- *  modify it under the terms of the GNU General Public License
- *  as published by the Free Software Foundation; either version 2
- *  of the License, or (at your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *  Copyright (c) 2013-2016  Avago Technologies
+ *  Copyright (c) 2016-2018  Broadcom Inc.
  *
  *  FILE: megaraid_sas_fp.c
  *
- *  Authors: Avago Technologies
+ *  Authors: Broadcom Inc.
  *           Sumant Patro
  *           Varad Talamacki
  *           Manoj Jose
- *           Kashyap Desai <kashyap.desai@avagotech.com>
- *           Sumit Saxena <sumit.saxena@avagotech.com>
+ *           Kashyap Desai <kashyap.desai@broadcom.com>
+ *           Sumit Saxena <sumit.saxena@broadcom.com>
  *
- *  Send feedback to: megaraidlinux.pdl@avagotech.com
- *
- *  Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
- *  San Jose, California 95131
+ *  Send feedback to: megaraidlinux.pdl@broadcom.com
  */
 
 #include <linux/kernel.h>
@@ -47,6 +33,7 @@
 #include <linux/compat.h>
 #include <linux/blkdev.h>
 #include <linux/poll.h>
+#include <linux/irq_poll.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -59,7 +46,7 @@
 
 #define LB_PENDING_CMDS_DEFAULT 4
 static unsigned int lb_pending_cmds = LB_PENDING_CMDS_DEFAULT;
-module_param(lb_pending_cmds, int, S_IRUGO);
+module_param(lb_pending_cmds, int, 0444);
 MODULE_PARM_DESC(lb_pending_cmds, "Change raid-1 load balancing outstanding "
 	"threshold. Valid Values are 1-128. Default: 4");
 
@@ -745,7 +732,7 @@
 		*pDevHandle = MR_PdDevHandleGet(pd, map);
 		*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
 		/* get second pd also for raid 1/10 fast path writes*/
-		if ((instance->adapter_type == VENTURA_SERIES) &&
+		if ((instance->adapter_type >= VENTURA_SERIES) &&
 		    (raid->level == 1) &&
 		    !io_info->isRead) {
 			r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
@@ -770,7 +757,7 @@
 	}
 
 	*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
-	if (instance->adapter_type == VENTURA_SERIES) {
+	if (instance->adapter_type >= VENTURA_SERIES) {
 		((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm =
 			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
 		io_info->span_arm =
@@ -861,7 +848,7 @@
 		*pDevHandle = MR_PdDevHandleGet(pd, map);
 		*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
 		/* get second pd also for raid 1/10 fast path writes*/
-		if ((instance->adapter_type == VENTURA_SERIES) &&
+		if ((instance->adapter_type >= VENTURA_SERIES) &&
 		    (raid->level == 1) &&
 		    !io_info->isRead) {
 			r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
@@ -888,7 +875,7 @@
 	}
 
 	*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
-	if (instance->adapter_type == VENTURA_SERIES) {
+	if (instance->adapter_type >= VENTURA_SERIES) {
 		((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm =
 				(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
 		io_info->span_arm =
@@ -903,6 +890,77 @@
 }
 
 /*
+ * mr_get_phy_params_r56_rmw -  Calculate parameters for R56 CTIO write operation
+ * @instance:			Adapter soft state
+ * @ld:				LD index
+ * @stripNo:			Strip Number
+ * @io_info:			IO info structure pointer
+ * @pRAID_Context:		RAID context pointer
+ * @map:			RAID map pointer
+ *
+ * This routine calculates the logical arm, data arm, row number and parity arm
+ * for R56 CTIO write operation.
+ */
+static void mr_get_phy_params_r56_rmw(struct megasas_instance *instance,
+			    u32 ld, u64 stripNo,
+			    struct IO_REQUEST_INFO *io_info,
+			    struct RAID_CONTEXT_G35 *pRAID_Context,
+			    struct MR_DRV_RAID_MAP_ALL *map)
+{
+	struct MR_LD_RAID  *raid = MR_LdRaidGet(ld, map);
+	u8          span, dataArms, arms, dataArm, logArm;
+	s8          rightmostParityArm, PParityArm;
+	u64         rowNum;
+	u64 *pdBlock = &io_info->pdBlock;
+
+	dataArms = raid->rowDataSize;
+	arms = raid->rowSize;
+
+	rowNum =  mega_div64_32(stripNo, dataArms);
+	/* parity disk arm, first arm is 0 */
+	rightmostParityArm = (arms - 1) - mega_mod64(rowNum, arms);
+
+	/* logical arm within row */
+	logArm =  mega_mod64(stripNo, dataArms);
+	/* physical arm for data */
+	dataArm = mega_mod64((rightmostParityArm + 1 + logArm), arms);
+
+	if (raid->spanDepth == 1) {
+		span = 0;
+	} else {
+		span = (u8)MR_GetSpanBlock(ld, rowNum, pdBlock, map);
+		if (span == SPAN_INVALID)
+			return;
+	}
+
+	if (raid->level == 6) {
+		/* P parity arm; note this can go negative, adjusted below if it does */
+		PParityArm = (arms - 2) - mega_mod64(rowNum, arms);
+
+		if (PParityArm < 0)
+			PParityArm += arms;
+
+		/* rightmostParityArm is P-Parity for RAID 5 and Q-Parity for RAID 6 */
+		pRAID_Context->flow_specific.r56_arm_map = rightmostParityArm;
+		pRAID_Context->flow_specific.r56_arm_map |=
+				    (u16)(PParityArm << RAID_CTX_R56_P_ARM_SHIFT);
+	} else {
+		pRAID_Context->flow_specific.r56_arm_map |=
+				    (u16)(rightmostParityArm << RAID_CTX_R56_P_ARM_SHIFT);
+	}
+
+	pRAID_Context->reg_lock_row_lba = cpu_to_le64(rowNum);
+	pRAID_Context->flow_specific.r56_arm_map |=
+				   (u16)(logArm << RAID_CTX_R56_LOG_ARM_SHIFT);
+	cpu_to_le16s(&pRAID_Context->flow_specific.r56_arm_map);
+	pRAID_Context->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | dataArm;
+	pRAID_Context->raid_flags = (MR_RAID_FLAGS_IO_SUB_TYPE_R56_DIV_OFFLOAD <<
+				    MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
+
+	return;
+}
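A worked example may make the arm arithmetic above easier to follow (the geometry is hypothetical):

/* RAID 6, rowSize (arms) = 4, rowDataSize (dataArms) = 2, stripNo = 5:
 *
 *   rowNum             = 5 / 2             = 2
 *   rightmostParityArm = (4 - 1) - (2 % 4) = 1   (Q parity)
 *   logArm             = 5 % 2             = 1
 *   dataArm            = (1 + 1 + 1) % 4   = 3
 *   PParityArm         = (4 - 2) - (2 % 4) = 0   (already non-negative,
 *                                                 so no wrap needed)
 */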
+
+/*
 ******************************************************************************
 *
 * MR_BuildRaidContext function
@@ -968,6 +1026,7 @@
 	stripSize = 1 << raid->stripeShift;
 	stripe_mask = stripSize-1;
 
+	io_info->data_arms = raid->rowDataSize;
 
 	/*
 	 * calculate starting row and stripe, and number of strips and rows
@@ -1109,6 +1168,13 @@
 	/* save pointer to raid->LUN array */
 	*raidLUN = raid->LUN;
 
+	/* Aero R5/6 Division Offload for WRITE */
+	if (fusion->r56_div_offload && (raid->level >= 5) && !isRead) {
+		mr_get_phy_params_r56_rmw(instance, ld, start_strip, io_info,
+				       (struct RAID_CONTEXT_G35 *)pRAID_Context,
+				       map);
+		return true;
+	}
 
 	/*Get Phy Params only if FP capable, or else leave it to MR firmware
 	  to do the calculation.*/
@@ -1266,7 +1332,7 @@
 
 	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
 		ld = MR_TargetIdToLdGet(ldCount, drv_map);
-		if (ld >= MAX_LOGICAL_DRIVES_EXT) {
+		if (ld >= MAX_LOGICAL_DRIVES_EXT - 1) {
 			lbInfo[ldCount].loadBalanceFlag = 0;
 			continue;
 		}
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index c7f95ba..e301458 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -1,34 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  *  Linux MegaRAID driver for SAS based RAID controllers
  *
  *  Copyright (c) 2009-2013  LSI Corporation
- *  Copyright (c) 2013-2014  Avago Technologies
- *
- *  This program is free software; you can redistribute it and/or
- *  modify it under the terms of the GNU General Public License
- *  as published by the Free Software Foundation; either version 2
- *  of the License, or (at your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *  Copyright (c) 2013-2016  Avago Technologies
+ *  Copyright (c) 2016-2018  Broadcom Inc.
  *
  *  FILE: megaraid_sas_fusion.c
  *
- *  Authors: Avago Technologies
+ *  Authors: Broadcom Inc.
  *           Sumant Patro
  *           Adam Radford
- *           Kashyap Desai <kashyap.desai@avagotech.com>
- *           Sumit Saxena <sumit.saxena@avagotech.com>
+ *           Kashyap Desai <kashyap.desai@broadcom.com>
+ *           Sumit Saxena <sumit.saxena@broadcom.com>
  *
- *  Send feedback to: megaraidlinux.pdl@avagotech.com
- *
- *  Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
- *  San Jose, California 95131
+ *  Send feedback to: megaraidlinux.pdl@broadcom.com
  */
 
 #include <linux/kernel.h>
@@ -48,6 +34,8 @@
 #include <linux/mutex.h>
 #include <linux/poll.h>
 #include <linux/vmalloc.h>
+#include <linux/workqueue.h>
+#include <linux/irq_poll.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -74,7 +62,7 @@
 megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd);
 int megasas_alloc_cmds(struct megasas_instance *instance);
 int
-megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs);
+megasas_clear_intr_fusion(struct megasas_instance *instance);
 int
 megasas_issue_polled(struct megasas_instance *instance,
 		     struct megasas_cmd *cmd);
@@ -95,6 +83,65 @@
 static void megasas_free_reply_fusion(struct megasas_instance *instance);
 static inline
 void megasas_configure_queue_sizes(struct megasas_instance *instance);
+static void megasas_fusion_crash_dump(struct megasas_instance *instance);
+extern u32 megasas_readl(struct megasas_instance *instance,
+			 const volatile void __iomem *addr);
+
+/**
+ * megasas_adp_reset_wait_for_ready -	initiate chip reset and wait for
+ *					controller to come to ready state
+ * @instance:				adapter's soft state
+ * @do_adp_reset:			If true, do a chip reset
+ * @ocr_context:			If called from OCR context this will
+ *					be set to 1, else 0
+ *
+ * This function initiates a chip reset followed by a wait for the controller
+ * to transition to the ready state.
+ * During this, the driver blocks all userspace access to PCI config space.
+ */
+int
+megasas_adp_reset_wait_for_ready(struct megasas_instance *instance,
+				 bool do_adp_reset,
+				 int ocr_context)
+{
+	int ret = FAILED;
+
+	/*
+	 * Block access to PCI config space from userspace
+	 * when diag reset is initiated from driver
+	 */
+	if (megasas_dbg_lvl & OCR_DEBUG)
+		dev_info(&instance->pdev->dev,
+			 "Block access to PCI config space %s %d\n",
+			 __func__, __LINE__);
+
+	pci_cfg_access_lock(instance->pdev);
+
+	if (do_adp_reset) {
+		if (instance->instancet->adp_reset
+			(instance, instance->reg_set))
+			goto out;
+	}
+
+	/* Wait for FW to become ready */
+	if (megasas_transition_to_ready(instance, ocr_context)) {
+		dev_warn(&instance->pdev->dev,
+			 "Failed to transition controller to ready for scsi%d.\n",
+			 instance->host->host_no);
+		goto out;
+	}
+
+	ret = SUCCESS;
+out:
+	if (megasas_dbg_lvl & OCR_DEBUG)
+		dev_info(&instance->pdev->dev,
+			 "Unlock access to PCI config space %s %d\n",
+			 __func__, __LINE__);
+
+	pci_cfg_access_unlock(instance->pdev);
+
+	return ret;
+}
 
 /**
  * megasas_check_same_4gb_region -	check if allocation
@@ -143,7 +190,8 @@
 	writel(~MFI_FUSION_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
 
 	/* Dummy readl to force pci flush */
-	readl(&regs->outbound_intr_mask);
+	dev_info(&instance->pdev->dev, "%s is called outbound_intr_mask:0x%08x\n",
+		 __func__, readl(&regs->outbound_intr_mask));
 }
 
 /**
@@ -154,24 +202,27 @@
 megasas_disable_intr_fusion(struct megasas_instance *instance)
 {
 	u32 mask = 0xFFFFFFFF;
-	u32 status;
 	struct megasas_register_set __iomem *regs;
 	regs = instance->reg_set;
 	instance->mask_interrupts = 1;
 
 	writel(mask, &regs->outbound_intr_mask);
 	/* Dummy readl to force pci flush */
-	status = readl(&regs->outbound_intr_mask);
+	dev_info(&instance->pdev->dev, "%s is called outbound_intr_mask:0x%08x\n",
+		 __func__, readl(&regs->outbound_intr_mask));
 }
 
 int
-megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs)
+megasas_clear_intr_fusion(struct megasas_instance *instance)
 {
 	u32 status;
+	struct megasas_register_set __iomem *regs;
+	regs = instance->reg_set;
 	/*
 	 * Check if it is our interrupt
 	 */
-	status = readl(&regs->outbound_intr_status);
+	status = megasas_readl(instance,
+			       &regs->outbound_intr_status);
 
 	if (status & 1) {
 		writel(status, &regs->outbound_intr_status);
@@ -214,21 +265,17 @@
 }
 
 /**
- * megasas_fire_cmd_fusion -	Sends command to the FW
- * @instance:			Adapter soft state
- * @req_desc:			64bit Request descriptor
- *
- * Perform PCI Write.
+ * megasas_write_64bit_req_desc -	PCI writes 64bit request descriptor
+ * @instance:				Adapter soft state
+ * @req_desc:				64bit Request descriptor
  */
-
 static void
-megasas_fire_cmd_fusion(struct megasas_instance *instance,
+megasas_write_64bit_req_desc(struct megasas_instance *instance,
 		union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc)
 {
 #if defined(writeq) && defined(CONFIG_64BIT)
 	u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) |
 		le32_to_cpu(req_desc->u.low));
-
 	writeq(req_data, &instance->reg_set->inbound_low_queue_port);
 #else
 	unsigned long flags;
@@ -237,12 +284,30 @@
 		&instance->reg_set->inbound_low_queue_port);
 	writel(le32_to_cpu(req_desc->u.high),
 		&instance->reg_set->inbound_high_queue_port);
-	mmiowb();
 	spin_unlock_irqrestore(&instance->hba_lock, flags);
 #endif
 }
 
 /**
+ * megasas_fire_cmd_fusion -	Sends command to the FW
+ * @instance:			Adapter soft state
+ * @req_desc:			32bit or 64bit Request descriptor
+ *
+ * Perform PCI Write. The AERO series supports 32 bit request descriptors;
+ * adapters prior to the AERO series support only 64 bit descriptors.
+ */
+static void
+megasas_fire_cmd_fusion(struct megasas_instance *instance,
+		union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc)
+{
+	if (instance->atomic_desc_support)
+		writel(le32_to_cpu(req_desc->u.low),
+			&instance->reg_set->inbound_single_queue_port);
+	else
+		megasas_write_64bit_req_desc(instance, req_desc);
+}
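Design note: megasas_write_64bit_req_desc() above keeps two paths because a 64 bit descriptor must hit the queue port as a single unit. Where writeq exists, the MMIO write is atomic; otherwise the low and high halves go out as two writel calls under hba_lock so a concurrent submitter cannot interleave its halves between them. The atomic (32 bit) descriptor format needs neither, which is why megasas_fire_cmd_fusion() can post it with a bare writel to inbound_single_queue_port.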
+
+/**
  * megasas_fusion_update_can_queue -	Do all Adapter Queue depth related calculations here
  * @instance:							Adapter soft state
  * fw_boot_context:						Whether this function called during probe or after OCR
@@ -258,20 +323,18 @@
 {
 	u16 cur_max_fw_cmds = 0;
 	u16 ldio_threshold = 0;
-	struct megasas_register_set __iomem *reg_set;
 
-	reg_set = instance->reg_set;
-
-	/* ventura FW does not fill outbound_scratch_pad_3 with queue depth */
+	/* ventura FW does not fill outbound_scratch_pad_2 with queue depth */
 	if (instance->adapter_type < VENTURA_SERIES)
 		cur_max_fw_cmds =
-		readl(&instance->reg_set->outbound_scratch_pad_3) & 0x00FFFF;
+		megasas_readl(instance,
+			      &instance->reg_set->outbound_scratch_pad_2) & 0x00FFFF;
 
 	if (dual_qdepth_disable || !cur_max_fw_cmds)
-		cur_max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
+		cur_max_fw_cmds = instance->instancet->read_fw_status_reg(instance) & 0x00FFFF;
 	else
 		ldio_threshold =
-			(instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF) - MEGASAS_FUSION_IOCTL_CMDS;
+			(instance->instancet->read_fw_status_reg(instance) & 0x00FFFF) - MEGASAS_FUSION_IOCTL_CMDS;
 
 	dev_info(&instance->pdev->dev,
 		 "Current firmware supports maximum commands: %d\t LDIO threshold: %d\n",
@@ -471,7 +534,7 @@
 	return 0;
 }
 
-int
+static int
 megasas_alloc_cmdlist_fusion(struct megasas_instance *instance)
 {
 	u32 max_mpt_cmd, i, j;
@@ -510,7 +573,8 @@
 
 	return 0;
 }
-int
+
+static int
 megasas_alloc_request_fusion(struct megasas_instance *instance)
 {
 	struct fusion_context *fusion;
@@ -591,7 +655,7 @@
 	return 0;
 }
 
-int
+static int
 megasas_alloc_reply_fusion(struct megasas_instance *instance)
 {
 	int i, count;
@@ -668,7 +732,7 @@
 	return 0;
 }
 
-int
+static int
 megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
 {
 	int i, j, k, msix_count;
@@ -684,8 +748,9 @@
 	array_size = sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) *
 		     MAX_MSIX_QUEUES_FUSION;
 
-	fusion->rdpq_virt = pci_zalloc_consistent(instance->pdev, array_size,
-						  &fusion->rdpq_phys);
+	fusion->rdpq_virt = dma_alloc_coherent(&instance->pdev->dev,
+					       array_size, &fusion->rdpq_phys,
+					       GFP_KERNEL);
 	if (!fusion->rdpq_virt) {
 		dev_err(&instance->pdev->dev,
 			"Failed from %s %d\n",  __func__, __LINE__);
@@ -807,13 +872,11 @@
 
 	}
 
-	if (fusion->reply_frames_desc_pool)
-		dma_pool_destroy(fusion->reply_frames_desc_pool);
-	if (fusion->reply_frames_desc_pool_align)
-		dma_pool_destroy(fusion->reply_frames_desc_pool_align);
+	dma_pool_destroy(fusion->reply_frames_desc_pool);
+	dma_pool_destroy(fusion->reply_frames_desc_pool_align);
 
 	if (fusion->rdpq_virt)
-		pci_free_consistent(instance->pdev,
+		dma_free_coherent(&instance->pdev->dev,
 			sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION,
 			fusion->rdpq_virt, fusion->rdpq_phys);
 }
@@ -830,8 +893,7 @@
 			fusion->reply_frames_desc[0],
 			fusion->reply_frames_desc_phys[0]);
 
-	if (fusion->reply_frames_desc_pool)
-		dma_pool_destroy(fusion->reply_frames_desc_pool);
+	dma_pool_destroy(fusion->reply_frames_desc_pool);
 
 }
 
@@ -852,7 +914,7 @@
  * and is used as SMID of the cmd.
  * SMID value range is from 1 to max_fw_cmds.
  */
-int
+static int
 megasas_alloc_cmds_fusion(struct megasas_instance *instance)
 {
 	int i;
@@ -933,17 +995,22 @@
 {
 	int i;
 	struct megasas_header *frame_hdr = &cmd->frame->hdr;
-	struct fusion_context *fusion;
+	u32 status_reg;
 
 	u32 msecs = seconds * 1000;
 
-	fusion = instance->ctrl_context;
 	/*
 	 * Wait for cmd_status to change
 	 */
 	for (i = 0; (i < msecs) && (frame_hdr->cmd_status == 0xff); i += 20) {
 		rmb();
 		msleep(20);
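+		/*
+		 * i advances in 20 ms steps, so !(i % 5000) fires every
+		 * 5 seconds (including the first pass at i == 0),
+		 * bailing out of the poll early if the FW has faulted.
+		 */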
+		if (!(i % 5000)) {
+			status_reg = instance->instancet->read_fw_status_reg(instance)
+					& MFI_STATE_MASK;
+			if (status_reg == MFI_STATE_FAULT)
+				break;
+		}
 	}
 
 	if (frame_hdr->cmd_status == MFI_STAT_INVALID_STATUS)
@@ -974,9 +1041,10 @@
 	struct megasas_header *frame_hdr;
 	const char *sys_info;
 	MFI_CAPABILITIES *drv_ops;
-	u32 scratch_pad_2;
+	u32 scratch_pad_1;
 	ktime_t time;
 	bool cur_fw_64bit_dma_capable;
+	bool cur_intr_coalescing;
 
 	fusion = instance->ctrl_context;
 
@@ -985,14 +1053,14 @@
 
 	cmd = fusion->ioc_init_cmd;
 
-	scratch_pad_2 = readl
-		(&instance->reg_set->outbound_scratch_pad_2);
+	scratch_pad_1 = megasas_readl
+		(instance, &instance->reg_set->outbound_scratch_pad_1);
 
-	cur_rdpq_mode = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ? 1 : 0;
+	cur_rdpq_mode = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ? 1 : 0;
 
 	if (instance->adapter_type == INVADER_SERIES) {
 		cur_fw_64bit_dma_capable =
-			(scratch_pad_2 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET) ? true : false;
+			(scratch_pad_1 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET) ? true : false;
 
 		if (instance->consistent_mask_64bit && !cur_fw_64bit_dma_capable) {
 			dev_err(&instance->pdev->dev, "Driver was operating on 64bit "
@@ -1010,7 +1078,17 @@
 		goto fail_fw_init;
 	}
 
-	instance->fw_sync_cache_support = (scratch_pad_2 &
+	cur_intr_coalescing = (scratch_pad_1 & MR_INTR_COALESCING_SUPPORT_OFFSET) ?
+							true : false;
+
+	if ((instance->low_latency_index_start ==
+		MR_HIGH_IOPS_QUEUE_COUNT) && cur_intr_coalescing)
+		instance->perf_mode = MR_BALANCED_PERF_MODE;
+
+	dev_info(&instance->pdev->dev, "Performance mode :%s\n",
+		MEGASAS_PERF_MODE_2STR(instance->perf_mode));
+
+	instance->fw_sync_cache_support = (scratch_pad_1 &
 		MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
 	dev_info(&instance->pdev->dev, "FW supports sync cache\t: %s\n",
 		 instance->fw_sync_cache_support ? "Yes" : "No");
@@ -1043,9 +1121,7 @@
 
 	frame_hdr = &cmd->frame->hdr;
 	frame_hdr->cmd_status = 0xFF;
-	frame_hdr->flags = cpu_to_le16(
-		le16_to_cpu(frame_hdr->flags) |
-		MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
+	frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
 
 	init_frame->cmd	= MFI_CMD_INIT;
 	init_frame->cmd_status = 0xFF;
@@ -1072,6 +1148,7 @@
 	drv_ops->mfi_capabilities.support_qd_throttling = 1;
 	drv_ops->mfi_capabilities.support_pd_map_target_id = 1;
 	drv_ops->mfi_capabilities.support_nvme_passthru = 1;
+	drv_ops->mfi_capabilities.support_fw_exposed_dev_list = 1;
 
 	if (instance->consistent_mask_64bit)
 		drv_ops->mfi_capabilities.support_64bit_mode = 1;
@@ -1095,6 +1172,22 @@
 		cpu_to_le32(lower_32_bits(ioc_init_handle));
 	init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST));
 
+	/*
+	 * Each bit in replyqueue_mask represents one group of MSI-x vectors
+	 * (each group has 8 vectors)
+	 */
+	switch (instance->perf_mode) {
+	case MR_BALANCED_PERF_MODE:
+		init_frame->replyqueue_mask =
+		       cpu_to_le16(~(~0 << instance->low_latency_index_start/8));
+		break;
+	case MR_IOPS_PERF_MODE:
+		init_frame->replyqueue_mask =
+		       cpu_to_le16(~(~0 << instance->msix_vectors/8));
+		break;
+	}
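+	/*
+	 * Example: in MR_IOPS_PERF_MODE with 16 MSI-x vectors,
+	 * msix_vectors / 8 = 2 and ~(~0 << 2) = 0x3, i.e. the first
+	 * two groups of 8 vectors are set in the mask.
+	 */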
+
+
 	req_desc.u.low = cpu_to_le32(lower_32_bits(cmd->frame_phys_addr));
 	req_desc.u.high = cpu_to_le32(upper_32_bits(cmd->frame_phys_addr));
 	req_desc.MFAIo.RequestFlags =
@@ -1107,15 +1200,16 @@
 	instance->instancet->disable_intr(instance);
 
 	for (i = 0; i < (10 * 1000); i += 20) {
-		if (readl(&instance->reg_set->doorbell) & 1)
+		if (megasas_readl(instance, &instance->reg_set->doorbell) & 1)
 			msleep(20);
 		else
 			break;
 	}
 
-	megasas_fire_cmd_fusion(instance, &req_desc);
+	/* For AERO also, IOC_INIT requires 64 bit descriptor write */
+	megasas_write_64bit_req_desc(instance, &req_desc);
 
-	wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS);
+	wait_and_poll(instance, cmd, MFI_IO_TIMEOUT_SECS);
 
 	frame_hdr = &cmd->frame->hdr;
 	if (frame_hdr->cmd_status != 0) {
@@ -1123,6 +1217,17 @@
 		goto fail_fw_init;
 	}
 
+	if (instance->adapter_type >= AERO_SERIES) {
+		scratch_pad_1 = megasas_readl
+			(instance, &instance->reg_set->outbound_scratch_pad_1);
+
+		instance->atomic_desc_support =
+			(scratch_pad_1 & MR_ATOMIC_DESCRIPTOR_SUPPORT_OFFSET) ? 1 : 0;
+
+		dev_info(&instance->pdev->dev, "FW supports atomic descriptor\t: %s\n",
+			instance->atomic_desc_support ? "Yes" : "No");
+	}
+
 	return 0;
 
 fail_fw_init:
@@ -1145,7 +1250,7 @@
 int
 megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) {
 	int ret = 0;
-	u32 pd_seq_map_sz;
+	size_t pd_seq_map_sz;
 	struct megasas_cmd *cmd;
 	struct megasas_dcmd_frame *dcmd;
 	struct fusion_context *fusion = instance->ctrl_context;
@@ -1154,9 +1259,7 @@
 
 	pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id & 1)];
 	pd_seq_h = fusion->pd_seq_phys[(instance->pd_seq_map_id & 1)];
-	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
-			(sizeof(struct MR_PD_CFG_SEQ) *
-			(MAX_PHYSICAL_DEVICES - 1));
+	pd_seq_map_sz = struct_size(pd_sync, seq, MAX_PHYSICAL_DEVICES - 1);
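+	/*
+	 * struct_size() (include/linux/overflow.h) computes
+	 * sizeof(*pd_sync) + (MAX_PHYSICAL_DEVICES - 1) *
+	 * sizeof(*pd_sync->seq), equivalent to the open-coded sum it
+	 * replaces, but saturating on overflow.
+	 */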
 
 	cmd = megasas_get_cmd(instance);
 	if (!cmd) {
@@ -1328,7 +1431,6 @@
 	struct megasas_cmd *cmd;
 	struct megasas_dcmd_frame *dcmd;
 	u16 num_lds;
-	u32 size_sync_info;
 	struct fusion_context *fusion;
 	struct MR_LD_TARGET_SYNC *ci = NULL;
 	struct MR_DRV_RAID_MAP_ALL *map;
@@ -1357,8 +1459,6 @@
 
 	dcmd = &cmd->frame->dcmd;
 
-	size_sync_info = sizeof(struct MR_LD_TARGET_SYNC) *num_lds;
-
 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
 
 	ci = (struct MR_LD_TARGET_SYNC *)
@@ -1559,14 +1659,12 @@
 	fusion = instance->ctrl_context;
 	max_cmd = instance->max_fw_cmds;
 
-	if (instance->adapter_type == VENTURA_SERIES)
+	if (instance->adapter_type >= VENTURA_SERIES)
 		instance->max_mpt_cmds = instance->max_fw_cmds * RAID_1_PEER_CMDS;
 	else
 		instance->max_mpt_cmds = instance->max_fw_cmds;
 
-	instance->max_scsi_cmds = instance->max_fw_cmds -
-			(MEGASAS_FUSION_INTERNAL_CMDS +
-			MEGASAS_FUSION_IOCTL_CMDS);
+	instance->max_scsi_cmds = instance->max_fw_cmds - instance->max_mfi_cmds;
 	instance->cur_can_queue = instance->max_scsi_cmds;
 	instance->host->can_queue = instance->cur_can_queue;
 
@@ -1627,8 +1725,7 @@
 				  fusion->ioc_init_cmd->frame,
 				  fusion->ioc_init_cmd->frame_phys_addr);
 
-	if (fusion->ioc_init_cmd)
-		kfree(fusion->ioc_init_cmd);
+	kfree(fusion->ioc_init_cmd);
 }
 
 /**
@@ -1637,18 +1734,16 @@
  *
  * This is the main function for initializing firmware.
  */
-u32
+static u32
 megasas_init_adapter_fusion(struct megasas_instance *instance)
 {
-	struct megasas_register_set __iomem *reg_set;
 	struct fusion_context *fusion;
-	u32 scratch_pad_2;
+	u32 scratch_pad_1;
 	int i = 0, count;
+	u32 status_reg;
 
 	fusion = instance->ctrl_context;
 
-	reg_set = instance->reg_set;
-
 	megasas_fusion_update_can_queue(instance, PROBE_CONTEXT);
 
 	/*
@@ -1659,20 +1754,21 @@
 
 	megasas_configure_queue_sizes(instance);
 
-	scratch_pad_2 = readl(&instance->reg_set->outbound_scratch_pad_2);
-	/* If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
+	scratch_pad_1 = megasas_readl(instance,
+				      &instance->reg_set->outbound_scratch_pad_1);
+	/* If scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
 	 * Firmware support extended IO chain frame which is 4 times more than
 	 * legacy Firmware.
 	 * Legacy Firmware - Frame size is (8 * 128) = 1K
 	 * 1M IO Firmware  - Frame size is (8 * 128 * 4)  = 4K
 	 */
-	if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
+	if (scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
 		instance->max_chain_frame_sz =
-			((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
+			((scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
 			MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_1MB_IO;
 	else
 		instance->max_chain_frame_sz =
-			((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
+			((scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
 			MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_256K_IO;
 
 	if (instance->max_chain_frame_sz < MEGASAS_CHAIN_FRAME_SZ_MIN) {
@@ -1727,8 +1823,21 @@
 	if (megasas_alloc_cmds_fusion(instance))
 		goto fail_alloc_cmds;
 
-	if (megasas_ioc_init_fusion(instance))
-		goto fail_ioc_init;
+	if (megasas_ioc_init_fusion(instance)) {
+		status_reg = instance->instancet->read_fw_status_reg(instance);
+		if (((status_reg & MFI_STATE_MASK) == MFI_STATE_FAULT) &&
+		    (status_reg & MFI_RESET_ADAPTER)) {
+			/* Do a chip reset and then retry IOC INIT once */
+			if (megasas_adp_reset_wait_for_ready
+				(instance, true, 0) == FAILED)
+				goto fail_ioc_init;
+
+			if (megasas_ioc_init_fusion(instance))
+				goto fail_ioc_init;
+		} else {
+			goto fail_ioc_init;
+		}
+	}
 
 	megasas_display_intel_branding(instance);
 	if (megasas_get_ctrl_info(instance)) {
@@ -1740,6 +1849,7 @@
 
 	instance->flag_ieee = 1;
 	instance->r1_ldio_hint_default =  MR_R1_LDIO_PIGGYBACK_DEFAULT;
+	instance->threshold_reply_count = instance->max_fw_cmds / 4;
 	fusion->fast_path_io = 0;
 
 	if (megasas_allocate_raid_maps(instance))
@@ -1760,13 +1870,97 @@
 }
 
 /**
+ * megasas_fault_detect_work	-	Worker function of
+ *					FW fault handling workqueue.
+ * @work:				FW fault work struct
+ */
+static void
+megasas_fault_detect_work(struct work_struct *work)
+{
+	struct megasas_instance *instance =
+		container_of(work, struct megasas_instance,
+			     fw_fault_work.work);
+	u32 fw_state, dma_state, status;
+
+	/* Check the fw state */
+	fw_state = instance->instancet->read_fw_status_reg(instance) &
+			MFI_STATE_MASK;
+
+	if (fw_state == MFI_STATE_FAULT) {
+		dma_state = instance->instancet->read_fw_status_reg(instance) &
+				MFI_STATE_DMADONE;
+		/* Start collecting the crash dump, if the DMA-done bit is set */
+		if (instance->crash_dump_drv_support &&
+		    instance->crash_dump_app_support && dma_state) {
+			megasas_fusion_crash_dump(instance);
+		} else {
+			if (instance->unload == 0) {
+				status = megasas_reset_fusion(instance->host, 0);
+				if (status != SUCCESS) {
+					dev_err(&instance->pdev->dev,
+						"Failed from %s %d, do not re-arm timer\n",
+						__func__, __LINE__);
+					return;
+				}
+			}
+		}
+	}
+
+	if (instance->fw_fault_work_q)
+		queue_delayed_work(instance->fw_fault_work_q,
+			&instance->fw_fault_work,
+			msecs_to_jiffies(MEGASAS_WATCHDOG_THREAD_INTERVAL));
+}
+
+int
+megasas_fusion_start_watchdog(struct megasas_instance *instance)
+{
+	/* Check if the Fault WQ is already started */
+	if (instance->fw_fault_work_q)
+		return SUCCESS;
+
+	INIT_DELAYED_WORK(&instance->fw_fault_work, megasas_fault_detect_work);
+
+	snprintf(instance->fault_handler_work_q_name,
+		 sizeof(instance->fault_handler_work_q_name),
+		 "poll_megasas%d_status", instance->host->host_no);
+
+	instance->fw_fault_work_q =
+		create_singlethread_workqueue(instance->fault_handler_work_q_name);
+	if (!instance->fw_fault_work_q) {
+		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
+			__func__, __LINE__);
+		return FAILED;
+	}
+
+	queue_delayed_work(instance->fw_fault_work_q,
+			   &instance->fw_fault_work,
+			   msecs_to_jiffies(MEGASAS_WATCHDOG_THREAD_INTERVAL));
+
+	return SUCCESS;
+}
+
+void
+megasas_fusion_stop_watchdog(struct megasas_instance *instance)
+{
+	struct workqueue_struct *wq;
+
+	if (instance->fw_fault_work_q) {
+		wq = instance->fw_fault_work_q;
+		instance->fw_fault_work_q = NULL;
+		if (!cancel_delayed_work_sync(&instance->fw_fault_work))
+			flush_workqueue(wq);
+		destroy_workqueue(wq);
+	}
+}
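
The start/stop pair above brackets the adapter's operational window; stop NULLs fw_fault_work_q first so the worker's re-arm check fails, then cancels or flushes the pending work. A minimal caller-side sketch (the bring-up/tear-down hooks are assumptions, not code from this patch):

    /* Hypothetical call sites; only the watchdog calls are from this patch. */
    static int example_adapter_up(struct megasas_instance *instance)
    {
        /* ... firmware initialized, interrupts enabled ... */
        return megasas_fusion_start_watchdog(instance) == SUCCESS ? 0 : -ENODEV;
    }

    static void example_adapter_down(struct megasas_instance *instance)
    {
        /* Stop FW fault polling before tearing the adapter down. */
        megasas_fusion_stop_watchdog(instance);
    }
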
+
+/**
  * map_cmd_status -	Maps FW cmd status to OS cmd status
  * @fusion :		Fusion context
  * @scmd :		Pointer to SCSI command
  * @status :		Status of cmd returned by FW
  * @ext_status :	Ext status of cmd returned by FW
  * @data_length :	Data transfer length
  * @sense :		Sense buffer
  */
 
-void
+static void
 map_cmd_status(struct fusion_context *fusion,
 		struct scsi_cmnd *scmd, u8 status, u8 ext_status,
 		u32 data_length, u8 *sense)
@@ -1842,7 +2036,6 @@
 megasas_is_prp_possible(struct megasas_instance *instance,
 			struct scsi_cmnd *scmd, int sge_count)
 {
-	struct fusion_context *fusion;
 	int i;
 	u32 data_length = 0;
 	struct scatterlist *sg_scmd;
@@ -1851,7 +2044,6 @@
 
 	mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
 				MR_DEFAULT_NVME_PAGE_SIZE);
-	fusion = instance->ctrl_context;
 	data_length = scsi_bufflen(scmd);
 	sg_scmd = scsi_sglist(scmd);
 
@@ -1908,7 +2100,6 @@
 			    mega_mod64(sg_dma_address(sg_scmd),
 				       mr_nvme_pg_size)) {
 				build_prp = false;
-				atomic_inc(&instance->sge_holes_type1);
 				break;
 			}
 		}
@@ -1918,7 +2109,6 @@
 					sg_dma_len(sg_scmd)),
 					mr_nvme_pg_size))) {
 				build_prp = false;
-				atomic_inc(&instance->sge_holes_type2);
 				break;
 			}
 		}
@@ -1927,7 +2117,6 @@
 			if (mega_mod64(sg_dma_address(sg_scmd),
 				       mr_nvme_pg_size)) {
 				build_prp = false;
-				atomic_inc(&instance->sge_holes_type3);
 				break;
 			}
 		}
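
The three loops above enforce the NVMe PRP rule: within a scatter list, only the first element may begin mid-page and only the last may end mid-page; any interior hole disqualifies the IO from PRP framing. Condensed into one standalone check (simplified types, illustrative only):

    /* Condensed restatement of the PRP-eligibility rule checked above. */
    struct ex_sge { unsigned long long addr; unsigned int len; };

    static int ex_prp_possible(const struct ex_sge *sg, int n, unsigned int pg)
    {
        int i;

        for (i = 0; i < n; i++) {
            if (i != 0 && (sg[i].addr % pg))
                return 0;                  /* hole before this element */
            if (i != n - 1 && ((sg[i].addr + sg[i].len) % pg))
                return 0;                  /* hole after this element */
        }
        return 1;
    }
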
@@ -1964,12 +2153,9 @@
 	u32 first_prp_len;
 	bool build_prp = false;
 	int data_len = scsi_bufflen(scmd);
-	struct fusion_context *fusion;
 	u32 mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
 					MR_DEFAULT_NVME_PAGE_SIZE);
 
-	fusion = instance->ctrl_context;
-
 	build_prp = megasas_is_prp_possible(instance, scmd, sge_count);
 
 	if (!build_prp)
@@ -2063,7 +2249,6 @@
 	main_chain_element->Length =
 			cpu_to_le32(num_prp_in_chain * sizeof(u64));
 
-	atomic_inc(&instance->prp_sgl);
 	return build_prp;
 }
 
@@ -2138,7 +2323,6 @@
 			memset(sgl_ptr, 0, instance->max_chain_frame_sz);
 		}
 	}
-	atomic_inc(&instance->ieee_sgl);
 }
 
 /**
@@ -2189,7 +2373,7 @@
  *
  * Used to set the PD LBA in CDB for FP IOs
  */
-void
+static void
 megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
 		   struct IO_REQUEST_INFO *io_info, struct scsi_cmnd *scp,
 		   struct MR_DRV_RAID_MAP_ALL *local_map_ptr, u32 ref_tag)
@@ -2209,7 +2393,7 @@
 		cdb[0] =  MEGASAS_SCSI_VARIABLE_LENGTH_CMD;
 		cdb[7] =  MEGASAS_SCSI_ADDL_CDB_LEN;
 
-		if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+		if (scp->sc_data_direction == DMA_FROM_DEVICE)
 			cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_READ32;
 		else
 			cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_WRITE32;
@@ -2238,7 +2422,7 @@
 		cdb[31] = (u8)(num_blocks & 0xff);
 
 		/* set SCSI IO EEDPFlags */
-		if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) {
+		if (scp->sc_data_direction == DMA_FROM_DEVICE) {
 			io_request->EEDPFlags = cpu_to_le16(
 				MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG  |
 				MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
@@ -2450,9 +2634,10 @@
  *
  */
 static void
-megasas_set_raidflag_cpu_affinity(union RAID_CONTEXT_UNION *praid_context,
-				  struct MR_LD_RAID *raid, bool fp_possible,
-				  u8 is_read, u32 scsi_buff_len)
+megasas_set_raidflag_cpu_affinity(struct fusion_context *fusion,
+				union RAID_CONTEXT_UNION *praid_context,
+				struct MR_LD_RAID *raid, bool fp_possible,
+				u8 is_read, u32 scsi_buff_len)
 {
 	u8 cpu_sel = MR_RAID_CTX_CPUSEL_0;
 	struct RAID_CONTEXT_G35 *rctx_g35;
@@ -2510,11 +2695,11 @@
 	 * vs MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS.
 	 * IO Subtype is not bitmap.
 	 */
-	if ((raid->level == 1) && (!is_read)) {
-		if (scsi_buff_len > MR_LARGE_IO_MIN_SIZE)
-			praid_context->raid_context_g35.raid_flags =
-				(MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT
-				<< MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
+	if ((fusion->pcie_bw_limitation) && (raid->level == 1) && (!is_read) &&
+			(scsi_buff_len > MR_LARGE_IO_MIN_SIZE)) {
+		praid_context->raid_context_g35.raid_flags =
+			(MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT
+			<< MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
 	}
 }
 
@@ -2527,7 +2712,7 @@
  * Prepares the io_request and chain elements (sg_frame) for IO
  * The IO can be for PD (Fast Path) or LD
  */
-void
+static void
 megasas_build_ldio_fusion(struct megasas_instance *instance,
 			  struct scsi_cmnd *scp,
 			  struct megasas_cmd_fusion *cmd)
@@ -2537,27 +2722,27 @@
 	u32 start_lba_lo, start_lba_hi, device_id, datalength = 0;
 	u32 scsi_buff_len;
 	struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
-	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
 	struct IO_REQUEST_INFO io_info;
 	struct fusion_context *fusion;
 	struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
 	u8 *raidLUN;
 	unsigned long spinlock_flags;
-	union RAID_CONTEXT_UNION *praid_context;
 	struct MR_LD_RAID *raid = NULL;
 	struct MR_PRIV_DEVICE *mrdev_priv;
+	struct RAID_CONTEXT *rctx;
+	struct RAID_CONTEXT_G35 *rctx_g35;
 
 	device_id = MEGASAS_DEV_INDEX(scp);
 
 	fusion = instance->ctrl_context;
 
 	io_request = cmd->io_request;
-	io_request->RaidContext.raid_context.virtual_disk_tgt_id =
-		cpu_to_le16(device_id);
-	io_request->RaidContext.raid_context.status = 0;
-	io_request->RaidContext.raid_context.ex_status = 0;
+	rctx = &io_request->RaidContext.raid_context;
+	rctx_g35 = &io_request->RaidContext.raid_context_g35;
 
-	req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;
+	rctx->virtual_disk_tgt_id = cpu_to_le16(device_id);
+	rctx->status = 0;
+	rctx->ex_status = 0;
 
 	start_lba_lo = 0;
 	start_lba_hi = 0;
@@ -2620,8 +2805,9 @@
 	io_info.r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
 	scsi_buff_len = scsi_bufflen(scp);
 	io_request->DataLength = cpu_to_le32(scsi_buff_len);
+	io_info.data_arms = 1;
 
-	if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+	if (scp->sc_data_direction == DMA_FROM_DEVICE)
 		io_info.isRead = 1;
 
 	local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
@@ -2631,21 +2817,29 @@
 		raid = MR_LdRaidGet(ld, local_map_ptr);
 
 	if (!raid || (!fusion->fast_path_io)) {
-		io_request->RaidContext.raid_context.reg_lock_flags  = 0;
+		rctx->reg_lock_flags  = 0;
 		fp_possible = false;
 	} else {
-		if (MR_BuildRaidContext(instance, &io_info,
-					&io_request->RaidContext.raid_context,
+		if (MR_BuildRaidContext(instance, &io_info, rctx,
 					local_map_ptr, &raidLUN))
 			fp_possible = (io_info.fpOkForIo > 0) ? true : false;
 	}
 
-	cmd->request_desc->SCSIIO.MSIxIndex =
-		instance->reply_map[raw_smp_processor_id()];
+	if ((instance->perf_mode == MR_BALANCED_PERF_MODE) &&
+		atomic_read(&scp->device->device_busy) >
+		(io_info.data_arms * MR_DEVICE_HIGH_IOPS_DEPTH))
+		cmd->request_desc->SCSIIO.MSIxIndex =
+			mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) /
+				MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start);
+	else if (instance->msix_load_balance)
+		cmd->request_desc->SCSIIO.MSIxIndex =
+			(mega_mod64(atomic64_add_return(1, &instance->total_io_count),
+				    instance->msix_vectors));
+	else
+		cmd->request_desc->SCSIIO.MSIxIndex =
+			instance->reply_map[raw_smp_processor_id()];
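
The chain above steers completions three ways: a device already deeper in flight than its arms can service (balanced performance mode) gets batched onto the low-index, interrupt-coalesced queues; with MSI-x load balancing a global counter round-robins across all vectors; otherwise the reply lands on the queue mapped to the submitting CPU. As a standalone sketch (names and the mode encoding are stand-ins):

    /* Stand-in sketch of the reply-queue steering above. */
    static unsigned int ex_pick_reply_queue(int balanced, int load_balance,
                                            unsigned int device_busy,
                                            unsigned int high_iops_depth,
                                            unsigned long long io_count,
                                            unsigned int batch,
                                            unsigned int low_latency_queues,
                                            unsigned int nr_vectors,
                                            unsigned int cpu_queue)
    {
        if (balanced && device_busy > high_iops_depth)
            return (io_count / batch) % low_latency_queues; /* batched */
        if (load_balance)
            return io_count % nr_vectors;                   /* round robin */
        return cpu_queue;                  /* queue of the submitting CPU */
    }
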
 
-	praid_context = &io_request->RaidContext;
-
-	if (instance->adapter_type == VENTURA_SERIES) {
+	if (instance->adapter_type >= VENTURA_SERIES) {
 		/* FP for Optimal raid level 1.
 		 * All large RAID-1 writes (> 32 KiB, both WT and WB modes)
 		 * are built by the driver as LD I/Os.
@@ -2661,8 +2855,9 @@
 				(instance->host->can_queue)) {
 				fp_possible = false;
 				atomic_dec(&instance->fw_outstanding);
-			} else if ((scsi_buff_len > MR_LARGE_IO_MIN_SIZE) ||
-				   (atomic_dec_if_positive(&mrdev_priv->r1_ldio_hint) > 0)) {
+			} else if (fusion->pcie_bw_limitation &&
+				((scsi_buff_len > MR_LARGE_IO_MIN_SIZE) ||
+				   (atomic_dec_if_positive(&mrdev_priv->r1_ldio_hint) > 0))) {
 				fp_possible = false;
 				atomic_dec(&instance->fw_outstanding);
 				if (scsi_buff_len > MR_LARGE_IO_MIN_SIZE)
@@ -2681,17 +2876,17 @@
 			/* In ventura if stream detected for a read and it is
 			 * read ahead capable make this IO as LDIO
 			 */
-			if (is_stream_detected(&io_request->RaidContext.raid_context_g35))
+			if (is_stream_detected(rctx_g35))
 				fp_possible = false;
 		}
 
 		/* If raid is NULL, set CPU affinity to default CPU0 */
 		if (raid)
-			megasas_set_raidflag_cpu_affinity(praid_context,
+			megasas_set_raidflag_cpu_affinity(fusion, &io_request->RaidContext,
 				raid, fp_possible, io_info.isRead,
 				scsi_buff_len);
 		else
-			praid_context->raid_context_g35.routing_flags |=
+			rctx_g35->routing_flags |=
 				(MR_RAID_CTX_CPUSEL_0 << MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT);
 	}
 
@@ -2703,25 +2898,16 @@
 			(MPI2_REQ_DESCRIPT_FLAGS_FP_IO
 			 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
 		if (instance->adapter_type == INVADER_SERIES) {
-			if (io_request->RaidContext.raid_context.reg_lock_flags ==
-			    REGION_TYPE_UNUSED)
-				cmd->request_desc->SCSIIO.RequestFlags =
-					(MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
-					MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
-			io_request->RaidContext.raid_context.type
-				= MPI2_TYPE_CUDA;
-			io_request->RaidContext.raid_context.nseg = 0x1;
+			rctx->type = MPI2_TYPE_CUDA;
+			rctx->nseg = 0x1;
 			io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
-			io_request->RaidContext.raid_context.reg_lock_flags |=
+			rctx->reg_lock_flags |=
 			  (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
 			   MR_RL_FLAGS_SEQ_NUM_ENABLE);
-		} else if (instance->adapter_type == VENTURA_SERIES) {
-			io_request->RaidContext.raid_context_g35.nseg_type |=
-						(1 << RAID_CONTEXT_NSEG_SHIFT);
-			io_request->RaidContext.raid_context_g35.nseg_type |=
-						(MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT);
-			io_request->RaidContext.raid_context_g35.routing_flags |=
-						(1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
+		} else if (instance->adapter_type >= VENTURA_SERIES) {
+			rctx_g35->nseg_type |= (1 << RAID_CONTEXT_NSEG_SHIFT);
+			rctx_g35->nseg_type |= (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT);
+			rctx_g35->routing_flags |= (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
 			io_request->IoFlags |=
 				cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
 		}
@@ -2734,17 +2920,15 @@
 					&io_info, local_map_ptr);
 			scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG;
 			cmd->pd_r1_lb = io_info.pd_after_lb;
-			if (instance->adapter_type == VENTURA_SERIES)
-				io_request->RaidContext.raid_context_g35.span_arm
-					= io_info.span_arm;
+			if (instance->adapter_type >= VENTURA_SERIES)
+				rctx_g35->span_arm = io_info.span_arm;
 			else
-				io_request->RaidContext.raid_context.span_arm
-					= io_info.span_arm;
+				rctx->span_arm = io_info.span_arm;
 
 		} else
 			scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
 
-		if (instance->adapter_type == VENTURA_SERIES)
+		if (instance->adapter_type >= VENTURA_SERIES)
 			cmd->r1_alt_dev_handle = io_info.r1_alt_dev_handle;
 		else
 			cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
@@ -2762,31 +2946,26 @@
 		/* populate the LUN field */
 		memcpy(io_request->LUN, raidLUN, 8);
 	} else {
-		io_request->RaidContext.raid_context.timeout_value =
+		rctx->timeout_value =
 			cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec);
 		cmd->request_desc->SCSIIO.RequestFlags =
 			(MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
 			 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
 		if (instance->adapter_type == INVADER_SERIES) {
 			if (io_info.do_fp_rlbypass ||
-			(io_request->RaidContext.raid_context.reg_lock_flags
-					== REGION_TYPE_UNUSED))
+			(rctx->reg_lock_flags == REGION_TYPE_UNUSED))
 				cmd->request_desc->SCSIIO.RequestFlags =
 					(MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
 					MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
-			io_request->RaidContext.raid_context.type
-				= MPI2_TYPE_CUDA;
-			io_request->RaidContext.raid_context.reg_lock_flags |=
+			rctx->type = MPI2_TYPE_CUDA;
+			rctx->reg_lock_flags |=
 				(MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
-				 MR_RL_FLAGS_SEQ_NUM_ENABLE);
-			io_request->RaidContext.raid_context.nseg = 0x1;
-		} else if (instance->adapter_type == VENTURA_SERIES) {
-			io_request->RaidContext.raid_context_g35.routing_flags |=
-					(1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
-			io_request->RaidContext.raid_context_g35.nseg_type |=
-					(1 << RAID_CONTEXT_NSEG_SHIFT);
-			io_request->RaidContext.raid_context_g35.nseg_type |=
-					(MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT);
+					MR_RL_FLAGS_SEQ_NUM_ENABLE);
+			rctx->nseg = 0x1;
+		} else if (instance->adapter_type >= VENTURA_SERIES) {
+			rctx_g35->routing_flags |= (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
+			rctx_g35->nseg_type |= (1 << RAID_CONTEXT_NSEG_SHIFT);
+			rctx_g35->nseg_type |= (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT);
 		}
 		io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
 		io_request->DevHandle = cpu_to_le16(device_id);
@@ -2832,7 +3011,7 @@
 		device_id < instance->fw_supported_vd_count)) {
 
 		ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
-		if (ld >= instance->fw_supported_vd_count)
+		if (ld >= instance->fw_supported_vd_count - 1)
 			fp_possible = 0;
 		else {
 			raid = MR_LdRaidGet(ld, local_map_ptr);
@@ -2855,7 +3034,7 @@
 
 		/* set RAID context values */
 		pRAID_Context->config_seq_num = raid->seqNum;
-		if (instance->adapter_type != VENTURA_SERIES)
+		if (instance->adapter_type < VENTURA_SERIES)
 			pRAID_Context->reg_lock_flags = REGION_TYPE_SHARED_READ;
 		pRAID_Context->timeout_value =
 			cpu_to_le16(raid->fpIoTimeoutForLd);
@@ -2926,50 +3105,71 @@
 		<< MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
 
 	/* If FW supports PD sequence number */
-	if (instance->use_seqnum_jbod_fp &&
-		instance->pd_list[pd_index].driveType == TYPE_DISK) {
-		/* TgtId must be incremented by 255 as jbod seq number is index
-		 * below raid map
-		 */
-		 /* More than 256 PD/JBOD support for Ventura */
-		if (instance->support_morethan256jbod)
-			pRAID_Context->virtual_disk_tgt_id =
-				pd_sync->seq[pd_index].pd_target_id;
-		else
-			pRAID_Context->virtual_disk_tgt_id =
-				cpu_to_le16(device_id + (MAX_PHYSICAL_DEVICES - 1));
-		pRAID_Context->config_seq_num = pd_sync->seq[pd_index].seqNum;
-		io_request->DevHandle = pd_sync->seq[pd_index].devHandle;
-		if (instance->adapter_type == VENTURA_SERIES) {
-			io_request->RaidContext.raid_context_g35.routing_flags |=
-				(1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
-			io_request->RaidContext.raid_context_g35.nseg_type |=
-							(1 << RAID_CONTEXT_NSEG_SHIFT);
-			io_request->RaidContext.raid_context_g35.nseg_type |=
-							(MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT);
+	if (instance->support_seqnum_jbod_fp) {
+		if (instance->use_seqnum_jbod_fp &&
+			instance->pd_list[pd_index].driveType == TYPE_DISK) {
+
+			/* More than 256 PD/JBOD support for Ventura */
+			if (instance->support_morethan256jbod)
+				pRAID_Context->virtual_disk_tgt_id =
+					pd_sync->seq[pd_index].pd_target_id;
+			else
+				pRAID_Context->virtual_disk_tgt_id =
+					cpu_to_le16(device_id +
+					(MAX_PHYSICAL_DEVICES - 1));
+			pRAID_Context->config_seq_num =
+				pd_sync->seq[pd_index].seqNum;
+			io_request->DevHandle =
+				pd_sync->seq[pd_index].devHandle;
+			if (instance->adapter_type >= VENTURA_SERIES) {
+				io_request->RaidContext.raid_context_g35.routing_flags |=
+					(1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
+				io_request->RaidContext.raid_context_g35.nseg_type |=
+					(1 << RAID_CONTEXT_NSEG_SHIFT);
+				io_request->RaidContext.raid_context_g35.nseg_type |=
+					(MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT);
+			} else {
+				pRAID_Context->type = MPI2_TYPE_CUDA;
+				pRAID_Context->nseg = 0x1;
+				pRAID_Context->reg_lock_flags |=
+					(MR_RL_FLAGS_SEQ_NUM_ENABLE |
+					 MR_RL_FLAGS_GRANT_DESTINATION_CUDA);
+			}
 		} else {
-			pRAID_Context->type = MPI2_TYPE_CUDA;
-			pRAID_Context->nseg = 0x1;
-			pRAID_Context->reg_lock_flags |=
-				(MR_RL_FLAGS_SEQ_NUM_ENABLE|MR_RL_FLAGS_GRANT_DESTINATION_CUDA);
+			pRAID_Context->virtual_disk_tgt_id =
+				cpu_to_le16(device_id +
+				(MAX_PHYSICAL_DEVICES - 1));
+			pRAID_Context->config_seq_num = 0;
+			io_request->DevHandle = cpu_to_le16(0xFFFF);
 		}
-	} else if (fusion->fast_path_io) {
-		pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id);
-		pRAID_Context->config_seq_num = 0;
-		local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
-		io_request->DevHandle =
-			local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
 	} else {
-		/* Want to send all IO via FW path */
 		pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id);
 		pRAID_Context->config_seq_num = 0;
-		io_request->DevHandle = cpu_to_le16(0xFFFF);
+
+		if (fusion->fast_path_io) {
+			local_map_ptr =
+				fusion->ld_drv_map[(instance->map_id & 1)];
+			io_request->DevHandle =
+				local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
+		} else {
+			io_request->DevHandle = cpu_to_le16(0xFFFF);
+		}
 	}
 
 	cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
 
-	cmd->request_desc->SCSIIO.MSIxIndex =
-		instance->reply_map[raw_smp_processor_id()];
+	if ((instance->perf_mode == MR_BALANCED_PERF_MODE) &&
+		atomic_read(&scmd->device->device_busy) > MR_DEVICE_HIGH_IOPS_DEPTH)
+		cmd->request_desc->SCSIIO.MSIxIndex =
+			mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) /
+				MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start);
+	else if (instance->msix_load_balance)
+		cmd->request_desc->SCSIIO.MSIxIndex =
+			(mega_mod64(atomic64_add_return(1, &instance->total_io_count),
+				    instance->msix_vectors));
+	else
+		cmd->request_desc->SCSIIO.MSIxIndex =
+			instance->reply_map[raw_smp_processor_id()];
 
 	if (!fp_possible) {
 		/* system pd firmware path */
@@ -3009,7 +3209,7 @@
  * Invokes helper functions to prepare request frames
  * and sets flags appropriate for IO/Non-IO cmd
  */
-int
+static int
 megasas_build_io_fusion(struct megasas_instance *instance,
 			struct scsi_cmnd *scp,
 			struct megasas_cmd_fusion *cmd)
@@ -3073,7 +3273,7 @@
 		return 1;
 	}
 
-	if (instance->adapter_type == VENTURA_SERIES) {
+	if (instance->adapter_type >= VENTURA_SERIES) {
 		set_num_sge(&io_request->RaidContext.raid_context_g35, sge_count);
 		cpu_to_le16s(&io_request->RaidContext.raid_context_g35.routing_flags);
 		cpu_to_le16s(&io_request->RaidContext.raid_context_g35.nseg_type);
@@ -3088,9 +3288,9 @@
 
 	io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
 
-	if (scp->sc_data_direction == PCI_DMA_TODEVICE)
+	if (scp->sc_data_direction == DMA_TO_DEVICE)
 		io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_WRITE);
-	else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+	else if (scp->sc_data_direction == DMA_FROM_DEVICE)
 		io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_READ);
 
 	io_request->SGLOffset0 =
@@ -3123,9 +3323,9 @@
 /* megasas_prepare_secondRaid1_IO
  *  It prepares the second IO of a RAID 1 fast-path write
  */
-void megasas_prepare_secondRaid1_IO(struct megasas_instance *instance,
-			    struct megasas_cmd_fusion *cmd,
-			    struct megasas_cmd_fusion *r1_cmd)
+static void megasas_prepare_secondRaid1_IO(struct megasas_instance *instance,
+					   struct megasas_cmd_fusion *cmd,
+					   struct megasas_cmd_fusion *r1_cmd)
 {
 	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc, *req_desc2 = NULL;
 	struct fusion_context *fusion;
@@ -3149,9 +3349,9 @@
 	r1_cmd->request_desc->SCSIIO.DevHandle = cmd->r1_alt_dev_handle;
 	r1_cmd->io_request->DevHandle = cmd->r1_alt_dev_handle;
 	r1_cmd->r1_alt_dev_handle = cmd->io_request->DevHandle;
-	cmd->io_request->RaidContext.raid_context_g35.smid.peer_smid =
+	cmd->io_request->RaidContext.raid_context_g35.flow_specific.peer_smid =
 			cpu_to_le16(r1_cmd->index);
-	r1_cmd->io_request->RaidContext.raid_context_g35.smid.peer_smid =
+	r1_cmd->io_request->RaidContext.raid_context_g35.flow_specific.peer_smid =
 			cpu_to_le16(cmd->index);
 	/* MSIxIndex of both commands' request descriptors should be the same */
 	r1_cmd->request_desc->SCSIIO.MSIxIndex =
@@ -3174,9 +3374,6 @@
 	struct megasas_cmd_fusion *cmd, *r1_cmd = NULL;
 	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
 	u32 index;
-	struct fusion_context *fusion;
-
-	fusion = instance->ctrl_context;
 
 	if ((megasas_cmd_type(scmd) == READ_WRITE_LDIO) &&
 		instance->ldio_threshold &&
@@ -3272,7 +3469,7 @@
 
 	rctx_g35 = &cmd->io_request->RaidContext.raid_context_g35;
 	fusion = instance->ctrl_context;
-	peer_smid = le16_to_cpu(rctx_g35->smid.peer_smid);
+	peer_smid = le16_to_cpu(rctx_g35->flow_specific.peer_smid);
 
 	r1_cmd = fusion->cmd_list[peer_smid - 1];
 	scmd_local = cmd->scmd;
@@ -3311,8 +3508,9 @@
  * @instance:			Adapter soft state
  * Completes all commands that is in reply descriptor queue
  */
-int
-complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
+static int
+complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex,
+		    struct megasas_irq_context *irq_context)
 {
 	union MPI2_REPLY_DESCRIPTORS_UNION *desc;
 	struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
@@ -3385,7 +3583,7 @@
 				atomic_dec(&lbinfo->scsi_pending_cmds[cmd_fusion->pd_r1_lb]);
 				cmd_fusion->scmd->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
 			}
-			//Fall thru and complete IO
+			/* Fall through - and complete IO */
 		case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */
 			atomic_dec(&instance->fw_outstanding);
 			if (cmd_fusion->r1_alt_dev_handle == MR_DEVHANDLE_INVALID) {
@@ -3445,7 +3643,7 @@
 		 * number of replies and there are still more replies pending
 		 * in the reply queue to be completed
 		 */
-		if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
+		if (threshold_reply_count >= instance->threshold_reply_count) {
 			if (instance->msix_combined)
 				writel(((MSIxIndex & 0x7) << 24) |
 					fusion->last_reply_idx[MSIxIndex],
@@ -3455,39 +3653,102 @@
 					fusion->last_reply_idx[MSIxIndex],
 					instance->reply_post_host_index_addr[0]);
 			threshold_reply_count = 0;
+			if (irq_context) {
+				if (!irq_context->irq_poll_scheduled) {
+					irq_context->irq_poll_scheduled = true;
+					irq_context->irq_line_enable = true;
+					irq_poll_sched(&irq_context->irqpoll);
+				}
+				return num_completed;
+			}
 		}
 	}
 
-	if (!num_completed)
-		return IRQ_NONE;
+	if (num_completed) {
+		wmb();
+		if (instance->msix_combined)
+			writel(((MSIxIndex & 0x7) << 24) |
+				fusion->last_reply_idx[MSIxIndex],
+				instance->reply_post_host_index_addr[MSIxIndex/8]);
+		else
+			writel((MSIxIndex << 24) |
+				fusion->last_reply_idx[MSIxIndex],
+				instance->reply_post_host_index_addr[0]);
+		megasas_check_and_restore_queue_depth(instance);
+	}
+	return num_completed;
+}
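
The wmb() ahead of the final writel() above is the usual consumer-index publish pattern: all accesses to the reply descriptors must be visibly complete before the controller learns how far the driver has consumed. In miniature (names illustrative):

    /* Miniature of the publish step above. */
    static void ex_publish_consumer_index(void __iomem *host_index_reg,
                                          u32 msix, u32 last_idx)
    {
        wmb();  /* order all reply-descriptor accesses first */
        writel((msix << 24) | last_idx, host_index_reg);
    }
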
 
-	wmb();
-	if (instance->msix_combined)
-		writel(((MSIxIndex & 0x7) << 24) |
-			fusion->last_reply_idx[MSIxIndex],
-			instance->reply_post_host_index_addr[MSIxIndex/8]);
-	else
-		writel((MSIxIndex << 24) |
-			fusion->last_reply_idx[MSIxIndex],
-			instance->reply_post_host_index_addr[0]);
-	megasas_check_and_restore_queue_depth(instance);
-	return IRQ_HANDLED;
+/**
+ * megasas_enable_irq_poll() - enable irqpoll
+ * @instance:			Adapter soft state
+ */
+static void megasas_enable_irq_poll(struct megasas_instance *instance)
+{
+	u32 count, i;
+	struct megasas_irq_context *irq_ctx;
+
+	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+
+	for (i = 0; i < count; i++) {
+		irq_ctx = &instance->irq_context[i];
+		irq_poll_enable(&irq_ctx->irqpoll);
+	}
 }
 
 /**
  * megasas_sync_irqs -	Synchronizes all IRQs owned by adapter
  * @instance:			Adapter soft state
  */
-void megasas_sync_irqs(unsigned long instance_addr)
+static void megasas_sync_irqs(unsigned long instance_addr)
 {
 	u32 count, i;
 	struct megasas_instance *instance =
 		(struct megasas_instance *)instance_addr;
+	struct megasas_irq_context *irq_ctx;
 
 	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
 
-	for (i = 0; i < count; i++)
+	for (i = 0; i < count; i++) {
 		synchronize_irq(pci_irq_vector(instance->pdev, i));
+		irq_ctx = &instance->irq_context[i];
+		irq_poll_disable(&irq_ctx->irqpoll);
+		if (irq_ctx->irq_poll_scheduled) {
+			irq_ctx->irq_poll_scheduled = false;
+			enable_irq(irq_ctx->os_irq);
+		}
+	}
+}
+
+/**
+ * megasas_irqpoll() - process a queue for completed reply descriptors
+ * @irqpoll:	IRQ poll structure associated with queue to poll.
+ * @budget:	Threshold of reply descriptors to process per poll.
+ *
+ * Return: The number of entries processed.
+ */
+
+int megasas_irqpoll(struct irq_poll *irqpoll, int budget)
+{
+	struct megasas_irq_context *irq_ctx;
+	struct megasas_instance *instance;
+	int num_entries;
+
+	irq_ctx = container_of(irqpoll, struct megasas_irq_context, irqpoll);
+	instance = irq_ctx->instance;
+
+	if (irq_ctx->irq_line_enable) {
+		disable_irq(irq_ctx->os_irq);
+		irq_ctx->irq_line_enable = false;
+	}
+
+	num_entries = complete_cmd_fusion(instance, irq_ctx->MSIxIndex, irq_ctx);
+	if (num_entries < budget) {
+		irq_poll_complete(irqpoll);
+		irq_ctx->irq_poll_scheduled = false;
+		enable_irq(irq_ctx->os_irq);
+	}
+
+	return num_entries;
 }
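
megasas_irqpoll() follows the standard irq_poll contract: the hard IRQ masks its own line and schedules the poller, and the poller re-enables the line once it drains fewer entries than its budget. The registration side would look roughly like this (the budget value and call site are assumptions; only irq_poll_init()'s signature is from the kernel API):

    /* Hypothetical per-vector setup for the handler above. */
    irq_ctx->os_irq = pci_irq_vector(instance->pdev, i);
    irq_ctx->irq_poll_scheduled = false;
    irq_poll_init(&irq_ctx->irqpoll, 256 /* budget, assumed */, megasas_irqpoll);
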
 
 /**
@@ -3496,77 +3757,54 @@
  *
  * Tasklet to complete cmds
  */
-void
+static void
 megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
 {
 	struct megasas_instance *instance =
 		(struct megasas_instance *)instance_addr;
-	unsigned long flags;
 	u32 count, MSIxIndex;
 
 	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
 
 	/* If we have already declared adapter dead, do not complete cmds */
-	spin_lock_irqsave(&instance->hba_lock, flags);
-	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
-		spin_unlock_irqrestore(&instance->hba_lock, flags);
+	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
 		return;
-	}
-	spin_unlock_irqrestore(&instance->hba_lock, flags);
 
 	for (MSIxIndex = 0 ; MSIxIndex < count; MSIxIndex++)
-		complete_cmd_fusion(instance, MSIxIndex);
+		complete_cmd_fusion(instance, MSIxIndex, NULL);
 }
 
 /**
  * megasas_isr_fusion - isr entry point
  */
-irqreturn_t megasas_isr_fusion(int irq, void *devp)
+static irqreturn_t megasas_isr_fusion(int irq, void *devp)
 {
 	struct megasas_irq_context *irq_context = devp;
 	struct megasas_instance *instance = irq_context->instance;
-	u32 mfiStatus, fw_state, dma_state;
+	u32 mfiStatus;
 
 	if (instance->mask_interrupts)
 		return IRQ_NONE;
 
+#if defined(ENABLE_IRQ_POLL)
+	if (irq_context->irq_poll_scheduled)
+		return IRQ_HANDLED;
+#endif
+
 	if (!instance->msix_vectors) {
-		mfiStatus = instance->instancet->clear_intr(instance->reg_set);
+		mfiStatus = instance->instancet->clear_intr(instance);
 		if (!mfiStatus)
 			return IRQ_NONE;
 	}
 
 	/* If we are resetting, bail */
 	if (test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags)) {
-		instance->instancet->clear_intr(instance->reg_set);
+		instance->instancet->clear_intr(instance);
 		return IRQ_HANDLED;
 	}
 
-	if (!complete_cmd_fusion(instance, irq_context->MSIxIndex)) {
-		instance->instancet->clear_intr(instance->reg_set);
-		/* If we didn't complete any commands, check for FW fault */
-		fw_state = instance->instancet->read_fw_status_reg(
-			instance->reg_set) & MFI_STATE_MASK;
-		dma_state = instance->instancet->read_fw_status_reg
-			(instance->reg_set) & MFI_STATE_DMADONE;
-		if (instance->crash_dump_drv_support &&
-			instance->crash_dump_app_support) {
-			/* Start collecting crash, if DMA bit is done */
-			if ((fw_state == MFI_STATE_FAULT) && dma_state)
-				schedule_work(&instance->crash_init);
-			else if (fw_state == MFI_STATE_FAULT) {
-				if (instance->unload == 0)
-					schedule_work(&instance->work_init);
-			}
-		} else if (fw_state == MFI_STATE_FAULT) {
-			dev_warn(&instance->pdev->dev, "Iop2SysDoorbellInt"
-			       "for scsi%d\n", instance->host->host_no);
-			if (instance->unload == 0)
-				schedule_work(&instance->work_init);
-		}
-	}
-
-	return IRQ_HANDLED;
+	return complete_cmd_fusion(instance, irq_context->MSIxIndex, irq_context)
+			? IRQ_HANDLED : IRQ_NONE;
 }
 
 /**
@@ -3575,7 +3813,7 @@
  * mfi_cmd:			megasas_cmd pointer
  *
  */
-void
+static void
 build_mpt_mfi_pass_thru(struct megasas_instance *instance,
 			struct megasas_cmd *mfi_cmd)
 {
@@ -3633,7 +3871,7 @@
  * @cmd:			mfi cmd to build
  *
  */
-union MEGASAS_REQUEST_DESCRIPTOR_UNION *
+static union MEGASAS_REQUEST_DESCRIPTOR_UNION *
 build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
 {
 	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc = NULL;
@@ -3659,7 +3897,7 @@
  * @cmd:			mfi cmd pointer
  *
  */
-void
+static void
 megasas_issue_dcmd_fusion(struct megasas_instance *instance,
 			  struct megasas_cmd *cmd)
 {
@@ -3692,9 +3930,9 @@
  * @instance:			Adapter soft state
  */
 static u32
-megasas_read_fw_status_reg_fusion(struct megasas_register_set __iomem *regs)
+megasas_read_fw_status_reg_fusion(struct megasas_instance *instance)
 {
-	return readl(&(regs)->outbound_scratch_pad);
+	return megasas_readl(instance, &instance->reg_set->outbound_scratch_pad_0);
 }
 
 /**
@@ -3756,11 +3994,12 @@
 	writel(MPI2_WRSEQ_6TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
 
 	/* Check that the diag write enable (DRWE) bit is on */
-	host_diag = readl(&instance->reg_set->fusion_host_diag);
+	host_diag = megasas_readl(instance, &instance->reg_set->fusion_host_diag);
 	retry = 0;
 	while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
 		msleep(100);
-		host_diag = readl(&instance->reg_set->fusion_host_diag);
+		host_diag = megasas_readl(instance,
+					  &instance->reg_set->fusion_host_diag);
 		if (retry++ == 100) {
 			dev_warn(&instance->pdev->dev,
 				"Host diag unlock failed from %s %d\n",
@@ -3777,11 +4016,12 @@
 	msleep(3000);
 
 	/* Make sure reset adapter bit is cleared */
-	host_diag = readl(&instance->reg_set->fusion_host_diag);
+	host_diag = megasas_readl(instance, &instance->reg_set->fusion_host_diag);
 	retry = 0;
 	while (host_diag & HOST_DIAG_RESET_ADAPTER) {
 		msleep(100);
-		host_diag = readl(&instance->reg_set->fusion_host_diag);
+		host_diag = megasas_readl(instance,
+					  &instance->reg_set->fusion_host_diag);
 		if (retry++ == 1000) {
 			dev_warn(&instance->pdev->dev,
 				"Diag reset adapter never cleared %s %d\n",
@@ -3792,14 +4032,14 @@
 	if (host_diag & HOST_DIAG_RESET_ADAPTER)
 		return -1;
 
-	abs_state = instance->instancet->read_fw_status_reg(instance->reg_set)
+	abs_state = instance->instancet->read_fw_status_reg(instance)
 			& MFI_STATE_MASK;
 	retry = 0;
 
 	while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
 		msleep(100);
 		abs_state = instance->instancet->
-			read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
+			read_fw_status_reg(instance) & MFI_STATE_MASK;
 	}
 	if (abs_state <= MFI_STATE_FW_INIT) {
 		dev_warn(&instance->pdev->dev,
@@ -3822,21 +4062,68 @@
 	return 0;
 }
 
+/**
+ * megasas_trigger_snap_dump -	Trigger snap dump in FW
+ * @instance:			Soft instance of adapter
+ */
+static inline void megasas_trigger_snap_dump(struct megasas_instance *instance)
+{
+	int j;
+	u32 fw_state, abs_state;
+
+	if (!instance->disableOnlineCtrlReset) {
+		dev_info(&instance->pdev->dev, "Trigger snap dump\n");
+		writel(MFI_ADP_TRIGGER_SNAP_DUMP,
+		       &instance->reg_set->doorbell);
+		readl(&instance->reg_set->doorbell);
+	}
+
+	for (j = 0; j < instance->snapdump_wait_time; j++) {
+		abs_state = instance->instancet->read_fw_status_reg(instance);
+		fw_state = abs_state & MFI_STATE_MASK;
+		if (fw_state == MFI_STATE_FAULT) {
+			dev_printk(KERN_ERR, &instance->pdev->dev,
+				   "FW in FAULT state Fault code:0x%x subcode:0x%x func:%s\n",
+				   abs_state & MFI_STATE_FAULT_CODE,
+				   abs_state & MFI_STATE_FAULT_SUBCODE, __func__);
+			return;
+		}
+		msleep(1000);
+	}
+}
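
Note the readl() immediately after the doorbell writel() above: it flushes the posted write so the trigger reaches the controller before the polling loop starts. The idiom in isolation:

    /* Doorbell write plus read-back to flush the posted write. */
    static void ex_ring_doorbell(void __iomem *doorbell, u32 cmd)
    {
        writel(cmd, doorbell);
        (void)readl(doorbell);  /* force the write out to the device */
    }
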
+
 /* This function waits for outstanding commands on fusion to complete */
-int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
-					int reason, int *convert)
+static int
+megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
+				    int reason, int *convert)
 {
 	int i, outstanding, retval = 0, hb_seconds_missed = 0;
-	u32 fw_state;
+	u32 fw_state, abs_state;
+	u32 waittime_for_io_completion;
 
-	for (i = 0; i < resetwaittime; i++) {
+	waittime_for_io_completion =
+		min_t(u32, resetwaittime,
+			(resetwaittime - instance->snapdump_wait_time));
+
+	if (reason == MFI_IO_TIMEOUT_OCR) {
+		dev_info(&instance->pdev->dev,
+			"MFI command is timed out\n");
+		megasas_complete_cmd_dpc_fusion((unsigned long)instance);
+		if (instance->snapdump_wait_time)
+			megasas_trigger_snap_dump(instance);
+		retval = 1;
+		goto out;
+	}
+
+	for (i = 0; i < waittime_for_io_completion; i++) {
 		/* Check if firmware is in fault state */
-		fw_state = instance->instancet->read_fw_status_reg(
-			instance->reg_set) & MFI_STATE_MASK;
+		abs_state = instance->instancet->read_fw_status_reg(instance);
+		fw_state = abs_state & MFI_STATE_MASK;
 		if (fw_state == MFI_STATE_FAULT) {
-			dev_warn(&instance->pdev->dev, "Found FW in FAULT state,"
-			       " will reset adapter scsi%d.\n",
-				instance->host->host_no);
+			dev_printk(KERN_ERR, &instance->pdev->dev,
+				   "FW in FAULT state Fault code:0x%x subcode:0x%x func:%s\n",
+				   abs_state & MFI_STATE_FAULT_CODE,
+				   abs_state & MFI_STATE_FAULT_SUBCODE, __func__);
 			megasas_complete_cmd_dpc_fusion((unsigned long)instance);
 			if (instance->requestorId && reason) {
 				dev_warn(&instance->pdev->dev, "SR-IOV Found FW in FAULT"
@@ -3850,13 +4137,6 @@
 			goto out;
 		}
 
-		if (reason == MFI_IO_TIMEOUT_OCR) {
-			dev_info(&instance->pdev->dev,
-				"MFI IO is timed out, initiating OCR\n");
-			megasas_complete_cmd_dpc_fusion((unsigned long)instance);
-			retval = 1;
-			goto out;
-		}
 
 		/* If SR-IOV VF mode & heartbeat timeout, don't wait */
 		if (instance->requestorId && !reason) {
@@ -3901,6 +4181,12 @@
 		msleep(1000);
 	}
 
+	if (instance->snapdump_wait_time) {
+		megasas_trigger_snap_dump(instance);
+		retval = 1;
+		goto out;
+	}
+
 	if (atomic_read(&instance->fw_outstanding)) {
 		dev_err(&instance->pdev->dev, "pending commands remain after waiting, "
 		       "will reset adapter scsi%d.\n",
@@ -3908,6 +4194,7 @@
 		*convert = 1;
 		retval = 1;
 	}
+
 out:
 	return retval;
 }
@@ -3932,7 +4219,7 @@
  * megasas_refire_mgmt_cmd :	Re-fire management commands
  * @instance:				Controller's soft instance
 */
-void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
+static void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
 {
 	int j;
 	struct megasas_cmd_fusion *cmd_fusion;
@@ -3985,6 +4272,13 @@
 			}
 
 			break;
+		case MFI_CMD_TOOLBOX:
+			if (!instance->support_pci_lane_margining) {
+				cmd_mfi->frame->hdr.cmd_status = MFI_STAT_INVALID_CMD;
+				result = COMPLETE_CMD;
+			}
+
+			break;
 		default:
 			break;
 		}
@@ -4208,6 +4502,7 @@
 			instance->instancet->disable_intr(instance);
 			megasas_sync_irqs((unsigned long)instance);
 			instance->instancet->enable_intr(instance);
+			megasas_enable_irq_poll(instance);
 			if (scsi_lookup->scmd == NULL)
 				break;
 		}
@@ -4221,6 +4516,7 @@
 		megasas_sync_irqs((unsigned long)instance);
 		rc = megasas_track_scsiio(instance, id, channel);
 		instance->instancet->enable_intr(instance);
+		megasas_enable_irq_poll(instance);
 
 		break;
 	case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
@@ -4313,17 +4609,11 @@
 {
 	struct megasas_instance *instance;
 	u16 smid, devhandle;
-	struct fusion_context *fusion;
 	int ret;
 	struct MR_PRIV_DEVICE *mr_device_priv_data;
 	mr_device_priv_data = scmd->device->hostdata;
 
-
 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
-	fusion = instance->ctrl_context;
-
-	scmd_printk(KERN_INFO, scmd, "task abort called for scmd(%p)\n", scmd);
-	scsi_print_command(scmd);
 
 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
 		dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL,"
@@ -4340,7 +4630,6 @@
 		goto out;
 	}
 
-
 	if (!mr_device_priv_data->is_tm_capable) {
 		ret = FAILED;
 		goto out;
@@ -4353,7 +4642,7 @@
 	if (!smid) {
 		ret = SUCCESS;
 		scmd_printk(KERN_NOTICE, scmd, "Command for which abort is"
-			" issued is not found in oustanding commands\n");
+			" issued is not found in outstanding commands\n");
 		mutex_unlock(&instance->reset_mutex);
 		goto out;
 	}
@@ -4368,7 +4657,7 @@
 		goto out;
 	}
 	sdev_printk(KERN_INFO, scmd->device,
-		"attempting task abort! scmd(%p) tm_dev_handle 0x%x\n",
+		"attempting task abort! scmd(0x%p) tm_dev_handle 0x%x\n",
 		scmd, devhandle);
 
 	mr_device_priv_data->tm_busy = 1;
@@ -4379,9 +4668,12 @@
 	mr_device_priv_data->tm_busy = 0;
 
 	mutex_unlock(&instance->reset_mutex);
-out:
-	sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
+	scmd_printk(KERN_INFO, scmd, "task abort %s!! scmd(0x%p)\n",
 			((ret == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+out:
+	scsi_print_command(scmd);
+	if (megasas_dbg_lvl & TM_DEBUG)
+		megasas_dump_fusion_io(scmd);
 
 	return ret;
 }
@@ -4399,15 +4691,10 @@
 	struct megasas_instance *instance;
 	int ret = FAILED;
 	u16 devhandle;
-	struct fusion_context *fusion;
 	struct MR_PRIV_DEVICE *mr_device_priv_data;
 	mr_device_priv_data = scmd->device->hostdata;
 
 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
-	fusion = instance->ctrl_context;
-
-	sdev_printk(KERN_INFO, scmd->device,
-		    "target reset called for scmd(%p)\n", scmd);
 
 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
 		dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL,"
@@ -4417,14 +4704,13 @@
 	}
 
 	if (!mr_device_priv_data) {
-		sdev_printk(KERN_INFO, scmd->device, "device been deleted! "
-			"scmd(%p)\n", scmd);
+		sdev_printk(KERN_INFO, scmd->device,
+			    "device been deleted! scmd: (0x%p)\n", scmd);
 		scmd->result = DID_NO_CONNECT << 16;
 		ret = SUCCESS;
 		goto out;
 	}
 
-
 	if (!mr_device_priv_data->is_tm_capable) {
 		ret = FAILED;
 		goto out;
@@ -4442,7 +4728,7 @@
 	}
 
 	sdev_printk(KERN_INFO, scmd->device,
-		"attempting target reset! scmd(%p) tm_dev_handle 0x%x\n",
+		"attempting target reset! scmd(0x%p) tm_dev_handle: 0x%x\n",
 		scmd, devhandle);
 	mr_device_priv_data->tm_busy = 1;
 	ret = megasas_issue_tm(instance, devhandle,
@@ -4451,15 +4737,16 @@
 			mr_device_priv_data);
 	mr_device_priv_data->tm_busy = 0;
 	mutex_unlock(&instance->reset_mutex);
-out:
-	scmd_printk(KERN_NOTICE, scmd, "megasas: target reset %s!!\n",
+	scmd_printk(KERN_NOTICE, scmd, "target reset %s!!\n",
 		(ret == SUCCESS) ? "SUCCESS" : "FAILED");
 
+out:
 	return ret;
 }
 
 /* SR-IOV: get other instance in cluster, if any */
-struct megasas_instance *megasas_get_peer_instance(struct megasas_instance *instance)
+static struct megasas_instance *
+megasas_get_peer_instance(struct megasas_instance *instance)
 {
 	int i;
 
@@ -4499,12 +4786,14 @@
 	struct megasas_instance *instance;
 	struct megasas_cmd_fusion *cmd_fusion, *r1_cmd;
 	struct fusion_context *fusion;
-	u32 abs_state, status_reg, reset_adapter;
+	u32 abs_state, status_reg, reset_adapter, fpio_count = 0;
 	u32 io_timeout_in_crash_mode = 0;
 	struct scsi_cmnd *scmd_local = NULL;
 	struct scsi_device *sdev;
 	int ret_target_prop = DCMD_FAILED;
 	bool is_target_prop = false;
+	bool do_adp_reset = true;
+	int max_reset_tries = MEGASAS_FUSION_MAX_RESET_TRIES;
 
 	instance = (struct megasas_instance *)shost->hostdata;
 	fusion = instance->ctrl_context;
@@ -4518,7 +4807,7 @@
 		mutex_unlock(&instance->reset_mutex);
 		return FAILED;
 	}
-	status_reg = instance->instancet->read_fw_status_reg(instance->reg_set);
+	status_reg = instance->instancet->read_fw_status_reg(instance);
 	abs_state = status_reg & MFI_STATE_MASK;
 
 	/* IO timeout detected, forcibly put FW in FAULT state */
@@ -4527,7 +4816,7 @@
 		dev_info(&instance->pdev->dev, "IO/DCMD timeout is detected, "
 			"forcibly FAULT Firmware\n");
 		atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
-		status_reg = readl(&instance->reg_set->doorbell);
+		status_reg = megasas_readl(instance, &instance->reg_set->doorbell);
 		writel(status_reg | MFI_STATE_FORCE_OCR,
 			&instance->reg_set->doorbell);
 		readl(&instance->reg_set->doorbell);
@@ -4571,26 +4860,30 @@
 		if (convert)
 			reason = 0;
 
-		if (megasas_dbg_lvl & OCR_LOGS)
+		if (megasas_dbg_lvl & OCR_DEBUG)
 			dev_info(&instance->pdev->dev, "\nPending SCSI commands:\n");
 
 		/* Now return commands back to the OS */
 		for (i = 0 ; i < instance->max_scsi_cmds; i++) {
 			cmd_fusion = fusion->cmd_list[i];
 			/*check for extra commands issued by driver*/
-			if (instance->adapter_type == VENTURA_SERIES) {
+			if (instance->adapter_type >= VENTURA_SERIES) {
 				r1_cmd = fusion->cmd_list[i + instance->max_fw_cmds];
 				megasas_return_cmd_fusion(instance, r1_cmd);
 			}
 			scmd_local = cmd_fusion->scmd;
 			if (cmd_fusion->scmd) {
-				if (megasas_dbg_lvl & OCR_LOGS) {
+				if (megasas_dbg_lvl & OCR_DEBUG) {
 					sdev_printk(KERN_INFO,
 						cmd_fusion->scmd->device, "SMID: 0x%x\n",
 						cmd_fusion->index);
-					scsi_print_command(cmd_fusion->scmd);
+					megasas_dump_fusion_io(cmd_fusion->scmd);
 				}
 
+				if (cmd_fusion->io_request->Function ==
+					MPI2_FUNCTION_SCSI_IO_REQUEST)
+					fpio_count++;
+
 				scmd_local->result =
 					megasas_check_mpio_paths(instance,
 							scmd_local);
@@ -4603,10 +4896,12 @@
 			}
 		}
 
+		dev_info(&instance->pdev->dev, "Outstanding fastpath IOs: %d\n",
+			fpio_count);
+
 		atomic_set(&instance->fw_outstanding, 0);
 
-		status_reg = instance->instancet->read_fw_status_reg(
-			instance->reg_set);
+		status_reg = instance->instancet->read_fw_status_reg(instance);
 		abs_state = status_reg & MFI_STATE_MASK;
 		reset_adapter = status_reg & MFI_RESET_ADAPTER;
 		if (instance->disableOnlineCtrlReset ||
@@ -4615,52 +4910,45 @@
 			dev_warn(&instance->pdev->dev, "Reset not supported"
 			       ", killing adapter scsi%d.\n",
 				instance->host->host_no);
-			megaraid_sas_kill_hba(instance);
-			instance->skip_heartbeat_timer_del = 1;
-			retval = FAILED;
-			goto out;
+			goto kill_hba;
 		}
 
 		/* Let SR-IOV VF & PF sync up if there was a HB failure */
 		if (instance->requestorId && !reason) {
 			msleep(MEGASAS_OCR_SETTLE_TIME_VF);
-			goto transition_to_ready;
+			do_adp_reset = false;
+			max_reset_tries = MEGASAS_SRIOV_MAX_RESET_TRIES_VF;
 		}
 
 		/* Now try to reset the chip */
-		for (i = 0; i < MEGASAS_FUSION_MAX_RESET_TRIES; i++) {
-
-			if (instance->instancet->adp_reset
-				(instance, instance->reg_set))
+		for (i = 0; i < max_reset_tries; i++) {
+			/*
+			 * Do adp reset and wait for
+			 * controller to transition to ready
+			 */
+			if (megasas_adp_reset_wait_for_ready(instance,
+				do_adp_reset, 1) == FAILED)
 				continue;
-transition_to_ready:
+
 			/* Wait for FW to become ready */
 			if (megasas_transition_to_ready(instance, 1)) {
 				dev_warn(&instance->pdev->dev,
 					"Failed to transition controller to ready for "
 					"scsi%d.\n", instance->host->host_no);
-				if (instance->requestorId && !reason)
-					goto fail_kill_adapter;
-				else
-					continue;
+				continue;
 			}
 			megasas_reset_reply_desc(instance);
 			megasas_fusion_update_can_queue(instance, OCR_CONTEXT);
 
 			if (megasas_ioc_init_fusion(instance)) {
-				if (instance->requestorId && !reason)
-					goto fail_kill_adapter;
-				else
-					continue;
+				continue;
 			}
 
 			if (megasas_get_ctrl_info(instance)) {
 				dev_info(&instance->pdev->dev,
 					"Failed from %s %d\n",
 					__func__, __LINE__);
-				megaraid_sas_kill_hba(instance);
-				retval = FAILED;
-				goto out;
+				goto kill_hba;
 			}
 
 			megasas_refire_mgmt_cmd(instance);
@@ -4677,7 +4965,7 @@
 			megasas_setup_jbod_map(instance);
 
 			/* reset stream detection array */
-			if (instance->adapter_type == VENTURA_SERIES) {
+			if (instance->adapter_type >= VENTURA_SERIES) {
 				for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) {
 					memset(fusion->stream_detect_by_ld[j],
 					0, sizeof(struct LD_STREAM_DETECT));
@@ -4689,7 +4977,7 @@
 			clear_bit(MEGASAS_FUSION_IN_RESET,
 				  &instance->reset_flags);
 			instance->instancet->enable_intr(instance);
-
+			megasas_enable_irq_poll(instance);
 			shost_for_each_device(sdev, shost) {
 				if ((instance->tgt_prop) &&
 				    (instance->nvme_page_size))
@@ -4701,9 +4989,9 @@
 
 			atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
 
-			dev_info(&instance->pdev->dev, "Interrupts are enabled and"
-				" controller is OPERATIONAL for scsi:%d\n",
-				instance->host->host_no);
+			dev_info(&instance->pdev->dev,
+				 "Adapter is OPERATIONAL for scsi:%d\n",
+				 instance->host->host_no);
 
 			/* Restart SR-IOV heartbeat */
 			if (instance->requestorId) {
@@ -4721,6 +5009,13 @@
 				megasas_set_crash_dump_params(instance,
 					MR_CRASH_BUF_TURN_OFF);
 
+			if (instance->snapdump_wait_time) {
+				megasas_get_snapdump_properties(instance);
+				dev_info(&instance->pdev->dev,
+					 "Snap dump wait time\t: %d\n",
+					 instance->snapdump_wait_time);
+			}
+
 			retval = SUCCESS;
 
 			/* Adapter reset completed successfully */
@@ -4730,13 +5025,10 @@
 
 			goto out;
 		}
-fail_kill_adapter:
 		/* Reset failed, kill the adapter */
 		dev_warn(&instance->pdev->dev, "Reset failed, killing "
 		       "adapter scsi%d.\n", instance->host->host_no);
-		megaraid_sas_kill_hba(instance);
-		instance->skip_heartbeat_timer_del = 1;
-		retval = FAILED;
+		goto kill_hba;
 	} else {
 		/* For VF: Restart HB timer if we didn't OCR */
 		if (instance->requestorId) {
@@ -4744,24 +5036,30 @@
 		}
 		clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
 		instance->instancet->enable_intr(instance);
+		megasas_enable_irq_poll(instance);
 		atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
+		goto out;
 	}
+kill_hba:
+	megaraid_sas_kill_hba(instance);
+	megasas_enable_irq_poll(instance);
+	instance->skip_heartbeat_timer_del = 1;
+	retval = FAILED;
 out:
 	clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
 	mutex_unlock(&instance->reset_mutex);
 	return retval;
 }
 
-/* Fusion Crash dump collection work queue */
-void  megasas_fusion_crash_dump_wq(struct work_struct *work)
+/* Fusion Crash dump collection */
+static void  megasas_fusion_crash_dump(struct megasas_instance *instance)
 {
-	struct megasas_instance *instance =
-		container_of(work, struct megasas_instance, crash_init);
 	u32 status_reg;
 	u8 partial_copy = 0;
+	int wait = 0;
 
 
-	status_reg = instance->instancet->read_fw_status_reg(instance->reg_set);
+	status_reg = instance->instancet->read_fw_status_reg(instance);
 
 	/*
 	 * Allocate host crash buffers to copy data from 1 MB DMA crash buffer
@@ -4777,8 +5075,8 @@
 				"crash dump and initiating OCR\n");
 			status_reg |= MFI_STATE_CRASH_DUMP_DONE;
 			writel(status_reg,
-				&instance->reg_set->outbound_scratch_pad);
-			readl(&instance->reg_set->outbound_scratch_pad);
+				&instance->reg_set->outbound_scratch_pad_0);
+			readl(&instance->reg_set->outbound_scratch_pad_0);
 			return;
 		}
 		megasas_alloc_host_crash_buffer(instance);
@@ -4786,21 +5084,41 @@
 			"allocated: %d\n", instance->drv_buf_alloc);
 	}
 
-	/*
-	 * Driver has allocated max buffers, which can be allocated
-	 * and FW has more crash dump data, then driver will
-	 * ignore the data.
-	 */
-	if (instance->drv_buf_index >= (instance->drv_buf_alloc)) {
-		dev_info(&instance->pdev->dev, "Driver is done copying "
-			"the buffer: %d\n", instance->drv_buf_alloc);
-		status_reg |= MFI_STATE_CRASH_DUMP_DONE;
-		partial_copy = 1;
-	} else {
-		memcpy(instance->crash_buf[instance->drv_buf_index],
-			instance->crash_dump_buf, CRASH_DMA_BUF_SIZE);
-		instance->drv_buf_index++;
-		status_reg &= ~MFI_STATE_DMADONE;
+	while (!(status_reg & MFI_STATE_CRASH_DUMP_DONE) &&
+	       (wait < MEGASAS_WATCHDOG_WAIT_COUNT)) {
+		if (!(status_reg & MFI_STATE_DMADONE)) {
+			/*
+			 * Next crash dump buffer is not yet DMA'd by FW.
+			 * Check again after 10ms. Wait up to 1 second for FW
+			 * to post the next buffer; if it does not, bail out.
+			 */
+			wait++;
+			msleep(MEGASAS_WAIT_FOR_NEXT_DMA_MSECS);
+			status_reg = instance->instancet->read_fw_status_reg(
+					instance);
+			continue;
+		}
+
+		wait = 0;
+		if (instance->drv_buf_index >= instance->drv_buf_alloc) {
+			dev_info(&instance->pdev->dev,
+				 "Driver is done copying the buffer: %d\n",
+				 instance->drv_buf_alloc);
+			status_reg |= MFI_STATE_CRASH_DUMP_DONE;
+			partial_copy = 1;
+			break;
+		} else {
+			memcpy(instance->crash_buf[instance->drv_buf_index],
+			       instance->crash_dump_buf, CRASH_DMA_BUF_SIZE);
+			instance->drv_buf_index++;
+			status_reg &= ~MFI_STATE_DMADONE;
+		}
+
+		writel(status_reg, &instance->reg_set->outbound_scratch_pad_0);
+		readl(&instance->reg_set->outbound_scratch_pad_0);
+
+		msleep(MEGASAS_WAIT_FOR_NEXT_DMA_MSECS);
+		status_reg = instance->instancet->read_fw_status_reg(instance);
 	}
 
 	if (status_reg & MFI_STATE_CRASH_DUMP_DONE) {
@@ -4809,13 +5127,10 @@
 		instance->fw_crash_buffer_size =  instance->drv_buf_index;
 		instance->fw_crash_state = AVAILABLE;
 		instance->drv_buf_index = 0;
-		writel(status_reg, &instance->reg_set->outbound_scratch_pad);
-		readl(&instance->reg_set->outbound_scratch_pad);
+		writel(status_reg, &instance->reg_set->outbound_scratch_pad_0);
+		readl(&instance->reg_set->outbound_scratch_pad_0);
 		if (!partial_copy)
 			megasas_reset_fusion(instance->host, 0);
-	} else {
-		writel(status_reg, &instance->reg_set->outbound_scratch_pad);
-		readl(&instance->reg_set->outbound_scratch_pad);
 	}
 }
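
The rewritten loop above turns the crash-dump copy into an inline doorbell handshake: firmware raises DMADONE when a 1 MB chunk is ready, the driver copies it and clears DMADONE to request the next chunk, and firmware raises CRASH_DUMP_DONE when nothing is left; up to MEGASAS_WATCHDOG_WAIT_COUNT empty polls are tolerated before giving up. Driver-side schematic (the helpers are illustrative stand-ins for the register accesses):

    /* Schematic of the handshake above; helpers are stand-ins. */
    static void ex_crash_dump_consume(u32 (*read_status)(void),
                                      void (*copy_chunk)(void),
                                      void (*ack_dmadone)(void),
                                      int max_empty_polls)
    {
        int waited = 0;

        while (waited < max_empty_polls) {
            u32 status = read_status();

            if (status & MFI_STATE_CRASH_DUMP_DONE)
                return;                 /* FW has no more chunks */
            if (!(status & MFI_STATE_DMADONE)) {
                waited++;               /* chunk not posted yet */
                continue;
            }
            waited = 0;
            copy_chunk();               /* consume the 1 MB buffer */
            ack_dmadone();              /* clear DMADONE: request next */
        }
    }
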
 
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index 8e5ebee..c013c80 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -1,34 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  *  Linux MegaRAID driver for SAS based RAID controllers
  *
  *  Copyright (c) 2009-2013  LSI Corporation
- *  Copyright (c) 2013-2014  Avago Technologies
- *
- *  This program is free software; you can redistribute it and/or
- *  modify it under the terms of the GNU General Public License
- *  as published by the Free Software Foundation; either version 2
- *  of the License, or (at your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *  Copyright (c) 2013-2016  Avago Technologies
+ *  Copyright (c) 2016-2018  Broadcom Inc.
  *
  *  FILE: megaraid_sas_fusion.h
  *
- *  Authors: Avago Technologies
+ *  Authors: Broadcom Inc.
  *           Manoj Jose
  *           Sumant Patro
- *           Kashyap Desai <kashyap.desai@avagotech.com>
- *           Sumit Saxena <sumit.saxena@avagotech.com>
+ *           Kashyap Desai <kashyap.desai@broadcom.com>
+ *           Sumit Saxena <sumit.saxena@broadcom.com>
  *
- *  Send feedback to: megaraidlinux.pdl@avagotech.com
- *
- *  Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
- *  San Jose, California 95131
+ *  Send feedback to: megaraidlinux.pdl@broadcom.com
  */
 
 #ifndef _MEGARAID_SAS_FUSION_H_
@@ -89,7 +75,8 @@
 	MR_RAID_FLAGS_IO_SUB_TYPE_RMW_P        = 3,
 	MR_RAID_FLAGS_IO_SUB_TYPE_RMW_Q        = 4,
 	MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS = 6,
-	MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT = 7
+	MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT = 7,
+	MR_RAID_FLAGS_IO_SUB_TYPE_R56_DIV_OFFLOAD = 8
 };
 
 /*
@@ -102,7 +89,6 @@
 
 #define MEGASAS_FP_CMD_LEN	16
 #define MEGASAS_FUSION_IN_RESET 0
-#define THRESHOLD_REPLY_COUNT 50
 #define RAID_1_PEER_CMDS 2
 #define JBOD_MAPS_COUNT	2
 #define MEGASAS_REDUCE_QD_COUNT 64
@@ -154,12 +140,15 @@
 	u16 timeout_value; /* 0x02 -0x03 */
 	u16		routing_flags;	// 0x04 -0x05 routing flags
 	u16 virtual_disk_tgt_id;   /* 0x06 -0x07 */
-	u64 reg_lock_row_lba;      /* 0x08 - 0x0F */
+	__le64 reg_lock_row_lba;      /* 0x08 - 0x0F */
 	u32 reg_lock_length;      /* 0x10 - 0x13 */
-	union {
-		u16 next_lmid; /* 0x14 - 0x15 */
-		u16	peer_smid;	/* used for the raid 1/10 fp writes */
-	} smid;
+	union {                     // flow specific
+		u16 rmw_op_index;   /* 0x14 - 0x15, R5/6 RMW: rmw operation index */
+		u16 peer_smid;      /* 0x14 - 0x15, R1 Write: peer smid */
+		u16 r56_arm_map;    /* 0x14 - 0x15, Unused [15], LogArm[14:10], P-Arm[9:5], Q-Arm[4:0] */
+
+	} flow_specific;
+
 	u8 ex_status;       /* 0x16 : OUT */
 	u8 status;          /* 0x17 status */
 	u8 raid_flags;		/* 0x18 resvd[7:6], ioSubType[5:4],
@@ -250,6 +239,13 @@
 #define RAID_CTX_SPANARM_SPAN_SHIFT	(5)
 #define RAID_CTX_SPANARM_SPAN_MASK	(0xE0)
 
+/* LogArm[14:10], P-Arm[9:5], Q-Arm[4:0] */
+#define RAID_CTX_R56_Q_ARM_MASK		(0x1F)
+#define RAID_CTX_R56_P_ARM_SHIFT	(5)
+#define RAID_CTX_R56_P_ARM_MASK		(0x3E0)
+#define RAID_CTX_R56_LOG_ARM_SHIFT	(10)
+#define RAID_CTX_R56_LOG_ARM_MASK	(0x7C00)
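
Given the packing noted above (LogArm[14:10], P-Arm[9:5], Q-Arm[4:0]), each arm is recovered by a mask-and-shift:

    /* Field extraction for the r56_arm_map packing defined above. */
    static inline u8 r56_q_arm(u16 arm_map)
    {
        return arm_map & RAID_CTX_R56_Q_ARM_MASK;
    }

    static inline u8 r56_p_arm(u16 arm_map)
    {
        return (arm_map & RAID_CTX_R56_P_ARM_MASK) >> RAID_CTX_R56_P_ARM_SHIFT;
    }

    static inline u8 r56_log_arm(u16 arm_map)
    {
        return (arm_map & RAID_CTX_R56_LOG_ARM_MASK) >> RAID_CTX_R56_LOG_ARM_SHIFT;
    }
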
+
 /* number of bits per index in U32 TrackStream */
 #define BITS_PER_INDEX_STREAM		4
 #define INVALID_STREAM_NUM              16
@@ -725,6 +721,8 @@
 #define MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC  0x010e8485   /* SR-IOV HB alloc*/
 #define MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111   0x03200200
 #define MR_DCMD_LD_VF_MAP_GET_ALL_LDS       0x03150200
+#define MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES	0x01200100
+#define MR_DCMD_CTRL_DEVICE_LIST_GET		0x01190600
 
 struct MR_DEV_HANDLE_INFO {
 	__le16	curDevHdl;
@@ -952,6 +950,7 @@
 	u8  pd_after_lb;
 	u16 r1_alt_dev_handle; /* raid 1/10 only */
 	bool ra_capable;
+	u8 data_arms;
 };
 
 struct MR_LD_TARGET_SYNC {
@@ -1063,6 +1062,9 @@
 #define MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP       (0x08)
 #define MPI26_IEEE_SGE_FLAGS_NSF_NVME_SGL       (0x10)
 
+#define MEGASAS_DEFAULT_SNAP_DUMP_WAIT_TIME 15
+#define MEGASAS_MAX_SNAP_DUMP_WAIT_TIME 60
+
 struct megasas_register_set;
 struct megasas_instance;
 
@@ -1333,7 +1335,8 @@
 	dma_addr_t ioc_init_request_phys;
 	struct MPI2_IOC_INIT_REQUEST *ioc_init_request;
 	struct megasas_cmd *ioc_init_cmd;
-
+	bool pcie_bw_limitation;
+	bool r56_div_offload;
 };
 
 union desc_value {
@@ -1350,6 +1353,19 @@
 	RETURN_CMD = 3,
 };
 
+struct  MR_SNAPDUMP_PROPERTIES {
+	u8       offload_num;
+	u8       max_num_supported;
+	u8       cur_num_supported;
+	u8       trigger_min_num_sec_before_ocr;
+	u8       reserved[12];
+};
+
+struct megasas_debugfs_buffer {
+	void *buf;
+	u32 len;
+};
+
 void megasas_free_cmds_fusion(struct megasas_instance *instance);
 int megasas_ioc_init_fusion(struct megasas_instance *instance);
 u8 megasas_get_map_info(struct megasas_instance *instance);
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index 82e01db..74fb506 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * SCSI low-level driver for the MESH (Macintosh Enhanced SCSI Hardware)
  * bus adaptor found on Power Macintosh computers.
@@ -1838,7 +1839,7 @@
 	.this_id			= 7,
 	.sg_tablesize			= SG_ALL,
 	.cmd_per_lun			= 2,
-	.use_clustering			= DISABLE_CLUSTERING,
+	.max_segment_size		= 65535,
 };
 
 static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
@@ -1915,8 +1916,9 @@
 	/* We use the PCI APIs for now until the generic one gets fixed
 	 * enough or until we get some macio-specific versions
 	 */
-	dma_cmd_space = pci_zalloc_consistent(macio_get_pci_dev(mdev),
-					      ms->dma_cmd_size, &dma_cmd_bus);
+	dma_cmd_space = dma_alloc_coherent(&macio_get_pci_dev(mdev)->dev,
+					   ms->dma_cmd_size, &dma_cmd_bus,
+					   GFP_KERNEL);
 	if (dma_cmd_space == NULL) {
 		printk(KERN_ERR "mesh: can't allocate DMA table\n");
 		goto out_unmap;
@@ -1974,7 +1976,7 @@
 	 */
 	mesh_shutdown(mdev);
 	set_mesh_power(ms, 0);
-	pci_free_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size,
+	dma_free_coherent(&macio_get_pci_dev(mdev)->dev, ms->dma_cmd_size,
 			    ms->dma_cmd_space, ms->dma_cmd_bus);
  out_unmap:
 	iounmap(ms->dma);
@@ -2007,7 +2009,7 @@
        	iounmap(ms->dma);
 
 	/* Free DMA commands memory */
-	pci_free_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size,
+	dma_free_coherent(&macio_get_pci_dev(mdev)->dev, ms->dma_cmd_size,
 			    ms->dma_cmd_space, ms->dma_cmd_bus);
 
 	/* Release memory resources */
diff --git a/drivers/scsi/mpt3sas/Kconfig b/drivers/scsi/mpt3sas/Kconfig
index b736dbc..a072187 100644
--- a/drivers/scsi/mpt3sas/Kconfig
+++ b/drivers/scsi/mpt3sas/Kconfig
@@ -45,6 +45,7 @@
 	depends on PCI && SCSI
 	select SCSI_SAS_ATTRS
 	select RAID_ATTRS
+	select IRQ_POLL
 	---help---
 	This driver supports PCI-Express SAS 12Gb/s Host Adapters.
 
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h
index 1e45268..18b1e31 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright 2000-2015 Avago Technologies.  All rights reserved.
+ * Copyright 2000-2020 Broadcom Inc. All rights reserved.
  *
  *
  *          Name:  mpi2.h
@@ -9,7 +9,7 @@
  *                 scatter/gather formats.
  * Creation Date:  June 21, 2006
  *
- *  mpi2.h Version:  02.00.50
+ *  mpi2.h Version:  02.00.54
  *
  * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
  *       prefix are for use only on MPI v2.5 products, and must not be used
@@ -116,7 +116,13 @@
  * 02-03-17  02.00.48  Bumped MPI2_HEADER_VERSION_UNIT.
  * 06-13-17  02.00.49  Bumped MPI2_HEADER_VERSION_UNIT.
  * 09-29-17  02.00.50  Bumped MPI2_HEADER_VERSION_UNIT.
- * --------------------------------------------------------------------------
+ * 07-22-18  02.00.51  Added SECURE_BOOT define.
+ *                     Bumped MPI2_HEADER_VERSION_UNIT.
+ * 08-15-18  02.00.52  Bumped MPI2_HEADER_VERSION_UNIT.
+ * 08-28-18  02.00.53  Bumped MPI2_HEADER_VERSION_UNIT.
+ *                     Added MPI2_IOCSTATUS_FAILURE.
+ * 12-17-18  02.00.54  Bumped MPI2_HEADER_VERSION_UNIT.
+ * --------------------------------------------------------------------------
  */
 
 #ifndef MPI2_H
@@ -156,7 +162,7 @@
 
 
 /* Unit and Dev versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT            (0x32)
+#define MPI2_HEADER_VERSION_UNIT            (0x36)
 #define MPI2_HEADER_VERSION_DEV             (0x00)
 #define MPI2_HEADER_VERSION_UNIT_MASK       (0xFF00)
 #define MPI2_HEADER_VERSION_UNIT_SHIFT      (8)
@@ -257,6 +263,8 @@
  */
 #define MPI2_HOST_DIAGNOSTIC_OFFSET             (0x00000008)
 
+#define MPI26_DIAG_SECURE_BOOT                  (0x80000000)
+
 #define MPI2_DIAG_SBR_RELOAD                    (0x00002000)
 
 #define MPI2_DIAG_BOOT_DEVICE_SELECT_MASK       (0x00001800)
@@ -687,7 +695,9 @@
 #define MPI2_IOCSTATUS_INVALID_FIELD                (0x0007)
 #define MPI2_IOCSTATUS_INVALID_STATE                (0x0008)
 #define MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED       (0x0009)
+/*MPI v2.6 and later */
 #define MPI2_IOCSTATUS_INSUFFICIENT_POWER           (0x000A)
+#define MPI2_IOCSTATUS_FAILURE                      (0x000F)
 
 /****************************************************************************
 * Config IOCStatus values
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
index 5122920..3a6871a 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -1,13 +1,13 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright 2000-2015 Avago Technologies.  All rights reserved.
+ * Copyright 2000-2020 Broadcom Inc. All rights reserved.
  *
  *
  *          Name:  mpi2_cnfg.h
  *         Title:  MPI Configuration messages and pages
  * Creation Date:  November 10, 2006
  *
- *    mpi2_cnfg.h Version:  02.00.42
+ *    mpi2_cnfg.h Version:  02.00.47
  *
  * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
  *       prefix are for use only on MPI v2.5 products, and must not be used
@@ -231,7 +231,24 @@
  *                     Added NOIOB field to PCIe Device Page 2.
  *                     Added MPI26_PCIEDEV2_CAP_DATA_BLK_ALIGN_AND_GRAN to
  *                     the Capabilities field of PCIe Device Page 2.
+ * 07-22-18  02.00.43  Added defines for SAS3916 and SAS3816.
+ *                     Added WriteCache defines to IO Unit Page 1.
+ *                     Added MaxEnclosureLevel to BIOS Page 1.
+ *                     Added OEMRD to SAS Enclosure Page 1.
+ *                     Added DMDReportPCIe to PCIe IO Unit Page 1.
+ *                     Added Flags field and flags for Retimers to
+ *                     PCIe Switch Page 1.
+ * 08-02-18  02.00.44  Added Slotx2, Slotx4 to ManPage 7.
+ * 08-15-18  02.00.45  Added ProductSpecific field at end of IOC Page 1
+ * 08-28-18  02.00.46  Added NVMe Write Cache flag to IOUnitPage1
+ *                     Added DMDReport Delay Time defines to
+ *                     PCIeIOUnitPage1
+ * 12-17-18  02.00.47  Swap locations of Slotx2 and Slotx4 in ManPage 7.
 * --------------------------------------------------------------------------
  */
 
 #ifndef MPI2_CNFG_H
@@ -536,7 +553,8 @@
 #define MPI2_MFGPAGE_DEVID_SAS2308_1                (0x0086)
 #define MPI2_MFGPAGE_DEVID_SAS2308_2                (0x0087)
 #define MPI2_MFGPAGE_DEVID_SAS2308_3                (0x006E)
-#define MPI2_MFGPAGE_DEVID_SAS2308_MPI_EP           (0x02B0)
+#define MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP            (0x02B0)
+#define MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1          (0x02B1)
 
 /*MPI v2.5 SAS products */
 #define MPI25_MFGPAGE_DEVID_SAS3004                 (0x0096)
@@ -568,8 +586,17 @@
 #define MPI26_MFGPAGE_DEVID_SAS3616                 (0x00D1)
 #define MPI26_MFGPAGE_DEVID_SAS3708                 (0x00D2)
 
-#define MPI26_MFGPAGE_DEVID_SAS3816                 (0x00A1)
-#define MPI26_MFGPAGE_DEVID_SAS3916                 (0x00A0)
+#define MPI26_MFGPAGE_DEVID_SEC_MASK_3916           (0x0003)
+#define MPI26_MFGPAGE_DEVID_INVALID0_3916           (0x00E0)
+#define MPI26_MFGPAGE_DEVID_CFG_SEC_3916            (0x00E1)
+#define MPI26_MFGPAGE_DEVID_HARD_SEC_3916           (0x00E2)
+#define MPI26_MFGPAGE_DEVID_INVALID1_3916           (0x00E3)
+
+#define MPI26_MFGPAGE_DEVID_SEC_MASK_3816           (0x0003)
+#define MPI26_MFGPAGE_DEVID_INVALID0_3816           (0x00E4)
+#define MPI26_MFGPAGE_DEVID_CFG_SEC_3816            (0x00E5)
+#define MPI26_MFGPAGE_DEVID_HARD_SEC_3816           (0x00E6)
+#define MPI26_MFGPAGE_DEVID_INVALID1_3816           (0x00E7)
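+
+/*
+ * Editor's illustration, not part of the upstream patch: the low two
+ * bits (MPI26_MFGPAGE_DEVID_SEC_MASK_3916/_3816) select the security
+ * variant, and the INVALID encodings can be rejected at probe time;
+ * the helper name is hypothetical.
+ */
+static inline int mpi26_devid_is_invalid_sec_variant(U16 device_id)
+{
+	return device_id == MPI26_MFGPAGE_DEVID_INVALID0_3916 ||
+	       device_id == MPI26_MFGPAGE_DEVID_INVALID1_3916 ||
+	       device_id == MPI26_MFGPAGE_DEVID_INVALID0_3816 ||
+	       device_id == MPI26_MFGPAGE_DEVID_INVALID1_3816;
+}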
 
 
 /*Manufacturing Page 0 */
@@ -788,7 +815,8 @@
 	U8                          Location;               /*0x14 */
 	U8                          ReceptacleID;           /*0x15 */
 	U16                         Slot;                   /*0x16 */
-	U32                         Reserved2;              /*0x18 */
+	U16                         Slotx2;                 /*0x18 */
+	U16                         Slotx4;                 /*0x1A */
 } MPI2_MANPAGE7_CONNECTOR_INFO,
 	*PTR_MPI2_MANPAGE7_CONNECTOR_INFO,
 	Mpi2ManPage7ConnectorInfo_t,
@@ -932,7 +960,11 @@
 
 #define MPI2_IOUNITPAGE1_PAGEVERSION                    (0x04)
 
-/*IO Unit Page 1 Flags defines */
+/* IO Unit Page 1 Flags defines */
+#define MPI26_IOUNITPAGE1_NVME_WRCACHE_MASK             (0x00030000)
+#define MPI26_IOUNITPAGE1_NVME_WRCACHE_ENABLE           (0x00000000)
+#define MPI26_IOUNITPAGE1_NVME_WRCACHE_DISABLE          (0x00010000)
+#define MPI26_IOUNITPAGE1_NVME_WRCACHE_NO_CHANGE        (0x00020000)
 #define MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK       (0x00004000)
 #define MPI25_IOUNITPAGE1_NEW_DEVICE_FAST_PATH_DISABLE  (0x00002000)
 #define MPI25_IOUNITPAGE1_DISABLE_FAST_PATH             (0x00001000)
@@ -1372,7 +1404,7 @@
 	U8                      PCIBusNum;                  /*0x0E */
 	U8                      PCIDomainSegment;           /*0x0F */
 	U32                     Reserved1;                  /*0x10 */
-	U32                     Reserved2;                  /*0x14 */
+	U32                     ProductSpecific;            /*0x14 */
 } MPI2_CONFIG_PAGE_IOC_1,
 	*PTR_MPI2_CONFIG_PAGE_IOC_1,
 	Mpi2IOCPage1_t, *pMpi2IOCPage1_t;
@@ -1511,7 +1543,7 @@
 	U32                     BiosOptions;                /*0x04 */
 	U32                     IOCSettings;                /*0x08 */
 	U8                      SSUTimeout;                 /*0x0C */
-	U8                      Reserved1;                  /*0x0D */
+	U8                      MaxEnclosureLevel;          /*0x0D */
 	U16                     Reserved2;                  /*0x0E */
 	U32                     DeviceSettings;             /*0x10 */
 	U16                     NumberOfDevices;            /*0x14 */
@@ -1531,7 +1563,6 @@
 #define MPI2_BIOSPAGE1_OPTIONS_ADVANCED_CONFIG                  (0x00004000)
 
 #define MPI2_BIOSPAGE1_OPTIONS_PNS_MASK                         (0x00003800)
-#define MPI2_BIOSPAGE1_OPTIONS_PNS_MASK                         (0x00003800)
 #define MPI2_BIOSPAGE1_OPTIONS_PNS_PBDHL                        (0x00000000)
 #define MPI2_BIOSPAGE1_OPTIONS_PNS_ENCSLOSURE                   (0x00000800)
 #define MPI2_BIOSPAGE1_OPTIONS_PNS_LWWID                        (0x00001000)
@@ -3271,10 +3302,12 @@
 	U16	NumSlots;			/*0x18 */
 	U16	StartSlot;			/*0x1A */
 	U8	ChassisSlot;			/*0x1C */
-	U8	EnclosureLeve;			/*0x1D */
+	U8	EnclosureLevel;			/*0x1D */
 	U16	SEPDevHandle;			/*0x1E */
-	U32	Reserved3;			/*0x20 */
-	U32	Reserved4;			/*0x24 */
+	U8	OEMRD;				/*0x20 */
+	U8	Reserved1a;			/*0x21 */
+	U16	Reserved2;			/*0x22 */
+	U32	Reserved3;			/*0x24 */
 } MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0,
 	*PTR_MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0,
 	Mpi2SasEnclosurePage0_t, *pMpi2SasEnclosurePage0_t,
@@ -3285,6 +3318,8 @@
 #define MPI2_SASENCLOSURE0_PAGEVERSION      (0x04)
 
 /*values for SAS Enclosure Page 0 Flags field */
+#define MPI26_SAS_ENCLS0_FLAGS_OEMRD_VALID          (0x0080)
+#define MPI26_SAS_ENCLS0_FLAGS_OEMRD_COLLECTING     (0x0040)
 #define MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID    (0x0020)
 #define MPI2_SAS_ENCLS0_FLAGS_ENCL_LEVEL_VALID      (0x0010)
 #define MPI2_SAS_ENCLS0_FLAGS_MNG_MASK              (0x000F)
@@ -3298,6 +3333,8 @@
 #define MPI26_ENCLOSURE0_PAGEVERSION        (0x04)
 
 /*Values for Enclosure Page 0 Flags field */
+#define MPI26_ENCLS0_FLAGS_OEMRD_VALID              (0x0080)
+#define MPI26_ENCLS0_FLAGS_OEMRD_COLLECTING         (0x0040)
 #define MPI26_ENCLS0_FLAGS_CHASSIS_SLOT_VALID       (0x0020)
 #define MPI26_ENCLS0_FLAGS_ENCL_LEVEL_VALID         (0x0010)
 #define MPI26_ENCLS0_FLAGS_MNG_MASK                 (0x000F)
@@ -3696,8 +3733,9 @@
 	Mpi26PCIeIOUnit1PhyData_t, *pMpi26PCIeIOUnit1PhyData_t;
 
 /*values for LinkFlags */
-#define MPI26_PCIEIOUNIT1_LINKFLAGS_DIS_SRIS    (0x00)
-#define MPI26_PCIEIOUNIT1_LINKFLAGS_EN_SRIS     (0x01)
+#define MPI26_PCIEIOUNIT1_LINKFLAGS_DIS_SEPARATE_REFCLK     (0x00)
+#define MPI26_PCIEIOUNIT1_LINKFLAGS_SRIS_EN                 (0x01)
+#define MPI26_PCIEIOUNIT1_LINKFLAGS_SRNS_EN                 (0x02)
 
 /*
  *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
@@ -3714,7 +3752,7 @@
 	U16	AdditionalControlFlags;             /*0x0C */
 	U16	NVMeMaxQueueDepth;                  /*0x0E */
 	U8	NumPhys;                            /*0x10 */
-	U8	Reserved1;                          /*0x11 */
+	U8	DMDReportPCIe;                      /*0x11 */
 	U16	Reserved2;                          /*0x12 */
 	MPI26_PCIE_IO_UNIT1_PHY_DATA
 		PhyData[MPI26_PCIE_IOUNIT1_PHY_MAX];/*0x14 */
@@ -3736,6 +3774,12 @@
 #define MPI26_PCIEIOUNIT1_MAX_RATE_8_0                              (0x40)
 #define MPI26_PCIEIOUNIT1_MAX_RATE_16_0                             (0x50)
 
+/*values for PCIe IO Unit Page 1 DMDReportPCIe */
+#define MPI26_PCIEIOUNIT1_DMDRPT_UNIT_MASK                          (0x80)
+#define MPI26_PCIEIOUNIT1_DMDRPT_UNIT_1_SEC                         (0x00)
+#define MPI26_PCIEIOUNIT1_DMDRPT_UNIT_16_SEC                        (0x80)
+#define MPI26_PCIEIOUNIT1_DMDRPT_DELAY_TIME_MASK                    (0x7F)
+
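+/*
+ * Editor's illustration, not part of the upstream patch: converting
+ * the packed DMDReportPCIe byte into seconds with the masks above;
+ * the helper name is hypothetical.
+ */
+static inline U32 mpi26_dmd_report_delay_sec(U8 dmd_report)
+{
+	U32 unit = (dmd_report & MPI26_PCIEIOUNIT1_DMDRPT_UNIT_MASK) ?
+		   16 : 1;
+
+	return unit * (dmd_report & MPI26_PCIEIOUNIT1_DMDRPT_DELAY_TIME_MASK);
+}
+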
 /*see mpi2_pci.h for values for PCIe IO Unit Page 0 ControllerPhyDeviceInfo
  *values
  */
@@ -3788,6 +3832,9 @@
 
 /*use MPI26_PCIE_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
 
+/* defines for the Flags field */
+#define MPI26_PCIESWITCH1_2_RETIMER_PRESENCE         (0x0002)
+#define MPI26_PCIESWITCH1_RETIMER_PRESENCE           (0x0001)
 
 /****************************************************************************
 *  PCIe Device Config Pages (MPI v2.6 and later)
@@ -3849,19 +3896,21 @@
  *field
  */
 
-/*values for PCIe Device Page 0 Flags field */
-#define MPI26_PCIEDEV0_FLAGS_UNAUTHORIZED_DEVICE            (0x8000)
-#define MPI26_PCIEDEV0_FLAGS_ENABLED_FAST_PATH              (0x4000)
-#define MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE              (0x2000)
-#define MPI26_PCIEDEV0_FLAGS_ASYNCHRONOUS_NOTIFICATION      (0x0400)
-#define MPI26_PCIEDEV0_FLAGS_ATA_SW_PRESERVATION            (0x0200)
-#define MPI26_PCIEDEV0_FLAGS_UNSUPPORTED_DEVICE             (0x0100)
-#define MPI26_PCIEDEV0_FLAGS_ATA_48BIT_LBA_SUPPORTED        (0x0080)
-#define MPI26_PCIEDEV0_FLAGS_ATA_SMART_SUPPORTED            (0x0040)
-#define MPI26_PCIEDEV0_FLAGS_ATA_NCQ_SUPPORTED              (0x0020)
-#define MPI26_PCIEDEV0_FLAGS_ATA_FUA_SUPPORTED              (0x0010)
-#define MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID               (0x0002)
-#define MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT                 (0x0001)
+/*values for PCIe Device Page 0 Flags field*/
+#define MPI26_PCIEDEV0_FLAGS_2_RETIMER_PRESENCE             (0x00020000)
+#define MPI26_PCIEDEV0_FLAGS_RETIMER_PRESENCE               (0x00010000)
+#define MPI26_PCIEDEV0_FLAGS_UNAUTHORIZED_DEVICE            (0x00008000)
+#define MPI26_PCIEDEV0_FLAGS_ENABLED_FAST_PATH              (0x00004000)
+#define MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE              (0x00002000)
+#define MPI26_PCIEDEV0_FLAGS_ASYNCHRONOUS_NOTIFICATION      (0x00000400)
+#define MPI26_PCIEDEV0_FLAGS_ATA_SW_PRESERVATION            (0x00000200)
+#define MPI26_PCIEDEV0_FLAGS_UNSUPPORTED_DEVICE             (0x00000100)
+#define MPI26_PCIEDEV0_FLAGS_ATA_48BIT_LBA_SUPPORTED        (0x00000080)
+#define MPI26_PCIEDEV0_FLAGS_ATA_SMART_SUPPORTED            (0x00000040)
+#define MPI26_PCIEDEV0_FLAGS_ATA_NCQ_SUPPORTED              (0x00000020)
+#define MPI26_PCIEDEV0_FLAGS_ATA_FUA_SUPPORTED              (0x00000010)
+#define MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID               (0x00000002)
+#define MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT                 (0x00000001)
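+
+/*
+ * Editor's illustration, not part of the upstream patch: with the
+ * Flags values widened to 32 bits, either retimer bit can be tested
+ * directly; the helper name is hypothetical.
+ */
+static inline int mpi26_pciedev0_retimer_present(U32 flags)
+{
+	return !!(flags & (MPI26_PCIEDEV0_FLAGS_RETIMER_PRESENCE |
+			   MPI26_PCIEDEV0_FLAGS_2_RETIMER_PRESENCE));
+}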
 
 /* values for PCIe Device Page 0 SupportedLinkRates field */
 #define MPI26_PCIEDEV0_LINK_RATE_16_0_SUPPORTED             (0x08)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_image.h b/drivers/scsi/mpt3sas/mpi/mpi2_image.h
new file mode 100644
index 0000000..a3f6778
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_image.h
@@ -0,0 +1,509 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2016-2020 Broadcom Limited. All rights reserved.
+ *
+ *          Name: mpi2_image.h
+ * Description: Contains definitions for firmware and other component images
+ * Creation Date: 04/02/2018
+ *       Version: 02.06.04
+ *
+ *
+ * Version History
+ * ---------------
+ *
+ * Date      Version   Description
+ * --------  --------  ------------------------------------------------------
+ * 08-01-18  02.06.00  Initial version for MPI 2.6.5.
+ * 08-14-18  02.06.01  Corrected define for MPI26_IMAGE_HEADER_SIGNATURE0_MPI26
+ * 08-28-18  02.06.02  Added MPI2_EXT_IMAGE_TYPE_RDE
+ * 09-07-18  02.06.03  Added MPI26_EVENT_PCIE_TOPO_PI_16_LANES
+ * 12-17-18  02.06.04  Added MPI2_EXT_IMAGE_TYPE_PBLP.
+ *			Shortened some defines to be compatible with DOS.
+ */
+#ifndef MPI2_IMAGE_H
+#define MPI2_IMAGE_H
+
+
+/*FW Image Header */
+typedef struct _MPI2_FW_IMAGE_HEADER {
+	U32 Signature;		/*0x00 */
+	U32 Signature0;		/*0x04 */
+	U32 Signature1;		/*0x08 */
+	U32 Signature2;		/*0x0C */
+	MPI2_VERSION_UNION MPIVersion;	/*0x10 */
+	MPI2_VERSION_UNION FWVersion;	/*0x14 */
+	MPI2_VERSION_UNION NVDATAVersion;	/*0x18 */
+	MPI2_VERSION_UNION PackageVersion;	/*0x1C */
+	U16 VendorID;		/*0x20 */
+	U16 ProductID;		/*0x22 */
+	U16 ProtocolFlags;	/*0x24 */
+	U16 Reserved26;		/*0x26 */
+	U32 IOCCapabilities;	/*0x28 */
+	U32 ImageSize;		/*0x2C */
+	U32 NextImageHeaderOffset;	/*0x30 */
+	U32 Checksum;		/*0x34 */
+	U32 Reserved38;		/*0x38 */
+	U32 Reserved3C;		/*0x3C */
+	U32 Reserved40;		/*0x40 */
+	U32 Reserved44;		/*0x44 */
+	U32 Reserved48;		/*0x48 */
+	U32 Reserved4C;		/*0x4C */
+	U32 Reserved50;		/*0x50 */
+	U32 Reserved54;		/*0x54 */
+	U32 Reserved58;		/*0x58 */
+	U32 Reserved5C;		/*0x5C */
+	U32 BootFlags;		/*0x60 */
+	U32 FirmwareVersionNameWhat;	/*0x64 */
+	U8 FirmwareVersionName[32];	/*0x68 */
+	U32 VendorNameWhat;	/*0x88 */
+	U8 VendorName[32];	/*0x8C */
+	U32 PackageNameWhat;	/*0xAC */
+	U8 PackageName[32];	/*0xB0 */
+	U32 ReservedD0;		/*0xD0 */
+	U32 ReservedD4;		/*0xD4 */
+	U32 ReservedD8;		/*0xD8 */
+	U32 ReservedDC;		/*0xDC */
+	U32 ReservedE0;		/*0xE0 */
+	U32 ReservedE4;		/*0xE4 */
+	U32 ReservedE8;		/*0xE8 */
+	U32 ReservedEC;		/*0xEC */
+	U32 ReservedF0;		/*0xF0 */
+	U32 ReservedF4;		/*0xF4 */
+	U32 ReservedF8;		/*0xF8 */
+	U32 ReservedFC;		/*0xFC */
+} MPI2_FW_IMAGE_HEADER, *PTR_MPI2_FW_IMAGE_HEADER,
+	Mpi2FWImageHeader_t, *pMpi2FWImageHeader_t;
+
+/*Signature field */
+#define MPI2_FW_HEADER_SIGNATURE_OFFSET         (0x00)
+#define MPI2_FW_HEADER_SIGNATURE_MASK           (0xFF000000)
+#define MPI2_FW_HEADER_SIGNATURE                (0xEA000000)
+#define MPI26_FW_HEADER_SIGNATURE               (0xEB000000)
+
+/*Signature0 field */
+#define MPI2_FW_HEADER_SIGNATURE0_OFFSET        (0x04)
+#define MPI2_FW_HEADER_SIGNATURE0               (0x5AFAA55A)
+/*Last byte is defined by architecture */
+#define MPI26_FW_HEADER_SIGNATURE0_BASE         (0x5AEAA500)
+#define MPI26_FW_HEADER_SIGNATURE0_ARC_0        (0x5A)
+#define MPI26_FW_HEADER_SIGNATURE0_ARC_1        (0x00)
+#define MPI26_FW_HEADER_SIGNATURE0_ARC_2        (0x01)
+/*legacy (0x5AEAA55A) */
+#define MPI26_FW_HEADER_SIGNATURE0_ARC_3        (0x02)
+#define MPI26_FW_HEADER_SIGNATURE0 \
+	(MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_0)
+#define MPI26_FW_HEADER_SIGNATURE0_3516 \
+	(MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_1)
+#define MPI26_FW_HEADER_SIGNATURE0_4008 \
+	(MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_3)
+
+/*Signature1 field */
+#define MPI2_FW_HEADER_SIGNATURE1_OFFSET        (0x08)
+#define MPI2_FW_HEADER_SIGNATURE1               (0xA55AFAA5)
+#define MPI26_FW_HEADER_SIGNATURE1              (0xA55AEAA5)
+
+/*Signature2 field */
+#define MPI2_FW_HEADER_SIGNATURE2_OFFSET        (0x0C)
+#define MPI2_FW_HEADER_SIGNATURE2               (0x5AA55AFA)
+#define MPI26_FW_HEADER_SIGNATURE2              (0x5AA55AEA)
+
+/*defines for using the ProductID field */
+#define MPI2_FW_HEADER_PID_TYPE_MASK            (0xF000)
+#define MPI2_FW_HEADER_PID_TYPE_SAS             (0x2000)
+
+#define MPI2_FW_HEADER_PID_PROD_MASK                    (0x0F00)
+#define MPI2_FW_HEADER_PID_PROD_A                       (0x0000)
+#define MPI2_FW_HEADER_PID_PROD_TARGET_INITIATOR_SCSI   (0x0200)
+#define MPI2_FW_HEADER_PID_PROD_IR_SCSI                 (0x0700)
+
+#define MPI2_FW_HEADER_PID_FAMILY_MASK          (0x00FF)
+/*SAS ProductID Family bits */
+#define MPI2_FW_HEADER_PID_FAMILY_2108_SAS      (0x0013)
+#define MPI2_FW_HEADER_PID_FAMILY_2208_SAS      (0x0014)
+#define MPI25_FW_HEADER_PID_FAMILY_3108_SAS     (0x0021)
+#define MPI26_FW_HEADER_PID_FAMILY_3324_SAS     (0x0028)
+#define MPI26_FW_HEADER_PID_FAMILY_3516_SAS     (0x0031)
+
+/*use MPI2_IOCFACTS_PROTOCOL_ defines for ProtocolFlags field */
+
+/*use MPI2_IOCFACTS_CAPABILITY_ defines for IOCCapabilities field */
+
+#define MPI2_FW_HEADER_IMAGESIZE_OFFSET         (0x2C)
+#define MPI2_FW_HEADER_NEXTIMAGE_OFFSET         (0x30)
+
+#define MPI26_FW_HEADER_BOOTFLAGS_OFFSET          (0x60)
+#define MPI2_FW_HEADER_BOOTFLAGS_ISSI32M_FLAG     (0x00000001)
+#define MPI2_FW_HEADER_BOOTFLAGS_W25Q256JW_FLAG   (0x00000002)
+/*This image has an auto-discovery version of SPI */
+#define MPI2_FW_HEADER_BOOTFLAGS_AUTO_SPI_FLAG    (0x00000004)
+
+
+#define MPI2_FW_HEADER_VERNMHWAT_OFFSET         (0x64)
+
+#define MPI2_FW_HEADER_WHAT_SIGNATURE           (0x29232840)
+
+#define MPI2_FW_HEADER_SIZE                     (0x100)
+
+
+/****************************************************************************
+ *              Component Image Format and related defines                  *
+ ****************************************************************************/
+
+/*Maximum number of Hash Exclusion entries in a Component Image Header */
+#define MPI26_COMP_IMG_HDR_NUM_HASH_EXCL        (4)
+
+/*Hash Exclusion Format */
+typedef struct _MPI26_HASH_EXCLUSION_FORMAT {
+	U32 Offset;        /*0x00 */
+	U32 Size;          /*0x04 */
+} MPI26_HASH_EXCLUSION_FORMAT,
+	*PTR_MPI26_HASH_EXCLUSION_FORMAT,
+	Mpi26HashExclusionFormat_t,
+	*pMpi26HashExclusionFormat_t;
+
+/*Component Image Header */
+typedef struct _MPI26_COMPONENT_IMAGE_HEADER {
+	U32 Signature0;					/*0x00 */
+	U32 LoadAddress;				/*0x04 */
+	U32 DataSize;					/*0x08 */
+	U32 StartAddress;				/*0x0C */
+	U32 Signature1;					/*0x10 */
+	U32 FlashOffset;				/*0x14 */
+	U32 FlashSize;					/*0x18 */
+	U32 VersionStringOffset;			/*0x1C */
+	U32 BuildDateStringOffset;			/*0x20 */
+	U32 BuildTimeStringOffset;			/*0x24 */
+	U32 EnvironmentVariableOffset;			/*0x28 */
+	U32 ApplicationSpecific;			/*0x2C */
+	U32 Signature2;					/*0x30 */
+	U32 HeaderSize;					/*0x34 */
+	U32 Crc;					/*0x38 */
+	U8 NotFlashImage;				/*0x3C */
+	U8 Compressed;					/*0x3D */
+	U16 Reserved3E;					/*0x3E */
+	U32 SecondaryFlashOffset;			/*0x40 */
+	U32 Reserved44;					/*0x44 */
+	U32 Reserved48;					/*0x48 */
+	MPI2_VERSION_UNION RMCInterfaceVersion;		/*0x4C */
+	MPI2_VERSION_UNION Reserved50;			/*0x50 */
+	MPI2_VERSION_UNION FWVersion;			/*0x54 */
+	MPI2_VERSION_UNION NvdataVersion;		/*0x58 */
+	MPI26_HASH_EXCLUSION_FORMAT
+	HashExclusion[MPI26_COMP_IMG_HDR_NUM_HASH_EXCL];/*0x5C */
+	U32 NextImageHeaderOffset;			/*0x7C */
+	U32 Reserved80[32];				/*0x80 -- 0xFC */
+} MPI26_COMPONENT_IMAGE_HEADER,
+	*PTR_MPI26_COMPONENT_IMAGE_HEADER,
+	Mpi26ComponentImageHeader_t,
+	*pMpi26ComponentImageHeader_t;
+
+
+/**** Definitions for Signature0 field ****/
+#define MPI26_IMAGE_HEADER_SIGNATURE0_MPI26                     (0xEB000042)
+
+/**** Definitions for Signature1 field ****/
+#define MPI26_IMAGE_HEADER_SIG1_APPLICATION              (0x20505041)
+#define MPI26_IMAGE_HEADER_SIG1_CBB                      (0x20424243)
+#define MPI26_IMAGE_HEADER_SIG1_MFG                      (0x2047464D)
+#define MPI26_IMAGE_HEADER_SIG1_BIOS                     (0x534F4942)
+#define MPI26_IMAGE_HEADER_SIG1_HIIM                     (0x4D494948)
+#define MPI26_IMAGE_HEADER_SIG1_HIIA                     (0x41494948)
+#define MPI26_IMAGE_HEADER_SIG1_CPLD                     (0x444C5043)
+#define MPI26_IMAGE_HEADER_SIG1_SPD                      (0x20445053)
+#define MPI26_IMAGE_HEADER_SIG1_NVDATA                   (0x5444564E)
+#define MPI26_IMAGE_HEADER_SIG1_GAS_GAUGE                (0x20534147)
+#define MPI26_IMAGE_HEADER_SIG1_PBLP                     (0x504C4250)
+
+/**** Definitions for Signature2 field ****/
+#define MPI26_IMAGE_HEADER_SIGNATURE2_VALUE                    (0x50584546)
+
+/**** Offsets for Image Header Fields ****/
+#define MPI26_IMAGE_HEADER_SIGNATURE0_OFFSET                   (0x00)
+#define MPI26_IMAGE_HEADER_LOAD_ADDRESS_OFFSET                 (0x04)
+#define MPI26_IMAGE_HEADER_DATA_SIZE_OFFSET                    (0x08)
+#define MPI26_IMAGE_HEADER_START_ADDRESS_OFFSET                (0x0C)
+#define MPI26_IMAGE_HEADER_SIGNATURE1_OFFSET                   (0x10)
+#define MPI26_IMAGE_HEADER_FLASH_OFFSET_OFFSET                 (0x14)
+#define MPI26_IMAGE_HEADER_FLASH_SIZE_OFFSET                   (0x18)
+#define MPI26_IMAGE_HEADER_VERSION_STRING_OFFSET_OFFSET        (0x1C)
+#define MPI26_IMAGE_HEADER_BUILD_DATE_STRING_OFFSET_OFFSET     (0x20)
+#define MPI26_IMAGE_HEADER_BUILD_TIME_OFFSET_OFFSET            (0x24)
+#define MPI26_IMAGE_HEADER_ENVIROMENT_VAR_OFFSET_OFFSET        (0x28)
+#define MPI26_IMAGE_HEADER_APPLICATION_SPECIFIC_OFFSET         (0x2C)
+#define MPI26_IMAGE_HEADER_SIGNATURE2_OFFSET                   (0x30)
+#define MPI26_IMAGE_HEADER_HEADER_SIZE_OFFSET                  (0x34)
+#define MPI26_IMAGE_HEADER_CRC_OFFSET                          (0x38)
+#define MPI26_IMAGE_HEADER_NOT_FLASH_IMAGE_OFFSET              (0x3C)
+#define MPI26_IMAGE_HEADER_COMPRESSED_OFFSET                   (0x3D)
+#define MPI26_IMAGE_HEADER_SECONDARY_FLASH_OFFSET_OFFSET       (0x40)
+#define MPI26_IMAGE_HEADER_RMC_INTERFACE_VER_OFFSET            (0x4C)
+#define MPI26_IMAGE_HEADER_COMPONENT_IMAGE_VER_OFFSET          (0x54)
+#define MPI26_IMAGE_HEADER_HASH_EXCLUSION_OFFSET               (0x5C)
+#define MPI26_IMAGE_HEADER_NEXT_IMAGE_HEADER_OFFSET_OFFSET     (0x7C)
+
+
+#define MPI26_IMAGE_HEADER_SIZE                                (0x100)
+
+
+/*Extended Image Header */
+typedef struct _MPI2_EXT_IMAGE_HEADER {
+	U8 ImageType;		/*0x00 */
+	U8 Reserved1;		/*0x01 */
+	U16 Reserved2;		/*0x02 */
+	U32 Checksum;		/*0x04 */
+	U32 ImageSize;		/*0x08 */
+	U32 NextImageHeaderOffset;	/*0x0C */
+	U32 PackageVersion;	/*0x10 */
+	U32 Reserved3;		/*0x14 */
+	U32 Reserved4;		/*0x18 */
+	U32 Reserved5;		/*0x1C */
+	U8 IdentifyString[32];	/*0x20 */
+} MPI2_EXT_IMAGE_HEADER, *PTR_MPI2_EXT_IMAGE_HEADER,
+	Mpi2ExtImageHeader_t, *pMpi2ExtImageHeader_t;
+
+/*useful offsets */
+#define MPI2_EXT_IMAGE_IMAGETYPE_OFFSET         (0x00)
+#define MPI2_EXT_IMAGE_IMAGESIZE_OFFSET         (0x08)
+#define MPI2_EXT_IMAGE_NEXTIMAGE_OFFSET         (0x0C)
+#define MPI2_EXT_IMAGE_PACKAGEVERSION_OFFSET    (0x10)
+
+#define MPI2_EXT_IMAGE_HEADER_SIZE              (0x40)
+
+/*defines for the ImageType field */
+#define MPI2_EXT_IMAGE_TYPE_UNSPECIFIED             (0x00)
+#define MPI2_EXT_IMAGE_TYPE_FW                      (0x01)
+#define MPI2_EXT_IMAGE_TYPE_NVDATA                  (0x03)
+#define MPI2_EXT_IMAGE_TYPE_BOOTLOADER              (0x04)
+#define MPI2_EXT_IMAGE_TYPE_INITIALIZATION          (0x05)
+#define MPI2_EXT_IMAGE_TYPE_FLASH_LAYOUT            (0x06)
+#define MPI2_EXT_IMAGE_TYPE_SUPPORTED_DEVICES       (0x07)
+#define MPI2_EXT_IMAGE_TYPE_MEGARAID                (0x08)
+#define MPI2_EXT_IMAGE_TYPE_ENCRYPTED_HASH          (0x09)
+#define MPI2_EXT_IMAGE_TYPE_RDE                     (0x0A)
+#define MPI2_EXT_IMAGE_TYPE_PBLP                    (0x0B)
+#define MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC    (0x80)
+#define MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC    (0xFF)
+
+#define MPI2_EXT_IMAGE_TYPE_MAX (MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC)
+
+/*FLASH Layout Extended Image Data */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check RegionsPerLayout at runtime.
+ */
+#ifndef MPI2_FLASH_NUMBER_OF_REGIONS
+#define MPI2_FLASH_NUMBER_OF_REGIONS        (1)
+#endif
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check NumberOfLayouts at runtime.
+ */
+#ifndef MPI2_FLASH_NUMBER_OF_LAYOUTS
+#define MPI2_FLASH_NUMBER_OF_LAYOUTS        (1)
+#endif
+
+typedef struct _MPI2_FLASH_REGION {
+	U8 RegionType;		/*0x00 */
+	U8 Reserved1;		/*0x01 */
+	U16 Reserved2;		/*0x02 */
+	U32 RegionOffset;	/*0x04 */
+	U32 RegionSize;		/*0x08 */
+	U32 Reserved3;		/*0x0C */
+} MPI2_FLASH_REGION, *PTR_MPI2_FLASH_REGION,
+	Mpi2FlashRegion_t, *pMpi2FlashRegion_t;
+
+typedef struct _MPI2_FLASH_LAYOUT {
+	U32 FlashSize;		/*0x00 */
+	U32 Reserved1;		/*0x04 */
+	U32 Reserved2;		/*0x08 */
+	U32 Reserved3;		/*0x0C */
+	MPI2_FLASH_REGION Region[MPI2_FLASH_NUMBER_OF_REGIONS];	/*0x10 */
+} MPI2_FLASH_LAYOUT, *PTR_MPI2_FLASH_LAYOUT,
+	Mpi2FlashLayout_t, *pMpi2FlashLayout_t;
+
+typedef struct _MPI2_FLASH_LAYOUT_DATA {
+	U8 ImageRevision;	/*0x00 */
+	U8 Reserved1;		/*0x01 */
+	U8 SizeOfRegion;	/*0x02 */
+	U8 Reserved2;		/*0x03 */
+	U16 NumberOfLayouts;	/*0x04 */
+	U16 RegionsPerLayout;	/*0x06 */
+	U16 MinimumSectorAlignment;	/*0x08 */
+	U16 Reserved3;		/*0x0A */
+	U32 Reserved4;		/*0x0C */
+	MPI2_FLASH_LAYOUT Layout[MPI2_FLASH_NUMBER_OF_LAYOUTS];	/*0x10 */
+} MPI2_FLASH_LAYOUT_DATA, *PTR_MPI2_FLASH_LAYOUT_DATA,
+	Mpi2FlashLayoutData_t, *pMpi2FlashLayoutData_t;
+
+/*defines for the RegionType field */
+#define MPI2_FLASH_REGION_UNUSED                (0x00)
+#define MPI2_FLASH_REGION_FIRMWARE              (0x01)
+#define MPI2_FLASH_REGION_BIOS                  (0x02)
+#define MPI2_FLASH_REGION_NVDATA                (0x03)
+#define MPI2_FLASH_REGION_FIRMWARE_BACKUP       (0x05)
+#define MPI2_FLASH_REGION_MFG_INFORMATION       (0x06)
+#define MPI2_FLASH_REGION_CONFIG_1              (0x07)
+#define MPI2_FLASH_REGION_CONFIG_2              (0x08)
+#define MPI2_FLASH_REGION_MEGARAID              (0x09)
+#define MPI2_FLASH_REGION_COMMON_BOOT_BLOCK     (0x0A)
+#define MPI2_FLASH_REGION_INIT (MPI2_FLASH_REGION_COMMON_BOOT_BLOCK)
+#define MPI2_FLASH_REGION_CBB_BACKUP            (0x0D)
+#define MPI2_FLASH_REGION_SBR                   (0x0E)
+#define MPI2_FLASH_REGION_SBR_BACKUP            (0x0F)
+#define MPI2_FLASH_REGION_HIIM                  (0x10)
+#define MPI2_FLASH_REGION_HIIA                  (0x11)
+#define MPI2_FLASH_REGION_CTLR                  (0x12)
+#define MPI2_FLASH_REGION_IMR_FIRMWARE          (0x13)
+#define MPI2_FLASH_REGION_MR_NVDATA             (0x14)
+#define MPI2_FLASH_REGION_CPLD                  (0x15)
+#define MPI2_FLASH_REGION_PSOC                  (0x16)
+
+/*ImageRevision */
+#define MPI2_FLASH_LAYOUT_IMAGE_REVISION        (0x00)
+
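+/*
+ * Editor's illustration, not part of the upstream patch: because host
+ * code keeps MPI2_FLASH_NUMBER_OF_REGIONS at one and reads
+ * RegionsPerLayout at runtime, a region walk deliberately steps past
+ * the declared one-element array; the function name is hypothetical.
+ */
+static void mpi2_walk_first_layout(Mpi2FlashLayoutData_t *data)
+{
+	Mpi2FlashRegion_t *region = &data->Layout[0].Region[0];
+	U16 i;
+
+	for (i = 0; i < data->RegionsPerLayout; i++, region++) {
+		/* region->RegionType is one of the MPI2_FLASH_REGION_*
+		 * defines above; region->RegionOffset/RegionSize locate
+		 * it in flash.
+		 */
+	}
+}
+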
+/*Supported Devices Extended Image Data */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check NumberOfDevices at runtime.
+ */
+#ifndef MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES
+#define MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES    (1)
+#endif
+
+typedef struct _MPI2_SUPPORTED_DEVICE {
+	U16 DeviceID;		/*0x00 */
+	U16 VendorID;		/*0x02 */
+	U16 DeviceIDMask;	/*0x04 */
+	U16 Reserved1;		/*0x06 */
+	U8 LowPCIRev;		/*0x08 */
+	U8 HighPCIRev;		/*0x09 */
+	U16 Reserved2;		/*0x0A */
+	U32 Reserved3;		/*0x0C */
+} MPI2_SUPPORTED_DEVICE, *PTR_MPI2_SUPPORTED_DEVICE,
+	Mpi2SupportedDevice_t, *pMpi2SupportedDevice_t;
+
+typedef struct _MPI2_SUPPORTED_DEVICES_DATA {
+	U8 ImageRevision;	/*0x00 */
+	U8 Reserved1;		/*0x01 */
+	U8 NumberOfDevices;	/*0x02 */
+	U8 Reserved2;		/*0x03 */
+	U32 Reserved3;		/*0x04 */
+	MPI2_SUPPORTED_DEVICE
+	SupportedDevice[MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES];/*0x08 */
+} MPI2_SUPPORTED_DEVICES_DATA, *PTR_MPI2_SUPPORTED_DEVICES_DATA,
+	Mpi2SupportedDevicesData_t, *pMpi2SupportedDevicesData_t;
+
+/*ImageRevision */
+#define MPI2_SUPPORTED_DEVICES_IMAGE_REVISION   (0x00)
+
+/*Init Extended Image Data */
+
+typedef struct _MPI2_INIT_IMAGE_FOOTER {
+	U32 BootFlags;		/*0x00 */
+	U32 ImageSize;		/*0x04 */
+	U32 Signature0;		/*0x08 */
+	U32 Signature1;		/*0x0C */
+	U32 Signature2;		/*0x10 */
+	U32 ResetVector;	/*0x14 */
+} MPI2_INIT_IMAGE_FOOTER, *PTR_MPI2_INIT_IMAGE_FOOTER,
+	Mpi2InitImageFooter_t, *pMpi2InitImageFooter_t;
+
+/*defines for the BootFlags field */
+#define MPI2_INIT_IMAGE_BOOTFLAGS_OFFSET        (0x00)
+
+/*defines for the ImageSize field */
+#define MPI2_INIT_IMAGE_IMAGESIZE_OFFSET        (0x04)
+
+/*defines for the Signature0 field */
+#define MPI2_INIT_IMAGE_SIGNATURE0_OFFSET       (0x08)
+#define MPI2_INIT_IMAGE_SIGNATURE0              (0x5AA55AEA)
+
+/*defines for the Signature1 field */
+#define MPI2_INIT_IMAGE_SIGNATURE1_OFFSET       (0x0C)
+#define MPI2_INIT_IMAGE_SIGNATURE1              (0xA55AEAA5)
+
+/*defines for the Signature2 field */
+#define MPI2_INIT_IMAGE_SIGNATURE2_OFFSET       (0x10)
+#define MPI2_INIT_IMAGE_SIGNATURE2              (0x5AEAA55A)
+
+/*Signature fields as individual bytes */
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_0        (0xEA)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_1        (0x5A)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_2        (0xA5)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_3        (0x5A)
+
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_4        (0xA5)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_5        (0xEA)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_6        (0x5A)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_7        (0xA5)
+
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_8        (0x5A)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_9        (0xA5)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_A        (0xEA)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_B        (0x5A)
+
+/*defines for the ResetVector field */
+#define MPI2_INIT_IMAGE_RESETVECTOR_OFFSET      (0x14)
+
+
+/* Encrypted Hash Extended Image Data */
+
+typedef struct _MPI25_ENCRYPTED_HASH_ENTRY {
+	U8		HashImageType;		/*0x00 */
+	U8		HashAlgorithm;		/*0x01 */
+	U8		EncryptionAlgorithm;	/*0x02 */
+	U8		Reserved1;		/*0x03 */
+	U32		Reserved2;		/*0x04 */
+	U32		EncryptedHash[1];	/*0x08 */ /* variable length */
+} MPI25_ENCRYPTED_HASH_ENTRY, *PTR_MPI25_ENCRYPTED_HASH_ENTRY,
+Mpi25EncryptedHashEntry_t, *pMpi25EncryptedHashEntry_t;
+
+/* values for HashImageType */
+#define MPI25_HASH_IMAGE_TYPE_UNUSED            (0x00)
+#define MPI25_HASH_IMAGE_TYPE_FIRMWARE          (0x01)
+#define MPI25_HASH_IMAGE_TYPE_BIOS              (0x02)
+
+#define MPI26_HASH_IMAGE_TYPE_UNUSED            (0x00)
+#define MPI26_HASH_IMAGE_TYPE_FIRMWARE          (0x01)
+#define MPI26_HASH_IMAGE_TYPE_BIOS              (0x02)
+#define MPI26_HASH_IMAGE_TYPE_KEY_HASH          (0x03)
+
+/* values for HashAlgorithm */
+#define MPI25_HASH_ALGORITHM_UNUSED             (0x00)
+#define MPI25_HASH_ALGORITHM_SHA256             (0x01)
+
+#define MPI26_HASH_ALGORITHM_VER_MASK		(0xE0)
+#define MPI26_HASH_ALGORITHM_VER_NONE		(0x00)
+#define MPI26_HASH_ALGORITHM_VER_SHA1		(0x20)
+#define MPI26_HASH_ALGORITHM_VER_SHA2		(0x40)
+#define MPI26_HASH_ALGORITHM_VER_SHA3		(0x60)
+#define MPI26_HASH_ALGORITHM_SIZE_MASK		(0x1F)
+#define MPI26_HASH_ALGORITHM_SIZE_256           (0x01)
+#define MPI26_HASH_ALGORITHM_SIZE_512           (0x02)
+
+
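+/*
+ * Editor's illustration, not part of the upstream patch: splitting the
+ * MPI26 HashAlgorithm byte into its version and size fields with the
+ * masks above; the helper name is hypothetical.
+ */
+static inline void mpi26_hash_alg_decode(U8 alg, U8 *ver, U8 *size)
+{
+	*ver = alg & MPI26_HASH_ALGORITHM_VER_MASK;
+	*size = alg & MPI26_HASH_ALGORITHM_SIZE_MASK;
+}
+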
+/* values for EncryptionAlgorithm */
+#define MPI25_ENCRYPTION_ALG_UNUSED             (0x00)
+#define MPI25_ENCRYPTION_ALG_RSA256             (0x01)
+
+#define MPI26_ENCRYPTION_ALG_UNUSED             (0x00)
+#define MPI26_ENCRYPTION_ALG_RSA256             (0x01)
+#define MPI26_ENCRYPTION_ALG_RSA512             (0x02)
+#define MPI26_ENCRYPTION_ALG_RSA1024            (0x03)
+#define MPI26_ENCRYPTION_ALG_RSA2048            (0x04)
+#define MPI26_ENCRYPTION_ALG_RSA4096            (0x05)
+
+typedef struct _MPI25_ENCRYPTED_HASH_DATA {
+	U8				ImageVersion;		/*0x00 */
+	U8				NumHash;		/*0x01 */
+	U16				Reserved1;		/*0x02 */
+	U32				Reserved2;		/*0x04 */
+	MPI25_ENCRYPTED_HASH_ENTRY	EncryptedHashEntry[1];  /*0x08 */
+} MPI25_ENCRYPTED_HASH_DATA, *PTR_MPI25_ENCRYPTED_HASH_DATA,
+Mpi25EncryptedHashData_t, *pMpi25EncryptedHashData_t;
+
+
+#endif /* MPI2_IMAGE_H */
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_init.h b/drivers/scsi/mpt3sas/mpi/mpi2_init.h
index 6213ce6..8f1b903 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_init.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_init.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright 2000-2015 Avago Technologies.  All rights reserved.
+ * Copyright 2000-2020 Broadcom Inc. All rights reserved.
  *
  *
  *          Name:  mpi2_init.h
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
index 1faec3a..68ea408 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
@@ -1,13 +1,13 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright 2000-2015 Avago Technologies.  All rights reserved.
+ * Copyright 2000-2020 Broadcom Inc. All rights reserved.
  *
  *
  *          Name:  mpi2_ioc.h
  *         Title:  MPI IOC, Port, Event, FW Download, and FW Upload messages
  * Creation Date:  October 11, 2006
  *
- * mpi2_ioc.h Version:  02.00.34
+ * mpi2_ioc.h Version:  02.00.37
  *
  * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
  *       prefix are for use only on MPI v2.5 products, and must not be used
@@ -171,6 +171,10 @@
  * 09-29-17   02.00.34 Added MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED
  *                     to the ReasonCode field in PCIe Device Status Change
  *                     Event Data.
+ * 07-22-18   02.00.35 Added FW_DOWNLOAD_ITYPE_CPLD and _PSOC.
+ *                     Moved FW image definitions into new mpi2_image.h.
+ * 08-14-18   02.00.36 Fixed definition of MPI2_FW_DOWNLOAD_ITYPE_PSOC (0x16)
+ * 09-07-18   02.00.37 Added MPI26_EVENT_PCIE_TOPO_PI_16_LANES
  * --------------------------------------------------------------------------
  */
 
@@ -1255,6 +1259,7 @@
 #define MPI26_EVENT_PCIE_TOPO_PI_2_LANES                    (0x20)
 #define MPI26_EVENT_PCIE_TOPO_PI_4_LANES                    (0x30)
 #define MPI26_EVENT_PCIE_TOPO_PI_8_LANES                    (0x40)
+#define MPI26_EVENT_PCIE_TOPO_PI_16_LANES                   (0x50)
 
 #define MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK                  (0x0F)
 #define MPI26_EVENT_PCIE_TOPO_PI_RATE_UNKNOWN               (0x00)
@@ -1450,7 +1455,11 @@
 #define MPI2_FW_DOWNLOAD_ITYPE_CTLR                 (0x12)
 #define MPI2_FW_DOWNLOAD_ITYPE_IMR_FIRMWARE         (0x13)
 #define MPI2_FW_DOWNLOAD_ITYPE_MR_NVDATA            (0x14)
+/*MPI v2.6 and newer */
+#define MPI2_FW_DOWNLOAD_ITYPE_CPLD                 (0x15)
+#define MPI2_FW_DOWNLOAD_ITYPE_PSOC                 (0x16)
 #define MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC (0xF0)
+#define MPI2_FW_DOWNLOAD_ITYPE_TERMINATE            (0xFF)
 
 /*MPI v2.0 FWDownload TransactionContext Element */
 typedef struct _MPI2_FW_DOWNLOAD_TCSGE {
@@ -1597,352 +1606,6 @@
 } MPI2_FW_UPLOAD_REPLY, *PTR_MPI2_FW_UPLOAD_REPLY,
 	Mpi2FWUploadReply_t, *pMPi2FWUploadReply_t;
 
-/*FW Image Header */
-typedef struct _MPI2_FW_IMAGE_HEADER {
-	U32 Signature;		/*0x00 */
-	U32 Signature0;		/*0x04 */
-	U32 Signature1;		/*0x08 */
-	U32 Signature2;		/*0x0C */
-	MPI2_VERSION_UNION MPIVersion;	/*0x10 */
-	MPI2_VERSION_UNION FWVersion;	/*0x14 */
-	MPI2_VERSION_UNION NVDATAVersion;	/*0x18 */
-	MPI2_VERSION_UNION PackageVersion;	/*0x1C */
-	U16 VendorID;		/*0x20 */
-	U16 ProductID;		/*0x22 */
-	U16 ProtocolFlags;	/*0x24 */
-	U16 Reserved26;		/*0x26 */
-	U32 IOCCapabilities;	/*0x28 */
-	U32 ImageSize;		/*0x2C */
-	U32 NextImageHeaderOffset;	/*0x30 */
-	U32 Checksum;		/*0x34 */
-	U32 Reserved38;		/*0x38 */
-	U32 Reserved3C;		/*0x3C */
-	U32 Reserved40;		/*0x40 */
-	U32 Reserved44;		/*0x44 */
-	U32 Reserved48;		/*0x48 */
-	U32 Reserved4C;		/*0x4C */
-	U32 Reserved50;		/*0x50 */
-	U32 Reserved54;		/*0x54 */
-	U32 Reserved58;		/*0x58 */
-	U32 Reserved5C;		/*0x5C */
-	U32 BootFlags;		/*0x60 */
-	U32 FirmwareVersionNameWhat;	/*0x64 */
-	U8 FirmwareVersionName[32];	/*0x68 */
-	U32 VendorNameWhat;	/*0x88 */
-	U8 VendorName[32];	/*0x8C */
-	U32 PackageNameWhat;	/*0x88 */
-	U8 PackageName[32];	/*0x8C */
-	U32 ReservedD0;		/*0xD0 */
-	U32 ReservedD4;		/*0xD4 */
-	U32 ReservedD8;		/*0xD8 */
-	U32 ReservedDC;		/*0xDC */
-	U32 ReservedE0;		/*0xE0 */
-	U32 ReservedE4;		/*0xE4 */
-	U32 ReservedE8;		/*0xE8 */
-	U32 ReservedEC;		/*0xEC */
-	U32 ReservedF0;		/*0xF0 */
-	U32 ReservedF4;		/*0xF4 */
-	U32 ReservedF8;		/*0xF8 */
-	U32 ReservedFC;		/*0xFC */
-} MPI2_FW_IMAGE_HEADER, *PTR_MPI2_FW_IMAGE_HEADER,
-	Mpi2FWImageHeader_t, *pMpi2FWImageHeader_t;
-
-/*Signature field */
-#define MPI2_FW_HEADER_SIGNATURE_OFFSET         (0x00)
-#define MPI2_FW_HEADER_SIGNATURE_MASK           (0xFF000000)
-#define MPI2_FW_HEADER_SIGNATURE                (0xEA000000)
-#define MPI26_FW_HEADER_SIGNATURE               (0xEB000000)
-
-/*Signature0 field */
-#define MPI2_FW_HEADER_SIGNATURE0_OFFSET        (0x04)
-#define MPI2_FW_HEADER_SIGNATURE0               (0x5AFAA55A)
-/* Last byte is defined by architecture */
-#define MPI26_FW_HEADER_SIGNATURE0_BASE         (0x5AEAA500)
-#define MPI26_FW_HEADER_SIGNATURE0_ARC_0        (0x5A)
-#define MPI26_FW_HEADER_SIGNATURE0_ARC_1        (0x00)
-#define MPI26_FW_HEADER_SIGNATURE0_ARC_2        (0x01)
-/* legacy (0x5AEAA55A) */
-#define MPI26_FW_HEADER_SIGNATURE0_ARC_3        (0x02)
-#define MPI26_FW_HEADER_SIGNATURE0 \
-	(MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_0)
-#define MPI26_FW_HEADER_SIGNATURE0_3516 \
-	(MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_1)
-#define MPI26_FW_HEADER_SIGNATURE0_4008 \
-	(MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_3)
-
-/*Signature1 field */
-#define MPI2_FW_HEADER_SIGNATURE1_OFFSET        (0x08)
-#define MPI2_FW_HEADER_SIGNATURE1               (0xA55AFAA5)
-#define MPI26_FW_HEADER_SIGNATURE1              (0xA55AEAA5)
-
-/*Signature2 field */
-#define MPI2_FW_HEADER_SIGNATURE2_OFFSET        (0x0C)
-#define MPI2_FW_HEADER_SIGNATURE2               (0x5AA55AFA)
-#define MPI26_FW_HEADER_SIGNATURE2              (0x5AA55AEA)
-
-/*defines for using the ProductID field */
-#define MPI2_FW_HEADER_PID_TYPE_MASK            (0xF000)
-#define MPI2_FW_HEADER_PID_TYPE_SAS             (0x2000)
-
-#define MPI2_FW_HEADER_PID_PROD_MASK                    (0x0F00)
-#define MPI2_FW_HEADER_PID_PROD_A                       (0x0000)
-#define MPI2_FW_HEADER_PID_PROD_TARGET_INITIATOR_SCSI   (0x0200)
-#define MPI2_FW_HEADER_PID_PROD_IR_SCSI                 (0x0700)
-
-#define MPI2_FW_HEADER_PID_FAMILY_MASK          (0x00FF)
-/*SAS ProductID Family bits */
-#define MPI2_FW_HEADER_PID_FAMILY_2108_SAS      (0x0013)
-#define MPI2_FW_HEADER_PID_FAMILY_2208_SAS      (0x0014)
-#define MPI25_FW_HEADER_PID_FAMILY_3108_SAS     (0x0021)
-#define MPI26_FW_HEADER_PID_FAMILY_3324_SAS     (0x0028)
-#define MPI26_FW_HEADER_PID_FAMILY_3516_SAS     (0x0031)
-
-/*use MPI2_IOCFACTS_PROTOCOL_ defines for ProtocolFlags field */
-
-/*use MPI2_IOCFACTS_CAPABILITY_ defines for IOCCapabilities field */
-
-#define MPI2_FW_HEADER_IMAGESIZE_OFFSET         (0x2C)
-#define MPI2_FW_HEADER_NEXTIMAGE_OFFSET         (0x30)
-#define MPI26_FW_HEADER_BOOTFLAGS_OFFSET        (0x60)
-#define MPI2_FW_HEADER_VERNMHWAT_OFFSET         (0x64)
-
-#define MPI2_FW_HEADER_WHAT_SIGNATURE           (0x29232840)
-
-#define MPI2_FW_HEADER_SIZE                     (0x100)
-
-/*Extended Image Header */
-typedef struct _MPI2_EXT_IMAGE_HEADER {
-	U8 ImageType;		/*0x00 */
-	U8 Reserved1;		/*0x01 */
-	U16 Reserved2;		/*0x02 */
-	U32 Checksum;		/*0x04 */
-	U32 ImageSize;		/*0x08 */
-	U32 NextImageHeaderOffset;	/*0x0C */
-	U32 PackageVersion;	/*0x10 */
-	U32 Reserved3;		/*0x14 */
-	U32 Reserved4;		/*0x18 */
-	U32 Reserved5;		/*0x1C */
-	U8 IdentifyString[32];	/*0x20 */
-} MPI2_EXT_IMAGE_HEADER, *PTR_MPI2_EXT_IMAGE_HEADER,
-	Mpi2ExtImageHeader_t, *pMpi2ExtImageHeader_t;
-
-/*useful offsets */
-#define MPI2_EXT_IMAGE_IMAGETYPE_OFFSET         (0x00)
-#define MPI2_EXT_IMAGE_IMAGESIZE_OFFSET         (0x08)
-#define MPI2_EXT_IMAGE_NEXTIMAGE_OFFSET         (0x0C)
-
-#define MPI2_EXT_IMAGE_HEADER_SIZE              (0x40)
-
-/*defines for the ImageType field */
-#define MPI2_EXT_IMAGE_TYPE_UNSPECIFIED             (0x00)
-#define MPI2_EXT_IMAGE_TYPE_FW                      (0x01)
-#define MPI2_EXT_IMAGE_TYPE_NVDATA                  (0x03)
-#define MPI2_EXT_IMAGE_TYPE_BOOTLOADER              (0x04)
-#define MPI2_EXT_IMAGE_TYPE_INITIALIZATION          (0x05)
-#define MPI2_EXT_IMAGE_TYPE_FLASH_LAYOUT            (0x06)
-#define MPI2_EXT_IMAGE_TYPE_SUPPORTED_DEVICES       (0x07)
-#define MPI2_EXT_IMAGE_TYPE_MEGARAID                (0x08)
-#define MPI2_EXT_IMAGE_TYPE_ENCRYPTED_HASH          (0x09)
-#define MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC    (0x80)
-#define MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC    (0xFF)
-
-#define MPI2_EXT_IMAGE_TYPE_MAX (MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC)
-
-/*FLASH Layout Extended Image Data */
-
-/*
- *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- *one and check RegionsPerLayout at runtime.
- */
-#ifndef MPI2_FLASH_NUMBER_OF_REGIONS
-#define MPI2_FLASH_NUMBER_OF_REGIONS        (1)
-#endif
-
-/*
- *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- *one and check NumberOfLayouts at runtime.
- */
-#ifndef MPI2_FLASH_NUMBER_OF_LAYOUTS
-#define MPI2_FLASH_NUMBER_OF_LAYOUTS        (1)
-#endif
-
-typedef struct _MPI2_FLASH_REGION {
-	U8 RegionType;		/*0x00 */
-	U8 Reserved1;		/*0x01 */
-	U16 Reserved2;		/*0x02 */
-	U32 RegionOffset;	/*0x04 */
-	U32 RegionSize;		/*0x08 */
-	U32 Reserved3;		/*0x0C */
-} MPI2_FLASH_REGION, *PTR_MPI2_FLASH_REGION,
-	Mpi2FlashRegion_t, *pMpi2FlashRegion_t;
-
-typedef struct _MPI2_FLASH_LAYOUT {
-	U32 FlashSize;		/*0x00 */
-	U32 Reserved1;		/*0x04 */
-	U32 Reserved2;		/*0x08 */
-	U32 Reserved3;		/*0x0C */
-	MPI2_FLASH_REGION Region[MPI2_FLASH_NUMBER_OF_REGIONS];	/*0x10 */
-} MPI2_FLASH_LAYOUT, *PTR_MPI2_FLASH_LAYOUT,
-	Mpi2FlashLayout_t, *pMpi2FlashLayout_t;
-
-typedef struct _MPI2_FLASH_LAYOUT_DATA {
-	U8 ImageRevision;	/*0x00 */
-	U8 Reserved1;		/*0x01 */
-	U8 SizeOfRegion;	/*0x02 */
-	U8 Reserved2;		/*0x03 */
-	U16 NumberOfLayouts;	/*0x04 */
-	U16 RegionsPerLayout;	/*0x06 */
-	U16 MinimumSectorAlignment;	/*0x08 */
-	U16 Reserved3;		/*0x0A */
-	U32 Reserved4;		/*0x0C */
-	MPI2_FLASH_LAYOUT Layout[MPI2_FLASH_NUMBER_OF_LAYOUTS];	/*0x10 */
-} MPI2_FLASH_LAYOUT_DATA, *PTR_MPI2_FLASH_LAYOUT_DATA,
-	Mpi2FlashLayoutData_t, *pMpi2FlashLayoutData_t;
-
-/*defines for the RegionType field */
-#define MPI2_FLASH_REGION_UNUSED                (0x00)
-#define MPI2_FLASH_REGION_FIRMWARE              (0x01)
-#define MPI2_FLASH_REGION_BIOS                  (0x02)
-#define MPI2_FLASH_REGION_NVDATA                (0x03)
-#define MPI2_FLASH_REGION_FIRMWARE_BACKUP       (0x05)
-#define MPI2_FLASH_REGION_MFG_INFORMATION       (0x06)
-#define MPI2_FLASH_REGION_CONFIG_1              (0x07)
-#define MPI2_FLASH_REGION_CONFIG_2              (0x08)
-#define MPI2_FLASH_REGION_MEGARAID              (0x09)
-#define MPI2_FLASH_REGION_COMMON_BOOT_BLOCK     (0x0A)
-#define MPI2_FLASH_REGION_INIT (MPI2_FLASH_REGION_COMMON_BOOT_BLOCK)
-#define MPI2_FLASH_REGION_CBB_BACKUP            (0x0D)
-#define MPI2_FLASH_REGION_SBR                   (0x0E)
-#define MPI2_FLASH_REGION_SBR_BACKUP            (0x0F)
-#define MPI2_FLASH_REGION_HIIM                  (0x10)
-#define MPI2_FLASH_REGION_HIIA                  (0x11)
-#define MPI2_FLASH_REGION_CTLR                  (0x12)
-#define MPI2_FLASH_REGION_IMR_FIRMWARE          (0x13)
-#define MPI2_FLASH_REGION_MR_NVDATA             (0x14)
-
-/*ImageRevision */
-#define MPI2_FLASH_LAYOUT_IMAGE_REVISION        (0x00)
-
-/*Supported Devices Extended Image Data */
-
-/*
- *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- *one and check NumberOfDevices at runtime.
- */
-#ifndef MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES
-#define MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES    (1)
-#endif
-
-typedef struct _MPI2_SUPPORTED_DEVICE {
-	U16 DeviceID;		/*0x00 */
-	U16 VendorID;		/*0x02 */
-	U16 DeviceIDMask;	/*0x04 */
-	U16 Reserved1;		/*0x06 */
-	U8 LowPCIRev;		/*0x08 */
-	U8 HighPCIRev;		/*0x09 */
-	U16 Reserved2;		/*0x0A */
-	U32 Reserved3;		/*0x0C */
-} MPI2_SUPPORTED_DEVICE, *PTR_MPI2_SUPPORTED_DEVICE,
-	Mpi2SupportedDevice_t, *pMpi2SupportedDevice_t;
-
-typedef struct _MPI2_SUPPORTED_DEVICES_DATA {
-	U8 ImageRevision;	/*0x00 */
-	U8 Reserved1;		/*0x01 */
-	U8 NumberOfDevices;	/*0x02 */
-	U8 Reserved2;		/*0x03 */
-	U32 Reserved3;		/*0x04 */
-	MPI2_SUPPORTED_DEVICE
-	SupportedDevice[MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES];/*0x08 */
-} MPI2_SUPPORTED_DEVICES_DATA, *PTR_MPI2_SUPPORTED_DEVICES_DATA,
-	Mpi2SupportedDevicesData_t, *pMpi2SupportedDevicesData_t;
-
-/*ImageRevision */
-#define MPI2_SUPPORTED_DEVICES_IMAGE_REVISION   (0x00)
-
-/*Init Extended Image Data */
-
-typedef struct _MPI2_INIT_IMAGE_FOOTER {
-	U32 BootFlags;		/*0x00 */
-	U32 ImageSize;		/*0x04 */
-	U32 Signature0;		/*0x08 */
-	U32 Signature1;		/*0x0C */
-	U32 Signature2;		/*0x10 */
-	U32 ResetVector;	/*0x14 */
-} MPI2_INIT_IMAGE_FOOTER, *PTR_MPI2_INIT_IMAGE_FOOTER,
-	Mpi2InitImageFooter_t, *pMpi2InitImageFooter_t;
-
-/*defines for the BootFlags field */
-#define MPI2_INIT_IMAGE_BOOTFLAGS_OFFSET        (0x00)
-
-/*defines for the ImageSize field */
-#define MPI2_INIT_IMAGE_IMAGESIZE_OFFSET        (0x04)
-
-/*defines for the Signature0 field */
-#define MPI2_INIT_IMAGE_SIGNATURE0_OFFSET       (0x08)
-#define MPI2_INIT_IMAGE_SIGNATURE0              (0x5AA55AEA)
-
-/*defines for the Signature1 field */
-#define MPI2_INIT_IMAGE_SIGNATURE1_OFFSET       (0x0C)
-#define MPI2_INIT_IMAGE_SIGNATURE1              (0xA55AEAA5)
-
-/*defines for the Signature2 field */
-#define MPI2_INIT_IMAGE_SIGNATURE2_OFFSET       (0x10)
-#define MPI2_INIT_IMAGE_SIGNATURE2              (0x5AEAA55A)
-
-/*Signature fields as individual bytes */
-#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_0        (0xEA)
-#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_1        (0x5A)
-#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_2        (0xA5)
-#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_3        (0x5A)
-
-#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_4        (0xA5)
-#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_5        (0xEA)
-#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_6        (0x5A)
-#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_7        (0xA5)
-
-#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_8        (0x5A)
-#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_9        (0xA5)
-#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_A        (0xEA)
-#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_B        (0x5A)
-
-/*defines for the ResetVector field */
-#define MPI2_INIT_IMAGE_RESETVECTOR_OFFSET      (0x14)
-
-
-/* Encrypted Hash Extended Image Data */
-
-typedef struct _MPI25_ENCRYPTED_HASH_ENTRY {
-	U8		HashImageType;		/* 0x00 */
-	U8		HashAlgorithm;		/* 0x01 */
-	U8		EncryptionAlgorithm;	/* 0x02 */
-	U8		Reserved1;		/* 0x03 */
-	U32		Reserved2;		/* 0x04 */
-	U32		EncryptedHash[1];	/* 0x08 */ /* variable length */
-} MPI25_ENCRYPTED_HASH_ENTRY, *PTR_MPI25_ENCRYPTED_HASH_ENTRY,
-Mpi25EncryptedHashEntry_t, *pMpi25EncryptedHashEntry_t;
-
-/* values for HashImageType */
-#define MPI25_HASH_IMAGE_TYPE_UNUSED		(0x00)
-#define MPI25_HASH_IMAGE_TYPE_FIRMWARE		(0x01)
-#define MPI25_HASH_IMAGE_TYPE_BIOS              (0x02)
-
-/* values for HashAlgorithm */
-#define MPI25_HASH_ALGORITHM_UNUSED		(0x00)
-#define MPI25_HASH_ALGORITHM_SHA256		(0x01)
-
-/* values for EncryptionAlgorithm */
-#define MPI25_ENCRYPTION_ALG_UNUSED		(0x00)
-#define MPI25_ENCRYPTION_ALG_RSA256		(0x01)
-
-typedef struct _MPI25_ENCRYPTED_HASH_DATA {
-	U8				ImageVersion;		/* 0x00 */
-	U8				NumHash;		/* 0x01 */
-	U16				Reserved1;		/* 0x02 */
-	U32				Reserved2;		/* 0x04 */
-	MPI25_ENCRYPTED_HASH_ENTRY	EncryptedHashEntry[1];  /* 0x08 */
-} MPI25_ENCRYPTED_HASH_DATA, *PTR_MPI25_ENCRYPTED_HASH_DATA,
-Mpi25EncryptedHashData_t, *pMpi25EncryptedHashData_t;
-
 
 /****************************************************************************
 * PowerManagementControl message
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_pci.h b/drivers/scsi/mpt3sas/mpi/mpi2_pci.h
index f0281f9..bb7b79c 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_pci.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_pci.h
@@ -1,12 +1,12 @@
 /*
- * Copyright 2012-2015 Avago Technologies.  All rights reserved.
+ * Copyright 2000-2020 Broadcom Inc. All rights reserved.
  *
  *
  *          Name:  mpi2_pci.h
  *         Title:  MPI PCIe Attached Devices structures and definitions.
  * Creation Date:  October 9, 2012
  *
- * mpi2_pci.h Version:  02.00.02
+ * mpi2_pci.h Version:  02.00.04
  *
  * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
  *       prefix are for use only on MPI v2.5 products, and must not be used
@@ -23,6 +23,9 @@
  *                     Removed SOP support.
  * 07-01-16  02.00.02  Added MPI26_NVME_FLAGS_FORCE_ADMIN_ERR_RESP to
  *                     NVME Encapsulated Request.
+ * 07-22-18  02.00.03  Updated flags field for NVME Encapsulated request.
+ * 12-17-18  02.00.04  Added MPI26_PCIE_DEVINFO_SCSI.
+ *			Shortened some defines to be compatible with DOS.
  * --------------------------------------------------------------------------
  */
 
@@ -40,7 +43,7 @@
 #define MPI26_PCIE_DEVINFO_NO_DEVICE            (0x00000000)
 #define MPI26_PCIE_DEVINFO_PCI_SWITCH           (0x00000001)
 #define MPI26_PCIE_DEVINFO_NVME                 (0x00000003)
-
+#define MPI26_PCIE_DEVINFO_SCSI                 (0x00000004)
 
 /****************************************************************************
 *  NVMe Encapsulated message
@@ -74,11 +77,10 @@
 #define MPI26_NVME_FLAGS_SUBMISSIONQ_IO             (0x0000)
 #define MPI26_NVME_FLAGS_SUBMISSIONQ_ADMIN          (0x0010)
 /*Error Response Address Space */
-#define MPI26_NVME_FLAGS_MASK_ERROR_RSP_ADDR        (0x000C)
-#define MPI26_NVME_FLAGS_SYSTEM_RSP_ADDR            (0x0000)
-#define MPI26_NVME_FLAGS_IOCPLB_RSP_ADDR            (0x0008)
-#define MPI26_NVME_FLAGS_IOCPLBNTA_RSP_ADDR         (0x000C)
-/*Data Direction*/
+#define MPI26_NVME_FLAGS_ERR_RSP_ADDR_MASK          (0x000C)
+#define MPI26_NVME_FLAGS_ERR_RSP_ADDR_SYSTEM        (0x0000)
+#define MPI26_NVME_FLAGS_ERR_RSP_ADDR_IOCTL         (0x0008)
+/* Data Direction */
 #define MPI26_NVME_FLAGS_DATADIRECTION_MASK         (0x0003)
 #define MPI26_NVME_FLAGS_NODATATRANSFER             (0x0000)
 #define MPI26_NVME_FLAGS_WRITE                      (0x0001)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_raid.h b/drivers/scsi/mpt3sas/mpi/mpi2_raid.h
index b9bb1c1..b770eb5 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_raid.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_raid.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright 2000-2014 Avago Technologies.  All rights reserved.
+ * Copyright 2000-2020 Broadcom Inc. All rights reserved.
  *
  *
  *          Name:  mpi2_raid.h
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_sas.h b/drivers/scsi/mpt3sas/mpi/mpi2_sas.h
index afa17ff..16c922a 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_sas.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_sas.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright 2000-2015 Avago Technologies.  All rights reserved.
+ * Copyright 2000-2020 Broadcom Inc. All rights reserved.
  *
  *
  *          Name:  mpi2_sas.h
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
index 629296e..17ef7f6 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
@@ -1,13 +1,13 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright 2000-2014 Avago Technologies.  All rights reserved.
+ * Copyright 2000-2020 Broadcom Inc. All rights reserved.
  *
  *
  *          Name:  mpi2_tool.h
  *         Title:  MPI diagnostic tool structures and definitions
  * Creation Date:  March 26, 2007
  *
- *   mpi2_tool.h Version:  02.00.14
+ *   mpi2_tool.h Version:  02.00.16
  *
  * Version History
  * ---------------
@@ -38,6 +38,9 @@
  * 11-18-14  02.00.13  Updated copyright information.
  * 08-25-16  02.00.14  Added new values for the Flags field of Toolbox Clean
  *                     Tool Request Message.
+ * 07-22-18  02.00.15  Added defines for new TOOLBOX_PCIE_LANE_MARGINING tool.
+ *                     Added option for DeviceInfo field in ISTWI tool.
+ * 12-17-18  02.00.16  Shorten some defines to be compatible with DOS.
  * --------------------------------------------------------------------------
  */
 
@@ -58,6 +61,7 @@
 #define MPI2_TOOLBOX_BEACON_TOOL                    (0x05)
 #define MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL            (0x06)
 #define MPI2_TOOLBOX_TEXT_DISPLAY_TOOL              (0x07)
+#define MPI26_TOOLBOX_BACKEND_PCIE_LANE_MARGIN      (0x08)
 
 /****************************************************************************
 * Toolbox reply
@@ -226,6 +230,13 @@
 #define MPI2_TOOL_ISTWI_FLAG_AUTO_RESERVE_RELEASE   (0x80)
 #define MPI2_TOOL_ISTWI_FLAG_PAGE_ADDR_MASK         (0x07)
 
+/*MPI26 TOOLBOX Request MsgFlags defines */
+#define MPI26_TOOL_ISTWI_MSGFLG_ADDR_MASK           (0x01)
+/*Request uses Man Page 43 device index addressing */
+#define MPI26_TOOL_ISTWI_MSGFLG_ADDR_INDEX          (0x00)
+/*Request uses Man Page 43 device info struct addressing */
+#define MPI26_TOOL_ISTWI_MSGFLG_ADDR_INFO           (0x01)
+
 /*Toolbox ISTWI Read Write Tool reply message */
 typedef struct _MPI2_TOOLBOX_ISTWI_REPLY {
 	U8 Tool;		/*0x00 */
@@ -387,6 +398,64 @@
 #define MPI2_TOOLBOX_CONSOLE_FLAG_TIMESTAMP     (0x01)
 
 
+/***************************************************************************
+ *  Toolbox Backend Lane Margining Tool
+ ***************************************************************************
+ */
+
+/*Toolbox Backend Lane Margining Tool request message */
+typedef struct _MPI26_TOOLBOX_LANE_MARGIN_REQUEST {
+	U8 Tool;			/*0x00 */
+	U8 Reserved1;			/*0x01 */
+	U8 ChainOffset;			/*0x02 */
+	U8 Function;			/*0x03 */
+	U16 Reserved2;			/*0x04 */
+	U8 Reserved3;			/*0x06 */
+	U8 MsgFlags;			/*0x07 */
+	U8 VP_ID;			/*0x08 */
+	U8 VF_ID;			/*0x09 */
+	U16 Reserved4;			/*0x0A */
+	U8 Command;			/*0x0C */
+	U8 SwitchPort;			/*0x0D */
+	U16 DevHandle;			/*0x0E */
+	U8 RegisterOffset;		/*0x10 */
+	U8 Reserved5;			/*0x11 */
+	U16 DataLength;			/*0x12 */
+	MPI25_SGE_IO_UNION SGL;		/*0x14 */
+} MPI26_TOOLBOX_LANE_MARGINING_REQUEST,
+	*PTR_MPI2_TOOLBOX_LANE_MARGINING_REQUEST,
+	Mpi26ToolboxLaneMarginingRequest_t,
+	*pMpi2ToolboxLaneMarginingRequest_t;
+
+/* defines for the Command field */
+#define MPI26_TOOL_MARGIN_COMMAND_ENTER_MARGIN_MODE        (0x01)
+#define MPI26_TOOL_MARGIN_COMMAND_READ_REGISTER_DATA       (0x02)
+#define MPI26_TOOL_MARGIN_COMMAND_WRITE_REGISTER_DATA      (0x03)
+#define MPI26_TOOL_MARGIN_COMMAND_EXIT_MARGIN_MODE         (0x04)
+
+
+/*Toolbox Backend Lane Margining Tool reply message */
+typedef struct _MPI26_TOOLBOX_LANE_MARGIN_REPLY {
+	U8 Tool;			/*0x00 */
+	U8 Reserved1;			/*0x01 */
+	U8 MsgLength;			/*0x02 */
+	U8 Function;			/*0x03 */
+	U16 Reserved2;			/*0x04 */
+	U8 Reserved3;			/*0x06 */
+	U8 MsgFlags;			/*0x07 */
+	U8 VP_ID;			/*0x08 */
+	U8 VF_ID;			/*0x09 */
+	U16 Reserved4;			/*0x0A */
+	U16 Reserved5;			/*0x0C */
+	U16 IOCStatus;			/*0x0E */
+	U32 IOCLogInfo;			/*0x10 */
+	U16 ReturnedDataLength;		/*0x14 */
+	U16 Reserved6;			/*0x16 */
+} MPI26_TOOLBOX_LANE_MARGINING_REPLY,
+	*PTR_MPI26_TOOLBOX_LANE_MARGINING_REPLY,
+	Mpi26ToolboxLaneMarginingReply_t,
+	*pMpi26ToolboxLaneMarginingReply_t;
+
 
 /*****************************************************************************
 *
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 59d7844..fea3cb6 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -74,30 +74,55 @@
 #define MAX_HBA_QUEUE_DEPTH	30000
 #define MAX_CHAIN_DEPTH		100000
 static int max_queue_depth = -1;
-module_param(max_queue_depth, int, 0);
+module_param(max_queue_depth, int, 0444);
 MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
 
 static int max_sgl_entries = -1;
-module_param(max_sgl_entries, int, 0);
+module_param(max_sgl_entries, int, 0444);
 MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");
 
 static int msix_disable = -1;
-module_param(msix_disable, int, 0);
+module_param(msix_disable, int, 0444);
 MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
 
 static int smp_affinity_enable = 1;
-module_param(smp_affinity_enable, int, S_IRUGO);
+module_param(smp_affinity_enable, int, 0444);
 MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");
 
 static int max_msix_vectors = -1;
-module_param(max_msix_vectors, int, 0);
+module_param(max_msix_vectors, int, 0444);
 MODULE_PARM_DESC(max_msix_vectors,
 	" max msix vectors");
 
+static int irqpoll_weight = -1;
+module_param(irqpoll_weight, int, 0444);
+MODULE_PARM_DESC(irqpoll_weight,
+	"irq poll weight (default= one fourth of HBA queue depth)");
+
 static int mpt3sas_fwfault_debug;
 MODULE_PARM_DESC(mpt3sas_fwfault_debug,
 	" enable detection of firmware fault and halt firmware - (default=0)");
 
+static int perf_mode = -1;
+module_param(perf_mode, int, 0444);
+MODULE_PARM_DESC(perf_mode,
+	"Performance mode (only for Aero/Sea Generation), options:\n\t\t"
+	"0 - balanced: high iops mode is enabled &\n\t\t"
+	"interrupt coalescing is enabled only on high iops queues,\n\t\t"
+	"1 - iops: high iops mode is disabled &\n\t\t"
+	"interrupt coalescing is enabled on all queues,\n\t\t"
+	"2 - latency: high iops mode is disabled &\n\t\t"
+	"interrupt coalescing is enabled on all queues with timeout value 0xA,\n"
+	"\t\tdefault - default perf_mode is 'balanced'"
+	);
+
+enum mpt3sas_perf_mode {
+	MPT_PERF_MODE_DEFAULT	= -1,
+	MPT_PERF_MODE_BALANCED	= 0,
+	MPT_PERF_MODE_IOPS	= 1,
+	MPT_PERF_MODE_LATENCY	= 2,
+};
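
The perf_mode value chosen above is consumed later in this patch by
_base_check_and_enable_high_iops_queues(). As a quick orientation, a
condensed sketch of that decision (the authoritative logic is in the
function itself, further down):

	/* Sketch only: iops/latency modes force the high-iops queues
	 * off; balanced and default modes leave them eligible, subject
	 * to the controller-generation, link-speed and CPU-count
	 * checks made later.
	 */
	if (perf_mode == MPT_PERF_MODE_IOPS ||
	    perf_mode == MPT_PERF_MODE_LATENCY)
		ioc->high_iops_queues = 0;
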
+
 static int
 _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
 
@@ -122,8 +147,8 @@
 	if (!(status & MPT3_CMD_RESET))
 		issue_reset = 1;
 
-	pr_err(MPT3SAS_FMT "Command %s\n", ioc->name,
-	    ((issue_reset == 0) ? "terminated due to Host Reset" : "Timeout"));
+	ioc_err(ioc, "Command %s\n",
+		issue_reset == 0 ? "terminated due to Host Reset" : "Timeout");
 	_debug_dump_mf(mpi_request, sz);
 
 	return issue_reset;
@@ -157,6 +182,32 @@
 	param_get_int, &mpt3sas_fwfault_debug, 0644);
 
 /**
+ * _base_readl_aero - retry readl for max three times.
+ * @addr: MPT Fusion system interface register address
+ *
+ * Retry the readl() for max three times if it gets zero value
+ * while reading the system interface register.
+ */
+static inline u32
+_base_readl_aero(const volatile void __iomem *addr)
+{
+	u32 i = 0, ret_val;
+
+	do {
+		ret_val = readl(addr);
+		i++;
+	} while (ret_val == 0 && i < 3);
+
+	return ret_val;
+}
+
+static inline u32
+_base_readl(const volatile void __iomem *addr)
+{
+	return readl(addr);
+}
+
+/**
  * _base_clone_reply_to_sys_mem - copies reply to reply free iomem
  *				  in BAR0 space.
  *
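
_base_readl_aero() above retries a register read up to three times if
it returns zero, working around transient zero reads on Aero/Sea
controllers, while _base_readl() keeps the plain readl() behaviour for
older generations. A plausible wiring of the two, assuming the
ioc->base_readl function pointer and is_aero_ioc flag referenced
elsewhere in this patch:

	/* Sketch, not part of the patch: select the readl variant once
	 * so hot paths can call ioc->base_readl() without branching on
	 * the controller generation each time.
	 */
	if (ioc->is_aero_ioc)
		ioc->base_readl = &_base_readl_aero;
	else
		ioc->base_readl = &_base_readl;
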
@@ -336,9 +387,7 @@
 				return ct->chain_buffer;
 		}
 	}
-	pr_info(MPT3SAS_FMT
-	    "Provided chain_buffer_dma address is not in the lookup list\n",
-	    ioc->name);
+	ioc_info(ioc, "Provided chain_buffer_dma address is not in the lookup list\n");
 	return NULL;
 }
 
@@ -394,7 +443,7 @@
 		/* Get scsi_cmd using smid */
 		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
 		if (scmd == NULL) {
-			pr_err(MPT3SAS_FMT "scmd is NULL\n", ioc->name);
+			ioc_err(ioc, "scmd is NULL\n");
 			return;
 		}
 
@@ -532,11 +581,11 @@
 	struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
 	struct pci_dev *pdev;
 
-	if ((ioc == NULL))
+	if (!ioc)
 		return -1;
 
 	pdev = ioc->pdev;
-	if ((pdev == NULL))
+	if (!pdev)
 		return -1;
 	pci_stop_and_remove_bus_device_locked(pdev);
 	return 0;
@@ -566,8 +615,7 @@
 
 	doorbell = mpt3sas_base_get_iocstate(ioc, 0);
 	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
-		pr_err(MPT3SAS_FMT "SAS host is non-operational !!!!\n",
-		    ioc->name);
+		ioc_err(ioc, "SAS host is non-operational !!!!\n");
 
 		/* It may be possible that EEH recovery can resolve some of
 		 * pci bus failure issues rather removing the dead ioc function
@@ -600,13 +648,11 @@
 		p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
 		    "%s_dead_ioc_%d", ioc->driver_name, ioc->id);
 		if (IS_ERR(p))
-			pr_err(MPT3SAS_FMT
-			"%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
-			ioc->name, __func__);
+			ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
+				__func__);
 		else
-			pr_err(MPT3SAS_FMT
-			"%s: Running mpt3sas_dead_ioc thread success !!!!\n",
-			ioc->name, __func__);
+			ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
+				__func__);
 		return; /* don't rearm timer */
 	}
 
@@ -614,8 +660,8 @@
 
 	if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
 		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
-		pr_warn(MPT3SAS_FMT "%s: hard reset: %s\n", ioc->name,
-		    __func__, (rc == 0) ? "success" : "failed");
+		ioc_warn(ioc, "%s: hard reset: %s\n",
+			 __func__, rc == 0 ? "success" : "failed");
 		doorbell = mpt3sas_base_get_iocstate(ioc, 0);
 		if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
 			mpt3sas_base_fault_info(ioc, doorbell &
@@ -657,8 +703,7 @@
 	ioc->fault_reset_work_q =
 		create_singlethread_workqueue(ioc->fault_reset_work_q_name);
 	if (!ioc->fault_reset_work_q) {
-		pr_err(MPT3SAS_FMT "%s: failed (line=%d)\n",
-		    ioc->name, __func__, __LINE__);
+		ioc_err(ioc, "%s: failed (line=%d)\n", __func__, __LINE__);
 		return;
 	}
 	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
@@ -700,8 +745,7 @@
 void
 mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code)
 {
-	pr_err(MPT3SAS_FMT "fault_state(0x%04x)!\n",
-	    ioc->name, fault_code);
+	ioc_err(ioc, "fault_state(0x%04x)!\n", fault_code);
 }
 
 /**
@@ -723,13 +767,12 @@
 
 	dump_stack();
 
-	doorbell = readl(&ioc->chip->Doorbell);
+	doorbell = ioc->base_readl(&ioc->chip->Doorbell);
 	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
 		mpt3sas_base_fault_info(ioc , doorbell);
 	else {
 		writel(0xC0FFEE00, &ioc->chip->Doorbell);
-		pr_err(MPT3SAS_FMT "Firmware is halted due to command timeout\n",
-			ioc->name);
+		ioc_err(ioc, "Firmware is halted due to command timeout\n");
 	}
 
 	if (ioc->fwfault_debug == 2)
@@ -956,8 +999,8 @@
 		break;
 	}
 
-	pr_warn(MPT3SAS_FMT "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
-		ioc->name, desc, ioc_status, request_hdr, func_str);
+	ioc_warn(ioc, "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
+		 desc, ioc_status, request_hdr, func_str);
 
 	_debug_dump_mf(request_hdr, frame_sz/4);
 }
@@ -1003,9 +1046,9 @@
 	{
 		Mpi2EventDataSasDiscovery_t *event_data =
 		    (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
-		pr_info(MPT3SAS_FMT "Discovery: (%s)", ioc->name,
-		    (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
-		    "start" : "stop");
+		ioc_info(ioc, "Discovery: (%s)",
+			 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
+			 "start" : "stop");
 		if (event_data->DiscoveryStatus)
 			pr_cont(" discovery_status(0x%08x)",
 			    le32_to_cpu(event_data->DiscoveryStatus));
@@ -1059,14 +1102,13 @@
 	{
 		Mpi26EventDataPCIeEnumeration_t *event_data =
 			(Mpi26EventDataPCIeEnumeration_t *)mpi_reply->EventData;
-		pr_info(MPT3SAS_FMT "PCIE Enumeration: (%s)", ioc->name,
-			   (event_data->ReasonCode ==
-				MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
-				"start" : "stop");
+		ioc_info(ioc, "PCIE Enumeration: (%s)",
+			 event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED ?
+			 "start" : "stop");
 		if (event_data->EnumerationStatus)
-			pr_info("enumeration_status(0x%08x)",
-				   le32_to_cpu(event_data->EnumerationStatus));
-		pr_info("\n");
+			pr_cont("enumeration_status(0x%08x)",
+				le32_to_cpu(event_data->EnumerationStatus));
+		pr_cont("\n");
 		return;
 	}
 	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
@@ -1077,7 +1119,7 @@
 	if (!desc)
 		return;
 
-	pr_info(MPT3SAS_FMT "%s\n", ioc->name, desc);
+	ioc_info(ioc, "%s\n", desc);
 }
 
 /**
@@ -1128,11 +1170,9 @@
 		break;
 	}
 
-	pr_warn(MPT3SAS_FMT
-		"log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
-		ioc->name, log_info,
-	     originator_str, sas_loginfo.dw.code,
-	     sas_loginfo.dw.subcode);
+	ioc_warn(ioc, "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
+		 log_info,
+		 originator_str, sas_loginfo.dw.code, sas_loginfo.dw.subcode);
 }
 
 /**
@@ -1152,8 +1192,8 @@
 
 	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
 	if (unlikely(!mpi_reply)) {
-		pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return;
 	}
 	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
@@ -1249,9 +1289,9 @@
 		delayed_event_ack->EventContext = mpi_reply->EventContext;
 		list_add_tail(&delayed_event_ack->list,
 				&ioc->delayed_event_ack_list);
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT
-				"DELAYED: EVENT ACK: event (0x%04x)\n",
-				ioc->name, le16_to_cpu(mpi_reply->Event)));
+		dewtprintk(ioc,
+			   ioc_info(ioc, "DELAYED: EVENT ACK: event (0x%04x)\n",
+				    le16_to_cpu(mpi_reply->Event)));
 		goto out;
 	}
 
@@ -1262,7 +1302,7 @@
 	ack_request->EventContext = mpi_reply->EventContext;
 	ack_request->VF_ID = 0;  /* TODO */
 	ack_request->VP_ID = 0;
-	mpt3sas_base_put_smid_default(ioc, smid);
+	ioc->put_smid_default(ioc, smid);
 
  out:
 
@@ -1336,10 +1376,10 @@
 	u32 him_register;
 
 	ioc->mask_interrupts = 1;
-	him_register = readl(&ioc->chip->HostInterruptMask);
+	him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
 	him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
 	writel(him_register, &ioc->chip->HostInterruptMask);
-	readl(&ioc->chip->HostInterruptMask);
+	ioc->base_readl(&ioc->chip->HostInterruptMask);
 }
 
 /**
@@ -1353,7 +1393,7 @@
 {
 	u32 him_register;
 
-	him_register = readl(&ioc->chip->HostInterruptMask);
+	him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
 	him_register &= ~MPI2_HIM_RIM;
 	writel(him_register, &ioc->chip->HostInterruptMask);
 	ioc->mask_interrupts = 0;
@@ -1367,20 +1407,30 @@
 	} u;
 };
 
-/**
- * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
- * @irq: irq number (not used)
- * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
- *
- * Return: IRQ_HANDLED if processed, else IRQ_NONE.
- */
-static irqreturn_t
-_base_interrupt(int irq, void *bus_id)
+static u32 base_mod64(u64 dividend, u32 divisor)
 {
-	struct adapter_reply_queue *reply_q = bus_id;
+	u32 remainder;
+
+	if (!divisor)
+		pr_err("mpt3sas: DIVISOR is zero, in div fn\n");
+	remainder = do_div(dividend, divisor);
+	return remainder;
+}
+
+/**
+ * _base_process_reply_queue - Process reply descriptors from reply
+ *		descriptor post queue.
+ * @reply_q: per IRQ's reply queue object.
+ *
+ * Return: number of reply descriptors processed from reply
+ *		descriptor queue.
+ */
+static int
+_base_process_reply_queue(struct adapter_reply_queue *reply_q)
+{
 	union reply_descriptor rd;
-	u32 completed_cmds;
-	u8 request_desript_type;
+	u64 completed_cmds;
+	u8 request_descript_type;
 	u16 smid;
 	u8 cb_idx;
 	u32 reply;
@@ -1389,21 +1439,18 @@
 	Mpi2ReplyDescriptorsUnion_t *rpf;
 	u8 rc;
 
-	if (ioc->mask_interrupts)
-		return IRQ_NONE;
-
+	completed_cmds = 0;
 	if (!atomic_add_unless(&reply_q->busy, 1, 1))
-		return IRQ_NONE;
+		return completed_cmds;
 
 	rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
-	request_desript_type = rpf->Default.ReplyFlags
+	request_descript_type = rpf->Default.ReplyFlags
 	     & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
-	if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
+	if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
 		atomic_dec(&reply_q->busy);
-		return IRQ_NONE;
+		return completed_cmds;
 	}
 
-	completed_cmds = 0;
 	cb_idx = 0xFF;
 	do {
 		rd.word = le64_to_cpu(rpf->Words);
@@ -1411,11 +1458,11 @@
 			goto out;
 		reply = 0;
 		smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
-		if (request_desript_type ==
+		if (request_descript_type ==
 		    MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
-		    request_desript_type ==
+		    request_descript_type ==
 		    MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
-		    request_desript_type ==
+		    request_descript_type ==
 		    MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS) {
 			cb_idx = _base_get_cb_idx(ioc, smid);
 			if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
@@ -1425,7 +1472,7 @@
 				if (rc)
 					mpt3sas_base_free_smid(ioc, smid);
 			}
-		} else if (request_desript_type ==
+		} else if (request_descript_type ==
 		    MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
 			reply = le32_to_cpu(
 			    rpf->AddressReply.ReplyFrameAddress);
@@ -1471,7 +1518,7 @@
 		    (reply_q->reply_post_host_index ==
 		    (ioc->reply_post_queue_depth - 1)) ? 0 :
 		    reply_q->reply_post_host_index + 1;
-		request_desript_type =
+		request_descript_type =
 		    reply_q->reply_post_free[reply_q->reply_post_host_index].
 		    Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
 		completed_cmds++;
@@ -1480,7 +1527,7 @@
 		 * So that FW can find enough entries to post the Reply
 		 * Descriptors in the reply descriptor post queue.
 		 */
-		if (completed_cmds > ioc->hba_queue_depth/3) {
+		if (!base_mod64(completed_cmds, ioc->thresh_hold)) {
 			if (ioc->combined_reply_queue) {
 				writel(reply_q->reply_post_host_index |
 						((msix_index  & 7) <<
@@ -1492,9 +1539,14 @@
 						 MPI2_RPHI_MSIX_INDEX_SHIFT),
 						&ioc->chip->ReplyPostHostIndex);
 			}
-			completed_cmds = 1;
+			if (!reply_q->irq_poll_scheduled) {
+				reply_q->irq_poll_scheduled = true;
+				irq_poll_sched(&reply_q->irqpoll);
+			}
+			atomic_dec(&reply_q->busy);
+			return completed_cmds;
 		}
-		if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
+		if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
 			goto out;
 		if (!reply_q->reply_post_host_index)
 			rpf = reply_q->reply_post_free;
@@ -1506,14 +1558,14 @@
 
 	if (!completed_cmds) {
 		atomic_dec(&reply_q->busy);
-		return IRQ_NONE;
+		return completed_cmds;
 	}
 
 	if (ioc->is_warpdrive) {
 		writel(reply_q->reply_post_host_index,
 		ioc->reply_post_host_index[msix_index]);
 		atomic_dec(&reply_q->busy);
-		return IRQ_HANDLED;
+		return completed_cmds;
 	}
 
 	/* Update Reply Post Host Index.
@@ -1540,7 +1592,82 @@
 			MPI2_RPHI_MSIX_INDEX_SHIFT),
 			&ioc->chip->ReplyPostHostIndex);
 	atomic_dec(&reply_q->busy);
-	return IRQ_HANDLED;
+	return completed_cmds;
+}
+
+/**
+ * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
+ * @irq: irq number (not used)
+ * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
+ *
+ * Return: IRQ_HANDLED if processed, else IRQ_NONE.
+ */
+static irqreturn_t
+_base_interrupt(int irq, void *bus_id)
+{
+	struct adapter_reply_queue *reply_q = bus_id;
+	struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
+
+	if (ioc->mask_interrupts)
+		return IRQ_NONE;
+	if (reply_q->irq_poll_scheduled)
+		return IRQ_HANDLED;
+	return ((_base_process_reply_queue(reply_q) > 0) ?
+			IRQ_HANDLED : IRQ_NONE);
+}
+
+/**
+ * _base_irqpoll - IRQ poll callback handler
+ * @irqpoll: irq_poll object
+ * @budget: irq poll weight
+ *
+ * returns number of reply descriptors processed
+ */
+static int
+_base_irqpoll(struct irq_poll *irqpoll, int budget)
+{
+	struct adapter_reply_queue *reply_q;
+	int num_entries = 0;
+
+	reply_q = container_of(irqpoll, struct adapter_reply_queue,
+			irqpoll);
+	if (reply_q->irq_line_enable) {
+		disable_irq(reply_q->os_irq);
+		reply_q->irq_line_enable = false;
+	}
+	num_entries = _base_process_reply_queue(reply_q);
+	if (num_entries < budget) {
+		irq_poll_complete(irqpoll);
+		reply_q->irq_poll_scheduled = false;
+		reply_q->irq_line_enable = true;
+		enable_irq(reply_q->os_irq);
+	}
+
+	return num_entries;
+}
+
+/**
+ * _base_init_irqpolls - initialize IRQ polls
+ * @ioc: per adapter object
+ *
+ * returns nothing
+ */
+static void
+_base_init_irqpolls(struct MPT3SAS_ADAPTER *ioc)
+{
+	struct adapter_reply_queue *reply_q, *next;
+
+	if (list_empty(&ioc->reply_queue_list))
+		return;
+
+	list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
+		irq_poll_init(&reply_q->irqpoll,
+			ioc->hba_queue_depth/4, _base_irqpoll);
+		reply_q->irq_poll_scheduled = false;
+		reply_q->irq_line_enable = true;
+		reply_q->os_irq = pci_irq_vector(ioc->pdev,
+		    reply_q->msix_index);
+	}
 }
 
 /**
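
The reworked loop above caps the time spent in hard-IRQ context: once a
single interrupt has reaped a multiple of ioc->thresh_hold completions,
it updates the reply-post host index, schedules irq_poll and returns.
_base_irqpoll() then continues draining in softirq context with the IRQ
line masked via disable_irq(), and re-enables the line once a poll pass
finishes under budget. The handoff condition, extracted from the loop
for emphasis:

	/* From _base_process_reply_queue() above: hand off to irq_poll
	 * whenever the running completion count reaches a multiple of
	 * the threshold, rather than looping indefinitely in the hard
	 * interrupt handler.
	 */
	if (!base_mod64(completed_cmds, ioc->thresh_hold) &&
	    !reply_q->irq_poll_scheduled) {
		reply_q->irq_poll_scheduled = true;
		irq_poll_sched(&reply_q->irqpoll);
	}
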
@@ -1581,6 +1708,17 @@
 		/* TMs are on msix_index == 0 */
 		if (reply_q->msix_index == 0)
 			continue;
+		if (reply_q->irq_poll_scheduled) {
+			/* Calling irq_poll_disable will wait for any pending
+			 * callbacks to have completed.
+			 */
+			irq_poll_disable(&reply_q->irqpoll);
+			irq_poll_enable(&reply_q->irqpoll);
+			reply_q->irq_poll_scheduled = false;
+			reply_q->irq_line_enable = true;
+			enable_irq(reply_q->os_irq);
+			continue;
+		}
 		synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
 	}
 }
@@ -2122,6 +2260,11 @@
 	bool build_prp = true;
 
 	data_length = scsi_bufflen(scmd);
+	if (pcie_device &&
+	    (mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))) {
+		build_prp = false;
+		return build_prp;
+	}
 
 	/* If Data Length is <= 16K and number of SGE entries are <= 2
 	 * we build IEEE SGL
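
The new early return above keeps NVMe-style PRP SGLs away from
PCIe-attached SCSI devices, the device type introduced by the
MPI26_PCIE_DEVINFO_SCSI define earlier in this patch. A hypothetical
sketch of the predicate (the real helper lives in mpt3sas_scsih.c,
outside this hunk; the mask name is assumed):

	/* Hypothetical sketch: true when the PCIe device-info field
	 * reports a SCSI device rather than an NVMe drive or switch.
	 */
	static inline int
	mpt3sas_scsih_is_pcie_scsi_device(u32 device_info)
	{
		return (device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE) ==
		    MPI26_PCIE_DEVINFO_SCSI;
	}
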
@@ -2270,7 +2413,7 @@
 	sges_left = scsi_dma_map(scmd);
 	if (sges_left < 0) {
 		sdev_printk(KERN_ERR, scmd->device,
-		 "pci_map_sg failed: request for %d bytes!\n",
+		 "scsi_dma_map failed: request for %d bytes!\n",
 		 scsi_bufflen(scmd));
 		return -ENOMEM;
 	}
@@ -2418,7 +2561,7 @@
 	sges_left = scsi_dma_map(scmd);
 	if (sges_left < 0) {
 		sdev_printk(KERN_ERR, scmd->device,
-			"pci_map_sg failed: request for %d bytes!\n",
+			"scsi_dma_map failed: request for %d bytes!\n",
 			scsi_bufflen(scmd));
 		return -ENOMEM;
 	}
@@ -2563,44 +2706,43 @@
 static int
 _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
 {
+	u64 required_mask, coherent_mask;
 	struct sysinfo s;
-	u64 consistent_dma_mask;
+	/* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
+	int dma_mask = (ioc->hba_mpi_version_belonged > MPI2_VERSION) ? 63 : 64;
 
 	if (ioc->is_mcpu_endpoint)
 		goto try_32bit;
 
-	if (ioc->dma_mask)
-		consistent_dma_mask = DMA_BIT_MASK(64);
-	else
-		consistent_dma_mask = DMA_BIT_MASK(32);
+	required_mask = dma_get_required_mask(&pdev->dev);
+	if (sizeof(dma_addr_t) == 4 || required_mask <= DMA_BIT_MASK(32))
+		goto try_32bit;
 
-	if (sizeof(dma_addr_t) > 4) {
-		const uint64_t required_mask =
-		    dma_get_required_mask(&pdev->dev);
-		if ((required_mask > DMA_BIT_MASK(32)) &&
-		    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
-		    !pci_set_consistent_dma_mask(pdev, consistent_dma_mask)) {
-			ioc->base_add_sg_single = &_base_add_sg_single_64;
-			ioc->sge_size = sizeof(Mpi2SGESimple64_t);
-			ioc->dma_mask = 64;
-			goto out;
-		}
-	}
+	if (ioc->dma_mask)
+		coherent_mask = DMA_BIT_MASK(dma_mask);
+	else
+		coherent_mask = DMA_BIT_MASK(32);
+
+	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)) ||
+	    dma_set_coherent_mask(&pdev->dev, coherent_mask))
+		goto try_32bit;
+
+	ioc->base_add_sg_single = &_base_add_sg_single_64;
+	ioc->sge_size = sizeof(Mpi2SGESimple64_t);
+	ioc->dma_mask = dma_mask;
+	goto out;
 
  try_32bit:
-	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
-	    && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
-		ioc->base_add_sg_single = &_base_add_sg_single_32;
-		ioc->sge_size = sizeof(Mpi2SGESimple32_t);
-		ioc->dma_mask = 32;
-	} else
+	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
 		return -ENODEV;
 
+	ioc->base_add_sg_single = &_base_add_sg_single_32;
+	ioc->sge_size = sizeof(Mpi2SGESimple32_t);
+	ioc->dma_mask = 32;
  out:
 	si_meminfo(&s);
-	pr_info(MPT3SAS_FMT
-		"%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
-		ioc->name, ioc->dma_mask, convert_to_kb(s.totalram));
+	ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
+		 ioc->dma_mask, convert_to_kb(s.totalram));
 
 	return 0;
 }
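
For reference on the 63-bit choice above: DMA_BIT_MASK(63) is a full
64-bit mask with bit 63 cleared, so no bus address handed to SAS3 and
newer firmware ever has the top bit set. The macro, as defined in
include/linux/dma-mapping.h:

	/* DMA_BIT_MASK(63) evaluates to 0x7fffffffffffffffULL. */
	#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))
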
@@ -2609,7 +2751,7 @@
 _base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc,
 				      struct pci_dev *pdev)
 {
-	if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+	if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(ioc->dma_mask))) {
 		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
 			return -ENODEV;
 	}
@@ -2639,8 +2781,7 @@
 
 	base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
 	if (!base) {
-		dfailprintk(ioc, pr_info(MPT3SAS_FMT "msix not supported\n",
-			ioc->name));
+		dfailprintk(ioc, ioc_info(ioc, "msix not supported\n"));
 		return -EINVAL;
 	}
 
@@ -2658,9 +2799,8 @@
 		pci_read_config_word(ioc->pdev, base + 2, &message_control);
 		ioc->msix_vector_count = (message_control & 0x3FF) + 1;
 	}
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT
-		"msix is supported, vector_count(%d)\n",
-		ioc->name, ioc->msix_vector_count));
+	dinitprintk(ioc, ioc_info(ioc, "msix is supported, vector_count(%d)\n",
+				  ioc->msix_vector_count));
 	return 0;
 }
 
@@ -2680,6 +2820,9 @@
 
 	list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
 		list_del(&reply_q->list);
+		if (ioc->smp_affinity_enable)
+			irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
+			    reply_q->msix_index), NULL);
 		free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index),
 			 reply_q);
 		kfree(reply_q);
@@ -2702,8 +2845,8 @@
 
 	reply_q =  kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
 	if (!reply_q) {
-		pr_err(MPT3SAS_FMT "unable to allocate memory %d!\n",
-		    ioc->name, (int)sizeof(struct adapter_reply_queue));
+		ioc_err(ioc, "unable to allocate memory %zu!\n",
+			sizeof(struct adapter_reply_queue));
 		return -ENOMEM;
 	}
 	reply_q->ioc = ioc;
@@ -2719,7 +2862,7 @@
 	r = request_irq(pci_irq_vector(pdev, index), _base_interrupt,
 			IRQF_SHARED, reply_q->name, reply_q);
 	if (r) {
-		pr_err(MPT3SAS_FMT "unable to allocate interrupt %d!\n",
+		pr_err("%s: unable to allocate interrupt %d!\n",
 		       reply_q->name, pci_irq_vector(pdev, index));
 		kfree(reply_q);
 		return -EBUSY;
@@ -2744,10 +2887,14 @@
 {
 	unsigned int cpu, nr_cpus, nr_msix, index = 0;
 	struct adapter_reply_queue *reply_q;
+	int local_numa_node;
 
 	if (!_base_is_controller_msix_enabled(ioc))
 		return;
 
+	if (ioc->msix_load_balance)
+		return;
+
 	memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
 
 	nr_cpus = num_online_cpus();
@@ -2756,14 +2903,33 @@
 	if (!nr_msix)
 		return;
 
-	if (smp_affinity_enable) {
+	if (ioc->smp_affinity_enable) {
+
+		/*
+		 * set irq affinity to local numa node for those irqs
+		 * corresponding to high iops queues.
+		 */
+		if (ioc->high_iops_queues) {
+			local_numa_node = dev_to_node(&ioc->pdev->dev);
+			for (index = 0; index < ioc->high_iops_queues;
+			    index++) {
+				irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
+				    index), cpumask_of_node(local_numa_node));
+			}
+		}
+
 		list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
-			const cpumask_t *mask = pci_irq_get_affinity(ioc->pdev,
-							reply_q->msix_index);
-			if (!mask) {
-				pr_warn(MPT3SAS_FMT "no affinity for msi %x\n",
-					ioc->name, reply_q->msix_index);
+			const cpumask_t *mask;
+
+			if (reply_q->msix_index < ioc->high_iops_queues)
 				continue;
+
+			mask = pci_irq_get_affinity(ioc->pdev,
+			    reply_q->msix_index);
+			if (!mask) {
+				ioc_warn(ioc, "no affinity for msi %x\n",
+					 reply_q->msix_index);
+				goto fall_back;
 			}
 
 			for_each_cpu_and(cpu, mask, cpu_online_mask) {
@@ -2774,12 +2940,18 @@
 		}
 		return;
 	}
+
+fall_back:
 	cpu = cpumask_first(cpu_online_mask);
+	nr_msix -= ioc->high_iops_queues;
+	index = 0;
 
 	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
-
 		unsigned int i, group = nr_cpus / nr_msix;
 
+		if (reply_q->msix_index < ioc->high_iops_queues)
+			continue;
+
 		if (cpu >= nr_cpus)
 			break;
 
@@ -2795,6 +2967,52 @@
 }
 
 /**
+ * _base_check_and_enable_high_iops_queues - enable high iops mode
+ * @ioc: per adapter object
+ * @hba_msix_vector_count: msix vectors supported by HBA
+ *
+ * Enable high iops queues only if
+ *  - HBA is a SEA/AERO controller and
+ *  - MSI-X vectors supported by the HBA is 128 and
+ *  - total CPU count in the system >= 16 and
+ *  - loaded driver with default max_msix_vectors module parameter and
+ *  - system booted in non-kdump mode
+ *
+ * returns nothing.
+ */
+static void
+_base_check_and_enable_high_iops_queues(struct MPT3SAS_ADAPTER *ioc,
+		int hba_msix_vector_count)
+{
+	u16 lnksta, speed;
+
+	if (perf_mode == MPT_PERF_MODE_IOPS ||
+	    perf_mode == MPT_PERF_MODE_LATENCY) {
+		ioc->high_iops_queues = 0;
+		return;
+	}
+
+	if (perf_mode == MPT_PERF_MODE_DEFAULT) {
+
+		pcie_capability_read_word(ioc->pdev, PCI_EXP_LNKSTA, &lnksta);
+		speed = lnksta & PCI_EXP_LNKSTA_CLS;
+
+		if (speed < 0x4) {
+			ioc->high_iops_queues = 0;
+			return;
+		}
+	}
+
+	if (!reset_devices && ioc->is_aero_ioc &&
+	    hba_msix_vector_count == MPT3SAS_GEN35_MAX_MSIX_QUEUES &&
+	    num_online_cpus() >= MPT3SAS_HIGH_IOPS_REPLY_QUEUES &&
+	    max_msix_vectors == -1)
+		ioc->high_iops_queues = MPT3SAS_HIGH_IOPS_REPLY_QUEUES;
+	else
+		ioc->high_iops_queues = 0;
+}
+
+/**
  * _base_disable_msix - disables msix
  * @ioc: per adapter object
  *
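
In the default perf_mode branch above, lnksta & PCI_EXP_LNKSTA_CLS
extracts the negotiated link speed, encoded as 0x1..0x4 for 2.5, 5, 8
and 16 GT/s respectively, so speed < 0x4 disables the high-iops queues
on anything slower than a PCIe Gen4 link. An equivalent hypothetical
helper:

	/* Hypothetical helper, equivalent to the inline check above:
	 * require at least a 16 GT/s (Gen4) negotiated link speed.
	 */
	static bool mpt3sas_link_is_gen4(u16 lnksta)
	{
		return (lnksta & PCI_EXP_LNKSTA_CLS) >= 0x4;
	}
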
@@ -2804,11 +3022,38 @@
 {
 	if (!ioc->msix_enable)
 		return;
-	pci_disable_msix(ioc->pdev);
+	pci_free_irq_vectors(ioc->pdev);
 	ioc->msix_enable = 0;
 }
 
 /**
+ * _base_alloc_irq_vectors - allocate msix vectors
+ * @ioc: per adapter object
+ *
+ */
+static int
+_base_alloc_irq_vectors(struct MPT3SAS_ADAPTER *ioc)
+{
+	int i, irq_flags = PCI_IRQ_MSIX;
+	struct irq_affinity desc = { .pre_vectors = ioc->high_iops_queues };
+	struct irq_affinity *descp = &desc;
+
+	if (ioc->smp_affinity_enable)
+		irq_flags |= PCI_IRQ_AFFINITY;
+	else
+		descp = NULL;
+
+	ioc_info(ioc, " %d %d\n", ioc->high_iops_queues,
+	    ioc->msix_vector_count);
+
+	i = pci_alloc_irq_vectors_affinity(ioc->pdev,
+	    ioc->high_iops_queues,
+	    ioc->msix_vector_count, irq_flags, descp);
+
+	return i;
+}
+
+/**
  * _base_enable_msix - enables msix, failback to io_apic
  * @ioc: per adapter object
  *
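
_base_alloc_irq_vectors() above leans on struct irq_affinity's
pre_vectors field: the first high_iops_queues vectors are excluded from
the PCI core's automatic affinity spreading, leaving the driver free to
pin them to the HBA's local NUMA node itself (the
irq_set_affinity_hint() loop added earlier in this patch), while the
remaining vectors get the usual managed spread. A minimal illustration
with a hypothetical count of eight:

	/* The first .pre_vectors MSI-X vectors are left unspread by
	 * pci_alloc_irq_vectors_affinity(); here they back the
	 * high-iops reply queues.
	 */
	struct irq_affinity desc = { .pre_vectors = 8 };	/* hypothetical */
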
@@ -2819,7 +3064,8 @@
 	int r;
 	int i, local_max_msix_vectors;
 	u8 try_msix = 0;
-	unsigned int irq_flags = PCI_IRQ_MSIX;
+
+	ioc->msix_load_balance = false;
 
 	if (msix_disable == -1 || msix_disable == 0)
 		try_msix = 1;
@@ -2830,13 +3076,16 @@
 	if (_base_check_enable_msix(ioc) != 0)
 		goto try_ioapic;
 
-	ioc->reply_queue_count = min_t(int, ioc->cpu_count,
+	ioc_info(ioc, "MSI-X vectors supported: %d\n", ioc->msix_vector_count);
+	pr_info("\t no of cores: %d, max_msix_vectors: %d\n",
+		ioc->cpu_count, max_msix_vectors);
+	if (ioc->is_aero_ioc)
+		_base_check_and_enable_high_iops_queues(ioc,
+			ioc->msix_vector_count);
+	ioc->reply_queue_count =
+		min_t(int, ioc->cpu_count + ioc->high_iops_queues,
 		ioc->msix_vector_count);
 
-	printk(MPT3SAS_FMT "MSI-X vectors supported: %d, no of cores"
-	  ": %d, max_msix_vectors: %d\n", ioc->name, ioc->msix_vector_count,
-	  ioc->cpu_count, max_msix_vectors);
-
 	if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
 		local_max_msix_vectors = (reset_devices) ? 1 : 8;
 	else
@@ -2848,18 +3097,27 @@
 	else if (local_max_msix_vectors == 0)
 		goto try_ioapic;
 
-	if (ioc->msix_vector_count < ioc->cpu_count)
-		smp_affinity_enable = 0;
+	/*
+	 * Enable msix_load_balance only if combined reply queue mode is
+	 * disabled on SAS3 & above generation HBA devices.
+	 */
+	if (!ioc->combined_reply_queue &&
+	    ioc->hba_mpi_version_belonged != MPI2_VERSION) {
+		ioc->msix_load_balance = true;
+	}
 
-	if (smp_affinity_enable)
-		irq_flags |= PCI_IRQ_AFFINITY;
+	/*
+	 * smp affinity setting is not needed when msix load balance
+	 * is enabled.
+	 */
+	if (ioc->msix_load_balance)
+		ioc->smp_affinity_enable = 0;
 
-	r = pci_alloc_irq_vectors(ioc->pdev, 1, ioc->reply_queue_count,
-				  irq_flags);
+	r = _base_alloc_irq_vectors(ioc);
 	if (r < 0) {
-		dfailprintk(ioc, pr_info(MPT3SAS_FMT
-			"pci_alloc_irq_vectors failed (r=%d) !!!\n",
-			ioc->name, r));
+		dfailprintk(ioc,
+			    ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n",
+				     r));
 		goto try_ioapic;
 	}
 
@@ -2874,17 +3132,21 @@
 		}
 	}
 
+	ioc_info(ioc, "High IOPs queues : %s\n",
+			ioc->high_iops_queues ? "enabled" : "disabled");
+
 	return 0;
 
 /* failback to io_apic interrupt routing */
  try_ioapic:
-
+	ioc->high_iops_queues = 0;
+	ioc_info(ioc, "High IOPs queues : disabled\n");
 	ioc->reply_queue_count = 1;
 	r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY);
 	if (r < 0) {
-		dfailprintk(ioc, pr_info(MPT3SAS_FMT
-			"pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n",
-			ioc->name, r));
+		dfailprintk(ioc,
+			    ioc_info(ioc, "pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n",
+				     r));
 	} else
 		r = _base_request_irq(ioc, 0);
 
@@ -2900,8 +3162,7 @@
 {
 	struct pci_dev *pdev = ioc->pdev;
 
-	dexitprintk(ioc, printk(MPT3SAS_FMT "%s\n",
-		ioc->name, __func__));
+	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	_base_free_irq(ioc);
 	_base_disable_msix(ioc);
@@ -2922,6 +3183,37 @@
 	}
 }
 
+static int
+_base_diag_reset(struct MPT3SAS_ADAPTER *ioc);
+
+/**
+ * _base_check_for_fault_and_issue_reset - check if IOC is in fault state
+ *     and if it is in fault state then issue diag reset.
+ * @ioc: per adapter object
+ *
+ * Returns: 0 for success, non-zero for failure.
+ */
+static int
+_base_check_for_fault_and_issue_reset(struct MPT3SAS_ADAPTER *ioc)
+{
+	u32 ioc_state;
+	int rc = -EFAULT;
+
+	dinitprintk(ioc, pr_info("%s\n", __func__));
+	if (ioc->pci_error_recovery)
+		return 0;
+	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
+	dhsprintk(ioc, pr_info("%s: ioc_state(0x%08x)\n", __func__, ioc_state));
+
+	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
+		mpt3sas_base_fault_info(ioc, ioc_state &
+		    MPI2_DOORBELL_DATA_MASK);
+		rc = _base_diag_reset(ioc);
+	}
+
+	return rc;
+}
+
 /**
  * mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
  * @ioc: per adapter object
@@ -2934,18 +3226,16 @@
 	struct pci_dev *pdev = ioc->pdev;
 	u32 memap_sz;
 	u32 pio_sz;
-	int i, r = 0;
+	int i, r = 0, rc;
 	u64 pio_chip = 0;
 	phys_addr_t chip_phys = 0;
 	struct adapter_reply_queue *reply_q;
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n",
-	    ioc->name, __func__));
+	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
 	if (pci_enable_device_mem(pdev)) {
-		pr_warn(MPT3SAS_FMT "pci_enable_device_mem: failed\n",
-			ioc->name);
+		ioc_warn(ioc, "pci_enable_device_mem: failed\n");
 		ioc->bars = 0;
 		return -ENODEV;
 	}
@@ -2953,8 +3243,7 @@
 
 	if (pci_request_selected_regions(pdev, ioc->bars,
 	    ioc->driver_name)) {
-		pr_warn(MPT3SAS_FMT "pci_request_selected_regions: failed\n",
-			ioc->name);
+		ioc_warn(ioc, "pci_request_selected_regions: failed\n");
 		ioc->bars = 0;
 		r = -ENODEV;
 		goto out_fail;
@@ -2967,8 +3256,7 @@
 
 
 	if (_base_config_dma_addressing(ioc, pdev) != 0) {
-		pr_warn(MPT3SAS_FMT "no suitable DMA mask for %s\n",
-		    ioc->name, pci_name(pdev));
+		ioc_warn(ioc, "no suitable DMA mask for %s\n", pci_name(pdev));
 		r = -ENODEV;
 		goto out_fail;
 	}
@@ -2991,8 +3279,7 @@
 	}
 
 	if (ioc->chip == NULL) {
-		pr_err(MPT3SAS_FMT "unable to map adapter memory! "
-			" or resource not found\n", ioc->name);
+		ioc_err(ioc, "unable to map adapter memory! or resource not found\n");
 		r = -EINVAL;
 		goto out_fail;
 	}
@@ -3000,8 +3287,11 @@
 	_base_mask_interrupts(ioc);
 
 	r = _base_get_ioc_facts(ioc);
-	if (r)
-		goto out_fail;
+	if (r) {
+		rc = _base_check_for_fault_and_issue_reset(ioc);
+		if (rc || (_base_get_ioc_facts(ioc)))
+			goto out_fail;
+	}
 
 	if (!ioc->rdpq_array_enable_assigned) {
 		ioc->rdpq_array_enable = ioc->rdpq_array_capable;
@@ -3012,6 +3302,8 @@
 	if (r)
 		goto out_fail;
 
+	if (!ioc->is_driver_loading)
+		_base_init_irqpolls(ioc);
 	/* Use the Combined reply queue feature only for SAS3 C0 & higher
 	 * revision HBAs and also only when reply queue count is greater than 8
 	 */
@@ -3026,9 +3318,8 @@
 		     ioc->combined_reply_index_count,
 		     sizeof(resource_size_t *), GFP_KERNEL);
 		if (!ioc->replyPostRegisterIndex) {
-			dfailprintk(ioc, printk(MPT3SAS_FMT
-			"allocation for reply Post Register Index failed!!!\n",
-								   ioc->name));
+			dfailprintk(ioc,
+				    ioc_warn(ioc, "allocation for reply Post Register Index failed!!!\n"));
 			r = -ENOMEM;
 			goto out_fail;
 		}
@@ -3053,15 +3344,15 @@
 	}
 
 	list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
-		pr_info(MPT3SAS_FMT "%s: IRQ %d\n",
-		    reply_q->name,  ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
-		    "IO-APIC enabled"),
-		    pci_irq_vector(ioc->pdev, reply_q->msix_index));
+		pr_info("%s: %s enabled: IRQ %d\n",
+			reply_q->name,
+			ioc->msix_enable ? "PCI-MSI-X" : "IO-APIC",
+			pci_irq_vector(ioc->pdev, reply_q->msix_index));
 
-	pr_info(MPT3SAS_FMT "iomem(%pap), mapped(0x%p), size(%d)\n",
-	    ioc->name, &chip_phys, ioc->chip, memap_sz);
-	pr_info(MPT3SAS_FMT "ioport(0x%016llx), size(%d)\n",
-	    ioc->name, (unsigned long long)pio_chip, pio_sz);
+	ioc_info(ioc, "iomem(%pap), mapped(0x%p), size(%d)\n",
+		 &chip_phys, ioc->chip, memap_sz);
+	ioc_info(ioc, "ioport(0x%016llx), size(%d)\n",
+		 (unsigned long long)pio_chip, pio_sz);
 
 	/* Save PCI configuration state for recovery from PCI AER/EEH errors */
 	pci_save_state(pdev);
@@ -3153,13 +3444,58 @@
 	return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
 }
 
+/**
+ * _base_get_msix_index - get the msix index
+ * @ioc: per adapter object
+ * @scmd: scsi_cmnd object
+ *
+ * returns msix index of general reply queues,
+ * i.e. reply queue on which IO request's reply
+ * should be posted by the HBA firmware.
+ */
 static inline u8
-_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc)
+_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc,
+	struct scsi_cmnd *scmd)
 {
+	/* Enables reply_queue load balancing */
+	if (ioc->msix_load_balance)
+		return ioc->reply_queue_count ?
+		    base_mod64(atomic64_add_return(1,
+		    &ioc->total_io_cnt), ioc->reply_queue_count) : 0;
+
 	return ioc->cpu_msix_table[raw_smp_processor_id()];
 }
 
 /**
+ * _base_get_high_iops_msix_index - get the msix index of
+ *				high iops queues
+ * @ioc: per adapter object
+ * @scmd: scsi_cmnd object
+ *
+ * Returns: msix index of high iops reply queues.
+ * i.e. high iops reply queue on which IO request's
+ * reply should be posted by the HBA firmware.
+ */
+static inline u8
+_base_get_high_iops_msix_index(struct MPT3SAS_ADAPTER *ioc,
+	struct scsi_cmnd *scmd)
+{
+	/*
+	 * Round robin the IO interrupts among the high iops
+	 * reply queues in terms of batch count 16 when outstanding
+	 * IOs on the target device is >=8.
+	 */
+	if (atomic_read(&scmd->device->device_busy) >
+	    MPT3SAS_DEVICE_HIGH_IOPS_DEPTH)
+		return base_mod64((
+		    atomic64_add_return(1, &ioc->high_iops_outstanding) /
+		    MPT3SAS_HIGH_IOPS_BATCH_COUNT),
+		    MPT3SAS_HIGH_IOPS_REPLY_QUEUES);
+
+	return _base_get_msix_index(ioc, scmd);
+}
+
+/**
  * mpt3sas_base_get_smid - obtain a free smid from internal queue
  * @ioc: per adapter object
  * @cb_idx: callback index
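
A worked example of the batched round robin in
_base_get_high_iops_msix_index() above, assuming
MPT3SAS_HIGH_IOPS_BATCH_COUNT is 16 and MPT3SAS_HIGH_IOPS_REPLY_QUEUES
is 8 (both defined in mpt3sas_base.h, outside this hunk): the Nth
qualifying IO lands on high-iops queue (N / 16) % 8, so sixteen
consecutive submissions share one reply queue before the rotation moves
on.

	/* e.g. N = 100: 100 / 16 = 6, 6 % 8 = 6 -> queue 6;
	 *      N = 130: 130 / 16 = 8, 8 % 8 = 0 -> wraps to queue 0.
	 */
	u8 queue = base_mod64(n / MPT3SAS_HIGH_IOPS_BATCH_COUNT,
	    MPT3SAS_HIGH_IOPS_REPLY_QUEUES);
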
@@ -3176,8 +3512,7 @@
 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
 	if (list_empty(&ioc->internal_free_list)) {
 		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
-		pr_err(MPT3SAS_FMT "%s: smid not available\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: smid not available\n", __func__);
 		return 0;
 	}
 
@@ -3208,8 +3543,8 @@
 
 	smid = tag + 1;
 	request->cb_idx = cb_idx;
-	request->msix_io = _base_get_msix_index(ioc);
 	request->smid = smid;
+	request->scmd = scmd;
 	INIT_LIST_HEAD(&request->chain_list);
 	return smid;
 }
@@ -3263,6 +3598,7 @@
 		return;
 	st->cb_idx = 0xFF;
 	st->direct_io = 0;
+	st->scmd = NULL;
 	atomic_set(&ioc->chain_lookup[st->smid - 1].chain_offset, 0);
 	st->smid = 0;
 }
@@ -3280,12 +3616,18 @@
 
 	if (smid < ioc->hi_priority_smid) {
 		struct scsiio_tracker *st;
+		void *request;
 
 		st = _get_st_from_smid(ioc, smid);
 		if (!st) {
 			_base_recovery_check(ioc);
 			return;
 		}
+
+		/* Clear MPI request frame */
+		request = mpt3sas_base_get_msg_frame(ioc, smid);
+		memset(request, 0, ioc->request_sz);
+
 		mpt3sas_base_clear_st(ioc, st);
 		_base_recovery_check(ioc);
 		return;
@@ -3326,7 +3668,6 @@
 	spin_lock_irqsave(writeq_lock, flags);
 	__raw_writel((u32)(b), addr);
 	__raw_writel((u32)(b >> 32), (addr + 4));
-	mmiowb();
 	spin_unlock_irqrestore(writeq_lock, flags);
 }
 
@@ -3344,8 +3685,9 @@
 static inline void
 _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
 {
+	wmb();
 	__raw_writeq(b, addr);
-	mmiowb();
+	barrier();
 }
 #else
 static inline void
@@ -3356,13 +3698,37 @@
 #endif
 
 /**
+ * _base_set_and_get_msix_index - get the msix index and assign to msix_io
+ *                                variable of scsi tracker
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * returns msix index.
+ */
+static u8
+_base_set_and_get_msix_index(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+	struct scsiio_tracker *st = NULL;
+
+	if (smid < ioc->hi_priority_smid)
+		st = _get_st_from_smid(ioc, smid);
+
+	if (st == NULL)
+		return _base_get_msix_index(ioc, NULL);
+
+	st->msix_io = ioc->get_msix_index_for_smlio(ioc, st->scmd);
+	return st->msix_io;
+}
+
+/**
  * _base_put_smid_mpi_ep_scsi_io - send SCSI_IO request to firmware
  * @ioc: per adapter object
  * @smid: system request message index
  * @handle: device handle
  */
 static void
-_base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
+_base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc,
+	u16 smid, u16 handle)
 {
 	Mpi2RequestDescriptorUnion_t descriptor;
 	u64 *request = (u64 *)&descriptor;
@@ -3375,7 +3741,7 @@
 	_base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
 					ioc->request_sz);
 	descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
-	descriptor.SCSIIO.MSIxIndex =  _base_get_msix_index(ioc);
+	descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
 	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
 	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
 	descriptor.SCSIIO.LMID = 0;
@@ -3397,7 +3763,7 @@
 
 
 	descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
-	descriptor.SCSIIO.MSIxIndex =  _base_get_msix_index(ioc);
+	descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
 	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
 	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
 	descriptor.SCSIIO.LMID = 0;
@@ -3406,13 +3772,13 @@
 }
 
 /**
- * mpt3sas_base_put_smid_fast_path - send fast path request to firmware
+ * _base_put_smid_fast_path - send fast path request to firmware
  * @ioc: per adapter object
  * @smid: system request message index
  * @handle: device handle
  */
-void
-mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+static void
+_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
 	u16 handle)
 {
 	Mpi2RequestDescriptorUnion_t descriptor;
@@ -3420,7 +3786,7 @@
 
 	descriptor.SCSIIO.RequestFlags =
 	    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
-	descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
+	descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
 	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
 	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
 	descriptor.SCSIIO.LMID = 0;
@@ -3429,13 +3795,13 @@
 }
 
 /**
- * mpt3sas_base_put_smid_hi_priority - send Task Management request to firmware
+ * _base_put_smid_hi_priority - send Task Management request to firmware
  * @ioc: per adapter object
  * @smid: system request message index
  * @msix_task: msix_task will be same as msix of IO in case of task abort else 0.
  */
-void
-mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+static void
+_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
 	u16 msix_task)
 {
 	Mpi2RequestDescriptorUnion_t descriptor;
@@ -3484,7 +3850,7 @@
 
 	descriptor.Default.RequestFlags =
 		MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
-	descriptor.Default.MSIxIndex =  _base_get_msix_index(ioc);
+	descriptor.Default.MSIxIndex =  _base_set_and_get_msix_index(ioc, smid);
 	descriptor.Default.SMID = cpu_to_le16(smid);
 	descriptor.Default.LMID = 0;
 	descriptor.Default.DescriptorTypeDependent = 0;
@@ -3493,12 +3859,12 @@
 }
 
 /**
- * mpt3sas_base_put_smid_default - Default, primarily used for config pages
+ * _base_put_smid_default - Default, primarily used for config pages
  * @ioc: per adapter object
  * @smid: system request message index
  */
-void
-mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+static void
+_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
 {
 	Mpi2RequestDescriptorUnion_t descriptor;
 	void *mpi_req_iomem;
@@ -3516,7 +3882,7 @@
 	}
 	request = (u64 *)&descriptor;
 	descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
-	descriptor.Default.MSIxIndex =  _base_get_msix_index(ioc);
+	descriptor.Default.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
 	descriptor.Default.SMID = cpu_to_le16(smid);
 	descriptor.Default.LMID = 0;
 	descriptor.Default.DescriptorTypeDependent = 0;
@@ -3530,6 +3896,95 @@
 }
 
 /**
+ * _base_put_smid_scsi_io_atomic - send SCSI_IO request to firmware using
+ *   Atomic Request Descriptor
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @handle: device handle, unused in this function, for function type match
+ *
+ * Return nothing.
+ */
+static void
+_base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+	u16 handle)
+{
+	Mpi26AtomicRequestDescriptor_t descriptor;
+	u32 *request = (u32 *)&descriptor;
+
+	descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
+	descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
+	descriptor.SMID = cpu_to_le16(smid);
+
+	writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
+}
+
+/**
+ * _base_put_smid_fast_path_atomic - send fast path request to firmware
+ * using Atomic Request Descriptor
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @handle: device handle, unused in this function, for function type match
+ * Return nothing
+ */
+static void
+_base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+	u16 handle)
+{
+	Mpi26AtomicRequestDescriptor_t descriptor;
+	u32 *request = (u32 *)&descriptor;
+
+	descriptor.RequestFlags = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
+	descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
+	descriptor.SMID = cpu_to_le16(smid);
+
+	writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
+}
+
+/**
+ * _base_put_smid_hi_priority_atomic - send Task Management request to
+ * firmware using Atomic Request Descriptor
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_task: msix_task will be same as msix of IO in case of task abort else 0
+ *
+ * Return nothing.
+ */
+static void
+_base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+	u16 msix_task)
+{
+	Mpi26AtomicRequestDescriptor_t descriptor;
+	u32 *request = (u32 *)&descriptor;
+
+	descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
+	descriptor.MSIxIndex = msix_task;
+	descriptor.SMID = cpu_to_le16(smid);
+
+	writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
+}
+
+/**
+ * _base_put_smid_default_atomic - Default, primarily used for config pages
+ * using Atomic Request Descriptor
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Return nothing.
+ */
+static void
+_base_put_smid_default_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+	Mpi26AtomicRequestDescriptor_t descriptor;
+	u32 *request = (u32 *)&descriptor;
+
+	descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+	descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
+	descriptor.SMID = cpu_to_le16(smid);
+
+	writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
+}
+
+/**
  * _base_display_OEMs_branding - Display branding string
  * @ioc: per adapter object
  */
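
The *_atomic variants above exist because Aero/Sea controllers accept a
compact descriptor: Mpi26AtomicRequestDescriptor_t packs RequestFlags
(U8), MSIxIndex (U8) and SMID (U16) into four bytes, so a single 32-bit
writel() to AtomicRequestDescriptorPost posts it, avoiding the
spinlocked double-writel path that 64-bit descriptors need on 32-bit
kernels. A compile-time restatement of that size assumption:

	/* The atomic descriptor must remain a single 32-bit quantity
	 * for the one-shot writel() post to be valid.
	 */
	BUILD_BUG_ON(sizeof(Mpi26AtomicRequestDescriptor_t) != 4);
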
@@ -3545,89 +4000,87 @@
 		case MPI2_MFGPAGE_DEVID_SAS2008:
 			switch (ioc->pdev->subsystem_device) {
 			case MPT2SAS_INTEL_RMS2LL080_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				    MPT2SAS_INTEL_RMS2LL080_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT2SAS_INTEL_RMS2LL080_BRANDING);
 				break;
 			case MPT2SAS_INTEL_RMS2LL040_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				    MPT2SAS_INTEL_RMS2LL040_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT2SAS_INTEL_RMS2LL040_BRANDING);
 				break;
 			case MPT2SAS_INTEL_SSD910_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				    MPT2SAS_INTEL_SSD910_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT2SAS_INTEL_SSD910_BRANDING);
 				break;
 			default:
-				pr_info(MPT3SAS_FMT
-				 "Intel(R) Controller: Subsystem ID: 0x%X\n",
-				 ioc->name, ioc->pdev->subsystem_device);
+				ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
+					 ioc->pdev->subsystem_device);
 				break;
 			}
+			break;
 		case MPI2_MFGPAGE_DEVID_SAS2308_2:
 			switch (ioc->pdev->subsystem_device) {
 			case MPT2SAS_INTEL_RS25GB008_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				    MPT2SAS_INTEL_RS25GB008_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT2SAS_INTEL_RS25GB008_BRANDING);
 				break;
 			case MPT2SAS_INTEL_RMS25JB080_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				    MPT2SAS_INTEL_RMS25JB080_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT2SAS_INTEL_RMS25JB080_BRANDING);
 				break;
 			case MPT2SAS_INTEL_RMS25JB040_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				    MPT2SAS_INTEL_RMS25JB040_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT2SAS_INTEL_RMS25JB040_BRANDING);
 				break;
 			case MPT2SAS_INTEL_RMS25KB080_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				    MPT2SAS_INTEL_RMS25KB080_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT2SAS_INTEL_RMS25KB080_BRANDING);
 				break;
 			case MPT2SAS_INTEL_RMS25KB040_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				    MPT2SAS_INTEL_RMS25KB040_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT2SAS_INTEL_RMS25KB040_BRANDING);
 				break;
 			case MPT2SAS_INTEL_RMS25LB040_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				    MPT2SAS_INTEL_RMS25LB040_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT2SAS_INTEL_RMS25LB040_BRANDING);
 				break;
 			case MPT2SAS_INTEL_RMS25LB080_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				    MPT2SAS_INTEL_RMS25LB080_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT2SAS_INTEL_RMS25LB080_BRANDING);
 				break;
 			default:
-				pr_info(MPT3SAS_FMT
-				 "Intel(R) Controller: Subsystem ID: 0x%X\n",
-				 ioc->name, ioc->pdev->subsystem_device);
+				ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
+					 ioc->pdev->subsystem_device);
 				break;
 			}
+			break;
 		case MPI25_MFGPAGE_DEVID_SAS3008:
 			switch (ioc->pdev->subsystem_device) {
 			case MPT3SAS_INTEL_RMS3JC080_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-					MPT3SAS_INTEL_RMS3JC080_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT3SAS_INTEL_RMS3JC080_BRANDING);
 				break;
 
 			case MPT3SAS_INTEL_RS3GC008_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-					MPT3SAS_INTEL_RS3GC008_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT3SAS_INTEL_RS3GC008_BRANDING);
 				break;
 			case MPT3SAS_INTEL_RS3FC044_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-					MPT3SAS_INTEL_RS3FC044_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT3SAS_INTEL_RS3FC044_BRANDING);
 				break;
 			case MPT3SAS_INTEL_RS3UC080_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-					MPT3SAS_INTEL_RS3UC080_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT3SAS_INTEL_RS3UC080_BRANDING);
 				break;
 			default:
-				pr_info(MPT3SAS_FMT
-				 "Intel(R) Controller: Subsystem ID: 0x%X\n",
-				 ioc->name, ioc->pdev->subsystem_device);
+				ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
+					 ioc->pdev->subsystem_device);
 				break;
 			}
 			break;
 		default:
-			pr_info(MPT3SAS_FMT
-			 "Intel(R) Controller: Subsystem ID: 0x%X\n",
-			 ioc->name, ioc->pdev->subsystem_device);
+			ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
+				 ioc->pdev->subsystem_device);
 			break;
 		}
 		break;
@@ -3636,57 +4089,54 @@
 		case MPI2_MFGPAGE_DEVID_SAS2008:
 			switch (ioc->pdev->subsystem_device) {
 			case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				 MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING);
 				break;
 			case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				 MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING);
 				break;
 			case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				 MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING);
 				break;
 			case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				 MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING);
 				break;
 			case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				 MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING);
 				break;
 			case MPT2SAS_DELL_PERC_H200_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				 MPT2SAS_DELL_PERC_H200_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT2SAS_DELL_PERC_H200_BRANDING);
 				break;
 			case MPT2SAS_DELL_6GBPS_SAS_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				 MPT2SAS_DELL_6GBPS_SAS_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT2SAS_DELL_6GBPS_SAS_BRANDING);
 				break;
 			default:
-				pr_info(MPT3SAS_FMT
-				   "Dell 6Gbps HBA: Subsystem ID: 0x%X\n",
-				   ioc->name, ioc->pdev->subsystem_device);
+				ioc_info(ioc, "Dell 6Gbps HBA: Subsystem ID: 0x%X\n",
+					 ioc->pdev->subsystem_device);
 				break;
 			}
 			break;
 		case MPI25_MFGPAGE_DEVID_SAS3008:
 			switch (ioc->pdev->subsystem_device) {
 			case MPT3SAS_DELL_12G_HBA_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-					MPT3SAS_DELL_12G_HBA_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT3SAS_DELL_12G_HBA_BRANDING);
 				break;
 			default:
-				pr_info(MPT3SAS_FMT
-				   "Dell 12Gbps HBA: Subsystem ID: 0x%X\n",
-				   ioc->name, ioc->pdev->subsystem_device);
+				ioc_info(ioc, "Dell 12Gbps HBA: Subsystem ID: 0x%X\n",
+					 ioc->pdev->subsystem_device);
 				break;
 			}
 			break;
 		default:
-			pr_info(MPT3SAS_FMT
-			   "Dell HBA: Subsystem ID: 0x%X\n", ioc->name,
-			   ioc->pdev->subsystem_device);
+			ioc_info(ioc, "Dell HBA: Subsystem ID: 0x%X\n",
+				 ioc->pdev->subsystem_device);
 			break;
 		}
 		break;
@@ -3695,46 +4145,42 @@
 		case MPI25_MFGPAGE_DEVID_SAS3008:
 			switch (ioc->pdev->subsystem_device) {
 			case MPT3SAS_CISCO_12G_8E_HBA_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-					MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
 				break;
 			case MPT3SAS_CISCO_12G_8I_HBA_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-					MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
 				break;
 			case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-					MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
 				break;
 			default:
-				pr_info(MPT3SAS_FMT
-				  "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
-				  ioc->name, ioc->pdev->subsystem_device);
+				ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
+					 ioc->pdev->subsystem_device);
 				break;
 			}
 			break;
 		case MPI25_MFGPAGE_DEVID_SAS3108_1:
 			switch (ioc->pdev->subsystem_device) {
 			case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
 				break;
 			case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING
-				);
+				ioc_info(ioc, "%s\n",
+					 MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING);
 				break;
 			default:
-				pr_info(MPT3SAS_FMT
-				 "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
-				 ioc->name, ioc->pdev->subsystem_device);
+				ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
+					 ioc->pdev->subsystem_device);
 				break;
 			}
 			break;
 		default:
-			pr_info(MPT3SAS_FMT
-			   "Cisco SAS HBA: Subsystem ID: 0x%X\n",
-			   ioc->name, ioc->pdev->subsystem_device);
+			ioc_info(ioc, "Cisco SAS HBA: Subsystem ID: 0x%X\n",
+				 ioc->pdev->subsystem_device);
 			break;
 		}
 		break;
@@ -3743,43 +4189,42 @@
 		case MPI2_MFGPAGE_DEVID_SAS2004:
 			switch (ioc->pdev->subsystem_device) {
 			case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				    MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
 				break;
 			default:
-				pr_info(MPT3SAS_FMT
-				   "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
-				   ioc->name, ioc->pdev->subsystem_device);
+				ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
+					 ioc->pdev->subsystem_device);
 				break;
 			}
+			break;
 		case MPI2_MFGPAGE_DEVID_SAS2308_2:
 			switch (ioc->pdev->subsystem_device) {
 			case MPT2SAS_HP_2_4_INTERNAL_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				    MPT2SAS_HP_2_4_INTERNAL_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT2SAS_HP_2_4_INTERNAL_BRANDING);
 				break;
 			case MPT2SAS_HP_2_4_EXTERNAL_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				    MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
 				break;
 			case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				 MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
 				break;
 			case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				    MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
 				break;
 			default:
-				pr_info(MPT3SAS_FMT
-				   "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
-				   ioc->name, ioc->pdev->subsystem_device);
+				ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
+					 ioc->pdev->subsystem_device);
 				break;
 			}
+			break;
 		default:
-			pr_info(MPT3SAS_FMT
-			   "HP SAS HBA: Subsystem ID: 0x%X\n",
-			   ioc->name, ioc->pdev->subsystem_device);
+			ioc_info(ioc, "HP SAS HBA: Subsystem ID: 0x%X\n",
+				 ioc->pdev->subsystem_device);
 			break;
 		}
 	default:
@@ -3806,28 +4251,25 @@
 	u16 smid, ioc_status;
 	size_t data_length;
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				__func__));
+	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
-		pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
-				ioc->name, __func__);
+		ioc_err(ioc, "%s: internal command already in use\n", __func__);
 		return -EAGAIN;
 	}
 
 	data_length = sizeof(Mpi2FWImageHeader_t);
-	fwpkg_data = pci_alloc_consistent(ioc->pdev, data_length,
-			&fwpkg_data_dma);
+	fwpkg_data = dma_alloc_coherent(&ioc->pdev->dev, data_length,
+			&fwpkg_data_dma, GFP_KERNEL);
 	if (!fwpkg_data) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-				ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return -ENOMEM;
 	}
 
 	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
 	if (!smid) {
-		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-				ioc->name, __func__);
+		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
 		r = -EAGAIN;
 		goto out;
 	}
@@ -3842,15 +4284,13 @@
 	ioc->build_sg(ioc, &mpi_request->SGL, 0, 0, fwpkg_data_dma,
 			data_length);
 	init_completion(&ioc->base_cmds.done);
-	mpt3sas_base_put_smid_default(ioc, smid);
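+	/* indirect post; atomic request descriptors are used when supported */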
+	ioc->put_smid_default(ioc, smid);
 	/* Wait for 15 seconds */
 	wait_for_completion_timeout(&ioc->base_cmds.done,
 			FW_IMG_HDR_READ_TIMEOUT*HZ);
-	pr_info(MPT3SAS_FMT "%s: complete\n",
-			ioc->name, __func__);
+	ioc_info(ioc, "%s: complete\n", __func__);
 	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
-		pr_err(MPT3SAS_FMT "%s: timeout\n",
-				ioc->name, __func__);
+		ioc_err(ioc, "%s: timeout\n", __func__);
 		_debug_dump_mf(mpi_request,
 				sizeof(Mpi25FWUploadRequest_t)/4);
 		r = -ETIME;
@@ -3864,13 +4304,11 @@
 			if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
 				FWImgHdr = (Mpi2FWImageHeader_t *)fwpkg_data;
 				if (FWImgHdr->PackageVersion.Word) {
-					pr_info(MPT3SAS_FMT "FW Package Version"
-					"(%02d.%02d.%02d.%02d)\n",
-					ioc->name,
-					FWImgHdr->PackageVersion.Struct.Major,
-					FWImgHdr->PackageVersion.Struct.Minor,
-					FWImgHdr->PackageVersion.Struct.Unit,
-					FWImgHdr->PackageVersion.Struct.Dev);
+					ioc_info(ioc, "FW Package Version (%02d.%02d.%02d.%02d)\n",
+						 FWImgHdr->PackageVersion.Struct.Major,
+						 FWImgHdr->PackageVersion.Struct.Minor,
+						 FWImgHdr->PackageVersion.Struct.Unit,
+						 FWImgHdr->PackageVersion.Struct.Dev);
 				}
 			} else {
 				_debug_dump_mf(&mpi_reply,
@@ -3881,7 +4319,7 @@
 	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
 out:
 	if (fwpkg_data)
-		pci_free_consistent(ioc->pdev, data_length, fwpkg_data,
+		dma_free_coherent(&ioc->pdev->dev, data_length, fwpkg_data,
 				fwpkg_data_dma);
 	return r;
 }
@@ -3900,18 +4338,17 @@
 
 	bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
 	strncpy(desc, ioc->manu_pg0.ChipName, 16);
-	pr_info(MPT3SAS_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "\
-	   "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
-	    ioc->name, desc,
-	   (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
-	   (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
-	   (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
-	   ioc->facts.FWVersion.Word & 0x000000FF,
-	   ioc->pdev->revision,
-	   (bios_version & 0xFF000000) >> 24,
-	   (bios_version & 0x00FF0000) >> 16,
-	   (bios_version & 0x0000FF00) >> 8,
-	    bios_version & 0x000000FF);
+	ioc_info(ioc, "%s: FWVersion(%02d.%02d.%02d.%02d), ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
+		 desc,
+		 (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
+		 (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
+		 (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
+		 ioc->facts.FWVersion.Word & 0x000000FF,
+		 ioc->pdev->revision,
+		 (bios_version & 0xFF000000) >> 24,
+		 (bios_version & 0x00FF0000) >> 16,
+		 (bios_version & 0x0000FF00) >> 8,
+		 bios_version & 0x000000FF);
 
 	_base_display_OEMs_branding(ioc);
 
@@ -3920,82 +4357,81 @@
 		i++;
 	}
 
-	pr_info(MPT3SAS_FMT "Protocol=(", ioc->name);
+	ioc_info(ioc, "Protocol=(");
 
 	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
-		pr_info("Initiator");
+		pr_cont("Initiator");
 		i++;
 	}
 
 	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
-		pr_info("%sTarget", i ? "," : "");
+		pr_cont("%sTarget", i ? "," : "");
 		i++;
 	}
 
 	i = 0;
-	pr_info("), ");
-	pr_info("Capabilities=(");
+	pr_cont("), Capabilities=(");
 
 	if (!ioc->hide_ir_msg) {
 		if (ioc->facts.IOCCapabilities &
 		    MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
-			pr_info("Raid");
+			pr_cont("Raid");
 			i++;
 		}
 	}
 
 	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
-		pr_info("%sTLR", i ? "," : "");
+		pr_cont("%sTLR", i ? "," : "");
 		i++;
 	}
 
 	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
-		pr_info("%sMulticast", i ? "," : "");
+		pr_cont("%sMulticast", i ? "," : "");
 		i++;
 	}
 
 	if (ioc->facts.IOCCapabilities &
 	    MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
-		pr_info("%sBIDI Target", i ? "," : "");
+		pr_cont("%sBIDI Target", i ? "," : "");
 		i++;
 	}
 
 	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
-		pr_info("%sEEDP", i ? "," : "");
+		pr_cont("%sEEDP", i ? "," : "");
 		i++;
 	}
 
 	if (ioc->facts.IOCCapabilities &
 	    MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
-		pr_info("%sSnapshot Buffer", i ? "," : "");
+		pr_cont("%sSnapshot Buffer", i ? "," : "");
 		i++;
 	}
 
 	if (ioc->facts.IOCCapabilities &
 	    MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
-		pr_info("%sDiag Trace Buffer", i ? "," : "");
+		pr_cont("%sDiag Trace Buffer", i ? "," : "");
 		i++;
 	}
 
 	if (ioc->facts.IOCCapabilities &
 	    MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
-		pr_info("%sDiag Extended Buffer", i ? "," : "");
+		pr_cont("%sDiag Extended Buffer", i ? "," : "");
 		i++;
 	}
 
 	if (ioc->facts.IOCCapabilities &
 	    MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
-		pr_info("%sTask Set Full", i ? "," : "");
+		pr_cont("%sTask Set Full", i ? "," : "");
 		i++;
 	}
 
 	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
 	if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
-		pr_info("%sNCQ", i ? "," : "");
+		pr_cont("%sNCQ", i ? "," : "");
 		i++;
 	}
 
-	pr_info(")\n");
+	pr_cont(")\n");
 }
 
 /**
@@ -4028,21 +4464,21 @@
 	    sizeof(Mpi2SasIOUnit1PhyData_t));
 	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
 	if (!sas_iounit_pg1) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		goto out;
 	}
 	if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
 	    sas_iounit_pg1, sz))) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		goto out;
 	}
 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 	    MPI2_IOCSTATUS_MASK;
 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		goto out;
 	}
 
@@ -4074,11 +4510,11 @@
 		else
 			dmd_new =
 		    dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
-		pr_info(MPT3SAS_FMT "device_missing_delay: old(%d), new(%d)\n",
-			ioc->name, dmd_orignal, dmd_new);
-		pr_info(MPT3SAS_FMT "ioc_missing_delay: old(%d), new(%d)\n",
-			ioc->name, io_missing_delay_original,
-		    io_missing_delay);
+		ioc_info(ioc, "device_missing_delay: old(%d), new(%d)\n",
+			 dmd_orignal, dmd_new);
+		ioc_info(ioc, "ioc_missing_delay: old(%d), new(%d)\n",
+			 io_missing_delay_original,
+			 io_missing_delay);
 		ioc->device_missing_delay = dmd_new;
 		ioc->io_missing_delay = io_missing_delay;
 	}
@@ -4088,6 +4524,71 @@
 }
 
 /**
+ * _base_update_ioc_page1_inlinewith_perf_mode - Update IOC Page1 fields
+ *    according to performance mode.
+ * @ioc: per adapter object
+ */
+static void
+_base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc)
+{
+	Mpi2IOCPage1_t ioc_pg1;
+	Mpi2ConfigReply_t mpi_reply;
+
+	mpt3sas_config_get_ioc_pg1(ioc, &mpi_reply, &ioc->ioc_pg1_copy);
+	memcpy(&ioc_pg1, &ioc->ioc_pg1_copy, sizeof(Mpi2IOCPage1_t));
+
+	switch (perf_mode) {
+	case MPT_PERF_MODE_DEFAULT:
+	case MPT_PERF_MODE_BALANCED:
+		if (ioc->high_iops_queues) {
+			ioc_info(ioc,
+				"Enable interrupt coalescing only for first\t"
+				"%d reply queues\n",
+				MPT3SAS_HIGH_IOPS_REPLY_QUEUES);
+			/*
+			 * If bit 31 is zero, interrupt coalescing is
+			 * enabled for all reply descriptor post queues.
+			 * If bit 31 is set, interrupt coalescing can be
+			 * enabled or disabled per reply descriptor post
+			 * queue group (of 8).  So, to enable coalescing
+			 * only on the first queue group, set bit 31 and
+			 * bit 0.
+			 */
+			ioc_pg1.ProductSpecific = cpu_to_le32(0x80000000 |
+			    ((1 << MPT3SAS_HIGH_IOPS_REPLY_QUEUES/8) - 1));
+			mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
+			ioc_info(ioc, "performance mode: balanced\n");
+			return;
+		}
+		/* Fall through */
+	case MPT_PERF_MODE_LATENCY:
+		/*
+		 * Enable interrupt coalescing on all reply queues
+		 * with timeout value 0xA
+		 */
+		ioc_pg1.CoalescingTimeout = cpu_to_le32(0xa);
+		ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
+		ioc_pg1.ProductSpecific = 0;
+		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
+		ioc_info(ioc, "performance mode: latency\n");
+		break;
+	case MPT_PERF_MODE_IOPS:
+		/*
+		 * Enable interrupt coalescing on all reply queues.
+		 */
+		ioc_info(ioc,
+		    "performance mode: iops with coalescing timeout: 0x%x\n",
+		    le32_to_cpu(ioc_pg1.CoalescingTimeout));
+		ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
+		ioc_pg1.ProductSpecific = 0;
+		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
+		break;
+	}
+}
+
+/**
  * _base_static_config_pages - static start of day config pages
  * @ioc: per adapter object
  */
@@ -4108,7 +4609,7 @@
 	 * flag unset in NVDATA.
 	 */
 	mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11);
-	if (ioc->manu_pg11.EEDPTagMode == 0) {
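+	/* Gen3.5 IOCs keep their NVDATA EEDPTagMode setting as-is */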
+	if (!ioc->is_gen35_ioc && ioc->manu_pg11.EEDPTagMode == 0) {
 		pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
 		    ioc->name);
 		ioc->manu_pg11.EEDPTagMode &= ~0x3;
@@ -4154,6 +4655,8 @@
 
 	if (ioc->iounit_pg8.NumSensors)
 		ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
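+	/* interrupt coalescing is tuned per performance mode on Aero IOCs */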
+	if (ioc->is_aero_ioc)
+		_base_update_ioc_page1_inlinewith_perf_mode(ioc);
 }
 
 /**
@@ -4189,33 +4692,32 @@
 	struct chain_tracker *ct;
 	struct reply_post_struct *rps;
 
-	dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	if (ioc->request) {
-		pci_free_consistent(ioc->pdev, ioc->request_dma_sz,
+		dma_free_coherent(&ioc->pdev->dev, ioc->request_dma_sz,
 		    ioc->request,  ioc->request_dma);
-		dexitprintk(ioc, pr_info(MPT3SAS_FMT
-			"request_pool(0x%p): free\n",
-			ioc->name, ioc->request));
+		dexitprintk(ioc,
+			    ioc_info(ioc, "request_pool(0x%p): free\n",
+				     ioc->request));
 		ioc->request = NULL;
 	}
 
 	if (ioc->sense) {
 		dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
 		dma_pool_destroy(ioc->sense_dma_pool);
-		dexitprintk(ioc, pr_info(MPT3SAS_FMT
-			"sense_pool(0x%p): free\n",
-			ioc->name, ioc->sense));
+		dexitprintk(ioc,
+			    ioc_info(ioc, "sense_pool(0x%p): free\n",
+				     ioc->sense));
 		ioc->sense = NULL;
 	}
 
 	if (ioc->reply) {
 		dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
 		dma_pool_destroy(ioc->reply_dma_pool);
-		dexitprintk(ioc, pr_info(MPT3SAS_FMT
-			"reply_pool(0x%p): free\n",
-			ioc->name, ioc->reply));
+		dexitprintk(ioc,
+			    ioc_info(ioc, "reply_pool(0x%p): free\n",
+				     ioc->reply));
 		ioc->reply = NULL;
 	}
 
@@ -4223,9 +4725,9 @@
 		dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
 		    ioc->reply_free_dma);
 		dma_pool_destroy(ioc->reply_free_dma_pool);
-		dexitprintk(ioc, pr_info(MPT3SAS_FMT
-			"reply_free_pool(0x%p): free\n",
-			ioc->name, ioc->reply_free));
+		dexitprintk(ioc,
+			    ioc_info(ioc, "reply_free_pool(0x%p): free\n",
+				     ioc->reply_free));
 		ioc->reply_free = NULL;
 	}
 
@@ -4237,9 +4739,9 @@
 				    ioc->reply_post_free_dma_pool,
 				    rps->reply_post_free,
 				    rps->reply_post_free_dma);
-				dexitprintk(ioc, pr_info(MPT3SAS_FMT
-				    "reply_post_free_pool(0x%p): free\n",
-				    ioc->name, rps->reply_post_free));
+				dexitprintk(ioc,
+					    ioc_info(ioc, "reply_post_free_pool(0x%p): free\n",
+						     rps->reply_post_free));
 				rps->reply_post_free = NULL;
 			}
 		} while (ioc->rdpq_array_enable &&
@@ -4267,10 +4769,10 @@
 	}
 
 	if (ioc->config_page) {
-		dexitprintk(ioc, pr_info(MPT3SAS_FMT
-		    "config_page(0x%p): free\n", ioc->name,
-		    ioc->config_page));
-		pci_free_consistent(ioc->pdev, ioc->config_page_sz,
+		dexitprintk(ioc,
+			    ioc_info(ioc, "config_page(0x%p): free\n",
+				     ioc->config_page));
+		dma_free_coherent(&ioc->pdev->dev, ioc->config_page_sz,
 		    ioc->config_page, ioc->config_page_dma);
 	}
 
@@ -4338,8 +4840,7 @@
 	int i, j;
 	struct chain_tracker *ct;
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 
 	retry_sz = 0;
@@ -4368,10 +4869,8 @@
 		else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
 			sg_tablesize = min_t(unsigned short, sg_tablesize,
 					SG_MAX_SEGMENTS);
-			pr_warn(MPT3SAS_FMT
-				"sg_tablesize(%u) is bigger than kernel "
-				"defined SG_CHUNK_SIZE(%u)\n", ioc->name,
-				sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
+			ioc_warn(ioc, "sg_tablesize(%u) is bigger than kernel defined SG_CHUNK_SIZE(%u)\n",
+				 sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
 		}
 		ioc->shost->sg_tablesize = sg_tablesize;
 	}
@@ -4381,9 +4880,8 @@
 	if (ioc->internal_depth < INTERNAL_CMDS_COUNT) {
 		if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT +
 				INTERNAL_SCSIIO_CMDS_COUNT)) {
-			pr_err(MPT3SAS_FMT "IOC doesn't have enough Request \
-			    Credits, it has just %d number of credits\n",
-			    ioc->name, facts->RequestCredit);
+			ioc_err(ioc, "IOC doesn't have enough Request Credits, it has just %d number of credits\n",
+				facts->RequestCredit);
 			return -ENOMEM;
 		}
 		ioc->internal_depth = 10;
@@ -4482,11 +4980,12 @@
 		ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
 	}
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "scatter gather: " \
-	    "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
-	    "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message,
-	    ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize,
-	    ioc->chains_needed_per_io));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "scatter gather: sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), chains_per_io(%d)\n",
+			     ioc->max_sges_in_main_message,
+			     ioc->max_sges_in_chain_message,
+			     ioc->shost->sg_tablesize,
+			     ioc->chains_needed_per_io));
 
 	/* reply post queue, 16 byte align */
 	reply_post_free_sz = ioc->reply_post_queue_depth *
@@ -4501,48 +5000,40 @@
 	    sizeof(struct reply_post_struct), GFP_KERNEL);
 
 	if (!ioc->reply_post) {
-		pr_err(MPT3SAS_FMT "reply_post_free pool: kcalloc failed\n",
-			ioc->name);
+		ioc_err(ioc, "reply_post_free pool: kcalloc failed\n");
 		goto out;
 	}
 	ioc->reply_post_free_dma_pool = dma_pool_create("reply_post_free pool",
 	    &ioc->pdev->dev, sz, 16, 0);
 	if (!ioc->reply_post_free_dma_pool) {
-		pr_err(MPT3SAS_FMT
-		 "reply_post_free pool: dma_pool_create failed\n",
-		 ioc->name);
+		ioc_err(ioc, "reply_post_free pool: dma_pool_create failed\n");
 		goto out;
 	}
 	i = 0;
 	do {
 		ioc->reply_post[i].reply_post_free =
-		    dma_pool_alloc(ioc->reply_post_free_dma_pool,
+		    dma_pool_zalloc(ioc->reply_post_free_dma_pool,
 		    GFP_KERNEL,
 		    &ioc->reply_post[i].reply_post_free_dma);
 		if (!ioc->reply_post[i].reply_post_free) {
-			pr_err(MPT3SAS_FMT
-			"reply_post_free pool: dma_pool_alloc failed\n",
-			ioc->name);
+			ioc_err(ioc, "reply_post_free pool: dma_pool_alloc failed\n");
 			goto out;
 		}
-		memset(ioc->reply_post[i].reply_post_free, 0, sz);
-		dinitprintk(ioc, pr_info(MPT3SAS_FMT
-		    "reply post free pool (0x%p): depth(%d),"
-		    "element_size(%d), pool_size(%d kB)\n", ioc->name,
-		    ioc->reply_post[i].reply_post_free,
-		    ioc->reply_post_queue_depth, 8, sz/1024));
-		dinitprintk(ioc, pr_info(MPT3SAS_FMT
-		    "reply_post_free_dma = (0x%llx)\n", ioc->name,
-		    (unsigned long long)
-		    ioc->reply_post[i].reply_post_free_dma));
+		dinitprintk(ioc,
+			    ioc_info(ioc, "reply post free pool (0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
+				     ioc->reply_post[i].reply_post_free,
+				     ioc->reply_post_queue_depth,
+				     8, sz / 1024));
+		dinitprintk(ioc,
+			    ioc_info(ioc, "reply_post_free_dma = (0x%llx)\n",
+				     (u64)ioc->reply_post[i].reply_post_free_dma));
 		total_sz += sz;
 	} while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
 
-	if (ioc->dma_mask == 64) {
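+	/* with a DMA mask wider than 32 bits, confirm a usable coherent mask */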
+	if (ioc->dma_mask > 32) {
 		if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
-			pr_warn(MPT3SAS_FMT
-			    "no suitable consistent DMA mask for %s\n",
-			    ioc->name, pci_name(ioc->pdev));
+			ioc_warn(ioc, "no suitable consistent DMA mask for %s\n",
+				 pci_name(ioc->pdev));
 			goto out;
 		}
 	}
@@ -4554,9 +5045,9 @@
 	 * with some internal commands that could be outstanding
 	 */
 	ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT;
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT
-		"scsi host: can_queue depth (%d)\n",
-		ioc->name, ioc->shost->can_queue));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "scsi host: can_queue depth (%d)\n",
+			     ioc->shost->can_queue));
 
 
 	/* contiguous pool for request and chains, 16 byte align, one extra "
@@ -4572,12 +5063,12 @@
 	sz += (ioc->internal_depth * ioc->request_sz);
 
 	ioc->request_dma_sz = sz;
-	ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma);
+	ioc->request = dma_alloc_coherent(&ioc->pdev->dev, sz,
+			&ioc->request_dma, GFP_KERNEL);
 	if (!ioc->request) {
-		pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
-		    "failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
-		    "total(%d kB)\n", ioc->name, ioc->hba_queue_depth,
-		    ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
+		ioc_err(ioc, "request pool: dma_alloc_coherent failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kB)\n",
+			ioc->hba_queue_depth, ioc->chains_needed_per_io,
+			ioc->request_sz, sz / 1024);
 		if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
 			goto out;
 		retry_sz = 64;
@@ -4585,12 +5076,12 @@
 		_base_release_memory_pools(ioc);
 		goto retry_allocation;
 	}
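+	/* message frames are carved out of this pool below; start zeroed */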
+	memset(ioc->request, 0, sz);
 
 	if (retry_sz)
-		pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
-		    "succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
-		    "total(%d kb)\n", ioc->name, ioc->hba_queue_depth,
-		    ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
+		ioc_err(ioc, "request pool: dma_alloc_coherent succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kb)\n",
+			ioc->hba_queue_depth, ioc->chains_needed_per_io,
+			ioc->request_sz, sz / 1024);
 
 	/* hi-priority queue */
 	ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
@@ -4604,24 +5095,26 @@
 	ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
 	    ioc->request_sz);
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT
-		"request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
-		ioc->name, ioc->request, ioc->hba_queue_depth, ioc->request_sz,
-	    (ioc->hba_queue_depth * ioc->request_sz)/1024));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
+			     ioc->request, ioc->hba_queue_depth,
+			     ioc->request_sz,
+			     (ioc->hba_queue_depth * ioc->request_sz) / 1024));
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "request pool: dma(0x%llx)\n",
-	    ioc->name, (unsigned long long) ioc->request_dma));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "request pool: dma(0x%llx)\n",
+			     (unsigned long long)ioc->request_dma));
 	total_sz += sz;
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "scsiio(0x%p): depth(%d)\n",
-		ioc->name, ioc->request, ioc->scsiio_depth));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "scsiio(0x%p): depth(%d)\n",
+			     ioc->request, ioc->scsiio_depth));
 
 	ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
 	sz = ioc->scsiio_depth * sizeof(struct chain_lookup);
 	ioc->chain_lookup = kzalloc(sz, GFP_KERNEL);
 	if (!ioc->chain_lookup) {
-		pr_err(MPT3SAS_FMT "chain_lookup: __get_free_pages "
-				"failed\n", ioc->name);
+		ioc_err(ioc, "chain_lookup: __get_free_pages failed\n");
 		goto out;
 	}
 
@@ -4629,8 +5122,7 @@
 	for (i = 0; i < ioc->scsiio_depth; i++) {
 		ioc->chain_lookup[i].chains_per_smid = kzalloc(sz, GFP_KERNEL);
 		if (!ioc->chain_lookup[i].chains_per_smid) {
-			pr_err(MPT3SAS_FMT "chain_lookup: "
-					" kzalloc failed\n", ioc->name);
+			ioc_err(ioc, "chain_lookup: kzalloc failed\n");
 			goto out;
 		}
 	}
@@ -4639,29 +5131,27 @@
 	ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
 	    sizeof(struct request_tracker), GFP_KERNEL);
 	if (!ioc->hpr_lookup) {
-		pr_err(MPT3SAS_FMT "hpr_lookup: kcalloc failed\n",
-		    ioc->name);
+		ioc_err(ioc, "hpr_lookup: kcalloc failed\n");
 		goto out;
 	}
 	ioc->hi_priority_smid = ioc->scsiio_depth + 1;
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT
-		"hi_priority(0x%p): depth(%d), start smid(%d)\n",
-		ioc->name, ioc->hi_priority,
-	    ioc->hi_priority_depth, ioc->hi_priority_smid));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "hi_priority(0x%p): depth(%d), start smid(%d)\n",
+			     ioc->hi_priority,
+			     ioc->hi_priority_depth, ioc->hi_priority_smid));
 
 	/* initialize internal queue smid's */
 	ioc->internal_lookup = kcalloc(ioc->internal_depth,
 	    sizeof(struct request_tracker), GFP_KERNEL);
 	if (!ioc->internal_lookup) {
-		pr_err(MPT3SAS_FMT "internal_lookup: kcalloc failed\n",
-		    ioc->name);
+		ioc_err(ioc, "internal_lookup: kcalloc failed\n");
 		goto out;
 	}
 	ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT
-		"internal(0x%p): depth(%d), start smid(%d)\n",
-		ioc->name, ioc->internal,
-	    ioc->internal_depth, ioc->internal_smid));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "internal(0x%p): depth(%d), start smid(%d)\n",
+			     ioc->internal,
+			     ioc->internal_depth, ioc->internal_smid));
 	/*
 	 * The number of NVMe page sized blocks needed is:
 	 *     (((sg_tablesize * 8) - 1) / (page_size - 8)) + 1
@@ -4685,17 +5175,14 @@
 		sz = sizeof(struct pcie_sg_list) * ioc->scsiio_depth;
 		ioc->pcie_sg_lookup = kzalloc(sz, GFP_KERNEL);
 		if (!ioc->pcie_sg_lookup) {
-			pr_info(MPT3SAS_FMT
-			    "PCIe SGL lookup: kzalloc failed\n", ioc->name);
+			ioc_info(ioc, "PCIe SGL lookup: kzalloc failed\n");
 			goto out;
 		}
 		sz = nvme_blocks_needed * ioc->page_size;
 		ioc->pcie_sgl_dma_pool =
 			dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, 16, 0);
 		if (!ioc->pcie_sgl_dma_pool) {
-			pr_info(MPT3SAS_FMT
-			    "PCIe SGL pool: dma_pool_create failed\n",
-			    ioc->name);
+			ioc_info(ioc, "PCIe SGL pool: dma_pool_create failed\n");
 			goto out;
 		}
 
@@ -4708,9 +5195,7 @@
 				ioc->pcie_sgl_dma_pool, GFP_KERNEL,
 				&ioc->pcie_sg_lookup[i].pcie_sgl_dma);
 			if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
-				pr_info(MPT3SAS_FMT
-				    "PCIe SGL pool: dma_pool_alloc failed\n",
-				    ioc->name);
+				ioc_info(ioc, "PCIe SGL pool: dma_pool_alloc failed\n");
 				goto out;
 			}
 			for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
@@ -4724,20 +5209,20 @@
 			}
 		}
 
-		dinitprintk(ioc, pr_info(MPT3SAS_FMT "PCIe sgl pool depth(%d), "
-			"element_size(%d), pool_size(%d kB)\n", ioc->name,
-			ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024));
-		dinitprintk(ioc, pr_info(MPT3SAS_FMT "Number of chains can "
-		    "fit in a PRP page(%d)\n", ioc->name,
-		    ioc->chains_per_prp_buffer));
+		dinitprintk(ioc,
+			    ioc_info(ioc, "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n",
+				     ioc->scsiio_depth, sz,
+				     (sz * ioc->scsiio_depth) / 1024));
+		dinitprintk(ioc,
+			    ioc_info(ioc, "Number of chains can fit in a PRP page(%d)\n",
+				     ioc->chains_per_prp_buffer));
 		total_sz += sz * ioc->scsiio_depth;
 	}
 
 	ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
 	    ioc->chain_segment_sz, 16, 0);
 	if (!ioc->chain_dma_pool) {
-		pr_err(MPT3SAS_FMT "chain_dma_pool: dma_pool_create failed\n",
-			ioc->name);
+		ioc_err(ioc, "chain_dma_pool: dma_pool_create failed\n");
 		goto out;
 	}
 	for (i = 0; i < ioc->scsiio_depth; i++) {
@@ -4748,8 +5233,7 @@
 					ioc->chain_dma_pool, GFP_KERNEL,
 					&ct->chain_buffer_dma);
 			if (!ct->chain_buffer) {
-				pr_err(MPT3SAS_FMT "chain_lookup: "
-				" pci_pool_alloc failed\n", ioc->name);
+				ioc_err(ioc, "chain_lookup: pci_pool_alloc failed\n");
 				_base_release_memory_pools(ioc);
 				goto out;
 			}
@@ -4757,25 +5241,23 @@
 		total_sz += ioc->chain_segment_sz;
 	}
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT
-		"chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
-		ioc->name, ioc->chain_depth, ioc->chain_segment_sz,
-		((ioc->chain_depth *  ioc->chain_segment_sz))/1024));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
+			     ioc->chain_depth, ioc->chain_segment_sz,
+			     (ioc->chain_depth * ioc->chain_segment_sz) / 1024));
 
 	/* sense buffers, 4 byte align */
 	sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
 	ioc->sense_dma_pool = dma_pool_create("sense pool", &ioc->pdev->dev, sz,
 					      4, 0);
 	if (!ioc->sense_dma_pool) {
-		pr_err(MPT3SAS_FMT "sense pool: dma_pool_create failed\n",
-		    ioc->name);
+		ioc_err(ioc, "sense pool: dma_pool_create failed\n");
 		goto out;
 	}
 	ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
 	    &ioc->sense_dma);
 	if (!ioc->sense) {
-		pr_err(MPT3SAS_FMT "sense pool: dma_pool_alloc failed\n",
-		    ioc->name);
+		ioc_err(ioc, "sense pool: dma_pool_alloc failed\n");
 		goto out;
 	}
 	/* sense buffer requires to be in same 4 gb region.
@@ -4797,24 +5279,23 @@
 			dma_pool_create("sense pool", &ioc->pdev->dev, sz,
 						roundup_pow_of_two(sz), 0);
 		if (!ioc->sense_dma_pool) {
-			pr_err(MPT3SAS_FMT "sense pool: pci_pool_create failed\n",
-					ioc->name);
+			ioc_err(ioc, "sense pool: pci_pool_create failed\n");
 			goto out;
 		}
 		ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
 				&ioc->sense_dma);
 		if (!ioc->sense) {
-			pr_err(MPT3SAS_FMT "sense pool: pci_pool_alloc failed\n",
-					ioc->name);
+			ioc_err(ioc, "sense pool: pci_pool_alloc failed\n");
 			goto out;
 		}
 	}
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT
-	    "sense pool(0x%p): depth(%d), element_size(%d), pool_size"
-	    "(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth,
-	    SCSI_SENSE_BUFFERSIZE, sz/1024));
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "sense_dma(0x%llx)\n",
-	    ioc->name, (unsigned long long)ioc->sense_dma));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "sense pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
+			     ioc->sense, ioc->scsiio_depth,
+			     SCSI_SENSE_BUFFERSIZE, sz / 1024));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "sense_dma(0x%llx)\n",
+			     (unsigned long long)ioc->sense_dma));
 	total_sz += sz;
 
 	/* reply pool, 4 byte align */
@@ -4822,25 +5303,24 @@
 	ioc->reply_dma_pool = dma_pool_create("reply pool", &ioc->pdev->dev, sz,
 					      4, 0);
 	if (!ioc->reply_dma_pool) {
-		pr_err(MPT3SAS_FMT "reply pool: dma_pool_create failed\n",
-		    ioc->name);
+		ioc_err(ioc, "reply pool: dma_pool_create failed\n");
 		goto out;
 	}
 	ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
 	    &ioc->reply_dma);
 	if (!ioc->reply) {
-		pr_err(MPT3SAS_FMT "reply pool: dma_pool_alloc failed\n",
-		    ioc->name);
+		ioc_err(ioc, "reply pool: dma_pool_alloc failed\n");
 		goto out;
 	}
 	ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
 	ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT
-		"reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
-		ioc->name, ioc->reply,
-	    ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024));
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_dma(0x%llx)\n",
-	    ioc->name, (unsigned long long)ioc->reply_dma));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
+			     ioc->reply, ioc->reply_free_queue_depth,
+			     ioc->reply_sz, sz / 1024));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "reply_dma(0x%llx)\n",
+			     (unsigned long long)ioc->reply_dma));
 	total_sz += sz;
 
 	/* reply free queue, 16 byte align */
@@ -4848,24 +5328,22 @@
 	ioc->reply_free_dma_pool = dma_pool_create("reply_free pool",
 	    &ioc->pdev->dev, sz, 16, 0);
 	if (!ioc->reply_free_dma_pool) {
-		pr_err(MPT3SAS_FMT "reply_free pool: dma_pool_create failed\n",
-			ioc->name);
+		ioc_err(ioc, "reply_free pool: dma_pool_create failed\n");
 		goto out;
 	}
-	ioc->reply_free = dma_pool_alloc(ioc->reply_free_dma_pool, GFP_KERNEL,
+	ioc->reply_free = dma_pool_zalloc(ioc->reply_free_dma_pool, GFP_KERNEL,
 	    &ioc->reply_free_dma);
 	if (!ioc->reply_free) {
-		pr_err(MPT3SAS_FMT "reply_free pool: dma_pool_alloc failed\n",
-			ioc->name);
+		ioc_err(ioc, "reply_free pool: dma_pool_alloc failed\n");
 		goto out;
 	}
-	memset(ioc->reply_free, 0, sz);
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_free pool(0x%p): " \
-	    "depth(%d), element_size(%d), pool_size(%d kB)\n", ioc->name,
-	    ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT
-		"reply_free_dma (0x%llx)\n",
-		ioc->name, (unsigned long long)ioc->reply_free_dma));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "reply_free pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
+			     ioc->reply_free, ioc->reply_free_queue_depth,
+			     4, sz / 1024));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "reply_free_dma (0x%llx)\n",
+			     (unsigned long long)ioc->reply_free_dma));
 	total_sz += sz;
 
 	if (ioc->rdpq_array_enable) {
@@ -4876,8 +5354,7 @@
 		    &ioc->pdev->dev, reply_post_free_array_sz, 16, 0);
 		if (!ioc->reply_post_free_array_dma_pool) {
 			dinitprintk(ioc,
-			    pr_info(MPT3SAS_FMT "reply_post_free_array pool: "
-			    "dma_pool_create failed\n", ioc->name));
+				    ioc_info(ioc, "reply_post_free_array pool: dma_pool_create failed\n"));
 			goto out;
 		}
 		ioc->reply_post_free_array =
@@ -4885,34 +5362,31 @@
 		    GFP_KERNEL, &ioc->reply_post_free_array_dma);
 		if (!ioc->reply_post_free_array) {
 			dinitprintk(ioc,
-			    pr_info(MPT3SAS_FMT "reply_post_free_array pool: "
-			    "dma_pool_alloc failed\n", ioc->name));
+				    ioc_info(ioc, "reply_post_free_array pool: dma_pool_alloc failed\n"));
 			goto out;
 		}
 	}
 	ioc->config_page_sz = 512;
-	ioc->config_page = pci_alloc_consistent(ioc->pdev,
-	    ioc->config_page_sz, &ioc->config_page_dma);
+	ioc->config_page = dma_alloc_coherent(&ioc->pdev->dev,
+			ioc->config_page_sz, &ioc->config_page_dma, GFP_KERNEL);
 	if (!ioc->config_page) {
-		pr_err(MPT3SAS_FMT
-			"config page: dma_pool_alloc failed\n",
-			ioc->name);
+		ioc_err(ioc, "config page: dma_pool_alloc failed\n");
 		goto out;
 	}
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT
-		"config page(0x%p): size(%d)\n",
-		ioc->name, ioc->config_page, ioc->config_page_sz));
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "config_page_dma(0x%llx)\n",
-		ioc->name, (unsigned long long)ioc->config_page_dma));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "config page(0x%p): size(%d)\n",
+			     ioc->config_page, ioc->config_page_sz));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "config_page_dma(0x%llx)\n",
+			     (unsigned long long)ioc->config_page_dma));
 	total_sz += ioc->config_page_sz;
 
-	pr_info(MPT3SAS_FMT "Allocated physical memory: size(%d kB)\n",
-	    ioc->name, total_sz/1024);
-	pr_info(MPT3SAS_FMT
-		"Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n",
-	    ioc->name, ioc->shost->can_queue, facts->RequestCredit);
-	pr_info(MPT3SAS_FMT "Scatter Gather Elements per IO(%d)\n",
-	    ioc->name, ioc->shost->sg_tablesize);
+	ioc_info(ioc, "Allocated physical memory: size(%d kB)\n",
+		 total_sz / 1024);
+	ioc_info(ioc, "Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n",
+		 ioc->shost->can_queue, facts->RequestCredit);
+	ioc_info(ioc, "Scatter Gather Elements per IO(%d)\n",
+		 ioc->shost->sg_tablesize);
 	return 0;
 
  out:
@@ -4932,7 +5406,7 @@
 {
 	u32 s, sc;
 
-	s = readl(&ioc->chip->Doorbell);
+	s = ioc->base_readl(&ioc->chip->Doorbell);
 	sc = s & MPI2_IOC_STATE_MASK;
 	return cooked ? sc : s;
 }
@@ -4976,8 +5450,6 @@
  *
  * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
  */
-static int
-_base_diag_reset(struct MPT3SAS_ADAPTER *ioc);
 
 static int
 _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
@@ -4988,11 +5460,11 @@
 	count = 0;
 	cntdn = 1000 * timeout;
 	do {
-		int_status = readl(&ioc->chip->HostInterruptStatus);
+		int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
 		if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
-			dhsprintk(ioc, pr_info(MPT3SAS_FMT
-				"%s: successful count(%d), timeout(%d)\n",
-				ioc->name, __func__, count, timeout));
+			dhsprintk(ioc,
+				  ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
+					   __func__, count, timeout));
 			return 0;
 		}
 
@@ -5000,9 +5472,8 @@
 		count++;
 	} while (--cntdn);
 
-	pr_err(MPT3SAS_FMT
-		"%s: failed due to timeout count(%d), int_status(%x)!\n",
-		ioc->name, __func__, count, int_status);
+	ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
+		__func__, count, int_status);
 	return -EFAULT;
 }
 
@@ -5015,11 +5486,11 @@
 	count = 0;
 	cntdn = 2000 * timeout;
 	do {
-		int_status = readl(&ioc->chip->HostInterruptStatus);
+		int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
 		if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
-			dhsprintk(ioc, pr_info(MPT3SAS_FMT
-				"%s: successful count(%d), timeout(%d)\n",
-				ioc->name, __func__, count, timeout));
+			dhsprintk(ioc,
+				  ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
+					   __func__, count, timeout));
 			return 0;
 		}
 
@@ -5027,9 +5498,8 @@
 		count++;
 	} while (--cntdn);
 
-	pr_err(MPT3SAS_FMT
-		"%s: failed due to timeout count(%d), int_status(%x)!\n",
-		ioc->name, __func__, count, int_status);
+	ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
+		__func__, count, int_status);
 	return -EFAULT;
 
 }
@@ -5054,14 +5524,14 @@
 	count = 0;
 	cntdn = 1000 * timeout;
 	do {
-		int_status = readl(&ioc->chip->HostInterruptStatus);
+		int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
 		if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
-			dhsprintk(ioc, pr_info(MPT3SAS_FMT
-				"%s: successful count(%d), timeout(%d)\n",
-				ioc->name, __func__, count, timeout));
+			dhsprintk(ioc,
+				  ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
+					   __func__, count, timeout));
 			return 0;
 		} else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
-			doorbell = readl(&ioc->chip->Doorbell);
+			doorbell = ioc->base_readl(&ioc->chip->Doorbell);
 			if ((doorbell & MPI2_IOC_STATE_MASK) ==
 			    MPI2_IOC_STATE_FAULT) {
 				mpt3sas_base_fault_info(ioc , doorbell);
@@ -5075,9 +5545,8 @@
 	} while (--cntdn);
 
  out:
-	pr_err(MPT3SAS_FMT
-	 "%s: failed due to timeout count(%d), int_status(%x)!\n",
-	 ioc->name, __func__, count, int_status);
+	ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
+		__func__, count, int_status);
 	return -EFAULT;
 }
 
@@ -5097,11 +5566,11 @@
 	count = 0;
 	cntdn = 1000 * timeout;
 	do {
-		doorbell_reg = readl(&ioc->chip->Doorbell);
+		doorbell_reg = ioc->base_readl(&ioc->chip->Doorbell);
 		if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
-			dhsprintk(ioc, pr_info(MPT3SAS_FMT
-				"%s: successful count(%d), timeout(%d)\n",
-				ioc->name, __func__, count, timeout));
+			dhsprintk(ioc,
+				  ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
+					   __func__, count, timeout));
 			return 0;
 		}
 
@@ -5109,9 +5578,8 @@
 		count++;
 	} while (--cntdn);
 
-	pr_err(MPT3SAS_FMT
-		"%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
-		ioc->name, __func__, count, doorbell_reg);
+	ioc_err(ioc, "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
+		__func__, count, doorbell_reg);
 	return -EFAULT;
 }
 
@@ -5130,8 +5598,7 @@
 	int r = 0;
 
 	if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
-		pr_err(MPT3SAS_FMT "%s: unknown reset_type\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: unknown reset_type\n", __func__);
 		return -EFAULT;
 	}
 
@@ -5139,7 +5606,7 @@
 	   MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
 		return -EFAULT;
 
-	pr_info(MPT3SAS_FMT "sending message unit reset !!\n", ioc->name);
+	ioc_info(ioc, "sending message unit reset !!\n");
 
 	writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
 	    &ioc->chip->Doorbell);
@@ -5149,19 +5616,51 @@
 	}
 	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
 	if (ioc_state) {
-		pr_err(MPT3SAS_FMT
-			"%s: failed going to ready state (ioc_state=0x%x)\n",
-			ioc->name, __func__, ioc_state);
+		ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
+			__func__, ioc_state);
 		r = -EFAULT;
 		goto out;
 	}
  out:
-	pr_info(MPT3SAS_FMT "message unit reset: %s\n",
-	    ioc->name, ((r == 0) ? "SUCCESS" : "FAILED"));
+	ioc_info(ioc, "message unit reset: %s\n",
+		 r == 0 ? "SUCCESS" : "FAILED");
 	return r;
 }
 
 /**
+ * mpt3sas_wait_for_ioc - wait for the IOC to become operational
+ * @ioc: per adapter object
+ * @timeout: timeout in seconds
+ *
+ * Waits up to @timeout seconds, polling once per second, for the
+ * IOC to reach the operational state.
+ *
+ * Return: 0 if the IOC is present and operational; otherwise -EFAULT.
+ */
+int
+mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int timeout)
+{
+	int wait_state_count = 0;
+	u32 ioc_state;
+
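+	/* poll once per second; "timeout" doubles as the loop counter */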
+	do {
+		ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+		if (ioc_state == MPI2_IOC_STATE_OPERATIONAL)
+			break;
+		ssleep(1);
+		ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
+				__func__, ++wait_state_count);
+	} while (--timeout);
+	if (!timeout) {
+		ioc_err(ioc, "%s: failed due to ioc not operational\n", __func__);
+		return -EFAULT;
+	}
+	if (wait_state_count)
+		ioc_info(ioc, "ioc is operational\n");
+	return 0;
+}
+
+/**
  * _base_handshake_req_reply_wait - send request thru doorbell interface
  * @ioc: per adapter object
  * @request_bytes: request length
@@ -5182,15 +5681,13 @@
 	__le32 *mfp;
 
 	/* make sure doorbell is not in use */
-	if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
-		pr_err(MPT3SAS_FMT
-			"doorbell is in use (line=%d)\n",
-			ioc->name, __LINE__);
+	if ((ioc->base_readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
+		ioc_err(ioc, "doorbell is in use (line=%d)\n", __LINE__);
 		return -EFAULT;
 	}
 
 	/* clear pending doorbell interrupts from previous state changes */
-	if (readl(&ioc->chip->HostInterruptStatus) &
+	if (ioc->base_readl(&ioc->chip->HostInterruptStatus) &
 	    MPI2_HIS_IOC2SYS_DB_STATUS)
 		writel(0, &ioc->chip->HostInterruptStatus);
 
@@ -5200,17 +5697,15 @@
 	    &ioc->chip->Doorbell);
 
 	if ((_base_spin_on_doorbell_int(ioc, 5))) {
-		pr_err(MPT3SAS_FMT
-			"doorbell handshake int failed (line=%d)\n",
-			ioc->name, __LINE__);
+		ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
+			__LINE__);
 		return -EFAULT;
 	}
 	writel(0, &ioc->chip->HostInterruptStatus);
 
 	if ((_base_wait_for_doorbell_ack(ioc, 5))) {
-		pr_err(MPT3SAS_FMT
-			"doorbell handshake ack failed (line=%d)\n",
-			ioc->name, __LINE__);
+		ioc_err(ioc, "doorbell handshake ack failed (line=%d)\n",
+			__LINE__);
 		return -EFAULT;
 	}
 
@@ -5222,53 +5717,51 @@
 	}
 
 	if (failed) {
-		pr_err(MPT3SAS_FMT
-			"doorbell handshake sending request failed (line=%d)\n",
-			ioc->name, __LINE__);
+		ioc_err(ioc, "doorbell handshake sending request failed (line=%d)\n",
+			__LINE__);
 		return -EFAULT;
 	}
 
 	/* now wait for the reply */
 	if ((_base_wait_for_doorbell_int(ioc, timeout))) {
-		pr_err(MPT3SAS_FMT
-			"doorbell handshake int failed (line=%d)\n",
-			ioc->name, __LINE__);
+		ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
+			__LINE__);
 		return -EFAULT;
 	}
 
 	/* read the first two 16-bits, it gives the total length of the reply */
-	reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell)
+	reply[0] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
 	    & MPI2_DOORBELL_DATA_MASK);
 	writel(0, &ioc->chip->HostInterruptStatus);
 	if ((_base_wait_for_doorbell_int(ioc, 5))) {
-		pr_err(MPT3SAS_FMT
-			"doorbell handshake int failed (line=%d)\n",
-			ioc->name, __LINE__);
+		ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
+			__LINE__);
 		return -EFAULT;
 	}
-	reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell)
+	reply[1] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
 	    & MPI2_DOORBELL_DATA_MASK);
 	writel(0, &ioc->chip->HostInterruptStatus);
 
 	for (i = 2; i < default_reply->MsgLength * 2; i++)  {
 		if ((_base_wait_for_doorbell_int(ioc, 5))) {
-			pr_err(MPT3SAS_FMT
-				"doorbell handshake int failed (line=%d)\n",
-				ioc->name, __LINE__);
+			ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
+				__LINE__);
 			return -EFAULT;
 		}
 		if (i >=  reply_bytes/2) /* overflow case */
-			readl(&ioc->chip->Doorbell);
+			ioc->base_readl(&ioc->chip->Doorbell);
 		else
-			reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell)
+			reply[i] = le16_to_cpu(
+			    ioc->base_readl(&ioc->chip->Doorbell)
 			    & MPI2_DOORBELL_DATA_MASK);
 		writel(0, &ioc->chip->HostInterruptStatus);
 	}
 
 	_base_wait_for_doorbell_int(ioc, 5);
 	if (_base_wait_for_doorbell_not_used(ioc, 5) != 0) {
-		dhsprintk(ioc, pr_info(MPT3SAS_FMT
-			"doorbell is in use (line=%d)\n", ioc->name, __LINE__));
+		dhsprintk(ioc,
+			  ioc_info(ioc, "doorbell is in use (line=%d)\n",
+				   __LINE__));
 	}
 	writel(0, &ioc->chip->HostInterruptStatus);
 
@@ -5302,45 +5795,27 @@
 	Mpi2SasIoUnitControlRequest_t *mpi_request)
 {
 	u16 smid;
-	u32 ioc_state;
 	u8 issue_reset = 0;
 	int rc;
 	void *request;
-	u16 wait_state_count;
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	mutex_lock(&ioc->base_cmds.mutex);
 
 	if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
-		pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: base_cmd in use\n", __func__);
 		rc = -EAGAIN;
 		goto out;
 	}
 
-	wait_state_count = 0;
-	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
-	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
-		if (wait_state_count++ == 10) {
-			pr_err(MPT3SAS_FMT
-			    "%s: failed due to ioc not operational\n",
-			    ioc->name, __func__);
-			rc = -EFAULT;
-			goto out;
-		}
-		ssleep(1);
-		ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
-		pr_info(MPT3SAS_FMT
-			"%s: waiting for operational state(count=%d)\n",
-			ioc->name, __func__, wait_state_count);
-	}
+	rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
+	if (rc)
+		goto out;
 
 	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
 	if (!smid) {
-		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
 		rc = -EAGAIN;
 		goto out;
 	}
@@ -5354,7 +5829,7 @@
 	    mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
 		ioc->ioc_link_reset_in_progress = 1;
 	init_completion(&ioc->base_cmds.done);
-	mpt3sas_base_put_smid_default(ioc, smid);
+	ioc->put_smid_default(ioc, smid);
 	wait_for_completion_timeout(&ioc->base_cmds.done,
 	    msecs_to_jiffies(10000));
 	if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
@@ -5402,46 +5877,27 @@
 	Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
 {
 	u16 smid;
-	u32 ioc_state;
 	u8 issue_reset = 0;
 	int rc;
 	void *request;
-	u16 wait_state_count;
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	mutex_lock(&ioc->base_cmds.mutex);
 
 	if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
-		pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: base_cmd in use\n", __func__);
 		rc = -EAGAIN;
 		goto out;
 	}
 
-	wait_state_count = 0;
-	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
-	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
-		if (wait_state_count++ == 10) {
-			pr_err(MPT3SAS_FMT
-			    "%s: failed due to ioc not operational\n",
-			    ioc->name, __func__);
-			rc = -EFAULT;
-			goto out;
-		}
-		ssleep(1);
-		ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
-		pr_info(MPT3SAS_FMT
-			"%s: waiting for operational state(count=%d)\n",
-			ioc->name,
-		    __func__, wait_state_count);
-	}
+	rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
+	if (rc)
+		goto out;
 
 	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
 	if (!smid) {
-		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
 		rc = -EAGAIN;
 		goto out;
 	}
@@ -5450,9 +5906,10 @@
 	ioc->base_cmds.status = MPT3_CMD_PENDING;
 	request = mpt3sas_base_get_msg_frame(ioc, smid);
 	ioc->base_cmds.smid = smid;
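+	/* clear the whole frame; the request copied below may be shorter */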
+	memset(request, 0, ioc->request_sz);
 	memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
 	init_completion(&ioc->base_cmds.done);
-	mpt3sas_base_put_smid_default(ioc, smid);
+	ioc->put_smid_default(ioc, smid);
 	wait_for_completion_timeout(&ioc->base_cmds.done,
 	    msecs_to_jiffies(10000));
 	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -5495,8 +5952,7 @@
 	struct mpt3sas_port_facts *pfacts;
 	int mpi_reply_sz, mpi_request_sz, r;
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
 	mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
@@ -5507,8 +5963,7 @@
 	    (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
 
 	if (r != 0) {
-		pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
-		    ioc->name, __func__, r);
+		ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
 		return r;
 	}
 
@@ -5536,26 +5991,26 @@
 	u32 ioc_state;
 	int rc;
 
-	dinitprintk(ioc, printk(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	if (ioc->pci_error_recovery) {
-		dfailprintk(ioc, printk(MPT3SAS_FMT
-		    "%s: host in pci error recovery\n", ioc->name, __func__));
+		dfailprintk(ioc,
+			    ioc_info(ioc, "%s: host in pci error recovery\n",
+				     __func__));
 		return -EFAULT;
 	}
 
 	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
-	dhsprintk(ioc, printk(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
-	    ioc->name, __func__, ioc_state));
+	dhsprintk(ioc,
+		  ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
+			   __func__, ioc_state));
 
 	if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) ||
 	    (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
 		return 0;
 
 	if (ioc_state & MPI2_DOORBELL_USED) {
-		dhsprintk(ioc, printk(MPT3SAS_FMT
-		    "unexpected doorbell active!\n", ioc->name));
+		dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
 		goto issue_diag_reset;
 	}
 
@@ -5567,9 +6022,9 @@
 
 	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
 	if (ioc_state) {
-		dfailprintk(ioc, printk(MPT3SAS_FMT
-		    "%s: failed going to ready state (ioc_state=0x%x)\n",
-		    ioc->name, __func__, ioc_state));
+		dfailprintk(ioc,
+			    ioc_info(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
+				     __func__, ioc_state));
 		return -EFAULT;
 	}
 
@@ -5592,14 +6047,13 @@
 	struct mpt3sas_facts *facts;
 	int mpi_reply_sz, mpi_request_sz, r;
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	r = _base_wait_for_iocstate(ioc, 10);
 	if (r) {
-		dfailprintk(ioc, printk(MPT3SAS_FMT
-		    "%s: failed getting to correct state\n",
-		    ioc->name, __func__));
+		dfailprintk(ioc,
+			    ioc_info(ioc, "%s: failed getting to correct state\n",
+				     __func__));
 		return r;
 	}
 	mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
@@ -5610,8 +6064,7 @@
 	    (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
 
 	if (r != 0) {
-		pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
-		    ioc->name, __func__, r);
+		ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
 		return r;
 	}
 
@@ -5639,6 +6092,9 @@
 	if ((facts->IOCCapabilities &
 	      MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE) && (!reset_devices))
 		ioc->rdpq_array_capable = 1;
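+	/* atomic request descriptors are used only on Aero IOCs */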
+	if ((facts->IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ)
+	    && ioc->is_aero_ioc)
+		ioc->atomic_desc_capable = 1;
 	facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
 	facts->IOCRequestFrameSize =
 	    le16_to_cpu(mpi_reply.IOCRequestFrameSize);
@@ -5663,20 +6119,20 @@
 	 */
 	ioc->page_size = 1 << facts->CurrentHostPageSize;
 	if (ioc->page_size == 1) {
-		pr_info(MPT3SAS_FMT "CurrentHostPageSize is 0: Setting "
-			"default host page size to 4k\n", ioc->name);
+		ioc_info(ioc, "CurrentHostPageSize is 0: Setting default host page size to 4k\n");
 		ioc->page_size = 1 << MPT3SAS_HOST_PAGE_SIZE_4K;
 	}
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "CurrentHostPageSize(%d)\n",
-		ioc->name, facts->CurrentHostPageSize));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "CurrentHostPageSize(%d)\n",
+			     facts->CurrentHostPageSize));
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT
-		"hba queue depth(%d), max chains per io(%d)\n",
-		ioc->name, facts->RequestCredit,
-	    facts->MaxChainDepth));
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT
-		"request frame size(%d), reply frame size(%d)\n", ioc->name,
-	    facts->IOCRequestFrameSize * 4, facts->ReplyFrameSize * 4));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "hba queue depth(%d), max chains per io(%d)\n",
+			     facts->RequestCredit, facts->MaxChainDepth));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "request frame size(%d), reply frame size(%d)\n",
+			     facts->IOCRequestFrameSize * 4,
+			     facts->ReplyFrameSize * 4));
 	return 0;
 }
 
@@ -5696,8 +6152,7 @@
 	u16 ioc_status;
 	u32 reply_post_free_array_sz = 0;
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
 	mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
@@ -5763,15 +6218,14 @@
 	    sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10);
 
 	if (r != 0) {
-		pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
-		    ioc->name, __func__, r);
+		ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
 		return r;
 	}
 
 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
 	    mpi_reply.IOCLogInfo) {
-		pr_err(MPT3SAS_FMT "%s: failed\n", ioc->name, __func__);
+		ioc_err(ioc, "%s: failed\n", __func__);
 		r = -EIO;
 	}
 
@@ -5842,18 +6296,16 @@
 	u16 smid;
 	u16 ioc_status;
 
-	pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);
+	ioc_info(ioc, "sending port enable !!\n");
 
 	if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
-		pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: internal command already in use\n", __func__);
 		return -EAGAIN;
 	}
 
 	smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
 	if (!smid) {
-		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
 		return -EAGAIN;
 	}
 
@@ -5864,11 +6316,10 @@
 	mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
 
 	init_completion(&ioc->port_enable_cmds.done);
-	mpt3sas_base_put_smid_default(ioc, smid);
+	ioc->put_smid_default(ioc, smid);
 	wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
 	if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
-		pr_err(MPT3SAS_FMT "%s: timeout\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: timeout\n", __func__);
 		_debug_dump_mf(mpi_request,
 		    sizeof(Mpi2PortEnableRequest_t)/4);
 		if (ioc->port_enable_cmds.status & MPT3_CMD_RESET)
@@ -5881,16 +6332,15 @@
 	mpi_reply = ioc->port_enable_cmds.reply;
 	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-		pr_err(MPT3SAS_FMT "%s: failed with (ioc_status=0x%08x)\n",
-		    ioc->name, __func__, ioc_status);
+		ioc_err(ioc, "%s: failed with (ioc_status=0x%08x)\n",
+			__func__, ioc_status);
 		r = -EFAULT;
 		goto out;
 	}
 
  out:
 	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
-	pr_info(MPT3SAS_FMT "port enable: %s\n", ioc->name, ((r == 0) ?
-	    "SUCCESS" : "FAILED"));
+	ioc_info(ioc, "port enable: %s\n", r == 0 ? "SUCCESS" : "FAILED");
 	return r;
 }
 
@@ -5906,18 +6356,16 @@
 	Mpi2PortEnableRequest_t *mpi_request;
 	u16 smid;
 
-	pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);
+	ioc_info(ioc, "sending port enable !!\n");
 
 	if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
-		pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: internal command already in use\n", __func__);
 		return -EAGAIN;
 	}
 
 	smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
 	if (!smid) {
-		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
 		return -EAGAIN;
 	}
 
@@ -5927,7 +6375,7 @@
 	memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
 	mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
 
-	mpt3sas_base_put_smid_default(ioc, smid);
+	ioc->put_smid_default(ioc, smid);
 	return 0;
 }
 
@@ -6020,19 +6468,16 @@
 	int r = 0;
 	int i;
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
-		pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: internal command already in use\n", __func__);
 		return -EAGAIN;
 	}
 
 	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
 	if (!smid) {
-		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
 		return -EAGAIN;
 	}
 	ioc->base_cmds.status = MPT3_CMD_PENDING;
@@ -6046,11 +6491,10 @@
 		mpi_request->EventMasks[i] =
 		    cpu_to_le32(ioc->event_masks[i]);
 	init_completion(&ioc->base_cmds.done);
-	mpt3sas_base_put_smid_default(ioc, smid);
+	ioc->put_smid_default(ioc, smid);
 	wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
 	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
-		pr_err(MPT3SAS_FMT "%s: timeout\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: timeout\n", __func__);
 		_debug_dump_mf(mpi_request,
 		    sizeof(Mpi2EventNotificationRequest_t)/4);
 		if (ioc->base_cmds.status & MPT3_CMD_RESET)
@@ -6058,8 +6502,7 @@
 		else
 			r = -ETIME;
 	} else
-		dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s: complete\n",
-		    ioc->name, __func__));
+		dinitprintk(ioc, ioc_info(ioc, "%s: complete\n", __func__));
 	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
 	return r;
 }
@@ -6115,18 +6558,16 @@
 	u32 count;
 	u32 hcb_size;
 
-	pr_info(MPT3SAS_FMT "sending diag reset !!\n", ioc->name);
+	ioc_info(ioc, "sending diag reset !!\n");
 
-	drsprintk(ioc, pr_info(MPT3SAS_FMT "clear interrupts\n",
-	    ioc->name));
+	drsprintk(ioc, ioc_info(ioc, "clear interrupts\n"));
 
 	count = 0;
 	do {
 		/* Write magic sequence to WriteSequence register
 		 * Loop until in diagnostic mode
 		 */
-		drsprintk(ioc, pr_info(MPT3SAS_FMT
-			"write magic sequence\n", ioc->name));
+		drsprintk(ioc, ioc_info(ioc, "write magic sequence\n"));
 		writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
 		writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
 		writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
@@ -6141,17 +6582,16 @@
 		if (count++ > 20)
 			goto out;
 
-		host_diagnostic = readl(&ioc->chip->HostDiagnostic);
-		drsprintk(ioc, pr_info(MPT3SAS_FMT
-			"wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
-		    ioc->name, count, host_diagnostic));
+		host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
+		drsprintk(ioc,
+			  ioc_info(ioc, "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
+				   count, host_diagnostic));
 
 	} while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
 
-	hcb_size = readl(&ioc->chip->HCBSize);
+	hcb_size = ioc->base_readl(&ioc->chip->HCBSize);
 
-	drsprintk(ioc, pr_info(MPT3SAS_FMT "diag reset: issued\n",
-	    ioc->name));
+	drsprintk(ioc, ioc_info(ioc, "diag reset: issued\n"));
 	writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
 	     &ioc->chip->HostDiagnostic);
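Register reads above now go through ioc->base_readl. A hedged sketch of the Aero variant this series selects at attach time; the retry bound and exact policy here are assumptions:

static u32 _example_readl_aero(const volatile void __iomem *addr)
{
        u32 i = 0, ret_val;

        /* Aero controllers can transiently return 0 on MMIO reads,
         * so retry a few times; non-Aero parts use plain readl(). */
        do {
                ret_val = readl(addr);
                i++;
        } while (ret_val == 0 && i < 3);

        return ret_val;
}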
 
@@ -6162,7 +6602,7 @@
 	for (count = 0; count < (300000000 /
 		MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {
 
-		host_diagnostic = readl(&ioc->chip->HostDiagnostic);
+		host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
 
 		if (host_diagnostic == 0xFFFFFFFF)
 			goto out;
@@ -6174,43 +6614,38 @@
 
 	if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
 
-		drsprintk(ioc, pr_info(MPT3SAS_FMT
-		"restart the adapter assuming the HCB Address points to good F/W\n",
-		    ioc->name));
+		drsprintk(ioc,
+			  ioc_info(ioc, "restart the adapter assuming the HCB Address points to good F/W\n"));
 		host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
 		host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
 		writel(host_diagnostic, &ioc->chip->HostDiagnostic);
 
-		drsprintk(ioc, pr_info(MPT3SAS_FMT
-		    "re-enable the HCDW\n", ioc->name));
+		drsprintk(ioc, ioc_info(ioc, "re-enable the HCDW\n"));
 		writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
 		    &ioc->chip->HCBSize);
 	}
 
-	drsprintk(ioc, pr_info(MPT3SAS_FMT "restart the adapter\n",
-	    ioc->name));
+	drsprintk(ioc, ioc_info(ioc, "restart the adapter\n"));
 	writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
 	    &ioc->chip->HostDiagnostic);
 
-	drsprintk(ioc, pr_info(MPT3SAS_FMT
-		"disable writes to the diagnostic register\n", ioc->name));
+	drsprintk(ioc,
+		  ioc_info(ioc, "disable writes to the diagnostic register\n"));
 	writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
 
-	drsprintk(ioc, pr_info(MPT3SAS_FMT
-		"Wait for FW to go to the READY state\n", ioc->name));
+	drsprintk(ioc, ioc_info(ioc, "Wait for FW to go to the READY state\n"));
 	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20);
 	if (ioc_state) {
-		pr_err(MPT3SAS_FMT
-			"%s: failed going to ready state (ioc_state=0x%x)\n",
-			ioc->name, __func__, ioc_state);
+		ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
+			__func__, ioc_state);
 		goto out;
 	}
 
-	pr_info(MPT3SAS_FMT "diag reset: SUCCESS\n", ioc->name);
+	ioc_info(ioc, "diag reset: SUCCESS\n");
 	return 0;
 
  out:
-	pr_err(MPT3SAS_FMT "diag reset: FAILED\n", ioc->name);
+	ioc_err(ioc, "diag reset: FAILED\n");
 	return -EFAULT;
 }
 
@@ -6228,15 +6663,15 @@
 	int rc;
 	int count;
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	if (ioc->pci_error_recovery)
 		return 0;
 
 	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
-	dhsprintk(ioc, pr_info(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
-	    ioc->name, __func__, ioc_state));
+	dhsprintk(ioc,
+		  ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
+			   __func__, ioc_state));
 
 	/* if in RESET state, it should move to READY state shortly */
 	count = 0;
@@ -6244,9 +6679,8 @@
 		while ((ioc_state & MPI2_IOC_STATE_MASK) !=
 		    MPI2_IOC_STATE_READY) {
 			if (count++ == 10) {
-				pr_err(MPT3SAS_FMT
-					"%s: failed going to ready state (ioc_state=0x%x)\n",
-				    ioc->name, __func__, ioc_state);
+				ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
+					__func__, ioc_state);
 				return -EFAULT;
 			}
 			ssleep(1);
@@ -6258,9 +6692,7 @@
 		return 0;
 
 	if (ioc_state & MPI2_DOORBELL_USED) {
-		dhsprintk(ioc, pr_info(MPT3SAS_FMT
-			"unexpected doorbell active!\n",
-			ioc->name));
+		dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
 		goto issue_diag_reset;
 	}
 
@@ -6293,7 +6725,7 @@
 static int
 _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
 {
-	int r, i, index;
+	int r, i, index, rc;
 	unsigned long	flags;
 	u32 reply_address;
 	u16 smid;
@@ -6304,8 +6736,7 @@
 	struct adapter_reply_queue *reply_q;
 	Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	/* clean the delayed target reset list */
 	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
@@ -6397,8 +6828,19 @@
  skip_init_reply_post_free_queue:
 
 	r = _base_send_ioc_init(ioc);
-	if (r)
-		return r;
+	if (r) {
+		/*
+		 * No need to check the IOC state for a fault and issue a
+		 * diag reset during host reset. This check is needed
+		 * only during driver load time.
+		 */
+		if (!ioc->is_driver_loading)
+			return r;
+
+		rc = _base_check_for_fault_and_issue_reset(ioc);
+		if (rc || (_base_send_ioc_init(ioc)))
+			return r;
+	}
 
 	/* initialize reply free host index */
 	ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
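The hunk above introduces a load-time retry pattern that recurs throughout this series: if an init-time command fails while the driver is loading, clear a possible IOC fault and retry exactly once, reporting the original error if the retry also fails. A condensed sketch; the wrapper function is illustrative only, while _base_check_for_fault_and_issue_reset() and _base_send_ioc_init() come from this patch:

static int example_ioc_init_with_retry(struct MPT3SAS_ADAPTER *ioc)
{
        int r, rc;

        r = _base_send_ioc_init(ioc);
        if (r) {
                if (!ioc->is_driver_loading)
                        return r;       /* host-reset path: fail fast */
                /* driver load: clear a fault state, then retry once */
                rc = _base_check_for_fault_and_issue_reset(ioc);
                if (rc || _base_send_ioc_init(ioc))
                        return r;       /* report the original error */
        }
        return 0;
}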
@@ -6465,8 +6907,7 @@
 void
 mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
 {
-	dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	/* synchronizing freeing resource with pci_access_mutex lock */
 	mutex_lock(&ioc->pci_access_mutex);
@@ -6491,11 +6932,10 @@
 int
 mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
 {
-	int r, i;
+	int r, i, rc;
 	int cpu_id, last_cpu_id = 0;
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	/* setup cpu_msix_table */
 	ioc->cpu_count = num_online_cpus();
@@ -6505,9 +6945,8 @@
 	ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
 	ioc->reply_queue_count = 1;
 	if (!ioc->cpu_msix_table) {
-		dfailprintk(ioc, pr_info(MPT3SAS_FMT
-			"allocation for cpu_msix_table failed!!!\n",
-			ioc->name));
+		dfailprintk(ioc,
+			    ioc_info(ioc, "allocation for cpu_msix_table failed!!!\n"));
 		r = -ENOMEM;
 		goto out_free_resources;
 	}
@@ -6516,30 +6955,39 @@
 		ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
 		    sizeof(resource_size_t *), GFP_KERNEL);
 		if (!ioc->reply_post_host_index) {
-			dfailprintk(ioc, pr_info(MPT3SAS_FMT "allocation "
-				"for reply_post_host_index failed!!!\n",
-				ioc->name));
+			dfailprintk(ioc,
+				    ioc_info(ioc, "allocation for reply_post_host_index failed!!!\n"));
 			r = -ENOMEM;
 			goto out_free_resources;
 		}
 	}
 
+	ioc->smp_affinity_enable = smp_affinity_enable;
+
 	ioc->rdpq_array_enable_assigned = 0;
 	ioc->dma_mask = 0;
+	if (ioc->is_aero_ioc)
+		ioc->base_readl = &_base_readl_aero;
+	else
+		ioc->base_readl = &_base_readl;
 	r = mpt3sas_base_map_resources(ioc);
 	if (r)
 		goto out_free_resources;
 
 	pci_set_drvdata(ioc->pdev, ioc->shost);
 	r = _base_get_ioc_facts(ioc);
-	if (r)
-		goto out_free_resources;
+	if (r) {
+		rc = _base_check_for_fault_and_issue_reset(ioc);
+		if (rc || (_base_get_ioc_facts(ioc)))
+			goto out_free_resources;
+	}
 
 	switch (ioc->hba_mpi_version_belonged) {
 	case MPI2_VERSION:
 		ioc->build_sg_scmd = &_base_build_sg_scmd;
 		ioc->build_sg = &_base_build_sg;
 		ioc->build_zero_len_sge = &_base_build_zero_len_sge;
+		ioc->get_msix_index_for_smlio = &_base_get_msix_index;
 		break;
 	case MPI25_VERSION:
 	case MPI26_VERSION:
@@ -6554,15 +7002,30 @@
 		ioc->build_nvme_prp = &_base_build_nvme_prp;
 		ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
 		ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
-
+		if (ioc->high_iops_queues)
+			ioc->get_msix_index_for_smlio =
+					&_base_get_high_iops_msix_index;
+		else
+			ioc->get_msix_index_for_smlio = &_base_get_msix_index;
 		break;
 	}
-
-	if (ioc->is_mcpu_endpoint)
-		ioc->put_smid_scsi_io = &_base_put_smid_mpi_ep_scsi_io;
-	else
-		ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
-
+	if (ioc->atomic_desc_capable) {
+		ioc->put_smid_default = &_base_put_smid_default_atomic;
+		ioc->put_smid_scsi_io = &_base_put_smid_scsi_io_atomic;
+		ioc->put_smid_fast_path =
+				&_base_put_smid_fast_path_atomic;
+		ioc->put_smid_hi_priority =
+				&_base_put_smid_hi_priority_atomic;
+	} else {
+		ioc->put_smid_default = &_base_put_smid_default;
+		ioc->put_smid_fast_path = &_base_put_smid_fast_path;
+		ioc->put_smid_hi_priority = &_base_put_smid_hi_priority;
+		if (ioc->is_mcpu_endpoint)
+			ioc->put_smid_scsi_io =
+				&_base_put_smid_mpi_ep_scsi_io;
+		else
+			ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
+	}
 	/*
 	 * These function pointers are for other requests that
 	 * don't require IEEE scatter gather elements.
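With the atomic-descriptor block above, request posting becomes a per-adapter hook selected once at attach time, so callers no longer test ioc->atomic_desc_capable on every submission. Illustrative caller (hypothetical function name):

static void example_post_request(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
        /* resolves to _base_put_smid_default() or its _atomic variant */
        ioc->put_smid_default(ioc, smid);
}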
@@ -6585,14 +7048,23 @@
 
 	for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
 		r = _base_get_port_facts(ioc, i);
-		if (r)
-			goto out_free_resources;
+		if (r) {
+			rc = _base_check_for_fault_and_issue_reset(ioc);
+			if (rc || (_base_get_port_facts(ioc, i)))
+				goto out_free_resources;
+		}
 	}
 
 	r = _base_allocate_memory_pools(ioc);
 	if (r)
 		goto out_free_resources;
 
+	if (irqpoll_weight > 0)
+		ioc->thresh_hold = irqpoll_weight;
+	else
+		ioc->thresh_hold = ioc->hba_queue_depth/4;
+
+	_base_init_irqpolls(ioc);
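A minimal sketch of what the irq-poll setup plausibly does with the thresh_hold computed above; the handler and its registration details are assumptions, while the reply-queue fields come from the mpt3sas_base.h hunks below:

static int example_irqpoll_handler(struct irq_poll *irqpoll, int budget);

static void example_init_irqpolls(struct MPT3SAS_ADAPTER *ioc)
{
        struct adapter_reply_queue *reply_q;

        list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
                /* process at most thresh_hold descriptors per poll pass */
                irq_poll_init(&reply_q->irqpoll, ioc->thresh_hold,
                              example_irqpoll_handler);
                reply_q->irq_poll_scheduled = false;
        }
}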
 	init_waitqueue_head(&ioc->reset_wq);
 
 	/* allocate memory pd handle bitmask list */
@@ -6702,6 +7174,13 @@
 	if (r)
 		goto out_free_resources;
 
+	/*
+	 * Copy the current IOCFacts into prev_fw_facts; it will be
+	 * used during online firmware upgrade.
+	 */
+	memcpy(&ioc->prev_fw_facts, &ioc->facts,
+	    sizeof(struct mpt3sas_facts));
+
 	ioc->non_operational_loop = 0;
 	ioc->got_task_abort_from_ioctl = 0;
 	return 0;
@@ -6747,8 +7226,7 @@
 void
 mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
 {
-	dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	mpt3sas_base_stop_watchdog(ioc);
 	mpt3sas_base_free_resources(ioc);
@@ -6781,8 +7259,7 @@
 {
 	mpt3sas_scsih_pre_reset_handler(ioc);
 	mpt3sas_ctl_pre_reset_handler(ioc);
-	dtmprintk(ioc, pr_info(MPT3SAS_FMT
-			"%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
+	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
 }
 
 /**
@@ -6793,8 +7270,7 @@
 {
 	mpt3sas_scsih_after_reset_handler(ioc);
 	mpt3sas_ctl_after_reset_handler(ioc);
-	dtmprintk(ioc, pr_info(MPT3SAS_FMT
-			"%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
+	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__));
 	if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
 		ioc->transport_cmds.status |= MPT3_CMD_RESET;
 		mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
@@ -6835,8 +7311,7 @@
 {
 	mpt3sas_scsih_reset_done_handler(ioc);
 	mpt3sas_ctl_reset_done_handler(ioc);
-	dtmprintk(ioc, pr_info(MPT3SAS_FMT
-			"%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
+	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
 }
 
 /**
@@ -6868,6 +7343,85 @@
 }
 
 /**
+ * _base_check_ioc_facts_changes - Look for increase/decrease of IOCFacts
+ *     attributes during online firmware upgrade and update the corresponding
+ *     IOC variables accordingly.
+ *
+ * @ioc: Pointer to MPT_ADAPTER structure
+ *
+ * Return: 0 on success, -ENOMEM if growing a tracking bitmap fails.
+ */
+static int
+_base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc)
+{
+	u16 pd_handles_sz;
+	void *pd_handles = NULL, *blocking_handles = NULL;
+	void *pend_os_device_add = NULL, *device_remove_in_progress = NULL;
+	struct mpt3sas_facts *old_facts = &ioc->prev_fw_facts;
+
+	if (ioc->facts.MaxDevHandle > old_facts->MaxDevHandle) {
+		pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
+		if (ioc->facts.MaxDevHandle % 8)
+			pd_handles_sz++;
+
+		pd_handles = krealloc(ioc->pd_handles, pd_handles_sz,
+		    GFP_KERNEL);
+		if (!pd_handles) {
+			ioc_info(ioc,
+			    "Unable to allocate memory for pd_handles of sz: %d\n",
+			    pd_handles_sz);
+			return -ENOMEM;
+		}
+		memset(pd_handles + ioc->pd_handles_sz, 0,
+		    (pd_handles_sz - ioc->pd_handles_sz));
+		ioc->pd_handles = pd_handles;
+
+		blocking_handles = krealloc(ioc->blocking_handles,
+		    pd_handles_sz, GFP_KERNEL);
+		if (!blocking_handles) {
+			ioc_info(ioc,
+			    "Unable to allocate memory for blocking_handles of sz: %d\n",
+			    pd_handles_sz);
+			return -ENOMEM;
+		}
+		memset(blocking_handles + ioc->pd_handles_sz, 0,
+		    (pd_handles_sz - ioc->pd_handles_sz));
+		ioc->blocking_handles = blocking_handles;
+		ioc->pd_handles_sz = pd_handles_sz;
+
+		pend_os_device_add = krealloc(ioc->pend_os_device_add,
+		    pd_handles_sz, GFP_KERNEL);
+		if (!pend_os_device_add) {
+			ioc_info(ioc,
+			    "Unable to allocate memory for pend_os_device_add of sz: %d\n",
+			    pd_handles_sz);
+			return -ENOMEM;
+		}
+		memset(pend_os_device_add + ioc->pend_os_device_add_sz, 0,
+		    (pd_handles_sz - ioc->pend_os_device_add_sz));
+		ioc->pend_os_device_add = pend_os_device_add;
+		ioc->pend_os_device_add_sz = pd_handles_sz;
+
+		device_remove_in_progress = krealloc(
+		    ioc->device_remove_in_progress, pd_handles_sz, GFP_KERNEL);
+		if (!device_remove_in_progress) {
+			ioc_info(ioc,
+			    "Unable to allocate memory for device_remove_in_progress of sz: %d\n",
+			    pd_handles_sz);
+			return -ENOMEM;
+		}
+		memset(device_remove_in_progress +
+		    ioc->device_remove_in_progress_sz, 0,
+		    (pd_handles_sz - ioc->device_remove_in_progress_sz));
+		ioc->device_remove_in_progress = device_remove_in_progress;
+		ioc->device_remove_in_progress_sz = pd_handles_sz;
+	}
+
+	memcpy(&ioc->prev_fw_facts, &ioc->facts, sizeof(struct mpt3sas_facts));
+	return 0;
+}
+
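The function above repeats one grow-and-zero idiom for each per-device-handle bitmap: round MaxDevHandle up to whole bytes (equivalent to DIV_ROUND_UP(max_handles, 8)), krealloc, and zero only the newly added tail. Condensed into a hypothetical helper for clarity:

/* grow_bitmap() is illustrative, not part of the driver */
static void *grow_bitmap(void *old, u16 old_sz, u16 max_handles)
{
        u16 new_sz = max_handles / 8 + ((max_handles % 8) ? 1 : 0);
        void *p = krealloc(old, new_sz, GFP_KERNEL);

        if (!p)
                return NULL;
        memset(p + old_sz, 0, new_sz - old_sz); /* zero only the new tail */
        return p;
}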
+/**
  * mpt3sas_base_hard_reset_handler - reset controller
  * @ioc: Pointer to MPT_ADAPTER structure
  * @type: FORCE_BIG_HAMMER or SOFT_RESET
@@ -6883,12 +7437,10 @@
 	u32 ioc_state;
 	u8 is_fault = 0, is_trigger = 0;
 
-	dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
-	    __func__));
+	dtmprintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
 
 	if (ioc->pci_error_recovery) {
-		pr_err(MPT3SAS_FMT "%s: pci error recovery reset\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: pci error recovery reset\n", __func__);
 		r = 0;
 		goto out_unlocked;
 	}
@@ -6932,6 +7484,13 @@
 	if (r)
 		goto out;
 
+	r = _base_check_ioc_facts_changes(ioc);
+	if (r) {
+		ioc_info(ioc,
+		    "Some parameters have changed in the new firmware image; a system reboot is required\n");
+		goto out;
+	}
 	if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
 		panic("%s: Issue occurred with flashing controller firmware."
 		      "Please reboot the system and ensure that the correct"
@@ -6942,8 +7501,9 @@
 		_base_reset_done_handler(ioc);
 
  out:
-	dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: %s\n",
-	    ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")));
+	dtmprintk(ioc,
+		  ioc_info(ioc, "%s: %s\n",
+			   __func__, r == 0 ? "SUCCESS" : "FAILED"));
 
 	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
 	ioc->shost_recovery = 0;
@@ -6959,7 +7519,6 @@
 			mpt3sas_trigger_master(ioc,
 			    MASTER_TRIGGER_ADAPTER_RESET);
 	}
-	dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
-	    __func__));
+	dtmprintk(ioc, ioc_info(ioc, "%s: exit\n", __func__));
 	return r;
 }
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 96dc15e..faca0a5 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -55,6 +55,7 @@
 #include "mpi/mpi2_tool.h"
 #include "mpi/mpi2_sas.h"
 #include "mpi/mpi2_pci.h"
+#include "mpi/mpi2_image.h"
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -66,6 +67,7 @@
 #include <scsi/scsi_eh.h>
 #include <linux/pci.h>
 #include <linux/poll.h>
+#include <linux/irq_poll.h>
 
 #include "mpt3sas_debug.h"
 #include "mpt3sas_trigger_diag.h"
@@ -74,8 +76,8 @@
 #define MPT3SAS_DRIVER_NAME		"mpt3sas"
 #define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
 #define MPT3SAS_DESCRIPTION	"LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION		"26.100.00.00"
-#define MPT3SAS_MAJOR_VERSION		26
+#define MPT3SAS_DRIVER_VERSION		"31.100.00.00"
+#define MPT3SAS_MAJOR_VERSION		31
 #define MPT3SAS_MINOR_VERSION		100
 #define MPT3SAS_BUILD_VERSION		0
 #define MPT3SAS_RELEASE_VERSION	00
@@ -139,6 +141,9 @@
 #define DEFAULT_NUM_FWCHAIN_ELEMTS	8
 
 #define FW_IMG_HDR_READ_TIMEOUT	15
+
+#define IOC_OPERATIONAL_WAIT_COUNT	10
+
 /*
  * NVMe defines
  */
@@ -158,7 +163,14 @@
 /*
  * logging format
  */
-#define MPT3SAS_FMT			"%s: "
+#define ioc_err(ioc, fmt, ...)						\
+	pr_err("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
+#define ioc_notice(ioc, fmt, ...)					\
+	pr_notice("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
+#define ioc_warn(ioc, fmt, ...)						\
+	pr_warn("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
+#define ioc_info(ioc, fmt, ...)						\
+	pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
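The effect of these helpers, shown on one call site converted by this patch:

/* before: every call site passed ioc->name to satisfy MPT3SAS_FMT ("%s: ") */
pr_info(MPT3SAS_FMT "diag reset: SUCCESS\n", ioc->name);
/* after: the adapter name is prepended by the macro itself */
ioc_info(ioc, "diag reset: SUCCESS\n");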
 
 /*
  *  WarpDrive Specific Log codes
@@ -182,6 +194,9 @@
 #define SAS2_PCI_DEVICE_B0_REVISION	(0x01)
 #define SAS3_PCI_DEVICE_C0_REVISION	(0x02)
 
+/* Atlas PCIe Switch Management Port */
+#define MPI26_ATLAS_PCIe_SWITCH_DEVID	(0x00B2)
+
 /*
  * Intel HBA branding
  */
@@ -340,6 +355,12 @@
 
 #define VIRTUAL_IO_FAILED_RETRY			(0x32010081)
 
+/* High IOPs definitions */
+#define MPT3SAS_DEVICE_HIGH_IOPS_DEPTH		8
+#define MPT3SAS_HIGH_IOPS_REPLY_QUEUES		8
+#define MPT3SAS_HIGH_IOPS_BATCH_COUNT		16
+#define MPT3SAS_GEN35_MAX_MSIX_QUEUES		128
+
 /* OEM Specific Flags will come from OEM specific header files */
 struct Mpi2ManufacturingPage10_t {
 	MPI2_CONFIG_PAGE_HEADER	Header;		/* 00h */
@@ -562,6 +583,7 @@
  * @enclosure_level: The level of device's enclosure from the controller
  * @connector_name: ASCII value of the Connector's name
  * @serial_number: pointer of serial number string allocated runtime
+ * @access_status: Device's Access Status
  * @refcount: reference count for deletion
  */
 struct _pcie_device {
@@ -583,6 +605,7 @@
 	u8	connector_name[4];
 	u8	*serial_number;
 	u8	reset_timeout;
+	u8	access_status;
 	struct kref refcount;
 };
 /**
@@ -809,6 +832,7 @@
  */
 struct scsiio_tracker {
 	u16	smid;
+	struct scsi_cmnd *scmd;
 	u8	cb_idx;
 	u8	direct_io;
 	struct pcie_sg_list pcie_sg_list;
@@ -868,6 +892,9 @@
  * @reply_post_free: reply post base virt address
  * @name: the name registered to request_irq()
  * @busy: isr is actively processing replies on another cpu
+ * @os_irq: irq number
+ * @irqpoll: irq_poll object
+ * @irq_poll_scheduled: Tells whether irq poll is scheduled or not
  * @list: this list
 */
 struct adapter_reply_queue {
@@ -877,6 +904,10 @@
 	Mpi2ReplyDescriptorsUnion_t *reply_post_free;
 	char			name[MPT_NAME_LENGTH];
 	atomic_t		busy;
+	u32			os_irq;
+	struct irq_poll         irqpoll;
+	bool			irq_poll_scheduled;
+	bool			irq_line_enable;
 	struct list_head	list;
 };
 
@@ -901,6 +932,13 @@
 typedef void (*PUT_SMID_IO_FP_HIP) (struct MPT3SAS_ADAPTER *ioc, u16 smid,
 	u16 funcdep);
 typedef void (*PUT_SMID_DEFAULT) (struct MPT3SAS_ADAPTER *ioc, u16 smid);
+typedef u32 (*BASE_READ_REG) (const volatile void __iomem *addr);
+/*
+ * Returns the msix index of a high iops reply queue when high iops mode
+ * is enabled, else the msix index of a general reply queue.
+ */
+typedef u8 (*GET_MSIX_INDEX) (struct MPT3SAS_ADAPTER *ioc,
+	struct scsi_cmnd *scmd);
 
 /* IOC Facts and Port Facts converted from little endian to cpu */
 union mpi3_version_union {
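A plausible reading of how a GET_MSIX_INDEX implementation combines the high-IOPS constants defined earlier; the real selector lives in mpt3sas_base.c, and the fallback here is an assumption:

static u8 example_high_iops_msix_index(struct MPT3SAS_ADAPTER *ioc,
        struct scsi_cmnd *scmd)
{
        /* busy devices are spread, in batches, across the high-IOPS queues */
        if (atomic_read(&scmd->device->device_busy) >
            MPT3SAS_DEVICE_HIGH_IOPS_DEPTH)
                return (atomic64_add_return(1, &ioc->high_iops_outstanding) /
                        MPT3SAS_HIGH_IOPS_BATCH_COUNT) %
                        MPT3SAS_HIGH_IOPS_REPLY_QUEUES;

        return 0;       /* assumed fallback to the default selector */
}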
@@ -1001,7 +1039,15 @@
  * @msix_vector_count: number msix vectors
  * @cpu_msix_table: table for mapping cpus to msix index
  * @cpu_msix_table_sz: table size
+ * @total_io_cnt: Gives total IO count, used to load balance the interrupts
+ * @high_iops_outstanding: used to load balance the interrupts
+ *				within high iops reply queues
+ * @msix_load_balance: Enables load balancing of interrupts across
+ * the multiple MSIXs
  * @schedule_dead_ioc_flush_running_cmds: callback to flush pending commands
+ * @thresh_hold: Max number of reply descriptors processed
+ *				before updating Host Index
+ * @drv_support_bitmap: driver's supported feature bit map
  * @scsi_io_cb_idx: shost generated commands
  * @tm_cb_idx: task management commands
  * @scsih_cb_idx: scsih internal commands
@@ -1023,6 +1069,7 @@
  * @event_log: event log pointer
  * @event_masks: events that are masked
  * @facts: static facts data
+ * @prev_fw_facts: previous fw facts data
  * @pfacts: static port facts data
  * @manu_pg0: static manufacturing page 0
  * @manu_pg10: static manufacturing page 10
@@ -1119,6 +1166,8 @@
  *	path functions resulting in Null pointer reference followed by kernel
 *	crash. To avoid the above race condition we use mutex synchronization
 *	which ensures synchronization between the cli/sysfs_show paths.
+ * @atomic_desc_capable: Atomic Request Descriptor support.
+ * @get_msix_index_for_smlio: Get the msix index of the high iops reply queues.
  */
 struct MPT3SAS_ADAPTER {
 	struct list_head list;
@@ -1177,6 +1226,13 @@
 	u32		ioc_reset_count;
 	MPT3SAS_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds;
 	u32             non_operational_loop;
+	atomic64_t      total_io_cnt;
+	atomic64_t	high_iops_outstanding;
+	bool            msix_load_balance;
+	u16		thresh_hold;
+	u8		high_iops_queues;
+	u32		drv_support_bitmap;
+	bool		enable_sdev_max_qd;
 
 	/* internal commands, callback index */
 	u8		scsi_io_cb_idx;
@@ -1226,6 +1282,7 @@
 
 	/* static config pages */
 	struct mpt3sas_facts facts;
+	struct mpt3sas_facts prev_fw_facts;
 	struct mpt3sas_port_facts *pfacts;
 	Mpi2ManufacturingPage0_t manu_pg0;
 	struct Mpi2ManufacturingPage10_t manu_pg10;
@@ -1236,6 +1293,7 @@
 	Mpi2IOUnitPage0_t iounit_pg0;
 	Mpi2IOUnitPage1_t iounit_pg1;
 	Mpi2IOUnitPage8_t iounit_pg8;
+	Mpi2IOCPage1_t	ioc_pg1_copy;
 
 	struct _boot_device req_boot_device;
 	struct _boot_device req_alt_boot_device;
@@ -1354,6 +1412,7 @@
 
 	u8		combined_reply_queue;
 	u8		combined_reply_index_count;
+	u8		smp_affinity_enable;
 	/* reply post register index */
 	resource_size_t	**replyPostRegisterIndex;
 
@@ -1381,6 +1440,8 @@
 	u8		hide_drives;
 	spinlock_t	diag_trigger_lock;
 	u8		diag_trigger_active;
+	u8		atomic_desc_capable;
+	BASE_READ_REG	base_readl;
 	struct SL_WH_MASTER_TRIGGER_T diag_trigger_master;
 	struct SL_WH_EVENT_TRIGGERS_T diag_trigger_event;
 	struct SL_WH_SCSI_TRIGGERS_T diag_trigger_scsi;
@@ -1388,10 +1449,16 @@
 	void		*device_remove_in_progress;
 	u16		device_remove_in_progress_sz;
 	u8		is_gen35_ioc;
+	u8		is_aero_ioc;
 	PUT_SMID_IO_FP_HIP put_smid_scsi_io;
-
+	PUT_SMID_IO_FP_HIP put_smid_fast_path;
+	PUT_SMID_IO_FP_HIP put_smid_hi_priority;
+	PUT_SMID_DEFAULT put_smid_default;
+	GET_MSIX_INDEX get_msix_index_for_smlio;
 };
 
+#define MPT_DRV_SUPPORT_BITMAP_MEMMOVE 0x00000001
+
 typedef u8 (*MPT_CALLBACK)(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
 	u32 reply);
 
@@ -1480,6 +1547,7 @@
 
 u8 mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
 	u8 status, void *mpi_request, int sz);
+int mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int wait_count);
 
 /* scsih shared API */
 struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc,
@@ -1520,6 +1588,7 @@
 void mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc);
 struct _raid_device *
 mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle);
+void mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth);
 
 /* config shared API */
 u8 mpt3sas_config_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
@@ -1577,6 +1646,10 @@
 int mpt3sas_config_set_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
 	Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page,
 	u16 sz);
+int mpt3sas_config_get_ioc_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+	*mpi_reply, Mpi2IOCPage1_t *config_page);
+int mpt3sas_config_set_ioc_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+	*mpi_reply, Mpi2IOCPage1_t *config_page);
 int mpt3sas_config_get_ioc_pg8(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
 	*mpi_reply, Mpi2IOCPage8_t *config_page);
 int mpt3sas_config_get_expander_pg0(struct MPT3SAS_ADAPTER *ioc,
@@ -1670,4 +1743,20 @@
 /* NCQ Prio Handling Check */
 bool scsih_ncq_prio_supp(struct scsi_device *sdev);
 
+/**
+ * mpt3sas_scsih_is_pcie_scsi_device - determine if a device is a PCIe SCSI device
+ * @device_info: bitfield providing information about the device.
+ * Context: none
+ *
+ * Return: 1 if the device is a PCIe SCSI device, otherwise 0.
+ */
+static inline int
+mpt3sas_scsih_is_pcie_scsi_device(u32 device_info)
+{
+	if ((device_info &
+	    MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE) == MPI26_PCIE_DEVINFO_SCSI)
+		return 1;
+	else
+		return 0;
+}
 #endif /* MPT3SAS_BASE_H_INCLUDED */
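Usage mirrors the mpt3sas_ctl.c hunk above: a PCIe device only receives the PCIe protocol-level reset when it is not a PCIe SCSI device. Illustrative caller; the default tr_method value is an assumption:

u8 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

if (pcie_device && !ioc->tm_custom_handling &&
    !mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))
        tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;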
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index d29a2dc..14a1a27 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -119,7 +119,7 @@
 		desc = "raid_volume";
 		break;
 	case MPI2_CONFIG_PAGETYPE_MANUFACTURING:
-		desc = "manufaucturing";
+		desc = "manufacturing";
 		break;
 	case MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK:
 		desc = "physdisk";
@@ -175,20 +175,18 @@
 	if (!desc)
 		return;
 
-	pr_info(MPT3SAS_FMT
-		"%s: %s(%d), action(%d), form(0x%08x), smid(%d)\n",
-		ioc->name, calling_function_name, desc,
-	    mpi_request->Header.PageNumber, mpi_request->Action,
-	    le32_to_cpu(mpi_request->PageAddress), smid);
+	ioc_info(ioc, "%s: %s(%d), action(%d), form(0x%08x), smid(%d)\n",
+		 calling_function_name, desc,
+		 mpi_request->Header.PageNumber, mpi_request->Action,
+		 le32_to_cpu(mpi_request->PageAddress), smid);
 
 	if (!mpi_reply)
 		return;
 
 	if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo)
-		pr_info(MPT3SAS_FMT
-		    "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
-		    ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
-		    le32_to_cpu(mpi_reply->IOCLogInfo));
+		ioc_info(ioc, "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
+			 le16_to_cpu(mpi_reply->IOCStatus),
+			 le32_to_cpu(mpi_reply->IOCLogInfo));
 }
 
 /**
@@ -210,9 +208,8 @@
 		mem->page = dma_alloc_coherent(&ioc->pdev->dev, mem->sz,
 		    &mem->page_dma, GFP_KERNEL);
 		if (!mem->page) {
-			pr_err(MPT3SAS_FMT
-				"%s: dma_alloc_coherent failed asking for (%d) bytes!!\n",
-			    ioc->name, __func__, mem->sz);
+			ioc_err(ioc, "%s: dma_alloc_coherent failed asking for (%d) bytes!!\n",
+				__func__, mem->sz);
 			r = -ENOMEM;
 		}
 	} else { /* use tmp buffer if less than 512 bytes */
@@ -303,18 +300,15 @@
 	void *config_page, u16 config_page_sz)
 {
 	u16 smid;
-	u32 ioc_state;
 	Mpi2ConfigRequest_t *config_request;
 	int r;
 	u8 retry_count, issue_host_reset = 0;
-	u16 wait_state_count;
 	struct config_request mem;
 	u32 ioc_status = UINT_MAX;
 
 	mutex_lock(&ioc->config_cmds.mutex);
 	if (ioc->config_cmds.status != MPT3_CMD_NOT_USED) {
-		pr_err(MPT3SAS_FMT "%s: config_cmd in use\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: config_cmd in use\n", __func__);
 		mutex_unlock(&ioc->config_cmds.mutex);
 		return -EAGAIN;
 	}
@@ -362,34 +356,17 @@
 			r = -EFAULT;
 			goto free_mem;
 		}
-		pr_info(MPT3SAS_FMT "%s: attempting retry (%d)\n",
-		    ioc->name, __func__, retry_count);
+		ioc_info(ioc, "%s: attempting retry (%d)\n",
+			 __func__, retry_count);
 	}
-	wait_state_count = 0;
-	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
-	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
-		if (wait_state_count++ == MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT) {
-			pr_err(MPT3SAS_FMT
-			    "%s: failed due to ioc not operational\n",
-			    ioc->name, __func__);
-			ioc->config_cmds.status = MPT3_CMD_NOT_USED;
-			r = -EFAULT;
-			goto free_mem;
-		}
-		ssleep(1);
-		ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
-		pr_info(MPT3SAS_FMT
-			"%s: waiting for operational state(count=%d)\n",
-			ioc->name, __func__, wait_state_count);
-	}
-	if (wait_state_count)
-		pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
-		    ioc->name, __func__);
+
+	r = mpt3sas_wait_for_ioc(ioc, MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT);
+	if (r)
+		goto free_mem;
 
 	smid = mpt3sas_base_get_smid(ioc, ioc->config_cb_idx);
 	if (!smid) {
-		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
 		ioc->config_cmds.status = MPT3_CMD_NOT_USED;
 		r = -EAGAIN;
 		goto free_mem;
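mpt3sas_wait_for_ioc() replaces the open-coded wait loop removed above. Its body is not part of this section; a plausible shape, inferred from the loop it replaces:

int mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int wait_count)
{
        int count = 0;
        u32 ioc_state;

        do {
                ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
                if (ioc_state == MPI2_IOC_STATE_OPERATIONAL)
                        return 0;
                ssleep(1);
                ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
                         __func__, ++count);
        } while (count < wait_count);

        ioc_err(ioc, "%s: failed due to ioc not operational\n", __func__);
        return -EFAULT;
}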
@@ -403,7 +380,7 @@
 	memcpy(config_request, mpi_request, sizeof(Mpi2ConfigRequest_t));
 	_config_display_some_debug(ioc, smid, "config_request", NULL);
 	init_completion(&ioc->config_cmds.done);
-	mpt3sas_base_put_smid_default(ioc, smid);
+	ioc->put_smid_default(ioc, smid);
 	wait_for_completion_timeout(&ioc->config_cmds.done, timeout*HZ);
 	if (!(ioc->config_cmds.status & MPT3_CMD_COMPLETE)) {
 		mpt3sas_base_check_cmd_timeout(ioc,
@@ -429,12 +406,10 @@
 		    (mpi_reply->Header.PageType & 0xF)) {
 			_debug_dump_mf(mpi_request, ioc->request_sz/4);
 			_debug_dump_reply(mpi_reply, ioc->request_sz/4);
-			panic(KERN_WARNING MPT3SAS_FMT "%s: Firmware BUG:" \
-			    " mpi_reply mismatch: Requested PageType(0x%02x)" \
-			    " Reply PageType(0x%02x)\n", \
-			    ioc->name, __func__,
-			    (mpi_request->Header.PageType & 0xF),
-			    (mpi_reply->Header.PageType & 0xF));
+			panic("%s: %s: Firmware BUG: mpi_reply mismatch: Requested PageType(0x%02x) Reply PageType(0x%02x)\n",
+			      ioc->name, __func__,
+			      mpi_request->Header.PageType & 0xF,
+			      mpi_reply->Header.PageType & 0xF);
 		}
 
 		if (((mpi_request->Header.PageType & 0xF) ==
@@ -442,19 +417,18 @@
 		    mpi_request->ExtPageType != mpi_reply->ExtPageType) {
 			_debug_dump_mf(mpi_request, ioc->request_sz/4);
 			_debug_dump_reply(mpi_reply, ioc->request_sz/4);
-			panic(KERN_WARNING MPT3SAS_FMT "%s: Firmware BUG:" \
-			    " mpi_reply mismatch: Requested ExtPageType(0x%02x)"
-			    " Reply ExtPageType(0x%02x)\n",
-			    ioc->name, __func__, mpi_request->ExtPageType,
-			    mpi_reply->ExtPageType);
+			panic("%s: %s: Firmware BUG: mpi_reply mismatch: Requested ExtPageType(0x%02x) Reply ExtPageType(0x%02x)\n",
+			      ioc->name, __func__,
+			      mpi_request->ExtPageType,
+			      mpi_reply->ExtPageType);
 		}
 		ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
 		    & MPI2_IOCSTATUS_MASK;
 	}
 
 	if (retry_count)
-		pr_info(MPT3SAS_FMT "%s: retry (%d) completed!!\n", \
-		    ioc->name, __func__, retry_count);
+		ioc_info(ioc, "%s: retry (%d) completed!!\n",
+			 __func__, retry_count);
 
 	if ((ioc_status == MPI2_IOCSTATUS_SUCCESS) &&
 	    config_page && mpi_request->Action ==
@@ -469,14 +443,10 @@
 				_debug_dump_reply(mpi_reply, ioc->request_sz/4);
 				_debug_dump_config(p, min_t(u16, mem.sz,
 				    config_page_sz)/4);
-				panic(KERN_WARNING MPT3SAS_FMT
-					"%s: Firmware BUG:" \
-				    " config page mismatch:"
-				    " Requested PageType(0x%02x)"
-				    " Reply PageType(0x%02x)\n",
-				    ioc->name, __func__,
-				    (mpi_request->Header.PageType & 0xF),
-				    (p[3] & 0xF));
+				panic("%s: %s: Firmware BUG: config page mismatch: Requested PageType(0x%02x) Reply PageType(0x%02x)\n",
+				      ioc->name, __func__,
+				      mpi_request->Header.PageType & 0xF,
+				      p[3] & 0xF);
 			}
 
 			if (((mpi_request->Header.PageType & 0xF) ==
@@ -486,13 +456,9 @@
 				_debug_dump_reply(mpi_reply, ioc->request_sz/4);
 				_debug_dump_config(p, min_t(u16, mem.sz,
 				    config_page_sz)/4);
-				panic(KERN_WARNING MPT3SAS_FMT
-					"%s: Firmware BUG:" \
-				    " config page mismatch:"
-				    " Requested ExtPageType(0x%02x)"
-				    " Reply ExtPageType(0x%02x)\n",
-				    ioc->name, __func__,
-				    mpi_request->ExtPageType, p[6]);
+				panic("%s: %s: Firmware BUG: config page mismatch: Requested ExtPageType(0x%02x) Reply ExtPageType(0x%02x)\n",
+				      ioc->name, __func__,
+				      mpi_request->ExtPageType, p[6]);
 			}
 		}
 		memcpy(config_page, mem.page, min_t(u16, mem.sz,
@@ -692,10 +658,6 @@
 	r = _config_request(ioc, &mpi_request, mpi_reply,
 	    MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
 	    sizeof(*config_page));
-	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM;
-	r = _config_request(ioc, &mpi_request, mpi_reply,
-	    MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
-	    sizeof(*config_page));
  out:
 	return r;
 }
@@ -987,6 +949,77 @@
  out:
 	return r;
 }
+/**
+ * mpt3sas_config_get_ioc_pg1 - obtain ioc page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_ioc_pg1(struct MPT3SAS_ADAPTER *ioc,
+	Mpi2ConfigReply_t *mpi_reply, Mpi2IOCPage1_t *config_page)
+{
+	Mpi2ConfigRequest_t mpi_request;
+	int r;
+
+	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+	mpi_request.Function = MPI2_FUNCTION_CONFIG;
+	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+	mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IOC;
+	mpi_request.Header.PageNumber = 1;
+	mpi_request.Header.PageVersion = MPI2_IOCPAGE8_PAGEVERSION;
+	ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+	r = _config_request(ioc, &mpi_request, mpi_reply,
+	    MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+	if (r)
+		goto out;
+
+	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+	r = _config_request(ioc, &mpi_request, mpi_reply,
+	    MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+	    sizeof(*config_page));
+ out:
+	return r;
+}
+
+/**
+ * mpt3sas_config_set_ioc_pg1 - modify ioc page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_set_ioc_pg1(struct MPT3SAS_ADAPTER *ioc,
+	Mpi2ConfigReply_t *mpi_reply, Mpi2IOCPage1_t *config_page)
+{
+	Mpi2ConfigRequest_t mpi_request;
+	int r;
+
+	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+	mpi_request.Function = MPI2_FUNCTION_CONFIG;
+	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+	mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IOC;
+	mpi_request.Header.PageNumber = 1;
+	mpi_request.Header.PageVersion = MPI2_IOCPAGE8_PAGEVERSION;
+	ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+	r = _config_request(ioc, &mpi_request, mpi_reply,
+	    MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+	if (r)
+		goto out;
+
+	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+	r = _config_request(ioc, &mpi_request, mpi_reply,
+	    MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+	    sizeof(*config_page));
+ out:
+	return r;
+}
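Both functions follow the driver's usual two-step config access: a PAGE_HEADER action to fetch the page header, then READ_CURRENT or WRITE_CURRENT with the caller's buffer. Hedged usage sketch of the read-modify-write pair; the coalescing flag name is taken from the MPI2 spec headers:

Mpi2ConfigReply_t mpi_reply;
Mpi2IOCPage1_t ioc_pg1;

if (!mpt3sas_config_get_ioc_pg1(ioc, &mpi_reply, &ioc_pg1)) {
        /* e.g. disable interrupt coalescing, then write it back */
        ioc_pg1.Flags &= ~cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
        mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
}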
 
 /**
  * mpt3sas_config_get_sas_device_pg0 - obtain sas device page 0
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 5e8c059..7d69695 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -185,17 +185,15 @@
 	if (!desc)
 		return;
 
-	pr_info(MPT3SAS_FMT "%s: %s, smid(%d)\n",
-	    ioc->name, calling_function_name, desc, smid);
+	ioc_info(ioc, "%s: %s, smid(%d)\n", calling_function_name, desc, smid);
 
 	if (!mpi_reply)
 		return;
 
 	if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo)
-		pr_info(MPT3SAS_FMT
-		    "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
-		    ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
-		    le32_to_cpu(mpi_reply->IOCLogInfo));
+		ioc_info(ioc, "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
+			 le16_to_cpu(mpi_reply->IOCStatus),
+			 le32_to_cpu(mpi_reply->IOCLogInfo));
 
 	if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
 	    mpi_request->Function ==
@@ -208,38 +206,32 @@
 		sas_device = mpt3sas_get_sdev_by_handle(ioc,
 		    le16_to_cpu(scsi_reply->DevHandle));
 		if (sas_device) {
-			pr_warn(MPT3SAS_FMT "\tsas_address(0x%016llx), phy(%d)\n",
-				ioc->name, (unsigned long long)
-			    sas_device->sas_address, sas_device->phy);
-			pr_warn(MPT3SAS_FMT
-			    "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
-			    ioc->name, (unsigned long long)
-			    sas_device->enclosure_logical_id, sas_device->slot);
+			ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
+				 (u64)sas_device->sas_address,
+				 sas_device->phy);
+			ioc_warn(ioc, "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
+				 (u64)sas_device->enclosure_logical_id,
+				 sas_device->slot);
 			sas_device_put(sas_device);
 		}
 		if (!sas_device) {
 			pcie_device = mpt3sas_get_pdev_by_handle(ioc,
 				le16_to_cpu(scsi_reply->DevHandle));
 			if (pcie_device) {
-				pr_warn(MPT3SAS_FMT
-				    "\tWWID(0x%016llx), port(%d)\n", ioc->name,
-				    (unsigned long long)pcie_device->wwid,
-				    pcie_device->port_num);
+				ioc_warn(ioc, "\tWWID(0x%016llx), port(%d)\n",
+					 (unsigned long long)pcie_device->wwid,
+					 pcie_device->port_num);
 				if (pcie_device->enclosure_handle != 0)
-					pr_warn(MPT3SAS_FMT
-					    "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
-					    ioc->name, (unsigned long long)
-					    pcie_device->enclosure_logical_id,
-					    pcie_device->slot);
+					ioc_warn(ioc, "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
+						 (u64)pcie_device->enclosure_logical_id,
+						 pcie_device->slot);
 				pcie_device_put(pcie_device);
 			}
 		}
 		if (scsi_reply->SCSIState || scsi_reply->SCSIStatus)
-			pr_info(MPT3SAS_FMT
-			    "\tscsi_state(0x%02x), scsi_status"
-			    "(0x%02x)\n", ioc->name,
-			    scsi_reply->SCSIState,
-			    scsi_reply->SCSIStatus);
+			ioc_info(ioc, "\tscsi_state(0x%02x), scsi_status(0x%02x)\n",
+				 scsi_reply->SCSIState,
+				 scsi_reply->SCSIStatus);
 	}
 }
 
@@ -466,8 +458,7 @@
 	int i;
 	u8 issue_reset;
 
-	dtmprintk(ioc, pr_info(MPT3SAS_FMT
-			"%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
+	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
 	for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
 		if (!(ioc->diag_buffer_status[i] &
 		      MPT3_DIAG_BUFFER_IS_REGISTERED))
@@ -487,8 +478,7 @@
  */
 void mpt3sas_ctl_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
 {
-	dtmprintk(ioc, pr_info(MPT3SAS_FMT
-			"%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
+	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__));
 	if (ioc->ctl_cmds.status & MPT3_CMD_PENDING) {
 		ioc->ctl_cmds.status |= MPT3_CMD_RESET;
 		mpt3sas_base_free_smid(ioc, ioc->ctl_cmds.smid);
@@ -506,8 +496,7 @@
 {
 	int i;
 
-	dtmprintk(ioc, pr_info(MPT3SAS_FMT
-			"%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
+	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
 
 	for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
 		if (!(ioc->diag_buffer_status[i] &
@@ -607,15 +596,23 @@
 		if (priv_data->sas_target->handle != handle)
 			continue;
 		st = scsi_cmd_priv(scmd);
-		tm_request->TaskMID = cpu_to_le16(st->smid);
-		found = 1;
+
+		/*
+		 * If the TaskMID given from user space is zero, the first
+		 * outstanding smid is picked up. Otherwise, only the
+		 * targeted smid is used.
+		 */
+		if (!tm_request->TaskMID || tm_request->TaskMID == st->smid) {
+			tm_request->TaskMID = cpu_to_le16(st->smid);
+			found = 1;
+		}
 	}
 
 	if (!found) {
-		dctlprintk(ioc, pr_info(MPT3SAS_FMT
-			"%s: handle(0x%04x), lun(%d), no active mid!!\n",
-			ioc->name,
-		    desc, le16_to_cpu(tm_request->DevHandle), lun));
+		dctlprintk(ioc,
+			   ioc_info(ioc, "%s: handle(0x%04x), lun(%d), no active mid!!\n",
+				    desc, le16_to_cpu(tm_request->DevHandle),
+				    lun));
 		tm_reply = ioc->ctl_cmds.reply;
 		tm_reply->DevHandle = tm_request->DevHandle;
 		tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
@@ -631,10 +628,10 @@
 		return 1;
 	}
 
-	dctlprintk(ioc, pr_info(MPT3SAS_FMT
-		"%s: handle(0x%04x), lun(%d), task_mid(%d)\n", ioc->name,
-	    desc, le16_to_cpu(tm_request->DevHandle), lun,
-	     le16_to_cpu(tm_request->TaskMID)));
+	dctlprintk(ioc,
+		   ioc_info(ioc, "%s: handle(0x%04x), lun(%d), task_mid(%d)\n",
+			    desc, le16_to_cpu(tm_request->DevHandle), lun,
+			    le16_to_cpu(tm_request->TaskMID)));
 	return 0;
 }
 
@@ -652,7 +649,6 @@
 	MPI2DefaultReply_t *mpi_reply;
 	Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request = NULL;
 	struct _pcie_device *pcie_device = NULL;
-	u32 ioc_state;
 	u16 smid;
 	u8 timeout;
 	u8 issue_reset;
@@ -665,45 +661,24 @@
 	dma_addr_t data_in_dma = 0;
 	size_t data_in_sz = 0;
 	long ret;
-	u16 wait_state_count;
 	u16 device_handle = MPT3SAS_INVALID_DEVICE_HANDLE;
-	u8 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
 
 	issue_reset = 0;
 
 	if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
-		pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
 		ret = -EAGAIN;
 		goto out;
 	}
 
-	wait_state_count = 0;
-	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
-	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
-		if (wait_state_count++ == 10) {
-			pr_err(MPT3SAS_FMT
-			    "%s: failed due to ioc not operational\n",
-			    ioc->name, __func__);
-			ret = -EFAULT;
-			goto out;
-		}
-		ssleep(1);
-		ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
-		pr_info(MPT3SAS_FMT
-			"%s: waiting for operational state(count=%d)\n",
-			ioc->name,
-		    __func__, wait_state_count);
-	}
-	if (wait_state_count)
-		pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
-		    ioc->name, __func__);
+	ret = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
+	if (ret)
+		goto out;
 
 	mpi_request = kzalloc(ioc->request_sz, GFP_KERNEL);
 	if (!mpi_request) {
-		pr_err(MPT3SAS_FMT
-			"%s: failed obtaining a memory for mpi_request\n",
-			ioc->name, __func__);
+		ioc_err(ioc, "%s: failed obtaining memory for mpi_request\n",
+			__func__);
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -726,8 +701,7 @@
 	if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
 		smid = mpt3sas_base_get_smid_hpr(ioc, ioc->ctl_cb_idx);
 		if (!smid) {
-			pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-			    ioc->name, __func__);
+			ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
 			ret = -EAGAIN;
 			goto out;
 		}
@@ -740,6 +714,7 @@
 	ioc->ctl_cmds.status = MPT3_CMD_PENDING;
 	memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
 	request = mpt3sas_base_get_msg_frame(ioc, smid);
+	memset(request, 0, ioc->request_sz);
 	memcpy(request, mpi_request, karg.data_sge_offset*4);
 	ioc->ctl_cmds.smid = smid;
 	data_out_sz = karg.data_out_size;
@@ -762,8 +737,8 @@
 
 	/* obtain dma-able memory for data transfer */
 	if (data_out_sz) /* WRITE */ {
-		data_out = pci_alloc_consistent(ioc->pdev, data_out_sz,
-		    &data_out_dma);
+		data_out = dma_alloc_coherent(&ioc->pdev->dev, data_out_sz,
+				&data_out_dma, GFP_KERNEL);
 		if (!data_out) {
 			pr_err("failure at %s:%d/%s()!\n", __FILE__,
 			    __LINE__, __func__);
@@ -782,8 +757,8 @@
 	}
 
 	if (data_in_sz) /* READ */ {
-		data_in = pci_alloc_consistent(ioc->pdev, data_in_sz,
-		    &data_in_dma);
+		data_in = dma_alloc_coherent(&ioc->pdev->dev, data_in_sz,
+				&data_in_dma, GFP_KERNEL);
 		if (!data_in) {
 			pr_err("failure at %s:%d/%s()!\n", __FILE__,
 			    __LINE__, __func__);
@@ -823,9 +798,9 @@
 		ioc->build_nvme_prp(ioc, smid, nvme_encap_request,
 		    data_out_dma, data_out_sz, data_in_dma, data_in_sz);
 		if (test_bit(device_handle, ioc->device_remove_in_progress)) {
-			dtmprintk(ioc, pr_info(MPT3SAS_FMT "handle(0x%04x) :"
-			    "ioctl failed due to device removal in progress\n",
-			    ioc->name, device_handle));
+			dtmprintk(ioc,
+				  ioc_info(ioc, "handle(0x%04x): ioctl failed due to device removal in progress\n",
+					   device_handle));
 			mpt3sas_base_free_smid(ioc, smid);
 			ret = -EINVAL;
 			goto out;
@@ -843,9 +818,9 @@
 		    mpt3sas_base_get_sense_buffer_dma(ioc, smid);
 		memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE);
 		if (test_bit(device_handle, ioc->device_remove_in_progress)) {
-			dtmprintk(ioc, pr_info(MPT3SAS_FMT
-				"handle(0x%04x) :ioctl failed due to device removal in progress\n",
-				ioc->name, device_handle));
+			dtmprintk(ioc,
+				  ioc_info(ioc, "handle(0x%04x): ioctl failed due to device removal in progress\n",
+					   device_handle));
 			mpt3sas_base_free_smid(ioc, smid);
 			ret = -EINVAL;
 			goto out;
@@ -855,7 +830,7 @@
 		if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)
 			ioc->put_smid_scsi_io(ioc, smid, device_handle);
 		else
-			mpt3sas_base_put_smid_default(ioc, smid);
+			ioc->put_smid_default(ioc, smid);
 		break;
 	}
 	case MPI2_FUNCTION_SCSI_TASK_MGMT:
@@ -863,10 +838,10 @@
 		Mpi2SCSITaskManagementRequest_t *tm_request =
 		    (Mpi2SCSITaskManagementRequest_t *)request;
 
-		dtmprintk(ioc, pr_info(MPT3SAS_FMT
-			"TASK_MGMT: handle(0x%04x), task_type(0x%02x)\n",
-			ioc->name,
-		    le16_to_cpu(tm_request->DevHandle), tm_request->TaskType));
+		dtmprintk(ioc,
+			  ioc_info(ioc, "TASK_MGMT: handle(0x%04x), task_type(0x%02x)\n",
+				   le16_to_cpu(tm_request->DevHandle),
+				   tm_request->TaskType));
 		ioc->got_task_abort_from_ioctl = 1;
 		if (tm_request->TaskType ==
 		    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
@@ -881,9 +856,9 @@
 		ioc->got_task_abort_from_ioctl = 0;
 
 		if (test_bit(device_handle, ioc->device_remove_in_progress)) {
-			dtmprintk(ioc, pr_info(MPT3SAS_FMT
-				"handle(0x%04x) :ioctl failed due to device removal in progress\n",
-				ioc->name, device_handle));
+			dtmprintk(ioc,
+				  ioc_info(ioc, "handle(0x%04x): ioctl failed due to device removal in progress\n",
+					   device_handle));
 			mpt3sas_base_free_smid(ioc, smid);
 			ret = -EINVAL;
 			goto out;
@@ -892,7 +867,7 @@
 		    tm_request->DevHandle));
 		ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
 		    data_in_dma, data_in_sz);
-		mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
+		ioc->put_smid_hi_priority(ioc, smid, 0);
 		break;
 	}
 	case MPI2_FUNCTION_SMP_PASSTHROUGH:
@@ -923,22 +898,22 @@
 		}
 		ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
 		    data_in_sz);
-		mpt3sas_base_put_smid_default(ioc, smid);
+		ioc->put_smid_default(ioc, smid);
 		break;
 	}
 	case MPI2_FUNCTION_SATA_PASSTHROUGH:
 	{
 		if (test_bit(device_handle, ioc->device_remove_in_progress)) {
-			dtmprintk(ioc, pr_info(MPT3SAS_FMT
-				"handle(0x%04x) :ioctl failed due to device removal in progress\n",
-				ioc->name, device_handle));
+			dtmprintk(ioc,
+				  ioc_info(ioc, "handle(0x%04x): ioctl failed due to device removal in progress\n",
+					   device_handle));
 			mpt3sas_base_free_smid(ioc, smid);
 			ret = -EINVAL;
 			goto out;
 		}
 		ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
 		    data_in_sz);
-		mpt3sas_base_put_smid_default(ioc, smid);
+		ioc->put_smid_default(ioc, smid);
 		break;
 	}
 	case MPI2_FUNCTION_FW_DOWNLOAD:
@@ -946,7 +921,7 @@
 	{
 		ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
 		    data_in_sz);
-		mpt3sas_base_put_smid_default(ioc, smid);
+		ioc->put_smid_default(ioc, smid);
 		break;
 	}
 	case MPI2_FUNCTION_TOOLBOX:
@@ -954,14 +929,38 @@
 		Mpi2ToolboxCleanRequest_t *toolbox_request =
 			(Mpi2ToolboxCleanRequest_t *)mpi_request;
 
-		if (toolbox_request->Tool == MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL) {
+		if ((toolbox_request->Tool == MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL)
+		    || (toolbox_request->Tool ==
+		    MPI26_TOOLBOX_BACKEND_PCIE_LANE_MARGIN))
 			ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
 				data_in_dma, data_in_sz);
-		} else {
+		else if (toolbox_request->Tool ==
+				MPI2_TOOLBOX_MEMORY_MOVE_TOOL) {
+			Mpi2ToolboxMemMoveRequest_t *mem_move_request =
+					(Mpi2ToolboxMemMoveRequest_t *)request;
+			Mpi2SGESimple64_t tmp, *src = NULL, *dst = NULL;
+
+			ioc->build_sg_mpi(ioc, psge, data_out_dma,
+					data_out_sz, data_in_dma, data_in_sz);
+			if (data_out_sz && !data_in_sz) {
+				dst =
+				    (Mpi2SGESimple64_t *)&mem_move_request->SGL;
+				src = (void *)dst + ioc->sge_size;
+
+				memcpy(&tmp, src, ioc->sge_size);
+				memcpy(src, dst, ioc->sge_size);
+				memcpy(dst, &tmp, ioc->sge_size);
+			}
+			if (ioc->logging_level & MPT_DEBUG_TM) {
+				ioc_info(ioc,
+				  "Mpi2ToolboxMemMoveRequest_t request msg\n");
+				_debug_dump_mf(mem_move_request,
+							ioc->request_sz/4);
+			}
+		} else
 			ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
-				data_in_dma, data_in_sz);
-		}
-		mpt3sas_base_put_smid_default(ioc, smid);
+			    data_in_dma, data_in_sz);
+		ioc->put_smid_default(ioc, smid);
 		break;
 	}
 	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
@@ -981,7 +980,7 @@
 	default:
 		ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
 		    data_in_dma, data_in_sz);
-		mpt3sas_base_put_smid_default(ioc, smid);
+		ioc->put_smid_default(ioc, smid);
 		break;
 	}
 
@@ -1017,12 +1016,10 @@
 		Mpi2SCSITaskManagementReply_t *tm_reply =
 		    (Mpi2SCSITaskManagementReply_t *)mpi_reply;
 
-		pr_info(MPT3SAS_FMT "TASK_MGMT: " \
-		    "IOCStatus(0x%04x), IOCLogInfo(0x%08x), "
-		    "TerminationCount(0x%08x)\n", ioc->name,
-		    le16_to_cpu(tm_reply->IOCStatus),
-		    le32_to_cpu(tm_reply->IOCLogInfo),
-		    le32_to_cpu(tm_reply->TerminationCount));
+		ioc_info(ioc, "TASK_MGMT: IOCStatus(0x%04x), IOCLogInfo(0x%08x), TerminationCount(0x%08x)\n",
+			 le16_to_cpu(tm_reply->IOCStatus),
+			 le32_to_cpu(tm_reply->IOCLogInfo),
+			 le32_to_cpu(tm_reply->TerminationCount));
 	}
 
 	/* copy out xdata to user */
@@ -1054,9 +1051,7 @@
 	    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH || mpi_request->Function ==
 	    MPI2_FUNCTION_NVME_ENCAPSULATED)) {
 		if (karg.sense_data_ptr == NULL) {
-			pr_info(MPT3SAS_FMT "Response buffer provided"
-			    " by application is NULL; Response data will"
-			    " not be returned.\n", ioc->name);
+			ioc_info(ioc, "Response buffer provided by application is NULL; Response data will not be returned\n");
 			goto out;
 		}
 		sz_arg = (mpi_request->Function ==
@@ -1079,18 +1074,19 @@
 		    mpi_request->Function ==
 		    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
 		    mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH)) {
-			pr_info(MPT3SAS_FMT "issue target reset: handle = (0x%04x)\n",
-				ioc->name,
-				le16_to_cpu(mpi_request->FunctionDependent1));
+			ioc_info(ioc, "issue target reset: handle = (0x%04x)\n",
+				 le16_to_cpu(mpi_request->FunctionDependent1));
 			mpt3sas_halt_firmware(ioc);
 			pcie_device = mpt3sas_get_pdev_by_handle(ioc,
 				le16_to_cpu(mpi_request->FunctionDependent1));
-			if (pcie_device && (!ioc->tm_custom_handling))
+			if (pcie_device && (!ioc->tm_custom_handling) &&
+			    (!(mpt3sas_scsih_is_pcie_scsi_device(
+			    pcie_device->device_info))))
 				mpt3sas_scsih_issue_locked_tm(ioc,
 				  le16_to_cpu(mpi_request->FunctionDependent1),
 				  0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
 				  0, pcie_device->reset_timeout,
-				  tr_method);
+			MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE);
 			else
 				mpt3sas_scsih_issue_locked_tm(ioc,
 				  le16_to_cpu(mpi_request->FunctionDependent1),
@@ -1106,11 +1102,11 @@
 
 	/* free memory associated with sg buffers */
 	if (data_in)
-		pci_free_consistent(ioc->pdev, data_in_sz, data_in,
+		dma_free_coherent(&ioc->pdev->dev, data_in_sz, data_in,
 		    data_in_dma);
 
 	if (data_out)
-		pci_free_consistent(ioc->pdev, data_out_sz, data_out,
+		dma_free_coherent(&ioc->pdev->dev, data_out_sz, data_out,
 		    data_out_dma);
 
 	kfree(mpi_request);
@@ -1128,8 +1124,8 @@
 {
 	struct mpt3_ioctl_iocinfo karg;
 
-	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
-	    __func__));
+	dctlprintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
 
 	memset(&karg, 0 , sizeof(karg));
 	if (ioc->pfacts)
@@ -1188,8 +1184,8 @@
 		return -EFAULT;
 	}
 
-	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
-	    __func__));
+	dctlprintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
 
 	karg.event_entries = MPT3SAS_CTL_EVENT_LOG_SIZE;
 	memcpy(karg.event_types, ioc->event_type,
@@ -1219,8 +1215,8 @@
 		return -EFAULT;
 	}
 
-	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
-	    __func__));
+	dctlprintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
 
 	memcpy(ioc->event_type, karg.event_types,
 	    MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));
@@ -1259,8 +1255,8 @@
 		return -EFAULT;
 	}
 
-	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
-	    __func__));
+	dctlprintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
 
 	number_bytes = karg.hdr.max_data_size -
 	    sizeof(struct mpt3_ioctl_header);
@@ -1306,12 +1302,11 @@
 	    ioc->is_driver_loading)
 		return -EAGAIN;
 
-	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
-	    __func__));
+	dctlprintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
 
 	retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
-	pr_info(MPT3SAS_FMT "host reset: %s\n",
-	    ioc->name, ((!retval) ? "SUCCESS" : "FAILED"));
+	ioc_info(ioc, "host reset: %s\n", ((!retval) ? "SUCCESS" : "FAILED"));
 	return 0;
 }
 
@@ -1440,8 +1435,8 @@
 		return -EFAULT;
 	}
 
-	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dctlprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	rc = _ctl_btdh_search_sas_device(ioc, &karg);
 	if (!rc)
@@ -1512,53 +1507,46 @@
 	u32 ioc_state;
 	u8 issue_reset = 0;
 
-	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dctlprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
 	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
-		pr_err(MPT3SAS_FMT
-		    "%s: failed due to ioc not operational\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: failed due to ioc not operational\n",
+			__func__);
 		rc = -EAGAIN;
 		goto out;
 	}
 
 	if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
-		pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
 		rc = -EAGAIN;
 		goto out;
 	}
 
 	buffer_type = diag_register->buffer_type;
 	if (!_ctl_diag_capability(ioc, buffer_type)) {
-		pr_err(MPT3SAS_FMT
-			"%s: doesn't have capability for buffer_type(0x%02x)\n",
-			ioc->name, __func__, buffer_type);
+		ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
+			__func__, buffer_type);
 		return -EPERM;
 	}
 
 	if (ioc->diag_buffer_status[buffer_type] &
 	    MPT3_DIAG_BUFFER_IS_REGISTERED) {
-		pr_err(MPT3SAS_FMT
-			"%s: already has a registered buffer for buffer_type(0x%02x)\n",
-			ioc->name, __func__,
-		    buffer_type);
+		ioc_err(ioc, "%s: already has a registered buffer for buffer_type(0x%02x)\n",
+			__func__, buffer_type);
 		return -EINVAL;
 	}
 
 	if (diag_register->requested_buffer_size % 4)  {
-		pr_err(MPT3SAS_FMT
-			"%s: the requested_buffer_size is not 4 byte aligned\n",
-			ioc->name, __func__);
+		ioc_err(ioc, "%s: the requested_buffer_size is not 4 byte aligned\n",
+			__func__);
 		return -EINVAL;
 	}
 
 	smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
 	if (!smid) {
-		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
 		rc = -EAGAIN;
 		goto out;
 	}
@@ -1580,9 +1568,9 @@
 	if (request_data) {
 		request_data_dma = ioc->diag_buffer_dma[buffer_type];
 		if (request_data_sz != ioc->diag_buffer_sz[buffer_type]) {
-			pci_free_consistent(ioc->pdev,
-			    ioc->diag_buffer_sz[buffer_type],
-			    request_data, request_data_dma);
+			dma_free_coherent(&ioc->pdev->dev,
+					ioc->diag_buffer_sz[buffer_type],
+					request_data, request_data_dma);
 			request_data = NULL;
 		}
 	}
@@ -1590,12 +1578,11 @@
 	if (request_data == NULL) {
 		ioc->diag_buffer_sz[buffer_type] = 0;
 		ioc->diag_buffer_dma[buffer_type] = 0;
-		request_data = pci_alloc_consistent(
-			ioc->pdev, request_data_sz, &request_data_dma);
+		request_data = dma_alloc_coherent(&ioc->pdev->dev,
+				request_data_sz, &request_data_dma, GFP_KERNEL);
 		if (request_data == NULL) {
-			pr_err(MPT3SAS_FMT "%s: failed allocating memory" \
-			    " for diag buffers, requested size(%d)\n",
-			    ioc->name, __func__, request_data_sz);
+			ioc_err(ioc, "%s: failed allocating memory for diag buffers, requested size(%d)\n",
+				__func__, request_data_sz);
 			mpt3sas_base_free_smid(ioc, smid);
 			return -ENOMEM;
 		}
@@ -1612,18 +1599,18 @@
 	mpi_request->VF_ID = 0; /* TODO */
 	mpi_request->VP_ID = 0;
 
-	dctlprintk(ioc, pr_info(MPT3SAS_FMT
-		"%s: diag_buffer(0x%p), dma(0x%llx), sz(%d)\n",
-		ioc->name, __func__, request_data,
-	    (unsigned long long)request_data_dma,
-	    le32_to_cpu(mpi_request->BufferLength)));
+	dctlprintk(ioc,
+		   ioc_info(ioc, "%s: diag_buffer(0x%p), dma(0x%llx), sz(%d)\n",
+			    __func__, request_data,
+			    (unsigned long long)request_data_dma,
+			    le32_to_cpu(mpi_request->BufferLength)));
 
 	for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
 		mpi_request->ProductSpecific[i] =
 			cpu_to_le32(ioc->product_specific[buffer_type][i]);
 
 	init_completion(&ioc->ctl_cmds.done);
-	mpt3sas_base_put_smid_default(ioc, smid);
+	ioc->put_smid_default(ioc, smid);
 	wait_for_completion_timeout(&ioc->ctl_cmds.done,
 	    MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
 
@@ -1637,8 +1624,7 @@
 
 	/* process the completed Reply Message Frame */
 	if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
-		pr_err(MPT3SAS_FMT "%s: no reply message\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: no reply message\n", __func__);
 		rc = -EFAULT;
 		goto out;
 	}
@@ -1649,13 +1635,11 @@
 	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
 		ioc->diag_buffer_status[buffer_type] |=
 			MPT3_DIAG_BUFFER_IS_REGISTERED;
-		dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
-		    ioc->name, __func__));
+		dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__));
 	} else {
-		pr_info(MPT3SAS_FMT
-			"%s: ioc_status(0x%04x) log_info(0x%08x)\n",
-			ioc->name, __func__,
-		    ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
+		ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
+			 __func__,
+			 ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
 		rc = -EFAULT;
 	}
 
@@ -1666,7 +1650,7 @@
  out:
 
 	if (rc && request_data)
-		pci_free_consistent(ioc->pdev, request_data_sz,
+		dma_free_coherent(&ioc->pdev->dev, request_data_sz,
 		    request_data, request_data_dma);
 
 	ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
@@ -1689,8 +1673,7 @@
 	memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
 
 	if (bits_to_register & 1) {
-		pr_info(MPT3SAS_FMT "registering trace buffer support\n",
-		    ioc->name);
+		ioc_info(ioc, "registering trace buffer support\n");
 		ioc->diag_trigger_master.MasterData =
 		    (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
 		diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
@@ -1701,8 +1684,7 @@
 	}
 
 	if (bits_to_register & 2) {
-		pr_info(MPT3SAS_FMT "registering snapshot buffer support\n",
-		    ioc->name);
+		ioc_info(ioc, "registering snapshot buffer support\n");
 		diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_SNAPSHOT;
 		/* register for 2MB buffers  */
 		diag_register.requested_buffer_size = 2 * (1024 * 1024);
@@ -1711,8 +1693,7 @@
 	}
 
 	if (bits_to_register & 4) {
-		pr_info(MPT3SAS_FMT "registering extended buffer support\n",
-		    ioc->name);
+		ioc_info(ioc, "registering extended buffer support\n");
 		diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_EXTENDED;
 		/* register for 2MB buffers  */
 		diag_register.requested_buffer_size = 2 * (1024 * 1024);
@@ -1768,51 +1749,46 @@
 		return -EFAULT;
 	}
 
-	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dctlprintk(ioc, ioc_info(ioc, "%s\n",
+				 __func__));
 
 	buffer_type = karg.unique_id & 0x000000ff;
 	if (!_ctl_diag_capability(ioc, buffer_type)) {
-		pr_err(MPT3SAS_FMT
-			"%s: doesn't have capability for buffer_type(0x%02x)\n",
-			ioc->name, __func__, buffer_type);
+		ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
+			__func__, buffer_type);
 		return -EPERM;
 	}
 
 	if ((ioc->diag_buffer_status[buffer_type] &
 	    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
-		pr_err(MPT3SAS_FMT
-			"%s: buffer_type(0x%02x) is not registered\n",
-			ioc->name, __func__, buffer_type);
+		ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
+			__func__, buffer_type);
 		return -EINVAL;
 	}
 	if ((ioc->diag_buffer_status[buffer_type] &
 	    MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
-		pr_err(MPT3SAS_FMT
-			"%s: buffer_type(0x%02x) has not been released\n",
-			ioc->name, __func__, buffer_type);
+		ioc_err(ioc, "%s: buffer_type(0x%02x) has not been released\n",
+			__func__, buffer_type);
 		return -EINVAL;
 	}
 
 	if (karg.unique_id != ioc->unique_id[buffer_type]) {
-		pr_err(MPT3SAS_FMT
-			"%s: unique_id(0x%08x) is not registered\n",
-			ioc->name, __func__, karg.unique_id);
+		ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
+			__func__, karg.unique_id);
 		return -EINVAL;
 	}
 
 	request_data = ioc->diag_buffer[buffer_type];
 	if (!request_data) {
-		pr_err(MPT3SAS_FMT
-			"%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
-			ioc->name, __func__, buffer_type);
+		ioc_err(ioc, "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
+			__func__, buffer_type);
 		return -ENOMEM;
 	}
 
 	request_data_sz = ioc->diag_buffer_sz[buffer_type];
 	request_data_dma = ioc->diag_buffer_dma[buffer_type];
-	pci_free_consistent(ioc->pdev, request_data_sz,
-	    request_data, request_data_dma);
+	dma_free_coherent(&ioc->pdev->dev, request_data_sz,
+			request_data, request_data_dma);
 	ioc->diag_buffer[buffer_type] = NULL;
 	ioc->diag_buffer_status[buffer_type] = 0;
 	return 0;
@@ -1841,41 +1817,37 @@
 		return -EFAULT;
 	}
 
-	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dctlprintk(ioc, ioc_info(ioc, "%s\n",
+				 __func__));
 
 	karg.application_flags = 0;
 	buffer_type = karg.buffer_type;
 
 	if (!_ctl_diag_capability(ioc, buffer_type)) {
-		pr_err(MPT3SAS_FMT
-			"%s: doesn't have capability for buffer_type(0x%02x)\n",
-			ioc->name, __func__, buffer_type);
+		ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
+			__func__, buffer_type);
 		return -EPERM;
 	}
 
 	if ((ioc->diag_buffer_status[buffer_type] &
 	    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
-		pr_err(MPT3SAS_FMT
-			"%s: buffer_type(0x%02x) is not registered\n",
-			ioc->name, __func__, buffer_type);
+		ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
+			__func__, buffer_type);
 		return -EINVAL;
 	}
 
 	if (karg.unique_id & 0xffffff00) {
 		if (karg.unique_id != ioc->unique_id[buffer_type]) {
-			pr_err(MPT3SAS_FMT
-				"%s: unique_id(0x%08x) is not registered\n",
-				ioc->name, __func__, karg.unique_id);
+			ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
+				__func__, karg.unique_id);
 			return -EINVAL;
 		}
 	}
 
 	request_data = ioc->diag_buffer[buffer_type];
 	if (!request_data) {
-		pr_err(MPT3SAS_FMT
-			"%s: doesn't have buffer for buffer_type(0x%02x)\n",
-			ioc->name, __func__, buffer_type);
+		ioc_err(ioc, "%s: doesn't have buffer for buffer_type(0x%02x)\n",
+			__func__, buffer_type);
 		return -ENOMEM;
 	}
 
@@ -1897,9 +1869,8 @@
 	karg.diagnostic_flags = ioc->diagnostic_flags[buffer_type];
 
 	if (copy_to_user(arg, &karg, sizeof(struct mpt3_diag_query))) {
-		pr_err(MPT3SAS_FMT
-			"%s: unable to write mpt3_diag_query data @ %p\n",
-			ioc->name, __func__, arg);
+		ioc_err(ioc, "%s: unable to write mpt3_diag_query data @ %p\n",
+			__func__, arg);
 		return -EFAULT;
 	}
 	return 0;
@@ -1923,8 +1894,8 @@
 	u32 ioc_state;
 	int rc;
 
-	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dctlprintk(ioc, ioc_info(ioc, "%s\n",
+				 __func__));
 
 	rc = 0;
 	*issue_reset = 0;
@@ -1935,24 +1906,22 @@
 		    MPT3_DIAG_BUFFER_IS_REGISTERED)
 			ioc->diag_buffer_status[buffer_type] |=
 			    MPT3_DIAG_BUFFER_IS_RELEASED;
-		dctlprintk(ioc, pr_info(MPT3SAS_FMT
-			"%s: skipping due to FAULT state\n", ioc->name,
-		    __func__));
+		dctlprintk(ioc,
+			   ioc_info(ioc, "%s: skipping due to FAULT state\n",
+				    __func__));
 		rc = -EAGAIN;
 		goto out;
 	}
 
 	if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
-		pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
 		rc = -EAGAIN;
 		goto out;
 	}
 
 	smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
 	if (!smid) {
-		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
 		rc = -EAGAIN;
 		goto out;
 	}
@@ -1968,7 +1937,7 @@
 	mpi_request->VP_ID = 0;
 
 	init_completion(&ioc->ctl_cmds.done);
-	mpt3sas_base_put_smid_default(ioc, smid);
+	ioc->put_smid_default(ioc, smid);
 	wait_for_completion_timeout(&ioc->ctl_cmds.done,
 	    MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
 
@@ -1982,8 +1951,7 @@
 
 	/* process the completed Reply Message Frame */
 	if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
-		pr_err(MPT3SAS_FMT "%s: no reply message\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: no reply message\n", __func__);
 		rc = -EFAULT;
 		goto out;
 	}
@@ -1994,13 +1962,11 @@
 	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
 		ioc->diag_buffer_status[buffer_type] |=
 		    MPT3_DIAG_BUFFER_IS_RELEASED;
-		dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
-		    ioc->name, __func__));
+		dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__));
 	} else {
-		pr_info(MPT3SAS_FMT
-			"%s: ioc_status(0x%04x) log_info(0x%08x)\n",
-			ioc->name, __func__,
-		    ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
+		ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
+			 __func__,
+			 ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
 		rc = -EFAULT;
 	}
 
@@ -2033,47 +1999,41 @@
 		return -EFAULT;
 	}
 
-	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dctlprintk(ioc, ioc_info(ioc, "%s\n",
+				 __func__));
 
 	buffer_type = karg.unique_id & 0x000000ff;
 	if (!_ctl_diag_capability(ioc, buffer_type)) {
-		pr_err(MPT3SAS_FMT
-			"%s: doesn't have capability for buffer_type(0x%02x)\n",
-			ioc->name, __func__, buffer_type);
+		ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
+			__func__, buffer_type);
 		return -EPERM;
 	}
 
 	if ((ioc->diag_buffer_status[buffer_type] &
 	    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
-		pr_err(MPT3SAS_FMT
-			"%s: buffer_type(0x%02x) is not registered\n",
-			ioc->name, __func__, buffer_type);
+		ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
+			__func__, buffer_type);
 		return -EINVAL;
 	}
 
 	if (karg.unique_id != ioc->unique_id[buffer_type]) {
-		pr_err(MPT3SAS_FMT
-			"%s: unique_id(0x%08x) is not registered\n",
-			ioc->name, __func__, karg.unique_id);
+		ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
+			__func__, karg.unique_id);
 		return -EINVAL;
 	}
 
 	if (ioc->diag_buffer_status[buffer_type] &
 	    MPT3_DIAG_BUFFER_IS_RELEASED) {
-		pr_err(MPT3SAS_FMT
-			"%s: buffer_type(0x%02x) is already released\n",
-			ioc->name, __func__,
-		    buffer_type);
+		ioc_err(ioc, "%s: buffer_type(0x%02x) is already released\n",
+			__func__, buffer_type);
 		return 0;
 	}
 
 	request_data = ioc->diag_buffer[buffer_type];
 
 	if (!request_data) {
-		pr_err(MPT3SAS_FMT
-			"%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
-			ioc->name, __func__, buffer_type);
+		ioc_err(ioc, "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
+			__func__, buffer_type);
 		return -ENOMEM;
 	}
 
@@ -2084,9 +2044,8 @@
 		    MPT3_DIAG_BUFFER_IS_RELEASED;
 		ioc->diag_buffer_status[buffer_type] &=
 		    ~MPT3_DIAG_BUFFER_IS_DIAG_RESET;
-		pr_err(MPT3SAS_FMT
-			"%s: buffer_type(0x%02x) was released due to host reset\n",
-			ioc->name, __func__, buffer_type);
+		ioc_err(ioc, "%s: buffer_type(0x%02x) was released due to host reset\n",
+			__func__, buffer_type);
 		return 0;
 	}
 
@@ -2124,38 +2083,34 @@
 		return -EFAULT;
 	}
 
-	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dctlprintk(ioc, ioc_info(ioc, "%s\n",
+				 __func__));
 
 	buffer_type = karg.unique_id & 0x000000ff;
 	if (!_ctl_diag_capability(ioc, buffer_type)) {
-		pr_err(MPT3SAS_FMT
-			"%s: doesn't have capability for buffer_type(0x%02x)\n",
-			ioc->name, __func__, buffer_type);
+		ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
+			__func__, buffer_type);
 		return -EPERM;
 	}
 
 	if (karg.unique_id != ioc->unique_id[buffer_type]) {
-		pr_err(MPT3SAS_FMT
-			"%s: unique_id(0x%08x) is not registered\n",
-			ioc->name, __func__, karg.unique_id);
+		ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
+			__func__, karg.unique_id);
 		return -EINVAL;
 	}
 
 	request_data = ioc->diag_buffer[buffer_type];
 	if (!request_data) {
-		pr_err(MPT3SAS_FMT
-			"%s: doesn't have buffer for buffer_type(0x%02x)\n",
-			ioc->name, __func__, buffer_type);
+		ioc_err(ioc, "%s: doesn't have buffer for buffer_type(0x%02x)\n",
+			__func__, buffer_type);
 		return -ENOMEM;
 	}
 
 	request_size = ioc->diag_buffer_sz[buffer_type];
 
 	if ((karg.starting_offset % 4) || (karg.bytes_to_read % 4)) {
-		pr_err(MPT3SAS_FMT "%s: either the starting_offset " \
-		    "or bytes_to_read are not 4 byte aligned\n", ioc->name,
-		    __func__);
+		ioc_err(ioc, "%s: either the starting_offset or bytes_to_read are not 4 byte aligned\n",
+			__func__);
 		return -EINVAL;
 	}
 
@@ -2163,10 +2118,10 @@
 		return -EINVAL;
 
 	diag_data = (void *)(request_data + karg.starting_offset);
-	dctlprintk(ioc, pr_info(MPT3SAS_FMT
-		"%s: diag_buffer(%p), offset(%d), sz(%d)\n",
-		ioc->name, __func__,
-	    diag_data, karg.starting_offset, karg.bytes_to_read));
+	dctlprintk(ioc,
+		   ioc_info(ioc, "%s: diag_buffer(%p), offset(%d), sz(%d)\n",
+			    __func__, diag_data, karg.starting_offset,
+			    karg.bytes_to_read));
 
 	/* Truncate data on requests that are too large */
 	if ((diag_data + karg.bytes_to_read < diag_data) ||
@@ -2177,39 +2132,36 @@
 
 	if (copy_to_user((void __user *)uarg->diagnostic_data,
 	    diag_data, copy_size)) {
-		pr_err(MPT3SAS_FMT
-			"%s: Unable to write mpt_diag_read_buffer_t data @ %p\n",
-			ioc->name, __func__, diag_data);
+		ioc_err(ioc, "%s: Unable to write mpt_diag_read_buffer_t data @ %p\n",
+			__func__, diag_data);
 		return -EFAULT;
 	}
 
 	if ((karg.flags & MPT3_FLAGS_REREGISTER) == 0)
 		return 0;
 
-	dctlprintk(ioc, pr_info(MPT3SAS_FMT
-		"%s: Reregister buffer_type(0x%02x)\n",
-		ioc->name, __func__, buffer_type));
+	dctlprintk(ioc,
+		   ioc_info(ioc, "%s: Reregister buffer_type(0x%02x)\n",
+			    __func__, buffer_type));
 	if ((ioc->diag_buffer_status[buffer_type] &
 	    MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
-		dctlprintk(ioc, pr_info(MPT3SAS_FMT
-			"%s: buffer_type(0x%02x) is still registered\n",
-			ioc->name, __func__, buffer_type));
+		dctlprintk(ioc,
+			   ioc_info(ioc, "%s: buffer_type(0x%02x) is still registered\n",
+				    __func__, buffer_type));
 		return 0;
 	}
 	/* Get a free request frame and save the message context.
 	*/
 
 	if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
-		pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
 		rc = -EAGAIN;
 		goto out;
 	}
 
 	smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
 	if (!smid) {
-		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
 		rc = -EAGAIN;
 		goto out;
 	}
@@ -2233,7 +2185,7 @@
 	mpi_request->VP_ID = 0;
 
 	init_completion(&ioc->ctl_cmds.done);
-	mpt3sas_base_put_smid_default(ioc, smid);
+	ioc->put_smid_default(ioc, smid);
 	wait_for_completion_timeout(&ioc->ctl_cmds.done,
 	    MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
 
@@ -2247,8 +2199,7 @@
 
 	/* process the completed Reply Message Frame */
 	if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
-		pr_err(MPT3SAS_FMT "%s: no reply message\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: no reply message\n", __func__);
 		rc = -EFAULT;
 		goto out;
 	}
@@ -2259,13 +2210,11 @@
 	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
 		ioc->diag_buffer_status[buffer_type] |=
 		    MPT3_DIAG_BUFFER_IS_REGISTERED;
-		dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
-		    ioc->name, __func__));
+		dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__));
 	} else {
-		pr_info(MPT3SAS_FMT
-			"%s: ioc_status(0x%04x) log_info(0x%08x)\n",
-			ioc->name, __func__,
-		    ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
+		ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
+			 __func__, ioc_status,
+			 le32_to_cpu(mpi_reply->IOCLogInfo));
 		rc = -EFAULT;
 	}
 
@@ -2404,6 +2353,10 @@
 			break;
 		}
 
+		if (karg.hdr.ioc_number != ioctl_header.ioc_number) {
+			ret = -EINVAL;
+			break;
+		}
 		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_command)) {
 			uarg = arg;
 			ret = _ctl_do_mpt_command(ioc, karg, &uarg->mf);
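
The new ioc_number check above rejects commands whose embedded header disagrees with the header fetched earlier, the classic double-fetch guard: the header is copied from user space once for routing and again as part of the full command, and a racing user thread could change it between the two reads. A minimal sketch of the guard (hypothetical types and names):

#include <linux/uaccess.h>

struct example_hdr { int ioc_number; };

static long example_ioctl_check(void __user *arg)
{
	struct example_hdr first, second;

	if (copy_from_user(&first, arg, sizeof(first)))
		return -EFAULT;
	/* ... route/validate based on "first" ... */
	if (copy_from_user(&second, arg, sizeof(second)))
		return -EFAULT;
	/* Reject if user memory changed between the two fetches. */
	if (second.ioc_number != first.ioc_number)
		return -EINVAL;
	return 0;
}
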
@@ -2450,8 +2403,9 @@
 			ret = _ctl_diag_read_buffer(ioc, arg);
 		break;
 	default:
-		dctlprintk(ioc, pr_info(MPT3SAS_FMT
-		    "unsupported ioctl opcode(0x%08x)\n", ioc->name, cmd));
+		dctlprintk(ioc,
+			   ioc_info(ioc, "unsupported ioctl opcode(0x%08x)\n",
+				    cmd));
 		break;
 	}
 
@@ -2537,7 +2491,7 @@
 
 /* scsi host attributes */
 /**
- * _ctl_version_fw_show - firmware version
+ * version_fw_show - firmware version
  * @cdev: pointer to embedded class device
  * @attr: ?
  * @buf: the buffer returned
@@ -2545,7 +2499,7 @@
  * A sysfs 'read-only' shost attribute.
  */
 static ssize_t
-_ctl_version_fw_show(struct device *cdev, struct device_attribute *attr,
+version_fw_show(struct device *cdev, struct device_attribute *attr,
 	char *buf)
 {
 	struct Scsi_Host *shost = class_to_shost(cdev);
@@ -2557,10 +2511,10 @@
 	    (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
 	    ioc->facts.FWVersion.Word & 0x000000FF);
 }
-static DEVICE_ATTR(version_fw, S_IRUGO, _ctl_version_fw_show, NULL);
+static DEVICE_ATTR_RO(version_fw);
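
Every DEVICE_ATTR(..., S_IRUGO, ..., NULL) in this file collapses to DEVICE_ATTR_RO(), which only works because the show routines drop their _ctl_ prefix: the macro derives the callback name from the attribute name by token pasting. A sketch with a hypothetical attribute, assuming the usual driver headers are already included:

#include <linux/device.h>

/* DEVICE_ATTR_RO(foo) expands (roughly) to:
 *   struct device_attribute dev_attr_foo = __ATTR(foo, 0444, foo_show, NULL)
 * so the show callback must be named foo_show. */
static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", 42);
}
static DEVICE_ATTR_RO(foo);

DEVICE_ATTR_RW() is the same idea for read/write attributes, pairing <attr>_show with <attr>_store at mode 0644.
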
 
 /**
- * _ctl_version_bios_show - bios version
+ * version_bios_show - bios version
  * @cdev: pointer to embedded class device
  * @attr: ?
  * @buf: the buffer returned
@@ -2568,7 +2522,7 @@
  * A sysfs 'read-only' shost attribute.
  */
 static ssize_t
-_ctl_version_bios_show(struct device *cdev, struct device_attribute *attr,
+version_bios_show(struct device *cdev, struct device_attribute *attr,
 	char *buf)
 {
 	struct Scsi_Host *shost = class_to_shost(cdev);
@@ -2582,10 +2536,10 @@
 	    (version & 0x0000FF00) >> 8,
 	    version & 0x000000FF);
 }
-static DEVICE_ATTR(version_bios, S_IRUGO, _ctl_version_bios_show, NULL);
+static DEVICE_ATTR_RO(version_bios);
 
 /**
- * _ctl_version_mpi_show - MPI (message passing interface) version
+ * version_mpi_show - MPI (message passing interface) version
  * @cdev: pointer to embedded class device
  * @attr: ?
  * @buf: the buffer returned
@@ -2593,7 +2547,7 @@
  * A sysfs 'read-only' shost attribute.
  */
 static ssize_t
-_ctl_version_mpi_show(struct device *cdev, struct device_attribute *attr,
+version_mpi_show(struct device *cdev, struct device_attribute *attr,
 	char *buf)
 {
 	struct Scsi_Host *shost = class_to_shost(cdev);
@@ -2602,10 +2556,10 @@
 	return snprintf(buf, PAGE_SIZE, "%03x.%02x\n",
 	    ioc->facts.MsgVersion, ioc->facts.HeaderVersion >> 8);
 }
-static DEVICE_ATTR(version_mpi, S_IRUGO, _ctl_version_mpi_show, NULL);
+static DEVICE_ATTR_RO(version_mpi);
 
 /**
- * _ctl_version_product_show - product name
+ * version_product_show - product name
  * @cdev: pointer to embedded class device
  * @attr: ?
  * @buf: the buffer returned
@@ -2613,7 +2567,7 @@
  * A sysfs 'read-only' shost attribute.
  */
 static ssize_t
-_ctl_version_product_show(struct device *cdev, struct device_attribute *attr,
+version_product_show(struct device *cdev, struct device_attribute *attr,
 	char *buf)
 {
 	struct Scsi_Host *shost = class_to_shost(cdev);
@@ -2621,10 +2575,10 @@
 
 	return snprintf(buf, 16, "%s\n", ioc->manu_pg0.ChipName);
 }
-static DEVICE_ATTR(version_product, S_IRUGO, _ctl_version_product_show, NULL);
+static DEVICE_ATTR_RO(version_product);
 
 /**
- * _ctl_version_nvdata_persistent_show - ndvata persistent version
+ * version_nvdata_persistent_show - nvdata persistent version
  * @cdev: pointer to embedded class device
  * @attr: ?
  * @buf: the buffer returned
@@ -2632,7 +2586,7 @@
  * A sysfs 'read-only' shost attribute.
  */
 static ssize_t
-_ctl_version_nvdata_persistent_show(struct device *cdev,
+version_nvdata_persistent_show(struct device *cdev,
 	struct device_attribute *attr, char *buf)
 {
 	struct Scsi_Host *shost = class_to_shost(cdev);
@@ -2641,11 +2595,10 @@
 	return snprintf(buf, PAGE_SIZE, "%08xh\n",
 	    le32_to_cpu(ioc->iounit_pg0.NvdataVersionPersistent.Word));
 }
-static DEVICE_ATTR(version_nvdata_persistent, S_IRUGO,
-	_ctl_version_nvdata_persistent_show, NULL);
+static DEVICE_ATTR_RO(version_nvdata_persistent);
 
 /**
- * _ctl_version_nvdata_default_show - nvdata default version
+ * version_nvdata_default_show - nvdata default version
  * @cdev: pointer to embedded class device
  * @attr: ?
  * @buf: the buffer returned
@@ -2653,7 +2606,7 @@
  * A sysfs 'read-only' shost attribute.
  */
 static ssize_t
-_ctl_version_nvdata_default_show(struct device *cdev, struct device_attribute
+version_nvdata_default_show(struct device *cdev, struct device_attribute
 	*attr, char *buf)
 {
 	struct Scsi_Host *shost = class_to_shost(cdev);
@@ -2662,11 +2615,10 @@
 	return snprintf(buf, PAGE_SIZE, "%08xh\n",
 	    le32_to_cpu(ioc->iounit_pg0.NvdataVersionDefault.Word));
 }
-static DEVICE_ATTR(version_nvdata_default, S_IRUGO,
-	_ctl_version_nvdata_default_show, NULL);
+static DEVICE_ATTR_RO(version_nvdata_default);
 
 /**
- * _ctl_board_name_show - board name
+ * board_name_show - board name
  * @cdev: pointer to embedded class device
  * @attr: ?
  * @buf: the buffer returned
@@ -2674,7 +2626,7 @@
  * A sysfs 'read-only' shost attribute.
  */
 static ssize_t
-_ctl_board_name_show(struct device *cdev, struct device_attribute *attr,
+board_name_show(struct device *cdev, struct device_attribute *attr,
 	char *buf)
 {
 	struct Scsi_Host *shost = class_to_shost(cdev);
@@ -2682,10 +2634,10 @@
 
 	return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardName);
 }
-static DEVICE_ATTR(board_name, S_IRUGO, _ctl_board_name_show, NULL);
+static DEVICE_ATTR_RO(board_name);
 
 /**
- * _ctl_board_assembly_show - board assembly name
+ * board_assembly_show - board assembly name
  * @cdev: pointer to embedded class device
  * @attr: ?
  * @buf: the buffer returned
@@ -2693,7 +2645,7 @@
  * A sysfs 'read-only' shost attribute.
  */
 static ssize_t
-_ctl_board_assembly_show(struct device *cdev, struct device_attribute *attr,
+board_assembly_show(struct device *cdev, struct device_attribute *attr,
 	char *buf)
 {
 	struct Scsi_Host *shost = class_to_shost(cdev);
@@ -2701,10 +2653,10 @@
 
 	return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardAssembly);
 }
-static DEVICE_ATTR(board_assembly, S_IRUGO, _ctl_board_assembly_show, NULL);
+static DEVICE_ATTR_RO(board_assembly);
 
 /**
- * _ctl_board_tracer_show - board tracer number
+ * board_tracer_show - board tracer number
  * @cdev: pointer to embedded class device
  * @attr: ?
  * @buf: the buffer returned
@@ -2712,7 +2664,7 @@
  * A sysfs 'read-only' shost attribute.
  */
 static ssize_t
-_ctl_board_tracer_show(struct device *cdev, struct device_attribute *attr,
+board_tracer_show(struct device *cdev, struct device_attribute *attr,
 	char *buf)
 {
 	struct Scsi_Host *shost = class_to_shost(cdev);
@@ -2720,10 +2672,10 @@
 
 	return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardTracerNumber);
 }
-static DEVICE_ATTR(board_tracer, S_IRUGO, _ctl_board_tracer_show, NULL);
+static DEVICE_ATTR_RO(board_tracer);
 
 /**
- * _ctl_io_delay_show - io missing delay
+ * io_delay_show - io missing delay
  * @cdev: pointer to embedded class device
  * @attr: ?
  * @buf: the buffer returned
@@ -2734,7 +2686,7 @@
  * A sysfs 'read-only' shost attribute.
  */
 static ssize_t
-_ctl_io_delay_show(struct device *cdev, struct device_attribute *attr,
+io_delay_show(struct device *cdev, struct device_attribute *attr,
 	char *buf)
 {
 	struct Scsi_Host *shost = class_to_shost(cdev);
@@ -2742,10 +2694,10 @@
 
 	return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->io_missing_delay);
 }
-static DEVICE_ATTR(io_delay, S_IRUGO, _ctl_io_delay_show, NULL);
+static DEVICE_ATTR_RO(io_delay);
 
 /**
- * _ctl_device_delay_show - device missing delay
+ * device_delay_show - device missing delay
  * @cdev: pointer to embedded class device
  * @attr: ?
  * @buf: the buffer returned
@@ -2756,7 +2708,7 @@
  * A sysfs 'read-only' shost attribute.
  */
 static ssize_t
-_ctl_device_delay_show(struct device *cdev, struct device_attribute *attr,
+device_delay_show(struct device *cdev, struct device_attribute *attr,
 	char *buf)
 {
 	struct Scsi_Host *shost = class_to_shost(cdev);
@@ -2764,10 +2716,10 @@
 
 	return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->device_missing_delay);
 }
-static DEVICE_ATTR(device_delay, S_IRUGO, _ctl_device_delay_show, NULL);
+static DEVICE_ATTR_RO(device_delay);
 
 /**
- * _ctl_fw_queue_depth_show - global credits
+ * fw_queue_depth_show - global credits
  * @cdev: pointer to embedded class device
  * @attr: ?
  * @buf: the buffer returned
@@ -2777,7 +2729,7 @@
  * A sysfs 'read-only' shost attribute.
  */
 static ssize_t
-_ctl_fw_queue_depth_show(struct device *cdev, struct device_attribute *attr,
+fw_queue_depth_show(struct device *cdev, struct device_attribute *attr,
 	char *buf)
 {
 	struct Scsi_Host *shost = class_to_shost(cdev);
@@ -2785,10 +2737,10 @@
 
 	return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->facts.RequestCredit);
 }
-static DEVICE_ATTR(fw_queue_depth, S_IRUGO, _ctl_fw_queue_depth_show, NULL);
+static DEVICE_ATTR_RO(fw_queue_depth);
 
 /**
- * _ctl_sas_address_show - sas address
+ * host_sas_address_show - sas address
  * @cdev: pointer to embedded class device
  * @attr: ?
  * @buf: the buffer returned
@@ -2798,7 +2750,7 @@
  * A sysfs 'read-only' shost attribute.
  */
 static ssize_t
-_ctl_host_sas_address_show(struct device *cdev, struct device_attribute *attr,
+host_sas_address_show(struct device *cdev, struct device_attribute *attr,
 	char *buf)
 
 {
@@ -2808,11 +2760,10 @@
 	return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
 	    (unsigned long long)ioc->sas_hba.sas_address);
 }
-static DEVICE_ATTR(host_sas_address, S_IRUGO,
-	_ctl_host_sas_address_show, NULL);
+static DEVICE_ATTR_RO(host_sas_address);
 
 /**
- * _ctl_logging_level_show - logging level
+ * logging_level_show - logging level
  * @cdev: pointer to embedded class device
  * @attr: ?
  * @buf: the buffer returned
@@ -2820,7 +2771,7 @@
  * A sysfs 'read/write' shost attribute.
  */
 static ssize_t
-_ctl_logging_level_show(struct device *cdev, struct device_attribute *attr,
+logging_level_show(struct device *cdev, struct device_attribute *attr,
 	char *buf)
 {
 	struct Scsi_Host *shost = class_to_shost(cdev);
@@ -2829,7 +2780,7 @@
 	return snprintf(buf, PAGE_SIZE, "%08xh\n", ioc->logging_level);
 }
 static ssize_t
-_ctl_logging_level_store(struct device *cdev, struct device_attribute *attr,
+logging_level_store(struct device *cdev, struct device_attribute *attr,
 	const char *buf, size_t count)
 {
 	struct Scsi_Host *shost = class_to_shost(cdev);
@@ -2840,15 +2791,14 @@
 		return -EINVAL;
 
 	ioc->logging_level = val;
-	pr_info(MPT3SAS_FMT "logging_level=%08xh\n", ioc->name,
-	    ioc->logging_level);
+	ioc_info(ioc, "logging_level=%08xh\n",
+		 ioc->logging_level);
 	return strlen(buf);
 }
-static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR, _ctl_logging_level_show,
-	_ctl_logging_level_store);
+static DEVICE_ATTR_RW(logging_level);
 
 /**
- * _ctl_fwfault_debug_show - show/store fwfault_debug
+ * fwfault_debug_show - show/store fwfault_debug
  * @cdev: pointer to embedded class device
  * @attr: ?
  * @buf: the buffer returned
@@ -2857,7 +2807,7 @@
  * A sysfs 'read/write' shost attribute.
  */
 static ssize_t
-_ctl_fwfault_debug_show(struct device *cdev, struct device_attribute *attr,
+fwfault_debug_show(struct device *cdev, struct device_attribute *attr,
 	char *buf)
 {
 	struct Scsi_Host *shost = class_to_shost(cdev);
@@ -2866,7 +2816,7 @@
 	return snprintf(buf, PAGE_SIZE, "%d\n", ioc->fwfault_debug);
 }
 static ssize_t
-_ctl_fwfault_debug_store(struct device *cdev, struct device_attribute *attr,
+fwfault_debug_store(struct device *cdev, struct device_attribute *attr,
 	const char *buf, size_t count)
 {
 	struct Scsi_Host *shost = class_to_shost(cdev);
@@ -2877,15 +2827,14 @@
 		return -EINVAL;
 
 	ioc->fwfault_debug = val;
-	pr_info(MPT3SAS_FMT "fwfault_debug=%d\n", ioc->name,
-	    ioc->fwfault_debug);
+	ioc_info(ioc, "fwfault_debug=%d\n",
+		 ioc->fwfault_debug);
 	return strlen(buf);
 }
-static DEVICE_ATTR(fwfault_debug, S_IRUGO | S_IWUSR,
-	_ctl_fwfault_debug_show, _ctl_fwfault_debug_store);
+static DEVICE_ATTR_RW(fwfault_debug);
 
 /**
- * _ctl_ioc_reset_count_show - ioc reset count
+ * ioc_reset_count_show - ioc reset count
  * @cdev: pointer to embedded class device
  * @attr: ?
  * @buf: the buffer returned
@@ -2895,7 +2844,7 @@
  * A sysfs 'read-only' shost attribute.
  */
 static ssize_t
-_ctl_ioc_reset_count_show(struct device *cdev, struct device_attribute *attr,
+ioc_reset_count_show(struct device *cdev, struct device_attribute *attr,
 	char *buf)
 {
 	struct Scsi_Host *shost = class_to_shost(cdev);
@@ -2903,10 +2852,10 @@
 
 	return snprintf(buf, PAGE_SIZE, "%d\n", ioc->ioc_reset_count);
 }
-static DEVICE_ATTR(ioc_reset_count, S_IRUGO, _ctl_ioc_reset_count_show, NULL);
+static DEVICE_ATTR_RO(ioc_reset_count);
 
 /**
- * _ctl_ioc_reply_queue_count_show - number of reply queues
+ * reply_queue_count_show - number of reply queues
  * @cdev: pointer to embedded class device
  * @attr: ?
  * @buf: the buffer returned
@@ -2916,7 +2865,7 @@
  * A sysfs 'read-only' shost attribute.
  */
 static ssize_t
-_ctl_ioc_reply_queue_count_show(struct device *cdev,
+reply_queue_count_show(struct device *cdev,
 	struct device_attribute *attr, char *buf)
 {
 	u8 reply_queue_count;
@@ -2931,11 +2880,10 @@
 
 	return snprintf(buf, PAGE_SIZE, "%d\n", reply_queue_count);
 }
-static DEVICE_ATTR(reply_queue_count, S_IRUGO, _ctl_ioc_reply_queue_count_show,
-	NULL);
+static DEVICE_ATTR_RO(reply_queue_count);
 
 /**
- * _ctl_BRM_status_show - Backup Rail Monitor Status
+ * BRM_status_show - Backup Rail Monitor Status
  * @cdev: pointer to embedded class device
  * @attr: ?
  * @buf: the buffer returned
@@ -2945,7 +2893,7 @@
  * A sysfs 'read-only' shost attribute.
  */
 static ssize_t
-_ctl_BRM_status_show(struct device *cdev, struct device_attribute *attr,
+BRM_status_show(struct device *cdev, struct device_attribute *attr,
 	char *buf)
 {
 	struct Scsi_Host *shost = class_to_shost(cdev);
@@ -2958,8 +2906,8 @@
 	ssize_t rc = 0;
 
 	if (!ioc->is_warpdrive) {
-		pr_err(MPT3SAS_FMT "%s: BRM attribute is only for"
-		    " warpdrive\n", ioc->name, __func__);
+		ioc_err(ioc, "%s: BRM attribute is only for warpdrive\n",
+			__func__);
 		goto out;
 	}
 	/* pci_access_mutex lock acquired by sysfs show path */
@@ -2973,30 +2921,28 @@
 	sz = offsetof(Mpi2IOUnitPage3_t, GPIOVal) + (sizeof(u16) * 36);
 	io_unit_pg3 = kzalloc(sz, GFP_KERNEL);
 	if (!io_unit_pg3) {
-		pr_err(MPT3SAS_FMT "%s: failed allocating memory "
-		    "for iounit_pg3: (%d) bytes\n", ioc->name, __func__, sz);
+		ioc_err(ioc, "%s: failed allocating memory for iounit_pg3: (%d) bytes\n",
+			__func__, sz);
 		goto out;
 	}
 
 	if (mpt3sas_config_get_iounit_pg3(ioc, &mpi_reply, io_unit_pg3, sz) !=
 	    0) {
-		pr_err(MPT3SAS_FMT
-		    "%s: failed reading iounit_pg3\n", ioc->name,
-		    __func__);
+		ioc_err(ioc, "%s: failed reading iounit_pg3\n",
+			__func__);
 		goto out;
 	}
 
 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-		pr_err(MPT3SAS_FMT "%s: iounit_pg3 failed with "
-		    "ioc_status(0x%04x)\n", ioc->name, __func__, ioc_status);
+		ioc_err(ioc, "%s: iounit_pg3 failed with ioc_status(0x%04x)\n",
+			__func__, ioc_status);
 		goto out;
 	}
 
 	if (io_unit_pg3->GPIOCount < 25) {
-		pr_err(MPT3SAS_FMT "%s: iounit_pg3->GPIOCount less than "
-		     "25 entries, detected (%d) entries\n", ioc->name, __func__,
-		    io_unit_pg3->GPIOCount);
+		ioc_err(ioc, "%s: iounit_pg3->GPIOCount less than 25 entries, detected (%d) entries\n",
+			__func__, io_unit_pg3->GPIOCount);
 		goto out;
 	}
 
@@ -3009,7 +2955,7 @@
 	mutex_unlock(&ioc->pci_access_mutex);
 	return rc;
 }
-static DEVICE_ATTR(BRM_status, S_IRUGO, _ctl_BRM_status_show, NULL);
+static DEVICE_ATTR_RO(BRM_status);
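
BRM_status_show above sizes its config-page buffer with offsetof() plus a fixed element count because IO unit page 3 ends in a variable-length array. A sketch of that sizing idiom (types are illustrative):

#include <linux/stddef.h>
#include <linux/types.h>

struct example_page {
	u32 header;
	u16 gpio_count;
	u16 gpio_val[];		/* flexible array member */
};

static size_t example_page_size(unsigned int nr_gpios)
{
	/* "everything before the array" plus nr_gpios elements */
	return offsetof(struct example_page, gpio_val) +
	       nr_gpios * sizeof(u16);
}
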
 
 struct DIAG_BUFFER_START {
 	__le32	Size;
@@ -3022,7 +2968,7 @@
 };
 
 /**
- * _ctl_host_trace_buffer_size_show - host buffer size (trace only)
+ * host_trace_buffer_size_show - host buffer size (trace only)
  * @cdev: pointer to embedded class device
  * @attr: ?
  * @buf: the buffer returned
@@ -3030,7 +2976,7 @@
  * A sysfs 'read-only' shost attribute.
  */
 static ssize_t
-_ctl_host_trace_buffer_size_show(struct device *cdev,
+host_trace_buffer_size_show(struct device *cdev,
 	struct device_attribute *attr, char *buf)
 {
 	struct Scsi_Host *shost = class_to_shost(cdev);
@@ -3039,17 +2985,15 @@
 	struct DIAG_BUFFER_START *request_data;
 
 	if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
-		pr_err(MPT3SAS_FMT
-			"%s: host_trace_buffer is not registered\n",
-			ioc->name, __func__);
+		ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
+			__func__);
 		return 0;
 	}
 
 	if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
 	    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
-		pr_err(MPT3SAS_FMT
-			"%s: host_trace_buffer is not registered\n",
-			ioc->name, __func__);
+		ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
+			__func__);
 		return 0;
 	}
 
@@ -3064,11 +3008,10 @@
 	ioc->ring_buffer_sz = size;
 	return snprintf(buf, PAGE_SIZE, "%d\n", size);
 }
-static DEVICE_ATTR(host_trace_buffer_size, S_IRUGO,
-	_ctl_host_trace_buffer_size_show, NULL);
+static DEVICE_ATTR_RO(host_trace_buffer_size);
 
 /**
- * _ctl_host_trace_buffer_show - firmware ring buffer (trace only)
+ * host_trace_buffer_show - firmware ring buffer (trace only)
  * @cdev: pointer to embedded class device
  * @attr: ?
  * @buf: the buffer returned
@@ -3080,7 +3023,7 @@
  * offset to the same attribute, it will move the pointer.
  */
 static ssize_t
-_ctl_host_trace_buffer_show(struct device *cdev, struct device_attribute *attr,
+host_trace_buffer_show(struct device *cdev, struct device_attribute *attr,
 	char *buf)
 {
 	struct Scsi_Host *shost = class_to_shost(cdev);
@@ -3089,17 +3032,15 @@
 	u32 size;
 
 	if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
-		pr_err(MPT3SAS_FMT
-			"%s: host_trace_buffer is not registered\n",
-			ioc->name, __func__);
+		ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
+			__func__);
 		return 0;
 	}
 
 	if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
 	    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
-		pr_err(MPT3SAS_FMT
-			"%s: host_trace_buffer is not registered\n",
-			ioc->name, __func__);
+		ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
+			__func__);
 		return 0;
 	}
 
@@ -3114,7 +3055,7 @@
 }
 
 static ssize_t
-_ctl_host_trace_buffer_store(struct device *cdev, struct device_attribute *attr,
+host_trace_buffer_store(struct device *cdev, struct device_attribute *attr,
 	const char *buf, size_t count)
 {
 	struct Scsi_Host *shost = class_to_shost(cdev);
@@ -3127,14 +3068,13 @@
 	ioc->ring_buffer_offset = val;
 	return strlen(buf);
 }
-static DEVICE_ATTR(host_trace_buffer, S_IRUGO | S_IWUSR,
-	_ctl_host_trace_buffer_show, _ctl_host_trace_buffer_store);
+static DEVICE_ATTR_RW(host_trace_buffer);
 
 
 /*****************************************/
 
 /**
- * _ctl_host_trace_buffer_enable_show - firmware ring buffer (trace only)
+ * host_trace_buffer_enable_show - firmware ring buffer (trace only)
  * @cdev: pointer to embedded class device
  * @attr: ?
  * @buf: the buffer returned
@@ -3144,7 +3084,7 @@
 * This is a mechanism to post/release host_trace_buffers
  */
 static ssize_t
-_ctl_host_trace_buffer_enable_show(struct device *cdev,
+host_trace_buffer_enable_show(struct device *cdev,
 	struct device_attribute *attr, char *buf)
 {
 	struct Scsi_Host *shost = class_to_shost(cdev);
@@ -3162,7 +3102,7 @@
 }
 
 static ssize_t
-_ctl_host_trace_buffer_enable_store(struct device *cdev,
+host_trace_buffer_enable_store(struct device *cdev,
 	struct device_attribute *attr, const char *buf, size_t count)
 {
 	struct Scsi_Host *shost = class_to_shost(cdev);
@@ -3188,8 +3128,7 @@
 		    MPT3_DIAG_BUFFER_IS_RELEASED) == 0))
 			goto out;
 		memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
-		pr_info(MPT3SAS_FMT "posting host trace buffers\n",
-		    ioc->name);
+		ioc_info(ioc, "posting host trace buffers\n");
 		diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
 		diag_register.requested_buffer_size = (1024 * 1024);
 		diag_register.unique_id = 0x7075900;
@@ -3205,8 +3144,7 @@
 		if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
 		    MPT3_DIAG_BUFFER_IS_RELEASED))
 			goto out;
-		pr_info(MPT3SAS_FMT "releasing host trace buffer\n",
-		    ioc->name);
+		ioc_info(ioc, "releasing host trace buffer\n");
 		mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE,
 		    &issue_reset);
 	}
@@ -3214,14 +3152,12 @@
  out:
 	return strlen(buf);
 }
-static DEVICE_ATTR(host_trace_buffer_enable, S_IRUGO | S_IWUSR,
-	_ctl_host_trace_buffer_enable_show,
-	_ctl_host_trace_buffer_enable_store);
+static DEVICE_ATTR_RW(host_trace_buffer_enable);
 
/*********** diagnostic trigger support *********************************/
 
 /**
- * _ctl_diag_trigger_master_show - show the diag_trigger_master attribute
+ * diag_trigger_master_show - show the diag_trigger_master attribute
  * @cdev: pointer to embedded class device
  * @attr: ?
  * @buf: the buffer returned
@@ -3229,7 +3165,7 @@
  * A sysfs 'read/write' shost attribute.
  */
 static ssize_t
-_ctl_diag_trigger_master_show(struct device *cdev,
+diag_trigger_master_show(struct device *cdev,
 	struct device_attribute *attr, char *buf)
 
 {
@@ -3246,7 +3182,7 @@
 }
 
 /**
- * _ctl_diag_trigger_master_store - store the diag_trigger_master attribute
+ * diag_trigger_master_store - store the diag_trigger_master attribute
  * @cdev: pointer to embedded class device
  * @attr: ?
  * @buf: the buffer returned
@@ -3255,7 +3191,7 @@
  * A sysfs 'read/write' shost attribute.
  */
 static ssize_t
-_ctl_diag_trigger_master_store(struct device *cdev,
+diag_trigger_master_store(struct device *cdev,
 	struct device_attribute *attr, const char *buf, size_t count)
 
 {
@@ -3274,12 +3210,11 @@
 	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
 	return rc;
 }
-static DEVICE_ATTR(diag_trigger_master, S_IRUGO | S_IWUSR,
-	_ctl_diag_trigger_master_show, _ctl_diag_trigger_master_store);
+static DEVICE_ATTR_RW(diag_trigger_master);
 
 
 /**
- * _ctl_diag_trigger_event_show - show the diag_trigger_event attribute
+ * diag_trigger_event_show - show the diag_trigger_event attribute
  * @cdev: pointer to embedded class device
  * @attr: ?
  * @buf: the buffer returned
@@ -3287,7 +3222,7 @@
  * A sysfs 'read/write' shost attribute.
  */
 static ssize_t
-_ctl_diag_trigger_event_show(struct device *cdev,
+diag_trigger_event_show(struct device *cdev,
 	struct device_attribute *attr, char *buf)
 {
 	struct Scsi_Host *shost = class_to_shost(cdev);
@@ -3303,7 +3238,7 @@
 }
 
 /**
- * _ctl_diag_trigger_event_store - store the diag_trigger_event attribute
+ * diag_trigger_event_store - store the diag_trigger_event attribute
  * @cdev: pointer to embedded class device
  * @attr: ?
  * @buf: the buffer returned
@@ -3312,7 +3247,7 @@
  * A sysfs 'read/write' shost attribute.
  */
 static ssize_t
-_ctl_diag_trigger_event_store(struct device *cdev,
+diag_trigger_event_store(struct device *cdev,
 	struct device_attribute *attr, const char *buf, size_t count)
 
 {
@@ -3331,12 +3266,11 @@
 	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
 	return sz;
 }
-static DEVICE_ATTR(diag_trigger_event, S_IRUGO | S_IWUSR,
-	_ctl_diag_trigger_event_show, _ctl_diag_trigger_event_store);
+static DEVICE_ATTR_RW(diag_trigger_event);
 
 
 /**
- * _ctl_diag_trigger_scsi_show - show the diag_trigger_scsi attribute
+ * diag_trigger_scsi_show - show the diag_trigger_scsi attribute
  * @cdev: pointer to embedded class device
  * @attr: ?
  * @buf: the buffer returned
@@ -3344,7 +3278,7 @@
  * A sysfs 'read/write' shost attribute.
  */
 static ssize_t
-_ctl_diag_trigger_scsi_show(struct device *cdev,
+diag_trigger_scsi_show(struct device *cdev,
 	struct device_attribute *attr, char *buf)
 {
 	struct Scsi_Host *shost = class_to_shost(cdev);
@@ -3360,7 +3294,7 @@
 }
 
 /**
- * _ctl_diag_trigger_scsi_store - store the diag_trigger_scsi attribute
+ * diag_trigger_scsi_store - store the diag_trigger_scsi attribute
  * @cdev: pointer to embedded class device
  * @attr: ?
  * @buf: the buffer returned
@@ -3369,7 +3303,7 @@
  * A sysfs 'read/write' shost attribute.
  */
 static ssize_t
-_ctl_diag_trigger_scsi_store(struct device *cdev,
+diag_trigger_scsi_store(struct device *cdev,
 	struct device_attribute *attr, const char *buf, size_t count)
 {
 	struct Scsi_Host *shost = class_to_shost(cdev);
@@ -3378,21 +3312,19 @@
 	ssize_t sz;
 
 	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
-	sz = min(sizeof(struct SL_WH_SCSI_TRIGGERS_T), count);
-	memset(&ioc->diag_trigger_scsi, 0,
-	    sizeof(struct SL_WH_EVENT_TRIGGERS_T));
+	sz = min(sizeof(ioc->diag_trigger_scsi), count);
+	memset(&ioc->diag_trigger_scsi, 0, sizeof(ioc->diag_trigger_scsi));
 	memcpy(&ioc->diag_trigger_scsi, buf, sz);
 	if (ioc->diag_trigger_scsi.ValidEntries > NUM_VALID_ENTRIES)
 		ioc->diag_trigger_scsi.ValidEntries = NUM_VALID_ENTRIES;
 	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
 	return sz;
 }
-static DEVICE_ATTR(diag_trigger_scsi, S_IRUGO | S_IWUSR,
-	_ctl_diag_trigger_scsi_show, _ctl_diag_trigger_scsi_store);
+static DEVICE_ATTR_RW(diag_trigger_scsi);
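
Beyond the rename, the diag_trigger_scsi_store() hunk fixes a latent sizeof-type mismatch: the old code zeroed sizeof(struct SL_WH_EVENT_TRIGGERS_T) bytes of a SCSI-triggers object. Taking sizeof on the object itself makes the length track the variable's real type; a minimal illustration with hypothetical structs:

#include <linux/string.h>

struct event_triggers { int entries[8]; };
struct scsi_triggers  { int entries[32]; };

static void example(struct scsi_triggers *t)
{
	/* memset(t, 0, sizeof(struct event_triggers)) compiles but zeroes
	 * too few bytes; sizeof(*t) always matches the object. */
	memset(t, 0, sizeof(*t));
}
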
 
 
 /**
- * _ctl_diag_trigger_scsi_show - show the diag_trigger_mpi attribute
+ * diag_trigger_mpi_show - show the diag_trigger_mpi attribute
  * @cdev: pointer to embedded class device
  * @attr: ?
  * @buf: the buffer returned
@@ -3400,7 +3332,7 @@
  * A sysfs 'read/write' shost attribute.
  */
 static ssize_t
-_ctl_diag_trigger_mpi_show(struct device *cdev,
+diag_trigger_mpi_show(struct device *cdev,
 	struct device_attribute *attr, char *buf)
 {
 	struct Scsi_Host *shost = class_to_shost(cdev);
@@ -3416,7 +3348,7 @@
 }
 
 /**
- * _ctl_diag_trigger_mpi_store - store the diag_trigger_mpi attribute
+ * diag_trigger_mpi_store - store the diag_trigger_mpi attribute
  * @cdev: pointer to embedded class device
  * @attr: ?
  * @buf: the buffer returned
@@ -3425,7 +3357,7 @@
  * A sysfs 'read/write' shost attribute.
  */
 static ssize_t
-_ctl_diag_trigger_mpi_store(struct device *cdev,
+diag_trigger_mpi_store(struct device *cdev,
 	struct device_attribute *attr, const char *buf, size_t count)
 {
 	struct Scsi_Host *shost = class_to_shost(cdev);
@@ -3444,13 +3376,131 @@
 	return sz;
 }
 
-static DEVICE_ATTR(diag_trigger_mpi, S_IRUGO | S_IWUSR,
-	_ctl_diag_trigger_mpi_show, _ctl_diag_trigger_mpi_store);
+static DEVICE_ATTR_RW(diag_trigger_mpi);
 
/*********** diagnostic trigger support *** END ****************************/
 
 /*****************************************/
 
+/**
+ * drv_support_bitmap_show - driver supported feature bitmap
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+drv_support_bitmap_show(struct device *cdev,
+	struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+	return snprintf(buf, PAGE_SIZE, "0x%08x\n", ioc->drv_support_bitmap);
+}
+static DEVICE_ATTR_RO(drv_support_bitmap);
+
+/**
+ * enable_sdev_max_qd_show - display whether sdev max qd is enabled/disabled
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
+ *
+ * A sysfs read/write shost attribute. This attribute is used to set the
+ * targets' queue depth to the HBA IO queue depth if this attribute is enabled.
+ */
+static ssize_t
+enable_sdev_max_qd_show(struct device *cdev,
+	struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", ioc->enable_sdev_max_qd);
+}
+
+/**
+ * enable_sdev_max_qd_store - Enable/disable sdev max qd
+ * @cdev: pointer to embedded class device
+ * @attr: ?
+ * @buf: the buffer returned
+ *
+ * A sysfs read/write shost attribute. This attribute is used to set the
+ * targets' queue depth to the HBA IO queue depth if this attribute is enabled.
+ * If this attribute is disabled, targets fall back to their corresponding
+ * default queue depth.
+ */
+static ssize_t
+enable_sdev_max_qd_store(struct device *cdev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+	struct MPT3SAS_DEVICE *sas_device_priv_data;
+	struct MPT3SAS_TARGET *sas_target_priv_data;
+	int val = 0;
+	struct scsi_device *sdev;
+	struct _raid_device *raid_device;
+	int qdepth;
+
+	if (kstrtoint(buf, 0, &val) != 0)
+		return -EINVAL;
+
+	switch (val) {
+	case 0:
+		ioc->enable_sdev_max_qd = 0;
+		shost_for_each_device(sdev, ioc->shost) {
+			sas_device_priv_data = sdev->hostdata;
+			if (!sas_device_priv_data)
+				continue;
+			sas_target_priv_data = sas_device_priv_data->sas_target;
+			if (!sas_target_priv_data)
+				continue;
+
+			if (sas_target_priv_data->flags &
+			    MPT_TARGET_FLAGS_VOLUME) {
+				raid_device =
+				    mpt3sas_raid_device_find_by_handle(ioc,
+				    sas_target_priv_data->handle);
+
+				switch (raid_device->volume_type) {
+				case MPI2_RAID_VOL_TYPE_RAID0:
+					if (raid_device->device_info &
+					    MPI2_SAS_DEVICE_INFO_SSP_TARGET)
+						qdepth =
+						    MPT3SAS_SAS_QUEUE_DEPTH;
+					else
+						qdepth =
+						    MPT3SAS_SATA_QUEUE_DEPTH;
+					break;
+				case MPI2_RAID_VOL_TYPE_RAID1E:
+				case MPI2_RAID_VOL_TYPE_RAID1:
+				case MPI2_RAID_VOL_TYPE_RAID10:
+				case MPI2_RAID_VOL_TYPE_UNKNOWN:
+				default:
+					qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
+				}
+			} else if (sas_target_priv_data->flags &
+			    MPT_TARGET_FLAGS_PCIE_DEVICE)
+				qdepth = MPT3SAS_NVME_QUEUE_DEPTH;
+			else
+				qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
+
+			mpt3sas_scsih_change_queue_depth(sdev, qdepth);
+		}
+		break;
+	case 1:
+		ioc->enable_sdev_max_qd = 1;
+		shost_for_each_device(sdev, ioc->shost)
+			mpt3sas_scsih_change_queue_depth(sdev,
+			    shost->can_queue);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return strlen(buf);
+}
+static DEVICE_ATTR_RW(enable_sdev_max_qd);
+
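
Once registered via the mpt3sas_host_attrs[] table that follows, the new attribute typically appears as /sys/class/scsi_host/hostN/enable_sdev_max_qd: writing 1 raises every sdev's queue depth to shost->can_queue, while writing 0 walks the devices again and restores the per-type defaults (SAS/SATA/NVMe/RAID) chosen in the switch above.
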
 struct device_attribute *mpt3sas_host_attrs[] = {
 	&dev_attr_version_fw,
 	&dev_attr_version_bios,
@@ -3476,14 +3526,16 @@
 	&dev_attr_diag_trigger_event,
 	&dev_attr_diag_trigger_scsi,
 	&dev_attr_diag_trigger_mpi,
+	&dev_attr_drv_support_bitmap,
 	&dev_attr_BRM_status,
+	&dev_attr_enable_sdev_max_qd,
 	NULL,
 };
 
 /* device attributes */
 
 /**
- * _ctl_device_sas_address_show - sas address
+ * sas_address_show - sas address
  * @dev: pointer to embedded class device
  * @attr: ?
  * @buf: the buffer returned
@@ -3493,7 +3545,7 @@
  * A sysfs 'read-only' shost attribute.
  */
 static ssize_t
-_ctl_device_sas_address_show(struct device *dev, struct device_attribute *attr,
+sas_address_show(struct device *dev, struct device_attribute *attr,
 	char *buf)
 {
 	struct scsi_device *sdev = to_scsi_device(dev);
@@ -3502,10 +3554,10 @@
 	return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
 	    (unsigned long long)sas_device_priv_data->sas_target->sas_address);
 }
-static DEVICE_ATTR(sas_address, S_IRUGO, _ctl_device_sas_address_show, NULL);
+static DEVICE_ATTR_RO(sas_address);
 
 /**
- * _ctl_device_handle_show - device handle
+ * sas_device_handle_show - device handle
  * @dev: pointer to embedded class device
  * @attr: ?
  * @buf: the buffer returned
@@ -3515,7 +3567,7 @@
  * A sysfs 'read-only' shost attribute.
  */
 static ssize_t
-_ctl_device_handle_show(struct device *dev, struct device_attribute *attr,
+sas_device_handle_show(struct device *dev, struct device_attribute *attr,
 	char *buf)
 {
 	struct scsi_device *sdev = to_scsi_device(dev);
@@ -3524,10 +3576,10 @@
 	return snprintf(buf, PAGE_SIZE, "0x%04x\n",
 	    sas_device_priv_data->sas_target->handle);
 }
-static DEVICE_ATTR(sas_device_handle, S_IRUGO, _ctl_device_handle_show, NULL);
+static DEVICE_ATTR_RO(sas_device_handle);
 
 /**
- * _ctl_device_ncq_io_prio_show - send prioritized io commands to device
+ * sas_ncq_prio_enable_show - send prioritized io commands to device
  * @dev: pointer to embedded device
  * @attr: ?
  * @buf: the buffer returned
@@ -3535,7 +3587,7 @@
  * A sysfs 'read/write' sdev attribute, only works with SATA
  */
 static ssize_t
-_ctl_device_ncq_prio_enable_show(struct device *dev,
+sas_ncq_prio_enable_show(struct device *dev,
 				 struct device_attribute *attr, char *buf)
 {
 	struct scsi_device *sdev = to_scsi_device(dev);
@@ -3546,7 +3598,7 @@
 }
 
 static ssize_t
-_ctl_device_ncq_prio_enable_store(struct device *dev,
+sas_ncq_prio_enable_store(struct device *dev,
 				  struct device_attribute *attr,
 				  const char *buf, size_t count)
 {
@@ -3563,9 +3615,7 @@
 	sas_device_priv_data->ncq_prio_enable = ncq_prio_enable;
 	return strlen(buf);
 }
-static DEVICE_ATTR(sas_ncq_prio_enable, S_IRUGO | S_IWUSR,
-		   _ctl_device_ncq_prio_enable_show,
-		   _ctl_device_ncq_prio_enable_store);
+static DEVICE_ATTR_RW(sas_ncq_prio_enable);
 
 struct device_attribute *mpt3sas_dev_attrs[] = {
 	&dev_attr_sas_address,
@@ -3658,8 +3708,10 @@
 			if ((ioc->diag_buffer_status[i] &
 			    MPT3_DIAG_BUFFER_IS_RELEASED))
 				continue;
-			pci_free_consistent(ioc->pdev, ioc->diag_buffer_sz[i],
-			ioc->diag_buffer[i], ioc->diag_buffer_dma[i]);
+			dma_free_coherent(&ioc->pdev->dev,
+					  ioc->diag_buffer_sz[i],
+					  ioc->diag_buffer[i],
+					  ioc->diag_buffer_dma[i]);
 			ioc->diag_buffer[i] = NULL;
 			ioc->diag_buffer_status[i] = 0;
 		}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 53133cf..c8e512b 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -51,7 +51,6 @@
 #include <linux/workqueue.h>
 #include <linux/delay.h>
 #include <linux/pci.h>
-#include <linux/pci-aspm.h>
 #include <linux/interrupt.h>
 #include <linux/aer.h>
 #include <linux/raid_class.h>
@@ -113,22 +112,22 @@
 
 
 static ushort max_sectors = 0xFFFF;
-module_param(max_sectors, ushort, 0);
+module_param(max_sectors, ushort, 0444);
 MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767  default=32767");
 
 
 static int missing_delay[2] = {-1, -1};
-module_param_array(missing_delay, int, NULL, 0);
+module_param_array(missing_delay, int, NULL, 0444);
 MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");
 
 /* scsi-mid layer global parmeter is max_report_luns, which is 511 */
 #define MPT3SAS_MAX_LUN (16895)
 static u64 max_lun = MPT3SAS_MAX_LUN;
-module_param(max_lun, ullong, 0);
+module_param(max_lun, ullong, 0444);
 MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
 
 static ushort hbas_to_enumerate;
-module_param(hbas_to_enumerate, ushort, 0);
+module_param(hbas_to_enumerate, ushort, 0444);
 MODULE_PARM_DESC(hbas_to_enumerate,
 		" 0 - enumerates both SAS 2.0 & SAS 3.0 generation HBAs\n \
 		  1 - enumerates only SAS 2.0 generation HBAs\n \
@@ -142,19 +141,23 @@
  * Either bit can be set, or both
  */
 static int diag_buffer_enable = -1;
-module_param(diag_buffer_enable, int, 0);
+module_param(diag_buffer_enable, int, 0444);
 MODULE_PARM_DESC(diag_buffer_enable,
 	" post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
 static int disable_discovery = -1;
-module_param(disable_discovery, int, 0);
+module_param(disable_discovery, int, 0444);
 MODULE_PARM_DESC(disable_discovery, " disable discovery ");
 
 
 /* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
 static int prot_mask = -1;
-module_param(prot_mask, int, 0);
+module_param(prot_mask, int, 0444);
 MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
 
+static bool enable_sdev_max_qd;
+module_param(enable_sdev_max_qd, bool, 0444);
+MODULE_PARM_DESC(enable_sdev_max_qd,
+	"Enable sdev max qd as can_queue, def=disabled(0)");
 
 /* raid transport support */
 static struct raid_template *mpt3sas_raid_template;
@@ -418,8 +421,8 @@
 
 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name,
-		__FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return -ENXIO;
 	}
 
@@ -442,10 +445,8 @@
 		return -ENXIO;
 
 	/* else error case */
-	pr_err(MPT3SAS_FMT
-		"handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
-		ioc->name, handle, ioc_status,
-	     __FILE__, __LINE__, __func__);
+	ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
+		handle, ioc_status, __FILE__, __LINE__, __func__);
 	return -EIO;
 }
 
@@ -508,10 +509,9 @@
 		    (ioc->bios_pg2.ReqBootDeviceForm &
 		    MPI2_BIOSPAGE2_FORM_MASK),
 		    &ioc->bios_pg2.RequestedBootDevice)) {
-			dinitprintk(ioc, pr_info(MPT3SAS_FMT
-			   "%s: req_boot_device(0x%016llx)\n",
-			    ioc->name, __func__,
-			    (unsigned long long)sas_address));
+			dinitprintk(ioc,
+				    ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n",
+					     __func__, (u64)sas_address));
 			ioc->req_boot_device.device = device;
 			ioc->req_boot_device.channel = channel;
 		}
@@ -523,10 +523,9 @@
 		    (ioc->bios_pg2.ReqAltBootDeviceForm &
 		    MPI2_BIOSPAGE2_FORM_MASK),
 		    &ioc->bios_pg2.RequestedAltBootDevice)) {
-			dinitprintk(ioc, pr_info(MPT3SAS_FMT
-			   "%s: req_alt_boot_device(0x%016llx)\n",
-			    ioc->name, __func__,
-			    (unsigned long long)sas_address));
+			dinitprintk(ioc,
+				    ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n",
+					     __func__, (u64)sas_address));
 			ioc->req_alt_boot_device.device = device;
 			ioc->req_alt_boot_device.channel = channel;
 		}
@@ -538,10 +537,9 @@
 		    (ioc->bios_pg2.CurrentBootDeviceForm &
 		    MPI2_BIOSPAGE2_FORM_MASK),
 		    &ioc->bios_pg2.CurrentBootDevice)) {
-			dinitprintk(ioc, pr_info(MPT3SAS_FMT
-			   "%s: current_boot_device(0x%016llx)\n",
-			    ioc->name, __func__,
-			    (unsigned long long)sas_address));
+			dinitprintk(ioc,
+				    ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n",
+					     __func__, (u64)sas_address));
 			ioc->current_boot_device.device = device;
 			ioc->current_boot_device.channel = channel;
 		}
@@ -752,19 +750,16 @@
 			    sas_device->chassis_slot);
 	} else {
 		if (sas_device->enclosure_handle != 0)
-			pr_info(MPT3SAS_FMT
-			    "enclosure logical id(0x%016llx), slot(%d) \n",
-			    ioc->name, (unsigned long long)
-			    sas_device->enclosure_logical_id,
-			    sas_device->slot);
+			ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n",
+				 (u64)sas_device->enclosure_logical_id,
+				 sas_device->slot);
 		if (sas_device->connector_name[0] != '\0')
-			pr_info(MPT3SAS_FMT
-			    "enclosure level(0x%04x), connector name( %s)\n",
-			    ioc->name, sas_device->enclosure_level,
-			    sas_device->connector_name);
+			ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n",
+				 sas_device->enclosure_level,
+				 sas_device->connector_name);
 		if (sas_device->is_chassis_slot_valid)
-			pr_info(MPT3SAS_FMT "chassis slot(0x%04x)\n",
-			    ioc->name, sas_device->chassis_slot);
+			ioc_info(ioc, "chassis slot(0x%04x)\n",
+				 sas_device->chassis_slot);
 	}
 }
 
@@ -784,10 +779,8 @@
 
 	if (!sas_device)
 		return;
-	pr_info(MPT3SAS_FMT
-	    "removing handle(0x%04x), sas_addr(0x%016llx)\n",
-	    ioc->name, sas_device->handle,
-	    (unsigned long long) sas_device->sas_address);
+	ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
+		 sas_device->handle, (u64)sas_device->sas_address);
 
 	_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
 
@@ -872,10 +865,10 @@
 {
 	unsigned long flags;
 
-	dewtprintk(ioc, pr_info(MPT3SAS_FMT
-		"%s: handle(0x%04x), sas_addr(0x%016llx)\n",
-		ioc->name, __func__, sas_device->handle,
-		(unsigned long long)sas_device->sas_address));
+	dewtprintk(ioc,
+		   ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
+			    __func__, sas_device->handle,
+			    (u64)sas_device->sas_address));
 
 	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
 	    NULL, NULL));
@@ -923,10 +916,10 @@
 {
 	unsigned long flags;
 
-	dewtprintk(ioc, pr_info(MPT3SAS_FMT
-		"%s: handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name,
-		__func__, sas_device->handle,
-		(unsigned long long)sas_device->sas_address));
+	dewtprintk(ioc,
+		   ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
+			    __func__, sas_device->handle,
+			    (u64)sas_device->sas_address));
 
 	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
 	    NULL, NULL));
@@ -1073,21 +1066,16 @@
 
 	if (!pcie_device)
 		return;
-	pr_info(MPT3SAS_FMT
-		"removing handle(0x%04x), wwid(0x%016llx)\n",
-		ioc->name, pcie_device->handle,
-		(unsigned long long) pcie_device->wwid);
+	ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
+		 pcie_device->handle, (u64)pcie_device->wwid);
 	if (pcie_device->enclosure_handle != 0)
-		pr_info(MPT3SAS_FMT
-			"removing enclosure logical id(0x%016llx), slot(%d)\n",
-			ioc->name,
-			(unsigned long long)pcie_device->enclosure_logical_id,
-		pcie_device->slot);
+		ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n",
+			 (u64)pcie_device->enclosure_logical_id,
+			 pcie_device->slot);
 	if (pcie_device->connector_name[0] != '\0')
-		pr_info(MPT3SAS_FMT
-		    "removing enclosure level(0x%04x), connector name( %s)\n",
-			ioc->name, pcie_device->enclosure_level,
-			pcie_device->connector_name);
+		ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n",
+			 pcie_device->enclosure_level,
+			 pcie_device->connector_name);
 
 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
 	if (!list_empty(&pcie_device->list)) {
@@ -1146,26 +1134,32 @@
 {
 	unsigned long flags;
 
-	dewtprintk(ioc, pr_info(MPT3SAS_FMT
-		"%s: handle (0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
-		pcie_device->handle, (unsigned long long)pcie_device->wwid));
+	dewtprintk(ioc,
+		   ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
+			    __func__,
+			    pcie_device->handle, (u64)pcie_device->wwid));
 	if (pcie_device->enclosure_handle != 0)
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT
-			"%s: enclosure logical id(0x%016llx), slot( %d)\n",
-			ioc->name, __func__,
-			(unsigned long long)pcie_device->enclosure_logical_id,
-			pcie_device->slot));
+		dewtprintk(ioc,
+			   ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
+				    __func__,
+				    (u64)pcie_device->enclosure_logical_id,
+				    pcie_device->slot));
 	if (pcie_device->connector_name[0] != '\0')
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT
-			"%s: enclosure level(0x%04x), connector name( %s)\n",
-			ioc->name, __func__, pcie_device->enclosure_level,
-			pcie_device->connector_name));
+		dewtprintk(ioc,
+			   ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
+				    __func__, pcie_device->enclosure_level,
+				    pcie_device->connector_name));
 
 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
 	pcie_device_get(pcie_device);
 	list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
 
+	if (pcie_device->access_status ==
+	    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
+		clear_bit(pcie_device->handle, ioc->pend_os_device_add);
+		return;
+	}
 	if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
 		_scsih_pcie_device_remove(ioc, pcie_device);
 	} else if (!pcie_device->starget) {
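
The two access_status checks added in this area keep a PCIe device that firmware reports as MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED from being exposed to the SCSI midlayer: it is neither offered to scsi_add_device() nor considered as a boot device, and its bit in pend_os_device_add is cleared so the driver stops waiting for the add to complete. Presumably such a device is surfaced later, once firmware reports an access status that is no longer blocked.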
@@ -1191,25 +1185,28 @@
 {
 	unsigned long flags;
 
-	dewtprintk(ioc, pr_info(MPT3SAS_FMT
-		"%s: handle (0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
-		pcie_device->handle, (unsigned long long)pcie_device->wwid));
+	dewtprintk(ioc,
+		   ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
+			    __func__,
+			    pcie_device->handle, (u64)pcie_device->wwid));
 	if (pcie_device->enclosure_handle != 0)
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT
-			"%s: enclosure logical id(0x%016llx), slot( %d)\n",
-			ioc->name, __func__,
-			(unsigned long long)pcie_device->enclosure_logical_id,
-			pcie_device->slot));
+		dewtprintk(ioc,
+			   ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
+				    __func__,
+				    (u64)pcie_device->enclosure_logical_id,
+				    pcie_device->slot));
 	if (pcie_device->connector_name[0] != '\0')
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT
-			"%s: enclosure level(0x%04x), connector name( %s)\n",
-			ioc->name, __func__, pcie_device->enclosure_level,
-			pcie_device->connector_name));
+		dewtprintk(ioc,
+			   ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
+				    __func__, pcie_device->enclosure_level,
+				    pcie_device->connector_name));
 
 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
 	pcie_device_get(pcie_device);
 	list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
-	_scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
+	if (pcie_device->access_status !=
+	    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)
+		_scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
 }
 /**
@@ -1304,9 +1301,10 @@
 {
 	unsigned long flags;
 
-	dewtprintk(ioc, pr_info(MPT3SAS_FMT
-		"%s: handle(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
-	    raid_device->handle, (unsigned long long)raid_device->wwid));
+	dewtprintk(ioc,
+		   ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n",
+			    __func__,
+			    raid_device->handle, (u64)raid_device->wwid));
 
 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
 	list_add_tail(&raid_device->list, &ioc->raid_device_list);
@@ -1445,17 +1443,20 @@
 }
 
 /**
- * _scsih_is_nvme_device - determines if device is an nvme device
+ * _scsih_is_nvme_pciescsi_device - determines if
+ *			device is a pcie nvme/scsi device
  * @device_info: bitfield providing information about the device.
  * Context: none
  *
- * Return: 1 if nvme device.
+ * Return: 1 if the device is a pcie nvme/scsi device.
  */
 static int
-_scsih_is_nvme_device(u32 device_info)
+_scsih_is_nvme_pciescsi_device(u32 device_info)
 {
-	if ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
-					== MPI26_PCIE_DEVINFO_NVME)
+	if (((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
+	    == MPI26_PCIE_DEVINFO_NVME) ||
+	    ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
+	    == MPI26_PCIE_DEVINFO_SCSI))
 		return 1;
 	else
 		return 0;
@@ -1474,11 +1475,23 @@
 {
 	struct scsi_cmnd *scmd = NULL;
 	struct scsiio_tracker *st;
+	Mpi25SCSIIORequest_t *mpi_request;
 
 	if (smid > 0  &&
 	    smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
 		u32 unique_tag = smid - 1;
 
+		mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+
+		/*
+		 * If a SCSI IO request is outstanding at the driver level,
+		 * the DevHandle field must be non-zero. If DevHandle is
+		 * zero, this smid is free at the driver level, so return
+		 * NULL.
+		 */
+		if (!mpi_request->DevHandle)
+			return scmd;
+
 		scmd = scsi_host_find_tag(ioc->shost, unique_tag);
 		if (scmd) {
 			st = scsi_cmd_priv(scmd);
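
The DevHandle test added above guards against handing a stale command back to callers: scsi_host_find_tag() can map a tag to a request even after the driver has finished with it, so an smid whose message frame has already been zeroed must be filtered out first. The same check, in the shape of a hypothetical standalone helper (not part of the patch) compiled in the driver's context:

    /* sketch: only return a scmd whose smid is still owned by an IO */
    static struct scsi_cmnd *
    lookup_active_scmd(struct MPT3SAS_ADAPTER *ioc, u16 smid)
    {
            Mpi25SCSIIORequest_t *mpi_request =
                    mpt3sas_base_get_msg_frame(ioc, smid);

            if (!mpi_request->DevHandle) /* smid free at driver level */
                    return NULL;

            return scsi_host_find_tag(ioc->shost, smid - 1);
    }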
@@ -1509,7 +1522,13 @@
 
 	max_depth = shost->can_queue;
 
-	/* limit max device queue for SATA to 32 */
+	/*
+	 * limit max device queue for SATA to 32 if enable_sdev_max_qd
+	 * is disabled.
+	 */
+	if (ioc->enable_sdev_max_qd)
+		goto not_sata;
+
 	sas_device_priv_data = sdev->hostdata;
 	if (!sas_device_priv_data)
 		goto not_sata;
@@ -1539,6 +1558,25 @@
 }
 
 /**
+ * mpt3sas_scsih_change_queue_depth - setting device queue depth
+ * @sdev: scsi device struct
+ * @qdepth: requested queue depth
+ *
+ * Returns nothing.
+ */
+void
+mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
+{
+	struct Scsi_Host *shost = sdev->host;
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+	if (ioc->enable_sdev_max_qd)
+		qdepth = shost->can_queue;
+
+	scsih_change_queue_depth(sdev, qdepth);
+}
+
+/**
  * scsih_target_alloc - target add routine
  * @starget: scsi target struct
  *
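
With this helper in place, the queue-depth settings in the slave-configure paths below go through mpt3sas_scsih_change_queue_depth(), so loading the driver with "modprobe mpt3sas enable_sdev_max_qd=1" pins each device's depth to shost->can_queue and also skips the SATA cap shown above. Since the parameter is 0444 it can be inspected at /sys/module/mpt3sas/parameters/enable_sdev_max_qd but not toggled at runtime.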
@@ -1857,16 +1895,16 @@
 
 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return;
 	}
 
 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 	    MPI2_IOCSTATUS_MASK;
 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return;
 	}
 
@@ -1952,8 +1990,8 @@
 	if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
 	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
 	     sizeof(Mpi2RaidVolPage0_t))) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		percent_complete = 0;
 		goto out;
 	}
@@ -2006,8 +2044,8 @@
 	if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
 	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
 	     sizeof(Mpi2RaidVolPage0_t))) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		goto out;
 	}
 
@@ -2103,9 +2141,9 @@
 
 	if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
 	    &num_pds)) || !num_pds) {
-		dfailprintk(ioc, pr_warn(MPT3SAS_FMT
-		    "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
-		    __func__));
+		dfailprintk(ioc,
+			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+				     __FILE__, __LINE__, __func__));
 		return 1;
 	}
 
@@ -2114,17 +2152,17 @@
 	    sizeof(Mpi2RaidVol0PhysDisk_t));
 	vol_pg0 = kzalloc(sz, GFP_KERNEL);
 	if (!vol_pg0) {
-		dfailprintk(ioc, pr_warn(MPT3SAS_FMT
-		    "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
-		    __func__));
+		dfailprintk(ioc,
+			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+				     __FILE__, __LINE__, __func__));
 		return 1;
 	}
 
 	if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
 	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
-		dfailprintk(ioc, pr_warn(MPT3SAS_FMT
-		    "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
-		    __func__));
+		dfailprintk(ioc,
+			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+				     __FILE__, __LINE__, __func__));
 		kfree(vol_pg0);
 		return 1;
 	}
@@ -2215,16 +2253,16 @@
 		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
 		if (!raid_device) {
-			dfailprintk(ioc, pr_warn(MPT3SAS_FMT
-			    "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
-			    __LINE__, __func__));
+			dfailprintk(ioc,
+				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+					     __FILE__, __LINE__, __func__));
 			return 1;
 		}
 
 		if (_scsih_get_volume_capabilities(ioc, raid_device)) {
-			dfailprintk(ioc, pr_warn(MPT3SAS_FMT
-			    "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
-			    __LINE__, __func__));
+			dfailprintk(ioc,
+				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+					     __FILE__, __LINE__, __func__));
 			return 1;
 		}
 
@@ -2296,7 +2334,7 @@
 						MPT3SAS_RAID_MAX_SECTORS);
 		}
 
-		scsih_change_queue_depth(sdev, qdepth);
+		mpt3sas_scsih_change_queue_depth(sdev, qdepth);
 
 		/* raid transport support */
 		if (!ioc->is_warpdrive)
@@ -2308,16 +2346,16 @@
 	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
 		if (mpt3sas_config_get_volume_handle(ioc, handle,
 		    &volume_handle)) {
-			dfailprintk(ioc, pr_warn(MPT3SAS_FMT
-			    "failure at %s:%d/%s()!\n", ioc->name,
-			    __FILE__, __LINE__, __func__));
+			dfailprintk(ioc,
+				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+					     __FILE__, __LINE__, __func__));
 			return 1;
 		}
 		if (volume_handle && mpt3sas_config_get_volume_wwid(ioc,
 		    volume_handle, &volume_wwid)) {
-			dfailprintk(ioc, pr_warn(MPT3SAS_FMT
-			    "failure at %s:%d/%s()!\n", ioc->name,
-			    __FILE__, __LINE__, __func__));
+			dfailprintk(ioc,
+				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+					     __FILE__, __LINE__, __func__));
 			return 1;
 		}
 	}
@@ -2329,9 +2367,9 @@
 				sas_device_priv_data->sas_target->sas_address);
 		if (!pcie_device) {
 			spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
-			dfailprintk(ioc, pr_warn(MPT3SAS_FMT
-				"failure at %s:%d/%s()!\n", ioc->name, __FILE__,
-				__LINE__, __func__));
+			dfailprintk(ioc,
+				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+					     __FILE__, __LINE__, __func__));
 			return 1;
 		}
 
@@ -2360,7 +2398,7 @@
 
 		pcie_device_put(pcie_device);
 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
-		scsih_change_queue_depth(sdev, qdepth);
+		mpt3sas_scsih_change_queue_depth(sdev, qdepth);
 		/* Enable QUEUE_FLAG_NOMERGES flag, so that IOs won't be
 		 ** merged and can eliminate holes created during merging
 		 ** operation.
@@ -2377,9 +2415,9 @@
 	   sas_device_priv_data->sas_target->sas_address);
 	if (!sas_device) {
 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-		dfailprintk(ioc, pr_warn(MPT3SAS_FMT
-		    "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
-		    __func__));
+		dfailprintk(ioc,
+			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+				     __FILE__, __LINE__, __func__));
 		return 1;
 	}
 
@@ -2420,7 +2458,7 @@
 		_scsih_display_sata_capabilities(ioc, handle, sdev);
 
 
-	scsih_change_queue_depth(sdev, qdepth);
+	mpt3sas_scsih_change_queue_depth(sdev, qdepth);
 
 	if (ssp_target) {
 		sas_read_port_mode_page(sdev);
@@ -2515,8 +2553,7 @@
 		desc = "unknown";
 		break;
 	}
-	pr_warn(MPT3SAS_FMT "response_code(0x%01x): %s\n",
-		ioc->name, response_code, desc);
+	ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc);
 }
 
 /**
@@ -2640,22 +2677,19 @@
 	lockdep_assert_held(&ioc->tm_cmds.mutex);
 
 	if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
-		pr_info(MPT3SAS_FMT "%s: tm_cmd busy!!!\n",
-		    __func__, ioc->name);
+		ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__);
 		return FAILED;
 	}
 
 	if (ioc->shost_recovery || ioc->remove_host ||
 	    ioc->pci_error_recovery) {
-		pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
-		    __func__, ioc->name);
+		ioc_info(ioc, "%s: host reset in progress!\n", __func__);
 		return FAILED;
 	}
 
 	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
 	if (ioc_state & MPI2_DOORBELL_USED) {
-		dhsprintk(ioc, pr_info(MPT3SAS_FMT
-			"unexpected doorbell active!\n", ioc->name));
+		dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
 		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
 		return (!rc) ? SUCCESS : FAILED;
 	}
@@ -2669,14 +2703,13 @@
 
 	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
 	if (!smid) {
-		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
 		return FAILED;
 	}
 
-	dtmprintk(ioc, pr_info(MPT3SAS_FMT
-		"sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
-		ioc->name, handle, type, smid_task, timeout, tr_method));
+	dtmprintk(ioc,
+		  ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
+			   handle, type, smid_task, timeout, tr_method));
 	ioc->tm_cmds.status = MPT3_CMD_PENDING;
 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
 	ioc->tm_cmds.smid = smid;
@@ -2690,7 +2723,7 @@
 	int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
 	mpt3sas_scsih_set_tm_flag(ioc, handle);
 	init_completion(&ioc->tm_cmds.done);
-	mpt3sas_base_put_smid_hi_priority(ioc, smid, msix_task);
+	ioc->put_smid_hi_priority(ioc, smid, msix_task);
 	wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
 	if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
 		if (mpt3sas_base_check_cmd_timeout(ioc,
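
This hunk is one of several that replace direct calls to mpt3sas_base_put_smid_hi_priority()/_fast_path()/_default() with per-adapter function pointers. A sketch of the init-time wiring such calls assume; the capability flag and function names are assumptions about code outside this diff:

    /* sketch: pick a request-posting routine per adapter at init */
    if (ioc->atomic_desc_capable) {  /* assumed capability flag */
            ioc->put_smid_default     = &_base_put_smid_default_atomic;
            ioc->put_smid_fast_path   = &_base_put_smid_fast_path_atomic;
            ioc->put_smid_hi_priority = &_base_put_smid_hi_priority_atomic;
    } else {
            ioc->put_smid_default     = &_base_put_smid_default;
            ioc->put_smid_fast_path   = &_base_put_smid_fast_path;
            ioc->put_smid_hi_priority = &_base_put_smid_hi_priority;
    }

Routing through the pointer lets controllers that support it post requests with a single atomic descriptor write, without branching at every call site.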
@@ -2709,11 +2742,11 @@
 	if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
 		mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
 		mpi_reply = ioc->tm_cmds.reply;
-		dtmprintk(ioc, pr_info(MPT3SAS_FMT "complete tm: " \
-		    "ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
-		    ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
-		    le32_to_cpu(mpi_reply->IOCLogInfo),
-		    le32_to_cpu(mpi_reply->TerminationCount)));
+		dtmprintk(ioc,
+			  ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
+				   le16_to_cpu(mpi_reply->IOCStatus),
+				   le32_to_cpu(mpi_reply->IOCLogInfo),
+				   le32_to_cpu(mpi_reply->TerminationCount)));
 		if (ioc->logging_level & MPT_DEBUG_TM) {
 			_scsih_response_code(ioc, mpi_reply->ResponseCode);
 			if (mpi_reply->IOCStatus)
@@ -2877,7 +2910,8 @@
 
 	handle = sas_device_priv_data->sas_target->handle;
 	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
-	if (pcie_device && (!ioc->tm_custom_handling))
+	if (pcie_device && (!ioc->tm_custom_handling) &&
+	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))))
 		timeout = ioc->nvme_abort_timeout;
 	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
 		MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
@@ -2948,11 +2982,13 @@
 
 	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
 
-	if (pcie_device && (!ioc->tm_custom_handling)) {
+	if (pcie_device && (!ioc->tm_custom_handling) &&
+	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
 		tr_timeout = pcie_device->reset_timeout;
 		tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
 	} else
 		tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
+
 	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
 		MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
 		tr_timeout, tr_method);
@@ -3025,7 +3061,8 @@
 
 	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
 
-	if (pcie_device && (!ioc->tm_custom_handling)) {
+	if (pcie_device && (!ioc->tm_custom_handling) &&
+	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
 		tr_timeout = pcie_device->reset_timeout;
 		tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
 	} else
@@ -3060,13 +3097,11 @@
 	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
 	int r, retval;
 
-	pr_info(MPT3SAS_FMT "attempting host reset! scmd(%p)\n",
-	    ioc->name, scmd);
+	ioc_info(ioc, "attempting host reset! scmd(%p)\n", scmd);
 	scsi_print_command(scmd);
 
 	if (ioc->is_driver_loading || ioc->remove_host) {
-		pr_info(MPT3SAS_FMT "Blocking the host reset\n",
-		    ioc->name);
+		ioc_info(ioc, "Blocking the host reset\n");
 		r = FAILED;
 		goto out;
 	}
@@ -3074,8 +3109,8 @@
 	retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
 	r = (retval < 0) ? FAILED : SUCCESS;
 out:
-	pr_info(MPT3SAS_FMT "host reset: %s scmd(%p)\n",
-	    ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+	ioc_info(ioc, "host reset: %s scmd(%p)\n",
+		 r == SUCCESS ? "SUCCESS" : "FAILED", scmd);
 
 	return r;
 }
@@ -3567,18 +3602,16 @@
 	u8 tr_method = 0;
 
 	if (ioc->pci_error_recovery) {
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT
-			"%s: host in pci error recovery: handle(0x%04x)\n",
-			__func__, ioc->name,
-		    handle));
+		dewtprintk(ioc,
+			   ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n",
+				    __func__, handle));
 		return;
 	}
 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
 	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT
-			"%s: host is not operational: handle(0x%04x)\n",
-			__func__, ioc->name,
-		   handle));
+		dewtprintk(ioc,
+			   ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n",
+				    __func__, handle));
 		return;
 	}
 
@@ -3607,46 +3640,40 @@
 			sas_address = pcie_device->wwid;
 		}
 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
-		if (pcie_device && (!ioc->tm_custom_handling))
+		if (pcie_device && (!ioc->tm_custom_handling) &&
+		    (!(mpt3sas_scsih_is_pcie_scsi_device(
+		    pcie_device->device_info))))
 			tr_method =
 			    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
 		else
 			tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
 	}
 	if (sas_target_priv_data) {
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT
-			"setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
-			ioc->name, handle,
-		    (unsigned long long)sas_address));
+		dewtprintk(ioc,
+			   ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
+				    handle, (u64)sas_address));
 		if (sas_device) {
 			if (sas_device->enclosure_handle != 0)
-				dewtprintk(ioc, pr_info(MPT3SAS_FMT
-				    "setting delete flag:enclosure logical "
-				    "id(0x%016llx), slot(%d)\n", ioc->name,
-				    (unsigned long long)
-				    sas_device->enclosure_logical_id,
-				    sas_device->slot));
+				dewtprintk(ioc,
+					   ioc_info(ioc, "setting delete flag:enclosure logical id(0x%016llx), slot(%d)\n",
+						    (u64)sas_device->enclosure_logical_id,
+						    sas_device->slot));
 			if (sas_device->connector_name[0] != '\0')
-				dewtprintk(ioc, pr_info(MPT3SAS_FMT
-				    "setting delete flag: enclosure "
-				    "level(0x%04x), connector name( %s)\n",
-				    ioc->name, sas_device->enclosure_level,
-				    sas_device->connector_name));
+				dewtprintk(ioc,
+					   ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
+						    sas_device->enclosure_level,
+						    sas_device->connector_name));
 		} else if (pcie_device) {
 			if (pcie_device->enclosure_handle != 0)
-				dewtprintk(ioc, pr_info(MPT3SAS_FMT
-				    "setting delete flag: logical "
-				    "id(0x%016llx), slot(%d)\n", ioc->name,
-				    (unsigned long long)
-				    pcie_device->enclosure_logical_id,
-				    pcie_device->slot));
+				dewtprintk(ioc,
+					   ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n",
+						    (u64)pcie_device->enclosure_logical_id,
+						    pcie_device->slot));
 			if (pcie_device->connector_name[0] != '\0')
-				dewtprintk(ioc, pr_info(MPT3SAS_FMT
-				    "setting delete flag:, enclosure "
-				    "level(0x%04x), "
-				    "connector name( %s)\n", ioc->name,
-				    pcie_device->enclosure_level,
-				    pcie_device->connector_name));
+				dewtprintk(ioc,
+					   ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
+						    pcie_device->enclosure_level,
+						    pcie_device->connector_name));
 		}
 		_scsih_ublock_io_device(ioc, sas_address);
 		sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
@@ -3660,16 +3687,15 @@
 		INIT_LIST_HEAD(&delayed_tr->list);
 		delayed_tr->handle = handle;
 		list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT
-		    "DELAYED:tr:handle(0x%04x), (open)\n",
-		    ioc->name, handle));
+		dewtprintk(ioc,
+			   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
+				    handle));
 		goto out;
 	}
 
-	dewtprintk(ioc, pr_info(MPT3SAS_FMT
-		"tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
-		ioc->name, handle, smid,
-	    ioc->tm_tr_cb_idx));
+	dewtprintk(ioc,
+		   ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
+			    handle, smid, ioc->tm_tr_cb_idx));
 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
 	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
 	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
@@ -3677,7 +3703,7 @@
 	mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
 	mpi_request->MsgFlags = tr_method;
 	set_bit(handle, ioc->device_remove_in_progress);
-	mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
+	ioc->put_smid_hi_priority(ioc, smid, 0);
 	mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
 
 out:
@@ -3717,39 +3743,39 @@
 	struct _sc_list *delayed_sc;
 
 	if (ioc->pci_error_recovery) {
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT
-			"%s: host in pci error recovery\n", __func__,
-			ioc->name));
+		dewtprintk(ioc,
+			   ioc_info(ioc, "%s: host in pci error recovery\n",
+				    __func__));
 		return 1;
 	}
 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
 	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT
-			"%s: host is not operational\n", __func__, ioc->name));
+		dewtprintk(ioc,
+			   ioc_info(ioc, "%s: host is not operational\n",
+				    __func__));
 		return 1;
 	}
 	if (unlikely(!mpi_reply)) {
-		pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return 1;
 	}
 	mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
 	handle = le16_to_cpu(mpi_request_tm->DevHandle);
 	if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
-		dewtprintk(ioc, pr_err(MPT3SAS_FMT
-			"spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
-			ioc->name, handle,
-		    le16_to_cpu(mpi_reply->DevHandle), smid));
+		dewtprintk(ioc,
+			   ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
+				   handle,
+				   le16_to_cpu(mpi_reply->DevHandle), smid));
 		return 0;
 	}
 
 	mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
-	dewtprintk(ioc, pr_info(MPT3SAS_FMT
-	    "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), "
-	    "loginfo(0x%08x), completed(%d)\n", ioc->name,
-	    handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
-	    le32_to_cpu(mpi_reply->IOCLogInfo),
-	    le32_to_cpu(mpi_reply->TerminationCount)));
+	dewtprintk(ioc,
+		   ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
+			    handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
+			    le32_to_cpu(mpi_reply->IOCLogInfo),
+			    le32_to_cpu(mpi_reply->TerminationCount)));
 
 	smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
 	if (!smid_sas_ctrl) {
@@ -3759,26 +3785,59 @@
 		INIT_LIST_HEAD(&delayed_sc->list);
 		delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
 		list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT
-		    "DELAYED:sc:handle(0x%04x), (open)\n",
-		    ioc->name, handle));
+		dewtprintk(ioc,
+			   ioc_info(ioc, "DELAYED:sc:handle(0x%04x), (open)\n",
+				    handle));
 		return _scsih_check_for_pending_tm(ioc, smid);
 	}
 
-	dewtprintk(ioc, pr_info(MPT3SAS_FMT
-		"sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
-		ioc->name, handle, smid_sas_ctrl,
-	    ioc->tm_sas_control_cb_idx));
+	dewtprintk(ioc,
+		   ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
+			    handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx));
 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
 	memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
 	mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
 	mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
 	mpi_request->DevHandle = mpi_request_tm->DevHandle;
-	mpt3sas_base_put_smid_default(ioc, smid_sas_ctrl);
+	ioc->put_smid_default(ioc, smid_sas_ctrl);
 
 	return _scsih_check_for_pending_tm(ioc, smid);
 }
 
+/**
+ * _scsih_allow_scmd_to_device - check whether scmd can be issued to the IOC
+ * @ioc: per adapter object
+ * @scmd: pointer to scsi command object
+ *
+ * Returns true if the scmd can be issued to the IOC, otherwise false.
+ */
+inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc,
+	struct scsi_cmnd *scmd)
+{
+
+	if (ioc->pci_error_recovery)
+		return false;
+
+	if (ioc->hba_mpi_version_belonged == MPI2_VERSION) {
+		if (ioc->remove_host)
+			return false;
+
+		return true;
+	}
+
+	if (ioc->remove_host) {
+
+		switch (scmd->cmnd[0]) {
+		case SYNCHRONIZE_CACHE:
+		case START_STOP:
+			return true;
+		default:
+			return false;
+		}
+	}
+
+	return true;
+}
 
 /**
  * _scsih_sas_control_complete - completion routine
@@ -3803,20 +3862,19 @@
 	    mpt3sas_base_get_reply_virt_addr(ioc, reply);
 
 	if (likely(mpi_reply)) {
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT
-		"sc_complete:handle(0x%04x), (open) "
-		"smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
-		ioc->name, le16_to_cpu(mpi_reply->DevHandle), smid,
-		le16_to_cpu(mpi_reply->IOCStatus),
-		le32_to_cpu(mpi_reply->IOCLogInfo)));
+		dewtprintk(ioc,
+			   ioc_info(ioc, "sc_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
+				    le16_to_cpu(mpi_reply->DevHandle), smid,
+				    le16_to_cpu(mpi_reply->IOCStatus),
+				    le32_to_cpu(mpi_reply->IOCLogInfo)));
 		if (le16_to_cpu(mpi_reply->IOCStatus) ==
 		     MPI2_IOCSTATUS_SUCCESS) {
 			clear_bit(le16_to_cpu(mpi_reply->DevHandle),
 			    ioc->device_remove_in_progress);
 		}
 	} else {
-		pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 	}
 	return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
 }
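
Replacing the bare pci_error_recovery/remove_host test in scsih_qcmd() (see the hunk further down) with _scsih_allow_scmd_to_device() changes shutdown behavior only for MPI2.5/MPI2.6 HBAs: while remove_host is set, SYNCHRONIZE CACHE and START STOP UNIT are still let through, presumably so that cache flushes and spin-downs issued during host removal can reach the drive, while SAS2 (MPI2_VERSION) controllers keep the old reject-everything behavior.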
@@ -3839,9 +3897,9 @@
 	struct _tr_list *delayed_tr;
 
 	if (ioc->pci_error_recovery) {
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT
-			"%s: host reset in progress!\n",
-			__func__, ioc->name));
+		dewtprintk(ioc,
+			   ioc_info(ioc, "%s: host reset in progress!\n",
+				    __func__));
 		return;
 	}
 
@@ -3853,22 +3911,21 @@
 		INIT_LIST_HEAD(&delayed_tr->list);
 		delayed_tr->handle = handle;
 		list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT
-		    "DELAYED:tr:handle(0x%04x), (open)\n",
-		    ioc->name, handle));
+		dewtprintk(ioc,
+			   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
+				    handle));
 		return;
 	}
 
-	dewtprintk(ioc, pr_info(MPT3SAS_FMT
-		"tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
-		ioc->name, handle, smid,
-	    ioc->tm_tr_volume_cb_idx));
+	dewtprintk(ioc,
+		   ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
+			    handle, smid, ioc->tm_tr_volume_cb_idx));
 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
 	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
 	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
 	mpi_request->DevHandle = cpu_to_le16(handle);
 	mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
-	mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
+	ioc->put_smid_hi_priority(ioc, smid, 0);
 }
 
 /**
@@ -3892,33 +3949,32 @@
 	    mpt3sas_base_get_reply_virt_addr(ioc, reply);
 
 	if (ioc->shost_recovery || ioc->pci_error_recovery) {
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT
-			"%s: host reset in progress!\n",
-			__func__, ioc->name));
+		dewtprintk(ioc,
+			   ioc_info(ioc, "%s: host reset in progress!\n",
+				    __func__));
 		return 1;
 	}
 	if (unlikely(!mpi_reply)) {
-		pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return 1;
 	}
 
 	mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
 	handle = le16_to_cpu(mpi_request_tm->DevHandle);
 	if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
-		dewtprintk(ioc, pr_err(MPT3SAS_FMT
-			"spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
-			ioc->name, handle,
-		    le16_to_cpu(mpi_reply->DevHandle), smid));
+		dewtprintk(ioc,
+			   ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
+				   handle, le16_to_cpu(mpi_reply->DevHandle),
+				   smid));
 		return 0;
 	}
 
-	dewtprintk(ioc, pr_info(MPT3SAS_FMT
-	    "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), "
-	    "loginfo(0x%08x), completed(%d)\n", ioc->name,
-	    handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
-	    le32_to_cpu(mpi_reply->IOCLogInfo),
-	    le32_to_cpu(mpi_reply->TerminationCount)));
+	dewtprintk(ioc,
+		   ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
+			    handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
+			    le32_to_cpu(mpi_reply->IOCLogInfo),
+			    le32_to_cpu(mpi_reply->TerminationCount)));
 
 	return _scsih_check_for_pending_tm(ioc, smid);
 }
@@ -3948,10 +4004,9 @@
 	ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
 
-	dewtprintk(ioc, pr_info(MPT3SAS_FMT
-		"EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
-		ioc->name, le16_to_cpu(event), smid,
-		ioc->base_cb_idx));
+	dewtprintk(ioc,
+		   ioc_info(ioc, "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
+			    le16_to_cpu(event), smid, ioc->base_cb_idx));
 	ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
 	memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
 	ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
@@ -3959,7 +4014,7 @@
 	ack_request->EventContext = event_context;
 	ack_request->VF_ID = 0;  /* TODO */
 	ack_request->VP_ID = 0;
-	mpt3sas_base_put_smid_default(ioc, smid);
+	ioc->put_smid_default(ioc, smid);
 }
 
 /**
@@ -3981,21 +4036,21 @@
 	unsigned long flags;
 
 	if (ioc->remove_host) {
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT
-			    "%s: host has been removed\n",
-			     __func__, ioc->name));
+		dewtprintk(ioc,
+			   ioc_info(ioc, "%s: host has been removed\n",
+				    __func__));
 		return;
 	} else if (ioc->pci_error_recovery) {
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT
-			    "%s: host in pci error recovery\n",
-			    __func__, ioc->name));
+		dewtprintk(ioc,
+			   ioc_info(ioc, "%s: host in pci error recovery\n",
+				    __func__));
 		return;
 	}
 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
 	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT
-		    "%s: host is not operational\n",
-		    __func__, ioc->name));
+		dewtprintk(ioc,
+			   ioc_info(ioc, "%s: host is not operational\n",
+				    __func__));
 		return;
 	}
 
@@ -4007,16 +4062,15 @@
 	ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx;
 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
 
-	dewtprintk(ioc, pr_info(MPT3SAS_FMT
-	    "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
-	    ioc->name, handle, smid,
-	    ioc->tm_sas_control_cb_idx));
+	dewtprintk(ioc,
+		   ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
+			    handle, smid, ioc->tm_sas_control_cb_idx));
 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
 	memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
 	mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
 	mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
 	mpi_request->DevHandle = cpu_to_le16(handle);
-	mpt3sas_base_put_smid_default(ioc, smid);
+	ioc->put_smid_default(ioc, smid);
 }
 
 /**
@@ -4171,8 +4225,8 @@
 		    MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
 			if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
 			    expander_handle) {
-				dewtprintk(ioc, pr_info(MPT3SAS_FMT
-				    "setting ignoring flag\n", ioc->name));
+				dewtprintk(ioc,
+					   ioc_info(ioc, "setting ignoring flag\n"));
 				fw_event->ignore = 1;
 			}
 		}
@@ -4243,9 +4297,8 @@
 		    MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
 			if (le16_to_cpu(local_event_data->SwitchDevHandle) ==
 				switch_handle) {
-				dewtprintk(ioc, pr_info(MPT3SAS_FMT
-					"setting ignoring flag for switch event\n",
-					ioc->name));
+				dewtprintk(ioc,
+					   ioc_info(ioc, "setting ignoring flag for switch event\n"));
 				fw_event->ignore = 1;
 			}
 		}
@@ -4274,10 +4327,9 @@
 		sas_target_priv_data =
 		    raid_device->starget->hostdata;
 		sas_target_priv_data->deleted = 1;
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT
-		    "setting delete flag: handle(0x%04x), "
-		    "wwid(0x%016llx)\n", ioc->name, handle,
-		    (unsigned long long) raid_device->wwid));
+		dewtprintk(ioc,
+			   ioc_info(ioc, "setting delete flag: handle(0x%04x), wwid(0x%016llx)\n",
+				    handle, (u64)raid_device->wwid));
 	}
 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
 }
@@ -4379,9 +4431,9 @@
 			INIT_LIST_HEAD(&delayed_tr->list);
 			delayed_tr->handle = handle;
 			list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
-			dewtprintk(ioc, pr_info(MPT3SAS_FMT
-			    "DELAYED:tr:handle(0x%04x), (open)\n", ioc->name,
-			    handle));
+			dewtprintk(ioc,
+				   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
+					    handle));
 		} else
 			_scsih_tm_tr_send(ioc, handle);
 	}
@@ -4424,15 +4476,14 @@
 	Mpi2EventDataTemperature_t *event_data)
 {
 	if (ioc->temp_sensors_count >= event_data->SensorNum) {
-		pr_err(MPT3SAS_FMT "Temperature Threshold flags %s%s%s%s"
-		  " exceeded for Sensor: %d !!!\n", ioc->name,
-		  ((le16_to_cpu(event_data->Status) & 0x1) == 1) ? "0 " : " ",
-		  ((le16_to_cpu(event_data->Status) & 0x2) == 2) ? "1 " : " ",
-		  ((le16_to_cpu(event_data->Status) & 0x4) == 4) ? "2 " : " ",
-		  ((le16_to_cpu(event_data->Status) & 0x8) == 8) ? "3 " : " ",
-		  event_data->SensorNum);
-		pr_err(MPT3SAS_FMT "Current Temp In Celsius: %d\n",
-			ioc->name, event_data->CurrentTemperature);
+		ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n",
+			le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ",
+			le16_to_cpu(event_data->Status) & 0x2 ? "1 " : " ",
+			le16_to_cpu(event_data->Status) & 0x4 ? "2 " : " ",
+			le16_to_cpu(event_data->Status) & 0x8 ? "3 " : " ",
+			event_data->SensorNum);
+		ioc_err(ioc, "Current Temp In Celsius: %d\n",
+			event_data->CurrentTemperature);
 	}
 }
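
The reworked message decodes bits 0-3 of the Status field as one flag per temperature threshold; for example, a Status of 0x5 (bits 0 and 2 set) reports thresholds "0" and "2" as exceeded for the given sensor.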
 
@@ -4480,8 +4531,7 @@
 			scmd->result = DID_RESET << 16;
 		scmd->scsi_done(scmd);
 	}
-	dtmprintk(ioc, pr_info(MPT3SAS_FMT "completing %d cmds\n",
-	    ioc->name, count));
+	dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count));
 }
 
 /**
@@ -4611,7 +4661,7 @@
 		return 0;
 	}
 
-	if (ioc->pci_error_recovery || ioc->remove_host) {
+	if (!(_scsih_allow_scmd_to_device(ioc, scmd))) {
 		scmd->result = DID_NO_CONNECT << 16;
 		scmd->scsi_done(scmd);
 		return 0;
@@ -4648,11 +4698,8 @@
 	 * since we're lockless at this point
 	 */
 	do {
-		if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
-			scmd->result = SAM_STAT_BUSY;
-			scmd->scsi_done(scmd);
-			return 0;
-		}
+		if (test_bit(0, &sas_device_priv_data->ata_command_pending))
+			return SCSI_MLQUEUE_DEVICE_BUSY;
 	} while (_scsih_set_satl_pending(scmd, true));
 
 	if (scmd->sc_data_direction == DMA_FROM_DEVICE)
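
Returning SCSI_MLQUEUE_DEVICE_BUSY above, instead of completing the command with SAM_STAT_BUSY, asks the midlayer to requeue the command and retry it shortly. Commands arriving while an ATA passthrough is outstanding are therefore deferred inside the kernel rather than completed with a BUSY status that is visible to upper layers.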
@@ -4680,8 +4727,7 @@
 
 	smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
 	if (!smid) {
-		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
 		_scsih_set_satl_pending(scmd, false);
 		goto out;
 	}
@@ -4729,12 +4775,12 @@
 		if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
 			mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
 			    MPI25_SCSIIO_IOFLAGS_FAST_PATH);
-			mpt3sas_base_put_smid_fast_path(ioc, smid, handle);
+			ioc->put_smid_fast_path(ioc, smid, handle);
 		} else
 			ioc->put_smid_scsi_io(ioc, smid,
 			    le16_to_cpu(mpi_request->DevHandle));
 	} else
-		mpt3sas_base_put_smid_default(ioc, smid);
+		ioc->put_smid_default(ioc, smid);
 	return 0;
 
  out:
@@ -4919,37 +4965,28 @@
 	scsi_print_command(scmd);
 
 	if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
-		pr_warn(MPT3SAS_FMT "\t%s wwid(0x%016llx)\n", ioc->name,
-		    device_str, (unsigned long long)priv_target->sas_address);
+		ioc_warn(ioc, "\t%s wwid(0x%016llx)\n",
+			 device_str, (u64)priv_target->sas_address);
 	} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
 		pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
 		if (pcie_device) {
-			pr_info(MPT3SAS_FMT "\twwid(0x%016llx), port(%d)\n",
-			    ioc->name,
-			    (unsigned long long)pcie_device->wwid,
-			    pcie_device->port_num);
+			ioc_info(ioc, "\twwid(0x%016llx), port(%d)\n",
+				 (u64)pcie_device->wwid, pcie_device->port_num);
 			if (pcie_device->enclosure_handle != 0)
-				pr_info(MPT3SAS_FMT
-				    "\tenclosure logical id(0x%016llx), "
-				    "slot(%d)\n", ioc->name,
-				    (unsigned long long)
-				    pcie_device->enclosure_logical_id,
-				    pcie_device->slot);
+				ioc_info(ioc, "\tenclosure logical id(0x%016llx), slot(%d)\n",
+					 (u64)pcie_device->enclosure_logical_id,
+					 pcie_device->slot);
 			if (pcie_device->connector_name[0])
-				pr_info(MPT3SAS_FMT
-				    "\tenclosure level(0x%04x),"
-				    "connector name( %s)\n",
-				    ioc->name, pcie_device->enclosure_level,
-				    pcie_device->connector_name);
+				ioc_info(ioc, "\tenclosure level(0x%04x), connector name( %s)\n",
+					 pcie_device->enclosure_level,
+					 pcie_device->connector_name);
 			pcie_device_put(pcie_device);
 		}
 	} else {
 		sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
 		if (sas_device) {
-			pr_warn(MPT3SAS_FMT
-				"\tsas_address(0x%016llx), phy(%d)\n",
-				ioc->name, (unsigned long long)
-			    sas_device->sas_address, sas_device->phy);
+			ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
+				 (u64)sas_device->sas_address, sas_device->phy);
 
 			_scsih_display_enclosure_chassis_info(ioc, sas_device,
 			    NULL, NULL);
@@ -4958,30 +4995,23 @@
 		}
 	}
 
-	pr_warn(MPT3SAS_FMT
-		"\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
-		ioc->name, le16_to_cpu(mpi_reply->DevHandle),
-	    desc_ioc_state, ioc_status, smid);
-	pr_warn(MPT3SAS_FMT
-		"\trequest_len(%d), underflow(%d), resid(%d)\n",
-		ioc->name, scsi_bufflen(scmd), scmd->underflow,
-	    scsi_get_resid(scmd));
-	pr_warn(MPT3SAS_FMT
-		"\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
-		ioc->name, le16_to_cpu(mpi_reply->TaskTag),
-	    le32_to_cpu(mpi_reply->TransferCount), scmd->result);
-	pr_warn(MPT3SAS_FMT
-		"\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
-		ioc->name, desc_scsi_status,
-	    scsi_status, desc_scsi_state, scsi_state);
+	ioc_warn(ioc, "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
+		 le16_to_cpu(mpi_reply->DevHandle),
+		 desc_ioc_state, ioc_status, smid);
+	ioc_warn(ioc, "\trequest_len(%d), underflow(%d), resid(%d)\n",
+		 scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd));
+	ioc_warn(ioc, "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
+		 le16_to_cpu(mpi_reply->TaskTag),
+		 le32_to_cpu(mpi_reply->TransferCount), scmd->result);
+	ioc_warn(ioc, "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
+		 desc_scsi_status, scsi_status, desc_scsi_state, scsi_state);
 
 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
 		struct sense_info data;
 		_scsih_normalize_sense(scmd->sense_buffer, &data);
-		pr_warn(MPT3SAS_FMT
-		  "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
-		  ioc->name, data.skey,
-		  data.asc, data.ascq, le32_to_cpu(mpi_reply->SenseCount));
+		ioc_warn(ioc, "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
+			 data.skey, data.asc, data.ascq,
+			 le32_to_cpu(mpi_reply->SenseCount));
 	}
 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
 		response_info = le32_to_cpu(mpi_reply->ResponseInfo);
@@ -5016,17 +5046,17 @@
 	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
 	if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
 	    &mpi_request)) != 0) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name,
-		__FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		goto out;
 	}
 	sas_device->pfa_led_on = 1;
 
 	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT
-			"enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
-			ioc->name, le16_to_cpu(mpi_reply.IOCStatus),
-		    le32_to_cpu(mpi_reply.IOCLogInfo)));
+		dewtprintk(ioc,
+			   ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
+				    le16_to_cpu(mpi_reply.IOCStatus),
+				    le32_to_cpu(mpi_reply.IOCLogInfo)));
 		goto out;
 	}
 out:
@@ -5056,16 +5086,16 @@
 	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
 	if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
 		&mpi_request)) != 0) {
-		printk(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name,
-		__FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return;
 	}
 
 	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
-		dewtprintk(ioc, printk(MPT3SAS_FMT
-		 "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
-		 ioc->name, le16_to_cpu(mpi_reply.IOCStatus),
-		 le32_to_cpu(mpi_reply.IOCLogInfo)));
+		dewtprintk(ioc,
+			   ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
+				    le16_to_cpu(mpi_reply.IOCStatus),
+				    le32_to_cpu(mpi_reply.IOCLogInfo)));
 		return;
 	}
 }
@@ -5133,8 +5163,8 @@
 	     sizeof(Mpi2EventDataSasDeviceStatusChange_t);
 	event_reply = kzalloc(sz, GFP_KERNEL);
 	if (!event_reply) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		goto out;
 	}
 
@@ -5221,6 +5251,7 @@
 	     ((ioc_status & MPI2_IOCSTATUS_MASK)
 	      != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
 		st->direct_io = 0;
+		st->scmd = scmd;
 		memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
 		mpi_request->DevHandle =
 		    cpu_to_le16(sas_device_priv_data->sas_target->handle);
@@ -5424,16 +5455,16 @@
 	u16 attached_handle;
 	u8 link_rate;
 
-	dtmprintk(ioc, pr_info(MPT3SAS_FMT
-	    "updating handles for sas_host(0x%016llx)\n",
-	    ioc->name, (unsigned long long)ioc->sas_hba.sas_address));
+	dtmprintk(ioc,
+		  ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n",
+			   (u64)ioc->sas_hba.sas_address));
 
 	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
 	    * sizeof(Mpi2SasIOUnit0PhyData_t));
 	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
 	if (!sas_iounit_pg0) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return;
 	}
 
@@ -5483,15 +5514,15 @@
 
 	mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
 	if (!num_phys) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return;
 	}
 	ioc->sas_hba.phy = kcalloc(num_phys,
 	    sizeof(struct _sas_phy), GFP_KERNEL);
 	if (!ioc->sas_hba.phy) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		goto out;
 	}
 	ioc->sas_hba.num_phys = num_phys;
@@ -5501,21 +5532,21 @@
 	    sizeof(Mpi2SasIOUnit0PhyData_t));
 	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
 	if (!sas_iounit_pg0) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return;
 	}
 	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
 	    sas_iounit_pg0, sz))) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		goto out;
 	}
 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 	    MPI2_IOCSTATUS_MASK;
 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		goto out;
 	}
 
@@ -5524,21 +5555,21 @@
 	    sizeof(Mpi2SasIOUnit1PhyData_t));
 	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
 	if (!sas_iounit_pg1) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		goto out;
 	}
 	if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
 	    sas_iounit_pg1, sz))) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		goto out;
 	}
 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 	    MPI2_IOCSTATUS_MASK;
 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		goto out;
 	}
 
@@ -5557,15 +5588,15 @@
 	for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
 		if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
 		    i))) {
-			pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-			    ioc->name, __FILE__, __LINE__, __func__);
+			ioc_err(ioc, "failure at %s:%d/%s()!\n",
+				__FILE__, __LINE__, __func__);
 			goto out;
 		}
 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 		    MPI2_IOCSTATUS_MASK;
 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-			pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-			    ioc->name, __FILE__, __LINE__, __func__);
+			ioc_err(ioc, "failure at %s:%d/%s()!\n",
+				__FILE__, __LINE__, __func__);
 			goto out;
 		}
 
@@ -5579,18 +5610,17 @@
 	}
 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		goto out;
 	}
 	ioc->sas_hba.enclosure_handle =
 	    le16_to_cpu(sas_device_pg0.EnclosureHandle);
 	ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
-	pr_info(MPT3SAS_FMT
-		"host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
-		ioc->name, ioc->sas_hba.handle,
-	    (unsigned long long) ioc->sas_hba.sas_address,
-	    ioc->sas_hba.num_phys) ;
+	ioc_info(ioc, "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
+		 ioc->sas_hba.handle,
+		 (u64)ioc->sas_hba.sas_address,
+		 ioc->sas_hba.num_phys);
 
 	if (ioc->sas_hba.enclosure_handle) {
 		if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
@@ -5639,16 +5669,16 @@
 
 	if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
 	    MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return -1;
 	}
 
 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 	    MPI2_IOCSTATUS_MASK;
 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return -1;
 	}
 
@@ -5656,8 +5686,8 @@
 	parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
 	if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
 	    != 0) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return -1;
 	}
 	if (sas_address_parent != ioc->sas_hba.sas_address) {
@@ -5684,8 +5714,8 @@
 	sas_expander = kzalloc(sizeof(struct _sas_node),
 	    GFP_KERNEL);
 	if (!sas_expander) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return -1;
 	}
 
@@ -5694,18 +5724,17 @@
 	sas_expander->sas_address_parent = sas_address_parent;
 	sas_expander->sas_address = sas_address;
 
-	pr_info(MPT3SAS_FMT "expander_add: handle(0x%04x)," \
-	    " parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n", ioc->name,
-	    handle, parent_handle, (unsigned long long)
-	    sas_expander->sas_address, sas_expander->num_phys);
+	ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
+		 handle, parent_handle,
+		 (u64)sas_expander->sas_address, sas_expander->num_phys);
 
 	if (!sas_expander->num_phys)
 		goto out_fail;
 	sas_expander->phy = kcalloc(sas_expander->num_phys,
 	    sizeof(struct _sas_phy), GFP_KERNEL);
 	if (!sas_expander->phy) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		rc = -1;
 		goto out_fail;
 	}
@@ -5714,8 +5743,8 @@
 	mpt3sas_port = mpt3sas_transport_port_add(ioc, handle,
 	    sas_address_parent);
 	if (!mpt3sas_port) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		rc = -1;
 		goto out_fail;
 	}
@@ -5724,8 +5753,8 @@
 	for (i = 0 ; i < sas_expander->num_phys ; i++) {
 		if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
 		    &expander_pg1, i, handle))) {
-			pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-			    ioc->name, __FILE__, __LINE__, __func__);
+			ioc_err(ioc, "failure at %s:%d/%s()!\n",
+				__FILE__, __LINE__, __func__);
 			rc = -1;
 			goto out_fail;
 		}
@@ -5735,8 +5764,8 @@
 		if ((mpt3sas_transport_add_expander_phy(ioc,
 		    &sas_expander->phy[i], expander_pg1,
 		    sas_expander->parent_dev))) {
-			pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-			    ioc->name, __FILE__, __LINE__, __func__);
+			ioc_err(ioc, "failure at %s:%d/%s()!\n",
+				__FILE__, __LINE__, __func__);
 			rc = -1;
 			goto out_fail;
 		}
@@ -5883,9 +5912,8 @@
 	if (!rc)
 		return 0;
 
-	pr_err(MPT3SAS_FMT
-		"discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
-		ioc->name, desc, (unsigned long long)sas_address, handle);
+	ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
+		desc, (u64)sas_address, handle);
 	return rc;
 }
 
@@ -5979,9 +6007,8 @@
 	/* check if device is present */
 	if (!(le16_to_cpu(sas_device_pg0.Flags) &
 	    MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
-		pr_err(MPT3SAS_FMT
-			"device is not present handle(0x%04x), flags!!!\n",
-			ioc->name, handle);
+		ioc_err(ioc, "device is not present handle(0x%04x), flags!!!\n",
+			handle);
 		goto out_unlock;
 	}
 
@@ -6028,16 +6055,16 @@
 
 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return -1;
 	}
 
 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 	    MPI2_IOCSTATUS_MASK;
 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return -1;
 	}
 
@@ -6051,8 +6078,8 @@
 	/* check if device is present */
 	if (!(le16_to_cpu(sas_device_pg0.Flags) &
 	    MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
-		pr_err(MPT3SAS_FMT "device is not present handle(0x04%x)!!!\n",
-			ioc->name, handle);
+		ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
+			handle);
 		return -1;
 	}
 
@@ -6074,16 +6101,15 @@
 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
 			    le16_to_cpu(sas_device_pg0.EnclosureHandle));
 		if (enclosure_dev == NULL)
-			pr_info(MPT3SAS_FMT "Enclosure handle(0x%04x)"
-			    "doesn't match with enclosure device!\n",
-			    ioc->name, sas_device_pg0.EnclosureHandle);
+			ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
+				 sas_device_pg0.EnclosureHandle);
 	}
 
 	sas_device = kzalloc(sizeof(struct _sas_device),
 	    GFP_KERNEL);
 	if (!sas_device) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return 0;
 	}
 
@@ -6092,8 +6118,8 @@
 	if (_scsih_get_sas_address(ioc,
 	    le16_to_cpu(sas_device_pg0.ParentDevHandle),
 	    &sas_device->sas_address_parent) != 0)
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 	sas_device->enclosure_handle =
 	    le16_to_cpu(sas_device_pg0.EnclosureHandle);
 	if (sas_device->enclosure_handle != 0)
@@ -6158,11 +6184,10 @@
 		sas_device->pfa_led_on = 0;
 	}
 
-	dewtprintk(ioc, pr_info(MPT3SAS_FMT
-		"%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
-		ioc->name, __func__,
-	    sas_device->handle, (unsigned long long)
-	    sas_device->sas_address));
+	dewtprintk(ioc,
+		   ioc_info(ioc, "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
+			    __func__,
+			    sas_device->handle, (u64)sas_device->sas_address));
 
 	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
 	    NULL, NULL));
@@ -6180,18 +6205,15 @@
 		    sas_device->sas_address,
 		    sas_device->sas_address_parent);
 
-	pr_info(MPT3SAS_FMT
-		"removing handle(0x%04x), sas_addr(0x%016llx)\n",
-		ioc->name, sas_device->handle,
-	    (unsigned long long) sas_device->sas_address);
+	ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
+		 sas_device->handle, (u64)sas_device->sas_address);
 
 	_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
 
-	dewtprintk(ioc, pr_info(MPT3SAS_FMT
-		"%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
-		ioc->name, __func__,
-		sas_device->handle, (unsigned long long)
-		sas_device->sas_address));
+	dewtprintk(ioc,
+		   ioc_info(ioc, "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
+			    __func__,
+			    sas_device->handle, (u64)sas_device->sas_address));
 	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
 	    NULL, NULL));
 }
@@ -6231,8 +6253,7 @@
 		status_str = "unknown status";
 		break;
 	}
-	pr_info(MPT3SAS_FMT "sas topology change: (%s)\n",
-	    ioc->name, status_str);
+	ioc_info(ioc, "sas topology change: (%s)\n", status_str);
 	pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \
 	    "start_phy(%02d), count(%d)\n",
 	    le16_to_cpu(event_data->ExpanderDevHandle),
@@ -6309,8 +6330,7 @@
 		_scsih_sas_host_refresh(ioc);
 
 	if (fw_event->ignore) {
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT
-			"ignoring expander event\n", ioc->name));
+		dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n"));
 		return 0;
 	}
 
@@ -6339,8 +6359,8 @@
 	/* handle siblings events */
 	for (i = 0; i < event_data->NumEntries; i++) {
 		if (fw_event->ignore) {
-			dewtprintk(ioc, pr_info(MPT3SAS_FMT
-				"ignoring expander event\n", ioc->name));
+			dewtprintk(ioc,
+				   ioc_info(ioc, "ignoring expander event\n"));
 			return 0;
 		}
 		if (ioc->remove_host || ioc->pci_error_recovery)
@@ -6464,38 +6484,30 @@
 		reason_str = "unknown reason";
 		break;
 	}
-	pr_info(MPT3SAS_FMT "device status change: (%s)\n"
-	    "\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
-	    ioc->name, reason_str, le16_to_cpu(event_data->DevHandle),
-	    (unsigned long long)le64_to_cpu(event_data->SASAddress),
-	    le16_to_cpu(event_data->TaskTag));
+	ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
+		 reason_str, le16_to_cpu(event_data->DevHandle),
+		 (u64)le64_to_cpu(event_data->SASAddress),
+		 le16_to_cpu(event_data->TaskTag));
 	if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
-		pr_info(MPT3SAS_FMT ", ASC(0x%x), ASCQ(0x%x)\n", ioc->name,
-		    event_data->ASC, event_data->ASCQ);
-	pr_info("\n");
+		pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
+			event_data->ASC, event_data->ASCQ);
+	pr_cont("\n");
 }
 
 /**
  * _scsih_sas_device_status_change_event - handle device status change
  * @ioc: per adapter object
- * @fw_event: The fw_event_work object
+ * @event_data: The fw event
  * Context: user.
  */
 static void
 _scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
-	struct fw_event_work *fw_event)
+	Mpi2EventDataSasDeviceStatusChange_t *event_data)
 {
 	struct MPT3SAS_TARGET *target_priv_data;
 	struct _sas_device *sas_device;
 	u64 sas_address;
 	unsigned long flags;
-	Mpi2EventDataSasDeviceStatusChange_t *event_data =
-		(Mpi2EventDataSasDeviceStatusChange_t *)
-		fw_event->event_data;
-
-	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
-		_scsih_sas_device_status_change_event_debug(ioc,
-		     event_data);
 
 	/* In MPI Revision K (0xC), the internal device reset complete was
 	 * implemented, so avoid setting tm_busy flag for older firmware.
@@ -6527,6 +6539,12 @@
 	else
 		target_priv_data->tm_busy = 0;
 
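+	/*
+	 * With event-work debugging enabled, report whether tm_busy was
+	 * just set or cleared for this device handle.
+	 */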
+	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+		ioc_info(ioc,
+		    "%s tm_busy flag for handle(0x%04x)\n",
+		    (target_priv_data->tm_busy == 1) ? "Enable" : "Disable",
+		    target_priv_data->handle);
+
 out:
 	if (sas_device)
 		sas_device_put(sas_device);
@@ -6561,6 +6579,11 @@
 		break;
 	case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED:
 		desc = "PCIe device blocked";
+		ioc_info(ioc,
+		    "Device with Access Status (%s): wwid(0x%016llx), "
+		    "handle(0x%04x)\n ll only be added to the internal list",
+		    desc, (u64)wwid, handle);
+		rc = 0;
 		break;
 	case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED:
 		desc = "PCIe device mem space access failed";
@@ -6605,20 +6628,16 @@
 		desc = "nvme failure status";
 		break;
 	default:
-		pr_err(MPT3SAS_FMT
-		    " NVMe discovery error(0x%02x): wwid(0x%016llx),"
-			"handle(0x%04x)\n", ioc->name, access_status,
-			(unsigned long long)wwid, handle);
+		ioc_err(ioc, "NVMe discovery error(0x%02x): wwid(0x%016llx), handle(0x%04x)\n",
+			access_status, (u64)wwid, handle);
 		return rc;
 	}
 
 	if (!rc)
 		return rc;
 
-	pr_info(MPT3SAS_FMT
-		"NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
-			ioc->name, desc,
-			(unsigned long long)wwid, handle);
+	ioc_info(ioc, "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
+		 desc, (u64)wwid, handle);
 	return rc;
 }
 
@@ -6634,22 +6653,22 @@
 {
 	struct MPT3SAS_TARGET *sas_target_priv_data;
 
-	dewtprintk(ioc, pr_info(MPT3SAS_FMT
-	    "%s: enter: handle(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
-	    pcie_device->handle, (unsigned long long)
-	    pcie_device->wwid));
+	dewtprintk(ioc,
+		   ioc_info(ioc, "%s: enter: handle(0x%04x), wwid(0x%016llx)\n",
+			    __func__,
+			    pcie_device->handle, (u64)pcie_device->wwid));
 	if (pcie_device->enclosure_handle != 0)
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT
-		    "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
-		    ioc->name, __func__,
-		    (unsigned long long)pcie_device->enclosure_logical_id,
-		    pcie_device->slot));
+		dewtprintk(ioc,
+			   ioc_info(ioc, "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
+				    __func__,
+				    (u64)pcie_device->enclosure_logical_id,
+				    pcie_device->slot));
 	if (pcie_device->connector_name[0] != '\0')
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT
-		    "%s: enter: enclosure level(0x%04x), connector name( %s)\n",
-		    ioc->name, __func__,
-		    pcie_device->enclosure_level,
-		    pcie_device->connector_name));
+		dewtprintk(ioc,
+			   ioc_info(ioc, "%s: enter: enclosure level(0x%04x), connector name(%s)\n",
+				    __func__,
+				    pcie_device->enclosure_level,
+				    pcie_device->connector_name));
 
 	if (pcie_device->starget && pcie_device->starget->hostdata) {
 		sas_target_priv_data = pcie_device->starget->hostdata;
@@ -6658,39 +6677,36 @@
 		sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
 	}
 
-	pr_info(MPT3SAS_FMT
-		"removing handle(0x%04x), wwid (0x%016llx)\n",
-		ioc->name, pcie_device->handle,
-		(unsigned long long) pcie_device->wwid);
+	ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
+		 pcie_device->handle, (u64)pcie_device->wwid);
 	if (pcie_device->enclosure_handle != 0)
-		pr_info(MPT3SAS_FMT
-		    "removing : enclosure logical id(0x%016llx), slot(%d)\n",
-		    ioc->name,
-		    (unsigned long long)pcie_device->enclosure_logical_id,
-		    pcie_device->slot);
+		ioc_info(ioc, "removing : enclosure logical id(0x%016llx), slot(%d)\n",
+			 (u64)pcie_device->enclosure_logical_id,
+			 pcie_device->slot);
 	if (pcie_device->connector_name[0] != '\0')
-		pr_info(MPT3SAS_FMT
-		    "removing: enclosure level(0x%04x), connector name( %s)\n",
-		    ioc->name, pcie_device->enclosure_level,
-		    pcie_device->connector_name);
+		ioc_info(ioc, "removing: enclosure level(0x%04x), connector name( %s)\n",
+			 pcie_device->enclosure_level,
+			 pcie_device->connector_name);
 
-	if (pcie_device->starget)
+	if (pcie_device->starget && (pcie_device->access_status !=
+				MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED))
 		scsi_remove_target(&pcie_device->starget->dev);
-	dewtprintk(ioc, pr_info(MPT3SAS_FMT
-	    "%s: exit: handle(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
-	    pcie_device->handle, (unsigned long long)
-	    pcie_device->wwid));
+	dewtprintk(ioc,
+		   ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n",
+			    __func__,
+			    pcie_device->handle, (u64)pcie_device->wwid));
 	if (pcie_device->enclosure_handle != 0)
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT
-			"%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
-			ioc->name, __func__,
-			(unsigned long long)pcie_device->enclosure_logical_id,
-			pcie_device->slot));
+		dewtprintk(ioc,
+			   ioc_info(ioc, "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
+				    __func__,
+				    (u64)pcie_device->enclosure_logical_id,
+				    pcie_device->slot));
 	if (pcie_device->connector_name[0] != '\0')
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT
-		    "%s: exit: enclosure level(0x%04x), connector name( %s)\n",
-		    ioc->name, __func__, pcie_device->enclosure_level,
-		    pcie_device->connector_name));
+		dewtprintk(ioc,
+			   ioc_info(ioc, "%s: exit: enclosure level(0x%04x), connector name( %s)\n",
+				    __func__,
+				    pcie_device->enclosure_level,
+				    pcie_device->connector_name));
 
 	kfree(pcie_device->serial_number);
 }
@@ -6724,7 +6740,7 @@
 
 	/* check if this is end device */
 	device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
-	if (!(_scsih_is_nvme_device(device_info)))
+	if (!(_scsih_is_nvme_pciescsi_device(device_info)))
 		return;
 
 	wwid = le64_to_cpu(pcie_device_pg0.WWID);
@@ -6739,6 +6755,7 @@
 	if (unlikely(pcie_device->handle != handle)) {
 		starget = pcie_device->starget;
 		sas_target_priv_data = starget->hostdata;
+		pcie_device->access_status = pcie_device_pg0.AccessStatus;
 		starget_printk(KERN_INFO, starget,
 		    "handle changed from(0x%04x) to (0x%04x)!!!\n",
 		    pcie_device->handle, handle);
@@ -6760,9 +6777,8 @@
 	/* check if device is present */
 	if (!(le32_to_cpu(pcie_device_pg0.Flags) &
 	    MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
-		pr_info(MPT3SAS_FMT
-		    "device is not present handle(0x%04x), flags!!!\n",
-		    ioc->name, handle);
+		ioc_info(ioc, "device is not present handle(0x%04x), flags!!!\n",
+			 handle);
 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
 		pcie_device_put(pcie_device);
 		return;
@@ -6806,16 +6822,15 @@
 
 	if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
 	    &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return 0;
 	}
 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 	    MPI2_IOCSTATUS_MASK;
 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-		pr_err(MPT3SAS_FMT
-		    "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return 0;
 	}
 
@@ -6825,9 +6840,8 @@
 	/* check if device is present */
 	if (!(le32_to_cpu(pcie_device_pg0.Flags) &
 		MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
-		pr_err(MPT3SAS_FMT
-		    "device is not present handle(0x04%x)!!!\n",
-		    ioc->name, handle);
+		ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
+			handle);
 		return 0;
 	}
 
@@ -6836,7 +6850,8 @@
 	    pcie_device_pg0.AccessStatus))
 		return 0;
 
-	if (!(_scsih_is_nvme_device(le32_to_cpu(pcie_device_pg0.DeviceInfo))))
+	if (!(_scsih_is_nvme_pciescsi_device(le32_to_cpu
+	    (pcie_device_pg0.DeviceInfo))))
 		return 0;
 
 	pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid);
@@ -6846,10 +6861,35 @@
 		return 0;
 	}
 
+	/* PCIe Device Page 2 contains read-only information about a
+	 * specific NVMe device; therefore, this page is only
+	 * valid for NVMe devices and is skipped for PCIe-SCSI devices.
+	 */
+	if (!(mpt3sas_scsih_is_pcie_scsi_device(
+		le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
+		if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
+		    &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
+		    handle)) {
+			ioc_err(ioc,
+			    "failure at %s:%d/%s()!\n", __FILE__,
+			    __LINE__, __func__);
+			return 0;
+		}
+
+		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+					MPI2_IOCSTATUS_MASK;
+		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+			ioc_err(ioc,
+			    "failure at %s:%d/%s()!\n", __FILE__,
+			    __LINE__, __func__);
+			return 0;
+		}
+	}
+
 	pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
 	if (!pcie_device) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-			ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return 0;
 	}
 
@@ -6857,6 +6897,7 @@
 	pcie_device->id = ioc->pcie_target_id++;
 	pcie_device->channel = PCIE_CHANNEL;
 	pcie_device->handle = handle;
+	pcie_device->access_status = pcie_device_pg0.AccessStatus;
 	pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
 	pcie_device->wwid = wwid;
 	pcie_device->port_num = pcie_device_pg0.PortNum;
@@ -6888,27 +6929,16 @@
 			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
 	}
 	/* TODO -- Add device name once FW supports it */
-	if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
-		&pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-				ioc->name, __FILE__, __LINE__, __func__);
-		kfree(pcie_device);
-		return 0;
-	}
-
-	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
-	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-			ioc->name, __FILE__, __LINE__, __func__);
-		kfree(pcie_device);
-		return 0;
-	}
-	pcie_device->nvme_mdts =
-		le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
-	if (pcie_device_pg2.ControllerResetTO)
-		pcie_device->reset_timeout =
-			pcie_device_pg2.ControllerResetTO;
-	else
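+	/*
+	 * nvme_mdts and the reset timeout come from Device Page 2, which
+	 * was read above only for NVMe devices; PCIe-SCSI devices simply
+	 * get the default reset timeout.
+	 */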
+	if (!(mpt3sas_scsih_is_pcie_scsi_device(
+	    le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
+		pcie_device->nvme_mdts =
+		    le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
+		if (pcie_device_pg2.ControllerResetTO)
+			pcie_device->reset_timeout =
+			    pcie_device_pg2.ControllerResetTO;
+		else
+			pcie_device->reset_timeout = 30;
+	} else
 		pcie_device->reset_timeout = 30;
 
 	if (ioc->wait_for_discovery_to_complete)
@@ -6956,8 +6986,7 @@
 		status_str = "unknown status";
 		break;
 	}
-	pr_info(MPT3SAS_FMT "pcie topology change: (%s)\n",
-		ioc->name, status_str);
+	ioc_info(ioc, "pcie topology change: (%s)\n", status_str);
 	pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x)"
 		"start_port(%02d), count(%d)\n",
 		le16_to_cpu(event_data->SwitchDevHandle),
@@ -7030,16 +7059,15 @@
 		return;
 
 	if (fw_event->ignore) {
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT "ignoring switch event\n",
-			ioc->name));
+		dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n"));
 		return;
 	}
 
 	/* handle siblings events */
 	for (i = 0; i < event_data->NumEntries; i++) {
 		if (fw_event->ignore) {
-			dewtprintk(ioc, pr_info(MPT3SAS_FMT
-				"ignoring switch event\n", ioc->name));
+			dewtprintk(ioc,
+				   ioc_info(ioc, "ignoring switch event\n"));
 			return;
 		}
 		if (ioc->remove_host || ioc->pci_error_recovery)
@@ -7084,9 +7112,9 @@
 			if (!test_bit(handle, ioc->pend_os_device_add))
 				break;
 
-			dewtprintk(ioc, pr_info(MPT3SAS_FMT
-				"handle(0x%04x) device not found: convert "
-				"event to a device add\n", ioc->name, handle));
+			dewtprintk(ioc,
+				   ioc_info(ioc, "handle(0x%04x) device not found: convert event to a device add\n",
+					    handle));
 			event_data->PortEntry[i].PortStatus &= 0xF0;
 			event_data->PortEntry[i].PortStatus |=
 				MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
@@ -7169,15 +7197,15 @@
 		break;
 	}
 
-	pr_info(MPT3SAS_FMT "PCIE device status change: (%s)\n"
-		"\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
-		ioc->name, reason_str, le16_to_cpu(event_data->DevHandle),
-		(unsigned long long)le64_to_cpu(event_data->WWID),
-		le16_to_cpu(event_data->TaskTag));
+	ioc_info(ioc, "PCIE device status change: (%s)\n"
+		 "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
+		 reason_str, le16_to_cpu(event_data->DevHandle),
+		 (u64)le64_to_cpu(event_data->WWID),
+		 le16_to_cpu(event_data->TaskTag));
 	if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
-		pr_info(MPT3SAS_FMT ", ASC(0x%x), ASCQ(0x%x)\n", ioc->name,
+		pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
 			event_data->ASC, event_data->ASCQ);
-	pr_info("\n");
+	pr_cont("\n");
 }
 
 /**
@@ -7255,12 +7283,12 @@
 		break;
 	}
 
-	pr_info(MPT3SAS_FMT "enclosure status change: (%s)\n"
-	    "\thandle(0x%04x), enclosure logical id(0x%016llx)"
-	    " number slots(%d)\n", ioc->name, reason_str,
-	    le16_to_cpu(event_data->EnclosureHandle),
-	    (unsigned long long)le64_to_cpu(event_data->EnclosureLogicalID),
-	    le16_to_cpu(event_data->StartSlot));
+	ioc_info(ioc, "enclosure status change: (%s)\n"
+		 "\thandle(0x%04x), enclosure logical id(0x%016llx) number slots(%d)\n",
+		 reason_str,
+		 le16_to_cpu(event_data->EnclosureHandle),
+		 (u64)le64_to_cpu(event_data->EnclosureLogicalID),
+		 le16_to_cpu(event_data->StartSlot));
 }
 
 /**
@@ -7298,9 +7326,8 @@
 				kzalloc(sizeof(struct _enclosure_node),
 					GFP_KERNEL);
 			if (!enclosure_dev) {
-				pr_info(MPT3SAS_FMT
-					"failure at %s:%d/%s()!\n", ioc->name,
-					__FILE__, __LINE__, __func__);
+				ioc_info(ioc, "failure at %s:%d/%s()!\n",
+					 __FILE__, __LINE__, __func__);
 				return;
 			}
 			rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
@@ -7358,10 +7385,8 @@
 	u8 task_abort_retries;
 
 	mutex_lock(&ioc->tm_cmds.mutex);
-	pr_info(MPT3SAS_FMT
-		"%s: enter: phy number(%d), width(%d)\n",
-		ioc->name, __func__, event_data->PhyNum,
-	     event_data->PortWidth);
+	ioc_info(ioc, "%s: enter: phy number(%d), width(%d)\n",
+		 __func__, event_data->PhyNum, event_data->PortWidth);
 
 	_scsih_block_io_all_device(ioc);
 
@@ -7371,12 +7396,12 @@
 
 	/* sanity checks for retrying this loop */
 	if (max_retries++ == 5) {
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: giving up\n",
-		    ioc->name, __func__));
+		dewtprintk(ioc, ioc_info(ioc, "%s: giving up\n", __func__));
 		goto out;
 	} else if (max_retries > 1)
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: %d retry\n",
-		    ioc->name, __func__, max_retries - 1));
+		dewtprintk(ioc,
+			   ioc_info(ioc, "%s: %d retry\n",
+				    __func__, max_retries - 1));
 
 	termination_count = 0;
 	query_count = 0;
@@ -7443,9 +7468,9 @@
 		task_abort_retries = 0;
  tm_retry:
 		if (task_abort_retries++ == 60) {
-			dewtprintk(ioc, pr_info(MPT3SAS_FMT
-			    "%s: ABORT_TASK: giving up\n", ioc->name,
-			    __func__));
+			dewtprintk(ioc,
+				   ioc_info(ioc, "%s: ABORT_TASK: giving up\n",
+					    __func__));
 			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
 			goto broadcast_aen_retry;
 		}
@@ -7474,9 +7499,10 @@
 	}
 
 	if (ioc->broadcast_aen_pending) {
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT
-			"%s: loop back due to pending AEN\n",
-			ioc->name, __func__));
+		dewtprintk(ioc,
+			   ioc_info(ioc,
+				    "%s: loop back due to pending AEN\n",
+				    __func__));
 		 ioc->broadcast_aen_pending = 0;
 		 goto broadcast_aen_retry;
 	}
@@ -7485,9 +7511,9 @@
 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
  out_no_lock:
 
-	dewtprintk(ioc, pr_info(MPT3SAS_FMT
-	    "%s - exit, query_count = %d termination_count = %d\n",
-	    ioc->name, __func__, query_count, termination_count));
+	dewtprintk(ioc,
+		   ioc_info(ioc, "%s - exit, query_count = %d termination_count = %d\n",
+			    __func__, query_count, termination_count));
 
 	ioc->broadcast_aen_busy = 0;
 	if (!ioc->shost_recovery)
@@ -7509,13 +7535,13 @@
 		(Mpi2EventDataSasDiscovery_t *) fw_event->event_data;
 
 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
-		pr_info(MPT3SAS_FMT "discovery event: (%s)", ioc->name,
-		    (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
-		    "start" : "stop");
+		ioc_info(ioc, "discovery event: (%s)",
+			 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
+			 "start" : "stop");
 		if (event_data->DiscoveryStatus)
-			pr_info("discovery_status(0x%08x)",
-			    le32_to_cpu(event_data->DiscoveryStatus));
-		pr_info("\n");
+			pr_cont("discovery_status(0x%08x)",
+				le32_to_cpu(event_data->DiscoveryStatus));
+		pr_cont("\n");
 	}
 
 	if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
@@ -7545,20 +7571,16 @@
 
 	switch (event_data->ReasonCode) {
 	case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
-		pr_warn(MPT3SAS_FMT "SMP command sent to the expander"
-			"(handle:0x%04x, sas_address:0x%016llx,"
-			"physical_port:0x%02x) has failed",
-			ioc->name, le16_to_cpu(event_data->DevHandle),
-			(unsigned long long)le64_to_cpu(event_data->SASAddress),
-			event_data->PhysicalPort);
+		ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has failed\n",
+			 le16_to_cpu(event_data->DevHandle),
+			 (u64)le64_to_cpu(event_data->SASAddress),
+			 event_data->PhysicalPort);
 		break;
 	case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
-		pr_warn(MPT3SAS_FMT "SMP command sent to the expander"
-			"(handle:0x%04x, sas_address:0x%016llx,"
-			"physical_port:0x%02x) has timed out",
-			ioc->name, le16_to_cpu(event_data->DevHandle),
-			(unsigned long long)le64_to_cpu(event_data->SASAddress),
-			event_data->PhysicalPort);
+		ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has timed out\n",
+			 le16_to_cpu(event_data->DevHandle),
+			 (u64)le64_to_cpu(event_data->SASAddress),
+			 event_data->PhysicalPort);
 		break;
 	default:
 		break;
@@ -7581,11 +7603,10 @@
 	if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
 		return;
 
-	pr_info(MPT3SAS_FMT "pcie enumeration event: (%s) Flag 0x%02x",
-		ioc->name,
-		(event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
-			"started" : "completed",
-		event_data->Flags);
+	ioc_info(ioc, "pcie enumeration event: (%s) Flag 0x%02x",
+		 (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
+		 "started" : "completed",
+		 event_data->Flags);
 	if (event_data->EnumerationStatus)
 		pr_cont("enumeration_status(0x%08x)",
 			le32_to_cpu(event_data->EnumerationStatus));
@@ -7617,8 +7638,7 @@
 	mutex_lock(&ioc->scsih_cmds.mutex);
 
 	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
-		pr_err(MPT3SAS_FMT "%s: scsih_cmd in use\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
 		rc = -EAGAIN;
 		goto out;
 	}
@@ -7626,8 +7646,7 @@
 
 	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
 	if (!smid) {
-		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
 		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
 		rc = -EAGAIN;
 		goto out;
@@ -7641,12 +7660,12 @@
 	mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
 	mpi_request->PhysDiskNum = phys_disk_num;
 
-	dewtprintk(ioc, pr_info(MPT3SAS_FMT "IR RAID_ACTION: turning fast "\
-	    "path on for handle(0x%04x), phys_disk_num (0x%02x)\n", ioc->name,
-	    handle, phys_disk_num));
+	dewtprintk(ioc,
+		   ioc_info(ioc, "IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n",
+			    handle, phys_disk_num));
 
 	init_completion(&ioc->scsih_cmds.done);
-	mpt3sas_base_put_smid_default(ioc, smid);
+	ioc->put_smid_default(ioc, smid);
 	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
 
 	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -7668,15 +7687,13 @@
 			log_info = 0;
 		ioc_status &= MPI2_IOCSTATUS_MASK;
 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-			dewtprintk(ioc, pr_info(MPT3SAS_FMT
-			    "IR RAID_ACTION: failed: ioc_status(0x%04x), "
-			    "loginfo(0x%08x)!!!\n", ioc->name, ioc_status,
-			    log_info));
+			dewtprintk(ioc,
+				   ioc_info(ioc, "IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n",
+					    ioc_status, log_info));
 			rc = -EFAULT;
 		} else
-			dewtprintk(ioc, pr_info(MPT3SAS_FMT
-			    "IR RAID_ACTION: completed successfully\n",
-			    ioc->name));
+			dewtprintk(ioc,
+				   ioc_info(ioc, "IR RAID_ACTION: completed successfully\n"));
 	}
 
  out:
@@ -7721,9 +7738,8 @@
 
 	mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
 	if (!wwid) {
-		pr_err(MPT3SAS_FMT
-		    "failure at %s:%d/%s()!\n", ioc->name,
-		    __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return;
 	}
 
@@ -7736,9 +7752,8 @@
 
 	raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
 	if (!raid_device) {
-		pr_err(MPT3SAS_FMT
-		    "failure at %s:%d/%s()!\n", ioc->name,
-		    __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return;
 	}
 
@@ -7781,9 +7796,8 @@
 			sas_target_priv_data = starget->hostdata;
 			sas_target_priv_data->deleted = 1;
 		}
-		pr_info(MPT3SAS_FMT "removing handle(0x%04x), wwid(0x%016llx)\n",
-			ioc->name,  raid_device->handle,
-		    (unsigned long long) raid_device->wwid);
+		ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
+			 raid_device->handle, (u64)raid_device->wwid);
 		list_del(&raid_device->list);
 		kfree(raid_device);
 	}
@@ -7925,16 +7939,16 @@
 
 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return;
 	}
 
 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 	    MPI2_IOCSTATUS_MASK;
 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return;
 	}
 
@@ -7964,10 +7978,10 @@
 
 	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
 
-	pr_info(MPT3SAS_FMT "raid config change: (%s), elements(%d)\n",
-	    ioc->name, (le32_to_cpu(event_data->Flags) &
-	    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ?
-	    "foreign" : "native", event_data->NumElements);
+	ioc_info(ioc, "raid config change: (%s), elements(%d)\n",
+		 le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG ?
+		 "foreign" : "native",
+		 event_data->NumElements);
 	for (i = 0; i < event_data->NumElements; i++, element++) {
 		switch (element->ReasonCode) {
 		case MPI2_EVENT_IR_CHANGE_RC_ADDED:
@@ -8123,10 +8137,11 @@
 	handle = le16_to_cpu(event_data->VolDevHandle);
 	state = le32_to_cpu(event_data->NewValue);
 	if (!ioc->hide_ir_msg)
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT
-		    "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
-		    ioc->name, __func__,  handle,
-		    le32_to_cpu(event_data->PreviousValue), state));
+		dewtprintk(ioc,
+			   ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
+				    __func__, handle,
+				    le32_to_cpu(event_data->PreviousValue),
+				    state));
 	switch (state) {
 	case MPI2_RAID_VOL_STATE_MISSING:
 	case MPI2_RAID_VOL_STATE_FAILED:
@@ -8146,17 +8161,15 @@
 
 		mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
 		if (!wwid) {
-			pr_err(MPT3SAS_FMT
-			    "failure at %s:%d/%s()!\n", ioc->name,
-			    __FILE__, __LINE__, __func__);
+			ioc_err(ioc, "failure at %s:%d/%s()!\n",
+				__FILE__, __LINE__, __func__);
 			break;
 		}
 
 		raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
 		if (!raid_device) {
-			pr_err(MPT3SAS_FMT
-			    "failure at %s:%d/%s()!\n", ioc->name,
-			    __FILE__, __LINE__, __func__);
+			ioc_err(ioc, "failure at %s:%d/%s()!\n",
+				__FILE__, __LINE__, __func__);
 			break;
 		}
 
@@ -8207,10 +8220,11 @@
 	state = le32_to_cpu(event_data->NewValue);
 
 	if (!ioc->hide_ir_msg)
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT
-		    "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
-		    ioc->name, __func__,  handle,
-		    le32_to_cpu(event_data->PreviousValue), state));
+		dewtprintk(ioc,
+			   ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
+				    __func__, handle,
+				    le32_to_cpu(event_data->PreviousValue),
+				    state));
 
 	switch (state) {
 	case MPI2_RAID_PD_STATE_ONLINE:
@@ -8231,16 +8245,16 @@
 		if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
 		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
 		    handle))) {
-			pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-			    ioc->name, __FILE__, __LINE__, __func__);
+			ioc_err(ioc, "failure at %s:%d/%s()!\n",
+				__FILE__, __LINE__, __func__);
 			return;
 		}
 
 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 		    MPI2_IOCSTATUS_MASK;
 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-			pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-			    ioc->name, __FILE__, __LINE__, __func__);
+			ioc_err(ioc, "failure at %s:%d/%s()!\n",
+				__FILE__, __LINE__, __func__);
 			return;
 		}
 
@@ -8294,11 +8308,10 @@
 	if (!reason_str)
 		return;
 
-	pr_info(MPT3SAS_FMT "raid operational status: (%s)" \
-	    "\thandle(0x%04x), percent complete(%d)\n",
-	    ioc->name, reason_str,
-	    le16_to_cpu(event_data->VolDevHandle),
-	    event_data->PercentComplete);
+	ioc_info(ioc, "raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n",
+		 reason_str,
+		 le16_to_cpu(event_data->VolDevHandle),
+		 event_data->PercentComplete);
 }
 
 /**
@@ -8379,9 +8392,8 @@
 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
 				le16_to_cpu(sas_device_pg0->EnclosureHandle));
 		if (enclosure_dev == NULL)
-			pr_info(MPT3SAS_FMT "Enclosure handle(0x%04x)"
-			    "doesn't match with enclosure device!\n",
-			    ioc->name, sas_device_pg0->EnclosureHandle);
+			ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
+				 sas_device_pg0->EnclosureHandle);
 	}
 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
 	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
@@ -8475,8 +8487,7 @@
 		enclosure_dev =
 			kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL);
 		if (!enclosure_dev) {
-			pr_err(MPT3SAS_FMT
-				"failure at %s:%d/%s()!\n", ioc->name,
+			ioc_err(ioc, "failure at %s:%d/%s()!\n",
 				__FILE__, __LINE__, __func__);
 			return;
 		}
@@ -8513,7 +8524,7 @@
 	u16 handle;
 	u32 device_info;
 
-	pr_info(MPT3SAS_FMT "search for end-devices: start\n", ioc->name);
+	ioc_info(ioc, "search for end-devices: start\n");
 
 	if (list_empty(&ioc->sas_device_list))
 		goto out;
@@ -8534,8 +8545,7 @@
 	}
 
  out:
-	pr_info(MPT3SAS_FMT "search for end-devices: complete\n",
-	    ioc->name);
+	ioc_info(ioc, "search for end-devices: complete\n");
 }
 
 /**
@@ -8560,6 +8570,8 @@
 		if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID))
 		    && (pcie_device->slot == le16_to_cpu(
 		    pcie_device_pg0->Slot))) {
+			pcie_device->access_status =
+					pcie_device_pg0->AccessStatus;
 			pcie_device->responding = 1;
 			starget = pcie_device->starget;
 			if (starget && starget->hostdata) {
@@ -8628,7 +8640,7 @@
 	u16 handle;
 	u32 device_info;
 
-	pr_info(MPT3SAS_FMT "search for end-devices: start\n", ioc->name);
+	ioc_info(ioc, "search for end-devices: start\n");
 
 	if (list_empty(&ioc->pcie_device_list))
 		goto out;
@@ -8640,21 +8652,19 @@
 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 		    MPI2_IOCSTATUS_MASK;
 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-			pr_info(MPT3SAS_FMT "\tbreak from %s: "
-			    "ioc_status(0x%04x), loginfo(0x%08x)\n", ioc->name,
-			    __func__, ioc_status,
-			    le32_to_cpu(mpi_reply.IOCLogInfo));
+			ioc_info(ioc, "\tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n",
+				 __func__, ioc_status,
+				 le32_to_cpu(mpi_reply.IOCLogInfo));
 			break;
 		}
 		handle = le16_to_cpu(pcie_device_pg0.DevHandle);
 		device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
-		if (!(_scsih_is_nvme_device(device_info)))
+		if (!(_scsih_is_nvme_pciescsi_device(device_info)))
 			continue;
 		_scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
 	}
 out:
-	pr_info(MPT3SAS_FMT "search for PCIe end-devices: complete\n",
-	    ioc->name);
+	ioc_info(ioc, "search for PCIe end-devices: complete\n");
 }
 
 /**
@@ -8735,8 +8745,7 @@
 	if (!ioc->ir_firmware)
 		return;
 
-	pr_info(MPT3SAS_FMT "search for raid volumes: start\n",
-	    ioc->name);
+	ioc_info(ioc, "search for raid volumes: start\n");
 
 	if (list_empty(&ioc->raid_device_list))
 		goto out;
@@ -8779,8 +8788,7 @@
 		}
 	}
  out:
-	pr_info(MPT3SAS_FMT "search for responding raid volumes: complete\n",
-		ioc->name);
+	ioc_info(ioc, "search for responding raid volumes: complete\n");
 }
 
 /**
@@ -8852,7 +8860,7 @@
 	u64 sas_address;
 	u16 handle;
 
-	pr_info(MPT3SAS_FMT "search for expanders: start\n", ioc->name);
+	ioc_info(ioc, "search for expanders: start\n");
 
 	if (list_empty(&ioc->sas_expander_list))
 		goto out;
@@ -8875,7 +8883,7 @@
 	}
 
  out:
-	pr_info(MPT3SAS_FMT "search for expanders: complete\n", ioc->name);
+	ioc_info(ioc, "search for expanders: complete\n");
 }
 
 /**
@@ -8893,12 +8901,10 @@
 	unsigned long flags;
 	LIST_HEAD(head);
 
-	pr_info(MPT3SAS_FMT "removing unresponding devices: start\n",
-	    ioc->name);
+	ioc_info(ioc, "removing unresponding devices: start\n");
 
 	/* removing unresponding end devices */
-	pr_info(MPT3SAS_FMT "removing unresponding devices: end-devices\n",
-	    ioc->name);
+	ioc_info(ioc, "removing unresponding devices: end-devices\n");
 	/*
 	 * Iterate, pulling off devices marked as non-responding. We become the
 	 * owner for the reference the list had on any object we prune.
@@ -8922,9 +8928,7 @@
 		sas_device_put(sas_device);
 	}
 
-	pr_info(MPT3SAS_FMT
-		" Removing unresponding devices: pcie end-devices\n"
-		, ioc->name);
+	ioc_info(ioc, "Removing unresponding devices: pcie end-devices\n");
 	INIT_LIST_HEAD(&head);
 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
 	list_for_each_entry_safe(pcie_device, pcie_device_next,
@@ -8944,8 +8948,7 @@
 
 	/* removing unresponding volumes */
 	if (ioc->ir_firmware) {
-		pr_info(MPT3SAS_FMT "removing unresponding devices: volumes\n",
-			ioc->name);
+		ioc_info(ioc, "removing unresponding devices: volumes\n");
 		list_for_each_entry_safe(raid_device, raid_device_next,
 		    &ioc->raid_device_list, list) {
 			if (!raid_device->responding)
@@ -8957,8 +8960,7 @@
 	}
 
 	/* removing unresponding expanders */
-	pr_info(MPT3SAS_FMT "removing unresponding devices: expanders\n",
-	    ioc->name);
+	ioc_info(ioc, "removing unresponding devices: expanders\n");
 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
 	INIT_LIST_HEAD(&tmp_list);
 	list_for_each_entry_safe(sas_expander, sas_expander_next,
@@ -8974,8 +8976,7 @@
 		_scsih_expander_node_remove(ioc, sas_expander);
 	}
 
-	pr_info(MPT3SAS_FMT "removing unresponding devices: complete\n",
-	    ioc->name);
+	ioc_info(ioc, "removing unresponding devices: complete\n");
 
 	/* unblock devices */
 	_scsih_ublock_io_all_device(ioc);
@@ -8992,8 +8993,8 @@
 	for (i = 0 ; i < sas_expander->num_phys ; i++) {
 		if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
 		    &expander_pg1, i, handle))) {
-			pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-			    ioc->name, __FILE__, __LINE__, __func__);
+			ioc_err(ioc, "failure at %s:%d/%s()!\n",
+				__FILE__, __LINE__, __func__);
 			return;
 		}
 
@@ -9029,11 +9030,11 @@
 	u8 retry_count;
 	unsigned long flags;
 
-	pr_info(MPT3SAS_FMT "scan devices: start\n", ioc->name);
+	ioc_info(ioc, "scan devices: start\n");
 
 	_scsih_sas_host_refresh(ioc);
 
-	pr_info(MPT3SAS_FMT "\tscan devices: expanders start\n", ioc->name);
+	ioc_info(ioc, "\tscan devices: expanders start\n");
 
 	/* expanders */
 	handle = 0xFFFF;
@@ -9042,10 +9043,8 @@
 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 		    MPI2_IOCSTATUS_MASK;
 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-			pr_info(MPT3SAS_FMT "\tbreak from expander scan: " \
-			    "ioc_status(0x%04x), loginfo(0x%08x)\n",
-			    ioc->name, ioc_status,
-			    le32_to_cpu(mpi_reply.IOCLogInfo));
+			ioc_info(ioc, "\tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
+				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
 			break;
 		}
 		handle = le16_to_cpu(expander_pg0.DevHandle);
@@ -9057,25 +9056,22 @@
 			_scsih_refresh_expander_links(ioc, expander_device,
 			    handle);
 		else {
-			pr_info(MPT3SAS_FMT "\tBEFORE adding expander: " \
-			    "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
-			    handle, (unsigned long long)
-			    le64_to_cpu(expander_pg0.SASAddress));
+			ioc_info(ioc, "\tBEFORE adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
+				 handle,
+				 (u64)le64_to_cpu(expander_pg0.SASAddress));
 			_scsih_expander_add(ioc, handle);
-			pr_info(MPT3SAS_FMT "\tAFTER adding expander: " \
-			    "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
-			    handle, (unsigned long long)
-			    le64_to_cpu(expander_pg0.SASAddress));
+			ioc_info(ioc, "\tAFTER adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
+				 handle,
+				 (u64)le64_to_cpu(expander_pg0.SASAddress));
 		}
 	}
 
-	pr_info(MPT3SAS_FMT "\tscan devices: expanders complete\n",
-	    ioc->name);
+	ioc_info(ioc, "\tscan devices: expanders complete\n");
 
 	if (!ioc->ir_firmware)
 		goto skip_to_sas;
 
-	pr_info(MPT3SAS_FMT "\tscan devices: phys disk start\n", ioc->name);
+	ioc_info(ioc, "\tscan devices: phys disk start\n");
 
 	/* phys disk */
 	phys_disk_num = 0xFF;
@@ -9085,10 +9081,8 @@
 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 		    MPI2_IOCSTATUS_MASK;
 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-			pr_info(MPT3SAS_FMT "\tbreak from phys disk scan: "\
-			    "ioc_status(0x%04x), loginfo(0x%08x)\n",
-			    ioc->name, ioc_status,
-			    le32_to_cpu(mpi_reply.IOCLogInfo));
+			ioc_info(ioc, "\tbreak from phys disk scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
+				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
 			break;
 		}
 		phys_disk_num = pd_pg0.PhysDiskNum;
@@ -9105,19 +9099,16 @@
 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 		    MPI2_IOCSTATUS_MASK;
 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-			pr_info(MPT3SAS_FMT "\tbreak from phys disk scan " \
-			    "ioc_status(0x%04x), loginfo(0x%08x)\n",
-			    ioc->name, ioc_status,
-			    le32_to_cpu(mpi_reply.IOCLogInfo));
+			ioc_info(ioc, "\tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n",
+				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
 			break;
 		}
 		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
 		if (!_scsih_get_sas_address(ioc, parent_handle,
 		    &sas_address)) {
-			pr_info(MPT3SAS_FMT "\tBEFORE adding phys disk: " \
-			    " handle (0x%04x), sas_addr(0x%016llx)\n",
-			    ioc->name, handle, (unsigned long long)
-			    le64_to_cpu(sas_device_pg0.SASAddress));
+			ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
+				 handle,
+				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
 			mpt3sas_transport_update_links(ioc, sas_address,
 			    handle, sas_device_pg0.PhyNum,
 			    MPI2_SAS_NEG_LINK_RATE_1_5);
@@ -9131,17 +9122,15 @@
 			    1)) {
 				ssleep(1);
 			}
-			pr_info(MPT3SAS_FMT "\tAFTER adding phys disk: " \
-			    " handle (0x%04x), sas_addr(0x%016llx)\n",
-			    ioc->name, handle, (unsigned long long)
-			    le64_to_cpu(sas_device_pg0.SASAddress));
+			ioc_info(ioc, "\tAFTER adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
+				 handle,
+				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
 		}
 	}
 
-	pr_info(MPT3SAS_FMT "\tscan devices: phys disk complete\n",
-	    ioc->name);
+	ioc_info(ioc, "\tscan devices: phys disk complete\n");
 
-	pr_info(MPT3SAS_FMT "\tscan devices: volumes start\n", ioc->name);
+	ioc_info(ioc, "\tscan devices: volumes start\n");
 
 	/* volumes */
 	handle = 0xFFFF;
@@ -9150,10 +9139,8 @@
 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 		    MPI2_IOCSTATUS_MASK;
 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-			pr_info(MPT3SAS_FMT "\tbreak from volume scan: " \
-			    "ioc_status(0x%04x), loginfo(0x%08x)\n",
-			    ioc->name, ioc_status,
-			    le32_to_cpu(mpi_reply.IOCLogInfo));
+			ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
+				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
 			break;
 		}
 		handle = le16_to_cpu(volume_pg1.DevHandle);
@@ -9170,10 +9157,8 @@
 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 		    MPI2_IOCSTATUS_MASK;
 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-			pr_info(MPT3SAS_FMT "\tbreak from volume scan: " \
-			    "ioc_status(0x%04x), loginfo(0x%08x)\n",
-			    ioc->name, ioc_status,
-			    le32_to_cpu(mpi_reply.IOCLogInfo));
+			ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
+				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
 			break;
 		}
 		if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
@@ -9182,23 +9167,19 @@
 			memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
 			element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
 			element.VolDevHandle = volume_pg1.DevHandle;
-			pr_info(MPT3SAS_FMT
-				"\tBEFORE adding volume: handle (0x%04x)\n",
-				ioc->name, volume_pg1.DevHandle);
+			ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n",
+				 volume_pg1.DevHandle);
 			_scsih_sas_volume_add(ioc, &element);
-			pr_info(MPT3SAS_FMT
-				"\tAFTER adding volume: handle (0x%04x)\n",
-				ioc->name, volume_pg1.DevHandle);
+			ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n",
+				 volume_pg1.DevHandle);
 		}
 	}
 
-	pr_info(MPT3SAS_FMT "\tscan devices: volumes complete\n",
-	    ioc->name);
+	ioc_info(ioc, "\tscan devices: volumes complete\n");
 
  skip_to_sas:
 
-	pr_info(MPT3SAS_FMT "\tscan devices: end devices start\n",
-	    ioc->name);
+	ioc_info(ioc, "\tscan devices: end devices start\n");
 
 	/* sas devices */
 	handle = 0xFFFF;
@@ -9208,10 +9189,8 @@
 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 		    MPI2_IOCSTATUS_MASK;
 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-			pr_info(MPT3SAS_FMT "\tbreak from end device scan:"\
-			    " ioc_status(0x%04x), loginfo(0x%08x)\n",
-			    ioc->name, ioc_status,
-			    le32_to_cpu(mpi_reply.IOCLogInfo));
+			ioc_info(ioc, "\tbreak from end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
+				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
 			break;
 		}
 		handle = le16_to_cpu(sas_device_pg0.DevHandle);
@@ -9226,10 +9205,9 @@
 		}
 		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
 		if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
-			pr_info(MPT3SAS_FMT "\tBEFORE adding end device: " \
-			    "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
-			    handle, (unsigned long long)
-			    le64_to_cpu(sas_device_pg0.SASAddress));
+			ioc_info(ioc, "\tBEFORE adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
+				 handle,
+				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
 			mpt3sas_transport_update_links(ioc, sas_address, handle,
 			    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
 			retry_count = 0;
@@ -9241,16 +9219,13 @@
 			    0)) {
 				ssleep(1);
 			}
-			pr_info(MPT3SAS_FMT "\tAFTER adding end device: " \
-			    "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
-			    handle, (unsigned long long)
-			    le64_to_cpu(sas_device_pg0.SASAddress));
+			ioc_info(ioc, "\tAFTER adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
+				 handle,
+				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
 		}
 	}
-	pr_info(MPT3SAS_FMT "\tscan devices: end devices complete\n",
-	    ioc->name);
-	pr_info(MPT3SAS_FMT "\tscan devices: pcie end devices start\n",
-	    ioc->name);
+	ioc_info(ioc, "\tscan devices: end devices complete\n");
+	ioc_info(ioc, "\tscan devices: pcie end devices start\n");
 
 	/* pcie devices */
 	handle = 0xFFFF;
@@ -9260,14 +9235,12 @@
 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
 				& MPI2_IOCSTATUS_MASK;
 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-			pr_info(MPT3SAS_FMT "\tbreak from pcie end device"
-				" scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
-				ioc->name, ioc_status,
-				le32_to_cpu(mpi_reply.IOCLogInfo));
+			ioc_info(ioc, "\tbreak from pcie end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
+				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
 			break;
 		}
 		handle = le16_to_cpu(pcie_device_pg0.DevHandle);
-		if (!(_scsih_is_nvme_device(
+		if (!(_scsih_is_nvme_pciescsi_device(
 			le32_to_cpu(pcie_device_pg0.DeviceInfo))))
 			continue;
 		pcie_device = mpt3sas_get_pdev_by_wwid(ioc,
@@ -9280,14 +9253,11 @@
 		parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle);
 		_scsih_pcie_add_device(ioc, handle);
 
-		pr_info(MPT3SAS_FMT "\tAFTER adding pcie end device: "
-			"handle (0x%04x), wwid(0x%016llx)\n", ioc->name,
-			handle,
-			(unsigned long long) le64_to_cpu(pcie_device_pg0.WWID));
+		ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n",
+			 handle, (u64)le64_to_cpu(pcie_device_pg0.WWID));
 	}
-	pr_info(MPT3SAS_FMT "\tpcie devices: pcie end devices complete\n",
-		ioc->name);
-	pr_info(MPT3SAS_FMT "scan devices: complete\n", ioc->name);
+	ioc_info(ioc, "\tpcie devices: pcie end devices complete\n");
+	ioc_info(ioc, "scan devices: complete\n");
 }
 
 /**
@@ -9298,8 +9268,7 @@
  */
 void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
 {
-	dtmprintk(ioc, pr_info(MPT3SAS_FMT
-			"%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
+	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
 }
 
 /**
@@ -9311,8 +9280,7 @@
 void
 mpt3sas_scsih_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
 {
-	dtmprintk(ioc, pr_info(MPT3SAS_FMT
-			"%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
+	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__));
 	if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
 		ioc->scsih_cmds.status |= MPT3_CMD_RESET;
 		mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
@@ -9340,8 +9308,7 @@
 void
 mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
 {
-	dtmprintk(ioc, pr_info(MPT3SAS_FMT
-			"%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
+	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
 	if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
 					   !ioc->sas_hba.num_phys)) {
 		_scsih_prep_device_scan(ioc);
@@ -9396,9 +9363,8 @@
 		if (missing_delay[0] != -1 && missing_delay[1] != -1)
 			mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
 			    missing_delay[1]);
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT
-			"port enable: complete from worker thread\n",
-			ioc->name));
+		dewtprintk(ioc,
+			   ioc_info(ioc, "port enable: complete from worker thread\n"));
 		break;
 	case MPT3SAS_TURN_ON_PFA_LED:
 		_scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
@@ -9407,7 +9373,10 @@
 		_scsih_sas_topology_change_event(ioc, fw_event);
 		break;
 	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
-		_scsih_sas_device_status_change_event(ioc, fw_event);
+		if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+			_scsih_sas_device_status_change_event_debug(ioc,
+			    (Mpi2EventDataSasDeviceStatusChange_t *)
+			    fw_event->event_data);
 		break;
 	case MPI2_EVENT_SAS_DISCOVERY:
 		_scsih_sas_discovery_event(ioc, fw_event);
@@ -9496,8 +9465,8 @@
 	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
 
 	if (unlikely(!mpi_reply)) {
-		pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return 1;
 	}
 
@@ -9564,36 +9533,26 @@
 
 		switch (le32_to_cpu(*log_code)) {
 		case MPT2_WARPDRIVE_LC_SSDT:
-			pr_warn(MPT3SAS_FMT "WarpDrive Warning: "
-			    "IO Throttling has occurred in the WarpDrive "
-			    "subsystem. Check WarpDrive documentation for "
-			    "additional details.\n", ioc->name);
+			ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
 			break;
 		case MPT2_WARPDRIVE_LC_SSDLW:
-			pr_warn(MPT3SAS_FMT "WarpDrive Warning: "
-			    "Program/Erase Cycles for the WarpDrive subsystem "
-			    "in degraded range. Check WarpDrive documentation "
-			    "for additional details.\n", ioc->name);
+			ioc_warn(ioc, "WarpDrive Warning: Program/Erase Cycles for the WarpDrive subsystem in degraded range. Check WarpDrive documentation for additional details.\n");
 			break;
 		case MPT2_WARPDRIVE_LC_SSDLF:
-			pr_err(MPT3SAS_FMT "WarpDrive Fatal Error: "
-			    "There are no Program/Erase Cycles for the "
-			    "WarpDrive subsystem. The storage device will be "
-			    "in read-only mode. Check WarpDrive documentation "
-			    "for additional details.\n", ioc->name);
+			ioc_err(ioc, "WarpDrive Fatal Error: There are no Program/Erase Cycles for the WarpDrive subsystem. The storage device will be in read-only mode. Check WarpDrive documentation for additional details.\n");
 			break;
 		case MPT2_WARPDRIVE_LC_BRMF:
-			pr_err(MPT3SAS_FMT "WarpDrive Fatal Error: "
-			    "The Backup Rail Monitor has failed on the "
-			    "WarpDrive subsystem. Check WarpDrive "
-			    "documentation for additional details.\n",
-			    ioc->name);
+			ioc_err(ioc, "WarpDrive Fatal Error: The Backup Rail Monitor has failed on the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
 			break;
 		}
 
 		break;
 	}
 	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
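+		/*
+		 * Handled here in the interrupt path now; the fw-event
+		 * worker only emits the debug print for this event.
+		 */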
+		_scsih_sas_device_status_change_event(ioc,
+		    (Mpi2EventDataSasDeviceStatusChange_t *)
+		    mpi_reply->EventData);
+		break;
 	case MPI2_EVENT_IR_OPERATION_STATUS:
 	case MPI2_EVENT_SAS_DISCOVERY:
 	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
@@ -9613,9 +9572,8 @@
 		    (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
 		switch (ActiveCableEventData->ReasonCode) {
 		case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
-			pr_notice(MPT3SAS_FMT
-			    "Currently an active cable with ReceptacleID %d\n",
-			    ioc->name, ActiveCableEventData->ReceptacleID);
+			ioc_notice(ioc, "Currently an active cable with ReceptacleID %d\n",
+				   ActiveCableEventData->ReceptacleID);
 			pr_notice("cannot be powered and devices connected\n");
 			pr_notice("to this active cable will not be seen\n");
 			pr_notice("This active cable requires %d mW of power\n",
@@ -9623,9 +9581,8 @@
 			break;
 
 		case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
-			pr_notice(MPT3SAS_FMT
-			    "Currently a cable with ReceptacleID %d\n",
-			    ioc->name, ActiveCableEventData->ReceptacleID);
+			ioc_notice(ioc, "Currently a cable with ReceptacleID %d\n",
+				   ActiveCableEventData->ReceptacleID);
 			pr_notice(
 			    "is not running at optimal speed(12 Gb/s rate)\n");
 			break;
@@ -9640,8 +9597,8 @@
 	sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
 	fw_event = alloc_fw_event_work(sz);
 	if (!fw_event) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return 1;
 	}
 
@@ -9690,11 +9647,9 @@
 	mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
 	    sas_expander->sas_address_parent);
 
-	pr_info(MPT3SAS_FMT
-		"expander_remove: handle(0x%04x), sas_addr(0x%016llx)\n",
-		ioc->name,
-	    sas_expander->handle, (unsigned long long)
-	    sas_expander->sas_address);
+	ioc_info(ioc, "expander_remove: handle(0x%04x), sas_addr(0x%016llx)\n",
+		 sas_expander->handle, (unsigned long long)
+		 sas_expander->sas_address);
 
 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
 	list_del(&sas_expander->list);
@@ -9729,16 +9684,14 @@
 	mutex_lock(&ioc->scsih_cmds.mutex);
 
 	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
-		pr_err(MPT3SAS_FMT "%s: scsih_cmd in use\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
 		goto out;
 	}
 	ioc->scsih_cmds.status = MPT3_CMD_PENDING;
 
 	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
 	if (!smid) {
-		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
 		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
 		goto out;
 	}
@@ -9751,24 +9704,22 @@
 	mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;
 
 	if (!ioc->hide_ir_msg)
-		pr_info(MPT3SAS_FMT "IR shutdown (sending)\n", ioc->name);
+		ioc_info(ioc, "IR shutdown (sending)\n");
 	init_completion(&ioc->scsih_cmds.done);
-	mpt3sas_base_put_smid_default(ioc, smid);
+	ioc->put_smid_default(ioc, smid);
 	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
 
 	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
-		pr_err(MPT3SAS_FMT "%s: timeout\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: timeout\n", __func__);
 		goto out;
 	}
 
 	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
 		mpi_reply = ioc->scsih_cmds.reply;
 		if (!ioc->hide_ir_msg)
-			pr_info(MPT3SAS_FMT "IR shutdown "
-			   "(complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
-			    ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
-			    le32_to_cpu(mpi_reply->IOCLogInfo));
+			ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
+				 le16_to_cpu(mpi_reply->IOCStatus),
+				 le32_to_cpu(mpi_reply->IOCLogInfo));
 	}
 
  out:
@@ -9792,6 +9743,7 @@
 	struct _pcie_device *pcie_device, *pcienext;
 	struct workqueue_struct	*wq;
 	unsigned long flags;
+	Mpi2ConfigReply_t mpi_reply;
 
 	ioc->remove_host = 1;
 
@@ -9806,9 +9758,16 @@
 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
 	if (wq)
 		destroy_workqueue(wq);
-
+	/*
+	 * Copy back the unmodified ioc page1 so that on next driver load,
+	 * current modified changes on ioc page1 won't take effect.
+	 */
+	if (ioc->is_aero_ioc)
+		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
+				&ioc->ioc_pg1_copy);
 	/* release all the volumes */
 	_scsih_ir_shutdown(ioc);
+	sas_remove_host(shost);
 	list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
 	    list) {
 		if (raid_device->starget) {
@@ -9817,9 +9776,8 @@
 			sas_target_priv_data->deleted = 1;
 			scsi_remove_target(&raid_device->starget->dev);
 		}
-		pr_info(MPT3SAS_FMT "removing handle(0x%04x), wwid(0x%016llx)\n",
-			ioc->name,  raid_device->handle,
-		    (unsigned long long) raid_device->wwid);
+		ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
+			 raid_device->handle, (u64)raid_device->wwid);
 		_scsih_raid_device_remove(ioc, raid_device);
 	}
 	list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
@@ -9851,7 +9809,6 @@
 		ioc->sas_hba.num_phys = 0;
 	}
 
-	sas_remove_host(shost);
 	mpt3sas_base_detach(ioc);
 	spin_lock(&gioc_lock);
 	list_del(&ioc->list);
@@ -9870,6 +9827,7 @@
 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
 	struct workqueue_struct	*wq;
 	unsigned long flags;
+	Mpi2ConfigReply_t mpi_reply;
 
 	ioc->remove_host = 1;
 
@@ -9884,6 +9842,13 @@
 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
 	if (wq)
 		destroy_workqueue(wq);
+	/*
+	 * Copy back the unmodified ioc page1 so that on next driver load,
+	 * the current modifications to ioc page1 won't take effect.
+	 */
+	if (ioc->is_aero_ioc)
+		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
+				&ioc->ioc_pg1_copy);
 
 	_scsih_ir_shutdown(ioc);
 	mpt3sas_base_detach(ioc);
@@ -10146,6 +10111,12 @@
 			pcie_device_put(pcie_device);
 			continue;
 		}
+		if (pcie_device->access_status ==
+		    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
+			pcie_device_make_active(ioc, pcie_device);
+			pcie_device_put(pcie_device);
+			continue;
+		}
 		rc = scsi_add_device(ioc->shost, PCIE_CHANNEL,
 			pcie_device->id, 0);
 		if (rc) {
@@ -10230,7 +10201,7 @@
 	rc = mpt3sas_port_enable(ioc);
 
 	if (rc != 0)
-		pr_info(MPT3SAS_FMT "port enable: FAILED\n", ioc->name);
+		ioc_info(ioc, "port enable: FAILED\n");
 }
 
 /**
@@ -10255,9 +10226,7 @@
 
 	if (time >= (300 * HZ)) {
 		ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
-		pr_info(MPT3SAS_FMT
-			"port enable: FAILED with timeout (timeout=300s)\n",
-			ioc->name);
+		ioc_info(ioc, "port enable: FAILED with timeout (timeout=300s)\n");
 		ioc->is_driver_loading = 0;
 		return 1;
 	}
@@ -10266,16 +10235,15 @@
 		return 0;
 
 	if (ioc->start_scan_failed) {
-		pr_info(MPT3SAS_FMT
-			"port enable: FAILED with (ioc_status=0x%08x)\n",
-			ioc->name, ioc->start_scan_failed);
+		ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n",
+			 ioc->start_scan_failed);
 		ioc->is_driver_loading = 0;
 		ioc->wait_for_discovery_to_complete = 0;
 		ioc->remove_host = 1;
 		return 1;
 	}
 
-	pr_info(MPT3SAS_FMT "port enable: SUCCESS\n", ioc->name);
+	ioc_info(ioc, "port enable: SUCCESS\n");
 	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
 
 	if (ioc->wait_for_discovery_to_complete) {
@@ -10311,7 +10279,6 @@
 	.sg_tablesize			= MPT2SAS_SG_DEPTH,
 	.max_sectors			= 32767,
 	.cmd_per_lun			= 7,
-	.use_clustering			= ENABLE_CLUSTERING,
 	.shost_attrs			= mpt3sas_host_attrs,
 	.sdev_attrs			= mpt3sas_dev_attrs,
 	.track_queue_depth		= 1,
@@ -10349,8 +10316,8 @@
 	.this_id			= -1,
 	.sg_tablesize			= MPT3SAS_SG_DEPTH,
 	.max_sectors			= 32767,
+	.max_segment_size		= 0xffffffff,
 	.cmd_per_lun			= 7,
-	.use_clustering			= ENABLE_CLUSTERING,
 	.shost_attrs			= mpt3sas_host_attrs,
 	.sdev_attrs			= mpt3sas_dev_attrs,
 	.track_queue_depth		= 1,
@@ -10396,7 +10363,8 @@
 	case MPI2_MFGPAGE_DEVID_SAS2308_1:
 	case MPI2_MFGPAGE_DEVID_SAS2308_2:
 	case MPI2_MFGPAGE_DEVID_SAS2308_3:
-	case MPI2_MFGPAGE_DEVID_SAS2308_MPI_EP:
+	case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
+	case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
 		return MPI2_VERSION;
 	case MPI25_MFGPAGE_DEVID_SAS3004:
 	case MPI25_MFGPAGE_DEVID_SAS3008:
@@ -10422,6 +10390,11 @@
 	case MPI26_MFGPAGE_DEVID_SAS3516_1:
 	case MPI26_MFGPAGE_DEVID_SAS3416:
 	case MPI26_MFGPAGE_DEVID_SAS3616:
+	case MPI26_ATLAS_PCIe_SWITCH_DEVID:
+	case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
+	case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
+	case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
+	case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
 		return MPI26_VERSION;
 	}
 	return 0;
@@ -10479,7 +10452,8 @@
 			ioc->is_warpdrive = 1;
 			ioc->hide_ir_msg = 1;
 			break;
-		case MPI2_MFGPAGE_DEVID_SAS2308_MPI_EP:
+		case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
+		case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
 			ioc->is_mcpu_endpoint = 1;
 			break;
 		default:
@@ -10507,10 +10481,20 @@
 		case MPI26_MFGPAGE_DEVID_SAS3516_1:
 		case MPI26_MFGPAGE_DEVID_SAS3416:
 		case MPI26_MFGPAGE_DEVID_SAS3616:
+		case MPI26_ATLAS_PCIe_SWITCH_DEVID:
 			ioc->is_gen35_ioc = 1;
 			break;
+		case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
+		case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
+			dev_info(&pdev->dev,
+			    "HBA is in Configurable Secure mode\n");
+			/* fall through */
+		case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
+		case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
+			ioc->is_aero_ioc = ioc->is_gen35_ioc = 1;
+			break;
 		default:
-			ioc->is_gen35_ioc = 0;
+			ioc->is_gen35_ioc = ioc->is_aero_ioc = 0;
 		}
 		if ((ioc->hba_mpi_version_belonged == MPI25_VERSION &&
 			pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) ||
@@ -10547,6 +10531,13 @@
 	ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
 	ioc->logging_level = logging_level;
 	ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
+	/*
+	 * Enable MEMORY MOVE support flag.
+	 */
+	ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_MEMMOVE;
+
+	ioc->enable_sdev_max_qd = enable_sdev_max_qd;
+
 	/* misc semaphores and spin locks */
 	mutex_init(&ioc->reset_in_progress_mutex);
 	/* initializing pci_access_mutex lock */
@@ -10586,28 +10577,22 @@
 	if (ioc->is_mcpu_endpoint) {
 		/* mCPU MPI support 64K max IO */
 		shost->max_sectors = 128;
-		pr_info(MPT3SAS_FMT
-				"The max_sectors value is set to %d\n",
-				ioc->name, shost->max_sectors);
+		ioc_info(ioc, "The max_sectors value is set to %d\n",
+			 shost->max_sectors);
 	} else {
 		if (max_sectors != 0xFFFF) {
 			if (max_sectors < 64) {
 				shost->max_sectors = 64;
-				pr_warn(MPT3SAS_FMT "Invalid value %d passed " \
-				    "for max_sectors, range is 64 to 32767. " \
-				    "Assigning value of 64.\n", \
-				    ioc->name, max_sectors);
+				ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning value of 64.\n",
+					 max_sectors);
 			} else if (max_sectors > 32767) {
 				shost->max_sectors = 32767;
-				pr_warn(MPT3SAS_FMT "Invalid value %d passed " \
-				    "for max_sectors, range is 64 to 32767." \
-				    "Assigning default value of 32767.\n", \
-				    ioc->name, max_sectors);
+				ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning default value of 32767.\n",
+					 max_sectors);
 			} else {
 				shost->max_sectors = max_sectors & 0xFFFE;
-				pr_info(MPT3SAS_FMT
-					"The max_sectors value is set to %d\n",
-					ioc->name, shost->max_sectors);
+				ioc_info(ioc, "The max_sectors value is set to %d\n",
+					 shost->max_sectors);
 			}
 		}
 	}
@@ -10627,16 +10612,16 @@
 	ioc->firmware_event_thread = alloc_ordered_workqueue(
 	    ioc->firmware_event_name, 0);
 	if (!ioc->firmware_event_thread) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		rv = -ENODEV;
 		goto out_thread_fail;
 	}
 
 	ioc->is_driver_loading = 1;
 	if ((mpt3sas_base_attach(ioc))) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		rv = -ENODEV;
 		goto out_attach_fail;
 	}
@@ -10657,8 +10642,8 @@
 
 	rv = scsi_add_host(shost, &pdev->dev);
 	if (rv) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		goto out_add_shost_fail;
 	}
 
@@ -10695,9 +10680,8 @@
 	flush_scheduled_work();
 	scsi_block_requests(shost);
 	device_state = pci_choose_state(pdev, state);
-	pr_info(MPT3SAS_FMT
-		"pdev=0x%p, slot=%s, entering operating state [D%d]\n",
-		ioc->name, pdev, pci_name(pdev), device_state);
+	ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
+		 pdev, pci_name(pdev), device_state);
 
 	pci_save_state(pdev);
 	mpt3sas_base_free_resources(ioc);
@@ -10719,9 +10703,8 @@
 	pci_power_t device_state = pdev->current_state;
 	int r;
 
-	pr_info(MPT3SAS_FMT
-		"pdev=0x%p, slot=%s, previous operating state [D%d]\n",
-		ioc->name, pdev, pci_name(pdev), device_state);
+	ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
+		 pdev, pci_name(pdev), device_state);
 
 	pci_set_power_state(pdev, PCI_D0);
 	pci_enable_wake(pdev, PCI_D0, 0);
@@ -10753,8 +10736,7 @@
 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
 
-	pr_info(MPT3SAS_FMT "PCI error: detected callback, state(%d)!!\n",
-	    ioc->name, state);
+	ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state);
 
 	switch (state) {
 	case pci_channel_io_normal:
@@ -10791,8 +10773,7 @@
 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
 	int rc;
 
-	pr_info(MPT3SAS_FMT "PCI error: slot reset callback!!\n",
-	     ioc->name);
+	ioc_info(ioc, "PCI error: slot reset callback!!\n");
 
 	ioc->pci_error_recovery = 0;
 	ioc->pdev = pdev;
@@ -10803,8 +10784,8 @@
 
 	rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
 
-	pr_warn(MPT3SAS_FMT "hard reset: %s\n", ioc->name,
-	    (rc == 0) ? "success" : "failed");
+	ioc_warn(ioc, "hard reset: %s\n",
+		 (rc == 0) ? "success" : "failed");
 
 	if (!rc)
 		return PCI_ERS_RESULT_RECOVERED;
@@ -10826,9 +10807,8 @@
 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
 
-	pr_info(MPT3SAS_FMT "PCI error: resume callback!!\n", ioc->name);
+	ioc_info(ioc, "PCI error: resume callback!!\n");
 
-	pci_cleanup_aer_uncorrect_error_status(pdev);
 	mpt3sas_base_start_watchdog(ioc);
 	scsi_unblock_requests(ioc->shost);
 }
@@ -10843,8 +10823,7 @@
 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
 
-	pr_info(MPT3SAS_FMT "PCI error: mmio enabled callback!!\n",
-	    ioc->name);
+	ioc_info(ioc, "PCI error: mmio enabled callback!!\n");
 
 	/* TODO - dump whatever for debugging purposes */
 
@@ -10922,7 +10901,9 @@
 		PCI_ANY_ID, PCI_ANY_ID },
 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
 		PCI_ANY_ID, PCI_ANY_ID },
-	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_MPI_EP,
+	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP,
+		PCI_ANY_ID, PCI_ANY_ID },
+	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1,
 		PCI_ANY_ID, PCI_ANY_ID },
 	/* SSS6200 */
 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200,
@@ -10979,6 +10960,27 @@
 	/* Mercator ~ 3616 */
 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616,
 		PCI_ANY_ID, PCI_ANY_ID },
+
+	/* Aero SI 0x00E1 Configurable Secure
+	 * 0x00E2 Hard Secure
+	 */
+	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3916,
+		PCI_ANY_ID, PCI_ANY_ID },
+	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916,
+		PCI_ANY_ID, PCI_ANY_ID },
+
+	/* Atlas PCIe Switch Management Port */
+	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_ATLAS_PCIe_SWITCH_DEVID,
+		PCI_ANY_ID, PCI_ANY_ID },
+
+	/* Sea SI 0x00E5 Configurable Secure
+	 * 0x00E6 Hard Secure
+	 */
+	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3816,
+		PCI_ANY_ID, PCI_ANY_ID },
+	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
+		PCI_ANY_ID, PCI_ANY_ID },
+
 	{0}     /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index f8cc267..5324662 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -146,25 +146,22 @@
 	u32 ioc_status;
 
 	if (ioc->shost_recovery || ioc->pci_error_recovery) {
-		pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
-		    __func__, ioc->name);
+		ioc_info(ioc, "%s: host reset in progress!\n", __func__);
 		return -EFAULT;
 	}
 
 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return -ENXIO;
 	}
 
 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 	    MPI2_IOCSTATUS_MASK;
 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-		pr_err(MPT3SAS_FMT
-			"handle(0x%04x), ioc_status(0x%04x)\nfailure at %s:%d/%s()!\n",
-			ioc->name, handle, ioc_status,
-		     __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x) failure at %s:%d/%s()!\n",
+			handle, ioc_status, __FILE__, __LINE__, __func__);
 		return -EIO;
 	}
 
@@ -299,7 +296,6 @@
 	struct rep_manu_request *manufacture_request;
 	int rc;
 	u16 smid;
-	u32 ioc_state;
 	void *psge;
 	u8 issue_reset = 0;
 	void *data_out = NULL;
@@ -307,48 +303,28 @@
 	dma_addr_t data_in_dma;
 	size_t data_in_sz;
 	size_t data_out_sz;
-	u16 wait_state_count;
 
 	if (ioc->shost_recovery || ioc->pci_error_recovery) {
-		pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
-		    __func__, ioc->name);
+		ioc_info(ioc, "%s: host reset in progress!\n", __func__);
 		return -EFAULT;
 	}
 
 	mutex_lock(&ioc->transport_cmds.mutex);
 
 	if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
-		pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: transport_cmds in use\n", __func__);
 		rc = -EAGAIN;
 		goto out;
 	}
 	ioc->transport_cmds.status = MPT3_CMD_PENDING;
 
-	wait_state_count = 0;
-	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
-	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
-		if (wait_state_count++ == 10) {
-			pr_err(MPT3SAS_FMT
-			    "%s: failed due to ioc not operational\n",
-			    ioc->name, __func__);
-			rc = -EFAULT;
-			goto out;
-		}
-		ssleep(1);
-		ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
-		pr_info(MPT3SAS_FMT
-			"%s: waiting for operational state(count=%d)\n",
-			ioc->name, __func__, wait_state_count);
-	}
-	if (wait_state_count)
-		pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
-		    ioc->name, __func__);
+	rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
+	if (rc)
+		goto out;
 
 	smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
 	if (!smid) {
-		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
 		rc = -EAGAIN;
 		goto out;
 	}
@@ -359,9 +335,8 @@
 
 	data_out_sz = sizeof(struct rep_manu_request);
 	data_in_sz = sizeof(struct rep_manu_reply);
-	data_out = pci_alloc_consistent(ioc->pdev, data_out_sz + data_in_sz,
-	    &data_out_dma);
-
+	data_out = dma_alloc_coherent(&ioc->pdev->dev, data_out_sz + data_in_sz,
+			&data_out_dma, GFP_KERNEL);
 	if (!data_out) {
 		pr_err("failure at %s:%d/%s()!\n", __FILE__,
 		    __LINE__, __func__);
@@ -388,16 +363,15 @@
 	ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
 	    data_in_sz);
 
-	dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-		"report_manufacture - send to sas_addr(0x%016llx)\n",
-		ioc->name, (unsigned long long)sas_address));
+	dtransportprintk(ioc,
+			 ioc_info(ioc, "report_manufacture - send to sas_addr(0x%016llx)\n",
+				  (u64)sas_address));
 	init_completion(&ioc->transport_cmds.done);
-	mpt3sas_base_put_smid_default(ioc, smid);
+	ioc->put_smid_default(ioc, smid);
 	wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
 
 	if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
-		pr_err(MPT3SAS_FMT "%s: timeout\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: timeout\n", __func__);
 		_debug_dump_mf(mpi_request,
 		    sizeof(Mpi2SmpPassthroughRequest_t)/4);
 		if (!(ioc->transport_cmds.status & MPT3_CMD_RESET))
@@ -405,17 +379,16 @@
 		goto issue_host_reset;
 	}
 
-	dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-		"report_manufacture - complete\n", ioc->name));
+	dtransportprintk(ioc, ioc_info(ioc, "report_manufacture - complete\n"));
 
 	if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) {
 		u8 *tmp;
 
 		mpi_reply = ioc->transport_cmds.reply;
 
-		dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-		    "report_manufacture - reply data transfer size(%d)\n",
-		    ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength)));
+		dtransportprintk(ioc,
+				 ioc_info(ioc, "report_manufacture - reply data transfer size(%d)\n",
+					  le16_to_cpu(mpi_reply->ResponseDataLength)));
 
 		if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
 		    sizeof(struct rep_manu_reply))
@@ -439,8 +412,8 @@
 			    manufacture_reply->component_revision_id;
 		}
 	} else
-		dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-		    "report_manufacture - no reply\n", ioc->name));
+		dtransportprintk(ioc,
+				 ioc_info(ioc, "report_manufacture - no reply\n"));
 
  issue_host_reset:
 	if (issue_reset)
@@ -448,7 +421,7 @@
  out:
 	ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
 	if (data_out)
-		pci_free_consistent(ioc->pdev, data_out_sz + data_in_sz,
+		dma_free_coherent(&ioc->pdev->dev, data_out_sz + data_in_sz,
 		    data_out, data_out_dma);
 
 	mutex_unlock(&ioc->transport_cmds.mutex);
@@ -643,8 +616,8 @@
 	mpt3sas_port = kzalloc(sizeof(struct _sas_port),
 	    GFP_KERNEL);
 	if (!mpt3sas_port) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return NULL;
 	}
 
@@ -655,22 +628,21 @@
 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
 
 	if (!sas_node) {
-		pr_err(MPT3SAS_FMT
-			"%s: Could not find parent sas_address(0x%016llx)!\n",
-			ioc->name, __func__, (unsigned long long)sas_address);
+		ioc_err(ioc, "%s: Could not find parent sas_address(0x%016llx)!\n",
+			__func__, (u64)sas_address);
 		goto out_fail;
 	}
 
 	if ((_transport_set_identify(ioc, handle,
 	    &mpt3sas_port->remote_identify))) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		goto out_fail;
 	}
 
 	if (mpt3sas_port->remote_identify.device_type == SAS_PHY_UNUSED) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		goto out_fail;
 	}
 
@@ -687,20 +659,20 @@
 	}
 
 	if (!mpt3sas_port->num_phys) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		goto out_fail;
 	}
 
 	if (!sas_node->parent_dev) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		goto out_fail;
 	}
 	port = sas_port_alloc_num(sas_node->parent_dev);
 	if ((sas_port_add(port))) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		goto out_fail;
 	}
 
@@ -729,17 +701,17 @@
 		sas_device = mpt3sas_get_sdev_by_addr(ioc,
 				    mpt3sas_port->remote_identify.sas_address);
 		if (!sas_device) {
-			dfailprintk(ioc, printk(MPT3SAS_FMT
-				"failure at %s:%d/%s()!\n",
-				ioc->name, __FILE__, __LINE__, __func__));
+			dfailprintk(ioc,
+				    ioc_info(ioc, "failure at %s:%d/%s()!\n",
+					     __FILE__, __LINE__, __func__));
 			goto out_fail;
 		}
 		sas_device->pend_sas_rphy_add = 1;
 	}
 
 	if ((sas_rphy_add(rphy))) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 	}
 
 	if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) {
@@ -834,10 +806,13 @@
 			    mpt3sas_port->remote_identify.sas_address,
 			    mpt3sas_phy->phy_id);
 		mpt3sas_phy->phy_belongs_to_port = 0;
-		sas_port_delete_phy(mpt3sas_port->port, mpt3sas_phy->phy);
+		if (!ioc->remove_host)
+			sas_port_delete_phy(mpt3sas_port->port,
+						mpt3sas_phy->phy);
 		list_del(&mpt3sas_phy->port_siblings);
 	}
-	sas_port_delete(mpt3sas_port->port);
+	if (!ioc->remove_host)
+		sas_port_delete(mpt3sas_port->port);
 	kfree(mpt3sas_port);
 }
 
@@ -861,14 +836,14 @@
 	INIT_LIST_HEAD(&mpt3sas_phy->port_siblings);
 	phy = sas_phy_alloc(parent_dev, phy_index);
 	if (!phy) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return -1;
 	}
 	if ((_transport_set_identify(ioc, mpt3sas_phy->handle,
 	    &mpt3sas_phy->identify))) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		sas_phy_free(phy);
 		return -1;
 	}
@@ -890,8 +865,8 @@
 	    phy_pg0.ProgrammedLinkRate >> 4);
 
 	if ((sas_phy_add(phy))) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		sas_phy_free(phy);
 		return -1;
 	}
@@ -929,14 +904,14 @@
 	INIT_LIST_HEAD(&mpt3sas_phy->port_siblings);
 	phy = sas_phy_alloc(parent_dev, phy_index);
 	if (!phy) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return -1;
 	}
 	if ((_transport_set_identify(ioc, mpt3sas_phy->handle,
 	    &mpt3sas_phy->identify))) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		sas_phy_free(phy);
 		return -1;
 	}
@@ -960,8 +935,8 @@
 	    expander_pg1.ProgrammedLinkRate >> 4);
 
 	if ((sas_phy_add(phy))) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		sas_phy_free(phy);
 		return -1;
 	}
@@ -1089,54 +1064,33 @@
 	struct phy_error_log_reply *phy_error_log_reply;
 	int rc;
 	u16 smid;
-	u32 ioc_state;
 	void *psge;
 	u8 issue_reset = 0;
 	void *data_out = NULL;
 	dma_addr_t data_out_dma;
 	u32 sz;
-	u16 wait_state_count;
 
 	if (ioc->shost_recovery || ioc->pci_error_recovery) {
-		pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
-		    __func__, ioc->name);
+		ioc_info(ioc, "%s: host reset in progress!\n", __func__);
 		return -EFAULT;
 	}
 
 	mutex_lock(&ioc->transport_cmds.mutex);
 
 	if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
-		pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: transport_cmds in use\n", __func__);
 		rc = -EAGAIN;
 		goto out;
 	}
 	ioc->transport_cmds.status = MPT3_CMD_PENDING;
 
-	wait_state_count = 0;
-	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
-	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
-		if (wait_state_count++ == 10) {
-			pr_err(MPT3SAS_FMT
-			    "%s: failed due to ioc not operational\n",
-			    ioc->name, __func__);
-			rc = -EFAULT;
-			goto out;
-		}
-		ssleep(1);
-		ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
-		pr_info(MPT3SAS_FMT
-			"%s: waiting for operational state(count=%d)\n",
-			ioc->name, __func__, wait_state_count);
-	}
-	if (wait_state_count)
-		pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
-		    ioc->name, __func__);
+	rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
+	if (rc)
+		goto out;
 
 	smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
 	if (!smid) {
-		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
 		rc = -EAGAIN;
 		goto out;
 	}
@@ -1146,7 +1100,8 @@
 
 	sz = sizeof(struct phy_error_log_request) +
 	    sizeof(struct phy_error_log_reply);
-	data_out = pci_alloc_consistent(ioc->pdev, sz, &data_out_dma);
+	data_out = dma_alloc_coherent(&ioc->pdev->dev, sz, &data_out_dma,
+			GFP_KERNEL);
 	if (!data_out) {
 		pr_err("failure at %s:%d/%s()!\n", __FILE__,
 		    __LINE__, __func__);
@@ -1179,17 +1134,16 @@
 	    data_out_dma + sizeof(struct phy_error_log_request),
 	    sizeof(struct phy_error_log_reply));
 
-	dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-		"phy_error_log - send to sas_addr(0x%016llx), phy(%d)\n",
-		ioc->name, (unsigned long long)phy->identify.sas_address,
-		phy->number));
+	dtransportprintk(ioc,
+			 ioc_info(ioc, "phy_error_log - send to sas_addr(0x%016llx), phy(%d)\n",
+				  (u64)phy->identify.sas_address,
+				  phy->number));
 	init_completion(&ioc->transport_cmds.done);
-	mpt3sas_base_put_smid_default(ioc, smid);
+	ioc->put_smid_default(ioc, smid);
 	wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
 
 	if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
-		pr_err(MPT3SAS_FMT "%s: timeout\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: timeout\n", __func__);
 		_debug_dump_mf(mpi_request,
 		    sizeof(Mpi2SmpPassthroughRequest_t)/4);
 		if (!(ioc->transport_cmds.status & MPT3_CMD_RESET))
@@ -1197,16 +1151,15 @@
 		goto issue_host_reset;
 	}
 
-	dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-		"phy_error_log - complete\n", ioc->name));
+	dtransportprintk(ioc, ioc_info(ioc, "phy_error_log - complete\n"));
 
 	if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) {
 
 		mpi_reply = ioc->transport_cmds.reply;
 
-		dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-		    "phy_error_log - reply data transfer size(%d)\n",
-		    ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength)));
+		dtransportprintk(ioc,
+				 ioc_info(ioc, "phy_error_log - reply data transfer size(%d)\n",
+					  le16_to_cpu(mpi_reply->ResponseDataLength)));
 
 		if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
 		    sizeof(struct phy_error_log_reply))
@@ -1215,9 +1168,9 @@
 		phy_error_log_reply = data_out +
 		    sizeof(struct phy_error_log_request);
 
-		dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-		    "phy_error_log - function_result(%d)\n",
-		    ioc->name, phy_error_log_reply->function_result));
+		dtransportprintk(ioc,
+				 ioc_info(ioc, "phy_error_log - function_result(%d)\n",
+					  phy_error_log_reply->function_result));
 
 		phy->invalid_dword_count =
 		    be32_to_cpu(phy_error_log_reply->invalid_dword);
@@ -1229,8 +1182,8 @@
 		    be32_to_cpu(phy_error_log_reply->phy_reset_problem);
 		rc = 0;
 	} else
-		dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-		    "phy_error_log - no reply\n", ioc->name));
+		dtransportprintk(ioc,
+				 ioc_info(ioc, "phy_error_log - no reply\n"));
 
  issue_host_reset:
 	if (issue_reset)
@@ -1238,7 +1191,7 @@
  out:
 	ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
 	if (data_out)
-		pci_free_consistent(ioc->pdev, sz, data_out, data_out_dma);
+		dma_free_coherent(&ioc->pdev->dev, sz, data_out, data_out_dma);
 
 	mutex_unlock(&ioc->transport_cmds.mutex);
 	return rc;
@@ -1273,17 +1226,16 @@
 	/* get hba phy error logs */
 	if ((mpt3sas_config_get_phy_pg1(ioc, &mpi_reply, &phy_pg1,
 		    phy->number))) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return -ENXIO;
 	}
 
 	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo)
-		pr_info(MPT3SAS_FMT
-			"phy(%d), ioc_status (0x%04x), loginfo(0x%08x)\n",
-			ioc->name, phy->number,
-			le16_to_cpu(mpi_reply.IOCStatus),
-		    le32_to_cpu(mpi_reply.IOCLogInfo));
+		ioc_info(ioc, "phy(%d), ioc_status (0x%04x), loginfo(0x%08x)\n",
+			 phy->number,
+			 le16_to_cpu(mpi_reply.IOCStatus),
+			 le32_to_cpu(mpi_reply.IOCLogInfo));
 
 	phy->invalid_dword_count = le32_to_cpu(phy_pg1.InvalidDwordCount);
 	phy->running_disparity_error_count =
@@ -1402,54 +1354,33 @@
 	struct phy_control_reply *phy_control_reply;
 	int rc;
 	u16 smid;
-	u32 ioc_state;
 	void *psge;
 	u8 issue_reset = 0;
 	void *data_out = NULL;
 	dma_addr_t data_out_dma;
 	u32 sz;
-	u16 wait_state_count;
 
 	if (ioc->shost_recovery || ioc->pci_error_recovery) {
-		pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
-		    __func__, ioc->name);
+		ioc_info(ioc, "%s: host reset in progress!\n", __func__);
 		return -EFAULT;
 	}
 
 	mutex_lock(&ioc->transport_cmds.mutex);
 
 	if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
-		pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: transport_cmds in use\n", __func__);
 		rc = -EAGAIN;
 		goto out;
 	}
 	ioc->transport_cmds.status = MPT3_CMD_PENDING;
 
-	wait_state_count = 0;
-	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
-	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
-		if (wait_state_count++ == 10) {
-			pr_err(MPT3SAS_FMT
-			    "%s: failed due to ioc not operational\n",
-			    ioc->name, __func__);
-			rc = -EFAULT;
-			goto out;
-		}
-		ssleep(1);
-		ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
-		pr_info(MPT3SAS_FMT
-			"%s: waiting for operational state(count=%d)\n",
-			ioc->name, __func__, wait_state_count);
-	}
-	if (wait_state_count)
-		pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
-		    ioc->name, __func__);
+	rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
+	if (rc)
+		goto out;
 
 	smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
 	if (!smid) {
-		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
 		rc = -EAGAIN;
 		goto out;
 	}
@@ -1459,7 +1390,8 @@
 
 	sz = sizeof(struct phy_control_request) +
 	    sizeof(struct phy_control_reply);
-	data_out = pci_alloc_consistent(ioc->pdev, sz, &data_out_dma);
+	data_out = dma_alloc_coherent(&ioc->pdev->dev, sz, &data_out_dma,
+			GFP_KERNEL);
 	if (!data_out) {
 		pr_err("failure at %s:%d/%s()!\n", __FILE__,
 		    __LINE__, __func__);
@@ -1497,17 +1429,16 @@
 	    data_out_dma + sizeof(struct phy_control_request),
 	    sizeof(struct phy_control_reply));
 
-	dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-		"phy_control - send to sas_addr(0x%016llx), phy(%d), opcode(%d)\n",
-		ioc->name, (unsigned long long)phy->identify.sas_address,
-		phy->number, phy_operation));
+	dtransportprintk(ioc,
+			 ioc_info(ioc, "phy_control - send to sas_addr(0x%016llx), phy(%d), opcode(%d)\n",
+				  (u64)phy->identify.sas_address,
+				  phy->number, phy_operation));
 	init_completion(&ioc->transport_cmds.done);
-	mpt3sas_base_put_smid_default(ioc, smid);
+	ioc->put_smid_default(ioc, smid);
 	wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
 
 	if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
-		pr_err(MPT3SAS_FMT "%s: timeout\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: timeout\n", __func__);
 		_debug_dump_mf(mpi_request,
 		    sizeof(Mpi2SmpPassthroughRequest_t)/4);
 		if (!(ioc->transport_cmds.status & MPT3_CMD_RESET))
@@ -1515,16 +1446,15 @@
 		goto issue_host_reset;
 	}
 
-	dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-		"phy_control - complete\n", ioc->name));
+	dtransportprintk(ioc, ioc_info(ioc, "phy_control - complete\n"));
 
 	if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) {
 
 		mpi_reply = ioc->transport_cmds.reply;
 
-		dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-		    "phy_control - reply data transfer size(%d)\n",
-		    ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength)));
+		dtransportprintk(ioc,
+				 ioc_info(ioc, "phy_control - reply data transfer size(%d)\n",
+					  le16_to_cpu(mpi_reply->ResponseDataLength)));
 
 		if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
 		    sizeof(struct phy_control_reply))
@@ -1533,14 +1463,14 @@
 		phy_control_reply = data_out +
 		    sizeof(struct phy_control_request);
 
-		dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-		    "phy_control - function_result(%d)\n",
-		    ioc->name, phy_control_reply->function_result));
+		dtransportprintk(ioc,
+				 ioc_info(ioc, "phy_control - function_result(%d)\n",
+					  phy_control_reply->function_result));
 
 		rc = 0;
 	} else
-		dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-		    "phy_control - no reply\n", ioc->name));
+		dtransportprintk(ioc,
+				 ioc_info(ioc, "phy_control - no reply\n"));
 
  issue_host_reset:
 	if (issue_reset)
@@ -1548,7 +1478,8 @@
  out:
 	ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
 	if (data_out)
-		pci_free_consistent(ioc->pdev, sz, data_out, data_out_dma);
+		dma_free_coherent(&ioc->pdev->dev, sz, data_out,
+				data_out_dma);
 
 	mutex_unlock(&ioc->transport_cmds.mutex);
 	return rc;
@@ -1591,16 +1522,15 @@
 	mpi_request.PhyNum = phy->number;
 
 	if ((mpt3sas_base_sas_iounit_control(ioc, &mpi_reply, &mpi_request))) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return -ENXIO;
 	}
 
 	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo)
-		pr_info(MPT3SAS_FMT
-		"phy(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
-		ioc->name, phy->number, le16_to_cpu(mpi_reply.IOCStatus),
-		    le32_to_cpu(mpi_reply.IOCLogInfo));
+		ioc_info(ioc, "phy(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
+			 phy->number, le16_to_cpu(mpi_reply.IOCStatus),
+			 le32_to_cpu(mpi_reply.IOCLogInfo));
 
 	return 0;
 }
@@ -1647,23 +1577,23 @@
 	    sizeof(Mpi2SasIOUnit0PhyData_t));
 	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
 	if (!sas_iounit_pg0) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		rc = -ENOMEM;
 		goto out;
 	}
 	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
 	    sas_iounit_pg0, sz))) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		rc = -ENXIO;
 		goto out;
 	}
 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 	    MPI2_IOCSTATUS_MASK;
 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		rc = -EIO;
 		goto out;
 	}
@@ -1672,10 +1602,8 @@
 	for (i = 0, discovery_active = 0; i < ioc->sas_hba.num_phys ; i++) {
 		if (sas_iounit_pg0->PhyData[i].PortFlags &
 		    MPI2_SASIOUNIT0_PORTFLAGS_DISCOVERY_IN_PROGRESS) {
-			pr_err(MPT3SAS_FMT "discovery is active on " \
-			    "port = %d, phy = %d: unable to enable/disable "
-			    "phys, try again later!\n", ioc->name,
-			    sas_iounit_pg0->PhyData[i].Port, i);
+			ioc_err(ioc, "discovery is active on port = %d, phy = %d: unable to enable/disable phys, try again later!\n",
+				sas_iounit_pg0->PhyData[i].Port, i);
 			discovery_active = 1;
 		}
 	}
@@ -1690,23 +1618,23 @@
 	    sizeof(Mpi2SasIOUnit1PhyData_t));
 	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
 	if (!sas_iounit_pg1) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		rc = -ENOMEM;
 		goto out;
 	}
 	if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
 	    sas_iounit_pg1, sz))) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		rc = -ENXIO;
 		goto out;
 	}
 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 	    MPI2_IOCSTATUS_MASK;
 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		rc = -EIO;
 		goto out;
 	}
@@ -1798,23 +1726,23 @@
 	    sizeof(Mpi2SasIOUnit1PhyData_t));
 	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
 	if (!sas_iounit_pg1) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		rc = -ENOMEM;
 		goto out;
 	}
 	if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
 	    sas_iounit_pg1, sz))) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		rc = -ENXIO;
 		goto out;
 	}
 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 	    MPI2_IOCSTATUS_MASK;
 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		rc = -EIO;
 		goto out;
 	}
@@ -1833,8 +1761,8 @@
 
 	if (mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
 	    sz)) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		rc = -ENXIO;
 		goto out;
 	}
@@ -1910,7 +1838,6 @@
 	Mpi2SmpPassthroughReply_t *mpi_reply;
 	int rc;
 	u16 smid;
-	u32 ioc_state;
 	void *psge;
 	dma_addr_t dma_addr_in;
 	dma_addr_t dma_addr_out;
@@ -1918,12 +1845,10 @@
 	void *addr_out = NULL;
 	size_t dma_len_in;
 	size_t dma_len_out;
-	u16 wait_state_count;
 	unsigned int reslen = 0;
 
 	if (ioc->shost_recovery || ioc->pci_error_recovery) {
-		pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
-		    __func__, ioc->name);
+		ioc_info(ioc, "%s: host reset in progress!\n", __func__);
 		rc = -EFAULT;
 		goto job_done;
 	}
@@ -1933,8 +1858,8 @@
 		goto job_done;
 
 	if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
-		pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n", ioc->name,
-		    __func__);
+		ioc_err(ioc, "%s: transport_cmds in use\n",
+			__func__);
 		rc = -EAGAIN;
 		goto out;
 	}
@@ -1955,30 +1880,13 @@
 	if (rc)
 		goto unmap_out;
 
-	wait_state_count = 0;
-	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
-	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
-		if (wait_state_count++ == 10) {
-			pr_err(MPT3SAS_FMT
-			    "%s: failed due to ioc not operational\n",
-			    ioc->name, __func__);
-			rc = -EFAULT;
-			goto unmap_in;
-		}
-		ssleep(1);
-		ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
-		pr_info(MPT3SAS_FMT
-			"%s: waiting for operational state(count=%d)\n",
-			ioc->name, __func__, wait_state_count);
-	}
-	if (wait_state_count)
-		pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
-		    ioc->name, __func__);
+	rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
+	if (rc)
+		goto unmap_in;
 
 	smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
 	if (!smid) {
-		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
 		rc = -EAGAIN;
 		goto unmap_in;
 	}
@@ -1999,16 +1907,15 @@
 	ioc->build_sg(ioc, psge, dma_addr_out, dma_len_out - 4, dma_addr_in,
 			dma_len_in - 4);
 
-	dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-		"%s - sending smp request\n", ioc->name, __func__));
+	dtransportprintk(ioc,
+			 ioc_info(ioc, "%s: sending smp request\n", __func__));
 
 	init_completion(&ioc->transport_cmds.done);
-	mpt3sas_base_put_smid_default(ioc, smid);
+	ioc->put_smid_default(ioc, smid);
 	wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
 
 	if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
-		pr_err(MPT3SAS_FMT "%s : timeout\n",
-		    __func__, ioc->name);
+		ioc_err(ioc, "%s: timeout\n", __func__);
 		_debug_dump_mf(mpi_request,
 		    sizeof(Mpi2SmpPassthroughRequest_t)/4);
 		if (!(ioc->transport_cmds.status & MPT3_CMD_RESET)) {
@@ -2018,12 +1925,11 @@
 		}
 	}
 
-	dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-		"%s - complete\n", ioc->name, __func__));
+	dtransportprintk(ioc, ioc_info(ioc, "%s - complete\n", __func__));
 
 	if (!(ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID)) {
-		dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-		    "%s - no reply\n", ioc->name, __func__));
+		dtransportprintk(ioc,
+				 ioc_info(ioc, "%s: no reply\n", __func__));
 		rc = -ENXIO;
 		goto unmap_in;
 	}
@@ -2031,9 +1937,9 @@
 	mpi_reply = ioc->transport_cmds.reply;
 
 	dtransportprintk(ioc,
-		pr_info(MPT3SAS_FMT "%s - reply data transfer size(%d)\n",
-			ioc->name, __func__,
-			le16_to_cpu(mpi_reply->ResponseDataLength)));
+			 ioc_info(ioc, "%s: reply data transfer size(%d)\n",
+				  __func__,
+				  le16_to_cpu(mpi_reply->ResponseDataLength)));
 
 	memcpy(job->reply, mpi_reply, sizeof(*mpi_reply));
 	job->reply_len = sizeof(*mpi_reply);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
index cae7c1e..6ac453f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
@@ -72,8 +72,7 @@
 	u16 sz, event_data_sz;
 	unsigned long flags;
 
-	dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n",
-	    ioc->name, __func__));
+	dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
 
 	sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
 	    sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T) + 4;
@@ -85,23 +84,23 @@
 	mpi_reply->EventDataLength = cpu_to_le16(event_data_sz);
 	memcpy(&mpi_reply->EventData, event_data,
 	    sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
-	dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
-		"%s: add to driver event log\n",
-		ioc->name, __func__));
+	dTriggerDiagPrintk(ioc,
+			   ioc_info(ioc, "%s: add to driver event log\n",
+				    __func__));
 	mpt3sas_ctl_add_to_event_log(ioc, mpi_reply);
 	kfree(mpi_reply);
  out:
 
 	/* clearing the diag_trigger_active flag */
 	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
-	dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
-		"%s: clearing diag_trigger_active flag\n",
-		ioc->name, __func__));
+	dTriggerDiagPrintk(ioc,
+			   ioc_info(ioc, "%s: clearing diag_trigger_active flag\n",
+				    __func__));
 	ioc->diag_trigger_active = 0;
 	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
 
-	dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
-	    __func__));
+	dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n",
+					 __func__));
 }
 
 /**
@@ -115,22 +114,22 @@
 {
 	u8 issue_reset = 0;
 
-	dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n",
-	    ioc->name, __func__));
+	dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
 
 	/* release the diag buffer trace */
 	if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
 	    MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
-		dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
-		"%s: release trace diag buffer\n", ioc->name, __func__));
+		dTriggerDiagPrintk(ioc,
+				   ioc_info(ioc, "%s: release trace diag buffer\n",
+					    __func__));
 		mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE,
 		    &issue_reset);
 	}
 
 	_mpt3sas_raise_sigio(ioc, event_data);
 
-	dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
-	    __func__));
+	dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n",
+					 __func__));
 }
 
 /**
@@ -168,9 +167,9 @@
 
  by_pass_checks:
 
-	dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
-		"%s: enter - trigger_bitmask = 0x%08x\n",
-		ioc->name, __func__, trigger_bitmask));
+	dTriggerDiagPrintk(ioc,
+			   ioc_info(ioc, "%s: enter - trigger_bitmask = 0x%08x\n",
+				    __func__, trigger_bitmask));
 
 	/* don't send trigger if a trigger is currently active */
 	if (ioc->diag_trigger_active) {
@@ -182,9 +181,9 @@
 	if (ioc->diag_trigger_master.MasterData & trigger_bitmask) {
 		found_match = 1;
 		ioc->diag_trigger_active = 1;
-		dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
-		"%s: setting diag_trigger_active flag\n",
-		ioc->name, __func__));
+		dTriggerDiagPrintk(ioc,
+				   ioc_info(ioc, "%s: setting diag_trigger_active flag\n",
+					    __func__));
 	}
 	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
 
@@ -202,8 +201,8 @@
 		mpt3sas_send_trigger_data_event(ioc, &event_data);
 
  out:
-	dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
-	    __func__));
+	dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n",
+					 __func__));
 }
 
 /**
@@ -239,9 +238,9 @@
 		return;
 	}
 
-	dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
-		"%s: enter - event = 0x%04x, log_entry_qualifier = 0x%04x\n",
-		ioc->name, __func__, event, log_entry_qualifier));
+	dTriggerDiagPrintk(ioc,
+			   ioc_info(ioc, "%s: enter - event = 0x%04x, log_entry_qualifier = 0x%04x\n",
+				    __func__, event, log_entry_qualifier));
 
 	/* don't send trigger if a trigger is currently active */
 	if (ioc->diag_trigger_active) {
@@ -263,26 +262,26 @@
 		}
 		found_match = 1;
 		ioc->diag_trigger_active = 1;
-		dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
-			"%s: setting diag_trigger_active flag\n",
-			ioc->name, __func__));
+		dTriggerDiagPrintk(ioc,
+				   ioc_info(ioc, "%s: setting diag_trigger_active flag\n",
+					    __func__));
 	}
 	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
 
 	if (!found_match)
 		goto out;
 
-	dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
-		"%s: setting diag_trigger_active flag\n",
-		ioc->name, __func__));
+	dTriggerDiagPrintk(ioc,
+			   ioc_info(ioc, "%s: setting diag_trigger_active flag\n",
+				    __func__));
 	memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
 	event_data.trigger_type = MPT3SAS_TRIGGER_EVENT;
 	event_data.u.event.EventValue = event;
 	event_data.u.event.LogEntryQualifier = log_entry_qualifier;
 	mpt3sas_send_trigger_data_event(ioc, &event_data);
  out:
-	dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
-	    __func__));
+	dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n",
+					 __func__));
 }
 
 /**
@@ -319,9 +318,9 @@
 		return;
 	}
 
-	dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
-		"%s: enter - sense_key = 0x%02x, asc = 0x%02x, ascq = 0x%02x\n",
-		ioc->name, __func__, sense_key, asc, ascq));
+	dTriggerDiagPrintk(ioc,
+			   ioc_info(ioc, "%s: enter - sense_key = 0x%02x, asc = 0x%02x, ascq = 0x%02x\n",
+				    __func__, sense_key, asc, ascq));
 
 	/* don't send trigger if a trigger is currently active */
 	if (ioc->diag_trigger_active) {
@@ -347,9 +346,9 @@
 	if (!found_match)
 		goto out;
 
-	dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
-		"%s: setting diag_trigger_active flag\n",
-		ioc->name, __func__));
+	dTriggerDiagPrintk(ioc,
+			   ioc_info(ioc, "%s: setting diag_trigger_active flag\n",
+				    __func__));
 	memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
 	event_data.trigger_type = MPT3SAS_TRIGGER_SCSI;
 	event_data.u.scsi.SenseKey = sense_key;
@@ -357,8 +356,8 @@
 	event_data.u.scsi.ASCQ = ascq;
 	mpt3sas_send_trigger_data_event(ioc, &event_data);
  out:
-	dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
-	    __func__));
+	dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n",
+					 __func__));
 }
 
 /**
@@ -393,9 +392,9 @@
 		return;
 	}
 
-	dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
-		"%s: enter - ioc_status = 0x%04x, loginfo = 0x%08x\n",
-		ioc->name, __func__, ioc_status, loginfo));
+	dTriggerDiagPrintk(ioc,
+			   ioc_info(ioc, "%s: enter - ioc_status = 0x%04x, loginfo = 0x%08x\n",
+				    __func__, ioc_status, loginfo));
 
 	/* don't send trigger if a trigger is currently active */
 	if (ioc->diag_trigger_active) {
@@ -420,15 +419,15 @@
 	if (!found_match)
 		goto out;
 
-	dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
-		"%s: setting diag_trigger_active flag\n",
-		ioc->name, __func__));
+	dTriggerDiagPrintk(ioc,
+			   ioc_info(ioc, "%s: setting diag_trigger_active flag\n",
+				    __func__));
 	memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
 	event_data.trigger_type = MPT3SAS_TRIGGER_MPI;
 	event_data.u.mpi.IOCStatus = ioc_status;
 	event_data.u.mpi.IocLogInfo = loginfo;
 	mpt3sas_send_trigger_data_event(ioc, &event_data);
  out:
-	dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
-	    __func__));
+	dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n",
+					 __func__));
 }
diff --git a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
index b4927f2..cc07ba4 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
@@ -127,20 +127,17 @@
 		return;
 
 	if (ioc->mfg_pg10_hide_flag ==  MFG_PAGE10_EXPOSE_ALL_DISKS) {
-		pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
-		    "globally as drives are exposed\n", ioc->name);
+		ioc_info(ioc, "WarpDrive : Direct IO is disabled globally as drives are exposed\n");
 		return;
 	}
 	if (mpt3sas_get_num_volumes(ioc) > 1) {
 		_warpdrive_disable_ddio(ioc);
-		pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
-		    "globally as number of drives > 1\n", ioc->name);
+		ioc_info(ioc, "WarpDrive : Direct IO is disabled globally as number of drives > 1\n");
 		return;
 	}
 	if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
 	    &num_pds)) || !num_pds) {
-		pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
-		    "Failure in computing number of drives\n", ioc->name);
+		ioc_info(ioc, "WarpDrive : Direct IO is disabled: Failure in computing number of drives\n");
 		return;
 	}
 
@@ -148,15 +145,13 @@
 	    sizeof(Mpi2RaidVol0PhysDisk_t));
 	vol_pg0 = kzalloc(sz, GFP_KERNEL);
 	if (!vol_pg0) {
-		pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
-		    "Memory allocation failure for RVPG0\n", ioc->name);
+		ioc_info(ioc, "WarpDrive : Direct IO is disabled: Memory allocation failure for RVPG0\n");
 		return;
 	}
 
 	if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
 	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
-		pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
-		    "Failure in retrieving RVPG0\n", ioc->name);
+		ioc_info(ioc, "WarpDrive : Direct IO is disabled: Failure in retrieving RVPG0\n");
 		kfree(vol_pg0);
 		return;
 	}
@@ -166,10 +161,8 @@
 	 * assumed for WARPDRIVE, disable direct I/O
 	 */
 	if (num_pds > MPT_MAX_WARPDRIVE_PDS) {
-		pr_warn(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
-		    "for the drive with handle(0x%04x): num_mem=%d, "
-		    "max_mem_allowed=%d\n", ioc->name, raid_device->handle,
-		    num_pds, MPT_MAX_WARPDRIVE_PDS);
+		ioc_warn(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x): num_mem=%d, max_mem_allowed=%d\n",
+			 raid_device->handle, num_pds, MPT_MAX_WARPDRIVE_PDS);
 		kfree(vol_pg0);
 		return;
 	}
@@ -179,22 +172,18 @@
 		    vol_pg0->PhysDisk[count].PhysDiskNum) ||
 		    le16_to_cpu(pd_pg0.DevHandle) ==
 		    MPT3SAS_INVALID_DEVICE_HANDLE) {
-			pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is "
-			    "disabled for the drive with handle(0x%04x) member"
-			    "handle retrieval failed for member number=%d\n",
-			    ioc->name, raid_device->handle,
-			    vol_pg0->PhysDisk[count].PhysDiskNum);
+			ioc_info(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x): member handle retrieval failed for member number=%d\n",
+				 raid_device->handle,
+				 vol_pg0->PhysDisk[count].PhysDiskNum);
 			goto out_error;
 		}
 		/* Disable direct I/O if member drive lba exceeds 4 bytes */
 		dev_max_lba = le64_to_cpu(pd_pg0.DeviceMaxLBA);
 		if (dev_max_lba >> 32) {
-			pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is "
-			    "disabled for the drive with handle(0x%04x) member"
-			    " handle (0x%04x) unsupported max lba 0x%016llx\n",
-			    ioc->name, raid_device->handle,
-			    le16_to_cpu(pd_pg0.DevHandle),
-			    (unsigned long long)dev_max_lba);
+			ioc_info(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x): member handle (0x%04x) has unsupported max lba 0x%016llx\n",
+				 raid_device->handle,
+				 le16_to_cpu(pd_pg0.DevHandle),
+				 (u64)dev_max_lba);
 			goto out_error;
 		}
 
@@ -206,41 +195,36 @@
 	 * not RAID0
 	 */
 	if (raid_device->volume_type != MPI2_RAID_VOL_TYPE_RAID0) {
-		pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
-		    "for the drive with handle(0x%04x): type=%d, "
-		    "s_sz=%uK, blk_size=%u\n", ioc->name,
-		    raid_device->handle, raid_device->volume_type,
-		    (le32_to_cpu(vol_pg0->StripeSize) *
-		    le16_to_cpu(vol_pg0->BlockSize)) / 1024,
-		    le16_to_cpu(vol_pg0->BlockSize));
+		ioc_info(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x): type=%d, s_sz=%uK, blk_size=%u\n",
+			 raid_device->handle, raid_device->volume_type,
+			 (le32_to_cpu(vol_pg0->StripeSize) *
+			  le16_to_cpu(vol_pg0->BlockSize)) / 1024,
+			 le16_to_cpu(vol_pg0->BlockSize));
 		goto out_error;
 	}
 
 	stripe_sz = le32_to_cpu(vol_pg0->StripeSize);
 	stripe_exp = find_first_bit(&stripe_sz, 32);
 	if (stripe_exp == 32) {
-		pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
-		"for the drive with handle(0x%04x) invalid stripe sz %uK\n",
-		    ioc->name, raid_device->handle,
-		    (le32_to_cpu(vol_pg0->StripeSize) *
-		    le16_to_cpu(vol_pg0->BlockSize)) / 1024);
+		ioc_info(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x) invalid stripe sz %uK\n",
+			 raid_device->handle,
+			 (le32_to_cpu(vol_pg0->StripeSize) *
+			  le16_to_cpu(vol_pg0->BlockSize)) / 1024);
 		goto out_error;
 	}
 	raid_device->stripe_exponent = stripe_exp;
 	block_sz = le16_to_cpu(vol_pg0->BlockSize);
 	block_exp = find_first_bit(&block_sz, 16);
 	if (block_exp == 16) {
-		pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
-		    "for the drive with handle(0x%04x) invalid block sz %u\n",
-		    ioc->name, raid_device->handle,
-		    le16_to_cpu(vol_pg0->BlockSize));
+		ioc_info(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x) invalid block sz %u\n",
+			 raid_device->handle, le16_to_cpu(vol_pg0->BlockSize));
 		goto out_error;
 	}
 	raid_device->block_exponent = block_exp;
 	raid_device->direct_io_enabled = 1;
 
-	pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is Enabled for the drive"
-	    " with handle(0x%04x)\n", ioc->name, raid_device->handle);
+	ioc_info(ioc, "WarpDrive : Direct IO is Enabled for the drive with handle(0x%04x)\n",
+		 raid_device->handle);
 	/*
 	 * WARPDRIVE: Though the following fields are not used for direct IO,
 	 * stored for future purpose:
diff --git a/drivers/scsi/mvme147.c b/drivers/scsi/mvme147.c
index 7d1ab41..ca96d6d 100644
--- a/drivers/scsi/mvme147.c
+++ b/drivers/scsi/mvme147.c
@@ -78,7 +78,6 @@
 	.this_id		= 7,
 	.sg_tablesize		= SG_ALL,
 	.cmd_per_lun		= CMD_PER_LUN,
-	.use_clustering		= ENABLE_CLUSTERING
 };
 
 static struct Scsi_Host *mvme147_shost;
diff --git a/drivers/scsi/mvme16x_scsi.c b/drivers/scsi/mvme16x_scsi.c
index 050c8c3..21d6382 100644
--- a/drivers/scsi/mvme16x_scsi.c
+++ b/drivers/scsi/mvme16x_scsi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Detection routine for the NCR53c710 based MVME16x SCSI Controllers for Linux.
  *
diff --git a/drivers/scsi/mvsas/Kconfig b/drivers/scsi/mvsas/Kconfig
index 78f7e20..79812b8 100644
--- a/drivers/scsi/mvsas/Kconfig
+++ b/drivers/scsi/mvsas/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 #
 # Kernel configuration file for 88SE64XX/88SE94XX SAS/SATA driver.
 #
@@ -5,25 +6,6 @@
 # Copyright 2008 Marvell. <kewei@marvell.com>
 # Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
 #
-# This file is licensed under GPLv2.
-#
-# This file is part of the 88SE64XX/88SE94XX driver.
-#
-# The 88SE64XX/88SE94XX driver is free software; you can redistribute
-# it and/or modify it under the terms of the GNU General Public License
-# as published by the Free Software Foundation; version 2 of the
-# License.
-#
-# The 88SE64XX/88SE94XX driver is distributed in the hope that it will be
-# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-# General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with 88SE64XX/88SE94XX Driver; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
-#
-#
 
 config SCSI_MVSAS
 	tristate "Marvell 88SE64XX/88SE94XX SAS/SATA support"
diff --git a/drivers/scsi/mvsas/Makefile b/drivers/scsi/mvsas/Makefile
index 87b231a..7584925 100644
--- a/drivers/scsi/mvsas/Makefile
+++ b/drivers/scsi/mvsas/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 #
 # Makefile for Marvell 88SE64xx/88SE84xx SAS/SATA driver.
 #
@@ -5,22 +6,6 @@
 # Copyright 2008 Marvell. <kewei@marvell.com>
 # Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
 #
-# This file is licensed under GPLv2.
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License as
-# published by the Free Software Foundation; version 2 of the
-# License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-# General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
-# USA
 
 ccflags-$(CONFIG_SCSI_MVSAS_DEBUG) := -DMV_DEBUG
 
diff --git a/drivers/scsi/mvsas/mv_64xx.c b/drivers/scsi/mvsas/mv_64xx.c
index b757d38..1f2b61d 100644
--- a/drivers/scsi/mvsas/mv_64xx.c
+++ b/drivers/scsi/mvsas/mv_64xx.c
@@ -1,26 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Marvell 88SE64xx hardware specific
  *
  * Copyright 2007 Red Hat, Inc.
  * Copyright 2008 Marvell. <kewei@marvell.com>
  * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
- *
- * This file is licensed under GPLv2.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; version 2 of the
- * License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
 */
 
 #include "mv_sas.h"
@@ -678,7 +662,8 @@
 static void mvs_64xx_spi_write_data(struct mvs_info *mvi, u32 data)
 {
 	void __iomem *regs = mvi->regs_ex;
-	 iow32(SPI_DATA_REG_64XX, data);
+
+	iow32(SPI_DATA_REG_64XX, data);
 }
 
 
diff --git a/drivers/scsi/mvsas/mv_64xx.h b/drivers/scsi/mvsas/mv_64xx.h
index 545889b..c25a5df 100644
--- a/drivers/scsi/mvsas/mv_64xx.h
+++ b/drivers/scsi/mvsas/mv_64xx.h
@@ -1,26 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Marvell 88SE64xx hardware specific head file
  *
  * Copyright 2007 Red Hat, Inc.
  * Copyright 2008 Marvell. <kewei@marvell.com>
  * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
- *
- * This file is licensed under GPLv2.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; version 2 of the
- * License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
 */
 
 #ifndef _MVS64XX_REG_H_
diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c
index eb5471b..fc0b8eb 100644
--- a/drivers/scsi/mvsas/mv_94xx.c
+++ b/drivers/scsi/mvsas/mv_94xx.c
@@ -1,26 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Marvell 88SE94xx hardware specific
  *
  * Copyright 2007 Red Hat, Inc.
  * Copyright 2008 Marvell. <kewei@marvell.com>
  * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
- *
- * This file is licensed under GPLv2.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; version 2 of the
- * License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
 */
 
 #include "mv_sas.h"
@@ -946,7 +930,8 @@
 static void mvs_94xx_spi_write_data(struct mvs_info *mvi, u32 data)
 {
 	void __iomem *regs = mvi->regs_ex - 0x10200;
-	 mw32(SPI_RD_DATA_REG_94XX, data);
+
+	mw32(SPI_RD_DATA_REG_94XX, data);
 }
 
 
diff --git a/drivers/scsi/mvsas/mv_94xx.h b/drivers/scsi/mvsas/mv_94xx.h
index 5789608..a243182 100644
--- a/drivers/scsi/mvsas/mv_94xx.h
+++ b/drivers/scsi/mvsas/mv_94xx.h
@@ -1,26 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Marvell 88SE94xx hardware specific head file
  *
  * Copyright 2007 Red Hat, Inc.
  * Copyright 2008 Marvell. <kewei@marvell.com>
  * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
- *
- * This file is licensed under GPLv2.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; version 2 of the
- * License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
 */
 
 #ifndef _MVS94XX_REG_H_
diff --git a/drivers/scsi/mvsas/mv_chips.h b/drivers/scsi/mvsas/mv_chips.h
index 8c4479a..0e7366f 100644
--- a/drivers/scsi/mvsas/mv_chips.h
+++ b/drivers/scsi/mvsas/mv_chips.h
@@ -1,26 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Marvell 88SE64xx/88SE94xx register IO interface
  *
  * Copyright 2007 Red Hat, Inc.
  * Copyright 2008 Marvell. <kewei@marvell.com>
  * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
- *
- * This file is licensed under GPLv2.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; version 2 of the
- * License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
 */
 
 
diff --git a/drivers/scsi/mvsas/mv_defs.h b/drivers/scsi/mvsas/mv_defs.h
index f545194..199ab49 100644
--- a/drivers/scsi/mvsas/mv_defs.h
+++ b/drivers/scsi/mvsas/mv_defs.h
@@ -1,26 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Marvell 88SE64xx/88SE94xx const head file
  *
  * Copyright 2007 Red Hat, Inc.
  * Copyright 2008 Marvell. <kewei@marvell.com>
  * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
- *
- * This file is licensed under GPLv2.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; version 2 of the
- * License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
 */
 
 #ifndef _MV_DEFS_H_
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 8c91637..da719b0 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -1,26 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Marvell 88SE64xx/88SE94xx pci init
  *
  * Copyright 2007 Red Hat, Inc.
  * Copyright 2008 Marvell. <kewei@marvell.com>
  * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
- *
- * This file is licensed under GPLv2.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; version 2 of the
- * License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
 */
 
 
@@ -59,7 +43,6 @@
 	.this_id		= -1,
 	.sg_tablesize		= SG_ALL,
 	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
-	.use_clustering		= ENABLE_CLUSTERING,
 	.eh_device_reset_handler = sas_eh_device_reset_handler,
 	.eh_target_reset_handler = sas_eh_target_reset_handler,
 	.target_destroy		= sas_target_destroy,
@@ -403,29 +386,14 @@
 {
 	int rc;
 
-	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-		if (rc) {
-			rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-			if (rc) {
-				dev_printk(KERN_ERR, &pdev->dev,
-					   "64-bit DMA enable failed\n");
-				return rc;
-			}
-		}
-	} else {
-		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
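+	/* Try a 64-bit DMA mask first and fall back to 32-bit if that fails */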
+	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (rc) {
+		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 		if (rc) {
 			dev_printk(KERN_ERR, &pdev->dev,
 				   "32-bit DMA enable failed\n");
 			return rc;
 		}
-		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-		if (rc) {
-			dev_printk(KERN_ERR, &pdev->dev,
-				   "32-bit consistent DMA enable failed\n");
-			return rc;
-		}
 	}
 
 	return rc;
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index cff43bd..3e0b8eb 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -1,26 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Marvell 88SE64xx/88SE94xx main function
  *
  * Copyright 2007 Red Hat, Inc.
  * Copyright 2008 Marvell. <kewei@marvell.com>
  * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
- *
- * This file is licensed under GPLv2.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; version 2 of the
- * License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
 */
 
 #include "mv_sas.h"
@@ -336,13 +320,13 @@
 	 * DMA-map SMP request, response buffers
 	 */
 	sg_req = &task->smp_task.smp_req;
-	elem = dma_map_sg(mvi->dev, sg_req, 1, PCI_DMA_TODEVICE);
+	elem = dma_map_sg(mvi->dev, sg_req, 1, DMA_TO_DEVICE);
 	if (!elem)
 		return -ENOMEM;
 	req_len = sg_dma_len(sg_req);
 
 	sg_resp = &task->smp_task.smp_resp;
-	elem = dma_map_sg(mvi->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
+	elem = dma_map_sg(mvi->dev, sg_resp, 1, DMA_FROM_DEVICE);
 	if (!elem) {
 		rc = -ENOMEM;
 		goto err_out;
@@ -416,10 +400,10 @@
 
 err_out_2:
 	dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_resp, 1,
-		     PCI_DMA_FROMDEVICE);
+		     DMA_FROM_DEVICE);
 err_out:
 	dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_req, 1,
-		     PCI_DMA_TODEVICE);
+		     DMA_TO_DEVICE);
 	return rc;
 }
 
@@ -790,12 +774,11 @@
 	slot->n_elem = n_elem;
 	slot->slot_tag = tag;
 
-	slot->buf = dma_pool_alloc(mvi->dma_pool, GFP_ATOMIC, &slot->buf_dma);
+	slot->buf = dma_pool_zalloc(mvi->dma_pool, GFP_ATOMIC, &slot->buf_dma);
 	if (!slot->buf) {
 		rc = -ENOMEM;
 		goto err_out_tag;
 	}
-	memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
 
 	tei.task = task;
 	tei.hdr = &mvi->slot[tag];
@@ -904,9 +887,9 @@
 	switch (task->task_proto) {
 	case SAS_PROTOCOL_SMP:
 		dma_unmap_sg(mvi->dev, &task->smp_task.smp_resp, 1,
-			     PCI_DMA_FROMDEVICE);
+			     DMA_FROM_DEVICE);
 		dma_unmap_sg(mvi->dev, &task->smp_task.smp_req, 1,
-			     PCI_DMA_TODEVICE);
+			     DMA_TO_DEVICE);
 		break;
 
 	case SAS_PROTOCOL_SATA:
@@ -1210,7 +1193,7 @@
 	mvi_device->dev_type = dev->dev_type;
 	mvi_device->mvi_info = mvi;
 	mvi_device->sas_device = dev;
-	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
+	if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
 		int phy_id;
 		u8 phy_num = parent_dev->ex_dev.num_phys;
 		struct ex_phy *phy;
@@ -1423,7 +1406,7 @@
 {
 	unsigned long flags;
 	int rc = TMF_RESP_FUNC_FAILED;
-    struct mvs_device * mvi_dev = (struct mvs_device *)dev->lldd_dev;
+	struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
 	struct mvs_info *mvi = mvi_dev->mvi_info;
 
 	if (mvi_dev->dev_status != MVS_DEV_EH)
@@ -1906,8 +1889,7 @@
 
 		if (phy->phy_event & PHY_PLUG_OUT) {
 			u32 tmp;
-			struct sas_identify_frame *id;
-			id = (struct sas_identify_frame *)phy->frame_rcvd;
+
 			tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no);
 			phy->phy_event &= ~PHY_PLUG_OUT;
 			if (!(tmp & PHY_READY_MASK)) {
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
index 080676c..519edc7 100644
--- a/drivers/scsi/mvsas/mv_sas.h
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -1,26 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Marvell 88SE64xx/88SE94xx main function head file
  *
  * Copyright 2007 Red Hat, Inc.
  * Copyright 2008 Marvell. <kewei@marvell.com>
  * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
- *
- * This file is licensed under GPLv2.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; version 2 of the
- * License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
 */
 
 #ifndef _MV_SAS_H_
@@ -66,9 +50,6 @@
 extern const struct mvs_dispatch mvs_64xx_dispatch;
 extern const struct mvs_dispatch mvs_94xx_dispatch;
 
-#define DEV_IS_EXPANDER(type)	\
-	((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE))
-
 #define bit(n) ((u64)1 << n)
 
 #define for_each_phy(__lseq_mask, __mc, __lseq)			\
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index b3cd9a6..8906ace 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -1,24 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Marvell UMI driver
  *
  * Copyright 2011 Marvell. <jyli@marvell.com>
- *
- * This file is licensed under GPLv2.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; version 2 of the
- * License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
 */
 
 #include <linux/kernel.h>
@@ -143,8 +127,9 @@
 
 	case RESOURCE_UNCACHED_MEMORY:
 		size = round_up(size, 8);
-		res->virt_addr = pci_zalloc_consistent(mhba->pdev, size,
-						       &res->bus_addr);
+		res->virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size,
+						    &res->bus_addr,
+						    GFP_KERNEL);
 		if (!res->virt_addr) {
 			dev_err(&mhba->pdev->dev,
 					"unable to allocate consistent mem,"
@@ -175,7 +160,7 @@
 	list_for_each_entry_safe(res, tmp, &mhba->res_list, entry) {
 		switch (res->type) {
 		case RESOURCE_UNCACHED_MEMORY:
-			pci_free_consistent(mhba->pdev, res->size,
+			dma_free_coherent(&mhba->pdev->dev, res->size,
 						res->virt_addr, res->bus_addr);
 			break;
 		case RESOURCE_CACHED_MEMORY:
@@ -210,23 +195,22 @@
 	unsigned int sgnum = scsi_sg_count(scmd);
 	dma_addr_t busaddr;
 
-	sg = scsi_sglist(scmd);
-	*sg_count = pci_map_sg(mhba->pdev, sg, sgnum,
-			       (int) scmd->sc_data_direction);
+	*sg_count = dma_map_sg(&mhba->pdev->dev, scsi_sglist(scmd), sgnum,
+			       scmd->sc_data_direction);
 	if (*sg_count > mhba->max_sge) {
 		dev_err(&mhba->pdev->dev,
 			"sg count[0x%x] is bigger than max sg[0x%x].\n",
 			*sg_count, mhba->max_sge);
-		pci_unmap_sg(mhba->pdev, sg, sgnum,
-			     (int) scmd->sc_data_direction);
+		dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd), sgnum,
+			     scmd->sc_data_direction);
 		return -1;
 	}
-	for (i = 0; i < *sg_count; i++) {
-		busaddr = sg_dma_address(&sg[i]);
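+	/*
+	 * Iterate with the SG accessors; the mapped list may be chained,
+	 * so indexing &sg[i] directly is unsafe.
+	 */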
+	scsi_for_each_sg(scmd, sg, *sg_count, i) {
+		busaddr = sg_dma_address(sg);
 		m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
 		m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
 		m_sg->flags = 0;
-		sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(&sg[i])));
+		sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(sg)));
 		if ((i + 1) == *sg_count)
 			m_sg->flags |= 1U << mhba->eot_flag;
 
@@ -246,7 +230,8 @@
 	if (size == 0)
 		return 0;
 
-	virt_addr = pci_zalloc_consistent(mhba->pdev, size, &phy_addr);
+	virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size, &phy_addr,
+				       GFP_KERNEL);
 	if (!virt_addr)
 		return -1;
 
@@ -274,8 +259,8 @@
 	}
 	INIT_LIST_HEAD(&cmd->queue_pointer);
 
-	cmd->frame = pci_alloc_consistent(mhba->pdev,
-				mhba->ib_max_size, &cmd->frame_phys);
+	cmd->frame = dma_alloc_coherent(&mhba->pdev->dev, mhba->ib_max_size,
+			&cmd->frame_phys, GFP_KERNEL);
 	if (!cmd->frame) {
 		dev_err(&mhba->pdev->dev, "failed to allocate memory for FW"
 			" frame,size = %d.\n", mhba->ib_max_size);
@@ -287,7 +272,7 @@
 		if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) {
 			dev_err(&mhba->pdev->dev, "failed to allocate memory"
 						" for internal frame\n");
-			pci_free_consistent(mhba->pdev, mhba->ib_max_size,
+			dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size,
 					cmd->frame, cmd->frame_phys);
 			kfree(cmd);
 			return NULL;
@@ -313,10 +298,10 @@
 			phy_addr = (dma_addr_t) m_sg->baseaddr_l |
 				(dma_addr_t) ((m_sg->baseaddr_h << 16) << 16);
 
-			pci_free_consistent(mhba->pdev, size, cmd->data_buf,
+			dma_free_coherent(&mhba->pdev->dev, size, cmd->data_buf,
 								phy_addr);
 		}
-		pci_free_consistent(mhba->pdev, mhba->ib_max_size,
+		dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size,
 				cmd->frame, cmd->frame_phys);
 		kfree(cmd);
 	}
@@ -663,16 +648,17 @@
 	}
 }
 
-static unsigned int mvumi_pci_set_master(struct pci_dev *pdev)
+static int mvumi_pci_set_master(struct pci_dev *pdev)
 {
-	unsigned int ret = 0;
+	int ret = 0;
+
 	pci_set_master(pdev);
 
 	if (IS_DMA64) {
-		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
-			ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
+			ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 	} else
-		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 
 	return ret;
 }
@@ -715,8 +701,8 @@
 
 	mhba = (struct mvumi_hba *) scmd->device->host->hostdata;
 
-	scmd_printk(KERN_NOTICE, scmd, "RESET -%ld cmd=%x retries=%x\n",
-			scmd->serial_number, scmd->cmnd[0], scmd->retries);
+	scmd_printk(KERN_NOTICE, scmd, "RESET -%u cmd=%x retries=%x\n",
+			scmd->request->tag, scmd->cmnd[0], scmd->retries);
 
 	return mhba->instancet->reset_host(mhba);
 }
@@ -749,7 +735,7 @@
 		spin_lock_irqsave(mhba->shost->host_lock, flags);
 		atomic_dec(&cmd->sync_cmd);
 		if (mhba->tag_cmd[cmd->frame->tag]) {
-			mhba->tag_cmd[cmd->frame->tag] = 0;
+			mhba->tag_cmd[cmd->frame->tag] = NULL;
 			dev_warn(&mhba->pdev->dev, "TIMEOUT:release tag [%d]\n",
 							cmd->frame->tag);
 			tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
@@ -771,7 +757,7 @@
 	mvumi_free_cmds(mhba);
 	mvumi_release_mem_resource(mhba);
 	mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
-	pci_free_consistent(mhba->pdev, HSP_MAX_SIZE,
+	dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE,
 		mhba->handshake_page, mhba->handshake_page_phys);
 	kfree(mhba->regs);
 	pci_release_regions(mhba->pdev);
@@ -1339,9 +1325,9 @@
 	}
 
 	if (scsi_bufflen(scmd))
-		pci_unmap_sg(mhba->pdev, scsi_sglist(scmd),
+		dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
 			     scsi_sg_count(scmd),
-			     (int) scmd->sc_data_direction);
+			     scmd->sc_data_direction);
 	cmd->scmd->scsi_done(scmd);
 	mvumi_return_cmd(mhba, cmd);
 }
@@ -1791,7 +1777,7 @@
 		cmd = mhba->tag_cmd[ob_frame->tag];
 
 		atomic_dec(&mhba->fw_outstanding);
-		mhba->tag_cmd[ob_frame->tag] = 0;
+		mhba->tag_cmd[ob_frame->tag] = NULL;
 		tag_release_one(mhba, &mhba->tag_pool, ob_frame->tag);
 		if (cmd->scmd)
 			mvumi_complete_cmd(mhba, cmd, ob_frame);
@@ -2101,7 +2087,6 @@
 	unsigned long irq_flags;
 
 	spin_lock_irqsave(shost->host_lock, irq_flags);
-	scsi_cmd_get_serial(shost, scmd);
 
 	mhba = (struct mvumi_hba *) shost->hostdata;
 	scmd->result = 0;
@@ -2137,7 +2122,7 @@
 	spin_lock_irqsave(mhba->shost->host_lock, flags);
 
 	if (mhba->tag_cmd[cmd->frame->tag]) {
-		mhba->tag_cmd[cmd->frame->tag] = 0;
+		mhba->tag_cmd[cmd->frame->tag] = NULL;
 		tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
 	}
 	if (!list_empty(&cmd->queue_pointer))
@@ -2148,9 +2133,9 @@
 	scmd->result = (DRIVER_INVALID << 24) | (DID_ABORT << 16);
 	scmd->SCp.ptr = NULL;
 	if (scsi_bufflen(scmd)) {
-		pci_unmap_sg(mhba->pdev, scsi_sglist(scmd),
+		dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
 			     scsi_sg_count(scmd),
-			     (int)scmd->sc_data_direction);
+			     scmd->sc_data_direction);
 	}
 	mvumi_return_cmd(mhba, cmd);
 	spin_unlock_irqrestore(mhba->shost->host_lock, flags);
@@ -2195,6 +2180,7 @@
 	.eh_timed_out = mvumi_timed_out,
 	.eh_host_reset_handler = mvumi_host_reset,
 	.bios_param = mvumi_bios_param,
+	.dma_boundary = PAGE_SIZE - 1,
 	.this_id = -1,
 };
 
@@ -2362,8 +2348,8 @@
 		ret = -ENOMEM;
 		goto fail_alloc_mem;
 	}
-	mhba->handshake_page = pci_alloc_consistent(mhba->pdev, HSP_MAX_SIZE,
-						&mhba->handshake_page_phys);
+	mhba->handshake_page = dma_alloc_coherent(&mhba->pdev->dev,
+			HSP_MAX_SIZE, &mhba->handshake_page_phys, GFP_KERNEL);
 	if (!mhba->handshake_page) {
 		dev_err(&mhba->pdev->dev,
 			"failed to allocate memory for handshake\n");
@@ -2383,7 +2369,7 @@
 
 fail_ready_state:
 	mvumi_release_mem_resource(mhba);
-	pci_free_consistent(mhba->pdev, HSP_MAX_SIZE,
+	dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE,
 		mhba->handshake_page, mhba->handshake_page_phys);
 fail_alloc_page:
 	kfree(mhba->regs);
@@ -2480,20 +2466,9 @@
 	if (ret)
 		return ret;
 
-	pci_set_master(pdev);
-
-	if (IS_DMA64) {
-		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
-		if (ret) {
-			ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-			if (ret)
-				goto fail_set_dma_mask;
-		}
-	} else {
-		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-		if (ret)
-			goto fail_set_dma_mask;
-	}
+	ret = mvumi_pci_set_master(pdev);
+	if (ret)
+		goto fail_set_dma_mask;
 
 	host = scsi_host_alloc(&mvumi_template, sizeof(*mhba));
 	if (!host) {
@@ -2627,19 +2602,11 @@
 		dev_err(&pdev->dev, "enable device failed\n");
 		return ret;
 	}
-	pci_set_master(pdev);
-	if (IS_DMA64) {
-		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
-		if (ret) {
-			ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-			if (ret)
-				goto fail;
-		}
-	} else {
-		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-		if (ret)
-			goto fail;
-	}
+
+	ret = mvumi_pci_set_master(pdev);
+	if (ret)
+		goto fail;
 	ret = pci_request_regions(mhba->pdev, MV_DRIVER_NAME);
 	if (ret)
 		goto fail;
diff --git a/drivers/scsi/mvumi.h b/drivers/scsi/mvumi.h
index 41f1687..ec8cc22 100644
--- a/drivers/scsi/mvumi.h
+++ b/drivers/scsi/mvumi.h
@@ -1,24 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
   * Marvell UMI head file
   *
   * Copyright 2011 Marvell. <jyli@marvell.com>
-  *
-  * This file is licensed under GPLv2.
-  *
-  * This program is free software; you can redistribute it and/or
-  * modify it under the terms of the GNU General Public License as
-  * published by the Free Software Foundation; version 2 of the
-  * License.
-  *
-  * This program is distributed in the hope that it will be useful,
-  * but WITHOUT ANY WARRANTY; without even the implied warranty of
-  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-  * General Public License for more details.
-  *
-  * You should have received a copy of the GNU General Public License
-  * along with this program; if not, write to the Free Software
-  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
-  * USA
  */
 
 #ifndef MVUMI_H
diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
new file mode 100644
index 0000000..539ac8c
--- /dev/null
+++ b/drivers/scsi/myrb.c
@@ -0,0 +1,3660 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
+ *
+ * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
+ *
+ * Based on the original DAC960 driver,
+ * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
+ * Portions Copyright 2002 by Mylex (An IBM Business Unit)
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/raid_class.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_tcq.h>
+#include "myrb.h"
+
+static struct raid_template *myrb_raid_template;
+
+static void myrb_monitor(struct work_struct *work);
+static inline void myrb_translate_devstate(void *DeviceState);
+
+static inline int myrb_logical_channel(struct Scsi_Host *shost)
+{
+	return shost->max_channel - 1;
+}
+
+static struct myrb_devstate_name_entry {
+	enum myrb_devstate state;
+	const char *name;
+} myrb_devstate_name_list[] = {
+	{ MYRB_DEVICE_DEAD, "Dead" },
+	{ MYRB_DEVICE_WO, "WriteOnly" },
+	{ MYRB_DEVICE_ONLINE, "Online" },
+	{ MYRB_DEVICE_CRITICAL, "Critical" },
+	{ MYRB_DEVICE_STANDBY, "Standby" },
+	{ MYRB_DEVICE_OFFLINE, "Offline" },
+};
+
+static const char *myrb_devstate_name(enum myrb_devstate state)
+{
+	struct myrb_devstate_name_entry *entry = myrb_devstate_name_list;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(myrb_devstate_name_list); i++) {
+		if (entry[i].state == state)
+			return entry[i].name;
+	}
+	return "Unknown";
+}
+
+static struct myrb_raidlevel_name_entry {
+	enum myrb_raidlevel level;
+	const char *name;
+} myrb_raidlevel_name_list[] = {
+	{ MYRB_RAID_LEVEL0, "RAID0" },
+	{ MYRB_RAID_LEVEL1, "RAID1" },
+	{ MYRB_RAID_LEVEL3, "RAID3" },
+	{ MYRB_RAID_LEVEL5, "RAID5" },
+	{ MYRB_RAID_LEVEL6, "RAID6" },
+	{ MYRB_RAID_JBOD, "JBOD" },
+};
+
+static const char *myrb_raidlevel_name(enum myrb_raidlevel level)
+{
+	struct myrb_raidlevel_name_entry *entry = myrb_raidlevel_name_list;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(myrb_raidlevel_name_list); i++) {
+		if (entry[i].level == level)
+			return entry[i].name;
+	}
+	return NULL;
+}
+
+/**
+ * myrb_create_mempools - allocates auxiliary data structures
+ *
+ * Return: true on success, false otherwise.
+ */
+static bool myrb_create_mempools(struct pci_dev *pdev, struct myrb_hba *cb)
+{
+	size_t elem_size, elem_align;
+
+	elem_align = sizeof(struct myrb_sge);
+	elem_size = cb->host->sg_tablesize * elem_align;
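+	/* Each pool element holds the complete SG list for one command */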
+	cb->sg_pool = dma_pool_create("myrb_sg", &pdev->dev,
+				      elem_size, elem_align, 0);
+	if (cb->sg_pool == NULL) {
+		shost_printk(KERN_ERR, cb->host,
+			     "Failed to allocate SG pool\n");
+		return false;
+	}
+
+	cb->dcdb_pool = dma_pool_create("myrb_dcdb", &pdev->dev,
+				       sizeof(struct myrb_dcdb),
+				       sizeof(unsigned int), 0);
+	if (!cb->dcdb_pool) {
+		dma_pool_destroy(cb->sg_pool);
+		cb->sg_pool = NULL;
+		shost_printk(KERN_ERR, cb->host,
+			     "Failed to allocate DCDB pool\n");
+		return false;
+	}
+
+	snprintf(cb->work_q_name, sizeof(cb->work_q_name),
+		 "myrb_wq_%d", cb->host->host_no);
+	cb->work_q = create_singlethread_workqueue(cb->work_q_name);
+	if (!cb->work_q) {
+		dma_pool_destroy(cb->dcdb_pool);
+		cb->dcdb_pool = NULL;
+		dma_pool_destroy(cb->sg_pool);
+		cb->sg_pool = NULL;
+		shost_printk(KERN_ERR, cb->host,
+			     "Failed to create workqueue\n");
+		return false;
+	}
+
+	/*
+	 * Initialize the Monitoring Timer.
+	 */
+	INIT_DELAYED_WORK(&cb->monitor_work, myrb_monitor);
+	queue_delayed_work(cb->work_q, &cb->monitor_work, 1);
+
+	return true;
+}
+
+/**
+ * myrb_destroy_mempools - tears down the memory pools for the controller
+ */
+static void myrb_destroy_mempools(struct myrb_hba *cb)
+{
+	cancel_delayed_work_sync(&cb->monitor_work);
+	destroy_workqueue(cb->work_q);
+
+	dma_pool_destroy(cb->sg_pool);
+	dma_pool_destroy(cb->dcdb_pool);
+}
+
+/**
+ * myrb_reset_cmd - reset command block
+ */
+static inline void myrb_reset_cmd(struct myrb_cmdblk *cmd_blk)
+{
+	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+
+	memset(mbox, 0, sizeof(union myrb_cmd_mbox));
+	cmd_blk->status = 0;
+}
+
+/**
+ * myrb_qcmd - queues command block for execution
+ */
+static void myrb_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
+{
+	void __iomem *base = cb->io_base;
+	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+	union myrb_cmd_mbox *next_mbox = cb->next_cmd_mbox;
+
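+	/*
+	 * Write the command into the next slot of the memory mailbox ring.
+	 * The new-command doorbell only needs to be rung when one of the
+	 * two previously used slots has been consumed (words[0] cleared);
+	 * otherwise the controller is still scanning the ring and will
+	 * pick up this command on its own.
+	 */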
+	cb->write_cmd_mbox(next_mbox, mbox);
+	if (cb->prev_cmd_mbox1->words[0] == 0 ||
+	    cb->prev_cmd_mbox2->words[0] == 0)
+		cb->get_cmd_mbox(base);
+	cb->prev_cmd_mbox2 = cb->prev_cmd_mbox1;
+	cb->prev_cmd_mbox1 = next_mbox;
+	if (++next_mbox > cb->last_cmd_mbox)
+		next_mbox = cb->first_cmd_mbox;
+	cb->next_cmd_mbox = next_mbox;
+}
+
+/**
+ * myrb_exec_cmd - executes command block and waits for completion.
+ *
+ * Return: command status
+ */
+static unsigned short myrb_exec_cmd(struct myrb_hba *cb,
+		struct myrb_cmdblk *cmd_blk)
+{
+	DECLARE_COMPLETION_ONSTACK(cmpl);
+	unsigned long flags;
+
+	cmd_blk->completion = &cmpl;
+
+	spin_lock_irqsave(&cb->queue_lock, flags);
+	cb->qcmd(cb, cmd_blk);
+	spin_unlock_irqrestore(&cb->queue_lock, flags);
+
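+	/* The command completes from the ISR; callers must be able to sleep here */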
+	WARN_ON(in_interrupt());
+	wait_for_completion(&cmpl);
+	return cmd_blk->status;
+}
+
+/**
+ * myrb_exec_type3 - executes a type 3 command and waits for completion.
+ *
+ * Return: command status
+ */
+static unsigned short myrb_exec_type3(struct myrb_hba *cb,
+		enum myrb_cmd_opcode op, dma_addr_t addr)
+{
+	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
+	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+	unsigned short status;
+
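+	/* cb->dcmd_blk is shared between callers and serialized by dcmd_mutex */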
+	mutex_lock(&cb->dcmd_mutex);
+	myrb_reset_cmd(cmd_blk);
+	mbox->type3.id = MYRB_DCMD_TAG;
+	mbox->type3.opcode = op;
+	mbox->type3.addr = addr;
+	status = myrb_exec_cmd(cb, cmd_blk);
+	mutex_unlock(&cb->dcmd_mutex);
+	return status;
+}
+
+/**
+ * myrb_exec_type3D - executes a type 3D command and waits for completion.
+ *
+ * Return: command status
+ */
+static unsigned short myrb_exec_type3D(struct myrb_hba *cb,
+		enum myrb_cmd_opcode op, struct scsi_device *sdev,
+		struct myrb_pdev_state *pdev_info)
+{
+	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
+	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+	unsigned short status;
+	dma_addr_t pdev_info_addr;
+
+	pdev_info_addr = dma_map_single(&cb->pdev->dev, pdev_info,
+					sizeof(struct myrb_pdev_state),
+					DMA_FROM_DEVICE);
+	if (dma_mapping_error(&cb->pdev->dev, pdev_info_addr))
+		return MYRB_STATUS_SUBSYS_FAILED;
+
+	mutex_lock(&cb->dcmd_mutex);
+	myrb_reset_cmd(cmd_blk);
+	mbox->type3D.id = MYRB_DCMD_TAG;
+	mbox->type3D.opcode = op;
+	mbox->type3D.channel = sdev->channel;
+	mbox->type3D.target = sdev->id;
+	mbox->type3D.addr = pdev_info_addr;
+	status = myrb_exec_cmd(cb, cmd_blk);
+	mutex_unlock(&cb->dcmd_mutex);
+	dma_unmap_single(&cb->pdev->dev, pdev_info_addr,
+			 sizeof(struct myrb_pdev_state), DMA_FROM_DEVICE);
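+	/* Old-style firmware commands return the device state in a legacy layout */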
+	if (status == MYRB_STATUS_SUCCESS &&
+	    mbox->type3D.opcode == MYRB_CMD_GET_DEVICE_STATE_OLD)
+		myrb_translate_devstate(pdev_info);
+
+	return status;
+}
+
+static char *myrb_event_msg[] = {
+	"killed because write recovery failed",
+	"killed because of SCSI bus reset failure",
+	"killed because of double check condition",
+	"killed because it was removed",
+	"killed because of gross error on SCSI chip",
+	"killed because of bad tag returned from drive",
+	"killed because of timeout on SCSI command",
+	"killed because of reset SCSI command issued from system",
+	"killed because busy or parity error count exceeded limit",
+	"killed because of 'kill drive' command from system",
+	"killed because of selection timeout",
+	"killed due to SCSI phase sequence error",
+	"killed due to unknown status",
+};
+
+/**
+ * myrb_get_event - get event log from HBA
+ * @cb: pointer to the hba structure
+ * @event: number of the event
+ *
+ * Execute a type 3E command and logs the event message
+ */
+static void myrb_get_event(struct myrb_hba *cb, unsigned int event)
+{
+	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
+	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+	struct myrb_log_entry *ev_buf;
+	dma_addr_t ev_addr;
+	unsigned short status;
+
+	ev_buf = dma_alloc_coherent(&cb->pdev->dev,
+				    sizeof(struct myrb_log_entry),
+				    &ev_addr, GFP_KERNEL);
+	if (!ev_buf)
+		return;
+
+	myrb_reset_cmd(cmd_blk);
+	mbox->type3E.id = MYRB_MCMD_TAG;
+	mbox->type3E.opcode = MYRB_CMD_EVENT_LOG_OPERATION;
+	mbox->type3E.optype = DAC960_V1_GetEventLogEntry;
+	mbox->type3E.opqual = 1;
+	mbox->type3E.ev_seq = event;
+	mbox->type3E.addr = ev_addr;
+	status = myrb_exec_cmd(cb, cmd_blk);
+	if (status != MYRB_STATUS_SUCCESS)
+		shost_printk(KERN_INFO, cb->host,
+			     "Failed to get event log %d, status %04x\n",
+			     event, status);
+
+	else if (ev_buf->seq_num == event) {
+		struct scsi_sense_hdr sshdr;
+
+		memset(&sshdr, 0, sizeof(sshdr));
+		scsi_normalize_sense(ev_buf->sense, 32, &sshdr);
+
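+		/*
+		 * Vendor-specific sense with ASC 0x80 encodes the reason a
+		 * drive was killed in the ASCQ field, indexed below.
+		 */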
+		if (sshdr.sense_key == VENDOR_SPECIFIC &&
+		    sshdr.asc == 0x80 &&
+		    sshdr.ascq < ARRAY_SIZE(myrb_event_msg))
+			shost_printk(KERN_CRIT, cb->host,
+				     "Physical drive %d:%d: %s\n",
+				     ev_buf->channel, ev_buf->target,
+				     myrb_event_msg[sshdr.ascq]);
+		else
+			shost_printk(KERN_CRIT, cb->host,
+				     "Physical drive %d:%d: Sense: %X/%02X/%02X\n",
+				     ev_buf->channel, ev_buf->target,
+				     sshdr.sense_key, sshdr.asc, sshdr.ascq);
+	}
+
+	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_log_entry),
+			  ev_buf, ev_addr);
+}
+
+/**
+ * myrb_get_errtable - retrieves the error table from the controller
+ *
+ * Executes a type 3 command and logs the error table from the controller.
+ */
+static void myrb_get_errtable(struct myrb_hba *cb)
+{
+	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
+	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+	unsigned short status;
+	struct myrb_error_entry old_table[MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS];
+
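+	/* Snapshot the previous table so only changed error counters are logged */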
+	memcpy(&old_table, cb->err_table, sizeof(old_table));
+
+	myrb_reset_cmd(cmd_blk);
+	mbox->type3.id = MYRB_MCMD_TAG;
+	mbox->type3.opcode = MYRB_CMD_GET_ERROR_TABLE;
+	mbox->type3.addr = cb->err_table_addr;
+	status = myrb_exec_cmd(cb, cmd_blk);
+	if (status == MYRB_STATUS_SUCCESS) {
+		struct myrb_error_entry *table = cb->err_table;
+		struct myrb_error_entry *new, *old;
+		size_t err_table_offset;
+		struct scsi_device *sdev;
+
+		shost_for_each_device(sdev, cb->host) {
+			if (sdev->channel >= myrb_logical_channel(cb->host))
+				continue;
+			err_table_offset = sdev->channel * MYRB_MAX_TARGETS
+				+ sdev->id;
+			new = table + err_table_offset;
+			old = &old_table[err_table_offset];
+			if (new->parity_err == old->parity_err &&
+			    new->soft_err == old->soft_err &&
+			    new->hard_err == old->hard_err &&
+			    new->misc_err == old->misc_err)
+				continue;
+			sdev_printk(KERN_CRIT, sdev,
+				    "Errors: Parity = %d, Soft = %d, Hard = %d, Misc = %d\n",
+				    new->parity_err, new->soft_err,
+				    new->hard_err, new->misc_err);
+		}
+	}
+}
+
+/**
+ * myrb_get_ldev_info - retrieves the logical device table from the controller
+ *
+ * Executes a type 3 command and updates the logical device table.
+ *
+ * Return: command status
+ */
+static unsigned short myrb_get_ldev_info(struct myrb_hba *cb)
+{
+	unsigned short status;
+	int ldev_num, ldev_cnt = cb->enquiry->ldev_count;
+	struct Scsi_Host *shost = cb->host;
+
+	status = myrb_exec_type3(cb, MYRB_CMD_GET_LDEV_INFO,
+				 cb->ldev_info_addr);
+	if (status != MYRB_STATUS_SUCCESS)
+		return status;
+
+	for (ldev_num = 0; ldev_num < ldev_cnt; ldev_num++) {
+		struct myrb_ldev_info *old = NULL;
+		struct myrb_ldev_info *new = cb->ldev_info_buf + ldev_num;
+		struct scsi_device *sdev;
+
+		sdev = scsi_device_lookup(shost, myrb_logical_channel(shost),
+					  ldev_num, 0);
+		if (!sdev) {
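+			/* Not yet known to the midlayer; add the device unless it is offline */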
+			if (new->state == MYRB_DEVICE_OFFLINE)
+				continue;
+			shost_printk(KERN_INFO, shost,
+				     "Adding Logical Drive %d in state %s\n",
+				     ldev_num, myrb_devstate_name(new->state));
+			scsi_add_device(shost, myrb_logical_channel(shost),
+					ldev_num, 0);
+			continue;
+		}
+		old = sdev->hostdata;
+		if (new->state != old->state)
+			shost_printk(KERN_INFO, shost,
+				     "Logical Drive %d is now %s\n",
+				     ldev_num, myrb_devstate_name(new->state));
+		if (new->wb_enabled != old->wb_enabled)
+			sdev_printk(KERN_INFO, sdev,
+				    "Logical Drive is now WRITE %s\n",
+				    (new->wb_enabled ? "BACK" : "THRU"));
+		memcpy(old, new, sizeof(*new));
+		scsi_device_put(sdev);
+	}
+	return status;
+}
+
+/**
+ * myrb_get_rbld_progress - get rebuild progress information
+ *
+ * Executes a type 3 command and returns the rebuild progress
+ * information.
+ *
+ * Return: command status
+ */
+static unsigned short myrb_get_rbld_progress(struct myrb_hba *cb,
+		struct myrb_rbld_progress *rbld)
+{
+	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
+	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+	struct myrb_rbld_progress *rbld_buf;
+	dma_addr_t rbld_addr;
+	unsigned short status;
+
+	rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
+				      sizeof(struct myrb_rbld_progress),
+				      &rbld_addr, GFP_KERNEL);
+	if (!rbld_buf)
+		return MYRB_STATUS_RBLD_NOT_CHECKED;
+
+	myrb_reset_cmd(cmd_blk);
+	mbox->type3.id = MYRB_MCMD_TAG;
+	mbox->type3.opcode = MYRB_CMD_GET_REBUILD_PROGRESS;
+	mbox->type3.addr = rbld_addr;
+	status = myrb_exec_cmd(cb, cmd_blk);
+	if (rbld)
+		memcpy(rbld, rbld_buf, sizeof(struct myrb_rbld_progress));
+	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
+			  rbld_buf, rbld_addr);
+	return status;
+}
+
+/**
+ * myrb_update_rbld_progress - updates the rebuild status
+ *
+ * Updates the rebuild status for the attached logical devices.
+ *
+ */
+static void myrb_update_rbld_progress(struct myrb_hba *cb)
+{
+	struct myrb_rbld_progress rbld_buf;
+	unsigned short status;
+
+	status = myrb_get_rbld_progress(cb, &rbld_buf);
+	if (status == MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS &&
+	    cb->last_rbld_status == MYRB_STATUS_SUCCESS)
+		status = MYRB_STATUS_RBLD_SUCCESS;
+	if (status != MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS) {
+		unsigned int blocks_done =
+			rbld_buf.ldev_size - rbld_buf.blocks_left;
+		struct scsi_device *sdev;
+
+		sdev = scsi_device_lookup(cb->host,
+					  myrb_logical_channel(cb->host),
+					  rbld_buf.ldev_num, 0);
+		if (!sdev)
+			return;
+
+		switch (status) {
+		case MYRB_STATUS_SUCCESS:
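+			/* Shift both counts by 7 so 100 * blocks_done fits in 32 bits */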
+			sdev_printk(KERN_INFO, sdev,
+				    "Rebuild in Progress, %d%% completed\n",
+				    (100 * (blocks_done >> 7))
+				    / (rbld_buf.ldev_size >> 7));
+			break;
+		case MYRB_STATUS_RBLD_FAILED_LDEV_FAILURE:
+			sdev_printk(KERN_INFO, sdev,
+				    "Rebuild Failed due to Logical Drive Failure\n");
+			break;
+		case MYRB_STATUS_RBLD_FAILED_BADBLOCKS:
+			sdev_printk(KERN_INFO, sdev,
+				    "Rebuild Failed due to Bad Blocks on Other Drives\n");
+			break;
+		case MYRB_STATUS_RBLD_FAILED_NEW_DRIVE_FAILED:
+			sdev_printk(KERN_INFO, sdev,
+				    "Rebuild Failed due to Failure of Drive Being Rebuilt\n");
+			break;
+		case MYRB_STATUS_RBLD_SUCCESS:
+			sdev_printk(KERN_INFO, sdev,
+				    "Rebuild Completed Successfully\n");
+			break;
+		case MYRB_STATUS_RBLD_SUCCESS_TERMINATED:
+			sdev_printk(KERN_INFO, sdev,
+				     "Rebuild Successfully Terminated\n");
+			break;
+		default:
+			break;
+		}
+		scsi_device_put(sdev);
+	}
+	cb->last_rbld_status = status;
+}
+
+/**
+ * myrb_get_cc_progress - retrieve the rebuild status
+ *
+ * Execute a type 3 Command and fetch the rebuild / consistency check
+ * status.
+ */
+static void myrb_get_cc_progress(struct myrb_hba *cb)
+{
+	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
+	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+	struct myrb_rbld_progress *rbld_buf;
+	dma_addr_t rbld_addr;
+	unsigned short status;
+
+	rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
+				      sizeof(struct myrb_rbld_progress),
+				      &rbld_addr, GFP_KERNEL);
+	if (!rbld_buf) {
+		cb->need_cc_status = true;
+		return;
+	}
+	myrb_reset_cmd(cmd_blk);
+	mbox->type3.id = MYRB_MCMD_TAG;
+	mbox->type3.opcode = MYRB_CMD_REBUILD_STAT;
+	mbox->type3.addr = rbld_addr;
+	status = myrb_exec_cmd(cb, cmd_blk);
+	if (status == MYRB_STATUS_SUCCESS) {
+		unsigned int ldev_num = rbld_buf->ldev_num;
+		unsigned int ldev_size = rbld_buf->ldev_size;
+		unsigned int blocks_done =
+			ldev_size - rbld_buf->blocks_left;
+		struct scsi_device *sdev;
+
+		sdev = scsi_device_lookup(cb->host,
+					  myrb_logical_channel(cb->host),
+					  ldev_num, 0);
+		if (sdev) {
+			sdev_printk(KERN_INFO, sdev,
+				    "Consistency Check in Progress: %d%% completed\n",
+				    (100 * (blocks_done >> 7))
+				    / (ldev_size >> 7));
+			scsi_device_put(sdev);
+		}
+	}
+	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
+			  rbld_buf, rbld_addr);
+}
+
+/**
+ * myrb_bgi_control - updates background initialisation status
+ *
+ * Executes a type 3B command and updates the background initialisation status
+ */
+static void myrb_bgi_control(struct myrb_hba *cb)
+{
+	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
+	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+	struct myrb_bgi_status *bgi, *last_bgi;
+	dma_addr_t bgi_addr;
+	struct scsi_device *sdev = NULL;
+	unsigned short status;
+
+	bgi = dma_alloc_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
+				 &bgi_addr, GFP_KERNEL);
+	if (!bgi) {
+		shost_printk(KERN_ERR, cb->host,
+			     "Failed to allocate bgi memory\n");
+		return;
+	}
+	myrb_reset_cmd(cmd_blk);
+	mbox->type3B.id = MYRB_DCMD_TAG;
+	mbox->type3B.opcode = MYRB_CMD_BGI_CONTROL;
+	mbox->type3B.optype = 0x20;
+	mbox->type3B.addr = bgi_addr;
+	status = myrb_exec_cmd(cb, cmd_blk);
+	last_bgi = &cb->bgi_status;
+	sdev = scsi_device_lookup(cb->host,
+				  myrb_logical_channel(cb->host),
+				  bgi->ldev_num, 0);
+	switch (status) {
+	case MYRB_STATUS_SUCCESS:
+		switch (bgi->status) {
+		case MYRB_BGI_INVALID:
+			break;
+		case MYRB_BGI_STARTED:
+			if (!sdev)
+				break;
+			sdev_printk(KERN_INFO, sdev,
+				    "Background Initialization Started\n");
+			break;
+		case MYRB_BGI_INPROGRESS:
+			if (!sdev)
+				break;
+			if (bgi->blocks_done == last_bgi->blocks_done &&
+			    bgi->ldev_num == last_bgi->ldev_num)
+				break;
+			sdev_printk(KERN_INFO, sdev,
+				 "Background Initialization in Progress: %d%% completed\n",
+				 (100 * (bgi->blocks_done >> 7))
+				 / (bgi->ldev_size >> 7));
+			break;
+		case MYRB_BGI_SUSPENDED:
+			if (!sdev)
+				break;
+			sdev_printk(KERN_INFO, sdev,
+				    "Background Initialization Suspended\n");
+			break;
+		case MYRB_BGI_CANCELLED:
+			if (!sdev)
+				break;
+			sdev_printk(KERN_INFO, sdev,
+				    "Background Initialization Cancelled\n");
+			break;
+		}
+		memcpy(&cb->bgi_status, bgi, sizeof(struct myrb_bgi_status));
+		break;
+	case MYRB_STATUS_BGI_SUCCESS:
+		if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
+			sdev_printk(KERN_INFO, sdev,
+				    "Background Initialization Completed Successfully\n");
+		cb->bgi_status.status = MYRB_BGI_INVALID;
+		break;
+	case MYRB_STATUS_BGI_ABORTED:
+		if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
+			sdev_printk(KERN_INFO, sdev,
+				    "Background Initialization Aborted\n");
+		/* Fallthrough */
+	case MYRB_STATUS_NO_BGI_INPROGRESS:
+		cb->bgi_status.status = MYRB_BGI_INVALID;
+		break;
+	}
+	if (sdev)
+		scsi_device_put(sdev);
+	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
+			  bgi, bgi_addr);
+}
+
+/**
+ * myrb_hba_enquiry - updates the controller status
+ *
+ * Executes a DAC_V1_Enquiry command and updates the controller status.
+ *
+ * Return: command status
+ */
+static unsigned short myrb_hba_enquiry(struct myrb_hba *cb)
+{
+	struct myrb_enquiry old, *new;
+	unsigned short status;
+
+	memcpy(&old, cb->enquiry, sizeof(struct myrb_enquiry));
+
+	status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY, cb->enquiry_addr);
+	if (status != MYRB_STATUS_SUCCESS)
+		return status;
+
+	new = cb->enquiry;
+	if (new->ldev_count > old.ldev_count) {
+		int ldev_num = old.ldev_count - 1;
+
+		while (++ldev_num < new->ldev_count)
+			shost_printk(KERN_CRIT, cb->host,
+				     "Logical Drive %d Now Exists\n",
+				     ldev_num);
+	}
+	if (new->ldev_count < old.ldev_count) {
+		int ldev_num = new->ldev_count - 1;
+
+		while (++ldev_num < old.ldev_count)
+			shost_printk(KERN_CRIT, cb->host,
+				     "Logical Drive %d No Longer Exists\n",
+				     ldev_num);
+	}
+	if (new->status.deferred != old.status.deferred)
+		shost_printk(KERN_CRIT, cb->host,
+			     "Deferred Write Error Flag is now %s\n",
+			     (new->status.deferred ? "TRUE" : "FALSE"));
+	if (new->ev_seq != old.ev_seq) {
+		cb->new_ev_seq = new->ev_seq;
+		cb->need_err_info = true;
+		shost_printk(KERN_INFO, cb->host,
+			     "Event log %d/%d (%d/%d) available\n",
+			     cb->old_ev_seq, cb->new_ev_seq,
+			     old.ev_seq, new->ev_seq);
+	}
+	if ((new->ldev_critical > 0 &&
+	     new->ldev_critical != old.ldev_critical) ||
+	    (new->ldev_offline > 0 &&
+	     new->ldev_offline != old.ldev_offline) ||
+	    (new->ldev_count != old.ldev_count)) {
+		shost_printk(KERN_INFO, cb->host,
+			     "Logical drive count changed (%d/%d/%d)\n",
+			     new->ldev_critical,
+			     new->ldev_offline,
+			     new->ldev_count);
+		cb->need_ldev_info = true;
+	}
+	if (new->pdev_dead > 0 ||
+	    new->pdev_dead != old.pdev_dead ||
+	    time_after_eq(jiffies, cb->secondary_monitor_time
+			  + MYRB_SECONDARY_MONITOR_INTERVAL)) {
+		cb->need_bgi_status = cb->bgi_status_supported;
+		cb->secondary_monitor_time = jiffies;
+	}
+	if (new->rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
+	    new->rbld == MYRB_BG_RBLD_IN_PROGRESS ||
+	    old.rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
+	    old.rbld == MYRB_BG_RBLD_IN_PROGRESS) {
+		cb->need_rbld = true;
+		cb->rbld_first = (new->ldev_critical < old.ldev_critical);
+	}
+	if (old.rbld == MYRB_BG_CHECK_IN_PROGRESS)
+		switch (new->rbld) {
+		case MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS:
+			shost_printk(KERN_INFO, cb->host,
+				     "Consistency Check Completed Successfully\n");
+			break;
+		case MYRB_STDBY_RBLD_IN_PROGRESS:
+		case MYRB_BG_RBLD_IN_PROGRESS:
+			break;
+		case MYRB_BG_CHECK_IN_PROGRESS:
+			cb->need_cc_status = true;
+			break;
+		case MYRB_STDBY_RBLD_COMPLETED_WITH_ERROR:
+			shost_printk(KERN_INFO, cb->host,
+				     "Consistency Check Completed with Error\n");
+			break;
+		case MYRB_BG_RBLD_OR_CHECK_FAILED_DRIVE_FAILED:
+			shost_printk(KERN_INFO, cb->host,
+				     "Consistency Check Failed - Physical Device Failed\n");
+			break;
+		case MYRB_BG_RBLD_OR_CHECK_FAILED_LDEV_FAILED:
+			shost_printk(KERN_INFO, cb->host,
+				     "Consistency Check Failed - Logical Drive Failed\n");
+			break;
+		case MYRB_BG_RBLD_OR_CHECK_FAILED_OTHER:
+			shost_printk(KERN_INFO, cb->host,
+				     "Consistency Check Failed - Other Causes\n");
+			break;
+		case MYRB_BG_RBLD_OR_CHECK_SUCCESS_TERMINATED:
+			shost_printk(KERN_INFO, cb->host,
+				     "Consistency Check Successfully Terminated\n");
+			break;
+		}
+	else if (new->rbld == MYRB_BG_CHECK_IN_PROGRESS)
+		cb->need_cc_status = true;
+
+	return MYRB_STATUS_SUCCESS;
+}
+
+/**
+ * myrb_set_pdev_state - sets the device state for a physical device
+ *
+ * Return: command status
+ */
+static unsigned short myrb_set_pdev_state(struct myrb_hba *cb,
+		struct scsi_device *sdev, enum myrb_devstate state)
+{
+	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
+	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+	unsigned short status;
+
+	mutex_lock(&cb->dcmd_mutex);
+	mbox->type3D.opcode = MYRB_CMD_START_DEVICE;
+	mbox->type3D.id = MYRB_DCMD_TAG;
+	mbox->type3D.channel = sdev->channel;
+	mbox->type3D.target = sdev->id;
+	mbox->type3D.state = state & 0x1F;
+	status = myrb_exec_cmd(cb, cmd_blk);
+	mutex_unlock(&cb->dcmd_mutex);
+
+	return status;
+}
+
+/**
+ * myrb_enable_mmio - enables the Memory Mailbox Interface
+ *
+ * PD and P controller types have no memory mailbox, but still need the
+ * other dma mapped memory.
+ *
+ * Return: true on success, false otherwise.
+ */
+static bool myrb_enable_mmio(struct myrb_hba *cb, mbox_mmio_init_t mmio_init_fn)
+{
+	void __iomem *base = cb->io_base;
+	struct pci_dev *pdev = cb->pdev;
+	size_t err_table_size;
+	size_t ldev_info_size;
+	union myrb_cmd_mbox *cmd_mbox_mem;
+	struct myrb_stat_mbox *stat_mbox_mem;
+	union myrb_cmd_mbox mbox;
+	unsigned short status;
+
+	memset(&mbox, 0, sizeof(union myrb_cmd_mbox));
+
+	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
+		dev_err(&pdev->dev, "DMA mask out of range\n");
+		return false;
+	}
+
+	cb->enquiry = dma_alloc_coherent(&pdev->dev,
+					 sizeof(struct myrb_enquiry),
+					 &cb->enquiry_addr, GFP_KERNEL);
+	if (!cb->enquiry)
+		return false;
+
+	err_table_size = sizeof(struct myrb_error_entry) *
+		MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
+	cb->err_table = dma_alloc_coherent(&pdev->dev, err_table_size,
+					   &cb->err_table_addr, GFP_KERNEL);
+	if (!cb->err_table)
+		return false;
+
+	ldev_info_size = sizeof(struct myrb_ldev_info) * MYRB_MAX_LDEVS;
+	cb->ldev_info_buf = dma_alloc_coherent(&pdev->dev, ldev_info_size,
+					       &cb->ldev_info_addr, GFP_KERNEL);
+	if (!cb->ldev_info_buf)
+		return false;
+
+	/*
+	 * Skip mailbox initialisation for PD and P Controllers
+	 */
+	if (!mmio_init_fn)
+		return true;
+
+	/* These are the base addresses for the command memory mailbox array */
+	cb->cmd_mbox_size =  MYRB_CMD_MBOX_COUNT * sizeof(union myrb_cmd_mbox);
+	cb->first_cmd_mbox = dma_alloc_coherent(&pdev->dev,
+						cb->cmd_mbox_size,
+						&cb->cmd_mbox_addr,
+						GFP_KERNEL);
+	if (!cb->first_cmd_mbox)
+		return false;
+
+	cmd_mbox_mem = cb->first_cmd_mbox;
+	cmd_mbox_mem += MYRB_CMD_MBOX_COUNT - 1;
+	cb->last_cmd_mbox = cmd_mbox_mem;
+	cb->next_cmd_mbox = cb->first_cmd_mbox;
+	cb->prev_cmd_mbox1 = cb->last_cmd_mbox;
+	cb->prev_cmd_mbox2 = cb->last_cmd_mbox - 1;
+
+	/* These are the base addresses for the status memory mailbox array */
+	cb->stat_mbox_size = MYRB_STAT_MBOX_COUNT *
+	    sizeof(struct myrb_stat_mbox);
+	cb->first_stat_mbox = dma_alloc_coherent(&pdev->dev,
+						 cb->stat_mbox_size,
+						 &cb->stat_mbox_addr,
+						 GFP_KERNEL);
+	if (!cb->first_stat_mbox)
+		return false;
+
+	stat_mbox_mem = cb->first_stat_mbox;
+	stat_mbox_mem += MYRB_STAT_MBOX_COUNT - 1;
+	cb->last_stat_mbox = stat_mbox_mem;
+	cb->next_stat_mbox = cb->first_stat_mbox;
+
+	/* Enable the Memory Mailbox Interface. */
+	cb->dual_mode_interface = true;
+	mbox.typeX.opcode = 0x2B;
+	mbox.typeX.id = 0;
+	mbox.typeX.opcode2 = 0x14;
+	mbox.typeX.cmd_mbox_addr = cb->cmd_mbox_addr;
+	mbox.typeX.stat_mbox_addr = cb->stat_mbox_addr;
+
+	status = mmio_init_fn(pdev, base, &mbox);
+	if (status != MYRB_STATUS_SUCCESS) {
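+		/* Dual-mode setup rejected; retry in single-mode (opcode2 0x10) */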
+		cb->dual_mode_interface = false;
+		mbox.typeX.opcode2 = 0x10;
+		status = mmio_init_fn(pdev, base, &mbox);
+		if (status != MYRB_STATUS_SUCCESS) {
+			dev_err(&pdev->dev,
+				"Failed to enable mailbox, statux %02X\n",
+				status);
+			return false;
+		}
+	}
+	return true;
+}
+
+/**
+ * myrb_get_hba_config - reads the configuration information
+ *
+ * Reads the configuration information from the controller and
+ * initializes the controller structure.
+ *
+ * Return: 0 on success, errno otherwise
+ */
+static int myrb_get_hba_config(struct myrb_hba *cb)
+{
+	struct myrb_enquiry2 *enquiry2;
+	dma_addr_t enquiry2_addr;
+	struct myrb_config2 *config2;
+	dma_addr_t config2_addr;
+	struct Scsi_Host *shost = cb->host;
+	struct pci_dev *pdev = cb->pdev;
+	int pchan_max = 0, pchan_cur = 0;
+	unsigned short status;
+	int ret = -ENODEV, memsize = 0;
+
+	enquiry2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
+				      &enquiry2_addr, GFP_KERNEL);
+	if (!enquiry2) {
+		shost_printk(KERN_ERR, cb->host,
+			     "Failed to allocate V1 enquiry2 memory\n");
+		return -ENOMEM;
+	}
+	config2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_config2),
+				     &config2_addr, GFP_KERNEL);
+	if (!config2) {
+		shost_printk(KERN_ERR, cb->host,
+			     "Failed to allocate V1 config2 memory\n");
+		dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
+				  enquiry2, enquiry2_addr);
+		return -ENOMEM;
+	}
+	mutex_lock(&cb->dma_mutex);
+	status = myrb_hba_enquiry(cb);
+	mutex_unlock(&cb->dma_mutex);
+	if (status != MYRB_STATUS_SUCCESS) {
+		shost_printk(KERN_WARNING, cb->host,
+			     "Failed it issue V1 Enquiry\n");
+		goto out_free;
+	}
+
+	status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY2, enquiry2_addr);
+	if (status != MYRB_STATUS_SUCCESS) {
+		shost_printk(KERN_WARNING, cb->host,
+			     "Failed to issue V1 Enquiry2\n");
+		goto out_free;
+	}
+
+	status = myrb_exec_type3(cb, MYRB_CMD_READ_CONFIG2, config2_addr);
+	if (status != MYRB_STATUS_SUCCESS) {
+		shost_printk(KERN_WARNING, cb->host,
+			     "Failed to issue ReadConfig2\n");
+		goto out_free;
+	}
+
+	status = myrb_get_ldev_info(cb);
+	if (status != MYRB_STATUS_SUCCESS) {
+		shost_printk(KERN_WARNING, cb->host,
+			     "Failed to get logical drive information\n");
+		goto out_free;
+	}
+
+	/*
+	 * Initialize the Controller Model Name and Full Model Name fields.
+	 */
+	switch (enquiry2->hw.sub_model) {
+	case DAC960_V1_P_PD_PU:
+		if (enquiry2->scsi_cap.bus_speed == MYRB_SCSI_SPEED_ULTRA)
+			strcpy(cb->model_name, "DAC960PU");
+		else
+			strcpy(cb->model_name, "DAC960PD");
+		break;
+	case DAC960_V1_PL:
+		strcpy(cb->model_name, "DAC960PL");
+		break;
+	case DAC960_V1_PG:
+		strcpy(cb->model_name, "DAC960PG");
+		break;
+	case DAC960_V1_PJ:
+		strcpy(cb->model_name, "DAC960PJ");
+		break;
+	case DAC960_V1_PR:
+		strcpy(cb->model_name, "DAC960PR");
+		break;
+	case DAC960_V1_PT:
+		strcpy(cb->model_name, "DAC960PT");
+		break;
+	case DAC960_V1_PTL0:
+		strcpy(cb->model_name, "DAC960PTL0");
+		break;
+	case DAC960_V1_PRL:
+		strcpy(cb->model_name, "DAC960PRL");
+		break;
+	case DAC960_V1_PTL1:
+		strcpy(cb->model_name, "DAC960PTL1");
+		break;
+	case DAC960_V1_1164P:
+		strcpy(cb->model_name, "eXtremeRAID 1100");
+		break;
+	default:
+		shost_printk(KERN_WARNING, cb->host,
+			     "Unknown Model %X\n",
+			     enquiry2->hw.sub_model);
+		goto out;
+	}
+	/*
+	 * Initialize the Controller Firmware Version field and verify that it
+	 * is a supported firmware version.
+	 * The supported firmware versions are:
+	 *
+	 * DAC1164P		    5.06 and above
+	 * DAC960PTL/PRL/PJ/PG	    4.06 and above
+	 * DAC960PU/PD/PL	    3.51 and above
+	 * DAC960PU/PD/PL/P	    2.73 and above
+	 */
+#if defined(CONFIG_ALPHA)
+	/*
+	 * DEC Alpha machines were often equipped with DAC960 cards that were
+	 * OEMed from Mylex, and had their own custom firmware. Version 2.70,
+	 * the last custom FW revision to be released by DEC for these older
+	 * controllers, appears to work quite well with this driver.
+	 *
+	 * Cards tested successfully were several versions each of the PD and
+	 * PU, called by DEC the KZPSC and KZPAC, respectively, and having
+	 * the Manufacturer Numbers (from Mylex), usually on a sticker on the
+	 * back of the board, of:
+	 *
+	 * KZPSC:  D040347 (1-channel) or D040348 (2-channel)
+	 *         or D040349 (3-channel)
+	 * KZPAC:  D040395 (1-channel) or D040396 (2-channel)
+	 *         or D040397 (3-channel)
+	 */
+# define FIRMWARE_27X	"2.70"
+#else
+# define FIRMWARE_27X	"2.73"
+#endif
+
+	if (enquiry2->fw.major_version == 0) {
+		enquiry2->fw.major_version = cb->enquiry->fw_major_version;
+		enquiry2->fw.minor_version = cb->enquiry->fw_minor_version;
+		enquiry2->fw.firmware_type = '0';
+		enquiry2->fw.turn_id = 0;
+	}
+	snprintf(cb->fw_version, sizeof(cb->fw_version),
+		"%d.%02d-%c-%02d",
+		enquiry2->fw.major_version,
+		enquiry2->fw.minor_version,
+		enquiry2->fw.firmware_type,
+		enquiry2->fw.turn_id);
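+	/*
+	 * The fixed "major.minor" prefix produced above makes a plain
+	 * lexicographic comparison against FIRMWARE_27X sufficient for
+	 * the 2.xx firmware check below.
+	 */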
+	if (!((enquiry2->fw.major_version == 5 &&
+	       enquiry2->fw.minor_version >= 6) ||
+	      (enquiry2->fw.major_version == 4 &&
+	       enquiry2->fw.minor_version >= 6) ||
+	      (enquiry2->fw.major_version == 3 &&
+	       enquiry2->fw.minor_version >= 51) ||
+	      (enquiry2->fw.major_version == 2 &&
+	       strcmp(cb->fw_version, FIRMWARE_27X) >= 0))) {
+		shost_printk(KERN_WARNING, cb->host,
+			"Firmware Version '%s' unsupported\n",
+			cb->fw_version);
+		goto out;
+	}
+	/*
+	 * Initialize the Channels, Targets, Memory Size, and SAF-TE
+	 * Enclosure Management Enabled fields.
+	 */
+	switch (enquiry2->hw.model) {
+	case MYRB_5_CHANNEL_BOARD:
+		pchan_max = 5;
+		break;
+	case MYRB_3_CHANNEL_BOARD:
+	case MYRB_3_CHANNEL_ASIC_DAC:
+		pchan_max = 3;
+		break;
+	case MYRB_2_CHANNEL_BOARD:
+		pchan_max = 2;
+		break;
+	default:
+		pchan_max = enquiry2->cfg_chan;
+		break;
+	}
+	pchan_cur = enquiry2->cur_chan;
+	if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_32BIT)
+		cb->bus_width = 32;
+	else if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_16BIT)
+		cb->bus_width = 16;
+	else
+		cb->bus_width = 8;
+	cb->ldev_block_size = enquiry2->ldev_block_size;
+	shost->max_channel = pchan_cur;
+	shost->max_id = enquiry2->max_targets;
+	memsize = enquiry2->mem_size >> 20;
+	cb->safte_enabled = (enquiry2->fault_mgmt == MYRB_FAULT_SAFTE);
+	/*
+	 * Initialize the Controller Queue Depth, Driver Queue Depth,
+	 * Logical Drive Count, Maximum Blocks per Command, Controller
+	 * Scatter/Gather Limit, and Driver Scatter/Gather Limit.
+	 * The Driver Queue Depth must be at most one less than the
+	 * Controller Queue Depth to allow for an automatic drive
+	 * rebuild operation.
+	 */
+	shost->can_queue = cb->enquiry->max_tcq;
+	if (shost->can_queue < 3)
+		shost->can_queue = enquiry2->max_cmds;
+	if (shost->can_queue < 3)
+		/* Play safe and disable TCQ */
+		shost->can_queue = 1;
+
+	if (shost->can_queue > MYRB_CMD_MBOX_COUNT - 2)
+		shost->can_queue = MYRB_CMD_MBOX_COUNT - 2;
+	shost->max_sectors = enquiry2->max_sectors;
+	shost->sg_tablesize = enquiry2->max_sge;
+	if (shost->sg_tablesize > MYRB_SCATTER_GATHER_LIMIT)
+		shost->sg_tablesize = MYRB_SCATTER_GATHER_LIMIT;
+	/*
+	 * Initialize the Stripe Size, Segment Size, and Geometry Translation.
+	 */
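+	/*
+	 * config2 reports these values as counts of
+	 * (1 << MYRB_BLKSIZE_BITS)-byte blocks; shifting by
+	 * (10 - MYRB_BLKSIZE_BITS) converts them to kilobytes.
+	 */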
+	cb->stripe_size = config2->blocks_per_stripe * config2->block_factor
+		>> (10 - MYRB_BLKSIZE_BITS);
+	cb->segment_size = config2->blocks_per_cacheline * config2->block_factor
+		>> (10 - MYRB_BLKSIZE_BITS);
+	/* Assume 255/63 translation */
+	cb->ldev_geom_heads = 255;
+	cb->ldev_geom_sectors = 63;
+	if (config2->drive_geometry) {
+		cb->ldev_geom_heads = 128;
+		cb->ldev_geom_sectors = 32;
+	}
+
+	/*
+	 * Initialize the Background Initialization Status.
+	 */
+	if ((cb->fw_version[0] == '4' &&
+	     strcmp(cb->fw_version, "4.08") >= 0) ||
+	    (cb->fw_version[0] == '5' &&
+	     strcmp(cb->fw_version, "5.08") >= 0)) {
+		cb->bgi_status_supported = true;
+		myrb_bgi_control(cb);
+	}
+	cb->last_rbld_status = MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS;
+	ret = 0;
+
+out:
+	shost_printk(KERN_INFO, cb->host,
+		"Configuring %s PCI RAID Controller\n", cb->model_name);
+	shost_printk(KERN_INFO, cb->host,
+		"  Firmware Version: %s, Memory Size: %dMB\n",
+		cb->fw_version, memsize);
+	if (cb->io_addr == 0)
+		shost_printk(KERN_INFO, cb->host,
+			"  I/O Address: n/a, PCI Address: 0x%lX, IRQ Channel: %d\n",
+			(unsigned long)cb->pci_addr, cb->irq);
+	else
+		shost_printk(KERN_INFO, cb->host,
+			"  I/O Address: 0x%lX, PCI Address: 0x%lX, IRQ Channel: %d\n",
+			(unsigned long)cb->io_addr, (unsigned long)cb->pci_addr,
+			cb->irq);
+	shost_printk(KERN_INFO, cb->host,
+		"  Controller Queue Depth: %d, Maximum Blocks per Command: %d\n",
+		cb->host->can_queue, cb->host->max_sectors);
+	shost_printk(KERN_INFO, cb->host,
+		     "  Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n",
+		     cb->host->can_queue, cb->host->sg_tablesize,
+		     MYRB_SCATTER_GATHER_LIMIT);
+	shost_printk(KERN_INFO, cb->host,
+		     "  Stripe Size: %dKB, Segment Size: %dKB, BIOS Geometry: %d/%d%s\n",
+		     cb->stripe_size, cb->segment_size,
+		     cb->ldev_geom_heads, cb->ldev_geom_sectors,
+		     cb->safte_enabled ?
+		     "  SAF-TE Enclosure Management Enabled" : "");
+	shost_printk(KERN_INFO, cb->host,
+		     "  Physical: %d/%d channels %d/%d/%d devices\n",
+		     pchan_cur, pchan_max, 0, cb->enquiry->pdev_dead,
+		     cb->host->max_id);
+
+	shost_printk(KERN_INFO, cb->host,
+		     "  Logical: 1/1 channels, %d/%d disks\n",
+		     cb->enquiry->ldev_count, MYRB_MAX_LDEVS);
+
+out_free:
+	dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
+			  enquiry2, enquiry2_addr);
+	dma_free_coherent(&pdev->dev, sizeof(struct myrb_config2),
+			  config2, config2_addr);
+
+	return ret;
+}
+
+/**
+ * myrb_unmap - unmaps controller structures
+ */
+static void myrb_unmap(struct myrb_hba *cb)
+{
+	if (cb->ldev_info_buf) {
+		size_t ldev_info_size = sizeof(struct myrb_ldev_info) *
+			MYRB_MAX_LDEVS;
+		dma_free_coherent(&cb->pdev->dev, ldev_info_size,
+				  cb->ldev_info_buf, cb->ldev_info_addr);
+		cb->ldev_info_buf = NULL;
+	}
+	if (cb->err_table) {
+		size_t err_table_size = sizeof(struct myrb_error_entry) *
+			MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
+		dma_free_coherent(&cb->pdev->dev, err_table_size,
+				  cb->err_table, cb->err_table_addr);
+		cb->err_table = NULL;
+	}
+	if (cb->enquiry) {
+		dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_enquiry),
+				  cb->enquiry, cb->enquiry_addr);
+		cb->enquiry = NULL;
+	}
+	if (cb->first_stat_mbox) {
+		dma_free_coherent(&cb->pdev->dev, cb->stat_mbox_size,
+				  cb->first_stat_mbox, cb->stat_mbox_addr);
+		cb->first_stat_mbox = NULL;
+	}
+	if (cb->first_cmd_mbox) {
+		dma_free_coherent(&cb->pdev->dev, cb->cmd_mbox_size,
+				  cb->first_cmd_mbox, cb->cmd_mbox_addr);
+		cb->first_cmd_mbox = NULL;
+	}
+}
+
+/**
+ * myrb_cleanup - cleanup controller structures
+ */
+static void myrb_cleanup(struct myrb_hba *cb)
+{
+	struct pci_dev *pdev = cb->pdev;
+
+	/* Free the memory mailbox, status, and related structures */
+	myrb_unmap(cb);
+
+	if (cb->mmio_base) {
+		cb->disable_intr(cb->io_base);
+		iounmap(cb->mmio_base);
+	}
+	if (cb->irq)
+		free_irq(cb->irq, cb);
+	if (cb->io_addr)
+		release_region(cb->io_addr, 0x80);
+	pci_set_drvdata(pdev, NULL);
+	pci_disable_device(pdev);
+	scsi_host_put(cb->host);
+}
+
+static int myrb_host_reset(struct scsi_cmnd *scmd)
+{
+	struct Scsi_Host *shost = scmd->device->host;
+	struct myrb_hba *cb = shost_priv(shost);
+
+	cb->reset(cb->io_base);
+	return SUCCESS;
+}
+
+static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
+		struct scsi_cmnd *scmd)
+{
+	struct myrb_hba *cb = shost_priv(shost);
+	struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
+	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+	struct myrb_dcdb *dcdb;
+	dma_addr_t dcdb_addr;
+	struct scsi_device *sdev = scmd->device;
+	struct scatterlist *sgl;
+	unsigned long flags;
+	int nsge;
+
+	myrb_reset_cmd(cmd_blk);
+	dcdb = dma_pool_alloc(cb->dcdb_pool, GFP_ATOMIC, &dcdb_addr);
+	if (!dcdb)
+		return SCSI_MLQUEUE_HOST_BUSY;
+	nsge = scsi_dma_map(scmd);
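+	/* the DCDB pass-through interface carries at most one data segment */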
+	if (nsge > 1) {
+		dma_pool_free(cb->dcdb_pool, dcdb, dcdb_addr);
+		scmd->result = (DID_ERROR << 16);
+		scmd->scsi_done(scmd);
+		return 0;
+	}
+
+	/* record the DCDB so completion can copy sense data and free it */
+	cmd_blk->dcdb = dcdb;
+	cmd_blk->dcdb_addr = dcdb_addr;
+	mbox->type3.opcode = MYRB_CMD_DCDB;
+	mbox->type3.id = scmd->request->tag + 3;
+	mbox->type3.addr = dcdb_addr;
+	dcdb->channel = sdev->channel;
+	dcdb->target = sdev->id;
+	switch (scmd->sc_data_direction) {
+	case DMA_NONE:
+		dcdb->data_xfer = MYRB_DCDB_XFER_NONE;
+		break;
+	case DMA_TO_DEVICE:
+		dcdb->data_xfer = MYRB_DCDB_XFER_SYSTEM_TO_DEVICE;
+		break;
+	case DMA_FROM_DEVICE:
+		dcdb->data_xfer = MYRB_DCDB_XFER_DEVICE_TO_SYSTEM;
+		break;
+	default:
+		dcdb->data_xfer = MYRB_DCDB_XFER_ILLEGAL;
+		break;
+	}
+	dcdb->early_status = false;
+	if (scmd->request->timeout <= 10)
+		dcdb->timeout = MYRB_DCDB_TMO_10_SECS;
+	else if (scmd->request->timeout <= 60)
+		dcdb->timeout = MYRB_DCDB_TMO_60_SECS;
+	else if (scmd->request->timeout <= 600)
+		dcdb->timeout = MYRB_DCDB_TMO_10_MINS;
+	else
+		dcdb->timeout = MYRB_DCDB_TMO_24_HRS;
+	dcdb->no_autosense = false;
+	dcdb->allow_disconnect = true;
+	sgl = scsi_sglist(scmd);
+	dcdb->dma_addr = sg_dma_address(sgl);
+	if (sg_dma_len(sgl) > USHRT_MAX) {
+		dcdb->xfer_len_lo = sg_dma_len(sgl) & 0xffff;
+		dcdb->xfer_len_hi4 = sg_dma_len(sgl) >> 16;
+	} else {
+		dcdb->xfer_len_lo = sg_dma_len(sgl);
+		dcdb->xfer_len_hi4 = 0;
+	}
+	dcdb->cdb_len = scmd->cmd_len;
+	dcdb->sense_len = sizeof(dcdb->sense);
+	memcpy(&dcdb->cdb, scmd->cmnd, scmd->cmd_len);
+
+	spin_lock_irqsave(&cb->queue_lock, flags);
+	cb->qcmd(cb, cmd_blk);
+	spin_unlock_irqrestore(&cb->queue_lock, flags);
+	return 0;
+}
+
+static void myrb_inquiry(struct myrb_hba *cb,
+		struct scsi_cmnd *scmd)
+{
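+	/*
+	 * Canned 36-byte standard INQUIRY response (vendor "MYLEX");
+	 * the model name and firmware revision are patched in below.
+	 */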
+	unsigned char inq[36] = {
+		0x00, 0x00, 0x03, 0x02, 0x20, 0x00, 0x01, 0x00,
+		0x4d, 0x59, 0x4c, 0x45, 0x58, 0x20, 0x20, 0x20,
+		0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+		0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+		0x20, 0x20, 0x20, 0x20,
+	};
+
+	if (cb->bus_width > 16)
+		inq[7] |= 1 << 6;
+	if (cb->bus_width > 8)
+		inq[7] |= 1 << 5;
+	memcpy(&inq[16], cb->model_name, 16);
+	memcpy(&inq[32], cb->fw_version, 1);
+	memcpy(&inq[33], &cb->fw_version[2], 2);
+	memcpy(&inq[35], &cb->fw_version[7], 1);
+
+	scsi_sg_copy_from_buffer(scmd, (void *)inq, 36);
+}
+
+static void
+myrb_mode_sense(struct myrb_hba *cb, struct scsi_cmnd *scmd,
+		struct myrb_ldev_info *ldev_info)
+{
+	unsigned char modes[32], *mode_pg;
+	bool dbd;
+	size_t mode_len;
+
+	dbd = (scmd->cmnd[1] & 0x08) == 0x08;
+	if (dbd) {
+		mode_len = 24;
+		mode_pg = &modes[4];
+	} else {
+		mode_len = 32;
+		mode_pg = &modes[12];
+	}
+	memset(modes, 0, sizeof(modes));
+	modes[0] = mode_len - 1;
+	if (!dbd) {
+		unsigned char *block_desc = &modes[4];
+
+		modes[3] = 8;
+		put_unaligned_be32(ldev_info->size, &block_desc[0]);
+		put_unaligned_be32(cb->ldev_block_size, &block_desc[5]);
+	}
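+	/*
+	 * Caching mode page (0x08): WCE reflects the write-back cache
+	 * setting, and a non-zero segment size is reported as the
+	 * cache segment size.
+	 */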
+	mode_pg[0] = 0x08;
+	mode_pg[1] = 0x12;
+	if (ldev_info->wb_enabled)
+		mode_pg[2] |= 0x04;
+	if (cb->segment_size) {
+		mode_pg[2] |= 0x08;
+		put_unaligned_be16(cb->segment_size, &mode_pg[14]);
+	}
+
+	scsi_sg_copy_from_buffer(scmd, modes, mode_len);
+}
+
+static void myrb_request_sense(struct myrb_hba *cb,
+		struct scsi_cmnd *scmd)
+{
+	scsi_build_sense_buffer(0, scmd->sense_buffer,
+				NO_SENSE, 0, 0);
+	scsi_sg_copy_from_buffer(scmd, scmd->sense_buffer,
+				 SCSI_SENSE_BUFFERSIZE);
+}
+
+static void myrb_read_capacity(struct myrb_hba *cb, struct scsi_cmnd *scmd,
+		struct myrb_ldev_info *ldev_info)
+{
+	unsigned char data[8];
+
+	dev_dbg(&scmd->device->sdev_gendev,
+		"Capacity %u, blocksize %u\n",
+		ldev_info->size, cb->ldev_block_size);
+	put_unaligned_be32(ldev_info->size - 1, &data[0]);
+	put_unaligned_be32(cb->ldev_block_size, &data[4]);
+	scsi_sg_copy_from_buffer(scmd, data, 8);
+}
+
+static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
+		struct scsi_cmnd *scmd)
+{
+	struct myrb_hba *cb = shost_priv(shost);
+	struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
+	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+	struct myrb_ldev_info *ldev_info;
+	struct scsi_device *sdev = scmd->device;
+	struct scatterlist *sgl;
+	unsigned long flags;
+	u64 lba;
+	u32 block_cnt;
+	int nsge;
+
+	ldev_info = sdev->hostdata;
+	if (!ldev_info ||
+	    (ldev_info->state != MYRB_DEVICE_ONLINE &&
+	     ldev_info->state != MYRB_DEVICE_WO)) {
+		dev_dbg(&shost->shost_gendev, "ldev %u in state %x, skip\n",
+			sdev->id, ldev_info ? ldev_info->state : 0xff);
+		scmd->result = (DID_BAD_TARGET << 16);
+		scmd->scsi_done(scmd);
+		return 0;
+	}
+	switch (scmd->cmnd[0]) {
+	case TEST_UNIT_READY:
+		scmd->result = (DID_OK << 16);
+		scmd->scsi_done(scmd);
+		return 0;
+	case INQUIRY:
+		if (scmd->cmnd[1] & 1) {
+			/* Illegal request, invalid field in CDB */
+			scsi_build_sense_buffer(0, scmd->sense_buffer,
+						ILLEGAL_REQUEST, 0x24, 0);
+			scmd->result = (DRIVER_SENSE << 24) |
+				SAM_STAT_CHECK_CONDITION;
+		} else {
+			myrb_inquiry(cb, scmd);
+			scmd->result = (DID_OK << 16);
+		}
+		scmd->scsi_done(scmd);
+		return 0;
+	case SYNCHRONIZE_CACHE:
+		scmd->result = (DID_OK << 16);
+		scmd->scsi_done(scmd);
+		return 0;
+	case MODE_SENSE:
+		if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
+		    (scmd->cmnd[2] & 0x3F) != 0x08) {
+			/* Illegal request, invalid field in CDB */
+			scsi_build_sense_buffer(0, scmd->sense_buffer,
+						ILLEGAL_REQUEST, 0x24, 0);
+			scmd->result = (DRIVER_SENSE << 24) |
+				SAM_STAT_CHECK_CONDITION;
+		} else {
+			myrb_mode_sense(cb, scmd, ldev_info);
+			scmd->result = (DID_OK << 16);
+		}
+		scmd->scsi_done(scmd);
+		return 0;
+	case READ_CAPACITY:
+		if ((scmd->cmnd[1] & 1) ||
+		    (scmd->cmnd[8] & 1)) {
+			/* Illegal request, invalid field in CDB */
+			scsi_build_sense_buffer(0, scmd->sense_buffer,
+						ILLEGAL_REQUEST, 0x24, 0);
+			scmd->result = (DRIVER_SENSE << 24) |
+				SAM_STAT_CHECK_CONDITION;
+			scmd->scsi_done(scmd);
+			return 0;
+		}
+		lba = get_unaligned_be32(&scmd->cmnd[2]);
+		if (lba) {
+			/* Illegal request, invalid field in CDB */
+			scsi_build_sense_buffer(0, scmd->sense_buffer,
+						ILLEGAL_REQUEST, 0x24, 0);
+			scmd->result = (DRIVER_SENSE << 24) |
+				SAM_STAT_CHECK_CONDITION;
+			scmd->scsi_done(scmd);
+			return 0;
+		}
+		myrb_read_capacity(cb, scmd, ldev_info);
+		scmd->scsi_done(scmd);
+		return 0;
+	case REQUEST_SENSE:
+		myrb_request_sense(cb, scmd);
+		scmd->result = (DID_OK << 16);
+		scmd->scsi_done(scmd);
+		return 0;
+	case SEND_DIAGNOSTIC:
+		if (scmd->cmnd[1] != 0x04) {
+			/* Illegal request, invalid field in CDB */
+			scsi_build_sense_buffer(0, scmd->sense_buffer,
+						ILLEGAL_REQUEST, 0x24, 0);
+			scmd->result = (DRIVER_SENSE << 24) |
+				SAM_STAT_CHECK_CONDITION;
+		} else {
+			/* Assume good status */
+			scmd->result = (DID_OK << 16);
+		}
+		scmd->scsi_done(scmd);
+		return 0;
+	case READ_6:
+		if (ldev_info->state == MYRB_DEVICE_WO) {
+			/* Data protect, attempt to read invalid data */
+			scsi_build_sense_buffer(0, scmd->sense_buffer,
+						DATA_PROTECT, 0x21, 0x06);
+			scmd->result = (DRIVER_SENSE << 24) |
+				SAM_STAT_CHECK_CONDITION;
+			scmd->scsi_done(scmd);
+			return 0;
+		}
+		/* fall through */
+	case WRITE_6:
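+		/* 6-byte CDBs carry a 21-bit LBA in bytes 1-3 */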
+		lba = (((scmd->cmnd[1] & 0x1F) << 16) |
+		       (scmd->cmnd[2] << 8) |
+		       scmd->cmnd[3]);
+		block_cnt = scmd->cmnd[4];
+		break;
+	case READ_10:
+		if (ldev_info->state == MYRB_DEVICE_WO) {
+			/* Data protect, attempt to read invalid data */
+			scsi_build_sense_buffer(0, scmd->sense_buffer,
+						DATA_PROTECT, 0x21, 0x06);
+			scmd->result = (DRIVER_SENSE << 24) |
+				SAM_STAT_CHECK_CONDITION;
+			scmd->scsi_done(scmd);
+			return 0;
+		}
+		/* fall through */
+	case WRITE_10:
+	case VERIFY:		/* 0x2F */
+	case WRITE_VERIFY:	/* 0x2E */
+		lba = get_unaligned_be32(&scmd->cmnd[2]);
+		block_cnt = get_unaligned_be16(&scmd->cmnd[7]);
+		break;
+	case READ_12:
+		if (ldev_info->state == MYRB_DEVICE_WO) {
+			/* Data protect, attempt to read invalid data */
+			scsi_build_sense_buffer(0, scmd->sense_buffer,
+						DATA_PROTECT, 0x21, 0x06);
+			scmd->result = (DRIVER_SENSE << 24) |
+				SAM_STAT_CHECK_CONDITION;
+			scmd->scsi_done(scmd);
+			return 0;
+		}
+		/* fall through */
+	case WRITE_12:
+	case VERIFY_12: /* 0xAF */
+	case WRITE_VERIFY_12:	/* 0xAE */
+		lba = get_unaligned_be32(&scmd->cmnd[2]);
+		block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
+		break;
+	default:
+		/* Illegal request, invalid opcode */
+		scsi_build_sense_buffer(0, scmd->sense_buffer,
+					ILLEGAL_REQUEST, 0x20, 0);
+		scmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
+		scmd->scsi_done(scmd);
+		return 0;
+	}
+
+	myrb_reset_cmd(cmd_blk);
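+	/*
+	 * ids below 3 are reserved for the driver's internal DCMD and
+	 * MCMD commands, so SCSI command ids are offset by 3 (see the
+	 * interrupt handlers).
+	 */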
+	mbox->type5.id = scmd->request->tag + 3;
+	if (scmd->sc_data_direction == DMA_NONE)
+		goto submit;
+	nsge = scsi_dma_map(scmd);
+	if (nsge == 1) {
+		sgl = scsi_sglist(scmd);
+		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
+			mbox->type5.opcode = MYRB_CMD_READ;
+		else
+			mbox->type5.opcode = MYRB_CMD_WRITE;
+
+		mbox->type5.ld.xfer_len = block_cnt;
+		mbox->type5.ld.ldev_num = sdev->id;
+		mbox->type5.lba = lba;
+		mbox->type5.addr = (u32)sg_dma_address(sgl);
+	} else {
+		struct myrb_sge *hw_sgl;
+		dma_addr_t hw_sgl_addr;
+		int i;
+
+		hw_sgl = dma_pool_alloc(cb->sg_pool, GFP_ATOMIC, &hw_sgl_addr);
+		if (!hw_sgl) {
+			scsi_dma_unmap(scmd);
+			return SCSI_MLQUEUE_HOST_BUSY;
+		}
+
+		cmd_blk->sgl = hw_sgl;
+		cmd_blk->sgl_addr = hw_sgl_addr;
+
+		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
+			mbox->type5.opcode = MYRB_CMD_READ_SG;
+		else
+			mbox->type5.opcode = MYRB_CMD_WRITE_SG;
+
+		mbox->type5.ld.xfer_len = block_cnt;
+		mbox->type5.ld.ldev_num = sdev->id;
+		mbox->type5.lba = lba;
+		mbox->type5.addr = hw_sgl_addr;
+		mbox->type5.sg_count = nsge;
+
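+		/* translate the mapped S/G list into hardware S/G entries */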
+		scsi_for_each_sg(scmd, sgl, nsge, i) {
+			hw_sgl->sge_addr = (u32)sg_dma_address(sgl);
+			hw_sgl->sge_count = (u32)sg_dma_len(sgl);
+			hw_sgl++;
+		}
+	}
+submit:
+	spin_lock_irqsave(&cb->queue_lock, flags);
+	cb->qcmd(cb, cmd_blk);
+	spin_unlock_irqrestore(&cb->queue_lock, flags);
+
+	return 0;
+}
+
+static int myrb_queuecommand(struct Scsi_Host *shost,
+		struct scsi_cmnd *scmd)
+{
+	struct scsi_device *sdev = scmd->device;
+
+	if (sdev->channel > myrb_logical_channel(shost)) {
+		scmd->result = (DID_BAD_TARGET << 16);
+		scmd->scsi_done(scmd);
+		return 0;
+	}
+	if (sdev->channel == myrb_logical_channel(shost))
+		return myrb_ldev_queuecommand(shost, scmd);
+
+	return myrb_pthru_queuecommand(shost, scmd);
+}
+
+static int myrb_ldev_slave_alloc(struct scsi_device *sdev)
+{
+	struct myrb_hba *cb = shost_priv(sdev->host);
+	struct myrb_ldev_info *ldev_info;
+	unsigned short ldev_num = sdev->id;
+	enum raid_level level;
+
+	ldev_info = cb->ldev_info_buf + ldev_num;
+	if (!ldev_info)
+		return -ENXIO;
+
+	sdev->hostdata = kzalloc(sizeof(*ldev_info), GFP_KERNEL);
+	if (!sdev->hostdata)
+		return -ENOMEM;
+	dev_dbg(&sdev->sdev_gendev,
+		"slave alloc ldev %d state %x\n",
+		ldev_num, ldev_info->state);
+	memcpy(sdev->hostdata, ldev_info,
+	       sizeof(*ldev_info));
+	switch (ldev_info->raid_level) {
+	case MYRB_RAID_LEVEL0:
+		level = RAID_LEVEL_LINEAR;
+		break;
+	case MYRB_RAID_LEVEL1:
+		level = RAID_LEVEL_1;
+		break;
+	case MYRB_RAID_LEVEL3:
+		level = RAID_LEVEL_3;
+		break;
+	case MYRB_RAID_LEVEL5:
+		level = RAID_LEVEL_5;
+		break;
+	case MYRB_RAID_LEVEL6:
+		level = RAID_LEVEL_6;
+		break;
+	case MYRB_RAID_JBOD:
+		level = RAID_LEVEL_JBOD;
+		break;
+	default:
+		level = RAID_LEVEL_UNKNOWN;
+		break;
+	}
+	raid_set_level(myrb_raid_template, &sdev->sdev_gendev, level);
+	return 0;
+}
+
+static int myrb_pdev_slave_alloc(struct scsi_device *sdev)
+{
+	struct myrb_hba *cb = shost_priv(sdev->host);
+	struct myrb_pdev_state *pdev_info;
+	unsigned short status;
+
+	if (sdev->id > MYRB_MAX_TARGETS)
+		return -ENXIO;
+
+	pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA);
+	if (!pdev_info)
+		return -ENOMEM;
+
+	status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
+				  sdev, pdev_info);
+	if (status != MYRB_STATUS_SUCCESS) {
+		dev_dbg(&sdev->sdev_gendev,
+			"Failed to get device state, status %x\n",
+			status);
+		kfree(pdev_info);
+		return -ENXIO;
+	}
+	if (!pdev_info->present) {
+		dev_dbg(&sdev->sdev_gendev,
+			"device not present, skip\n");
+		kfree(pdev_info);
+		return -ENXIO;
+	}
+	dev_dbg(&sdev->sdev_gendev,
+		"slave alloc pdev %d:%d state %x\n",
+		sdev->channel, sdev->id, pdev_info->state);
+	sdev->hostdata = pdev_info;
+
+	return 0;
+}
+
+static int myrb_slave_alloc(struct scsi_device *sdev)
+{
+	if (sdev->channel > myrb_logical_channel(sdev->host))
+		return -ENXIO;
+
+	if (sdev->lun > 0)
+		return -ENXIO;
+
+	if (sdev->channel == myrb_logical_channel(sdev->host))
+		return myrb_ldev_slave_alloc(sdev);
+
+	return myrb_pdev_slave_alloc(sdev);
+}
+
+static int myrb_slave_configure(struct scsi_device *sdev)
+{
+	struct myrb_ldev_info *ldev_info;
+
+	if (sdev->channel > myrb_logical_channel(sdev->host))
+		return -ENXIO;
+
+	if (sdev->channel < myrb_logical_channel(sdev->host)) {
+		sdev->no_uld_attach = 1;
+		return 0;
+	}
+	if (sdev->lun != 0)
+		return -ENXIO;
+
+	ldev_info = sdev->hostdata;
+	if (!ldev_info)
+		return -ENXIO;
+	if (ldev_info->state != MYRB_DEVICE_ONLINE)
+		sdev_printk(KERN_INFO, sdev,
+			    "Logical drive is %s\n",
+			    myrb_devstate_name(ldev_info->state));
+
+	sdev->tagged_supported = 1;
+	return 0;
+}
+
+static void myrb_slave_destroy(struct scsi_device *sdev)
+{
+	kfree(sdev->hostdata);
+}
+
+static int myrb_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+		sector_t capacity, int geom[])
+{
+	struct myrb_hba *cb = shost_priv(sdev->host);
+
+	geom[0] = cb->ldev_geom_heads;
+	geom[1] = cb->ldev_geom_sectors;
+	/* sector_div() divides in place and returns the remainder */
+	sector_div(capacity, geom[0] * geom[1]);
+	geom[2] = capacity;
+
+	return 0;
+}
+
+static ssize_t raid_state_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct myrb_hba *cb = shost_priv(sdev->host);
+	int ret;
+
+	if (!sdev->hostdata)
+		return snprintf(buf, 16, "Unknown\n");
+
+	if (sdev->channel == myrb_logical_channel(sdev->host)) {
+		struct myrb_ldev_info *ldev_info = sdev->hostdata;
+		const char *name;
+
+		name = myrb_devstate_name(ldev_info->state);
+		if (name)
+			ret = snprintf(buf, 32, "%s\n", name);
+		else
+			ret = snprintf(buf, 32, "Invalid (%02X)\n",
+				       ldev_info->state);
+	} else {
+		struct myrb_pdev_state *pdev_info = sdev->hostdata;
+		unsigned short status;
+		const char *name;
+
+		status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
+					  sdev, pdev_info);
+		if (status != MYRB_STATUS_SUCCESS)
+			sdev_printk(KERN_INFO, sdev,
+				    "Failed to get device state, status %x\n",
+				    status);
+
+		if (!pdev_info->present)
+			name = "Removed";
+		else
+			name = myrb_devstate_name(pdev_info->state);
+		if (name)
+			ret = snprintf(buf, 32, "%s\n", name);
+		else
+			ret = snprintf(buf, 32, "Invalid (%02X)\n",
+				       pdev_info->state);
+	}
+	return ret;
+}
+
+static ssize_t raid_state_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct myrb_hba *cb = shost_priv(sdev->host);
+	struct myrb_pdev_state *pdev_info;
+	enum myrb_devstate new_state;
+	unsigned short status;
+
+	if (!strncmp(buf, "kill", 4) ||
+	    !strncmp(buf, "offline", 7))
+		new_state = MYRB_DEVICE_DEAD;
+	else if (!strncmp(buf, "online", 6))
+		new_state = MYRB_DEVICE_ONLINE;
+	else if (!strncmp(buf, "standby", 7))
+		new_state = MYRB_DEVICE_STANDBY;
+	else
+		return -EINVAL;
+
+	pdev_info = sdev->hostdata;
+	if (!pdev_info) {
+		sdev_printk(KERN_INFO, sdev,
+			    "Failed - no physical device information\n");
+		return -ENXIO;
+	}
+	if (!pdev_info->present) {
+		sdev_printk(KERN_INFO, sdev,
+			    "Failed - device not present\n");
+		return -ENXIO;
+	}
+
+	if (pdev_info->state == new_state)
+		return count;
+
+	status = myrb_set_pdev_state(cb, sdev, new_state);
+	switch (status) {
+	case MYRB_STATUS_SUCCESS:
+		break;
+	case MYRB_STATUS_START_DEVICE_FAILED:
+		sdev_printk(KERN_INFO, sdev,
+			     "Failed - Unable to Start Device\n");
+		count = -EAGAIN;
+		break;
+	case MYRB_STATUS_NO_DEVICE:
+		sdev_printk(KERN_INFO, sdev,
+			    "Failed - No Device at Address\n");
+		count = -ENODEV;
+		break;
+	case MYRB_STATUS_INVALID_CHANNEL_OR_TARGET:
+		sdev_printk(KERN_INFO, sdev,
+			 "Failed - Invalid Channel or Target or Modifier\n");
+		count = -EINVAL;
+		break;
+	case MYRB_STATUS_CHANNEL_BUSY:
+		sdev_printk(KERN_INFO, sdev,
+			 "Failed - Channel Busy\n");
+		count = -EBUSY;
+		break;
+	default:
+		sdev_printk(KERN_INFO, sdev,
+			 "Failed - Unexpected Status %04X\n", status);
+		count = -EIO;
+		break;
+	}
+	return count;
+}
+static DEVICE_ATTR_RW(raid_state);
+
+static ssize_t raid_level_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+
+	if (sdev->channel == myrb_logical_channel(sdev->host)) {
+		struct myrb_ldev_info *ldev_info = sdev->hostdata;
+		const char *name;
+
+		if (!ldev_info)
+			return -ENXIO;
+
+		name = myrb_raidlevel_name(ldev_info->raid_level);
+		if (!name)
+			return snprintf(buf, 32, "Invalid (%02X)\n",
+					ldev_info->raid_level);
+		return snprintf(buf, 32, "%s\n", name);
+	}
+	return snprintf(buf, 32, "Physical Drive\n");
+}
+static DEVICE_ATTR_RO(raid_level);
+
+static ssize_t rebuild_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct myrb_hba *cb = shost_priv(sdev->host);
+	struct myrb_rbld_progress rbld_buf;
+	unsigned char status;
+
+	if (sdev->channel < myrb_logical_channel(sdev->host))
+		return snprintf(buf, 32, "physical device - not rebuilding\n");
+
+	status = myrb_get_rbld_progress(cb, &rbld_buf);
+
+	if (rbld_buf.ldev_num != sdev->id ||
+	    status != MYRB_STATUS_SUCCESS)
+		return snprintf(buf, 32, "not rebuilding\n");
+
+	return snprintf(buf, 32, "rebuilding block %u of %u\n",
+			rbld_buf.ldev_size - rbld_buf.blocks_left,
+			rbld_buf.ldev_size);
+}
+
+static ssize_t rebuild_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct myrb_hba *cb = shost_priv(sdev->host);
+	struct myrb_cmdblk *cmd_blk;
+	union myrb_cmd_mbox *mbox;
+	unsigned short status;
+	int rc, start;
+	const char *msg;
+
+	rc = kstrtoint(buf, 0, &start);
+	if (rc)
+		return rc;
+
+	if (sdev->channel >= myrb_logical_channel(sdev->host))
+		return -ENXIO;
+
+	status = myrb_get_rbld_progress(cb, NULL);
+	if (start) {
+		if (status == MYRB_STATUS_SUCCESS) {
+			sdev_printk(KERN_INFO, sdev,
+				    "Rebuild Not Initiated; already in progress\n");
+			return -EALREADY;
+		}
+		mutex_lock(&cb->dcmd_mutex);
+		cmd_blk = &cb->dcmd_blk;
+		myrb_reset_cmd(cmd_blk);
+		mbox = &cmd_blk->mbox;
+		mbox->type3D.opcode = MYRB_CMD_REBUILD_ASYNC;
+		mbox->type3D.id = MYRB_DCMD_TAG;
+		mbox->type3D.channel = sdev->channel;
+		mbox->type3D.target = sdev->id;
+		status = myrb_exec_cmd(cb, cmd_blk);
+		mutex_unlock(&cb->dcmd_mutex);
+	} else {
+		struct pci_dev *pdev = cb->pdev;
+		unsigned char *rate;
+		dma_addr_t rate_addr;
+
+		if (status != MYRB_STATUS_SUCCESS) {
+			sdev_printk(KERN_INFO, sdev,
+				    "Rebuild Not Cancelled; not in progress\n");
+			return 0;
+		}
+
+		rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
+					  &rate_addr, GFP_KERNEL);
+		if (rate == NULL) {
+			sdev_printk(KERN_INFO, sdev,
+				    "Cancellation of Rebuild Failed - Out of Memory\n");
+			return -ENOMEM;
+		}
+		mutex_lock(&cb->dcmd_mutex);
+		cmd_blk = &cb->dcmd_blk;
+		myrb_reset_cmd(cmd_blk);
+		mbox = &cmd_blk->mbox;
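+		/* cancel the running rebuild by setting the rate to 0xFF */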
+		mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
+		mbox->type3R.id = MYRB_DCMD_TAG;
+		mbox->type3R.rbld_rate = 0xFF;
+		mbox->type3R.addr = rate_addr;
+		status = myrb_exec_cmd(cb, cmd_blk);
+		dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
+		mutex_unlock(&cb->dcmd_mutex);
+	}
+	if (status == MYRB_STATUS_SUCCESS) {
+		sdev_printk(KERN_INFO, sdev, "Rebuild %s\n",
+			    start ? "Initiated" : "Cancelled");
+		return count;
+	}
+	if (!start) {
+		sdev_printk(KERN_INFO, sdev,
+			    "Rebuild Not Cancelled, status 0x%x\n",
+			    status);
+		return -EIO;
+	}
+
+	switch (status) {
+	case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
+		msg = "Attempt to Rebuild Online or Unresponsive Drive";
+		break;
+	case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
+		msg = "New Disk Failed During Rebuild";
+		break;
+	case MYRB_STATUS_INVALID_ADDRESS:
+		msg = "Invalid Device Address";
+		break;
+	case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
+		msg = "Already in Progress";
+		break;
+	default:
+		msg = NULL;
+		break;
+	}
+	if (msg)
+		sdev_printk(KERN_INFO, sdev,
+			    "Rebuild Failed - %s\n", msg);
+	else
+		sdev_printk(KERN_INFO, sdev,
+			    "Rebuild Failed, status 0x%x\n", status);
+
+	return -EIO;
+}
+static DEVICE_ATTR_RW(rebuild);
+
+static ssize_t consistency_check_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct myrb_hba *cb = shost_priv(sdev->host);
+	struct myrb_rbld_progress rbld_buf;
+	struct myrb_cmdblk *cmd_blk;
+	union myrb_cmd_mbox *mbox;
+	unsigned short ldev_num = 0xFFFF;
+	unsigned short status;
+	int rc, start;
+	const char *msg;
+
+	rc = kstrtoint(buf, 0, &start);
+	if (rc)
+		return rc;
+
+	if (sdev->channel < myrb_logical_channel(sdev->host))
+		return -ENXIO;
+
+	status = myrb_get_rbld_progress(cb, &rbld_buf);
+	if (status == MYRB_STATUS_SUCCESS)
+		ldev_num = rbld_buf.ldev_num;
+	if (start) {
+		if (status == MYRB_STATUS_SUCCESS) {
+			sdev_printk(KERN_INFO, sdev,
+				    "Check Consistency Not Initiated; already in progress\n");
+			return -EALREADY;
+		}
+		mutex_lock(&cb->dcmd_mutex);
+		cmd_blk = &cb->dcmd_blk;
+		myrb_reset_cmd(cmd_blk);
+		mbox = &cmd_blk->mbox;
+		mbox->type3C.opcode = MYRB_CMD_CHECK_CONSISTENCY_ASYNC;
+		mbox->type3C.id = MYRB_DCMD_TAG;
+		mbox->type3C.ldev_num = sdev->id;
+		mbox->type3C.auto_restore = true;
+
+		status = myrb_exec_cmd(cb, cmd_blk);
+		mutex_unlock(&cb->dcmd_mutex);
+	} else {
+		struct pci_dev *pdev = cb->pdev;
+		unsigned char *rate;
+		dma_addr_t rate_addr;
+
+		if (ldev_num != sdev->id) {
+			sdev_printk(KERN_INFO, sdev,
+				    "Check Consistency Not Cancelled; not in progress\n");
+			return 0;
+		}
+		rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
+					  &rate_addr, GFP_KERNEL);
+		if (rate == NULL) {
+			sdev_printk(KERN_INFO, sdev,
+				    "Cancellation of Check Consistency Failed - Out of Memory\n");
+			return -ENOMEM;
+		}
+		mutex_lock(&cb->dcmd_mutex);
+		cmd_blk = &cb->dcmd_blk;
+		myrb_reset_cmd(cmd_blk);
+		mbox = &cmd_blk->mbox;
+		mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
+		mbox->type3R.id = MYRB_DCMD_TAG;
+		mbox->type3R.rbld_rate = 0xFF;
+		mbox->type3R.addr = rate_addr;
+		status = myrb_exec_cmd(cb, cmd_blk);
+		dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
+		mutex_unlock(&cb->dcmd_mutex);
+	}
+	if (status == MYRB_STATUS_SUCCESS) {
+		sdev_printk(KERN_INFO, sdev, "Check Consistency %s\n",
+			    start ? "Initiated" : "Cancelled");
+		return count;
+	}
+	if (!start) {
+		sdev_printk(KERN_INFO, sdev,
+			    "Check Consistency Not Cancelled, status 0x%x\n",
+			    status);
+		return -EIO;
+	}
+
+	switch (status) {
+	case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
+		msg = "Dependent Physical Device is DEAD";
+		break;
+	case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
+		msg = "New Disk Failed During Rebuild";
+		break;
+	case MYRB_STATUS_INVALID_ADDRESS:
+		msg = "Invalid or Nonredundant Logical Drive";
+		break;
+	case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
+		msg = "Already in Progress";
+		break;
+	default:
+		msg = NULL;
+		break;
+	}
+	if (msg)
+		sdev_printk(KERN_INFO, sdev,
+			    "Check Consistency Failed - %s\n", msg);
+	else
+		sdev_printk(KERN_INFO, sdev,
+			    "Check Consistency Failed, status 0x%x\n", status);
+
+	return -EIO;
+}
+
+static ssize_t consistency_check_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return rebuild_show(dev, attr, buf);
+}
+static DEVICE_ATTR_RW(consistency_check);
+
+static ssize_t ctlr_num_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct myrb_hba *cb = shost_priv(shost);
+
+	return snprintf(buf, 20, "%d\n", cb->ctlr_num);
+}
+static DEVICE_ATTR_RO(ctlr_num);
+
+static ssize_t firmware_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct myrb_hba *cb = shost_priv(shost);
+
+	return snprintf(buf, 16, "%s\n", cb->fw_version);
+}
+static DEVICE_ATTR_RO(firmware);
+
+static ssize_t model_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct myrb_hba *cb = shost_priv(shost);
+
+	return snprintf(buf, 16, "%s\n", cb->model_name);
+}
+static DEVICE_ATTR_RO(model);
+
+static ssize_t flush_cache_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct myrb_hba *cb = shost_priv(shost);
+	unsigned short status;
+
+	status = myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
+	if (status == MYRB_STATUS_SUCCESS) {
+		shost_printk(KERN_INFO, shost,
+			     "Cache Flush Completed\n");
+		return count;
+	}
+	shost_printk(KERN_INFO, shost,
+		     "Cache Flush Failed, status %x\n", status);
+	return -EIO;
+}
+static DEVICE_ATTR_WO(flush_cache);
+
+static struct device_attribute *myrb_sdev_attrs[] = {
+	&dev_attr_rebuild,
+	&dev_attr_consistency_check,
+	&dev_attr_raid_state,
+	&dev_attr_raid_level,
+	NULL,
+};
+
+static struct device_attribute *myrb_shost_attrs[] = {
+	&dev_attr_ctlr_num,
+	&dev_attr_model,
+	&dev_attr_firmware,
+	&dev_attr_flush_cache,
+	NULL,
+};
+
+struct scsi_host_template myrb_template = {
+	.module			= THIS_MODULE,
+	.name			= "DAC960",
+	.proc_name		= "myrb",
+	.queuecommand		= myrb_queuecommand,
+	.eh_host_reset_handler	= myrb_host_reset,
+	.slave_alloc		= myrb_slave_alloc,
+	.slave_configure	= myrb_slave_configure,
+	.slave_destroy		= myrb_slave_destroy,
+	.bios_param		= myrb_biosparam,
+	.cmd_size		= sizeof(struct myrb_cmdblk),
+	.shost_attrs		= myrb_shost_attrs,
+	.sdev_attrs		= myrb_sdev_attrs,
+	.this_id		= -1,
+};
+
+/**
+ * myrb_is_raid - return boolean indicating device is raid volume
+ * @dev: the device struct object
+ */
+static int myrb_is_raid(struct device *dev)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+
+	return sdev->channel == myrb_logical_channel(sdev->host);
+}
+
+/**
+ * myrb_get_resync - get raid volume resync percent complete
+ * @dev: the device struct object
+ */
+static void myrb_get_resync(struct device *dev)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct myrb_hba *cb = shost_priv(sdev->host);
+	struct myrb_rbld_progress rbld_buf;
+	unsigned int percent_complete = 0;
+	unsigned short status;
+	unsigned int ldev_size = 0, remaining = 0;
+
+	if (sdev->channel < myrb_logical_channel(sdev->host))
+		return;
+	status = myrb_get_rbld_progress(cb, &rbld_buf);
+	if (status == MYRB_STATUS_SUCCESS) {
+		if (rbld_buf.ldev_num == sdev->id) {
+			ldev_size = rbld_buf.ldev_size;
+			remaining = rbld_buf.blocks_left;
+		}
+	}
+	if (remaining && ldev_size)
+		percent_complete = (ldev_size - remaining) * 100 / ldev_size;
+	raid_set_resync(myrb_raid_template, dev, percent_complete);
+}
+
+/**
+ * myrb_get_state - get raid volume status
+ * @dev: the device struct object
+ */
+static void myrb_get_state(struct device *dev)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct myrb_hba *cb = shost_priv(sdev->host);
+	struct myrb_ldev_info *ldev_info = sdev->hostdata;
+	enum raid_state state = RAID_STATE_UNKNOWN;
+	unsigned short status;
+
+	if (sdev->channel < myrb_logical_channel(sdev->host) || !ldev_info)
+		state = RAID_STATE_UNKNOWN;
+	else {
+		status = myrb_get_rbld_progress(cb, NULL);
+		if (status == MYRB_STATUS_SUCCESS)
+			state = RAID_STATE_RESYNCING;
+		else {
+			switch (ldev_info->state) {
+			case MYRB_DEVICE_ONLINE:
+				state = RAID_STATE_ACTIVE;
+				break;
+			case MYRB_DEVICE_WO:
+			case MYRB_DEVICE_CRITICAL:
+				state = RAID_STATE_DEGRADED;
+				break;
+			default:
+				state = RAID_STATE_OFFLINE;
+			}
+		}
+	}
+	raid_set_state(myrb_raid_template, dev, state);
+}
+
+struct raid_function_template myrb_raid_functions = {
+	.cookie		= &myrb_template,
+	.is_raid	= myrb_is_raid,
+	.get_resync	= myrb_get_resync,
+	.get_state	= myrb_get_state,
+};
+
+static void myrb_handle_scsi(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk,
+		struct scsi_cmnd *scmd)
+{
+	unsigned short status;
+
+	if (!cmd_blk)
+		return;
+
+	scsi_dma_unmap(scmd);
+
+	if (cmd_blk->dcdb) {
+		memcpy(scmd->sense_buffer, &cmd_blk->dcdb->sense, 64);
+		dma_pool_free(cb->dcdb_pool, cmd_blk->dcdb,
+			      cmd_blk->dcdb_addr);
+		cmd_blk->dcdb = NULL;
+	}
+	if (cmd_blk->sgl) {
+		dma_pool_free(cb->sg_pool, cmd_blk->sgl, cmd_blk->sgl_addr);
+		cmd_blk->sgl = NULL;
+		cmd_blk->sgl_addr = 0;
+	}
+	status = cmd_blk->status;
+	switch (status) {
+	case MYRB_STATUS_SUCCESS:
+	case MYRB_STATUS_DEVICE_BUSY:
+		scmd->result = (DID_OK << 16) | status;
+		break;
+	case MYRB_STATUS_BAD_DATA:
+		dev_dbg(&scmd->device->sdev_gendev,
+			"Bad Data Encountered\n");
+		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
+			/* Unrecovered read error */
+			scsi_build_sense_buffer(0, scmd->sense_buffer,
+						MEDIUM_ERROR, 0x11, 0);
+		else
+			/* Write error */
+			scsi_build_sense_buffer(0, scmd->sense_buffer,
+						MEDIUM_ERROR, 0x0C, 0);
+		scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
+		break;
+	case MYRB_STATUS_IRRECOVERABLE_DATA_ERROR:
+		scmd_printk(KERN_ERR, scmd, "Irrecoverable Data Error\n");
+		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
+			/* Unrecovered read error, auto-reallocation failed */
+			scsi_build_sense_buffer(0, scmd->sense_buffer,
+						MEDIUM_ERROR, 0x11, 0x04);
+		else
+			/* Write error, auto-reallocation failed */
+			scsi_build_sense_buffer(0, scmd->sense_buffer,
+						MEDIUM_ERROR, 0x0C, 0x02);
+		scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
+		break;
+	case MYRB_STATUS_LDRV_NONEXISTENT_OR_OFFLINE:
+		dev_dbg(&scmd->device->sdev_gendev,
+			    "Logical Drive Nonexistent or Offline");
+		scmd->result = (DID_BAD_TARGET << 16);
+		break;
+	case MYRB_STATUS_ACCESS_BEYOND_END_OF_LDRV:
+		dev_dbg(&scmd->device->sdev_gendev,
+			    "Attempt to Access Beyond End of Logical Drive");
+		/* Logical block address out of range */
+		scsi_build_sense_buffer(0, scmd->sense_buffer,
+					NOT_READY, 0x21, 0);
+		break;
+	case MYRB_STATUS_DEVICE_NONRESPONSIVE:
+		dev_dbg(&scmd->device->sdev_gendev, "Device nonresponsive\n");
+		scmd->result = (DID_BAD_TARGET << 16);
+		break;
+	default:
+		scmd_printk(KERN_ERR, scmd,
+			    "Unexpected Error Status %04X", status);
+		scmd->result = (DID_ERROR << 16);
+		break;
+	}
+	scmd->scsi_done(scmd);
+}
+
+static void myrb_handle_cmdblk(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
+{
+	if (!cmd_blk)
+		return;
+
+	if (cmd_blk->completion) {
+		complete(cmd_blk->completion);
+		cmd_blk->completion = NULL;
+	}
+}
+
+static void myrb_monitor(struct work_struct *work)
+{
+	struct myrb_hba *cb = container_of(work,
+			struct myrb_hba, monitor_work.work);
+	struct Scsi_Host *shost = cb->host;
+	unsigned long interval = MYRB_PRIMARY_MONITOR_INTERVAL;
+
+	dev_dbg(&shost->shost_gendev, "monitor tick\n");
+
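+	/*
+	 * Work through the pending status items one at a time, polling
+	 * at a short 10-jiffy interval until none remain, then fall
+	 * back to the regular enquiry cadence.
+	 */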
+	if (cb->new_ev_seq > cb->old_ev_seq) {
+		int event = cb->old_ev_seq;
+
+		dev_dbg(&shost->shost_gendev,
+			"get event log no %d/%d\n",
+			cb->new_ev_seq, event);
+		myrb_get_event(cb, event);
+		cb->old_ev_seq = event + 1;
+		interval = 10;
+	} else if (cb->need_err_info) {
+		cb->need_err_info = false;
+		dev_dbg(&shost->shost_gendev, "get error table\n");
+		myrb_get_errtable(cb);
+		interval = 10;
+	} else if (cb->need_rbld && cb->rbld_first) {
+		cb->need_rbld = false;
+		dev_dbg(&shost->shost_gendev,
+			"get rebuild progress\n");
+		myrb_update_rbld_progress(cb);
+		interval = 10;
+	} else if (cb->need_ldev_info) {
+		cb->need_ldev_info = false;
+		dev_dbg(&shost->shost_gendev,
+			"get logical drive info\n");
+		myrb_get_ldev_info(cb);
+		interval = 10;
+	} else if (cb->need_rbld) {
+		cb->need_rbld = false;
+		dev_dbg(&shost->shost_gendev,
+			"get rebuild progress\n");
+		myrb_update_rbld_progress(cb);
+		interval = 10;
+	} else if (cb->need_cc_status) {
+		cb->need_cc_status = false;
+		dev_dbg(&shost->shost_gendev,
+			"get consistency check progress\n");
+		myrb_get_cc_progress(cb);
+		interval = 10;
+	} else if (cb->need_bgi_status) {
+		cb->need_bgi_status = false;
+		dev_dbg(&shost->shost_gendev, "get background init status\n");
+		myrb_bgi_control(cb);
+		interval = 10;
+	} else {
+		dev_dbg(&shost->shost_gendev, "new enquiry\n");
+		mutex_lock(&cb->dma_mutex);
+		myrb_hba_enquiry(cb);
+		mutex_unlock(&cb->dma_mutex);
+		if ((cb->new_ev_seq - cb->old_ev_seq > 0) ||
+		    cb->need_err_info || cb->need_rbld ||
+		    cb->need_ldev_info || cb->need_cc_status ||
+		    cb->need_bgi_status) {
+			dev_dbg(&shost->shost_gendev,
+				"reschedule monitor\n");
+			interval = 0;
+		}
+	}
+	if (interval > 1)
+		cb->primary_monitor_time = jiffies;
+	queue_delayed_work(cb->work_q, &cb->monitor_work, interval);
+}
+
+/**
+ * myrb_err_status - report controller BIOS messages
+ * @cb: host adapter structure
+ * @error: message code from the Error Status Register
+ * @parm0: first message parameter
+ * @parm1: second message parameter
+ *
+ * Controller BIOS messages are passed through the Error Status Register
+ * when the driver performs the BIOS handshaking.
+ *
+ * Return: true for fatal errors and false otherwise.
+ */
+bool myrb_err_status(struct myrb_hba *cb, unsigned char error,
+		unsigned char parm0, unsigned char parm1)
+{
+	struct pci_dev *pdev = cb->pdev;
+
+	switch (error) {
+	case 0x00:
+		dev_info(&pdev->dev,
+			 "Physical Device %d:%d Not Responding\n",
+			 parm1, parm0);
+		break;
+	case 0x08:
+		dev_notice(&pdev->dev, "Spinning Up Drives\n");
+		break;
+	case 0x30:
+		dev_notice(&pdev->dev, "Configuration Checksum Error\n");
+		break;
+	case 0x60:
+		dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n");
+		break;
+	case 0x70:
+		dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n");
+		break;
+	case 0x90:
+		dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n",
+			   parm1, parm0);
+		break;
+	case 0xA0:
+		dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n");
+		break;
+	case 0xB0:
+		dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n");
+		break;
+	case 0xD0:
+		dev_notice(&pdev->dev, "New Controller Configuration Found\n");
+		break;
+	case 0xF0:
+		dev_err(&pdev->dev, "Fatal Memory Parity Error\n");
+		return true;
+	default:
+		dev_err(&pdev->dev, "Unknown Initialization Error %02X\n",
+			error);
+		return true;
+	}
+	return false;
+}
+
+/*
+ * Hardware-specific functions
+ */
+
+/*
+ * DAC960 LA Series Controllers
+ */
+
+static inline void DAC960_LA_hw_mbox_new_cmd(void __iomem *base)
+{
+	writeb(DAC960_LA_IDB_HWMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
+}
+
+static inline void DAC960_LA_ack_hw_mbox_status(void __iomem *base)
+{
+	writeb(DAC960_LA_IDB_HWMBOX_ACK_STS, base + DAC960_LA_IDB_OFFSET);
+}
+
+static inline void DAC960_LA_gen_intr(void __iomem *base)
+{
+	writeb(DAC960_LA_IDB_GEN_IRQ, base + DAC960_LA_IDB_OFFSET);
+}
+
+static inline void DAC960_LA_reset_ctrl(void __iomem *base)
+{
+	writeb(DAC960_LA_IDB_CTRL_RESET, base + DAC960_LA_IDB_OFFSET);
+}
+
+static inline void DAC960_LA_mem_mbox_new_cmd(void __iomem *base)
+{
+	writeb(DAC960_LA_IDB_MMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
+}
+
+static inline bool DAC960_LA_hw_mbox_is_full(void __iomem *base)
+{
+	unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);
+
+	return !(idb & DAC960_LA_IDB_HWMBOX_EMPTY);
+}
+
+static inline bool DAC960_LA_init_in_progress(void __iomem *base)
+{
+	unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);
+
+	return !(idb & DAC960_LA_IDB_INIT_DONE);
+}
+
+static inline void DAC960_LA_ack_hw_mbox_intr(void __iomem *base)
+{
+	writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
+}
+
+static inline void DAC960_LA_ack_mem_mbox_intr(void __iomem *base)
+{
+	writeb(DAC960_LA_ODB_MMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
+}
+
+static inline void DAC960_LA_ack_intr(void __iomem *base)
+{
+	writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ | DAC960_LA_ODB_MMBOX_ACK_IRQ,
+	       base + DAC960_LA_ODB_OFFSET);
+}
+
+static inline bool DAC960_LA_hw_mbox_status_available(void __iomem *base)
+{
+	unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);
+
+	return odb & DAC960_LA_ODB_HWMBOX_STS_AVAIL;
+}
+
+static inline bool DAC960_LA_mem_mbox_status_available(void __iomem *base)
+{
+	unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);
+
+	return odb & DAC960_LA_ODB_MMBOX_STS_AVAIL;
+}
+
+static inline void DAC960_LA_enable_intr(void __iomem *base)
+{
+	unsigned char imask = 0xFF;
+
+	imask &= ~DAC960_LA_IRQMASK_DISABLE_IRQ;
+	writeb(imask, base + DAC960_LA_IRQMASK_OFFSET);
+}
+
+static inline void DAC960_LA_disable_intr(void __iomem *base)
+{
+	unsigned char imask = 0xFF;
+
+	imask |= DAC960_LA_IRQMASK_DISABLE_IRQ;
+	writeb(imask, base + DAC960_LA_IRQMASK_OFFSET);
+}
+
+static inline bool DAC960_LA_intr_enabled(void __iomem *base)
+{
+	unsigned char imask = readb(base + DAC960_LA_IRQMASK_OFFSET);
+
+	return !(imask & DAC960_LA_IRQMASK_DISABLE_IRQ);
+}
+
+static inline void DAC960_LA_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
+		union myrb_cmd_mbox *mbox)
+{
+	mem_mbox->words[1] = mbox->words[1];
+	mem_mbox->words[2] = mbox->words[2];
+	mem_mbox->words[3] = mbox->words[3];
+	/* Memory barrier to prevent reordering */
+	wmb();
+	mem_mbox->words[0] = mbox->words[0];
+	/* Memory barrier to force PCI access */
+	mb();
+}
+
+static inline void DAC960_LA_write_hw_mbox(void __iomem *base,
+		union myrb_cmd_mbox *mbox)
+{
+	writel(mbox->words[0], base + DAC960_LA_CMDOP_OFFSET);
+	writel(mbox->words[1], base + DAC960_LA_MBOX4_OFFSET);
+	writel(mbox->words[2], base + DAC960_LA_MBOX8_OFFSET);
+	writeb(mbox->bytes[12], base + DAC960_LA_MBOX12_OFFSET);
+}
+
+static inline unsigned char DAC960_LA_read_status_cmd_ident(void __iomem *base)
+{
+	return readb(base + DAC960_LA_STSID_OFFSET);
+}
+
+static inline unsigned short DAC960_LA_read_status(void __iomem *base)
+{
+	return readw(base + DAC960_LA_STS_OFFSET);
+}
+
+static inline bool
+DAC960_LA_read_error_status(void __iomem *base, unsigned char *error,
+		unsigned char *param0, unsigned char *param1)
+{
+	unsigned char errsts = readb(base + DAC960_LA_ERRSTS_OFFSET);
+
+	if (!(errsts & DAC960_LA_ERRSTS_PENDING))
+		return false;
+	errsts &= ~DAC960_LA_ERRSTS_PENDING;
+
+	*error = errsts;
+	*param0 = readb(base + DAC960_LA_CMDOP_OFFSET);
+	*param1 = readb(base + DAC960_LA_CMDID_OFFSET);
+	writeb(0xFF, base + DAC960_LA_ERRSTS_OFFSET);
+	return true;
+}
+
+static inline unsigned short
+DAC960_LA_mbox_init(struct pci_dev *pdev, void __iomem *base,
+		union myrb_cmd_mbox *mbox)
+{
+	unsigned short status;
+	int timeout = 0;
+
+	while (timeout < MYRB_MAILBOX_TIMEOUT) {
+		if (!DAC960_LA_hw_mbox_is_full(base))
+			break;
+		udelay(10);
+		timeout++;
+	}
+	if (DAC960_LA_hw_mbox_is_full(base)) {
+		dev_err(&pdev->dev,
+			"Timeout waiting for empty mailbox\n");
+		return MYRB_STATUS_SUBSYS_TIMEOUT;
+	}
+	DAC960_LA_write_hw_mbox(base, mbox);
+	DAC960_LA_hw_mbox_new_cmd(base);
+	timeout = 0;
+	while (timeout < MYRB_MAILBOX_TIMEOUT) {
+		if (DAC960_LA_hw_mbox_status_available(base))
+			break;
+		udelay(10);
+		timeout++;
+	}
+	if (!DAC960_LA_hw_mbox_status_available(base)) {
+		dev_err(&pdev->dev, "Timeout waiting for mailbox status\n");
+		return MYRB_STATUS_SUBSYS_TIMEOUT;
+	}
+	status = DAC960_LA_read_status(base);
+	DAC960_LA_ack_hw_mbox_intr(base);
+	DAC960_LA_ack_hw_mbox_status(base);
+
+	return status;
+}
+
+static int DAC960_LA_hw_init(struct pci_dev *pdev,
+		struct myrb_hba *cb, void __iomem *base)
+{
+	int timeout = 0;
+	unsigned char error, parm0, parm1;
+
+	DAC960_LA_disable_intr(base);
+	DAC960_LA_ack_hw_mbox_status(base);
+	udelay(1000);
+	timeout = 0;
+	while (DAC960_LA_init_in_progress(base) &&
+	       timeout < MYRB_MAILBOX_TIMEOUT) {
+		if (DAC960_LA_read_error_status(base, &error,
+					      &parm0, &parm1) &&
+		    myrb_err_status(cb, error, parm0, parm1))
+			return -ENODEV;
+		udelay(10);
+		timeout++;
+	}
+	if (timeout == MYRB_MAILBOX_TIMEOUT) {
+		dev_err(&pdev->dev,
+			"Timeout waiting for Controller Initialisation\n");
+		return -ETIMEDOUT;
+	}
+	if (!myrb_enable_mmio(cb, DAC960_LA_mbox_init)) {
+		dev_err(&pdev->dev,
+			"Unable to Enable Memory Mailbox Interface\n");
+		DAC960_LA_reset_ctrl(base);
+		return -ENODEV;
+	}
+	DAC960_LA_enable_intr(base);
+	cb->qcmd = myrb_qcmd;
+	cb->write_cmd_mbox = DAC960_LA_write_cmd_mbox;
+	if (cb->dual_mode_interface)
+		cb->get_cmd_mbox = DAC960_LA_mem_mbox_new_cmd;
+	else
+		cb->get_cmd_mbox = DAC960_LA_hw_mbox_new_cmd;
+	cb->disable_intr = DAC960_LA_disable_intr;
+	cb->reset = DAC960_LA_reset_ctrl;
+
+	return 0;
+}
+
+static irqreturn_t DAC960_LA_intr_handler(int irq, void *arg)
+{
+	struct myrb_hba *cb = arg;
+	void __iomem *base = cb->io_base;
+	struct myrb_stat_mbox *next_stat_mbox;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cb->queue_lock, flags);
+	DAC960_LA_ack_intr(base);
+	next_stat_mbox = cb->next_stat_mbox;
+	while (next_stat_mbox->valid) {
+		unsigned char id = next_stat_mbox->id;
+		struct scsi_cmnd *scmd = NULL;
+		struct myrb_cmdblk *cmd_blk = NULL;
+
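+		/*
+		 * MYRB_DCMD_TAG and MYRB_MCMD_TAG identify the driver's
+		 * internal command blocks; any other id is a SCSI tag
+		 * offset by 3.
+		 */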
+		if (id == MYRB_DCMD_TAG)
+			cmd_blk = &cb->dcmd_blk;
+		else if (id == MYRB_MCMD_TAG)
+			cmd_blk = &cb->mcmd_blk;
+		else {
+			scmd = scsi_host_find_tag(cb->host, id - 3);
+			if (scmd)
+				cmd_blk = scsi_cmd_priv(scmd);
+		}
+		if (cmd_blk)
+			cmd_blk->status = next_stat_mbox->status;
+		else
+			dev_err(&cb->pdev->dev,
+				"Unhandled command completion %d\n", id);
+
+		memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
+		if (++next_stat_mbox > cb->last_stat_mbox)
+			next_stat_mbox = cb->first_stat_mbox;
+
+		if (cmd_blk) {
+			if (id < 3)
+				myrb_handle_cmdblk(cb, cmd_blk);
+			else
+				myrb_handle_scsi(cb, cmd_blk, scmd);
+		}
+	}
+	cb->next_stat_mbox = next_stat_mbox;
+	spin_unlock_irqrestore(&cb->queue_lock, flags);
+	return IRQ_HANDLED;
+}
+
+struct myrb_privdata DAC960_LA_privdata = {
+	.hw_init =	DAC960_LA_hw_init,
+	.irq_handler =	DAC960_LA_intr_handler,
+	.mmio_size =	DAC960_LA_mmio_size,
+};
+
+/*
+ * DAC960 PG Series Controllers
+ */
+static inline void DAC960_PG_hw_mbox_new_cmd(void __iomem *base)
+{
+	writel(DAC960_PG_IDB_HWMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
+}
+
+static inline void DAC960_PG_ack_hw_mbox_status(void __iomem *base)
+{
+	writel(DAC960_PG_IDB_HWMBOX_ACK_STS, base + DAC960_PG_IDB_OFFSET);
+}
+
+static inline void DAC960_PG_gen_intr(void __iomem *base)
+{
+	writel(DAC960_PG_IDB_GEN_IRQ, base + DAC960_PG_IDB_OFFSET);
+}
+
+static inline void DAC960_PG_reset_ctrl(void __iomem *base)
+{
+	writel(DAC960_PG_IDB_CTRL_RESET, base + DAC960_PG_IDB_OFFSET);
+}
+
+static inline void DAC960_PG_mem_mbox_new_cmd(void __iomem *base)
+{
+	writel(DAC960_PG_IDB_MMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
+}
+
+static inline bool DAC960_PG_hw_mbox_is_full(void __iomem *base)
+{
+	unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);
+
+	return idb & DAC960_PG_IDB_HWMBOX_FULL;
+}
+
+static inline bool DAC960_PG_init_in_progress(void __iomem *base)
+{
+	unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);
+
+	return idb & DAC960_PG_IDB_INIT_IN_PROGRESS;
+}
+
+static inline void DAC960_PG_ack_hw_mbox_intr(void __iomem *base)
+{
+	writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
+}
+
+static inline void DAC960_PG_ack_mem_mbox_intr(void __iomem *base)
+{
+	writel(DAC960_PG_ODB_MMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
+}
+
+static inline void DAC960_PG_ack_intr(void __iomem *base)
+{
+	writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ | DAC960_PG_ODB_MMBOX_ACK_IRQ,
+	       base + DAC960_PG_ODB_OFFSET);
+}
+
+static inline bool DAC960_PG_hw_mbox_status_available(void __iomem *base)
+{
+	unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET);
+
+	return odb & DAC960_PG_ODB_HWMBOX_STS_AVAIL;
+}
+
+static inline bool DAC960_PG_mem_mbox_status_available(void __iomem *base)
+{
+	unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET);
+
+	return odb & DAC960_PG_ODB_MMBOX_STS_AVAIL;
+}
+
+static inline void DAC960_PG_enable_intr(void __iomem *base)
+{
+	unsigned int imask = (unsigned int)-1;
+
+	imask &= ~DAC960_PG_IRQMASK_DISABLE_IRQ;
+	writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
+}
+
+static inline void DAC960_PG_disable_intr(void __iomem *base)
+{
+	unsigned int imask = (unsigned int)-1;
+
+	writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
+}
+
+static inline bool DAC960_PG_intr_enabled(void __iomem *base)
+{
+	unsigned int imask = readl(base + DAC960_PG_IRQMASK_OFFSET);
+
+	return !(imask & DAC960_PG_IRQMASK_DISABLE_IRQ);
+}
+
+static inline void DAC960_PG_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
+		union myrb_cmd_mbox *mbox)
+{
+	mem_mbox->words[1] = mbox->words[1];
+	mem_mbox->words[2] = mbox->words[2];
+	mem_mbox->words[3] = mbox->words[3];
+	/* Memory barrier to prevent reordering */
+	wmb();
+	mem_mbox->words[0] = mbox->words[0];
+	/* Memory barrier to force PCI access */
+	mb();
+}
+
+static inline void DAC960_PG_write_hw_mbox(void __iomem *base,
+		union myrb_cmd_mbox *mbox)
+{
+	writel(mbox->words[0], base + DAC960_PG_CMDOP_OFFSET);
+	writel(mbox->words[1], base + DAC960_PG_MBOX4_OFFSET);
+	writel(mbox->words[2], base + DAC960_PG_MBOX8_OFFSET);
+	writeb(mbox->bytes[12], base + DAC960_PG_MBOX12_OFFSET);
+}
+
+static inline unsigned char
+DAC960_PG_read_status_cmd_ident(void __iomem *base)
+{
+	return readb(base + DAC960_PG_STSID_OFFSET);
+}
+
+static inline unsigned short
+DAC960_PG_read_status(void __iomem *base)
+{
+	return readw(base + DAC960_PG_STS_OFFSET);
+}
+
+static inline bool
+DAC960_PG_read_error_status(void __iomem *base, unsigned char *error,
+		unsigned char *param0, unsigned char *param1)
+{
+	unsigned char errsts = readb(base + DAC960_PG_ERRSTS_OFFSET);
+
+	if (!(errsts & DAC960_PG_ERRSTS_PENDING))
+		return false;
+	errsts &= ~DAC960_PG_ERRSTS_PENDING;
+	*error = errsts;
+	*param0 = readb(base + DAC960_PG_CMDOP_OFFSET);
+	*param1 = readb(base + DAC960_PG_CMDID_OFFSET);
+	writeb(0, base + DAC960_PG_ERRSTS_OFFSET);
+	return true;
+}
+
+static inline unsigned short
+DAC960_PG_mbox_init(struct pci_dev *pdev, void __iomem *base,
+		union myrb_cmd_mbox *mbox)
+{
+	unsigned short status;
+	int timeout = 0;
+
+	while (timeout < MYRB_MAILBOX_TIMEOUT) {
+		if (!DAC960_PG_hw_mbox_is_full(base))
+			break;
+		udelay(10);
+		timeout++;
+	}
+	if (DAC960_PG_hw_mbox_is_full(base)) {
+		dev_err(&pdev->dev,
+			"Timeout waiting for empty mailbox\n");
+		return MYRB_STATUS_SUBSYS_TIMEOUT;
+	}
+	DAC960_PG_write_hw_mbox(base, mbox);
+	DAC960_PG_hw_mbox_new_cmd(base);
+
+	timeout = 0;
+	while (timeout < MYRB_MAILBOX_TIMEOUT) {
+		if (DAC960_PG_hw_mbox_status_available(base))
+			break;
+		udelay(10);
+		timeout++;
+	}
+	if (!DAC960_PG_hw_mbox_status_available(base)) {
+		dev_err(&pdev->dev,
+			"Timeout waiting for mailbox status\n");
+		return MYRB_STATUS_SUBSYS_TIMEOUT;
+	}
+	status = DAC960_PG_read_status(base);
+	DAC960_PG_ack_hw_mbox_intr(base);
+	DAC960_PG_ack_hw_mbox_status(base);
+
+	return status;
+}
+
+static int DAC960_PG_hw_init(struct pci_dev *pdev,
+		struct myrb_hba *cb, void __iomem *base)
+{
+	int timeout = 0;
+	unsigned char error, parm0, parm1;
+
+	DAC960_PG_disable_intr(base);
+	DAC960_PG_ack_hw_mbox_status(base);
+	udelay(1000);
+	while (DAC960_PG_init_in_progress(base) &&
+	       timeout < MYRB_MAILBOX_TIMEOUT) {
+		if (DAC960_PG_read_error_status(base, &error,
+						&parm0, &parm1) &&
+		    myrb_err_status(cb, error, parm0, parm1))
+			return -EIO;
+		udelay(10);
+		timeout++;
+	}
+	if (timeout == MYRB_MAILBOX_TIMEOUT) {
+		dev_err(&pdev->dev,
+			"Timeout waiting for Controller Initialisation\n");
+		return -ETIMEDOUT;
+	}
+	if (!myrb_enable_mmio(cb, DAC960_PG_mbox_init)) {
+		dev_err(&pdev->dev,
+			"Unable to Enable Memory Mailbox Interface\n");
+		DAC960_PG_reset_ctrl(base);
+		return -ENODEV;
+	}
+	DAC960_PG_enable_intr(base);
+	cb->qcmd = myrb_qcmd;
+	cb->write_cmd_mbox = DAC960_PG_write_cmd_mbox;
+	if (cb->dual_mode_interface)
+		cb->get_cmd_mbox = DAC960_PG_mem_mbox_new_cmd;
+	else
+		cb->get_cmd_mbox = DAC960_PG_hw_mbox_new_cmd;
+	cb->disable_intr = DAC960_PG_disable_intr;
+	cb->reset = DAC960_PG_reset_ctrl;
+
+	return 0;
+}
+
+static irqreturn_t DAC960_PG_intr_handler(int irq, void *arg)
+{
+	struct myrb_hba *cb = arg;
+	void __iomem *base = cb->io_base;
+	struct myrb_stat_mbox *next_stat_mbox;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cb->queue_lock, flags);
+	DAC960_PG_ack_intr(base);
+	next_stat_mbox = cb->next_stat_mbox;
+	while (next_stat_mbox->valid) {
+		unsigned char id = next_stat_mbox->id;
+		struct scsi_cmnd *scmd = NULL;
+		struct myrb_cmdblk *cmd_blk = NULL;
+
+		if (id == MYRB_DCMD_TAG)
+			cmd_blk = &cb->dcmd_blk;
+		else if (id == MYRB_MCMD_TAG)
+			cmd_blk = &cb->mcmd_blk;
+		else {
+			scmd = scsi_host_find_tag(cb->host, id - 3);
+			if (scmd)
+				cmd_blk = scsi_cmd_priv(scmd);
+		}
+		if (cmd_blk)
+			cmd_blk->status = next_stat_mbox->status;
+		else
+			dev_err(&cb->pdev->dev,
+				"Unhandled command completion %d\n", id);
+
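+		/* Clear the slot and advance, wrapping at the end of the ring. */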
+		memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
+		if (++next_stat_mbox > cb->last_stat_mbox)
+			next_stat_mbox = cb->first_stat_mbox;
+
+		if (id < 3)
+			myrb_handle_cmdblk(cb, cmd_blk);
+		else
+			myrb_handle_scsi(cb, cmd_blk, scmd);
+	}
+	cb->next_stat_mbox = next_stat_mbox;
+	spin_unlock_irqrestore(&cb->queue_lock, flags);
+	return IRQ_HANDLED;
+}
+
+struct myrb_privdata DAC960_PG_privdata = {
+	.hw_init =	DAC960_PG_hw_init,
+	.irq_handler =	DAC960_PG_intr_handler,
+	.mmio_size =	DAC960_PG_mmio_size,
+};
+
+
+/*
+ * DAC960 PD Series Controllers
+ */
+
+static inline void DAC960_PD_hw_mbox_new_cmd(void __iomem *base)
+{
+	writeb(DAC960_PD_IDB_HWMBOX_NEW_CMD, base + DAC960_PD_IDB_OFFSET);
+}
+
+static inline void DAC960_PD_ack_hw_mbox_status(void __iomem *base)
+{
+	writeb(DAC960_PD_IDB_HWMBOX_ACK_STS, base + DAC960_PD_IDB_OFFSET);
+}
+
+static inline void DAC960_PD_gen_intr(void __iomem *base)
+{
+	writeb(DAC960_PD_IDB_GEN_IRQ, base + DAC960_PD_IDB_OFFSET);
+}
+
+static inline void DAC960_PD_reset_ctrl(void __iomem *base)
+{
+	writeb(DAC960_PD_IDB_CTRL_RESET, base + DAC960_PD_IDB_OFFSET);
+}
+
+static inline bool DAC960_PD_hw_mbox_is_full(void __iomem *base)
+{
+	unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);
+
+	return idb & DAC960_PD_IDB_HWMBOX_FULL;
+}
+
+static inline bool DAC960_PD_init_in_progress(void __iomem *base)
+{
+	unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);
+
+	return idb & DAC960_PD_IDB_INIT_IN_PROGRESS;
+}
+
+static inline void DAC960_PD_ack_intr(void __iomem *base)
+{
+	writeb(DAC960_PD_ODB_HWMBOX_ACK_IRQ, base + DAC960_PD_ODB_OFFSET);
+}
+
+static inline bool DAC960_PD_hw_mbox_status_available(void __iomem *base)
+{
+	unsigned char odb = readb(base + DAC960_PD_ODB_OFFSET);
+
+	return odb & DAC960_PD_ODB_HWMBOX_STS_AVAIL;
+}
+
+static inline void DAC960_PD_enable_intr(void __iomem *base)
+{
+	writeb(DAC960_PD_IRQMASK_ENABLE_IRQ, base + DAC960_PD_IRQEN_OFFSET);
+}
+
+static inline void DAC960_PD_disable_intr(void __iomem *base)
+{
+	writeb(0, base + DAC960_PD_IRQEN_OFFSET);
+}
+
+static inline bool DAC960_PD_intr_enabled(void __iomem *base)
+{
+	unsigned char imask = readb(base + DAC960_PD_IRQEN_OFFSET);
+
+	return imask & DAC960_PD_IRQMASK_ENABLE_IRQ;
+}
+
+static inline void DAC960_PD_write_cmd_mbox(void __iomem *base,
+		union myrb_cmd_mbox *mbox)
+{
+	writel(mbox->words[0], base + DAC960_PD_CMDOP_OFFSET);
+	writel(mbox->words[1], base + DAC960_PD_MBOX4_OFFSET);
+	writel(mbox->words[2], base + DAC960_PD_MBOX8_OFFSET);
+	writeb(mbox->bytes[12], base + DAC960_PD_MBOX12_OFFSET);
+}
+
+static inline unsigned char
+DAC960_PD_read_status_cmd_ident(void __iomem *base)
+{
+	return readb(base + DAC960_PD_STSID_OFFSET);
+}
+
+static inline unsigned short
+DAC960_PD_read_status(void __iomem *base)
+{
+	return readw(base + DAC960_PD_STS_OFFSET);
+}
+
+static inline bool
+DAC960_PD_read_error_status(void __iomem *base, unsigned char *error,
+		unsigned char *param0, unsigned char *param1)
+{
+	unsigned char errsts = readb(base + DAC960_PD_ERRSTS_OFFSET);
+
+	if (!(errsts & DAC960_PD_ERRSTS_PENDING))
+		return false;
+	errsts &= ~DAC960_PD_ERRSTS_PENDING;
+	*error = errsts;
+	*param0 = readb(base + DAC960_PD_CMDOP_OFFSET);
+	*param1 = readb(base + DAC960_PD_CMDID_OFFSET);
+	writeb(0, base + DAC960_PD_ERRSTS_OFFSET);
+	return true;
+}
+
+static void DAC960_PD_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
+{
+	void __iomem *base = cb->io_base;
+	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+
+	while (DAC960_PD_hw_mbox_is_full(base))
+		udelay(1);
+	DAC960_PD_write_cmd_mbox(base, mbox);
+	DAC960_PD_hw_mbox_new_cmd(base);
+}
+
+static int DAC960_PD_hw_init(struct pci_dev *pdev,
+		struct myrb_hba *cb, void __iomem *base)
+{
+	int timeout = 0;
+	unsigned char error, parm0, parm1;
+
+	if (!request_region(cb->io_addr, 0x80, "myrb")) {
+		dev_err(&pdev->dev, "IO port 0x%lx busy\n",
+			(unsigned long)cb->io_addr);
+		return -EBUSY;
+	}
+	DAC960_PD_disable_intr(base);
+	DAC960_PD_ack_hw_mbox_status(base);
+	udelay(1000);
+	while (DAC960_PD_init_in_progress(base) &&
+	       timeout < MYRB_MAILBOX_TIMEOUT) {
+		if (DAC960_PD_read_error_status(base, &error,
+					      &parm0, &parm1) &&
+		    myrb_err_status(cb, error, parm0, parm1))
+			return -EIO;
+		udelay(10);
+		timeout++;
+	}
+	if (timeout == MYRB_MAILBOX_TIMEOUT) {
+		dev_err(&pdev->dev,
+			"Timeout waiting for Controller Initialisation\n");
+		return -ETIMEDOUT;
+	}
+	if (!myrb_enable_mmio(cb, NULL)) {
+		dev_err(&pdev->dev,
+			"Unable to Enable Memory Mailbox Interface\n");
+		DAC960_PD_reset_ctrl(base);
+		return -ENODEV;
+	}
+	DAC960_PD_enable_intr(base);
+	cb->qcmd = DAC960_PD_qcmd;
+	cb->disable_intr = DAC960_PD_disable_intr;
+	cb->reset = DAC960_PD_reset_ctrl;
+
+	return 0;
+}
+
+static irqreturn_t DAC960_PD_intr_handler(int irq, void *arg)
+{
+	struct myrb_hba *cb = arg;
+	void __iomem *base = cb->io_base;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cb->queue_lock, flags);
+	while (DAC960_PD_hw_mbox_status_available(base)) {
+		unsigned char id = DAC960_PD_read_status_cmd_ident(base);
+		struct scsi_cmnd *scmd = NULL;
+		struct myrb_cmdblk *cmd_blk = NULL;
+
+		if (id == MYRB_DCMD_TAG)
+			cmd_blk = &cb->dcmd_blk;
+		else if (id == MYRB_MCMD_TAG)
+			cmd_blk = &cb->mcmd_blk;
+		else {
+			scmd = scsi_host_find_tag(cb->host, id - 3);
+			if (scmd)
+				cmd_blk = scsi_cmd_priv(scmd);
+		}
+		if (cmd_blk)
+			cmd_blk->status = DAC960_PD_read_status(base);
+		else
+			dev_err(&cb->pdev->dev,
+				"Unhandled command completion %d\n", id);
+
+		DAC960_PD_ack_intr(base);
+		DAC960_PD_ack_hw_mbox_status(base);
+
+		if (id < 3)
+			myrb_handle_cmdblk(cb, cmd_blk);
+		else
+			myrb_handle_scsi(cb, cmd_blk, scmd);
+	}
+	spin_unlock_irqrestore(&cb->queue_lock, flags);
+	return IRQ_HANDLED;
+}
+
+struct myrb_privdata DAC960_PD_privdata = {
+	.hw_init =	DAC960_PD_hw_init,
+	.irq_handler =	DAC960_PD_intr_handler,
+	.mmio_size =	DAC960_PD_mmio_size,
+};
+
+
+/*
+ * DAC960 P Series Controllers
+ *
+ * Similar to the DAC960 PD Series Controllers, but some commands have
+ * to be translated.
+ */
+
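+/*
+ * The old-firmware ENQUIRY reply appears to carry its trailing fields
+ * at offset 36; move them up to offset 132 where struct myrb_enquiry
+ * expects them and clear the vacated region.
+ */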
+static inline void myrb_translate_enquiry(void *enq)
+{
+	memcpy(enq + 132, enq + 36, 64);
+	memset(enq + 36, 0, 96);
+}
+
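+/*
+ * The old GET_DEVICE_STATE reply seems to spread its fields out more;
+ * pull the state, sync parameters and size down to the offsets that
+ * struct myrb_pdev_state expects.
+ */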
+static inline void myrb_translate_devstate(void *state)
+{
+	memcpy(state + 2, state + 3, 1);
+	memmove(state + 4, state + 5, 2);
+	memmove(state + 6, state + 8, 4);
+}
+
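+/*
+ * The old read/write mailbox format appears to keep the two most
+ * significant LBA bits in byte 3 and the logical drive number in
+ * byte 7; these two helpers convert to and from the type5 layout
+ * used by the rest of the driver.
+ */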
+static inline void myrb_translate_to_rw_command(struct myrb_cmdblk *cmd_blk)
+{
+	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+	int ldev_num = mbox->type5.ld.ldev_num;
+
+	mbox->bytes[3] &= 0x7;
+	mbox->bytes[3] |= mbox->bytes[7] << 6;
+	mbox->bytes[7] = ldev_num;
+}
+
+static inline void myrb_translate_from_rw_command(struct myrb_cmdblk *cmd_blk)
+{
+	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+	int ldev_num = mbox->bytes[7];
+
+	mbox->bytes[7] = mbox->bytes[3] >> 6;
+	mbox->bytes[3] &= 0x7;
+	mbox->bytes[3] |= ldev_num << 3;
+}
+
+static void DAC960_P_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
+{
+	void __iomem *base = cb->io_base;
+	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+
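+	/*
+	 * Rewrite current opcodes into their version 2.xx equivalents
+	 * before handing the mailbox to the PD-compatible interface;
+	 * the interrupt handler reverses the translation on completion.
+	 */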
+	switch (mbox->common.opcode) {
+	case MYRB_CMD_ENQUIRY:
+		mbox->common.opcode = MYRB_CMD_ENQUIRY_OLD;
+		break;
+	case MYRB_CMD_GET_DEVICE_STATE:
+		mbox->common.opcode = MYRB_CMD_GET_DEVICE_STATE_OLD;
+		break;
+	case MYRB_CMD_READ:
+		mbox->common.opcode = MYRB_CMD_READ_OLD;
+		myrb_translate_to_rw_command(cmd_blk);
+		break;
+	case MYRB_CMD_WRITE:
+		mbox->common.opcode = MYRB_CMD_WRITE_OLD;
+		myrb_translate_to_rw_command(cmd_blk);
+		break;
+	case MYRB_CMD_READ_SG:
+		mbox->common.opcode = MYRB_CMD_READ_SG_OLD;
+		myrb_translate_to_rw_command(cmd_blk);
+		break;
+	case MYRB_CMD_WRITE_SG:
+		mbox->common.opcode = MYRB_CMD_WRITE_SG_OLD;
+		myrb_translate_to_rw_command(cmd_blk);
+		break;
+	default:
+		break;
+	}
+	while (DAC960_PD_hw_mbox_is_full(base))
+		udelay(1);
+	DAC960_PD_write_cmd_mbox(base, mbox);
+	DAC960_PD_hw_mbox_new_cmd(base);
+}
+
+static int DAC960_P_hw_init(struct pci_dev *pdev,
+		struct myrb_hba *cb, void __iomem *base)
+{
+	int timeout = 0;
+	unsigned char error, parm0, parm1;
+
+	if (!request_region(cb->io_addr, 0x80, "myrb")) {
+		dev_err(&pdev->dev, "IO port 0x%lx busy\n",
+			(unsigned long)cb->io_addr);
+		return -EBUSY;
+	}
+	DAC960_PD_disable_intr(base);
+	DAC960_PD_ack_hw_mbox_status(base);
+	udelay(1000);
+	while (DAC960_PD_init_in_progress(base) &&
+	       timeout < MYRB_MAILBOX_TIMEOUT) {
+		if (DAC960_PD_read_error_status(base, &error,
+						&parm0, &parm1) &&
+		    myrb_err_status(cb, error, parm0, parm1))
+			return -EIO;
+		udelay(10);
+		timeout++;
+	}
+	if (timeout == MYRB_MAILBOX_TIMEOUT) {
+		dev_err(&pdev->dev,
+			"Timeout waiting for Controller Initialisation\n");
+		return -ETIMEDOUT;
+	}
+	if (!myrb_enable_mmio(cb, NULL)) {
+		dev_err(&pdev->dev,
+			"Unable to allocate DMA mapped memory\n");
+		DAC960_PD_reset_ctrl(base);
+		return -ENODEV;
+	}
+	DAC960_PD_enable_intr(base);
+	cb->qcmd = DAC960_P_qcmd;
+	cb->disable_intr = DAC960_PD_disable_intr;
+	cb->reset = DAC960_PD_reset_ctrl;
+
+	return 0;
+}
+
+static irqreturn_t DAC960_P_intr_handler(int irq, void *arg)
+{
+	struct myrb_hba *cb = arg;
+	void __iomem *base = cb->io_base;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cb->queue_lock, flags);
+	while (DAC960_PD_hw_mbox_status_available(base)) {
+		unsigned char id = DAC960_PD_read_status_cmd_ident(base);
+		struct scsi_cmnd *scmd = NULL;
+		struct myrb_cmdblk *cmd_blk = NULL;
+		union myrb_cmd_mbox *mbox;
+		enum myrb_cmd_opcode op;
+
+		if (id == MYRB_DCMD_TAG)
+			cmd_blk = &cb->dcmd_blk;
+		else if (id == MYRB_MCMD_TAG)
+			cmd_blk = &cb->mcmd_blk;
+		else {
+			scmd = scsi_host_find_tag(cb->host, id - 3);
+			if (scmd)
+				cmd_blk = scsi_cmd_priv(scmd);
+		}
+		if (cmd_blk)
+			cmd_blk->status = DAC960_PD_read_status(base);
+		else
+			dev_err(&cb->pdev->dev,
+				"Unhandled command completion %d\n", id);
+
+		DAC960_PD_ack_intr(base);
+		DAC960_PD_ack_hw_mbox_status(base);
+
+		if (!cmd_blk)
+			continue;
+
+		mbox = &cmd_blk->mbox;
+		op = mbox->common.opcode;
+		switch (op) {
+		case MYRB_CMD_ENQUIRY_OLD:
+			mbox->common.opcode = MYRB_CMD_ENQUIRY;
+			myrb_translate_enquiry(cb->enquiry);
+			break;
+		case MYRB_CMD_READ_OLD:
+			mbox->common.opcode = MYRB_CMD_READ;
+			myrb_translate_from_rw_command(cmd_blk);
+			break;
+		case MYRB_CMD_WRITE_OLD:
+			mbox->common.opcode = MYRB_CMD_WRITE;
+			myrb_translate_from_rw_command(cmd_blk);
+			break;
+		case MYRB_CMD_READ_SG_OLD:
+			mbox->common.opcode = MYRB_CMD_READ_SG;
+			myrb_translate_from_rw_command(cmd_blk);
+			break;
+		case MYRB_CMD_WRITE_SG_OLD:
+			mbox->common.opcode = MYRB_CMD_WRITE_SG;
+			myrb_translate_from_rw_command(cmd_blk);
+			break;
+		default:
+			break;
+		}
+		if (id < 3)
+			myrb_handle_cmdblk(cb, cmd_blk);
+		else
+			myrb_handle_scsi(cb, cmd_blk, scmd);
+	}
+	spin_unlock_irqrestore(&cb->queue_lock, flags);
+	return IRQ_HANDLED;
+}
+
+struct myrb_privdata DAC960_P_privdata = {
+	.hw_init =	DAC960_P_hw_init,
+	.irq_handler =	DAC960_P_intr_handler,
+	.mmio_size =	DAC960_PD_mmio_size,
+};
+
+static struct myrb_hba *myrb_detect(struct pci_dev *pdev,
+		const struct pci_device_id *entry)
+{
+	struct myrb_privdata *privdata =
+		(struct myrb_privdata *)entry->driver_data;
+	irq_handler_t irq_handler = privdata->irq_handler;
+	unsigned int mmio_size = privdata->mmio_size;
+	struct Scsi_Host *shost;
+	struct myrb_hba *cb = NULL;
+
+	shost = scsi_host_alloc(&myrb_template, sizeof(struct myrb_hba));
+	if (!shost) {
+		dev_err(&pdev->dev, "Unable to allocate Controller\n");
+		return NULL;
+	}
+	shost->max_cmd_len = 12;
+	shost->max_lun = 256;
+	cb = shost_priv(shost);
+	mutex_init(&cb->dcmd_mutex);
+	mutex_init(&cb->dma_mutex);
+	cb->pdev = pdev;
+
+	if (pci_enable_device(pdev))
+		goto failure;
+
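+	/*
+	 * PD and P series boards expose a legacy I/O port range in BAR0
+	 * (reserved in their hw_init routines) alongside the memory
+	 * window in BAR1; later boards use a single memory BAR.
+	 */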
+	if (privdata->hw_init == DAC960_PD_hw_init ||
+	    privdata->hw_init == DAC960_P_hw_init) {
+		cb->io_addr = pci_resource_start(pdev, 0);
+		cb->pci_addr = pci_resource_start(pdev, 1);
+	} else {
+		cb->pci_addr = pci_resource_start(pdev, 0);
+	}
+
+	pci_set_drvdata(pdev, cb);
+	spin_lock_init(&cb->queue_lock);
+	if (mmio_size < PAGE_SIZE)
+		mmio_size = PAGE_SIZE;
+	cb->mmio_base = ioremap_nocache(cb->pci_addr & PAGE_MASK, mmio_size);
+	if (cb->mmio_base == NULL) {
+		dev_err(&pdev->dev,
+			"Unable to map Controller Register Window\n");
+		goto failure;
+	}
+
+	cb->io_base = cb->mmio_base + (cb->pci_addr & ~PAGE_MASK);
+	if (privdata->hw_init(pdev, cb, cb->io_base))
+		goto failure;
+
+	if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrb", cb) < 0) {
+		dev_err(&pdev->dev,
+			"Unable to acquire IRQ Channel %d\n", pdev->irq);
+		goto failure;
+	}
+	cb->irq = pdev->irq;
+	return cb;
+
+failure:
+	dev_err(&pdev->dev,
+		"Failed to initialize Controller\n");
+	myrb_cleanup(cb);
+	return NULL;
+}
+
+static int myrb_probe(struct pci_dev *dev, const struct pci_device_id *entry)
+{
+	struct myrb_hba *cb;
+	int ret;
+
+	cb = myrb_detect(dev, entry);
+	if (!cb)
+		return -ENODEV;
+
+	ret = myrb_get_hba_config(cb);
+	if (ret < 0) {
+		myrb_cleanup(cb);
+		return ret;
+	}
+
+	if (!myrb_create_mempools(dev, cb)) {
+		ret = -ENOMEM;
+		goto failed;
+	}
+
+	ret = scsi_add_host(cb->host, &dev->dev);
+	if (ret) {
+		dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
+		myrb_destroy_mempools(cb);
+		goto failed;
+	}
+	scsi_scan_host(cb->host);
+	return 0;
+failed:
+	myrb_cleanup(cb);
+	return ret;
+}
+
+static void myrb_remove(struct pci_dev *pdev)
+{
+	struct myrb_hba *cb = pci_get_drvdata(pdev);
+
+	shost_printk(KERN_NOTICE, cb->host, "Flushing Cache...\n");
+	myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
+	myrb_cleanup(cb);
+	myrb_destroy_mempools(cb);
+}
+
+static const struct pci_device_id myrb_id_table[] = {
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_DEC,
+			       PCI_DEVICE_ID_DEC_21285,
+			       PCI_VENDOR_ID_MYLEX,
+			       PCI_DEVICE_ID_MYLEX_DAC960_LA),
+		.driver_data	= (unsigned long) &DAC960_LA_privdata,
+	},
+	{
+		PCI_DEVICE_DATA(MYLEX, DAC960_PG, &DAC960_PG_privdata),
+	},
+	{
+		PCI_DEVICE_DATA(MYLEX, DAC960_PD, &DAC960_PD_privdata),
+	},
+	{
+		PCI_DEVICE_DATA(MYLEX, DAC960_P, &DAC960_P_privdata),
+	},
+	{0, },
+};
+
+MODULE_DEVICE_TABLE(pci, myrb_id_table);
+
+static struct pci_driver myrb_pci_driver = {
+	.name		= "myrb",
+	.id_table	= myrb_id_table,
+	.probe		= myrb_probe,
+	.remove		= myrb_remove,
+};
+
+static int __init myrb_init_module(void)
+{
+	int ret;
+
+	myrb_raid_template = raid_class_attach(&myrb_raid_functions);
+	if (!myrb_raid_template)
+		return -ENODEV;
+
+	ret = pci_register_driver(&myrb_pci_driver);
+	if (ret)
+		raid_class_release(myrb_raid_template);
+
+	return ret;
+}
+
+static void __exit myrb_cleanup_module(void)
+{
+	pci_unregister_driver(&myrb_pci_driver);
+	raid_class_release(myrb_raid_template);
+}
+
+module_init(myrb_init_module);
+module_exit(myrb_cleanup_module);
+
+MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (Block interface)");
+MODULE_AUTHOR("Hannes Reinecke <hare@suse.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/myrb.h b/drivers/scsi/myrb.h
new file mode 100644
index 0000000..9289c19
--- /dev/null
+++ b/drivers/scsi/myrb.h
@@ -0,0 +1,958 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
+ *
+ * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
+ *
+ * Based on the original DAC960 driver,
+ * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
+ * Portions Copyright 2002 by Mylex (An IBM Business Unit)
+ *
+ */
+
+#ifndef MYRB_H
+#define MYRB_H
+
+#define MYRB_MAX_LDEVS			32
+#define MYRB_MAX_CHANNELS		3
+#define MYRB_MAX_TARGETS		16
+#define MYRB_MAX_PHYSICAL_DEVICES	45
+#define MYRB_SCATTER_GATHER_LIMIT	32
+#define MYRB_CMD_MBOX_COUNT		256
+#define MYRB_STAT_MBOX_COUNT		1024
+
+#define MYRB_BLKSIZE_BITS		9
+#define MYRB_MAILBOX_TIMEOUT		1000000
+
+#define MYRB_DCMD_TAG			1
+#define MYRB_MCMD_TAG			2
+
+#define MYRB_PRIMARY_MONITOR_INTERVAL (10 * HZ)
+#define MYRB_SECONDARY_MONITOR_INTERVAL (60 * HZ)
+
+/*
+ * DAC960 V1 Firmware Command Opcodes.
+ */
+enum myrb_cmd_opcode {
+	/* I/O Commands */
+	MYRB_CMD_READ_EXTENDED =	0x33,
+	MYRB_CMD_WRITE_EXTENDED =	0x34,
+	MYRB_CMD_READAHEAD_EXTENDED =	0x35,
+	MYRB_CMD_READ_EXTENDED_SG =	0xB3,
+	MYRB_CMD_WRITE_EXTENDED_SG =	0xB4,
+	MYRB_CMD_READ =			0x36,
+	MYRB_CMD_READ_SG =		0xB6,
+	MYRB_CMD_WRITE =		0x37,
+	MYRB_CMD_WRITE_SG =		0xB7,
+	MYRB_CMD_DCDB =			0x04,
+	MYRB_CMD_DCDB_SG =		0x84,
+	MYRB_CMD_FLUSH =		0x0A,
+	/* Controller Status Related Commands */
+	MYRB_CMD_ENQUIRY =		0x53,
+	MYRB_CMD_ENQUIRY2 =		0x1C,
+	MYRB_CMD_GET_LDRV_ELEMENT =	0x55,
+	MYRB_CMD_GET_LDEV_INFO =	0x19,
+	MYRB_CMD_IOPORTREAD =		0x39,
+	MYRB_CMD_IOPORTWRITE =		0x3A,
+	MYRB_CMD_GET_SD_STATS =		0x3E,
+	MYRB_CMD_GET_PD_STATS =		0x3F,
+	MYRB_CMD_EVENT_LOG_OPERATION =	0x72,
+	/* Device Related Commands */
+	MYRB_CMD_START_DEVICE =		0x10,
+	MYRB_CMD_GET_DEVICE_STATE =	0x50,
+	MYRB_CMD_STOP_CHANNEL =		0x13,
+	MYRB_CMD_START_CHANNEL =	0x12,
+	MYRB_CMD_RESET_CHANNEL =	0x1A,
+	/* Commands Associated with Data Consistency and Errors */
+	MYRB_CMD_REBUILD =		0x09,
+	MYRB_CMD_REBUILD_ASYNC =	0x16,
+	MYRB_CMD_CHECK_CONSISTENCY =	0x0F,
+	MYRB_CMD_CHECK_CONSISTENCY_ASYNC = 0x1E,
+	MYRB_CMD_REBUILD_STAT =		0x0C,
+	MYRB_CMD_GET_REBUILD_PROGRESS =	0x27,
+	MYRB_CMD_REBUILD_CONTROL =	0x1F,
+	MYRB_CMD_READ_BADBLOCK_TABLE =	0x0B,
+	MYRB_CMD_READ_BADDATA_TABLE =	0x25,
+	MYRB_CMD_CLEAR_BADDATA_TABLE =	0x26,
+	MYRB_CMD_GET_ERROR_TABLE =	0x17,
+	MYRB_CMD_ADD_CAPACITY_ASYNC =	0x2A,
+	MYRB_CMD_BGI_CONTROL =		0x2B,
+	/* Configuration Related Commands */
+	MYRB_CMD_READ_CONFIG2 =		0x3D,
+	MYRB_CMD_WRITE_CONFIG2 =	0x3C,
+	MYRB_CMD_READ_CONFIG_ONDISK =	0x4A,
+	MYRB_CMD_WRITE_CONFIG_ONDISK =	0x4B,
+	MYRB_CMD_READ_CONFIG =		0x4E,
+	MYRB_CMD_READ_BACKUP_CONFIG =	0x4D,
+	MYRB_CMD_WRITE_CONFIG =		0x4F,
+	MYRB_CMD_ADD_CONFIG =		0x4C,
+	MYRB_CMD_READ_CONFIG_LABEL =	0x48,
+	MYRB_CMD_WRITE_CONFIG_LABEL =	0x49,
+	/* Firmware Upgrade Related Commands */
+	MYRB_CMD_LOAD_IMAGE =		0x20,
+	MYRB_CMD_STORE_IMAGE =		0x21,
+	MYRB_CMD_PROGRAM_IMAGE =	0x22,
+	/* Diagnostic Commands */
+	MYRB_CMD_SET_DIAGNOSTIC_MODE =	0x31,
+	MYRB_CMD_RUN_DIAGNOSTIC =	0x32,
+	/* Subsystem Service Commands */
+	MYRB_CMD_GET_SUBSYS_DATA =	0x70,
+	MYRB_CMD_SET_SUBSYS_PARAM =	0x71,
+	/* Version 2.xx Firmware Commands */
+	MYRB_CMD_ENQUIRY_OLD =		0x05,
+	MYRB_CMD_GET_DEVICE_STATE_OLD =	0x14,
+	MYRB_CMD_READ_OLD =		0x02,
+	MYRB_CMD_WRITE_OLD =		0x03,
+	MYRB_CMD_READ_SG_OLD =		0x82,
+	MYRB_CMD_WRITE_SG_OLD =		0x83
+} __packed;
+
+/*
+ * DAC960 V1 Firmware Command Status Codes.
+ */
+#define MYRB_STATUS_SUCCESS			0x0000	/* Common */
+#define MYRB_STATUS_CHECK_CONDITION		0x0002	/* Common */
+#define MYRB_STATUS_NO_DEVICE			0x0102	/* Common */
+#define MYRB_STATUS_INVALID_ADDRESS		0x0105	/* Common */
+#define MYRB_STATUS_INVALID_PARAM		0x0105	/* Common */
+#define MYRB_STATUS_IRRECOVERABLE_DATA_ERROR	0x0001	/* I/O */
+#define MYRB_STATUS_LDRV_NONEXISTENT_OR_OFFLINE 0x0002	/* I/O */
+#define MYRB_STATUS_ACCESS_BEYOND_END_OF_LDRV	0x0105	/* I/O */
+#define MYRB_STATUS_BAD_DATA			0x010C	/* I/O */
+#define MYRB_STATUS_DEVICE_BUSY			0x0008	/* DCDB */
+#define MYRB_STATUS_DEVICE_NONRESPONSIVE	0x000E	/* DCDB */
+#define MYRB_STATUS_COMMAND_TERMINATED		0x000F	/* DCDB */
+#define MYRB_STATUS_START_DEVICE_FAILED		0x0002	/* Device */
+#define MYRB_STATUS_INVALID_CHANNEL_OR_TARGET	0x0105	/* Device */
+#define MYRB_STATUS_CHANNEL_BUSY		0x0106	/* Device */
+#define MYRB_STATUS_OUT_OF_MEMORY		0x0107	/* Device */
+#define MYRB_STATUS_CHANNEL_NOT_STOPPED		0x0002	/* Device */
+#define MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE 0x0002	/* Consistency */
+#define MYRB_STATUS_RBLD_BADBLOCKS		0x0003	/* Consistency */
+#define MYRB_STATUS_RBLD_NEW_DISK_FAILED	0x0004	/* Consistency */
+#define MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS	0x0106	/* Consistency */
+#define MYRB_STATUS_DEPENDENT_DISK_DEAD		0x0002	/* Consistency */
+#define MYRB_STATUS_INCONSISTENT_BLOCKS		0x0003	/* Consistency */
+#define MYRB_STATUS_INVALID_OR_NONREDUNDANT_LDRV 0x0105 /* Consistency */
+#define MYRB_STATUS_NO_RBLD_OR_CHECK_INPROGRESS	0x0105	/* Consistency */
+#define MYRB_STATUS_RBLD_IN_PROGRESS_DATA_VALID	0x0000	/* Consistency */
+#define MYRB_STATUS_RBLD_FAILED_LDEV_FAILURE	0x0002	/* Consistency */
+#define MYRB_STATUS_RBLD_FAILED_BADBLOCKS	0x0003	/* Consistency */
+#define MYRB_STATUS_RBLD_FAILED_NEW_DRIVE_FAILED 0x0004	/* Consistency */
+#define MYRB_STATUS_RBLD_SUCCESS		0x0100	/* Consistency */
+#define MYRB_STATUS_RBLD_SUCCESS_TERMINATED	0x0107	/* Consistency */
+#define MYRB_STATUS_RBLD_NOT_CHECKED		0x0108	/* Consistency */
+#define MYRB_STATUS_BGI_SUCCESS			0x0100	/* Consistency */
+#define MYRB_STATUS_BGI_ABORTED			0x0005	/* Consistency */
+#define MYRB_STATUS_NO_BGI_INPROGRESS		0x0105	/* Consistency */
+#define MYRB_STATUS_ADD_CAPACITY_INPROGRESS	0x0004	/* Consistency */
+#define MYRB_STATUS_ADD_CAPACITY_FAILED_OR_SUSPENDED 0x00F4 /* Consistency */
+#define MYRB_STATUS_CONFIG2_CSUM_ERROR		0x0002	/* Configuration */
+#define MYRB_STATUS_CONFIGURATION_SUSPENDED	0x0106	/* Configuration */
+#define MYRB_STATUS_FAILED_TO_CONFIGURE_NVRAM	0x0105	/* Configuration */
+#define MYRB_STATUS_CONFIGURATION_NOT_SAVED	0x0106	/* Configuration */
+#define MYRB_STATUS_SUBSYS_NOTINSTALLED		0x0001	/* Subsystem */
+#define MYRB_STATUS_SUBSYS_FAILED		0x0002	/* Subsystem */
+#define MYRB_STATUS_SUBSYS_BUSY			0x0106	/* Subsystem */
+#define MYRB_STATUS_SUBSYS_TIMEOUT		0x0108	/* Subsystem */
+
+/*
+ * DAC960 V1 Firmware Enquiry Command reply structure.
+ */
+struct myrb_enquiry {
+	unsigned char ldev_count;			/* Byte 0 */
+	unsigned int rsvd1:24;				/* Bytes 1-3 */
+	unsigned int ldev_sizes[32];			/* Bytes 4-131 */
+	unsigned short flash_age;			/* Bytes 132-133 */
+	struct {
+		unsigned char deferred:1;		/* Byte 134 Bit 0 */
+		unsigned char low_bat:1;		/* Byte 134 Bit 1 */
+		unsigned char rsvd2:6;			/* Byte 134 Bits 2-7 */
+	} status;
+	unsigned char rsvd3:8;				/* Byte 135 */
+	unsigned char fw_minor_version;			/* Byte 136 */
+	unsigned char fw_major_version;			/* Byte 137 */
+	enum {
+		MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS =	0x00,
+		MYRB_STDBY_RBLD_IN_PROGRESS =			0x01,
+		MYRB_BG_RBLD_IN_PROGRESS =			0x02,
+		MYRB_BG_CHECK_IN_PROGRESS =			0x03,
+		MYRB_STDBY_RBLD_COMPLETED_WITH_ERROR =		0xFF,
+		MYRB_BG_RBLD_OR_CHECK_FAILED_DRIVE_FAILED =	0xF0,
+		MYRB_BG_RBLD_OR_CHECK_FAILED_LDEV_FAILED =	0xF1,
+		MYRB_BG_RBLD_OR_CHECK_FAILED_OTHER =		0xF2,
+		MYRB_BG_RBLD_OR_CHECK_SUCCESS_TERMINATED =	0xF3
+	} __packed rbld;		/* Byte 138 */
+	unsigned char max_tcq;				/* Byte 139 */
+	unsigned char ldev_offline;			/* Byte 140 */
+	unsigned char rsvd4:8;				/* Byte 141 */
+	unsigned short ev_seq;				/* Bytes 142-143 */
+	unsigned char ldev_critical;			/* Byte 144 */
+	unsigned int rsvd5:24;				/* Bytes 145-147 */
+	unsigned char pdev_dead;			/* Byte 148 */
+	unsigned char rsvd6:8;				/* Byte 149 */
+	unsigned char rbld_count;			/* Byte 150 */
+	struct {
+		unsigned char rsvd7:3;			/* Byte 151 Bits 0-2 */
+		unsigned char bbu_present:1;		/* Byte 151 Bit 3 */
+		unsigned char rsvd8:4;			/* Byte 151 Bits 4-7 */
+	} misc;
+	struct {
+		unsigned char target;
+		unsigned char channel;
+	} dead_drives[21];				/* Bytes 152-194 */
+	unsigned char rsvd9[62];			/* Bytes 195-255 */
+} __packed;
+
+/*
+ * DAC960 V1 Firmware Enquiry2 Command reply structure.
+ */
+struct myrb_enquiry2 {
+	struct {
+		enum {
+			DAC960_V1_P_PD_PU =			0x01,
+			DAC960_V1_PL =				0x02,
+			DAC960_V1_PG =				0x10,
+			DAC960_V1_PJ =				0x11,
+			DAC960_V1_PR =				0x12,
+			DAC960_V1_PT =				0x13,
+			DAC960_V1_PTL0 =			0x14,
+			DAC960_V1_PRL =				0x15,
+			DAC960_V1_PTL1 =			0x16,
+			DAC960_V1_1164P =			0x20
+		} __packed sub_model;		/* Byte 0 */
+		unsigned char actual_channels;			/* Byte 1 */
+		enum {
+			MYRB_5_CHANNEL_BOARD =		0x01,
+			MYRB_3_CHANNEL_BOARD =		0x02,
+			MYRB_2_CHANNEL_BOARD =		0x03,
+			MYRB_3_CHANNEL_ASIC_DAC =	0x04
+		} __packed model;		/* Byte 2 */
+		enum {
+			MYRB_EISA_CONTROLLER =		0x01,
+			MYRB_MCA_CONTROLLER =		0x02,
+			MYRB_PCI_CONTROLLER =		0x03,
+			MYRB_SCSI_TO_SCSI =		0x08
+		} __packed controller;	/* Byte 3 */
+	} hw;						/* Bytes 0-3 */
+	/* MajorVersion.MinorVersion-FirmwareType-TurnID */
+	struct {
+		unsigned char major_version;		/* Byte 4 */
+		unsigned char minor_version;		/* Byte 5 */
+		unsigned char turn_id;			/* Byte 6 */
+		char firmware_type;			/* Byte 7 */
+	} fw;						/* Bytes 4-7 */
+	unsigned int rsvd1;				/* Byte 8-11 */
+	unsigned char cfg_chan;				/* Byte 12 */
+	unsigned char cur_chan;				/* Byte 13 */
+	unsigned char max_targets;			/* Byte 14 */
+	unsigned char max_tcq;				/* Byte 15 */
+	unsigned char max_ldev;				/* Byte 16 */
+	unsigned char max_arms;				/* Byte 17 */
+	unsigned char max_spans;			/* Byte 18 */
+	unsigned char rsvd2;				/* Byte 19 */
+	unsigned int rsvd3;				/* Bytes 20-23 */
+	unsigned int mem_size;				/* Bytes 24-27 */
+	unsigned int cache_size;			/* Bytes 28-31 */
+	unsigned int flash_size;			/* Bytes 32-35 */
+	unsigned int nvram_size;			/* Bytes 36-39 */
+	struct {
+		enum {
+			MYRB_RAM_TYPE_DRAM =		0x0,
+			MYRB_RAM_TYPE_EDO =			0x1,
+			MYRB_RAM_TYPE_SDRAM =		0x2,
+			MYRB_RAM_TYPE_Last =		0x7
+		} __packed ram:3;	/* Byte 40 Bits 0-2 */
+		enum {
+			MYRB_ERR_CORR_None =	0x0,
+			MYRB_ERR_CORR_Parity =	0x1,
+			MYRB_ERR_CORR_ECC =		0x2,
+			MYRB_ERR_CORR_Last =	0x7
+		} __packed ec:3;	/* Byte 40 Bits 3-5 */
+		unsigned char fast_page:1;		/* Byte 40 Bit 6 */
+		unsigned char low_power:1;		/* Byte 40 Bit 7 */
+		unsigned char rsvd4;			/* Bytes 41 */
+	} mem_type;
+	unsigned short clock_speed;			/* Bytes 42-43 */
+	unsigned short mem_speed;			/* Bytes 44-45 */
+	unsigned short hw_speed;			/* Bytes 46-47 */
+	unsigned char rsvd5[12];			/* Bytes 48-59 */
+	unsigned short max_cmds;			/* Bytes 60-61 */
+	unsigned short max_sge;				/* Bytes 62-63 */
+	unsigned short max_drv_cmds;			/* Bytes 64-65 */
+	unsigned short max_io_desc;			/* Bytes 66-67 */
+	unsigned short max_sectors;			/* Bytes 68-69 */
+	unsigned char latency;				/* Byte 70 */
+	unsigned char rsvd6;				/* Byte 71 */
+	unsigned char scsi_tmo;				/* Byte 72 */
+	unsigned char rsvd7;				/* Byte 73 */
+	unsigned short min_freelines;			/* Bytes 74-75 */
+	unsigned char rsvd8[8];				/* Bytes 76-83 */
+	unsigned char rbld_rate_const;			/* Byte 84 */
+	unsigned char rsvd9[11];			/* Byte 85-95 */
+	unsigned short pdrv_block_size;			/* Bytes 96-97 */
+	unsigned short ldev_block_size;			/* Bytes 98-99 */
+	unsigned short max_blocks_per_cmd;		/* Bytes 100-101 */
+	unsigned short block_factor;			/* Bytes 102-103 */
+	unsigned short cacheline_size;			/* Bytes 104-105 */
+	struct {
+		enum {
+			MYRB_WIDTH_NARROW_8BIT =		0x0,
+			MYRB_WIDTH_WIDE_16BIT =			0x1,
+			MYRB_WIDTH_WIDE_32BIT =			0x2
+		} __packed bus_width:2;	/* Byte 106 Bits 0-1 */
+		enum {
+			MYRB_SCSI_SPEED_FAST =			0x0,
+			MYRB_SCSI_SPEED_ULTRA =			0x1,
+			MYRB_SCSI_SPEED_ULTRA2 =		0x2
+		} __packed bus_speed:2;	/* Byte 106 Bits 2-3 */
+		unsigned char differential:1;		/* Byte 106 Bit 4 */
+		unsigned char rsvd10:3;			/* Byte 106 Bits 5-7 */
+	} scsi_cap;
+	unsigned char rsvd11[5];			/* Byte 107-111 */
+	unsigned short fw_build;			/* Bytes 112-113 */
+	enum {
+		MYRB_FAULT_AEMI =				0x01,
+		MYRB_FAULT_OEM1 =				0x02,
+		MYRB_FAULT_OEM2 =				0x04,
+		MYRB_FAULT_OEM3 =				0x08,
+		MYRB_FAULT_CONNER =				0x10,
+		MYRB_FAULT_SAFTE =				0x20
+	} __packed fault_mgmt;		/* Byte 114 */
+	unsigned char rsvd12;				/* Byte 115 */
+	struct {
+		unsigned int clustering:1;		/* Byte 116 Bit 0 */
+		unsigned int online_RAID_expansion:1;	/* Byte 116 Bit 1 */
+		unsigned int readahead:1;		/* Byte 116 Bit 2 */
+		unsigned int bgi:1;			/* Byte 116 Bit 3 */
+		unsigned int rsvd13:28;			/* Bytes 116-119 */
+	} fw_features;
+	unsigned char rsvd14[8];			/* Bytes 120-127 */
+} __packed;
+
+/*
+ * DAC960 V1 Firmware Logical Drive State type.
+ */
+enum myrb_devstate {
+	MYRB_DEVICE_DEAD =		0x00,
+	MYRB_DEVICE_WO =		0x02,
+	MYRB_DEVICE_ONLINE =		0x03,
+	MYRB_DEVICE_CRITICAL =		0x04,
+	MYRB_DEVICE_STANDBY =		0x10,
+	MYRB_DEVICE_OFFLINE =		0xFF
+} __packed;
+
+/*
+ * DAC960 V1 RAID Levels
+ */
+enum myrb_raidlevel {
+	MYRB_RAID_LEVEL0 =		0x0,     /* RAID 0 */
+	MYRB_RAID_LEVEL1 =		0x1,     /* RAID 1 */
+	MYRB_RAID_LEVEL3 =		0x3,     /* RAID 3 */
+	MYRB_RAID_LEVEL5 =		0x5,     /* RAID 5 */
+	MYRB_RAID_LEVEL6 =		0x6,     /* RAID 6 */
+	MYRB_RAID_JBOD =		0x7,     /* RAID 7 (JBOD) */
+} __packed;
+
+/*
+ * DAC960 V1 Firmware Logical Drive Information structure.
+ */
+struct myrb_ldev_info {
+	unsigned int size;				/* Bytes 0-3 */
+	enum myrb_devstate state;			/* Byte 4 */
+	unsigned int raid_level:7;			/* Byte 5 Bits 0-6 */
+	unsigned int wb_enabled:1;			/* Byte 5 Bit 7 */
+	unsigned int rsvd:16;				/* Bytes 6-7 */
+};
+
+/*
+ * DAC960 V1 Firmware Perform Event Log Operation Types.
+ */
+#define DAC960_V1_GetEventLogEntry		0x00
+
+/*
+ * DAC960 V1 Firmware Get Event Log Entry Command reply structure.
+ */
+struct myrb_log_entry {
+	unsigned char msg_type;			/* Byte 0 */
+	unsigned char msg_len;			/* Byte 1 */
+	unsigned char target:5;			/* Byte 2 Bits 0-4 */
+	unsigned char channel:3;		/* Byte 2 Bits 5-7 */
+	unsigned char lun:6;			/* Byte 3 Bits 0-5 */
+	unsigned char rsvd1:2;			/* Byte 3 Bits 6-7 */
+	unsigned short seq_num;			/* Bytes 4-5 */
+	unsigned char sense[26];		/* Bytes 6-31 */
+};
+
+/*
+ * DAC960 V1 Firmware Get Device State Command reply structure.
+ * The structure is padded by 2 bytes for compatibility with Version 2.xx
+ * Firmware.
+ */
+struct myrb_pdev_state {
+	unsigned int present:1;			/* Byte 0 Bit 0 */
+	unsigned int :7;				/* Byte 0 Bits 1-7 */
+	enum {
+		MYRB_TYPE_OTHER =			0x0,
+		MYRB_TYPE_DISK =			0x1,
+		MYRB_TYPE_TAPE =			0x2,
+		MYRB_TYPE_CDROM_OR_WORM =		0x3
+	} __packed devtype:2;		/* Byte 1 Bits 0-1 */
+	unsigned int rsvd1:1;				/* Byte 1 Bit 2 */
+	unsigned int fast20:1;				/* Byte 1 Bit 3 */
+	unsigned int sync:1;				/* Byte 1 Bit 4 */
+	unsigned int fast:1;				/* Byte 1 Bit 5 */
+	unsigned int wide:1;				/* Byte 1 Bit 6 */
+	unsigned int tcq_supported:1;			/* Byte 1 Bit 7 */
+	enum myrb_devstate state;			/* Byte 2 */
+	unsigned int rsvd2:8;				/* Byte 3 */
+	unsigned int sync_multiplier;			/* Byte 4 */
+	unsigned int sync_offset:5;			/* Byte 5 Bits 0-4 */
+	unsigned int rsvd3:3;				/* Byte 5 Bits 5-7 */
+	unsigned int size;				/* Bytes 6-9 */
+	unsigned int rsvd4:16;			/* Bytes 10-11 */
+} __packed;
+
+/*
+ * DAC960 V1 Firmware Get Rebuild Progress Command reply structure.
+ */
+struct myrb_rbld_progress {
+	unsigned int ldev_num;				/* Bytes 0-3 */
+	unsigned int ldev_size;				/* Bytes 4-7 */
+	unsigned int blocks_left;			/* Bytes 8-11 */
+};
+
+/*
+ * DAC960 V1 Firmware Background Initialization Status Command reply structure.
+ */
+struct myrb_bgi_status {
+	unsigned int ldev_size;				/* Bytes 0-3 */
+	unsigned int blocks_done;			/* Bytes 4-7 */
+	unsigned char rsvd1[12];			/* Bytes 8-19 */
+	unsigned int ldev_num;				/* Bytes 20-23 */
+	unsigned char raid_level;			/* Byte 24 */
+	enum {
+		MYRB_BGI_INVALID =	0x00,
+		MYRB_BGI_STARTED =	0x02,
+		MYRB_BGI_INPROGRESS =	0x04,
+		MYRB_BGI_SUSPENDED =	0x05,
+		MYRB_BGI_CANCELLED =	0x06
+	} __packed status;		/* Byte 25 */
+	unsigned char rsvd2[6];				/* Bytes 26-31 */
+};
+
+/*
+ * DAC960 V1 Firmware Error Table Entry structure.
+ */
+struct myrb_error_entry {
+	unsigned char parity_err;			/* Byte 0 */
+	unsigned char soft_err;				/* Byte 1 */
+	unsigned char hard_err;				/* Byte 2 */
+	unsigned char misc_err;				/* Byte 3 */
+};
+
+/*
+ * DAC960 V1 Firmware Read Config2 Command reply structure.
+ */
+struct myrb_config2 {
+	unsigned rsvd1:1;				/* Byte 0 Bit 0 */
+	unsigned active_negation:1;			/* Byte 0 Bit 1 */
+	unsigned rsvd2:5;				/* Byte 0 Bits 2-6 */
+	unsigned no_rescan_on_reset_during_scan:1;	/* Byte 0 Bit 7 */
+	unsigned StorageWorks_support:1;		/* Byte 1 Bit 0 */
+	unsigned HewlettPackard_support:1;		/* Byte 1 Bit 1 */
+	unsigned no_disconnect_on_first_command:1;	/* Byte 1 Bit 2 */
+	unsigned rsvd3:2;				/* Byte 1 Bits 3-4 */
+	unsigned AEMI_ARM:1;				/* Byte 1 Bit 5 */
+	unsigned AEMI_OFM:1;				/* Byte 1 Bit 6 */
+	unsigned rsvd4:1;				/* Byte 1 Bit 7 */
+	enum {
+		MYRB_OEMID_MYLEX =		0x00,
+		MYRB_OEMID_IBM =		0x08,
+		MYRB_OEMID_HP =			0x0A,
+		MYRB_OEMID_DEC =		0x0C,
+		MYRB_OEMID_SIEMENS =		0x10,
+		MYRB_OEMID_INTEL =		0x12
+	} __packed OEMID;		/* Byte 2 */
+	unsigned char oem_model_number;			/* Byte 3 */
+	unsigned char physical_sector;			/* Byte 4 */
+	unsigned char logical_sector;			/* Byte 5 */
+	unsigned char block_factor;			/* Byte 6 */
+	unsigned readahead_enabled:1;			/* Byte 7 Bit 0 */
+	unsigned low_BIOS_delay:1;			/* Byte 7 Bit 1 */
+	unsigned rsvd5:2;				/* Byte 7 Bits 2-3 */
+	unsigned restrict_reassign_to_one_sector:1;	/* Byte 7 Bit 4 */
+	unsigned rsvd6:1;				/* Byte 7 Bit 5 */
+	unsigned FUA_during_write_recovery:1;		/* Byte 7 Bit 6 */
+	unsigned enable_LeftSymmetricRAID5Algorithm:1;	/* Byte 7 Bit 7 */
+	unsigned char default_rebuild_rate;		/* Byte 8 */
+	unsigned char rsvd7;				/* Byte 9 */
+	unsigned char blocks_per_cacheline;		/* Byte 10 */
+	unsigned char blocks_per_stripe;		/* Byte 11 */
+	struct {
+		enum {
+			MYRB_SPEED_ASYNC =		0x0,
+			MYRB_SPEED_SYNC_8MHz =		0x1,
+			MYRB_SPEED_SYNC_5MHz =		0x2,
+			MYRB_SPEED_SYNC_10_OR_20MHz =	0x3
+		} __packed speed:2;	/* Bits 0-1 of each channel byte */
+		unsigned force_8bit:1;			/* Bit 2 */
+		unsigned disable_fast20:1;		/* Bit 3 */
+		unsigned rsvd8:3;			/* Bits 4-6 */
+		unsigned enable_tcq:1;			/* Bit 7 */
+	} __packed channelparam[6];	/* Bytes 12-17 */
+	unsigned char SCSIInitiatorID;			/* Byte 18 */
+	unsigned char rsvd9;				/* Byte 19 */
+	enum {
+		MYRB_STARTUP_CONTROLLER_SPINUP =	0x00,
+		MYRB_STARTUP_POWERON_SPINUP =		0x01
+	} __packed startup;		/* Byte 20 */
+	unsigned char simultaneous_device_spinup_count;	/* Byte 21 */
+	unsigned char seconds_delay_between_spinups;	/* Byte 22 */
+	unsigned char rsvd10[29];			/* Bytes 23-51 */
+	unsigned BIOS_disabled:1;			/* Byte 52 Bit 0 */
+	unsigned CDROM_boot_enabled:1;			/* Byte 52 Bit 1 */
+	unsigned rsvd11:3;				/* Byte 52 Bits 2-4 */
+	enum {
+		MYRB_GEOM_128_32 =		0x0,
+		MYRB_GEOM_255_63 =		0x1,
+		MYRB_GEOM_RESERVED1 =		0x2,
+		MYRB_GEOM_RESERVED2 =		0x3
+	} __packed drive_geometry:2;	/* Byte 52 Bits 5-6 */
+	unsigned rsvd12:1;				/* Byte 52 Bit 7 */
+	unsigned char rsvd13[9];			/* Bytes 53-61 */
+	unsigned short csum;				/* Bytes 62-63 */
+};
+
+/*
+ * DAC960 V1 Firmware DCDB request structure.
+ */
+struct myrb_dcdb {
+	unsigned target:4;				 /* Byte 0 Bits 0-3 */
+	unsigned channel:4;				 /* Byte 0 Bits 4-7 */
+	enum {
+		MYRB_DCDB_XFER_NONE =		0,
+		MYRB_DCDB_XFER_DEVICE_TO_SYSTEM = 1,
+		MYRB_DCDB_XFER_SYSTEM_TO_DEVICE = 2,
+		MYRB_DCDB_XFER_ILLEGAL =	3
+	} __packed data_xfer:2;				/* Byte 1 Bits 0-1 */
+	unsigned early_status:1;			/* Byte 1 Bit 2 */
+	unsigned rsvd1:1;				/* Byte 1 Bit 3 */
+	enum {
+		MYRB_DCDB_TMO_24_HRS =	0,
+		MYRB_DCDB_TMO_10_SECS =	1,
+		MYRB_DCDB_TMO_60_SECS =	2,
+		MYRB_DCDB_TMO_10_MINS =	3
+	} __packed timeout:2;				/* Byte 1 Bits 4-5 */
+	unsigned no_autosense:1;			/* Byte 1 Bit 6 */
+	unsigned allow_disconnect:1;			/* Byte 1 Bit 7 */
+	unsigned short xfer_len_lo;			/* Bytes 2-3 */
+	u32 dma_addr;					/* Bytes 4-7 */
+	unsigned char cdb_len:4;			/* Byte 8 Bits 0-3 */
+	unsigned char xfer_len_hi4:4;			/* Byte 8 Bits 4-7 */
+	unsigned char sense_len;			/* Byte 9 */
+	unsigned char cdb[12];				/* Bytes 10-21 */
+	unsigned char sense[64];			/* Bytes 22-85 */
+	unsigned char status;				/* Byte 86 */
+	unsigned char rsvd2;				/* Byte 87 */
+};
+
+/*
+ * DAC960 V1 Firmware Scatter/Gather List Type 1: 32 Bit Address and
+ * 32 Bit Byte Count structure.
+ */
+struct myrb_sge {
+	u32 sge_addr;		/* Bytes 0-3 */
+	u32 sge_count;		/* Bytes 4-7 */
+};
+
+/*
+ * 13 Byte DAC960 V1 Firmware Command Mailbox structure.
+ * Bytes 13-15 are not used.  The structure is padded to 16 bytes for
+ * efficient access.
+ */
+union myrb_cmd_mbox {
+	unsigned int words[4];				/* Words 0-3 */
+	unsigned char bytes[16];			/* Bytes 0-15 */
+	struct {
+		enum myrb_cmd_opcode opcode;		/* Byte 0 */
+		unsigned char id;			/* Byte 1 */
+		unsigned char rsvd[14];			/* Bytes 2-15 */
+	} __packed common;
+	struct {
+		enum myrb_cmd_opcode opcode;		/* Byte 0 */
+		unsigned char id;			/* Byte 1 */
+		unsigned char rsvd1[6];			/* Bytes 2-7 */
+		u32 addr;				/* Bytes 8-11 */
+		unsigned char rsvd2[4];			/* Bytes 12-15 */
+	} __packed type3;
+	struct {
+		enum myrb_cmd_opcode opcode;		/* Byte 0 */
+		unsigned char id;			/* Byte 1 */
+		unsigned char optype;			/* Byte 2 */
+		unsigned char rsvd1[5];			/* Bytes 3-7 */
+		u32 addr;				/* Bytes 8-11 */
+		unsigned char rsvd2[4];			/* Bytes 12-15 */
+	} __packed type3B;
+	struct {
+		enum myrb_cmd_opcode opcode;		/* Byte 0 */
+		unsigned char id;			/* Byte 1 */
+		unsigned char rsvd1[5];			/* Bytes 2-6 */
+		unsigned char ldev_num:6;		/* Byte 7 Bits 0-6 */
+		unsigned char auto_restore:1;		/* Byte 7 Bit 7 */
+		unsigned char rsvd2[8];			/* Bytes 8-15 */
+	} __packed type3C;
+	struct {
+		enum myrb_cmd_opcode opcode;		/* Byte 0 */
+		unsigned char id;			/* Byte 1 */
+		unsigned char channel;			/* Byte 2 */
+		unsigned char target;			/* Byte 3 */
+		enum myrb_devstate state;		/* Byte 4 */
+		unsigned char rsvd1[3];			/* Bytes 5-7 */
+		u32 addr;				/* Bytes 8-11 */
+		unsigned char rsvd2[4];			/* Bytes 12-15 */
+	} __packed type3D;
+	struct {
+		enum myrb_cmd_opcode opcode;		/* Byte 0 */
+		unsigned char id;			/* Byte 1 */
+		unsigned char optype;			/* Byte 2 */
+		unsigned char opqual;			/* Byte 3 */
+		unsigned short ev_seq;			/* Bytes 4-5 */
+		unsigned char rsvd1[2];			/* Bytes 6-7 */
+		u32 addr;				/* Bytes 8-11 */
+		unsigned char rsvd2[4];			/* Bytes 12-15 */
+	} __packed type3E;
+	struct {
+		enum myrb_cmd_opcode opcode;		/* Byte 0 */
+		unsigned char id;			/* Byte 1 */
+		unsigned char rsvd1[2];			/* Bytes 2-3 */
+		unsigned char rbld_rate;		/* Byte 4 */
+		unsigned char rsvd2[3];			/* Bytes 5-7 */
+		u32 addr;				/* Bytes 8-11 */
+		unsigned char rsvd3[4];			/* Bytes 12-15 */
+	} __packed type3R;
+	struct {
+		enum myrb_cmd_opcode opcode;		/* Byte 0 */
+		unsigned char id;			/* Byte 1 */
+		unsigned short xfer_len;		/* Bytes 2-3 */
+		unsigned int lba;			/* Bytes 4-7 */
+		u32 addr;				/* Bytes 8-11 */
+		unsigned char ldev_num;			/* Byte 12 */
+		unsigned char rsvd[3];			/* Bytes 13-15 */
+	} __packed type4;
+	struct {
+		enum myrb_cmd_opcode opcode;		/* Byte 0 */
+		unsigned char id;			/* Byte 1 */
+		struct {
+			unsigned short xfer_len:11;	/* Bytes 2-3 */
+			unsigned char ldev_num:5;	/* Byte 3 Bits 3-7 */
+		} __packed ld;
+		unsigned int lba;			/* Bytes 4-7 */
+		u32 addr;				/* Bytes 8-11 */
+		unsigned char sg_count:6;		/* Byte 12 Bits 0-5 */
+		enum {
+			MYRB_SGL_ADDR32_COUNT32 = 0x0,
+			MYRB_SGL_ADDR32_COUNT16 = 0x1,
+			MYRB_SGL_COUNT32_ADDR32 = 0x2,
+			MYRB_SGL_COUNT16_ADDR32 = 0x3
+		} __packed sg_type:2;	/* Byte 12 Bits 6-7 */
+		unsigned char rsvd[3];			/* Bytes 13-15 */
+	} __packed type5;
+	struct {
+		enum myrb_cmd_opcode opcode;		/* Byte 0 */
+		unsigned char id;			/* Byte 1 */
+		unsigned char opcode2;			/* Byte 2 */
+		unsigned char rsvd1:8;			/* Byte 3 */
+		u32 cmd_mbox_addr;			/* Bytes 4-7 */
+		u32 stat_mbox_addr;			/* Bytes 8-11 */
+		unsigned char rsvd2[4];			/* Bytes 12-15 */
+	} __packed typeX;
+};
+
+/*
+ * DAC960 V1 Firmware Controller Status Mailbox structure.
+ */
+struct myrb_stat_mbox {
+	unsigned char id;		/* Byte 0 */
+	unsigned char rsvd:7;		/* Byte 1 Bits 0-6 */
+	unsigned char valid:1;			/* Byte 1 Bit 7 */
+	unsigned short status;		/* Bytes 2-3 */
+};
+
+struct myrb_cmdblk {
+	union myrb_cmd_mbox mbox;
+	unsigned short status;
+	struct completion *completion;
+	struct myrb_dcdb *dcdb;
+	dma_addr_t dcdb_addr;
+	struct myrb_sge *sgl;
+	dma_addr_t sgl_addr;
+};
+
+struct myrb_hba {
+	unsigned int ldev_block_size;
+	unsigned char ldev_geom_heads;
+	unsigned char ldev_geom_sectors;
+	unsigned char bus_width;
+	unsigned short stripe_size;
+	unsigned short segment_size;
+	unsigned short new_ev_seq;
+	unsigned short old_ev_seq;
+	bool dual_mode_interface;
+	bool bgi_status_supported;
+	bool safte_enabled;
+	bool need_ldev_info;
+	bool need_err_info;
+	bool need_rbld;
+	bool need_cc_status;
+	bool need_bgi_status;
+	bool rbld_first;
+
+	struct pci_dev *pdev;
+	struct Scsi_Host *host;
+
+	struct workqueue_struct *work_q;
+	char work_q_name[20];
+	struct delayed_work monitor_work;
+	unsigned long primary_monitor_time;
+	unsigned long secondary_monitor_time;
+
+	struct dma_pool *sg_pool;
+	struct dma_pool *dcdb_pool;
+
+	spinlock_t queue_lock;
+
+	void (*qcmd)(struct myrb_hba *cs, struct myrb_cmdblk *cmd_blk);
+	void (*write_cmd_mbox)(union myrb_cmd_mbox *next_mbox,
+			       union myrb_cmd_mbox *cmd_mbox);
+	void (*get_cmd_mbox)(void __iomem *base);
+	void (*disable_intr)(void __iomem *base);
+	void (*reset)(void __iomem *base);
+
+	unsigned int ctlr_num;
+	unsigned char model_name[20];
+	unsigned char fw_version[12];
+
+	unsigned int irq;
+	phys_addr_t io_addr;
+	phys_addr_t pci_addr;
+	void __iomem *io_base;
+	void __iomem *mmio_base;
+
+	size_t cmd_mbox_size;
+	dma_addr_t cmd_mbox_addr;
+	union myrb_cmd_mbox *first_cmd_mbox;
+	union myrb_cmd_mbox *last_cmd_mbox;
+	union myrb_cmd_mbox *next_cmd_mbox;
+	union myrb_cmd_mbox *prev_cmd_mbox1;
+	union myrb_cmd_mbox *prev_cmd_mbox2;
+
+	size_t stat_mbox_size;
+	dma_addr_t stat_mbox_addr;
+	struct myrb_stat_mbox *first_stat_mbox;
+	struct myrb_stat_mbox *last_stat_mbox;
+	struct myrb_stat_mbox *next_stat_mbox;
+
+	struct myrb_cmdblk dcmd_blk;
+	struct myrb_cmdblk mcmd_blk;
+	struct mutex dcmd_mutex;
+
+	struct myrb_enquiry *enquiry;
+	dma_addr_t enquiry_addr;
+
+	struct myrb_error_entry *err_table;
+	dma_addr_t err_table_addr;
+
+	unsigned short last_rbld_status;
+
+	struct myrb_ldev_info *ldev_info_buf;
+	dma_addr_t ldev_info_addr;
+
+	struct myrb_bgi_status bgi_status;
+
+	struct mutex dma_mutex;
+};
+
+/*
+ * DAC960 LA Series Controller Interface Register Offsets.
+ */
+#define DAC960_LA_mmio_size		0x80
+
+enum DAC960_LA_reg_offset {
+	DAC960_LA_IRQMASK_OFFSET	= 0x34,
+	DAC960_LA_CMDOP_OFFSET		= 0x50,
+	DAC960_LA_CMDID_OFFSET		= 0x51,
+	DAC960_LA_MBOX2_OFFSET		= 0x52,
+	DAC960_LA_MBOX3_OFFSET		= 0x53,
+	DAC960_LA_MBOX4_OFFSET		= 0x54,
+	DAC960_LA_MBOX5_OFFSET		= 0x55,
+	DAC960_LA_MBOX6_OFFSET		= 0x56,
+	DAC960_LA_MBOX7_OFFSET		= 0x57,
+	DAC960_LA_MBOX8_OFFSET		= 0x58,
+	DAC960_LA_MBOX9_OFFSET		= 0x59,
+	DAC960_LA_MBOX10_OFFSET		= 0x5A,
+	DAC960_LA_MBOX11_OFFSET		= 0x5B,
+	DAC960_LA_MBOX12_OFFSET		= 0x5C,
+	DAC960_LA_STSID_OFFSET		= 0x5D,
+	DAC960_LA_STS_OFFSET		= 0x5E,
+	DAC960_LA_IDB_OFFSET		= 0x60,
+	DAC960_LA_ODB_OFFSET		= 0x61,
+	DAC960_LA_ERRSTS_OFFSET		= 0x63,
+};
+
+/*
+ * DAC960 LA Series Inbound Door Bell Register.
+ */
+#define DAC960_LA_IDB_HWMBOX_NEW_CMD 0x01
+#define DAC960_LA_IDB_HWMBOX_ACK_STS 0x02
+#define DAC960_LA_IDB_GEN_IRQ 0x04
+#define DAC960_LA_IDB_CTRL_RESET 0x08
+#define DAC960_LA_IDB_MMBOX_NEW_CMD 0x10
+
+#define DAC960_LA_IDB_HWMBOX_EMPTY 0x01
+#define DAC960_LA_IDB_INIT_DONE 0x02
+
+/*
+ * DAC960 LA Series Outbound Door Bell Register.
+ */
+#define DAC960_LA_ODB_HWMBOX_ACK_IRQ 0x01
+#define DAC960_LA_ODB_MMBOX_ACK_IRQ 0x02
+#define DAC960_LA_ODB_HWMBOX_STS_AVAIL 0x01
+#define DAC960_LA_ODB_MMBOX_STS_AVAIL 0x02
+
+/*
+ * DAC960 LA Series Interrupt Mask Register.
+ */
+#define DAC960_LA_IRQMASK_DISABLE_IRQ 0x04
+
+/*
+ * DAC960 LA Series Error Status Register.
+ */
+#define DAC960_LA_ERRSTS_PENDING 0x02
+
+/*
+ * DAC960 PG Series Controller Interface Register Offsets.
+ */
+#define DAC960_PG_mmio_size		0x2000
+
+enum DAC960_PG_reg_offset {
+	DAC960_PG_IDB_OFFSET		= 0x0020,
+	DAC960_PG_ODB_OFFSET		= 0x002C,
+	DAC960_PG_IRQMASK_OFFSET	= 0x0034,
+	DAC960_PG_CMDOP_OFFSET		= 0x1000,
+	DAC960_PG_CMDID_OFFSET		= 0x1001,
+	DAC960_PG_MBOX2_OFFSET		= 0x1002,
+	DAC960_PG_MBOX3_OFFSET		= 0x1003,
+	DAC960_PG_MBOX4_OFFSET		= 0x1004,
+	DAC960_PG_MBOX5_OFFSET		= 0x1005,
+	DAC960_PG_MBOX6_OFFSET		= 0x1006,
+	DAC960_PG_MBOX7_OFFSET		= 0x1007,
+	DAC960_PG_MBOX8_OFFSET		= 0x1008,
+	DAC960_PG_MBOX9_OFFSET		= 0x1009,
+	DAC960_PG_MBOX10_OFFSET		= 0x100A,
+	DAC960_PG_MBOX11_OFFSET		= 0x100B,
+	DAC960_PG_MBOX12_OFFSET		= 0x100C,
+	DAC960_PG_STSID_OFFSET		= 0x1018,
+	DAC960_PG_STS_OFFSET		= 0x101A,
+	DAC960_PG_ERRSTS_OFFSET		= 0x103F,
+};
+
+/*
+ * DAC960 PG Series Inbound Door Bell Register.
+ */
+#define DAC960_PG_IDB_HWMBOX_NEW_CMD 0x01
+#define DAC960_PG_IDB_HWMBOX_ACK_STS 0x02
+#define DAC960_PG_IDB_GEN_IRQ 0x04
+#define DAC960_PG_IDB_CTRL_RESET 0x08
+#define DAC960_PG_IDB_MMBOX_NEW_CMD 0x10
+
+#define DAC960_PG_IDB_HWMBOX_FULL 0x01
+#define DAC960_PG_IDB_INIT_IN_PROGRESS 0x02
+
+/*
+ * DAC960 PG Series Outbound Door Bell Register.
+ */
+#define DAC960_PG_ODB_HWMBOX_ACK_IRQ 0x01
+#define DAC960_PG_ODB_MMBOX_ACK_IRQ 0x02
+#define DAC960_PG_ODB_HWMBOX_STS_AVAIL 0x01
+#define DAC960_PG_ODB_MMBOX_STS_AVAIL 0x02
+
+/*
+ * DAC960 PG Series Interrupt Mask Register.
+ */
+#define DAC960_PG_IRQMASK_MSI_MASK1 0x03
+#define DAC960_PG_IRQMASK_DISABLE_IRQ 0x04
+#define DAC960_PG_IRQMASK_MSI_MASK2 0xF8
+
+/*
+ * DAC960 PG Series Error Status Register.
+ */
+#define DAC960_PG_ERRSTS_PENDING 0x04
+
+/*
+ * DAC960 PD Series Controller Interface Register Offsets.
+ */
+#define DAC960_PD_mmio_size		0x80
+
+enum DAC960_PD_reg_offset {
+	DAC960_PD_CMDOP_OFFSET		= 0x00,
+	DAC960_PD_CMDID_OFFSET		= 0x01,
+	DAC960_PD_MBOX2_OFFSET		= 0x02,
+	DAC960_PD_MBOX3_OFFSET		= 0x03,
+	DAC960_PD_MBOX4_OFFSET		= 0x04,
+	DAC960_PD_MBOX5_OFFSET		= 0x05,
+	DAC960_PD_MBOX6_OFFSET		= 0x06,
+	DAC960_PD_MBOX7_OFFSET		= 0x07,
+	DAC960_PD_MBOX8_OFFSET		= 0x08,
+	DAC960_PD_MBOX9_OFFSET		= 0x09,
+	DAC960_PD_MBOX10_OFFSET		= 0x0A,
+	DAC960_PD_MBOX11_OFFSET		= 0x0B,
+	DAC960_PD_MBOX12_OFFSET		= 0x0C,
+	DAC960_PD_STSID_OFFSET		= 0x0D,
+	DAC960_PD_STS_OFFSET		= 0x0E,
+	DAC960_PD_ERRSTS_OFFSET		= 0x3F,
+	DAC960_PD_IDB_OFFSET		= 0x40,
+	DAC960_PD_ODB_OFFSET		= 0x41,
+	DAC960_PD_IRQEN_OFFSET		= 0x43,
+};
+
+/*
+ * DAC960 PD Series Inbound Door Bell Register.
+ */
+#define DAC960_PD_IDB_HWMBOX_NEW_CMD 0x01
+#define DAC960_PD_IDB_HWMBOX_ACK_STS 0x02
+#define DAC960_PD_IDB_GEN_IRQ 0x04
+#define DAC960_PD_IDB_CTRL_RESET 0x08
+
+#define DAC960_PD_IDB_HWMBOX_FULL 0x01
+#define DAC960_PD_IDB_INIT_IN_PROGRESS 0x02
+
+/*
+ * DAC960 PD Series Outbound Door Bell Register.
+ */
+#define DAC960_PD_ODB_HWMBOX_ACK_IRQ 0x01
+#define DAC960_PD_ODB_HWMBOX_STS_AVAIL 0x01
+
+/*
+ * DAC960 PD Series Interrupt Enable Register.
+ */
+#define DAC960_PD_IRQMASK_ENABLE_IRQ 0x01
+
+/*
+ * DAC960 PD Series Error Status Register.
+ */
+#define DAC960_PD_ERRSTS_PENDING 0x04
+
+typedef int (*myrb_hw_init_t)(struct pci_dev *pdev,
+			      struct myrb_hba *cb, void __iomem *base);
+typedef unsigned short (*mbox_mmio_init_t)(struct pci_dev *pdev,
+					   void __iomem *base,
+					   union myrb_cmd_mbox *mbox);
+
+struct myrb_privdata {
+	myrb_hw_init_t		hw_init;
+	irq_handler_t		irq_handler;
+	unsigned int		mmio_size;
+};
+
+#endif /* MYRB_H */
diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c
new file mode 100644
index 0000000..eb0dd56
--- /dev/null
+++ b/drivers/scsi/myrs.c
@@ -0,0 +1,3271 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
+ *
+ * This driver supports the newer, SCSI-based firmware interface only.
+ *
+ * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
+ *
+ * Based on the original DAC960 driver, which has
+ * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
+ * Portions Copyright 2002 by Mylex (An IBM Business Unit)
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/raid_class.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_tcq.h>
+#include "myrs.h"
+
+static struct raid_template *myrs_raid_template;
+
+static struct myrs_devstate_name_entry {
+	enum myrs_devstate state;
+	char *name;
+} myrs_devstate_name_list[] = {
+	{ MYRS_DEVICE_UNCONFIGURED, "Unconfigured" },
+	{ MYRS_DEVICE_ONLINE, "Online" },
+	{ MYRS_DEVICE_REBUILD, "Rebuild" },
+	{ MYRS_DEVICE_MISSING, "Missing" },
+	{ MYRS_DEVICE_SUSPECTED_CRITICAL, "SuspectedCritical" },
+	{ MYRS_DEVICE_OFFLINE, "Offline" },
+	{ MYRS_DEVICE_CRITICAL, "Critical" },
+	{ MYRS_DEVICE_SUSPECTED_DEAD, "SuspectedDead" },
+	{ MYRS_DEVICE_COMMANDED_OFFLINE, "CommandedOffline" },
+	{ MYRS_DEVICE_STANDBY, "Standby" },
+	{ MYRS_DEVICE_INVALID_STATE, "Invalid" },
+};
+
+static char *myrs_devstate_name(enum myrs_devstate state)
+{
+	struct myrs_devstate_name_entry *entry = myrs_devstate_name_list;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(myrs_devstate_name_list); i++) {
+		if (entry[i].state == state)
+			return entry[i].name;
+	}
+	return NULL;
+}
+
+static struct myrs_raid_level_name_entry {
+	enum myrs_raid_level level;
+	char *name;
+} myrs_raid_level_name_list[] = {
+	{ MYRS_RAID_LEVEL0, "RAID0" },
+	{ MYRS_RAID_LEVEL1, "RAID1" },
+	{ MYRS_RAID_LEVEL3, "RAID3 right asymmetric parity" },
+	{ MYRS_RAID_LEVEL5, "RAID5 right asymmetric parity" },
+	{ MYRS_RAID_LEVEL6, "RAID6" },
+	{ MYRS_RAID_JBOD, "JBOD" },
+	{ MYRS_RAID_NEWSPAN, "New Mylex SPAN" },
+	{ MYRS_RAID_LEVEL3F, "RAID3 fixed parity" },
+	{ MYRS_RAID_LEVEL3L, "RAID3 left symmetric parity" },
+	{ MYRS_RAID_SPAN, "Mylex SPAN" },
+	{ MYRS_RAID_LEVEL5L, "RAID5 left symmetric parity" },
+	{ MYRS_RAID_LEVELE, "RAIDE (concatenation)" },
+	{ MYRS_RAID_PHYSICAL, "Physical device" },
+};
+
+static char *myrs_raid_level_name(enum myrs_raid_level level)
+{
+	struct myrs_raid_level_name_entry *entry = myrs_raid_level_name_list;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(myrs_raid_level_name_list); i++) {
+		if (entry[i].level == level)
+			return entry[i].name;
+	}
+	return NULL;
+}
+
+/**
+ * myrs_reset_cmd - clears critical fields in struct myrs_cmdblk
+ */
+static inline void myrs_reset_cmd(struct myrs_cmdblk *cmd_blk)
+{
+	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
+
+	memset(mbox, 0, sizeof(union myrs_cmd_mbox));
+	cmd_blk->status = 0;
+}
+
+/**
+ * myrs_qcmd - queues Command for DAC960 V2 Series Controllers.
+ */
+static void myrs_qcmd(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk)
+{
+	void __iomem *base = cs->io_base;
+	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
+	union myrs_cmd_mbox *next_mbox = cs->next_cmd_mbox;
+
+	cs->write_cmd_mbox(next_mbox, mbox);
+
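+	/*
+	 * The controller scans the memory mailbox ring on its own while
+	 * active; ring the doorbell only when one of the two previously
+	 * posted slots has already been consumed (words[0] cleared),
+	 * presumably because the controller may have gone idle.
+	 */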
+	if (cs->prev_cmd_mbox1->words[0] == 0 ||
+	    cs->prev_cmd_mbox2->words[0] == 0)
+		cs->get_cmd_mbox(base);
+
+	cs->prev_cmd_mbox2 = cs->prev_cmd_mbox1;
+	cs->prev_cmd_mbox1 = next_mbox;
+
+	if (++next_mbox > cs->last_cmd_mbox)
+		next_mbox = cs->first_cmd_mbox;
+
+	cs->next_cmd_mbox = next_mbox;
+}
+
+/**
+ * myrs_exec_cmd - executes V2 Command and waits for completion.
+ */
+static void myrs_exec_cmd(struct myrs_hba *cs,
+		struct myrs_cmdblk *cmd_blk)
+{
+	DECLARE_COMPLETION_ONSTACK(complete);
+	unsigned long flags;
+
+	cmd_blk->complete = &complete;
+	spin_lock_irqsave(&cs->queue_lock, flags);
+	myrs_qcmd(cs, cmd_blk);
+	spin_unlock_irqrestore(&cs->queue_lock, flags);
+
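+	/* Waiting on the completion sleeps; this must never run in interrupt context. */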
+	WARN_ON(in_interrupt());
+	wait_for_completion(&complete);
+}
+
+/**
+ * myrs_report_progress - prints progress message
+ */
+static void myrs_report_progress(struct myrs_hba *cs, unsigned short ldev_num,
+		unsigned char *msg, unsigned long blocks,
+		unsigned long size)
+{
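+	/* Scale both operands down so that 100 * blocks stays within an int. */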
+	shost_printk(KERN_INFO, cs->host,
+		     "Logical Drive %d: %s in Progress: %d%% completed\n",
+		     ldev_num, msg,
+		     (100 * (int)(blocks >> 7)) / (int)(size >> 7));
+}
+
+/**
+ * myrs_get_ctlr_info - executes a Controller Information IOCTL Command
+ */
+static unsigned char myrs_get_ctlr_info(struct myrs_hba *cs)
+{
+	struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
+	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
+	dma_addr_t ctlr_info_addr;
+	union myrs_sgl *sgl;
+	unsigned char status;
+	unsigned short ldev_present, ldev_critical, ldev_offline;
+
+	ldev_present = cs->ctlr_info->ldev_present;
+	ldev_critical = cs->ctlr_info->ldev_critical;
+	ldev_offline = cs->ctlr_info->ldev_offline;
+
+	ctlr_info_addr = dma_map_single(&cs->pdev->dev, cs->ctlr_info,
+					sizeof(struct myrs_ctlr_info),
+					DMA_FROM_DEVICE);
+	if (dma_mapping_error(&cs->pdev->dev, ctlr_info_addr))
+		return MYRS_STATUS_FAILED;
+
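+	/* The direct command block is shared; dcmd_mutex serializes its users. */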
+	mutex_lock(&cs->dcmd_mutex);
+	myrs_reset_cmd(cmd_blk);
+	mbox->ctlr_info.id = MYRS_DCMD_TAG;
+	mbox->ctlr_info.opcode = MYRS_CMD_OP_IOCTL;
+	mbox->ctlr_info.control.dma_ctrl_to_host = true;
+	mbox->ctlr_info.control.no_autosense = true;
+	mbox->ctlr_info.dma_size = sizeof(struct myrs_ctlr_info);
+	mbox->ctlr_info.ctlr_num = 0;
+	mbox->ctlr_info.ioctl_opcode = MYRS_IOCTL_GET_CTLR_INFO;
+	sgl = &mbox->ctlr_info.dma_addr;
+	sgl->sge[0].sge_addr = ctlr_info_addr;
+	sgl->sge[0].sge_count = mbox->ctlr_info.dma_size;
+	dev_dbg(&cs->host->shost_gendev, "Sending GetControllerInfo\n");
+	myrs_exec_cmd(cs, cmd_blk);
+	status = cmd_blk->status;
+	mutex_unlock(&cs->dcmd_mutex);
+	dma_unmap_single(&cs->pdev->dev, ctlr_info_addr,
+			 sizeof(struct myrs_ctlr_info), DMA_FROM_DEVICE);
+	if (status == MYRS_STATUS_SUCCESS) {
+		if (cs->ctlr_info->bg_init_active +
+		    cs->ctlr_info->ldev_init_active +
+		    cs->ctlr_info->pdev_init_active +
+		    cs->ctlr_info->cc_active +
+		    cs->ctlr_info->rbld_active +
+		    cs->ctlr_info->exp_active != 0)
+			cs->needs_update = true;
+		if (cs->ctlr_info->ldev_present != ldev_present ||
+		    cs->ctlr_info->ldev_critical != ldev_critical ||
+		    cs->ctlr_info->ldev_offline != ldev_offline)
+			shost_printk(KERN_INFO, cs->host,
+				     "Logical drive count changes (%d/%d/%d)\n",
+				     cs->ctlr_info->ldev_critical,
+				     cs->ctlr_info->ldev_offline,
+				     cs->ctlr_info->ldev_present);
+	}
+
+	return status;
+}
+
+/**
+ * myrs_get_ldev_info - executes a Logical Device Information IOCTL Command
+ */
+static unsigned char myrs_get_ldev_info(struct myrs_hba *cs,
+		unsigned short ldev_num, struct myrs_ldev_info *ldev_info)
+{
+	struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
+	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
+	dma_addr_t ldev_info_addr;
+	struct myrs_ldev_info ldev_info_orig;
+	union myrs_sgl *sgl;
+	unsigned char status;
+
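+	/* Snapshot the previous state so transitions can be logged below. */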
+	memcpy(&ldev_info_orig, ldev_info, sizeof(struct myrs_ldev_info));
+	ldev_info_addr = dma_map_single(&cs->pdev->dev, ldev_info,
+					sizeof(struct myrs_ldev_info),
+					DMA_FROM_DEVICE);
+	if (dma_mapping_error(&cs->pdev->dev, ldev_info_addr))
+		return MYRS_STATUS_FAILED;
+
+	mutex_lock(&cs->dcmd_mutex);
+	myrs_reset_cmd(cmd_blk);
+	mbox->ldev_info.id = MYRS_DCMD_TAG;
+	mbox->ldev_info.opcode = MYRS_CMD_OP_IOCTL;
+	mbox->ldev_info.control.dma_ctrl_to_host = true;
+	mbox->ldev_info.control.no_autosense = true;
+	mbox->ldev_info.dma_size = sizeof(struct myrs_ldev_info);
+	mbox->ldev_info.ldev.ldev_num = ldev_num;
+	mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_GET_LDEV_INFO_VALID;
+	sgl = &mbox->ldev_info.dma_addr;
+	sgl->sge[0].sge_addr = ldev_info_addr;
+	sgl->sge[0].sge_count = mbox->ldev_info.dma_size;
+	dev_dbg(&cs->host->shost_gendev,
+		"Sending GetLogicalDeviceInfoValid for ldev %d\n", ldev_num);
+	myrs_exec_cmd(cs, cmd_blk);
+	status = cmd_blk->status;
+	mutex_unlock(&cs->dcmd_mutex);
+	dma_unmap_single(&cs->pdev->dev, ldev_info_addr,
+			 sizeof(struct myrs_ldev_info), DMA_FROM_DEVICE);
+	if (status == MYRS_STATUS_SUCCESS) {
+		unsigned short ldev_num = ldev_info->ldev_num;
+		struct myrs_ldev_info *new = ldev_info;
+		struct myrs_ldev_info *old = &ldev_info_orig;
+		unsigned long ldev_size = new->cfg_devsize;
+
+		if (new->dev_state != old->dev_state) {
+			const char *name;
+
+			name = myrs_devstate_name(new->dev_state);
+			shost_printk(KERN_INFO, cs->host,
+				     "Logical Drive %d is now %s\n",
+				     ldev_num, name ? name : "Invalid");
+		}
+		if ((new->soft_errs != old->soft_errs) ||
+		    (new->cmds_failed != old->cmds_failed) ||
+		    (new->deferred_write_errs != old->deferred_write_errs))
+			shost_printk(KERN_INFO, cs->host,
+				     "Logical Drive %d Errors: Soft = %d, Failed = %d, Deferred Write = %d\n",
+				     ldev_num, new->soft_errs,
+				     new->cmds_failed,
+				     new->deferred_write_errs);
+		if (new->bg_init_active)
+			myrs_report_progress(cs, ldev_num,
+					     "Background Initialization",
+					     new->bg_init_lba, ldev_size);
+		else if (new->fg_init_active)
+			myrs_report_progress(cs, ldev_num,
+					     "Foreground Initialization",
+					     new->fg_init_lba, ldev_size);
+		else if (new->migration_active)
+			myrs_report_progress(cs, ldev_num,
+					     "Data Migration",
+					     new->migration_lba, ldev_size);
+		else if (new->patrol_active)
+			myrs_report_progress(cs, ldev_num,
+					     "Patrol Operation",
+					     new->patrol_lba, ldev_size);
+		if (old->bg_init_active && !new->bg_init_active)
+			shost_printk(KERN_INFO, cs->host,
+				     "Logical Drive %d: Background Initialization %s\n",
+				     ldev_num,
+				     (new->ldev_control.ldev_init_done ?
+				      "Completed" : "Failed"));
+	}
+	return status;
+}
+
+/**
+ * myrs_get_pdev_info - executes a "Read Physical Device Information" Command
+ */
+static unsigned char myrs_get_pdev_info(struct myrs_hba *cs,
+		unsigned char channel, unsigned char target, unsigned char lun,
+		struct myrs_pdev_info *pdev_info)
+{
+	struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
+	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
+	dma_addr_t pdev_info_addr;
+	union myrs_sgl *sgl;
+	unsigned char status;
+
+	pdev_info_addr = dma_map_single(&cs->pdev->dev, pdev_info,
+					sizeof(struct myrs_pdev_info),
+					DMA_FROM_DEVICE);
+	if (dma_mapping_error(&cs->pdev->dev, pdev_info_addr))
+		return MYRS_STATUS_FAILED;
+
+	mutex_lock(&cs->dcmd_mutex);
+	myrs_reset_cmd(cmd_blk);
+	mbox->pdev_info.opcode = MYRS_CMD_OP_IOCTL;
+	mbox->pdev_info.id = MYRS_DCMD_TAG;
+	mbox->pdev_info.control.dma_ctrl_to_host = true;
+	mbox->pdev_info.control.no_autosense = true;
+	mbox->pdev_info.dma_size = sizeof(struct myrs_pdev_info);
+	mbox->pdev_info.pdev.lun = lun;
+	mbox->pdev_info.pdev.target = target;
+	mbox->pdev_info.pdev.channel = channel;
+	mbox->pdev_info.ioctl_opcode = MYRS_IOCTL_GET_PDEV_INFO_VALID;
+	sgl = &mbox->pdev_info.dma_addr;
+	sgl->sge[0].sge_addr = pdev_info_addr;
+	sgl->sge[0].sge_count = mbox->pdev_info.dma_size;
+	dev_dbg(&cs->host->shost_gendev,
+		"Sending GetPhysicalDeviceInfoValid for pdev %d:%d:%d\n",
+		channel, target, lun);
+	myrs_exec_cmd(cs, cmd_blk);
+	status = cmd_blk->status;
+	mutex_unlock(&cs->dcmd_mutex);
+	dma_unmap_single(&cs->pdev->dev, pdev_info_addr,
+			 sizeof(struct myrs_pdev_info), DMA_FROM_DEVICE);
+	return status;
+}
+
+/**
+ * myrs_dev_op - executes a "Device Operation" Command
+ */
+static unsigned char myrs_dev_op(struct myrs_hba *cs,
+		enum myrs_ioctl_opcode opcode, enum myrs_opdev opdev)
+{
+	struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
+	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
+	unsigned char status;
+
+	mutex_lock(&cs->dcmd_mutex);
+	myrs_reset_cmd(cmd_blk);
+	mbox->dev_op.opcode = MYRS_CMD_OP_IOCTL;
+	mbox->dev_op.id = MYRS_DCMD_TAG;
+	mbox->dev_op.control.dma_ctrl_to_host = true;
+	mbox->dev_op.control.no_autosense = true;
+	mbox->dev_op.ioctl_opcode = opcode;
+	mbox->dev_op.opdev = opdev;
+	myrs_exec_cmd(cs, cmd_blk);
+	status = cmd_blk->status;
+	mutex_unlock(&cs->dcmd_mutex);
+	return status;
+}
+
+/**
+ * myrs_translate_pdev - translates a Physical Device Channel and
+ * TargetID into a Logical Device.
+ */
+static unsigned char myrs_translate_pdev(struct myrs_hba *cs,
+		unsigned char channel, unsigned char target, unsigned char lun,
+		struct myrs_devmap *devmap)
+{
+	struct pci_dev *pdev = cs->pdev;
+	dma_addr_t devmap_addr;
+	struct myrs_cmdblk *cmd_blk;
+	union myrs_cmd_mbox *mbox;
+	union myrs_sgl *sgl;
+	unsigned char status;
+
+	memset(devmap, 0x0, sizeof(struct myrs_devmap));
+	devmap_addr = dma_map_single(&pdev->dev, devmap,
+				     sizeof(struct myrs_devmap),
+				     DMA_FROM_DEVICE);
+	if (dma_mapping_error(&pdev->dev, devmap_addr))
+		return MYRS_STATUS_FAILED;
+
+	mutex_lock(&cs->dcmd_mutex);
+	cmd_blk = &cs->dcmd_blk;
+	myrs_reset_cmd(cmd_blk);
+	mbox = &cmd_blk->mbox;
+	mbox->pdev_info.opcode = MYRS_CMD_OP_IOCTL;
+	mbox->pdev_info.id = MYRS_DCMD_TAG;
+	mbox->pdev_info.control.dma_ctrl_to_host = true;
+	mbox->pdev_info.control.no_autosense = true;
+	mbox->pdev_info.dma_size = sizeof(struct myrs_devmap);
+	mbox->pdev_info.pdev.target = target;
+	mbox->pdev_info.pdev.channel = channel;
+	mbox->pdev_info.pdev.lun = lun;
+	mbox->pdev_info.ioctl_opcode = MYRS_IOCTL_XLATE_PDEV_TO_LDEV;
+	sgl = &mbox->pdev_info.dma_addr;
+	sgl->sge[0].sge_addr = devmap_addr;
+	sgl->sge[0].sge_count = mbox->pdev_info.dma_size;
+
+	myrs_exec_cmd(cs, cmd_blk);
+	status = cmd_blk->status;
+	mutex_unlock(&cs->dcmd_mutex);
+	dma_unmap_single(&pdev->dev, devmap_addr,
+			 sizeof(struct myrs_devmap), DMA_FROM_DEVICE);
+	return status;
+}
+
+/**
+ * myrs_get_event - executes a Get Event Command
+ */
+static unsigned char myrs_get_event(struct myrs_hba *cs,
+		unsigned int event_num, struct myrs_event *event_buf)
+{
+	struct pci_dev *pdev = cs->pdev;
+	dma_addr_t event_addr;
+	struct myrs_cmdblk *cmd_blk = &cs->mcmd_blk;
+	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
+	union myrs_sgl *sgl;
+	unsigned char status;
+
+	event_addr = dma_map_single(&pdev->dev, event_buf,
+				    sizeof(struct myrs_event), DMA_FROM_DEVICE);
+	if (dma_mapping_error(&pdev->dev, event_addr))
+		return MYRS_STATUS_FAILED;
+
+	mbox->get_event.opcode = MYRS_CMD_OP_IOCTL;
+	mbox->get_event.dma_size = sizeof(struct myrs_event);
+	mbox->get_event.evnum_upper = event_num >> 16;
+	mbox->get_event.ctlr_num = 0;
+	mbox->get_event.ioctl_opcode = MYRS_IOCTL_GET_EVENT;
+	mbox->get_event.evnum_lower = event_num & 0xFFFF;
+	sgl = &mbox->get_event.dma_addr;
+	sgl->sge[0].sge_addr = event_addr;
+	sgl->sge[0].sge_count = mbox->get_event.dma_size;
+	myrs_exec_cmd(cs, cmd_blk);
+	status = cmd_blk->status;
+	dma_unmap_single(&pdev->dev, event_addr,
+			 sizeof(struct myrs_event), DMA_FROM_DEVICE);
+
+	return status;
+}
+
+/**
+ * myrs_get_fwstatus - executes a Get Health Status Command
+ */
+static unsigned char myrs_get_fwstatus(struct myrs_hba *cs)
+{
+	struct myrs_cmdblk *cmd_blk = &cs->mcmd_blk;
+	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
+	union myrs_sgl *sgl;
+	unsigned char status;
+
+	myrs_reset_cmd(cmd_blk);
+	mbox->common.opcode = MYRS_CMD_OP_IOCTL;
+	mbox->common.id = MYRS_MCMD_TAG;
+	mbox->common.control.dma_ctrl_to_host = true;
+	mbox->common.control.no_autosense = true;
+	mbox->common.dma_size = sizeof(struct myrs_fwstat);
+	mbox->common.ioctl_opcode = MYRS_IOCTL_GET_HEALTH_STATUS;
+	sgl = &mbox->common.dma_addr;
+	sgl->sge[0].sge_addr = cs->fwstat_addr;
+	sgl->sge[0].sge_count = mbox->common.dma_size;
+	dev_dbg(&cs->host->shost_gendev, "Sending GetHealthStatus\n");
+	myrs_exec_cmd(cs, cmd_blk);
+	status = cmd_blk->status;
+
+	return status;
+}
+
+/**
+ * myrs_enable_mmio_mbox - enables the Memory Mailbox Interface
+ */
+static bool myrs_enable_mmio_mbox(struct myrs_hba *cs,
+		enable_mbox_t enable_mbox_fn)
+{
+	void __iomem *base = cs->io_base;
+	struct pci_dev *pdev = cs->pdev;
+	union myrs_cmd_mbox *cmd_mbox;
+	struct myrs_stat_mbox *stat_mbox;
+	union myrs_cmd_mbox *mbox;
+	dma_addr_t mbox_addr;
+	unsigned char status = MYRS_STATUS_FAILED;
+
+	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+	    dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
+		dev_err(&pdev->dev, "DMA mask out of range\n");
+		return false;
+	}
+
+	/* Temporary DMA buffer, used only in the scope of this function */
+	mbox = dma_alloc_coherent(&pdev->dev, sizeof(union myrs_cmd_mbox),
+				  &mbox_addr, GFP_KERNEL);
+	if (!mbox)
+		return false;
+
+	/* These are the base addresses for the command memory mailbox array */
+	cs->cmd_mbox_size = MYRS_MAX_CMD_MBOX * sizeof(union myrs_cmd_mbox);
+	cmd_mbox = dma_alloc_coherent(&pdev->dev, cs->cmd_mbox_size,
+				      &cs->cmd_mbox_addr, GFP_KERNEL);
+	if (!cmd_mbox) {
+		dev_err(&pdev->dev, "Failed to allocate command mailbox\n");
+		goto out_free;
+	}
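+	/* Command mailboxes form a ring; track the two previously posted slots */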
+	cs->first_cmd_mbox = cmd_mbox;
+	cmd_mbox += MYRS_MAX_CMD_MBOX - 1;
+	cs->last_cmd_mbox = cmd_mbox;
+	cs->next_cmd_mbox = cs->first_cmd_mbox;
+	cs->prev_cmd_mbox1 = cs->last_cmd_mbox;
+	cs->prev_cmd_mbox2 = cs->last_cmd_mbox - 1;
+
+	/* These are the base addresses for the status memory mailbox array */
+	cs->stat_mbox_size = MYRS_MAX_STAT_MBOX * sizeof(struct myrs_stat_mbox);
+	stat_mbox = dma_alloc_coherent(&pdev->dev, cs->stat_mbox_size,
+				       &cs->stat_mbox_addr, GFP_KERNEL);
+	if (!stat_mbox) {
+		dev_err(&pdev->dev, "Failed to allocate status mailbox\n");
+		goto out_free;
+	}
+
+	cs->first_stat_mbox = stat_mbox;
+	stat_mbox += MYRS_MAX_STAT_MBOX - 1;
+	cs->last_stat_mbox = stat_mbox;
+	cs->next_stat_mbox = cs->first_stat_mbox;
+
+	cs->fwstat_buf = dma_alloc_coherent(&pdev->dev,
+					    sizeof(struct myrs_fwstat),
+					    &cs->fwstat_addr, GFP_KERNEL);
+	if (!cs->fwstat_buf) {
+		dev_err(&pdev->dev, "Failed to allocate firmware health buffer\n");
+		goto out_free;
+	}
+	cs->ctlr_info = kzalloc(sizeof(struct myrs_ctlr_info),
+				GFP_KERNEL | GFP_DMA);
+	if (!cs->ctlr_info)
+		goto out_free;
+
+	cs->event_buf = kzalloc(sizeof(struct myrs_event),
+				GFP_KERNEL | GFP_DMA);
+	if (!cs->event_buf)
+		goto out_free;
+
+	/* Enable the Memory Mailbox Interface. */
+	memset(mbox, 0, sizeof(union myrs_cmd_mbox));
+	mbox->set_mbox.id = 1;
+	mbox->set_mbox.opcode = MYRS_CMD_OP_IOCTL;
+	mbox->set_mbox.control.no_autosense = true;
+	mbox->set_mbox.first_cmd_mbox_size_kb =
+		(MYRS_MAX_CMD_MBOX * sizeof(union myrs_cmd_mbox)) >> 10;
+	mbox->set_mbox.first_stat_mbox_size_kb =
+		(MYRS_MAX_STAT_MBOX * sizeof(struct myrs_stat_mbox)) >> 10;
+	mbox->set_mbox.second_cmd_mbox_size_kb = 0;
+	mbox->set_mbox.second_stat_mbox_size_kb = 0;
+	mbox->set_mbox.sense_len = 0;
+	mbox->set_mbox.ioctl_opcode = MYRS_IOCTL_SET_MEM_MBOX;
+	mbox->set_mbox.fwstat_buf_size_kb = 1;
+	mbox->set_mbox.fwstat_buf_addr = cs->fwstat_addr;
+	mbox->set_mbox.first_cmd_mbox_addr = cs->cmd_mbox_addr;
+	mbox->set_mbox.first_stat_mbox_addr = cs->stat_mbox_addr;
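+	/* Hand the setup mailbox to the hardware-specific enable routine */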
+	status = enable_mbox_fn(base, mbox_addr);
+
+out_free:
+	dma_free_coherent(&pdev->dev, sizeof(union myrs_cmd_mbox),
+			  mbox, mbox_addr);
+	if (status != MYRS_STATUS_SUCCESS)
+		dev_err(&pdev->dev, "Failed to enable mailbox, status %X\n",
+			status);
+	return (status == MYRS_STATUS_SUCCESS);
+}
+
+/**
+ * myrs_get_config - reads the Configuration Information
+ */
+static int myrs_get_config(struct myrs_hba *cs)
+{
+	struct myrs_ctlr_info *info = cs->ctlr_info;
+	struct Scsi_Host *shost = cs->host;
+	unsigned char status;
+	unsigned char model[20];
+	unsigned char fw_version[12];
+	int i, model_len;
+
+	/* Get data into dma-able area, then copy into permanent location */
+	mutex_lock(&cs->cinfo_mutex);
+	status = myrs_get_ctlr_info(cs);
+	mutex_unlock(&cs->cinfo_mutex);
+	if (status != MYRS_STATUS_SUCCESS) {
+		shost_printk(KERN_ERR, shost,
+			     "Failed to get controller information\n");
+		return -ENODEV;
+	}
+
+	/* Initialize the Controller Model Name and Full Model Name fields. */
+	model_len = sizeof(info->ctlr_name);
+	if (model_len > sizeof(model) - 1)
+		model_len = sizeof(model) - 1;
+	memcpy(model, info->ctlr_name, model_len);
+	model_len--;
+	while (model_len > 0 &&
+	       (model[model_len] == ' ' || model[model_len] == '\0'))
+		model_len--;
+	model[++model_len] = '\0';
+	strcpy(cs->model_name, "DAC960 ");
+	strcat(cs->model_name, model);
+	/* Initialize the Controller Firmware Version field. */
+	sprintf(fw_version, "%d.%02d-%02d",
+		info->fw_major_version, info->fw_minor_version,
+		info->fw_turn_number);
+	if (info->fw_major_version == 6 &&
+	    info->fw_minor_version == 0 &&
+	    info->fw_turn_number < 1) {
+		shost_printk(KERN_WARNING, shost,
+			"FIRMWARE VERSION %s DOES NOT PROVIDE THE CONTROLLER\n"
+			"STATUS MONITORING FUNCTIONALITY NEEDED BY THIS DRIVER.\n"
+			"PLEASE UPGRADE TO VERSION 6.00-01 OR ABOVE.\n",
+			fw_version);
+		return -ENODEV;
+	}
+	/* Initialize the Controller Channels and Targets. */
+	shost->max_channel = info->physchan_present + info->virtchan_present;
+	shost->max_id = info->max_targets[0];
+	for (i = 1; i < 16; i++) {
+		if (!info->max_targets[i])
+			continue;
+		if (shost->max_id < info->max_targets[i])
+			shost->max_id = info->max_targets[i];
+	}
+
+	/*
+	 * Initialize the Controller Queue Depth, Driver Queue Depth,
+	 * Logical Drive Count, Maximum Blocks per Command, Controller
+	 * Scatter/Gather Limit, and Driver Scatter/Gather Limit.
+	 * The Driver Queue Depth must be at most three less than
+	 * the Controller Queue Depth; tag '1' is reserved for
+	 * direct commands, and tag '2' for monitoring commands.
+	 */
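+	/* e.g. max_tcq = 256 gives can_queue = 253, subject to the MYRS_MAX_CMD_MBOX - 3 cap */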
+	shost->can_queue = info->max_tcq - 3;
+	if (shost->can_queue > MYRS_MAX_CMD_MBOX - 3)
+		shost->can_queue = MYRS_MAX_CMD_MBOX - 3;
+	shost->max_sectors = info->max_transfer_size;
+	shost->sg_tablesize = info->max_sge;
+	if (shost->sg_tablesize > MYRS_SG_LIMIT)
+		shost->sg_tablesize = MYRS_SG_LIMIT;
+
+	shost_printk(KERN_INFO, shost,
+		"Configuring %s PCI RAID Controller\n", model);
+	shost_printk(KERN_INFO, shost,
+		"  Firmware Version: %s, Channels: %d, Memory Size: %dMB\n",
+		fw_version, info->physchan_present, info->mem_size_mb);
+
+	shost_printk(KERN_INFO, shost,
+		     "  Controller Queue Depth: %d, Maximum Blocks per Command: %d\n",
+		     shost->can_queue, shost->max_sectors);
+
+	shost_printk(KERN_INFO, shost,
+		     "  Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n",
+		     shost->can_queue, shost->sg_tablesize, MYRS_SG_LIMIT);
+	for (i = 0; i < info->physchan_max; i++) {
+		if (!info->max_targets[i])
+			continue;
+		shost_printk(KERN_INFO, shost,
+			     "  Device Channel %d: max %d devices\n",
+			     i, info->max_targets[i]);
+	}
+	shost_printk(KERN_INFO, shost,
+		     "  Physical: %d/%d channels, %d disks, %d devices\n",
+		     info->physchan_present, info->physchan_max,
+		     info->pdisk_present, info->pdev_present);
+
+	shost_printk(KERN_INFO, shost,
+		     "  Logical: %d/%d channels, %d disks\n",
+		     info->virtchan_present, info->virtchan_max,
+		     info->ldev_present);
+	return 0;
+}
+
+/*
+ * myrs_log_event - prints a Controller Event message
+ */
+static struct {
+	int ev_code;
+	unsigned char *ev_msg;
+} myrs_ev_list[] = {
+	/* Physical Device Events (0x0000 - 0x007F) */
+	{ 0x0001, "P Online" },
+	{ 0x0002, "P Standby" },
+	{ 0x0005, "P Automatic Rebuild Started" },
+	{ 0x0006, "P Manual Rebuild Started" },
+	{ 0x0007, "P Rebuild Completed" },
+	{ 0x0008, "P Rebuild Cancelled" },
+	{ 0x0009, "P Rebuild Failed for Unknown Reasons" },
+	{ 0x000A, "P Rebuild Failed due to New Physical Device" },
+	{ 0x000B, "P Rebuild Failed due to Logical Drive Failure" },
+	{ 0x000C, "S Offline" },
+	{ 0x000D, "P Found" },
+	{ 0x000E, "P Removed" },
+	{ 0x000F, "P Unconfigured" },
+	{ 0x0010, "P Expand Capacity Started" },
+	{ 0x0011, "P Expand Capacity Completed" },
+	{ 0x0012, "P Expand Capacity Failed" },
+	{ 0x0013, "P Command Timed Out" },
+	{ 0x0014, "P Command Aborted" },
+	{ 0x0015, "P Command Retried" },
+	{ 0x0016, "P Parity Error" },
+	{ 0x0017, "P Soft Error" },
+	{ 0x0018, "P Miscellaneous Error" },
+	{ 0x0019, "P Reset" },
+	{ 0x001A, "P Active Spare Found" },
+	{ 0x001B, "P Warm Spare Found" },
+	{ 0x001C, "S Sense Data Received" },
+	{ 0x001D, "P Initialization Started" },
+	{ 0x001E, "P Initialization Completed" },
+	{ 0x001F, "P Initialization Failed" },
+	{ 0x0020, "P Initialization Cancelled" },
+	{ 0x0021, "P Failed because Write Recovery Failed" },
+	{ 0x0022, "P Failed because SCSI Bus Reset Failed" },
+	{ 0x0023, "P Failed because of Double Check Condition" },
+	{ 0x0024, "P Failed because Device Cannot Be Accessed" },
+	{ 0x0025, "P Failed because of Gross Error on SCSI Processor" },
+	{ 0x0026, "P Failed because of Bad Tag from Device" },
+	{ 0x0027, "P Failed because of Command Timeout" },
+	{ 0x0028, "P Failed because of System Reset" },
+	{ 0x0029, "P Failed because of Busy Status or Parity Error" },
+	{ 0x002A, "P Failed because Host Set Device to Failed State" },
+	{ 0x002B, "P Failed because of Selection Timeout" },
+	{ 0x002C, "P Failed because of SCSI Bus Phase Error" },
+	{ 0x002D, "P Failed because Device Returned Unknown Status" },
+	{ 0x002E, "P Failed because Device Not Ready" },
+	{ 0x002F, "P Failed because Device Not Found at Startup" },
+	{ 0x0030, "P Failed because COD Write Operation Failed" },
+	{ 0x0031, "P Failed because BDT Write Operation Failed" },
+	{ 0x0039, "P Missing at Startup" },
+	{ 0x003A, "P Start Rebuild Failed due to Physical Drive Too Small" },
+	{ 0x003C, "P Temporarily Offline Device Automatically Made Online" },
+	{ 0x003D, "P Standby Rebuild Started" },
+	/* Logical Device Events (0x0080 - 0x00FF) */
+	{ 0x0080, "M Consistency Check Started" },
+	{ 0x0081, "M Consistency Check Completed" },
+	{ 0x0082, "M Consistency Check Cancelled" },
+	{ 0x0083, "M Consistency Check Completed With Errors" },
+	{ 0x0084, "M Consistency Check Failed due to Logical Drive Failure" },
+	{ 0x0085, "M Consistency Check Failed due to Physical Device Failure" },
+	{ 0x0086, "L Offline" },
+	{ 0x0087, "L Critical" },
+	{ 0x0088, "L Online" },
+	{ 0x0089, "M Automatic Rebuild Started" },
+	{ 0x008A, "M Manual Rebuild Started" },
+	{ 0x008B, "M Rebuild Completed" },
+	{ 0x008C, "M Rebuild Cancelled" },
+	{ 0x008D, "M Rebuild Failed for Unknown Reasons" },
+	{ 0x008E, "M Rebuild Failed due to New Physical Device" },
+	{ 0x008F, "M Rebuild Failed due to Logical Drive Failure" },
+	{ 0x0090, "M Initialization Started" },
+	{ 0x0091, "M Initialization Completed" },
+	{ 0x0092, "M Initialization Cancelled" },
+	{ 0x0093, "M Initialization Failed" },
+	{ 0x0094, "L Found" },
+	{ 0x0095, "L Deleted" },
+	{ 0x0096, "M Expand Capacity Started" },
+	{ 0x0097, "M Expand Capacity Completed" },
+	{ 0x0098, "M Expand Capacity Failed" },
+	{ 0x0099, "L Bad Block Found" },
+	{ 0x009A, "L Size Changed" },
+	{ 0x009B, "L Type Changed" },
+	{ 0x009C, "L Bad Data Block Found" },
+	{ 0x009E, "L Read of Data Block in BDT" },
+	{ 0x009F, "L Write Back Data for Disk Block Lost" },
+	{ 0x00A0, "L Temporarily Offline RAID-5/3 Drive Made Online" },
+	{ 0x00A1, "L Temporarily Offline RAID-6/1/0/7 Drive Made Online" },
+	{ 0x00A2, "L Standby Rebuild Started" },
+	/* Fault Management Events (0x0100 - 0x017F) */
+	{ 0x0140, "E Fan %d Failed" },
+	{ 0x0141, "E Fan %d OK" },
+	{ 0x0142, "E Fan %d Not Present" },
+	{ 0x0143, "E Power Supply %d Failed" },
+	{ 0x0144, "E Power Supply %d OK" },
+	{ 0x0145, "E Power Supply %d Not Present" },
+	{ 0x0146, "E Temperature Sensor %d Temperature Exceeds Safe Limit" },
+	{ 0x0147, "E Temperature Sensor %d Temperature Exceeds Working Limit" },
+	{ 0x0148, "E Temperature Sensor %d Temperature Normal" },
+	{ 0x0149, "E Temperature Sensor %d Not Present" },
+	{ 0x014A, "E Enclosure Management Unit %d Access Critical" },
+	{ 0x014B, "E Enclosure Management Unit %d Access OK" },
+	{ 0x014C, "E Enclosure Management Unit %d Access Offline" },
+	/* Controller Events (0x0180 - 0x01FF) */
+	{ 0x0181, "C Cache Write Back Error" },
+	{ 0x0188, "C Battery Backup Unit Found" },
+	{ 0x0189, "C Battery Backup Unit Charge Level Low" },
+	{ 0x018A, "C Battery Backup Unit Charge Level OK" },
+	{ 0x0193, "C Installation Aborted" },
+	{ 0x0195, "C Battery Backup Unit Physically Removed" },
+	{ 0x0196, "C Memory Error During Warm Boot" },
+	{ 0x019E, "C Memory Soft ECC Error Corrected" },
+	{ 0x019F, "C Memory Hard ECC Error Corrected" },
+	{ 0x01A2, "C Battery Backup Unit Failed" },
+	{ 0x01AB, "C Mirror Race Recovery Failed" },
+	{ 0x01AC, "C Mirror Race on Critical Drive" },
+	/* Controller Internal Processor Events */
+	{ 0x0380, "C Internal Controller Hung" },
+	{ 0x0381, "C Internal Controller Firmware Breakpoint" },
+	{ 0x0390, "C Internal Controller i960 Processor Specific Error" },
+	{ 0x03A0, "C Internal Controller StrongARM Processor Specific Error" },
+	{ 0, "" }
+};
+
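+/*
+ * The first character of each message selects the event class that
+ * myrs_log_event() switches on: 'P' physical device, 'L' logical drive,
+ * 'M' management operation, 'S' sense data, 'E' enclosure, 'C' controller.
+ */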
+static void myrs_log_event(struct myrs_hba *cs, struct myrs_event *ev)
+{
+	unsigned char msg_buf[MYRS_LINE_BUFFER_SIZE];
+	int ev_idx = 0, ev_code;
+	unsigned char ev_type, *ev_msg;
+	struct Scsi_Host *shost = cs->host;
+	struct scsi_device *sdev;
+	struct scsi_sense_hdr sshdr = {0};
+	unsigned char sense_info[4];
+	unsigned char cmd_specific[4];
+
+	if (ev->ev_code == 0x1C) {
+		if (!scsi_normalize_sense(ev->sense_data, 40, &sshdr)) {
+			memset(&sshdr, 0x0, sizeof(sshdr));
+			memset(sense_info, 0x0, sizeof(sense_info));
+			memset(cmd_specific, 0x0, sizeof(cmd_specific));
+		} else {
+			memcpy(sense_info, &ev->sense_data[3], 4);
+			memcpy(cmd_specific, &ev->sense_data[7], 4);
+		}
+	}
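+	/* Vendor-specific sense (ASC 0x80/0x81) encodes an event code in ASC/ASCQ */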
+	if (sshdr.sense_key == VENDOR_SPECIFIC &&
+	    (sshdr.asc == 0x80 || sshdr.asc == 0x81))
+		ev->ev_code = ((sshdr.asc - 0x80) << 8 | sshdr.ascq);
+	while (true) {
+		ev_code = myrs_ev_list[ev_idx].ev_code;
+		if (ev_code == ev->ev_code || ev_code == 0)
+			break;
+		ev_idx++;
+	}
+	ev_type = myrs_ev_list[ev_idx].ev_msg[0];
+	ev_msg = &myrs_ev_list[ev_idx].ev_msg[2];
+	if (ev_code == 0) {
+		shost_printk(KERN_WARNING, shost,
+			     "Unknown Controller Event Code %04X\n",
+			     ev->ev_code);
+		return;
+	}
+	switch (ev_type) {
+	case 'P':
+		sdev = scsi_device_lookup(shost, ev->channel,
+					  ev->target, 0);
+		if (!sdev) {
+			shost_printk(KERN_INFO, shost,
+				     "event %d: Physical Device %d:%d %s\n",
+				     ev->ev_seq, ev->channel, ev->target,
+				     ev_msg);
+			break;
+		}
+		sdev_printk(KERN_INFO, sdev, "event %d: Physical Device %s\n",
+			    ev->ev_seq, ev_msg);
+		if (sdev->hostdata &&
+		    sdev->channel < cs->ctlr_info->physchan_present) {
+			struct myrs_pdev_info *pdev_info = sdev->hostdata;
+
+			switch (ev->ev_code) {
+			case 0x0001:
+			case 0x0007:
+				pdev_info->dev_state = MYRS_DEVICE_ONLINE;
+				break;
+			case 0x0002:
+				pdev_info->dev_state = MYRS_DEVICE_STANDBY;
+				break;
+			case 0x000C:
+				pdev_info->dev_state = MYRS_DEVICE_OFFLINE;
+				break;
+			case 0x000E:
+				pdev_info->dev_state = MYRS_DEVICE_MISSING;
+				break;
+			case 0x000F:
+				pdev_info->dev_state = MYRS_DEVICE_UNCONFIGURED;
+				break;
+			}
+		}
+		scsi_device_put(sdev);
+		break;
+	case 'L':
+	case 'M':
+		shost_printk(KERN_INFO, shost,
+			     "event %d: Logical Drive %d %s\n",
+			     ev->ev_seq, ev->lun, ev_msg);
+		cs->needs_update = true;
+		break;
+	case 'S':
+		if (sshdr.sense_key == NO_SENSE ||
+		    (sshdr.sense_key == NOT_READY &&
+		     sshdr.asc == 0x04 && (sshdr.ascq == 0x01 ||
+					    sshdr.ascq == 0x02)))
+			break;
+		shost_printk(KERN_INFO, shost,
+			     "event %d: Physical Device %d:%d %s\n",
+			     ev->ev_seq, ev->channel, ev->target, ev_msg);
+		shost_printk(KERN_INFO, shost,
+			     "Physical Device %d:%d Sense Key = %X, ASC = %02X, ASCQ = %02X\n",
+			     ev->channel, ev->target,
+			     sshdr.sense_key, sshdr.asc, sshdr.ascq);
+		shost_printk(KERN_INFO, shost,
+			     "Physical Device %d:%d Sense Information = %02X%02X%02X%02X %02X%02X%02X%02X\n",
+			     ev->channel, ev->target,
+			     sense_info[0], sense_info[1],
+			     sense_info[2], sense_info[3],
+			     cmd_specific[0], cmd_specific[1],
+			     cmd_specific[2], cmd_specific[3]);
+		break;
+	case 'E':
+		if (cs->disable_enc_msg)
+			break;
+		sprintf(msg_buf, ev_msg, ev->lun);
+		shost_printk(KERN_INFO, shost, "event %d: Enclosure %d %s\n",
+			     ev->ev_seq, ev->target, msg_buf);
+		break;
+	case 'C':
+		shost_printk(KERN_INFO, shost, "event %d: Controller %s\n",
+			     ev->ev_seq, ev_msg);
+		break;
+	default:
+		shost_printk(KERN_INFO, shost,
+			     "event %d: Unknown Event Code %04X\n",
+			     ev->ev_seq, ev->ev_code);
+		break;
+	}
+}
+
+/*
+ * SCSI sysfs interface functions
+ */
+static ssize_t raid_state_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct myrs_hba *cs = shost_priv(sdev->host);
+	int ret;
+
+	if (!sdev->hostdata)
+		return snprintf(buf, 16, "Unknown\n");
+
+	if (sdev->channel >= cs->ctlr_info->physchan_present) {
+		struct myrs_ldev_info *ldev_info = sdev->hostdata;
+		const char *name;
+
+		name = myrs_devstate_name(ldev_info->dev_state);
+		if (name)
+			ret = snprintf(buf, 32, "%s\n", name);
+		else
+			ret = snprintf(buf, 32, "Invalid (%02X)\n",
+				       ldev_info->dev_state);
+	} else {
+		struct myrs_pdev_info *pdev_info;
+		const char *name;
+
+		pdev_info = sdev->hostdata;
+		name = myrs_devstate_name(pdev_info->dev_state);
+		if (name)
+			ret = snprintf(buf, 32, "%s\n", name);
+		else
+			ret = snprintf(buf, 32, "Invalid (%02X)\n",
+				       pdev_info->dev_state);
+	}
+	return ret;
+}
+
+static ssize_t raid_state_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct myrs_hba *cs = shost_priv(sdev->host);
+	struct myrs_cmdblk *cmd_blk;
+	union myrs_cmd_mbox *mbox;
+	enum myrs_devstate new_state;
+	unsigned short ldev_num;
+	unsigned char status;
+
+	if (!strncmp(buf, "offline", 7) ||
+	    !strncmp(buf, "kill", 4))
+		new_state = MYRS_DEVICE_OFFLINE;
+	else if (!strncmp(buf, "online", 6))
+		new_state = MYRS_DEVICE_ONLINE;
+	else if (!strncmp(buf, "standby", 7))
+		new_state = MYRS_DEVICE_STANDBY;
+	else
+		return -EINVAL;
+
+	if (sdev->channel < cs->ctlr_info->physchan_present) {
+		struct myrs_pdev_info *pdev_info = sdev->hostdata;
+		struct myrs_devmap *pdev_devmap =
+			(struct myrs_devmap *)&pdev_info->rsvd13;
+
+		if (pdev_info->dev_state == new_state) {
+			sdev_printk(KERN_INFO, sdev,
+				    "Device already in %s\n",
+				    myrs_devstate_name(new_state));
+			return count;
+		}
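+		/* The set-device-state IOCTL takes a logical device number */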
+		status = myrs_translate_pdev(cs, sdev->channel, sdev->id,
+					     sdev->lun, pdev_devmap);
+		if (status != MYRS_STATUS_SUCCESS)
+			return -ENXIO;
+		ldev_num = pdev_devmap->ldev_num;
+	} else {
+		struct myrs_ldev_info *ldev_info = sdev->hostdata;
+
+		if (ldev_info->dev_state == new_state) {
+			sdev_printk(KERN_INFO, sdev,
+				    "Device already in %s\n",
+				    myrs_devstate_name(new_state));
+			return count;
+		}
+		ldev_num = ldev_info->ldev_num;
+	}
+	mutex_lock(&cs->dcmd_mutex);
+	cmd_blk = &cs->dcmd_blk;
+	myrs_reset_cmd(cmd_blk);
+	mbox = &cmd_blk->mbox;
+	mbox->common.opcode = MYRS_CMD_OP_IOCTL;
+	mbox->common.id = MYRS_DCMD_TAG;
+	mbox->common.control.dma_ctrl_to_host = true;
+	mbox->common.control.no_autosense = true;
+	mbox->set_devstate.ioctl_opcode = MYRS_IOCTL_SET_DEVICE_STATE;
+	mbox->set_devstate.state = new_state;
+	mbox->set_devstate.ldev.ldev_num = ldev_num;
+	myrs_exec_cmd(cs, cmd_blk);
+	status = cmd_blk->status;
+	mutex_unlock(&cs->dcmd_mutex);
+	if (status == MYRS_STATUS_SUCCESS) {
+		if (sdev->channel < cs->ctlr_info->physchan_present) {
+			struct myrs_pdev_info *pdev_info = sdev->hostdata;
+
+			pdev_info->dev_state = new_state;
+		} else {
+			struct myrs_ldev_info *ldev_info = sdev->hostdata;
+
+			ldev_info->dev_state = new_state;
+		}
+		sdev_printk(KERN_INFO, sdev,
+			    "Set device state to %s\n",
+			    myrs_devstate_name(new_state));
+		return count;
+	}
+	sdev_printk(KERN_INFO, sdev,
+		    "Failed to set device state to %s, status 0x%02x\n",
+		    myrs_devstate_name(new_state), status);
+	return -EINVAL;
+}
+static DEVICE_ATTR_RW(raid_state);
+
+static ssize_t raid_level_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct myrs_hba *cs = shost_priv(sdev->host);
+	const char *name = NULL;
+
+	if (!sdev->hostdata)
+		return snprintf(buf, 16, "Unknown\n");
+
+	if (sdev->channel >= cs->ctlr_info->physchan_present) {
+		struct myrs_ldev_info *ldev_info;
+
+		ldev_info = sdev->hostdata;
+		name = myrs_raid_level_name(ldev_info->raid_level);
+		if (!name)
+			return snprintf(buf, 32, "Invalid (%02X)\n",
+					ldev_info->raid_level);
+	} else {
+		name = myrs_raid_level_name(MYRS_RAID_PHYSICAL);
+	}
+
+	return snprintf(buf, 32, "%s\n", name);
+}
+static DEVICE_ATTR_RO(raid_level);
+
+static ssize_t rebuild_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct myrs_hba *cs = shost_priv(sdev->host);
+	struct myrs_ldev_info *ldev_info;
+	unsigned short ldev_num;
+	unsigned char status;
+
+	if (sdev->channel < cs->ctlr_info->physchan_present)
+		return snprintf(buf, 64, "physical device - not rebuilding\n");
+
+	ldev_info = sdev->hostdata;
+	ldev_num = ldev_info->ldev_num;
+	status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
+	if (status != MYRS_STATUS_SUCCESS) {
+		sdev_printk(KERN_INFO, sdev,
+			    "Failed to get device information, status 0x%02x\n",
+			    status);
+		return -EIO;
+	}
+	if (ldev_info->rbld_active)
+		return snprintf(buf, 64, "rebuilding block %zu of %zu\n",
+				(size_t)ldev_info->rbld_lba,
+				(size_t)ldev_info->cfg_devsize);
+
+	return snprintf(buf, 32, "not rebuilding\n");
+}
+
+static ssize_t rebuild_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct myrs_hba *cs = shost_priv(sdev->host);
+	struct myrs_ldev_info *ldev_info;
+	struct myrs_cmdblk *cmd_blk;
+	union myrs_cmd_mbox *mbox;
+	unsigned short ldev_num;
+	unsigned char status;
+	int rebuild, ret;
+
+	if (sdev->channel < cs->ctlr_info->physchan_present)
+		return -EINVAL;
+
+	ldev_info = sdev->hostdata;
+	if (!ldev_info)
+		return -ENXIO;
+	ldev_num = ldev_info->ldev_num;
+
+	ret = kstrtoint(buf, 0, &rebuild);
+	if (ret)
+		return ret;
+
+	status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
+	if (status != MYRS_STATUS_SUCCESS) {
+		sdev_printk(KERN_INFO, sdev,
+			    "Failed to get device information, status 0x%02x\n",
+			    status);
+		return -EIO;
+	}
+
+	if (rebuild && ldev_info->rbld_active) {
+		sdev_printk(KERN_INFO, sdev,
+			    "Rebuild Not Initiated; already in progress\n");
+		return -EALREADY;
+	}
+	if (!rebuild && !ldev_info->rbld_active) {
+		sdev_printk(KERN_INFO, sdev,
+			    "Rebuild Not Cancelled; no rebuild in progress\n");
+		return count;
+	}
+
+	mutex_lock(&cs->dcmd_mutex);
+	cmd_blk = &cs->dcmd_blk;
+	myrs_reset_cmd(cmd_blk);
+	mbox = &cmd_blk->mbox;
+	mbox->common.opcode = MYRS_CMD_OP_IOCTL;
+	mbox->common.id = MYRS_DCMD_TAG;
+	mbox->common.control.dma_ctrl_to_host = true;
+	mbox->common.control.no_autosense = true;
+	if (rebuild) {
+		mbox->ldev_info.ldev.ldev_num = ldev_num;
+		mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_RBLD_DEVICE_START;
+	} else {
+		mbox->ldev_info.ldev.ldev_num = ldev_num;
+		mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_RBLD_DEVICE_STOP;
+	}
+	myrs_exec_cmd(cs, cmd_blk);
+	status = cmd_blk->status;
+	mutex_unlock(&cs->dcmd_mutex);
+	if (status != MYRS_STATUS_SUCCESS) {
+		sdev_printk(KERN_INFO, sdev,
+			    "Rebuild Not %s, status 0x%02x\n",
+			    rebuild ? "Initiated" : "Cancelled", status);
+		ret = -EIO;
+	} else {
+		sdev_printk(KERN_INFO, sdev, "Rebuild %s\n",
+			    rebuild ? "Initiated" : "Cancelled");
+		ret = count;
+	}
+
+	return ret;
+}
+static DEVICE_ATTR_RW(rebuild);
+
+static ssize_t consistency_check_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct myrs_hba *cs = shost_priv(sdev->host);
+	struct myrs_ldev_info *ldev_info;
+	unsigned short ldev_num;
+	unsigned char status;
+
+	if (sdev->channel < cs->ctlr_info->physchan_present)
+		return snprintf(buf, 32, "physical device - not checking\n");
+
+	ldev_info = sdev->hostdata;
+	if (!ldev_info)
+		return -ENXIO;
+	ldev_num = ldev_info->ldev_num;
+	status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
+	if (status != MYRS_STATUS_SUCCESS) {
+		sdev_printk(KERN_INFO, sdev,
+			    "Failed to get device information, status 0x%02x\n",
+			    status);
+		return -EIO;
+	}
+	if (ldev_info->cc_active)
+		return snprintf(buf, 64, "checking block %zu of %zu\n",
+				(size_t)ldev_info->cc_lba,
+				(size_t)ldev_info->cfg_devsize);
+
+	return snprintf(buf, 32, "not checking\n");
+}
+
+static ssize_t consistency_check_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct myrs_hba *cs = shost_priv(sdev->host);
+	struct myrs_ldev_info *ldev_info;
+	struct myrs_cmdblk *cmd_blk;
+	union myrs_cmd_mbox *mbox;
+	unsigned short ldev_num;
+	unsigned char status;
+	int check, ret;
+
+	if (sdev->channel < cs->ctlr_info->physchan_present)
+		return -EINVAL;
+
+	ldev_info = sdev->hostdata;
+	if (!ldev_info)
+		return -ENXIO;
+	ldev_num = ldev_info->ldev_num;
+
+	ret = kstrtoint(buf, 0, &check);
+	if (ret)
+		return ret;
+
+	status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
+	if (status != MYRS_STATUS_SUCCESS) {
+		sdev_printk(KERN_INFO, sdev,
+			    "Failed to get device information, status 0x%02x\n",
+			    status);
+		return -EIO;
+	}
+	if (check && ldev_info->cc_active) {
+		sdev_printk(KERN_INFO, sdev,
+			    "Consistency Check Not Initiated; "
+			    "already in progress\n");
+		return -EALREADY;
+	}
+	if (!check && !ldev_info->cc_active) {
+		sdev_printk(KERN_INFO, sdev,
+			    "Consistency Check Not Cancelled; "
+			    "check not in progress\n");
+		return count;
+	}
+
+	mutex_lock(&cs->dcmd_mutex);
+	cmd_blk = &cs->dcmd_blk;
+	myrs_reset_cmd(cmd_blk);
+	mbox = &cmd_blk->mbox;
+	mbox->common.opcode = MYRS_CMD_OP_IOCTL;
+	mbox->common.id = MYRS_DCMD_TAG;
+	mbox->common.control.dma_ctrl_to_host = true;
+	mbox->common.control.no_autosense = true;
+	if (check) {
+		mbox->cc.ldev.ldev_num = ldev_num;
+		mbox->cc.ioctl_opcode = MYRS_IOCTL_CC_START;
+		mbox->cc.restore_consistency = true;
+		mbox->cc.initialized_area_only = false;
+	} else {
+		mbox->cc.ldev.ldev_num = ldev_num;
+		mbox->cc.ioctl_opcode = MYRS_IOCTL_CC_STOP;
+	}
+	myrs_exec_cmd(cs, cmd_blk);
+	status = cmd_blk->status;
+	mutex_unlock(&cs->dcmd_mutex);
+	if (status != MYRS_STATUS_SUCCESS) {
+		sdev_printk(KERN_INFO, sdev,
+			    "Consistency Check Not %s, status 0x%02x\n",
+			    check ? "Initiated" : "Cancelled", status);
+		ret = -EIO;
+	} else {
+		sdev_printk(KERN_INFO, sdev, "Consistency Check %s\n",
+			    check ? "Initiated" : "Cancelled");
+		ret = count;
+	}
+
+	return ret;
+}
+static DEVICE_ATTR_RW(consistency_check);
+
+static struct device_attribute *myrs_sdev_attrs[] = {
+	&dev_attr_consistency_check,
+	&dev_attr_rebuild,
+	&dev_attr_raid_state,
+	&dev_attr_raid_level,
+	NULL,
+};
+
+static ssize_t serial_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct myrs_hba *cs = shost_priv(shost);
+	char serial[17];
+
+	memcpy(serial, cs->ctlr_info->serial_number, 16);
+	serial[16] = '\0';
+	return snprintf(buf, sizeof(serial) + 1, "%s\n", serial);
+}
+static DEVICE_ATTR_RO(serial);
+
+static ssize_t ctlr_num_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct myrs_hba *cs = shost_priv(shost);
+
+	return snprintf(buf, 20, "%d\n", cs->host->host_no);
+}
+static DEVICE_ATTR_RO(ctlr_num);
+
+static struct myrs_cpu_type_tbl {
+	enum myrs_cpu_type type;
+	char *name;
+} myrs_cpu_type_names[] = {
+	{ MYRS_CPUTYPE_i960CA, "i960CA" },
+	{ MYRS_CPUTYPE_i960RD, "i960RD" },
+	{ MYRS_CPUTYPE_i960RN, "i960RN" },
+	{ MYRS_CPUTYPE_i960RP, "i960RP" },
+	{ MYRS_CPUTYPE_NorthBay, "NorthBay" },
+	{ MYRS_CPUTYPE_StrongArm, "StrongARM" },
+	{ MYRS_CPUTYPE_i960RM, "i960RM" },
+};
+
+static ssize_t processor_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct myrs_hba *cs = shost_priv(shost);
+	struct myrs_cpu_type_tbl *tbl;
+	const char *first_processor = NULL;
+	const char *second_processor = NULL;
+	struct myrs_ctlr_info *info = cs->ctlr_info;
+	ssize_t ret;
+	int i;
+
+	if (info->cpu[0].cpu_count) {
+		tbl = myrs_cpu_type_names;
+		for (i = 0; i < ARRAY_SIZE(myrs_cpu_type_names); i++) {
+			if (tbl[i].type == info->cpu[0].cpu_type) {
+				first_processor = tbl[i].name;
+				break;
+			}
+		}
+	}
+	if (info->cpu[1].cpu_count) {
+		tbl = myrs_cpu_type_names;
+		for (i = 0; i < ARRAY_SIZE(myrs_cpu_type_names); i++) {
+			if (tbl[i].type == info->cpu[1].cpu_type) {
+				second_processor = tbl[i].name;
+				break;
+			}
+		}
+	}
+	if (first_processor && second_processor)
+		ret = snprintf(buf, 64, "1: %s (%s, %d cpus)\n"
+			       "2: %s (%s, %d cpus)\n",
+			       info->cpu[0].cpu_name,
+			       first_processor, info->cpu[0].cpu_count,
+			       info->cpu[1].cpu_name,
+			       second_processor, info->cpu[1].cpu_count);
+	else if (first_processor && !second_processor)
+		ret = snprintf(buf, 64, "1: %s (%s, %d cpus)\n2: absent\n",
+			       info->cpu[0].cpu_name,
+			       first_processor, info->cpu[0].cpu_count);
+	else if (!first_processor && second_processor)
+		ret = snprintf(buf, 64, "1: absent\n2: %s (%s, %d cpus)\n",
+			       info->cpu[1].cpu_name,
+			       second_processor, info->cpu[1].cpu_count);
+	else
+		ret = snprintf(buf, 64, "1: absent\n2: absent\n");
+
+	return ret;
+}
+static DEVICE_ATTR_RO(processor);
+
+static ssize_t model_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct myrs_hba *cs = shost_priv(shost);
+
+	return snprintf(buf, 28, "%s\n", cs->model_name);
+}
+static DEVICE_ATTR_RO(model);
+
+static ssize_t ctlr_type_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct myrs_hba *cs = shost_priv(shost);
+
+	return snprintf(buf, 4, "%d\n", cs->ctlr_info->ctlr_type);
+}
+static DEVICE_ATTR_RO(ctlr_type);
+
+static ssize_t cache_size_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct myrs_hba *cs = shost_priv(shost);
+
+	return snprintf(buf, 16, "%d MB\n", cs->ctlr_info->cache_size_mb);
+}
+static DEVICE_ATTR_RO(cache_size);
+
+static ssize_t firmware_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct myrs_hba *cs = shost_priv(shost);
+
+	return snprintf(buf, 16, "%d.%02d-%02d\n",
+			cs->ctlr_info->fw_major_version,
+			cs->ctlr_info->fw_minor_version,
+			cs->ctlr_info->fw_turn_number);
+}
+static DEVICE_ATTR_RO(firmware);
+
+static ssize_t discovery_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct myrs_hba *cs = shost_priv(shost);
+	struct myrs_cmdblk *cmd_blk;
+	union myrs_cmd_mbox *mbox;
+	unsigned char status;
+
+	mutex_lock(&cs->dcmd_mutex);
+	cmd_blk = &cs->dcmd_blk;
+	myrs_reset_cmd(cmd_blk);
+	mbox = &cmd_blk->mbox;
+	mbox->common.opcode = MYRS_CMD_OP_IOCTL;
+	mbox->common.id = MYRS_DCMD_TAG;
+	mbox->common.control.dma_ctrl_to_host = true;
+	mbox->common.control.no_autosense = true;
+	mbox->common.ioctl_opcode = MYRS_IOCTL_START_DISCOVERY;
+	myrs_exec_cmd(cs, cmd_blk);
+	status = cmd_blk->status;
+	mutex_unlock(&cs->dcmd_mutex);
+	if (status != MYRS_STATUS_SUCCESS) {
+		shost_printk(KERN_INFO, shost,
+			     "Discovery Not Initiated, status %02X\n",
+			     status);
+		return -EINVAL;
+	}
+	shost_printk(KERN_INFO, shost, "Discovery Initiated\n");
+	cs->next_evseq = 0;
+	cs->needs_update = true;
+	queue_delayed_work(cs->work_q, &cs->monitor_work, 1);
+	flush_delayed_work(&cs->monitor_work);
+	shost_printk(KERN_INFO, shost, "Discovery Completed\n");
+
+	return count;
+}
+static DEVICE_ATTR_WO(discovery);
+
+static ssize_t flush_cache_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct myrs_hba *cs = shost_priv(shost);
+	unsigned char status;
+
+	status = myrs_dev_op(cs, MYRS_IOCTL_FLUSH_DEVICE_DATA,
+			     MYRS_RAID_CONTROLLER);
+	if (status == MYRS_STATUS_SUCCESS) {
+		shost_printk(KERN_INFO, shost, "Cache Flush Completed\n");
+		return count;
+	}
+	shost_printk(KERN_INFO, shost,
+		     "Cache Flush failed, status 0x%02x\n", status);
+	return -EIO;
+}
+static DEVICE_ATTR_WO(flush_cache);
+
+static ssize_t disable_enclosure_messages_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct myrs_hba *cs = shost_priv(shost);
+
+	return snprintf(buf, 3, "%d\n", cs->disable_enc_msg);
+}
+
+static ssize_t disable_enclosure_messages_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct myrs_hba *cs = shost_priv(sdev->host);
+	int value, ret;
+
+	ret = kstrtoint(buf, 0, &value);
+	if (ret)
+		return ret;
+
+	if (value < 0 || value > 2)
+		return -EINVAL;
+
+	cs->disable_enc_msg = value;
+	return count;
+}
+static DEVICE_ATTR_RW(disable_enclosure_messages);
+
+static struct device_attribute *myrs_shost_attrs[] = {
+	&dev_attr_serial,
+	&dev_attr_ctlr_num,
+	&dev_attr_processor,
+	&dev_attr_model,
+	&dev_attr_ctlr_type,
+	&dev_attr_cache_size,
+	&dev_attr_firmware,
+	&dev_attr_discovery,
+	&dev_attr_flush_cache,
+	&dev_attr_disable_enclosure_messages,
+	NULL,
+};
+
+/*
+ * SCSI midlayer interface
+ */
+int myrs_host_reset(struct scsi_cmnd *scmd)
+{
+	struct Scsi_Host *shost = scmd->device->host;
+	struct myrs_hba *cs = shost_priv(shost);
+
+	cs->reset(cs->io_base);
+	return SUCCESS;
+}
+
+static void myrs_mode_sense(struct myrs_hba *cs, struct scsi_cmnd *scmd,
+		struct myrs_ldev_info *ldev_info)
+{
+	unsigned char modes[32], *mode_pg;
+	bool dbd;
+	size_t mode_len;
+
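+	/* DBD (CDB byte 1, bit 3): when set, omit the block descriptor */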
+	dbd = (scmd->cmnd[1] & 0x08) == 0x08;
+	if (dbd) {
+		mode_len = 24;
+		mode_pg = &modes[4];
+	} else {
+		mode_len = 32;
+		mode_pg = &modes[12];
+	}
+	memset(modes, 0, sizeof(modes));
+	modes[0] = mode_len - 1;
+	modes[2] = 0x10; /* Enable FUA */
+	if (ldev_info->ldev_control.wce == MYRS_LOGICALDEVICE_RO)
+		modes[2] |= 0x80;
+	if (!dbd) {
+		unsigned char *block_desc = &modes[4];
+
+		modes[3] = 8;
+		put_unaligned_be32(ldev_info->cfg_devsize, &block_desc[0]);
+		put_unaligned_be32(ldev_info->devsize_bytes, &block_desc[5]);
+	}
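+	/* Caching mode page: page code 0x08, page length 0x12 */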
+	mode_pg[0] = 0x08;
+	mode_pg[1] = 0x12;
+	if (ldev_info->ldev_control.rce == MYRS_READCACHE_DISABLED)
+		mode_pg[2] |= 0x01;
+	if (ldev_info->ldev_control.wce == MYRS_WRITECACHE_ENABLED ||
+	    ldev_info->ldev_control.wce == MYRS_INTELLIGENT_WRITECACHE_ENABLED)
+		mode_pg[2] |= 0x04;
+	if (ldev_info->cacheline_size) {
+		mode_pg[2] |= 0x08;
+		put_unaligned_be16(1 << ldev_info->cacheline_size,
+				   &mode_pg[14]);
+	}
+
+	scsi_sg_copy_from_buffer(scmd, modes, mode_len);
+}
+
+static int myrs_queuecommand(struct Scsi_Host *shost,
+		struct scsi_cmnd *scmd)
+{
+	struct myrs_hba *cs = shost_priv(shost);
+	struct myrs_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
+	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
+	struct scsi_device *sdev = scmd->device;
+	union myrs_sgl *hw_sge;
+	dma_addr_t sense_addr;
+	struct scatterlist *sgl;
+	unsigned long flags, timeout;
+	int nsge;
+
+	if (!scmd->device->hostdata) {
+		scmd->result = (DID_NO_CONNECT << 16);
+		scmd->scsi_done(scmd);
+		return 0;
+	}
+
+	switch (scmd->cmnd[0]) {
+	case REPORT_LUNS:
+		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
+					0x20, 0x0);
+		scmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
+		scmd->scsi_done(scmd);
+		return 0;
+	case MODE_SENSE:
+		if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
+			struct myrs_ldev_info *ldev_info = sdev->hostdata;
+
+			if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
+			    (scmd->cmnd[2] & 0x3F) != 0x08) {
+				/* Illegal request, invalid field in CDB */
+				scsi_build_sense_buffer(0, scmd->sense_buffer,
+					ILLEGAL_REQUEST, 0x24, 0);
+				scmd->result = (DRIVER_SENSE << 24) |
+					SAM_STAT_CHECK_CONDITION;
+			} else {
+				myrs_mode_sense(cs, scmd, ldev_info);
+				scmd->result = (DID_OK << 16);
+			}
+			scmd->scsi_done(scmd);
+			return 0;
+		}
+		break;
+	}
+
+	myrs_reset_cmd(cmd_blk);
+	cmd_blk->sense = dma_pool_alloc(cs->sense_pool, GFP_ATOMIC,
+					&sense_addr);
+	if (!cmd_blk->sense)
+		return SCSI_MLQUEUE_HOST_BUSY;
+	cmd_blk->sense_addr = sense_addr;
+
+	timeout = scmd->request->timeout;
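+	/* CDBs up to 10 bytes fit in the mailbox; longer ones use a DCDB buffer */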
+	if (scmd->cmd_len <= 10) {
+		if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
+			struct myrs_ldev_info *ldev_info = sdev->hostdata;
+
+			mbox->SCSI_10.opcode = MYRS_CMD_OP_SCSI_10;
+			mbox->SCSI_10.pdev.lun = ldev_info->lun;
+			mbox->SCSI_10.pdev.target = ldev_info->target;
+			mbox->SCSI_10.pdev.channel = ldev_info->channel;
+			mbox->SCSI_10.pdev.ctlr = 0;
+		} else {
+			mbox->SCSI_10.opcode = MYRS_CMD_OP_SCSI_10_PASSTHRU;
+			mbox->SCSI_10.pdev.lun = sdev->lun;
+			mbox->SCSI_10.pdev.target = sdev->id;
+			mbox->SCSI_10.pdev.channel = sdev->channel;
+		}
+		mbox->SCSI_10.id = scmd->request->tag + 3;
+		mbox->SCSI_10.control.dma_ctrl_to_host =
+			(scmd->sc_data_direction == DMA_FROM_DEVICE);
+		if (scmd->request->cmd_flags & REQ_FUA)
+			mbox->SCSI_10.control.fua = true;
+		mbox->SCSI_10.dma_size = scsi_bufflen(scmd);
+		mbox->SCSI_10.sense_addr = cmd_blk->sense_addr;
+		mbox->SCSI_10.sense_len = MYRS_SENSE_SIZE;
+		mbox->SCSI_10.cdb_len = scmd->cmd_len;
+		if (timeout > 60) {
+			mbox->SCSI_10.tmo.tmo_scale = MYRS_TMO_SCALE_MINUTES;
+			mbox->SCSI_10.tmo.tmo_val = timeout / 60;
+		} else {
+			mbox->SCSI_10.tmo.tmo_scale = MYRS_TMO_SCALE_SECONDS;
+			mbox->SCSI_10.tmo.tmo_val = timeout;
+		}
+		memcpy(&mbox->SCSI_10.cdb, scmd->cmnd, scmd->cmd_len);
+		hw_sge = &mbox->SCSI_10.dma_addr;
+		cmd_blk->dcdb = NULL;
+	} else {
+		dma_addr_t dcdb_dma;
+
+		cmd_blk->dcdb = dma_pool_alloc(cs->dcdb_pool, GFP_ATOMIC,
+					       &dcdb_dma);
+		if (!cmd_blk->dcdb) {
+			dma_pool_free(cs->sense_pool, cmd_blk->sense,
+				      cmd_blk->sense_addr);
+			cmd_blk->sense = NULL;
+			cmd_blk->sense_addr = 0;
+			return SCSI_MLQUEUE_HOST_BUSY;
+		}
+		cmd_blk->dcdb_dma = dcdb_dma;
+		if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
+			struct myrs_ldev_info *ldev_info = sdev->hostdata;
+
+			mbox->SCSI_255.opcode = MYRS_CMD_OP_SCSI_256;
+			mbox->SCSI_255.pdev.lun = ldev_info->lun;
+			mbox->SCSI_255.pdev.target = ldev_info->target;
+			mbox->SCSI_255.pdev.channel = ldev_info->channel;
+			mbox->SCSI_255.pdev.ctlr = 0;
+		} else {
+			mbox->SCSI_255.opcode = MYRS_CMD_OP_SCSI_255_PASSTHRU;
+			mbox->SCSI_255.pdev.lun = sdev->lun;
+			mbox->SCSI_255.pdev.target = sdev->id;
+			mbox->SCSI_255.pdev.channel = sdev->channel;
+		}
+		mbox->SCSI_255.id = scmd->request->tag + 3;
+		mbox->SCSI_255.control.dma_ctrl_to_host =
+			(scmd->sc_data_direction == DMA_FROM_DEVICE);
+		if (scmd->request->cmd_flags & REQ_FUA)
+			mbox->SCSI_255.control.fua = true;
+		mbox->SCSI_255.dma_size = scsi_bufflen(scmd);
+		mbox->SCSI_255.sense_addr = cmd_blk->sense_addr;
+		mbox->SCSI_255.sense_len = MYRS_SENSE_SIZE;
+		mbox->SCSI_255.cdb_len = scmd->cmd_len;
+		mbox->SCSI_255.cdb_addr = cmd_blk->dcdb_dma;
+		if (timeout > 60) {
+			mbox->SCSI_255.tmo.tmo_scale = MYRS_TMO_SCALE_MINUTES;
+			mbox->SCSI_255.tmo.tmo_val = timeout / 60;
+		} else {
+			mbox->SCSI_255.tmo.tmo_scale = MYRS_TMO_SCALE_SECONDS;
+			mbox->SCSI_255.tmo.tmo_val = timeout;
+		}
+		memcpy(cmd_blk->dcdb, scmd->cmnd, scmd->cmd_len);
+		hw_sge = &mbox->SCSI_255.dma_addr;
+	}
+	if (scmd->sc_data_direction == DMA_NONE)
+		goto submit;
+	nsge = scsi_dma_map(scmd);
+	if (nsge == 1) {
+		sgl = scsi_sglist(scmd);
+		hw_sge->sge[0].sge_addr = (u64)sg_dma_address(sgl);
+		hw_sge->sge[0].sge_count = (u64)sg_dma_len(sgl);
+	} else {
+		struct myrs_sge *hw_sgl;
+		dma_addr_t hw_sgl_addr;
+		int i;
+
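+		/* Only two SG entries fit in the mailbox; chain longer lists from the SG pool */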
+		if (nsge > 2) {
+			hw_sgl = dma_pool_alloc(cs->sg_pool, GFP_ATOMIC,
+						&hw_sgl_addr);
+			if (WARN_ON(!hw_sgl)) {
+				if (cmd_blk->dcdb) {
+					dma_pool_free(cs->dcdb_pool,
+						      cmd_blk->dcdb,
+						      cmd_blk->dcdb_dma);
+					cmd_blk->dcdb = NULL;
+					cmd_blk->dcdb_dma = 0;
+				}
+				dma_pool_free(cs->sense_pool,
+					      cmd_blk->sense,
+					      cmd_blk->sense_addr);
+				cmd_blk->sense = NULL;
+				cmd_blk->sense_addr = 0;
+				return SCSI_MLQUEUE_HOST_BUSY;
+			}
+			cmd_blk->sgl = hw_sgl;
+			cmd_blk->sgl_addr = hw_sgl_addr;
+			if (scmd->cmd_len <= 10)
+				mbox->SCSI_10.control.add_sge_mem = true;
+			else
+				mbox->SCSI_255.control.add_sge_mem = true;
+			hw_sge->ext.sge0_len = nsge;
+			hw_sge->ext.sge0_addr = cmd_blk->sgl_addr;
+		} else {
+			hw_sgl = hw_sge->sge;
+		}
+
+		scsi_for_each_sg(scmd, sgl, nsge, i) {
+			if (WARN_ON(!hw_sgl)) {
+				scsi_dma_unmap(scmd);
+				scmd->result = (DID_ERROR << 16);
+				scmd->scsi_done(scmd);
+				return 0;
+			}
+			hw_sgl->sge_addr = (u64)sg_dma_address(sgl);
+			hw_sgl->sge_count = (u64)sg_dma_len(sgl);
+			hw_sgl++;
+		}
+	}
+submit:
+	spin_lock_irqsave(&cs->queue_lock, flags);
+	myrs_qcmd(cs, cmd_blk);
+	spin_unlock_irqrestore(&cs->queue_lock, flags);
+
+	return 0;
+}
+
+static unsigned short myrs_translate_ldev(struct myrs_hba *cs,
+		struct scsi_device *sdev)
+{
+	unsigned short ldev_num;
+	unsigned int chan_offset =
+		sdev->channel - cs->ctlr_info->physchan_present;
+
+	ldev_num = sdev->id + chan_offset * sdev->host->max_id;
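+	/* e.g. physchan_present = 2, max_id = 16: channel 3, id 5 maps to ldev 21 */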
+
+	return ldev_num;
+}
+
+static int myrs_slave_alloc(struct scsi_device *sdev)
+{
+	struct myrs_hba *cs = shost_priv(sdev->host);
+	unsigned char status;
+
+	if (sdev->channel > sdev->host->max_channel)
+		return 0;
+
+	if (sdev->channel >= cs->ctlr_info->physchan_present) {
+		struct myrs_ldev_info *ldev_info;
+		unsigned short ldev_num;
+
+		if (sdev->lun > 0)
+			return -ENXIO;
+
+		ldev_num = myrs_translate_ldev(cs, sdev);
+
+		ldev_info = kzalloc(sizeof(*ldev_info), GFP_KERNEL|GFP_DMA);
+		if (!ldev_info)
+			return -ENOMEM;
+
+		status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
+		if (status != MYRS_STATUS_SUCCESS) {
+			sdev->hostdata = NULL;
+			kfree(ldev_info);
+		} else {
+			enum raid_level level;
+
+			dev_dbg(&sdev->sdev_gendev,
+				"Logical device mapping %d:%d:%d -> %d\n",
+				ldev_info->channel, ldev_info->target,
+				ldev_info->lun, ldev_info->ldev_num);
+
+			sdev->hostdata = ldev_info;
+			switch (ldev_info->raid_level) {
+			case MYRS_RAID_LEVEL0:
+				level = RAID_LEVEL_LINEAR;
+				break;
+			case MYRS_RAID_LEVEL1:
+				level = RAID_LEVEL_1;
+				break;
+			case MYRS_RAID_LEVEL3:
+			case MYRS_RAID_LEVEL3F:
+			case MYRS_RAID_LEVEL3L:
+				level = RAID_LEVEL_3;
+				break;
+			case MYRS_RAID_LEVEL5:
+			case MYRS_RAID_LEVEL5L:
+				level = RAID_LEVEL_5;
+				break;
+			case MYRS_RAID_LEVEL6:
+				level = RAID_LEVEL_6;
+				break;
+			case MYRS_RAID_LEVELE:
+			case MYRS_RAID_NEWSPAN:
+			case MYRS_RAID_SPAN:
+				level = RAID_LEVEL_LINEAR;
+				break;
+			case MYRS_RAID_JBOD:
+				level = RAID_LEVEL_JBOD;
+				break;
+			default:
+				level = RAID_LEVEL_UNKNOWN;
+				break;
+			}
+			raid_set_level(myrs_raid_template,
+				       &sdev->sdev_gendev, level);
+			if (ldev_info->dev_state != MYRS_DEVICE_ONLINE) {
+				const char *name;
+
+				name = myrs_devstate_name(ldev_info->dev_state);
+				sdev_printk(KERN_DEBUG, sdev,
+					    "logical device in state %s\n",
+					    name ? name : "Invalid");
+			}
+		}
+	} else {
+		struct myrs_pdev_info *pdev_info;
+
+		pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA);
+		if (!pdev_info)
+			return -ENOMEM;
+
+		status = myrs_get_pdev_info(cs, sdev->channel,
+					    sdev->id, sdev->lun,
+					    pdev_info);
+		if (status != MYRS_STATUS_SUCCESS) {
+			sdev->hostdata = NULL;
+			kfree(pdev_info);
+			return -ENXIO;
+		}
+		sdev->hostdata = pdev_info;
+	}
+	return 0;
+}
+
+static int myrs_slave_configure(struct scsi_device *sdev)
+{
+	struct myrs_hba *cs = shost_priv(sdev->host);
+	struct myrs_ldev_info *ldev_info;
+
+	if (sdev->channel > sdev->host->max_channel)
+		return -ENXIO;
+
+	if (sdev->channel < cs->ctlr_info->physchan_present) {
+		/* Skip HBA device */
+		if (sdev->type == TYPE_RAID)
+			return -ENXIO;
+		sdev->no_uld_attach = 1;
+		return 0;
+	}
+	if (sdev->lun != 0)
+		return -ENXIO;
+
+	ldev_info = sdev->hostdata;
+	if (!ldev_info)
+		return -ENXIO;
+	if (ldev_info->ldev_control.wce == MYRS_WRITECACHE_ENABLED ||
+	    ldev_info->ldev_control.wce == MYRS_INTELLIGENT_WRITECACHE_ENABLED)
+		sdev->wce_default_on = 1;
+	sdev->tagged_supported = 1;
+	return 0;
+}
+
+static void myrs_slave_destroy(struct scsi_device *sdev)
+{
+	kfree(sdev->hostdata);
+}
+
+struct scsi_host_template myrs_template = {
+	.module			= THIS_MODULE,
+	.name			= "DAC960",
+	.proc_name		= "myrs",
+	.queuecommand		= myrs_queuecommand,
+	.eh_host_reset_handler	= myrs_host_reset,
+	.slave_alloc		= myrs_slave_alloc,
+	.slave_configure	= myrs_slave_configure,
+	.slave_destroy		= myrs_slave_destroy,
+	.cmd_size		= sizeof(struct myrs_cmdblk),
+	.shost_attrs		= myrs_shost_attrs,
+	.sdev_attrs		= myrs_sdev_attrs,
+	.this_id		= -1,
+};
+
+static struct myrs_hba *myrs_alloc_host(struct pci_dev *pdev,
+		const struct pci_device_id *entry)
+{
+	struct Scsi_Host *shost;
+	struct myrs_hba *cs;
+
+	shost = scsi_host_alloc(&myrs_template, sizeof(struct myrs_hba));
+	if (!shost)
+		return NULL;
+
+	shost->max_cmd_len = 16;
+	shost->max_lun = 256;
+	cs = shost_priv(shost);
+	mutex_init(&cs->dcmd_mutex);
+	mutex_init(&cs->cinfo_mutex);
+	cs->host = shost;
+
+	return cs;
+}
+
+/*
+ * RAID template functions
+ */
+
+/**
+ * myrs_is_raid - return boolean indicating device is raid volume
+ * @dev: the device struct object
+ */
+static int
+myrs_is_raid(struct device *dev)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct myrs_hba *cs = shost_priv(sdev->host);
+
+	return (sdev->channel >= cs->ctlr_info->physchan_present) ? 1 : 0;
+}
+
+/**
+ * myrs_get_resync - get raid volume resync percent complete
+ * @dev: the device struct object
+ */
+static void
+myrs_get_resync(struct device *dev)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct myrs_hba *cs = shost_priv(sdev->host);
+	struct myrs_ldev_info *ldev_info = sdev->hostdata;
+	u64 percent_complete = 0;
+	u8 status;
+
+	if (sdev->channel < cs->ctlr_info->physchan_present || !ldev_info)
+		return;
+	if (ldev_info->rbld_active) {
+		unsigned short ldev_num = ldev_info->ldev_num;
+
+		status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
+		if (status == MYRS_STATUS_SUCCESS) {
+			percent_complete = ldev_info->rbld_lba * 100;
+			do_div(percent_complete, ldev_info->cfg_devsize);
+		}
+	}
+	raid_set_resync(myrs_raid_template, dev, percent_complete);
+}
+
+/**
+ * myrs_get_state - get raid volume status
+ * @dev: the device struct object
+ */
+static void
+myrs_get_state(struct device *dev)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct myrs_hba *cs = shost_priv(sdev->host);
+	struct myrs_ldev_info *ldev_info = sdev->hostdata;
+	enum raid_state state = RAID_STATE_UNKNOWN;
+
+	if (sdev->channel < cs->ctlr_info->physchan_present || !ldev_info)
+		state = RAID_STATE_UNKNOWN;
+	else {
+		switch (ldev_info->dev_state) {
+		case MYRS_DEVICE_ONLINE:
+			state = RAID_STATE_ACTIVE;
+			break;
+		case MYRS_DEVICE_SUSPECTED_CRITICAL:
+		case MYRS_DEVICE_CRITICAL:
+			state = RAID_STATE_DEGRADED;
+			break;
+		case MYRS_DEVICE_REBUILD:
+			state = RAID_STATE_RESYNCING;
+			break;
+		case MYRS_DEVICE_UNCONFIGURED:
+		case MYRS_DEVICE_INVALID_STATE:
+			state = RAID_STATE_UNKNOWN;
+			break;
+		default:
+			state = RAID_STATE_OFFLINE;
+		}
+	}
+	raid_set_state(myrs_raid_template, dev, state);
+}
+
+struct raid_function_template myrs_raid_functions = {
+	.cookie		= &myrs_template,
+	.is_raid	= myrs_is_raid,
+	.get_resync	= myrs_get_resync,
+	.get_state	= myrs_get_state,
+};
+
+/*
+ * PCI interface functions
+ */
+void myrs_flush_cache(struct myrs_hba *cs)
+{
+	myrs_dev_op(cs, MYRS_IOCTL_FLUSH_DEVICE_DATA, MYRS_RAID_CONTROLLER);
+}
+
+static void myrs_handle_scsi(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk,
+		struct scsi_cmnd *scmd)
+{
+	unsigned char status;
+
+	if (!cmd_blk)
+		return;
+
+	scsi_dma_unmap(scmd);
+	status = cmd_blk->status;
+	if (cmd_blk->sense) {
+		if (status == MYRS_STATUS_FAILED && cmd_blk->sense_len) {
+			unsigned int sense_len = SCSI_SENSE_BUFFERSIZE;
+
+			if (sense_len > cmd_blk->sense_len)
+				sense_len = cmd_blk->sense_len;
+			memcpy(scmd->sense_buffer, cmd_blk->sense, sense_len);
+		}
+		dma_pool_free(cs->sense_pool, cmd_blk->sense,
+			      cmd_blk->sense_addr);
+		cmd_blk->sense = NULL;
+		cmd_blk->sense_addr = 0;
+	}
+	if (cmd_blk->dcdb) {
+		dma_pool_free(cs->dcdb_pool, cmd_blk->dcdb,
+			      cmd_blk->dcdb_dma);
+		cmd_blk->dcdb = NULL;
+		cmd_blk->dcdb_dma = 0;
+	}
+	if (cmd_blk->sgl) {
+		dma_pool_free(cs->sg_pool, cmd_blk->sgl,
+			      cmd_blk->sgl_addr);
+		cmd_blk->sgl = NULL;
+		cmd_blk->sgl_addr = 0;
+	}
+	if (cmd_blk->residual)
+		scsi_set_resid(scmd, cmd_blk->residual);
+	if (status == MYRS_STATUS_DEVICE_NON_RESPONSIVE ||
+	    status == MYRS_STATUS_DEVICE_NON_RESPONSIVE2)
+		scmd->result = (DID_BAD_TARGET << 16);
+	else
+		scmd->result = (DID_OK << 16) | status;
+	scmd->scsi_done(scmd);
+}
+
+static void myrs_handle_cmdblk(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk)
+{
+	if (!cmd_blk)
+		return;
+
+	if (cmd_blk->complete) {
+		complete(cmd_blk->complete);
+		cmd_blk->complete = NULL;
+	}
+}
+
+static void myrs_monitor(struct work_struct *work)
+{
+	struct myrs_hba *cs = container_of(work, struct myrs_hba,
+					   monitor_work.work);
+	struct Scsi_Host *shost = cs->host;
+	struct myrs_ctlr_info *info = cs->ctlr_info;
+	unsigned int epoch = cs->fwstat_buf->epoch;
+	unsigned long interval = MYRS_PRIMARY_MONITOR_INTERVAL;
+	unsigned char status;
+
+	dev_dbg(&shost->shost_gendev, "monitor tick\n");
+
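+	/* Refresh the firmware health status buffer for this tick. */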
+	status = myrs_get_fwstatus(cs);
+
+	if (cs->needs_update) {
+		cs->needs_update = false;
+		mutex_lock(&cs->cinfo_mutex);
+		status = myrs_get_ctlr_info(cs);
+		mutex_unlock(&cs->cinfo_mutex);
+	}
+	if (cs->fwstat_buf->next_evseq - cs->next_evseq > 0) {
+		status = myrs_get_event(cs, cs->next_evseq,
+					cs->event_buf);
+		if (status == MYRS_STATUS_SUCCESS) {
+			myrs_log_event(cs, cs->event_buf);
+			cs->next_evseq++;
+			interval = 1;
+		}
+	}
+
+	if (time_after(jiffies, cs->secondary_monitor_time
+		       + MYRS_SECONDARY_MONITOR_INTERVAL))
+		cs->secondary_monitor_time = jiffies;
+
+	if (info->bg_init_active +
+	    info->ldev_init_active +
+	    info->pdev_init_active +
+	    info->cc_active +
+	    info->rbld_active +
+	    info->exp_active != 0) {
+		struct scsi_device *sdev;
+
+		shost_for_each_device(sdev, shost) {
+			struct myrs_ldev_info *ldev_info;
+			int ldev_num;
+
+			if (sdev->channel < info->physchan_present)
+				continue;
+			ldev_info = sdev->hostdata;
+			if (!ldev_info)
+				continue;
+			ldev_num = ldev_info->ldev_num;
+			myrs_get_ldev_info(cs, ldev_num, ldev_info);
+		}
+		cs->needs_update = true;
+	}
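+	/*
+	 * No new epoch or events and no controller update due yet:
+	 * drop back to the slow secondary polling interval.
+	 */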
+	if (epoch == cs->epoch &&
+	    cs->fwstat_buf->next_evseq == cs->next_evseq &&
+	    (cs->needs_update == false ||
+	     time_before(jiffies, cs->primary_monitor_time
+			 + MYRS_PRIMARY_MONITOR_INTERVAL))) {
+		interval = MYRS_SECONDARY_MONITOR_INTERVAL;
+	}
+
+	if (interval > 1)
+		cs->primary_monitor_time = jiffies;
+	queue_delayed_work(cs->work_q, &cs->monitor_work, interval);
+}
+
+static bool myrs_create_mempools(struct pci_dev *pdev, struct myrs_hba *cs)
+{
+	struct Scsi_Host *shost = cs->host;
+	size_t elem_size, elem_align;
+
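+	/* One scatter/gather list per command, sized to sg_tablesize. */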
+	elem_align = sizeof(struct myrs_sge);
+	elem_size = shost->sg_tablesize * elem_align;
+	cs->sg_pool = dma_pool_create("myrs_sg", &pdev->dev,
+				      elem_size, elem_align, 0);
+	if (cs->sg_pool == NULL) {
+		shost_printk(KERN_ERR, shost,
+			     "Failed to allocate SG pool\n");
+		return false;
+	}
+
+	cs->sense_pool = dma_pool_create("myrs_sense", &pdev->dev,
+					 MYRS_SENSE_SIZE, sizeof(int), 0);
+	if (cs->sense_pool == NULL) {
+		dma_pool_destroy(cs->sg_pool);
+		cs->sg_pool = NULL;
+		shost_printk(KERN_ERR, shost,
+			     "Failed to allocate sense data pool\n");
+		return false;
+	}
+
+	cs->dcdb_pool = dma_pool_create("myrs_dcdb", &pdev->dev,
+					MYRS_DCDB_SIZE,
+					sizeof(unsigned char), 0);
+	if (!cs->dcdb_pool) {
+		dma_pool_destroy(cs->sg_pool);
+		cs->sg_pool = NULL;
+		dma_pool_destroy(cs->sense_pool);
+		cs->sense_pool = NULL;
+		shost_printk(KERN_ERR, shost,
+			     "Failed to allocate DCDB pool\n");
+		return false;
+	}
+
+	snprintf(cs->work_q_name, sizeof(cs->work_q_name),
+		 "myrs_wq_%d", shost->host_no);
+	cs->work_q = create_singlethread_workqueue(cs->work_q_name);
+	if (!cs->work_q) {
+		dma_pool_destroy(cs->dcdb_pool);
+		cs->dcdb_pool = NULL;
+		dma_pool_destroy(cs->sg_pool);
+		cs->sg_pool = NULL;
+		dma_pool_destroy(cs->sense_pool);
+		cs->sense_pool = NULL;
+		shost_printk(KERN_ERR, shost,
+			     "Failed to create workqueue\n");
+		return false;
+	}
+
+	/* Initialize the Monitoring Timer. */
+	INIT_DELAYED_WORK(&cs->monitor_work, myrs_monitor);
+	queue_delayed_work(cs->work_q, &cs->monitor_work, 1);
+
+	return true;
+}
+
+static void myrs_destroy_mempools(struct myrs_hba *cs)
+{
+	cancel_delayed_work_sync(&cs->monitor_work);
+	destroy_workqueue(cs->work_q);
+
+	dma_pool_destroy(cs->sg_pool);
+	dma_pool_destroy(cs->dcdb_pool);
+	dma_pool_destroy(cs->sense_pool);
+}
+
+static void myrs_unmap(struct myrs_hba *cs)
+{
+	kfree(cs->event_buf);
+	kfree(cs->ctlr_info);
+	if (cs->fwstat_buf) {
+		dma_free_coherent(&cs->pdev->dev, sizeof(struct myrs_fwstat),
+				  cs->fwstat_buf, cs->fwstat_addr);
+		cs->fwstat_buf = NULL;
+	}
+	if (cs->first_stat_mbox) {
+		dma_free_coherent(&cs->pdev->dev, cs->stat_mbox_size,
+				  cs->first_stat_mbox, cs->stat_mbox_addr);
+		cs->first_stat_mbox = NULL;
+	}
+	if (cs->first_cmd_mbox) {
+		dma_free_coherent(&cs->pdev->dev, cs->cmd_mbox_size,
+				  cs->first_cmd_mbox, cs->cmd_mbox_addr);
+		cs->first_cmd_mbox = NULL;
+	}
+}
+
+static void myrs_cleanup(struct myrs_hba *cs)
+{
+	struct pci_dev *pdev = cs->pdev;
+
+	/* Free the memory mailbox, status, and related structures */
+	myrs_unmap(cs);
+
+	if (cs->mmio_base) {
+		cs->disable_intr(cs);
+		iounmap(cs->mmio_base);
+		cs->mmio_base = NULL;
+	}
+	if (cs->irq)
+		free_irq(cs->irq, cs);
+	if (cs->io_addr)
+		release_region(cs->io_addr, 0x80);
+	pci_set_drvdata(pdev, NULL);
+	pci_disable_device(pdev);
+	scsi_host_put(cs->host);
+}
+
+static struct myrs_hba *myrs_detect(struct pci_dev *pdev,
+		const struct pci_device_id *entry)
+{
+	struct myrs_privdata *privdata =
+		(struct myrs_privdata *)entry->driver_data;
+	irq_handler_t irq_handler = privdata->irq_handler;
+	unsigned int mmio_size = privdata->mmio_size;
+	struct myrs_hba *cs = NULL;
+
+	cs = myrs_alloc_host(pdev, entry);
+	if (!cs) {
+		dev_err(&pdev->dev, "Unable to allocate Controller\n");
+		return NULL;
+	}
+	cs->pdev = pdev;
+
+	if (pci_enable_device(pdev))
+		goto Failure;
+
+	cs->pci_addr = pci_resource_start(pdev, 0);
+
+	pci_set_drvdata(pdev, cs);
+	spin_lock_init(&cs->queue_lock);
+	/* Map the Controller Register Window. */
+	if (mmio_size < PAGE_SIZE)
+		mmio_size = PAGE_SIZE;
+	cs->mmio_base = ioremap_nocache(cs->pci_addr & PAGE_MASK, mmio_size);
+	if (cs->mmio_base == NULL) {
+		dev_err(&pdev->dev,
+			"Unable to map Controller Register Window\n");
+		goto Failure;
+	}
+
+	cs->io_base = cs->mmio_base + (cs->pci_addr & ~PAGE_MASK);
+	if (privdata->hw_init(pdev, cs, cs->io_base))
+		goto Failure;
+
+	/* Acquire shared access to the IRQ Channel. */
+	if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrs", cs) < 0) {
+		dev_err(&pdev->dev,
+			"Unable to acquire IRQ Channel %d\n", pdev->irq);
+		goto Failure;
+	}
+	cs->irq = pdev->irq;
+	return cs;
+
+Failure:
+	dev_err(&pdev->dev,
+		"Failed to initialize Controller\n");
+	myrs_cleanup(cs);
+	return NULL;
+}
+
+/*
+ * myrs_err_status - report controller BIOS messages passed through
+ * the Error Status Register when the driver performs the BIOS
+ * handshaking. Returns true for fatal errors and false otherwise.
+ */
+
+static bool myrs_err_status(struct myrs_hba *cs, unsigned char status,
+		unsigned char parm0, unsigned char parm1)
+{
+	struct pci_dev *pdev = cs->pdev;
+
+	switch (status) {
+	case 0x00:
+		dev_info(&pdev->dev,
+			 "Physical Device %d:%d Not Responding\n",
+			 parm1, parm0);
+		break;
+	case 0x08:
+		dev_notice(&pdev->dev, "Spinning Up Drives\n");
+		break;
+	case 0x30:
+		dev_notice(&pdev->dev, "Configuration Checksum Error\n");
+		break;
+	case 0x60:
+		dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n");
+		break;
+	case 0x70:
+		dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n");
+		break;
+	case 0x90:
+		dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n",
+			   parm1, parm0);
+		break;
+	case 0xA0:
+		dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n");
+		break;
+	case 0xB0:
+		dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n");
+		break;
+	case 0xD0:
+		dev_notice(&pdev->dev, "New Controller Configuration Found\n");
+		break;
+	case 0xF0:
+		dev_err(&pdev->dev, "Fatal Memory Parity Error\n");
+		return true;
+	default:
+		dev_err(&pdev->dev, "Unknown Initialization Error %02X\n",
+			status);
+		return true;
+	}
+	return false;
+}
+
+/*
+ * Hardware-specific functions
+ */
+
+/*
+ * DAC960 GEM Series Controllers.
+ */
+
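+/*
+ * GEM controllers keep the doorbell and interrupt mask bits in the
+ * most significant byte of each little-endian 32-bit register, hence
+ * the << 24 shifts in the helpers below.
+ */
+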
+static inline void DAC960_GEM_hw_mbox_new_cmd(void __iomem *base)
+{
+	__le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_NEW_CMD << 24);
+
+	writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
+}
+
+static inline void DAC960_GEM_ack_hw_mbox_status(void __iomem *base)
+{
+	__le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_ACK_STS << 24);
+
+	writel(val, base + DAC960_GEM_IDB_CLEAR_OFFSET);
+}
+
+static inline void DAC960_GEM_gen_intr(void __iomem *base)
+{
+	__le32 val = cpu_to_le32(DAC960_GEM_IDB_GEN_IRQ << 24);
+
+	writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
+}
+
+static inline void DAC960_GEM_reset_ctrl(void __iomem *base)
+{
+	__le32 val = cpu_to_le32(DAC960_GEM_IDB_CTRL_RESET << 24);
+
+	writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
+}
+
+static inline void DAC960_GEM_mem_mbox_new_cmd(void __iomem *base)
+{
+	__le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_NEW_CMD << 24);
+
+	writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
+}
+
+static inline bool DAC960_GEM_hw_mbox_is_full(void __iomem *base)
+{
+	__le32 val;
+
+	val = readl(base + DAC960_GEM_IDB_READ_OFFSET);
+	return (le32_to_cpu(val) >> 24) & DAC960_GEM_IDB_HWMBOX_FULL;
+}
+
+static inline bool DAC960_GEM_init_in_progress(void __iomem *base)
+{
+	__le32 val;
+
+	val = readl(base + DAC960_GEM_IDB_READ_OFFSET);
+	return (le32_to_cpu(val) >> 24) & DAC960_GEM_IDB_INIT_IN_PROGRESS;
+}
+
+static inline void DAC960_GEM_ack_hw_mbox_intr(void __iomem *base)
+{
+	__le32 val = cpu_to_le32(DAC960_GEM_ODB_HWMBOX_ACK_IRQ << 24);
+
+	writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET);
+}
+
+static inline void DAC960_GEM_ack_mem_mbox_intr(void __iomem *base)
+{
+	__le32 val = cpu_to_le32(DAC960_GEM_ODB_MMBOX_ACK_IRQ << 24);
+
+	writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET);
+}
+
+static inline void DAC960_GEM_ack_intr(void __iomem *base)
+{
+	__le32 val = cpu_to_le32((DAC960_GEM_ODB_HWMBOX_ACK_IRQ |
+				  DAC960_GEM_ODB_MMBOX_ACK_IRQ) << 24);
+
+	writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET);
+}
+
+static inline bool DAC960_GEM_hw_mbox_status_available(void __iomem *base)
+{
+	__le32 val;
+
+	val = readl(base + DAC960_GEM_ODB_READ_OFFSET);
+	return (le32_to_cpu(val) >> 24) & DAC960_GEM_ODB_HWMBOX_STS_AVAIL;
+}
+
+static inline bool DAC960_GEM_mem_mbox_status_available(void __iomem *base)
+{
+	__le32 val;
+
+	val = readl(base + DAC960_GEM_ODB_READ_OFFSET);
+	return (le32_to_cpu(val) >> 24) & DAC960_GEM_ODB_MMBOX_STS_AVAIL;
+}
+
+static inline void DAC960_GEM_enable_intr(void __iomem *base)
+{
+	__le32 val = cpu_to_le32((DAC960_GEM_IRQMASK_HWMBOX_IRQ |
+				  DAC960_GEM_IRQMASK_MMBOX_IRQ) << 24);
+	writel(val, base + DAC960_GEM_IRQMASK_CLEAR_OFFSET);
+}
+
+static inline void DAC960_GEM_disable_intr(void __iomem *base)
+{
+	__le32 val = 0;
+
+	writel(val, base + DAC960_GEM_IRQMASK_READ_OFFSET);
+}
+
+static inline bool DAC960_GEM_intr_enabled(void __iomem *base)
+{
+	__le32 val;
+
+	val = readl(base + DAC960_GEM_IRQMASK_READ_OFFSET);
+	return !((le32_to_cpu(val) >> 24) &
+		 (DAC960_GEM_IRQMASK_HWMBOX_IRQ |
+		  DAC960_GEM_IRQMASK_MMBOX_IRQ));
+}
+
+static inline void DAC960_GEM_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
+		union myrs_cmd_mbox *mbox)
+{
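+	/*
+	 * Word 0 carries the command id and opcode, so fill words 1-15
+	 * first and publish word 0 last; the firmware then never sees a
+	 * half-written mailbox.
+	 */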
+	memcpy(&mem_mbox->words[1], &mbox->words[1],
+	       sizeof(union myrs_cmd_mbox) - sizeof(unsigned int));
+	/* Barrier to avoid reordering */
+	wmb();
+	mem_mbox->words[0] = mbox->words[0];
+	/* Barrier to force PCI access */
+	mb();
+}
+
+static inline void DAC960_GEM_write_hw_mbox(void __iomem *base,
+		dma_addr_t cmd_mbox_addr)
+{
+	dma_addr_writeql(cmd_mbox_addr, base + DAC960_GEM_CMDMBX_OFFSET);
+}
+
+static inline unsigned short DAC960_GEM_read_cmd_ident(void __iomem *base)
+{
+	return readw(base + DAC960_GEM_CMDSTS_OFFSET);
+}
+
+static inline unsigned char DAC960_GEM_read_cmd_status(void __iomem *base)
+{
+	return readw(base + DAC960_GEM_CMDSTS_OFFSET + 2);
+}
+
+static inline bool
+DAC960_GEM_read_error_status(void __iomem *base, unsigned char *error,
+		unsigned char *param0, unsigned char *param1)
+{
+	__le32 val;
+
+	val = readl(base + DAC960_GEM_ERRSTS_READ_OFFSET);
+	if (!((le32_to_cpu(val) >> 24) & DAC960_GEM_ERRSTS_PENDING))
+		return false;
+	*error = val & ~(DAC960_GEM_ERRSTS_PENDING << 24);
+	*param0 = readb(base + DAC960_GEM_CMDMBX_OFFSET + 0);
+	*param1 = readb(base + DAC960_GEM_CMDMBX_OFFSET + 1);
+	writel(0x03000000, base + DAC960_GEM_ERRSTS_CLEAR_OFFSET);
+	return true;
+}
+
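+/*
+ * Post a command mailbox through the hardware mailbox and busy-wait
+ * for its status; only used while bringing up the memory mailbox
+ * interface.
+ */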
+static inline unsigned char
+DAC960_GEM_mbox_init(void __iomem *base, dma_addr_t mbox_addr)
+{
+	unsigned char status;
+
+	while (DAC960_GEM_hw_mbox_is_full(base))
+		udelay(1);
+	DAC960_GEM_write_hw_mbox(base, mbox_addr);
+	DAC960_GEM_hw_mbox_new_cmd(base);
+	while (!DAC960_GEM_hw_mbox_status_available(base))
+		udelay(1);
+	status = DAC960_GEM_read_cmd_status(base);
+	DAC960_GEM_ack_hw_mbox_intr(base);
+	DAC960_GEM_ack_hw_mbox_status(base);
+
+	return status;
+}
+
+static int DAC960_GEM_hw_init(struct pci_dev *pdev,
+		struct myrs_hba *cs, void __iomem *base)
+{
+	int timeout = 0;
+	unsigned char status, parm0, parm1;
+
+	DAC960_GEM_disable_intr(base);
+	DAC960_GEM_ack_hw_mbox_status(base);
+	udelay(1000);
+	while (DAC960_GEM_init_in_progress(base) &&
+	       timeout < MYRS_MAILBOX_TIMEOUT) {
+		if (DAC960_GEM_read_error_status(base, &status,
+						 &parm0, &parm1) &&
+		    myrs_err_status(cs, status, parm0, parm1))
+			return -EIO;
+		udelay(10);
+		timeout++;
+	}
+	if (timeout == MYRS_MAILBOX_TIMEOUT) {
+		dev_err(&pdev->dev,
+			"Timeout waiting for Controller Initialisation\n");
+		return -ETIMEDOUT;
+	}
+	if (!myrs_enable_mmio_mbox(cs, DAC960_GEM_mbox_init)) {
+		dev_err(&pdev->dev,
+			"Unable to Enable Memory Mailbox Interface\n");
+		DAC960_GEM_reset_ctrl(base);
+		return -EAGAIN;
+	}
+	DAC960_GEM_enable_intr(base);
+	cs->write_cmd_mbox = DAC960_GEM_write_cmd_mbox;
+	cs->get_cmd_mbox = DAC960_GEM_mem_mbox_new_cmd;
+	cs->disable_intr = DAC960_GEM_disable_intr;
+	cs->reset = DAC960_GEM_reset_ctrl;
+	return 0;
+}
+
+static irqreturn_t DAC960_GEM_intr_handler(int irq, void *arg)
+{
+	struct myrs_hba *cs = arg;
+	void __iomem *base = cs->io_base;
+	struct myrs_stat_mbox *next_stat_mbox;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cs->queue_lock, flags);
+	DAC960_GEM_ack_intr(base);
+	next_stat_mbox = cs->next_stat_mbox;
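+	/*
+	 * Walk the status mailbox ring: a non-zero id marks a completed
+	 * command. Ids 1 and 2 are the internal DCMD/MCMD tags; anything
+	 * else maps back to a SCSI command tag, offset by 3.
+	 */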
+	while (next_stat_mbox->id > 0) {
+		unsigned short id = next_stat_mbox->id;
+		struct scsi_cmnd *scmd = NULL;
+		struct myrs_cmdblk *cmd_blk = NULL;
+
+		if (id == MYRS_DCMD_TAG)
+			cmd_blk = &cs->dcmd_blk;
+		else if (id == MYRS_MCMD_TAG)
+			cmd_blk = &cs->mcmd_blk;
+		else {
+			scmd = scsi_host_find_tag(cs->host, id - 3);
+			if (scmd)
+				cmd_blk = scsi_cmd_priv(scmd);
+		}
+		if (cmd_blk) {
+			cmd_blk->status = next_stat_mbox->status;
+			cmd_blk->sense_len = next_stat_mbox->sense_len;
+			cmd_blk->residual = next_stat_mbox->residual;
+		} else
+			dev_err(&cs->pdev->dev,
+				"Unhandled command completion %d\n", id);
+
+		memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox));
+		if (++next_stat_mbox > cs->last_stat_mbox)
+			next_stat_mbox = cs->first_stat_mbox;
+
+		if (cmd_blk) {
+			if (id < 3)
+				myrs_handle_cmdblk(cs, cmd_blk);
+			else
+				myrs_handle_scsi(cs, cmd_blk, scmd);
+		}
+	}
+	cs->next_stat_mbox = next_stat_mbox;
+	spin_unlock_irqrestore(&cs->queue_lock, flags);
+	return IRQ_HANDLED;
+}
+
+struct myrs_privdata DAC960_GEM_privdata = {
+	.hw_init =		DAC960_GEM_hw_init,
+	.irq_handler =		DAC960_GEM_intr_handler,
+	.mmio_size =		DAC960_GEM_mmio_size,
+};
+
+/*
+ * DAC960 BA Series Controllers.
+ */
+
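+/*
+ * The BA register layout mirrors the GEM one but uses byte-wide
+ * doorbell and mask registers, so no shifting is needed.
+ */
+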
+static inline void DAC960_BA_hw_mbox_new_cmd(void __iomem *base)
+{
+	writeb(DAC960_BA_IDB_HWMBOX_NEW_CMD, base + DAC960_BA_IDB_OFFSET);
+}
+
+static inline void DAC960_BA_ack_hw_mbox_status(void __iomem *base)
+{
+	writeb(DAC960_BA_IDB_HWMBOX_ACK_STS, base + DAC960_BA_IDB_OFFSET);
+}
+
+static inline void DAC960_BA_gen_intr(void __iomem *base)
+{
+	writeb(DAC960_BA_IDB_GEN_IRQ, base + DAC960_BA_IDB_OFFSET);
+}
+
+static inline void DAC960_BA_reset_ctrl(void __iomem *base)
+{
+	writeb(DAC960_BA_IDB_CTRL_RESET, base + DAC960_BA_IDB_OFFSET);
+}
+
+static inline void DAC960_BA_mem_mbox_new_cmd(void __iomem *base)
+{
+	writeb(DAC960_BA_IDB_MMBOX_NEW_CMD, base + DAC960_BA_IDB_OFFSET);
+}
+
+static inline bool DAC960_BA_hw_mbox_is_full(void __iomem *base)
+{
+	u8 val;
+
+	val = readb(base + DAC960_BA_IDB_OFFSET);
+	return !(val & DAC960_BA_IDB_HWMBOX_EMPTY);
+}
+
+static inline bool DAC960_BA_init_in_progress(void __iomem *base)
+{
+	u8 val;
+
+	val = readb(base + DAC960_BA_IDB_OFFSET);
+	return !(val & DAC960_BA_IDB_INIT_DONE);
+}
+
+static inline void DAC960_BA_ack_hw_mbox_intr(void __iomem *base)
+{
+	writeb(DAC960_BA_ODB_HWMBOX_ACK_IRQ, base + DAC960_BA_ODB_OFFSET);
+}
+
+static inline void DAC960_BA_ack_mem_mbox_intr(void __iomem *base)
+{
+	writeb(DAC960_BA_ODB_MMBOX_ACK_IRQ, base + DAC960_BA_ODB_OFFSET);
+}
+
+static inline void DAC960_BA_ack_intr(void __iomem *base)
+{
+	writeb(DAC960_BA_ODB_HWMBOX_ACK_IRQ | DAC960_BA_ODB_MMBOX_ACK_IRQ,
+	       base + DAC960_BA_ODB_OFFSET);
+}
+
+static inline bool DAC960_BA_hw_mbox_status_available(void __iomem *base)
+{
+	u8 val;
+
+	val = readb(base + DAC960_BA_ODB_OFFSET);
+	return val & DAC960_BA_ODB_HWMBOX_STS_AVAIL;
+}
+
+static inline bool DAC960_BA_mem_mbox_status_available(void __iomem *base)
+{
+	u8 val;
+
+	val = readb(base + DAC960_BA_ODB_OFFSET);
+	return val & DAC960_BA_ODB_MMBOX_STS_AVAIL;
+}
+
+static inline void DAC960_BA_enable_intr(void __iomem *base)
+{
+	writeb(~DAC960_BA_IRQMASK_DISABLE_IRQ, base + DAC960_BA_IRQMASK_OFFSET);
+}
+
+static inline void DAC960_BA_disable_intr(void __iomem *base)
+{
+	writeb(0xFF, base + DAC960_BA_IRQMASK_OFFSET);
+}
+
+static inline bool DAC960_BA_intr_enabled(void __iomem *base)
+{
+	u8 val;
+
+	val = readb(base + DAC960_BA_IRQMASK_OFFSET);
+	return !(val & DAC960_BA_IRQMASK_DISABLE_IRQ);
+}
+
+static inline void DAC960_BA_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
+		union myrs_cmd_mbox *mbox)
+{
+	memcpy(&mem_mbox->words[1], &mbox->words[1],
+	       sizeof(union myrs_cmd_mbox) - sizeof(unsigned int));
+	/* Barrier to avoid reordering */
+	wmb();
+	mem_mbox->words[0] = mbox->words[0];
+	/* Barrier to force PCI access */
+	mb();
+}
+
+static inline void DAC960_BA_write_hw_mbox(void __iomem *base,
+		dma_addr_t cmd_mbox_addr)
+{
+	dma_addr_writeql(cmd_mbox_addr, base + DAC960_BA_CMDMBX_OFFSET);
+}
+
+static inline unsigned short DAC960_BA_read_cmd_ident(void __iomem *base)
+{
+	return readw(base + DAC960_BA_CMDSTS_OFFSET);
+}
+
+static inline unsigned char DAC960_BA_read_cmd_status(void __iomem *base)
+{
+	return readw(base + DAC960_BA_CMDSTS_OFFSET + 2);
+}
+
+static inline bool
+DAC960_BA_read_error_status(void __iomem *base, unsigned char *error,
+		unsigned char *param0, unsigned char *param1)
+{
+	u8 val;
+
+	val = readb(base + DAC960_BA_ERRSTS_OFFSET);
+	if (!(val & DAC960_BA_ERRSTS_PENDING))
+		return false;
+	val &= ~DAC960_BA_ERRSTS_PENDING;
+	*error = val;
+	*param0 = readb(base + DAC960_BA_CMDMBX_OFFSET + 0);
+	*param1 = readb(base + DAC960_BA_CMDMBX_OFFSET + 1);
+	writeb(0xFF, base + DAC960_BA_ERRSTS_OFFSET);
+	return true;
+}
+
+static inline unsigned char
+DAC960_BA_mbox_init(void __iomem *base, dma_addr_t mbox_addr)
+{
+	unsigned char status;
+
+	while (DAC960_BA_hw_mbox_is_full(base))
+		udelay(1);
+	DAC960_BA_write_hw_mbox(base, mbox_addr);
+	DAC960_BA_hw_mbox_new_cmd(base);
+	while (!DAC960_BA_hw_mbox_status_available(base))
+		udelay(1);
+	status = DAC960_BA_read_cmd_status(base);
+	DAC960_BA_ack_hw_mbox_intr(base);
+	DAC960_BA_ack_hw_mbox_status(base);
+
+	return status;
+}
+
+static int DAC960_BA_hw_init(struct pci_dev *pdev,
+		struct myrs_hba *cs, void __iomem *base)
+{
+	int timeout = 0;
+	unsigned char status, parm0, parm1;
+
+	DAC960_BA_disable_intr(base);
+	DAC960_BA_ack_hw_mbox_status(base);
+	udelay(1000);
+	while (DAC960_BA_init_in_progress(base) &&
+	       timeout < MYRS_MAILBOX_TIMEOUT) {
+		if (DAC960_BA_read_error_status(base, &status,
+					      &parm0, &parm1) &&
+		    myrs_err_status(cs, status, parm0, parm1))
+			return -EIO;
+		udelay(10);
+		timeout++;
+	}
+	if (timeout == MYRS_MAILBOX_TIMEOUT) {
+		dev_err(&pdev->dev,
+			"Timeout waiting for Controller Initialisation\n");
+		return -ETIMEDOUT;
+	}
+	if (!myrs_enable_mmio_mbox(cs, DAC960_BA_mbox_init)) {
+		dev_err(&pdev->dev,
+			"Unable to Enable Memory Mailbox Interface\n");
+		DAC960_BA_reset_ctrl(base);
+		return -EAGAIN;
+	}
+	DAC960_BA_enable_intr(base);
+	cs->write_cmd_mbox = DAC960_BA_write_cmd_mbox;
+	cs->get_cmd_mbox = DAC960_BA_mem_mbox_new_cmd;
+	cs->disable_intr = DAC960_BA_disable_intr;
+	cs->reset = DAC960_BA_reset_ctrl;
+	return 0;
+}
+
+static irqreturn_t DAC960_BA_intr_handler(int irq, void *arg)
+{
+	struct myrs_hba *cs = arg;
+	void __iomem *base = cs->io_base;
+	struct myrs_stat_mbox *next_stat_mbox;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cs->queue_lock, flags);
+	DAC960_BA_ack_intr(base);
+	next_stat_mbox = cs->next_stat_mbox;
+	while (next_stat_mbox->id > 0) {
+		unsigned short id = next_stat_mbox->id;
+		struct scsi_cmnd *scmd = NULL;
+		struct myrs_cmdblk *cmd_blk = NULL;
+
+		if (id == MYRS_DCMD_TAG)
+			cmd_blk = &cs->dcmd_blk;
+		else if (id == MYRS_MCMD_TAG)
+			cmd_blk = &cs->mcmd_blk;
+		else {
+			scmd = scsi_host_find_tag(cs->host, id - 3);
+			if (scmd)
+				cmd_blk = scsi_cmd_priv(scmd);
+		}
+		if (cmd_blk) {
+			cmd_blk->status = next_stat_mbox->status;
+			cmd_blk->sense_len = next_stat_mbox->sense_len;
+			cmd_blk->residual = next_stat_mbox->residual;
+		} else
+			dev_err(&cs->pdev->dev,
+				"Unhandled command completion %d\n", id);
+
+		memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox));
+		if (++next_stat_mbox > cs->last_stat_mbox)
+			next_stat_mbox = cs->first_stat_mbox;
+
+		if (cmd_blk) {
+			if (id < 3)
+				myrs_handle_cmdblk(cs, cmd_blk);
+			else
+				myrs_handle_scsi(cs, cmd_blk, scmd);
+		}
+	}
+	cs->next_stat_mbox = next_stat_mbox;
+	spin_unlock_irqrestore(&cs->queue_lock, flags);
+	return IRQ_HANDLED;
+}
+
+struct myrs_privdata DAC960_BA_privdata = {
+	.hw_init =		DAC960_BA_hw_init,
+	.irq_handler =		DAC960_BA_intr_handler,
+	.mmio_size =		DAC960_BA_mmio_size,
+};
+
+/*
+ * DAC960 LP Series Controllers.
+ */
+
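+/*
+ * LP controllers are handled like the BA series, except that the
+ * inbound doorbell status bits have positive sense (HWMBOX_FULL and
+ * INIT_IN_PROGRESS instead of BA's HWMBOX_EMPTY and INIT_DONE).
+ */
+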
+static inline void DAC960_LP_hw_mbox_new_cmd(void __iomem *base)
+{
+	writeb(DAC960_LP_IDB_HWMBOX_NEW_CMD, base + DAC960_LP_IDB_OFFSET);
+}
+
+static inline void DAC960_LP_ack_hw_mbox_status(void __iomem *base)
+{
+	writeb(DAC960_LP_IDB_HWMBOX_ACK_STS, base + DAC960_LP_IDB_OFFSET);
+}
+
+static inline void DAC960_LP_gen_intr(void __iomem *base)
+{
+	writeb(DAC960_LP_IDB_GEN_IRQ, base + DAC960_LP_IDB_OFFSET);
+}
+
+static inline void DAC960_LP_reset_ctrl(void __iomem *base)
+{
+	writeb(DAC960_LP_IDB_CTRL_RESET, base + DAC960_LP_IDB_OFFSET);
+}
+
+static inline void DAC960_LP_mem_mbox_new_cmd(void __iomem *base)
+{
+	writeb(DAC960_LP_IDB_MMBOX_NEW_CMD, base + DAC960_LP_IDB_OFFSET);
+}
+
+static inline bool DAC960_LP_hw_mbox_is_full(void __iomem *base)
+{
+	u8 val;
+
+	val = readb(base + DAC960_LP_IDB_OFFSET);
+	return val & DAC960_LP_IDB_HWMBOX_FULL;
+}
+
+static inline bool DAC960_LP_init_in_progress(void __iomem *base)
+{
+	u8 val;
+
+	val = readb(base + DAC960_LP_IDB_OFFSET);
+	return val & DAC960_LP_IDB_INIT_IN_PROGRESS;
+}
+
+static inline void DAC960_LP_ack_hw_mbox_intr(void __iomem *base)
+{
+	writeb(DAC960_LP_ODB_HWMBOX_ACK_IRQ, base + DAC960_LP_ODB_OFFSET);
+}
+
+static inline void DAC960_LP_ack_mem_mbox_intr(void __iomem *base)
+{
+	writeb(DAC960_LP_ODB_MMBOX_ACK_IRQ, base + DAC960_LP_ODB_OFFSET);
+}
+
+static inline void DAC960_LP_ack_intr(void __iomem *base)
+{
+	writeb(DAC960_LP_ODB_HWMBOX_ACK_IRQ | DAC960_LP_ODB_MMBOX_ACK_IRQ,
+	       base + DAC960_LP_ODB_OFFSET);
+}
+
+static inline bool DAC960_LP_hw_mbox_status_available(void __iomem *base)
+{
+	u8 val;
+
+	val = readb(base + DAC960_LP_ODB_OFFSET);
+	return val & DAC960_LP_ODB_HWMBOX_STS_AVAIL;
+}
+
+static inline bool DAC960_LP_mem_mbox_status_available(void __iomem *base)
+{
+	u8 val;
+
+	val = readb(base + DAC960_LP_ODB_OFFSET);
+	return val & DAC960_LP_ODB_MMBOX_STS_AVAIL;
+}
+
+static inline void DAC960_LP_enable_intr(void __iomem *base)
+{
+	writeb(~DAC960_LP_IRQMASK_DISABLE_IRQ, base + DAC960_LP_IRQMASK_OFFSET);
+}
+
+static inline void DAC960_LP_disable_intr(void __iomem *base)
+{
+	writeb(0xFF, base + DAC960_LP_IRQMASK_OFFSET);
+}
+
+static inline bool DAC960_LP_intr_enabled(void __iomem *base)
+{
+	u8 val;
+
+	val = readb(base + DAC960_LP_IRQMASK_OFFSET);
+	return !(val & DAC960_LP_IRQMASK_DISABLE_IRQ);
+}
+
+static inline void DAC960_LP_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
+		union myrs_cmd_mbox *mbox)
+{
+	memcpy(&mem_mbox->words[1], &mbox->words[1],
+	       sizeof(union myrs_cmd_mbox) - sizeof(unsigned int));
+	/* Barrier to avoid reordering */
+	wmb();
+	mem_mbox->words[0] = mbox->words[0];
+	/* Barrier to force PCI access */
+	mb();
+}
+
+static inline void DAC960_LP_write_hw_mbox(void __iomem *base,
+		dma_addr_t cmd_mbox_addr)
+{
+	dma_addr_writeql(cmd_mbox_addr, base + DAC960_LP_CMDMBX_OFFSET);
+}
+
+static inline unsigned short DAC960_LP_read_cmd_ident(void __iomem *base)
+{
+	return readw(base + DAC960_LP_CMDSTS_OFFSET);
+}
+
+static inline unsigned char DAC960_LP_read_cmd_status(void __iomem *base)
+{
+	return readw(base + DAC960_LP_CMDSTS_OFFSET + 2);
+}
+
+static inline bool
+DAC960_LP_read_error_status(void __iomem *base, unsigned char *error,
+		unsigned char *param0, unsigned char *param1)
+{
+	u8 val;
+
+	val = readb(base + DAC960_LP_ERRSTS_OFFSET);
+	if (!(val & DAC960_LP_ERRSTS_PENDING))
+		return false;
+	val &= ~DAC960_LP_ERRSTS_PENDING;
+	*error = val;
+	*param0 = readb(base + DAC960_LP_CMDMBX_OFFSET + 0);
+	*param1 = readb(base + DAC960_LP_CMDMBX_OFFSET + 1);
+	writeb(0xFF, base + DAC960_LP_ERRSTS_OFFSET);
+	return true;
+}
+
+static inline unsigned char
+DAC960_LP_mbox_init(void __iomem *base, dma_addr_t mbox_addr)
+{
+	unsigned char status;
+
+	while (DAC960_LP_hw_mbox_is_full(base))
+		udelay(1);
+	DAC960_LP_write_hw_mbox(base, mbox_addr);
+	DAC960_LP_hw_mbox_new_cmd(base);
+	while (!DAC960_LP_hw_mbox_status_available(base))
+		udelay(1);
+	status = DAC960_LP_read_cmd_status(base);
+	DAC960_LP_ack_hw_mbox_intr(base);
+	DAC960_LP_ack_hw_mbox_status(base);
+
+	return status;
+}
+
+static int DAC960_LP_hw_init(struct pci_dev *pdev,
+		struct myrs_hba *cs, void __iomem *base)
+{
+	int timeout = 0;
+	unsigned char status, parm0, parm1;
+
+	DAC960_LP_disable_intr(base);
+	DAC960_LP_ack_hw_mbox_status(base);
+	udelay(1000);
+	while (DAC960_LP_init_in_progress(base) &&
+	       timeout < MYRS_MAILBOX_TIMEOUT) {
+		if (DAC960_LP_read_error_status(base, &status,
+					      &parm0, &parm1) &&
+		    myrs_err_status(cs, status, parm0, parm1))
+			return -EIO;
+		udelay(10);
+		timeout++;
+	}
+	if (timeout == MYRS_MAILBOX_TIMEOUT) {
+		dev_err(&pdev->dev,
+			"Timeout waiting for Controller Initialisation\n");
+		return -ETIMEDOUT;
+	}
+	if (!myrs_enable_mmio_mbox(cs, DAC960_LP_mbox_init)) {
+		dev_err(&pdev->dev,
+			"Unable to Enable Memory Mailbox Interface\n");
+		DAC960_LP_reset_ctrl(base);
+		return -ENODEV;
+	}
+	DAC960_LP_enable_intr(base);
+	cs->write_cmd_mbox = DAC960_LP_write_cmd_mbox;
+	cs->get_cmd_mbox = DAC960_LP_mem_mbox_new_cmd;
+	cs->disable_intr = DAC960_LP_disable_intr;
+	cs->reset = DAC960_LP_reset_ctrl;
+
+	return 0;
+}
+
+static irqreturn_t DAC960_LP_intr_handler(int irq, void *arg)
+{
+	struct myrs_hba *cs = arg;
+	void __iomem *base = cs->io_base;
+	struct myrs_stat_mbox *next_stat_mbox;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cs->queue_lock, flags);
+	DAC960_LP_ack_intr(base);
+	next_stat_mbox = cs->next_stat_mbox;
+	while (next_stat_mbox->id > 0) {
+		unsigned short id = next_stat_mbox->id;
+		struct scsi_cmnd *scmd = NULL;
+		struct myrs_cmdblk *cmd_blk = NULL;
+
+		if (id == MYRS_DCMD_TAG)
+			cmd_blk = &cs->dcmd_blk;
+		else if (id == MYRS_MCMD_TAG)
+			cmd_blk = &cs->mcmd_blk;
+		else {
+			scmd = scsi_host_find_tag(cs->host, id - 3);
+			if (scmd)
+				cmd_blk = scsi_cmd_priv(scmd);
+		}
+		if (cmd_blk) {
+			cmd_blk->status = next_stat_mbox->status;
+			cmd_blk->sense_len = next_stat_mbox->sense_len;
+			cmd_blk->residual = next_stat_mbox->residual;
+		} else
+			dev_err(&cs->pdev->dev,
+				"Unhandled command completion %d\n", id);
+
+		memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox));
+		if (++next_stat_mbox > cs->last_stat_mbox)
+			next_stat_mbox = cs->first_stat_mbox;
+
+		if (cmd_blk) {
+			if (id < 3)
+				myrs_handle_cmdblk(cs, cmd_blk);
+			else
+				myrs_handle_scsi(cs, cmd_blk, scmd);
+		}
+	}
+	cs->next_stat_mbox = next_stat_mbox;
+	spin_unlock_irqrestore(&cs->queue_lock, flags);
+	return IRQ_HANDLED;
+}
+
+struct myrs_privdata DAC960_LP_privdata = {
+	.hw_init =		DAC960_LP_hw_init,
+	.irq_handler =		DAC960_LP_intr_handler,
+	.mmio_size =		DAC960_LP_mmio_size,
+};
+
+/*
+ * Module functions
+ */
+static int
+myrs_probe(struct pci_dev *dev, const struct pci_device_id *entry)
+{
+	struct myrs_hba *cs;
+	int ret;
+
+	cs = myrs_detect(dev, entry);
+	if (!cs)
+		return -ENODEV;
+
+	ret = myrs_get_config(cs);
+	if (ret < 0) {
+		myrs_cleanup(cs);
+		return ret;
+	}
+
+	if (!myrs_create_mempools(dev, cs)) {
+		ret = -ENOMEM;
+		goto failed;
+	}
+
+	ret = scsi_add_host(cs->host, &dev->dev);
+	if (ret) {
+		dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
+		myrs_destroy_mempools(cs);
+		goto failed;
+	}
+	scsi_scan_host(cs->host);
+	return 0;
+failed:
+	myrs_cleanup(cs);
+	return ret;
+}
+
+static void myrs_remove(struct pci_dev *pdev)
+{
+	struct myrs_hba *cs = pci_get_drvdata(pdev);
+
+	if (cs == NULL)
+		return;
+
+	shost_printk(KERN_NOTICE, cs->host, "Flushing Cache...\n");
+	myrs_flush_cache(cs);
+	myrs_destroy_mempools(cs);
+	myrs_cleanup(cs);
+}
+
+static const struct pci_device_id myrs_id_table[] = {
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_MYLEX,
+			       PCI_DEVICE_ID_MYLEX_DAC960_GEM,
+			       PCI_VENDOR_ID_MYLEX, PCI_ANY_ID),
+		.driver_data	= (unsigned long) &DAC960_GEM_privdata,
+	},
+	{
+		PCI_DEVICE_DATA(MYLEX, DAC960_BA, &DAC960_BA_privdata),
+	},
+	{
+		PCI_DEVICE_DATA(MYLEX, DAC960_LP, &DAC960_LP_privdata),
+	},
+	{0, },
+};
+
+MODULE_DEVICE_TABLE(pci, myrs_id_table);
+
+static struct pci_driver myrs_pci_driver = {
+	.name		= "myrs",
+	.id_table	= myrs_id_table,
+	.probe		= myrs_probe,
+	.remove		= myrs_remove,
+};
+
+static int __init myrs_init_module(void)
+{
+	int ret;
+
+	myrs_raid_template = raid_class_attach(&myrs_raid_functions);
+	if (!myrs_raid_template)
+		return -ENODEV;
+
+	ret = pci_register_driver(&myrs_pci_driver);
+	if (ret)
+		raid_class_release(myrs_raid_template);
+
+	return ret;
+}
+
+static void __exit myrs_cleanup_module(void)
+{
+	pci_unregister_driver(&myrs_pci_driver);
+	raid_class_release(myrs_raid_template);
+}
+
+module_init(myrs_init_module);
+module_exit(myrs_cleanup_module);
+
+MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (SCSI Interface)");
+MODULE_AUTHOR("Hannes Reinecke <hare@suse.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/myrs.h b/drivers/scsi/myrs.h
new file mode 100644
index 0000000..e6702ee
--- /dev/null
+++ b/drivers/scsi/myrs.h
@@ -0,0 +1,1134 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
+ *
+ * This driver supports the newer, SCSI-based firmware interface only.
+ *
+ * Copyright 2018 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
+ *
+ * Based on the original DAC960 driver, which has
+ * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
+ * Portions Copyright 2002 by Mylex (An IBM Business Unit)
+ */
+
+#ifndef _MYRS_H
+#define _MYRS_H
+
+#define MYRS_MAILBOX_TIMEOUT 1000000
+
+#define MYRS_DCMD_TAG 1
+#define MYRS_MCMD_TAG 2
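+
+/*
+ * Command ids 1 and 2 are reserved for driver-internal commands; SCSI
+ * commands use ids starting at 3 (see the interrupt handlers in myrs.c).
+ */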
+
+#define MYRS_LINE_BUFFER_SIZE 128
+
+#define MYRS_PRIMARY_MONITOR_INTERVAL (10 * HZ)
+#define MYRS_SECONDARY_MONITOR_INTERVAL (60 * HZ)
+
+/* Maximum number of Scatter/Gather Segments supported */
+#define MYRS_SG_LIMIT		128
+
+/*
+ * Number of Command and Status Mailboxes used by the
+ * DAC960 V2 Firmware Memory Mailbox Interface.
+ */
+#define MYRS_MAX_CMD_MBOX		512
+#define MYRS_MAX_STAT_MBOX		512
+
+#define MYRS_DCDB_SIZE			16
+#define MYRS_SENSE_SIZE			14
+
+/*
+ * DAC960 V2 Firmware Command Opcodes.
+ */
+enum myrs_cmd_opcode {
+	MYRS_CMD_OP_MEMCOPY		= 0x01,
+	MYRS_CMD_OP_SCSI_10_PASSTHRU	= 0x02,
+	MYRS_CMD_OP_SCSI_255_PASSTHRU	= 0x03,
+	MYRS_CMD_OP_SCSI_10		= 0x04,
+	MYRS_CMD_OP_SCSI_256		= 0x05,
+	MYRS_CMD_OP_IOCTL		= 0x20,
+} __packed;
+
+/*
+ * DAC960 V2 Firmware IOCTL Opcodes.
+ */
+enum myrs_ioctl_opcode {
+	MYRS_IOCTL_GET_CTLR_INFO	= 0x01,
+	MYRS_IOCTL_GET_LDEV_INFO_VALID	= 0x03,
+	MYRS_IOCTL_GET_PDEV_INFO_VALID	= 0x05,
+	MYRS_IOCTL_GET_HEALTH_STATUS	= 0x11,
+	MYRS_IOCTL_GET_EVENT		= 0x15,
+	MYRS_IOCTL_START_DISCOVERY	= 0x81,
+	MYRS_IOCTL_SET_DEVICE_STATE	= 0x82,
+	MYRS_IOCTL_INIT_PDEV_START	= 0x84,
+	MYRS_IOCTL_INIT_PDEV_STOP	= 0x85,
+	MYRS_IOCTL_INIT_LDEV_START	= 0x86,
+	MYRS_IOCTL_INIT_LDEV_STOP	= 0x87,
+	MYRS_IOCTL_RBLD_DEVICE_START	= 0x88,
+	MYRS_IOCTL_RBLD_DEVICE_STOP	= 0x89,
+	MYRS_IOCTL_MAKE_CONSISTENT_START = 0x8A,
+	MYRS_IOCTL_MAKE_CONSISTENT_STOP = 0x8B,
+	MYRS_IOCTL_CC_START		= 0x8C,
+	MYRS_IOCTL_CC_STOP		= 0x8D,
+	MYRS_IOCTL_SET_MEM_MBOX		= 0x8E,
+	MYRS_IOCTL_RESET_DEVICE		= 0x90,
+	MYRS_IOCTL_FLUSH_DEVICE_DATA	= 0x91,
+	MYRS_IOCTL_PAUSE_DEVICE		= 0x92,
+	MYRS_IOCTL_UNPAUSE_DEVICE	= 0x93,
+	MYRS_IOCTL_LOCATE_DEVICE	= 0x94,
+	MYRS_IOCTL_CREATE_CONFIGURATION = 0xC0,
+	MYRS_IOCTL_DELETE_LDEV		= 0xC1,
+	MYRS_IOCTL_REPLACE_INTERNALDEVICE = 0xC2,
+	MYRS_IOCTL_RENAME_LDEV		= 0xC3,
+	MYRS_IOCTL_ADD_CONFIGURATION	= 0xC4,
+	MYRS_IOCTL_XLATE_PDEV_TO_LDEV	= 0xC5,
+	MYRS_IOCTL_CLEAR_CONFIGURATION	= 0xCA,
+} __packed;
+
+/*
+ * DAC960 V2 Firmware Command Status Codes.
+ */
+#define MYRS_STATUS_SUCCESS			0x00
+#define MYRS_STATUS_FAILED			0x02
+#define MYRS_STATUS_DEVICE_BUSY			0x08
+#define MYRS_STATUS_DEVICE_NON_RESPONSIVE	0x0E
+#define MYRS_STATUS_DEVICE_NON_RESPONSIVE2	0x0F
+#define MYRS_STATUS_RESERVATION_CONFLICT	0x18
+
+/*
+ * DAC960 V2 Firmware Memory Type structure.
+ */
+struct myrs_mem_type {
+	enum {
+		MYRS_MEMTYPE_RESERVED	= 0x00,
+		MYRS_MEMTYPE_DRAM	= 0x01,
+		MYRS_MEMTYPE_EDRAM	= 0x02,
+		MYRS_MEMTYPE_EDO	= 0x03,
+		MYRS_MEMTYPE_SDRAM	= 0x04,
+		MYRS_MEMTYPE_LAST	= 0x1F,
+	} __packed mem_type:5;	/* Byte 0 Bits 0-4 */
+	unsigned rsvd:1;			/* Byte 0 Bit 5 */
+	unsigned mem_parity:1;			/* Byte 0 Bit 6 */
+	unsigned mem_ecc:1;			/* Byte 0 Bit 7 */
+};
+
+/*
+ * DAC960 V2 Firmware Processor Type structure.
+ */
+enum myrs_cpu_type {
+	MYRS_CPUTYPE_i960CA	= 0x01,
+	MYRS_CPUTYPE_i960RD	= 0x02,
+	MYRS_CPUTYPE_i960RN	= 0x03,
+	MYRS_CPUTYPE_i960RP	= 0x04,
+	MYRS_CPUTYPE_NorthBay	= 0x05,
+	MYRS_CPUTYPE_StrongArm	= 0x06,
+	MYRS_CPUTYPE_i960RM	= 0x07,
+} __packed;
+
+/*
+ * DAC960 V2 Firmware Get Controller Info reply structure.
+ */
+struct myrs_ctlr_info {
+	unsigned char rsvd1;				/* Byte 0 */
+	enum {
+		MYRS_SCSI_BUS	= 0x00,
+		MYRS_FIBRE_BUS	= 0x01,
+		MYRS_PCI_BUS	= 0x03
+	} __packed bus;	/* Byte 1 */
+	enum {
+		MYRS_CTLR_DAC960E	= 0x01,
+		MYRS_CTLR_DAC960M	= 0x08,
+		MYRS_CTLR_DAC960PD	= 0x10,
+		MYRS_CTLR_DAC960PL	= 0x11,
+		MYRS_CTLR_DAC960PU	= 0x12,
+		MYRS_CTLR_DAC960PE	= 0x13,
+		MYRS_CTLR_DAC960PG	= 0x14,
+		MYRS_CTLR_DAC960PJ	= 0x15,
+		MYRS_CTLR_DAC960PTL0	= 0x16,
+		MYRS_CTLR_DAC960PR	= 0x17,
+		MYRS_CTLR_DAC960PRL	= 0x18,
+		MYRS_CTLR_DAC960PT	= 0x19,
+		MYRS_CTLR_DAC1164P	= 0x1A,
+		MYRS_CTLR_DAC960PTL1	= 0x1B,
+		MYRS_CTLR_EXR2000P	= 0x1C,
+		MYRS_CTLR_EXR3000P	= 0x1D,
+		MYRS_CTLR_ACCELERAID352 = 0x1E,
+		MYRS_CTLR_ACCELERAID170 = 0x1F,
+		MYRS_CTLR_ACCELERAID160 = 0x20,
+		MYRS_CTLR_DAC960S	= 0x60,
+		MYRS_CTLR_DAC960SU	= 0x61,
+		MYRS_CTLR_DAC960SX	= 0x62,
+		MYRS_CTLR_DAC960SF	= 0x63,
+		MYRS_CTLR_DAC960SS	= 0x64,
+		MYRS_CTLR_DAC960FL	= 0x65,
+		MYRS_CTLR_DAC960LL	= 0x66,
+		MYRS_CTLR_DAC960FF	= 0x67,
+		MYRS_CTLR_DAC960HP	= 0x68,
+		MYRS_CTLR_RAIDBRICK	= 0x69,
+		MYRS_CTLR_METEOR_FL	= 0x6A,
+		MYRS_CTLR_METEOR_FF	= 0x6B
+	} __packed ctlr_type;	/* Byte 2 */
+	unsigned char rsvd2;			/* Byte 3 */
+	unsigned short bus_speed_mhz;		/* Bytes 4-5 */
+	unsigned char bus_width;		/* Byte 6 */
+	unsigned char flash_code;		/* Byte 7 */
+	unsigned char ports_present;		/* Byte 8 */
+	unsigned char rsvd3[7];			/* Bytes 9-15 */
+	unsigned char bus_name[16];		/* Bytes 16-31 */
+	unsigned char ctlr_name[16];		/* Bytes 32-47 */
+	unsigned char rsvd4[16];		/* Bytes 48-63 */
+	/* Firmware Release Information */
+	unsigned char fw_major_version;		/* Byte 64 */
+	unsigned char fw_minor_version;		/* Byte 65 */
+	unsigned char fw_turn_number;		/* Byte 66 */
+	unsigned char fw_build_number;		/* Byte 67 */
+	unsigned char fw_release_day;		/* Byte 68 */
+	unsigned char fw_release_month;		/* Byte 69 */
+	unsigned char fw_release_year_hi;	/* Byte 70 */
+	unsigned char fw_release_year_lo;	/* Byte 71 */
+	/* Hardware Release Information */
+	unsigned char hw_rev;			/* Byte 72 */
+	unsigned char rsvd5[3];			/* Bytes 73-75 */
+	unsigned char hw_release_day;		/* Byte 76 */
+	unsigned char hw_release_month;		/* Byte 77 */
+	unsigned char hw_release_year_hi;	/* Byte 78 */
+	unsigned char hw_release_year_lo;	/* Byte 79 */
+	/* Hardware Manufacturing Information */
+	unsigned char manuf_batch_num;		/* Byte 80 */
+	unsigned char rsvd6;			/* Byte 81 */
+	unsigned char manuf_plant_num;		/* Byte 82 */
+	unsigned char rsvd7;			/* Byte 83 */
+	unsigned char hw_manuf_day;		/* Byte 84 */
+	unsigned char hw_manuf_month;		/* Byte 85 */
+	unsigned char hw_manuf_year_hi;		/* Byte 86 */
+	unsigned char hw_manuf_year_lo;		/* Byte 87 */
+	unsigned char max_pd_per_xld;		/* Byte 88 */
+	unsigned char max_ild_per_xld;		/* Byte 89 */
+	unsigned short nvram_size_kb;		/* Bytes 90-91 */
+	unsigned char max_xld;			/* Byte 92 */
+	unsigned char rsvd8[3];			/* Bytes 93-95 */
+	/* Unique Information per Controller */
+	unsigned char serial_number[16];	/* Bytes 96-111 */
+	unsigned char rsvd9[16];		/* Bytes 112-127 */
+	/* Vendor Information */
+	unsigned char rsvd10[3];		/* Bytes 128-130 */
+	unsigned char oem_code;			/* Byte 131 */
+	unsigned char vendor[16];		/* Bytes 132-147 */
+	/* Other Physical/Controller/Operation Information */
+	unsigned char bbu_present:1;		/* Byte 148 Bit 0 */
+	unsigned char cluster_mode:1;		/* Byte 148 Bit 1 */
+	unsigned char rsvd11:6;			/* Byte 148 Bits 2-7 */
+	unsigned char rsvd12[3];		/* Bytes 149-151 */
+	/* Physical Device Scan Information */
+	unsigned char pscan_active:1;		/* Byte 152 Bit 0 */
+	unsigned char rsvd13:7;			/* Byte 152 Bits 1-7 */
+	unsigned char pscan_chan;		/* Byte 153 */
+	unsigned char pscan_target;		/* Byte 154 */
+	unsigned char pscan_lun;		/* Byte 155 */
+	/* Maximum Command Data Transfer Sizes */
+	unsigned short max_transfer_size;	/* Bytes 156-157 */
+	unsigned short max_sge;			/* Bytes 158-159 */
+	/* Logical/Physical Device Counts */
+	unsigned short ldev_present;		/* Bytes 160-161 */
+	unsigned short ldev_critical;		/* Bytes 162-163 */
+	unsigned short ldev_offline;		/* Bytes 164-165 */
+	unsigned short pdev_present;		/* Bytes 166-167 */
+	unsigned short pdisk_present;		/* Bytes 168-169 */
+	unsigned short pdisk_critical;		/* Bytes 170-171 */
+	unsigned short pdisk_offline;		/* Bytes 172-173 */
+	unsigned short max_tcq;			/* Bytes 174-175 */
+	/* Channel and Target ID Information */
+	unsigned char physchan_present;		/* Byte 176 */
+	unsigned char virtchan_present;		/* Byte 177 */
+	unsigned char physchan_max;		/* Byte 178 */
+	unsigned char virtchan_max;		/* Byte 179 */
+	unsigned char max_targets[16];		/* Bytes 180-195 */
+	unsigned char rsvd14[12];		/* Bytes 196-207 */
+	/* Memory/Cache Information */
+	unsigned short mem_size_mb;		/* Bytes 208-209 */
+	unsigned short cache_size_mb;		/* Bytes 210-211 */
+	unsigned int valid_cache_bytes;		/* Bytes 212-215 */
+	unsigned int dirty_cache_bytes;		/* Bytes 216-219 */
+	unsigned short mem_speed_mhz;		/* Bytes 220-221 */
+	unsigned char mem_data_width;		/* Byte 222 */
+	struct myrs_mem_type mem_type;		/* Byte 223 */
+	unsigned char cache_mem_type_name[16];	/* Bytes 224-239 */
+	/* Execution Memory Information */
+	unsigned short exec_mem_size_mb;	/* Bytes 240-241 */
+	unsigned short exec_l2_cache_size_mb;	/* Bytes 242-243 */
+	unsigned char rsvd15[8];		/* Bytes 244-251 */
+	unsigned short exec_mem_speed_mhz;	/* Bytes 252-253 */
+	unsigned char exec_mem_data_width;	/* Byte 254 */
+	struct myrs_mem_type exec_mem_type;	/* Byte 255 */
+	unsigned char exec_mem_type_name[16];	/* Bytes 256-271 */
+	/* CPU Type Information */
+	struct {				/* Bytes 272-335 */
+		unsigned short cpu_speed_mhz;
+		enum myrs_cpu_type cpu_type;
+		unsigned char cpu_count;
+		unsigned char rsvd16[12];
+		unsigned char cpu_name[16];
+	} __packed cpu[2];
+	/* Debugging/Profiling/Command Time Tracing Information */
+	unsigned short cur_prof_page_num;	/* Bytes 336-337 */
+	unsigned short num_prof_waiters;	/* Bytes 338-339 */
+	unsigned short cur_trace_page_num;	/* Bytes 340-341 */
+	unsigned short num_trace_waiters;	/* Bytes 342-343 */
+	unsigned char rsvd18[8];		/* Bytes 344-351 */
+	/* Error Counters on Physical Devices */
+	unsigned short pdev_bus_resets;		/* Bytes 352-353 */
+	unsigned short pdev_parity_errors;	/* Bytes 354-355 */
+	unsigned short pdev_soft_errors;	/* Bytes 356-357 */
+	unsigned short pdev_cmds_failed;	/* Bytes 358-359 */
+	unsigned short pdev_misc_errors;	/* Bytes 360-361 */
+	unsigned short pdev_cmd_timeouts;	/* Bytes 362-363 */
+	unsigned short pdev_sel_timeouts;	/* Bytes 364-365 */
+	unsigned short pdev_retries_done;	/* Bytes 366-367 */
+	unsigned short pdev_aborts_done;	/* Bytes 368-369 */
+	unsigned short pdev_host_aborts_done;	/* Bytes 370-371 */
+	unsigned short pdev_predicted_failures;	/* Bytes 372-373 */
+	unsigned short pdev_host_cmds_failed;	/* Bytes 374-375 */
+	unsigned short pdev_hard_errors;	/* Bytes 376-377 */
+	unsigned char rsvd19[6];		/* Bytes 378-383 */
+	/* Error Counters on Logical Devices */
+	unsigned short ldev_soft_errors;	/* Bytes 384-385 */
+	unsigned short ldev_cmds_failed;	/* Bytes 386-387 */
+	unsigned short ldev_host_aborts_done;	/* Bytes 388-389 */
+	unsigned char rsvd20[2];		/* Bytes 390-391 */
+	/* Error Counters on Controller */
+	unsigned short ctlr_mem_errors;		/* Bytes 392-393 */
+	unsigned short ctlr_host_aborts_done;	/* Bytes 394-395 */
+	unsigned char rsvd21[4];		/* Bytes 396-399 */
+	/* Long Duration Activity Information */
+	unsigned short bg_init_active;		/* Bytes 400-401 */
+	unsigned short ldev_init_active;	/* Bytes 402-403 */
+	unsigned short pdev_init_active;	/* Bytes 404-405 */
+	unsigned short cc_active;		/* Bytes 406-407 */
+	unsigned short rbld_active;		/* Bytes 408-409 */
+	unsigned short exp_active;		/* Bytes 410-411 */
+	unsigned short patrol_active;		/* Bytes 412-413 */
+	unsigned char rsvd22[2];		/* Bytes 414-415 */
+	/* Flash ROM Information */
+	unsigned char flash_type;		/* Byte 416 */
+	unsigned char rsvd23;			/* Byte 417 */
+	unsigned short flash_size_MB;		/* Bytes 418-419 */
+	unsigned int flash_limit;		/* Bytes 420-423 */
+	unsigned int flash_count;		/* Bytes 424-427 */
+	unsigned char rsvd24[4];		/* Bytes 428-431 */
+	unsigned char flash_type_name[16];	/* Bytes 432-447 */
+	/* Firmware Run Time Information */
+	unsigned char rbld_rate;		/* Byte 448 */
+	unsigned char bg_init_rate;		/* Byte 449 */
+	unsigned char fg_init_rate;		/* Byte 450 */
+	unsigned char cc_rate;			/* Byte 451 */
+	unsigned char rsvd25[4];		/* Bytes 452-455 */
+	unsigned int max_dp;			/* Bytes 456-459 */
+	unsigned int free_dp;			/* Bytes 460-463 */
+	unsigned int max_iop;			/* Bytes 464-467 */
+	unsigned int free_iop;			/* Bytes 468-471 */
+	unsigned short max_combined_len;	/* Bytes 472-473 */
+	unsigned short num_cfg_groups;		/* Bytes 474-475 */
+	unsigned installation_abort_status:1;	/* Byte 476 Bit 0 */
+	unsigned maint_mode_status:1;		/* Byte 476 Bit 1 */
+	unsigned rsvd26:6;			/* Byte 476 Bits 2-7 */
+	unsigned char rsvd27[35];		/* Bytes 477-511 */
+	unsigned char rsvd28[512];		/* Bytes 512-1023 */
+};
+
+/*
+ * DAC960 V2 Firmware Device State type.
+ */
+enum myrs_devstate {
+	MYRS_DEVICE_UNCONFIGURED	= 0x00,
+	MYRS_DEVICE_ONLINE		= 0x01,
+	MYRS_DEVICE_REBUILD		= 0x03,
+	MYRS_DEVICE_MISSING		= 0x04,
+	MYRS_DEVICE_SUSPECTED_CRITICAL	= 0x05,
+	MYRS_DEVICE_OFFLINE		= 0x08,
+	MYRS_DEVICE_CRITICAL		= 0x09,
+	MYRS_DEVICE_SUSPECTED_DEAD	= 0x0C,
+	MYRS_DEVICE_COMMANDED_OFFLINE	= 0x10,
+	MYRS_DEVICE_STANDBY		= 0x21,
+	MYRS_DEVICE_INVALID_STATE	= 0xFF,
+} __packed;
+
+/*
+ * DAC960 V2 RAID Levels
+ */
+enum myrs_raid_level {
+	MYRS_RAID_LEVEL0	= 0x0,     /* RAID 0 */
+	MYRS_RAID_LEVEL1	= 0x1,     /* RAID 1 */
+	MYRS_RAID_LEVEL3	= 0x3,     /* RAID 3 right asymmetric parity */
+	MYRS_RAID_LEVEL5	= 0x5,     /* RAID 5 right asymmetric parity */
+	MYRS_RAID_LEVEL6	= 0x6,     /* RAID 6 (Mylex RAID 6) */
+	MYRS_RAID_JBOD		= 0x7,     /* RAID 7 (JBOD) */
+	MYRS_RAID_NEWSPAN	= 0x8,     /* New Mylex SPAN */
+	MYRS_RAID_LEVEL3F	= 0x9,     /* RAID 3 fixed parity */
+	MYRS_RAID_LEVEL3L	= 0xb,     /* RAID 3 left symmetric parity */
+	MYRS_RAID_SPAN		= 0xc,     /* current spanning implementation */
+	MYRS_RAID_LEVEL5L	= 0xd,     /* RAID 5 left symmetric parity */
+	MYRS_RAID_LEVELE	= 0xe,     /* RAID E (concatenation) */
+	MYRS_RAID_PHYSICAL	= 0xf,     /* physical device */
+} __packed;
+
+enum myrs_stripe_size {
+	MYRS_STRIPE_SIZE_0	= 0x0,	/* no stripe (RAID 1, RAID 7, etc) */
+	MYRS_STRIPE_SIZE_512B	= 0x1,
+	MYRS_STRIPE_SIZE_1K	= 0x2,
+	MYRS_STRIPE_SIZE_2K	= 0x3,
+	MYRS_STRIPE_SIZE_4K	= 0x4,
+	MYRS_STRIPE_SIZE_8K	= 0x5,
+	MYRS_STRIPE_SIZE_16K	= 0x6,
+	MYRS_STRIPE_SIZE_32K	= 0x7,
+	MYRS_STRIPE_SIZE_64K	= 0x8,
+	MYRS_STRIPE_SIZE_128K	= 0x9,
+	MYRS_STRIPE_SIZE_256K	= 0xa,
+	MYRS_STRIPE_SIZE_512K	= 0xb,
+	MYRS_STRIPE_SIZE_1M	= 0xc,
+} __packed;
+
+enum myrs_cacheline_size {
+	MYRS_CACHELINE_ZERO	= 0x0,	/* caching cannot be enabled */
+	MYRS_CACHELINE_512B	= 0x1,
+	MYRS_CACHELINE_1K	= 0x2,
+	MYRS_CACHELINE_2K	= 0x3,
+	MYRS_CACHELINE_4K	= 0x4,
+	MYRS_CACHELINE_8K	= 0x5,
+	MYRS_CACHELINE_16K	= 0x6,
+	MYRS_CACHELINE_32K	= 0x7,
+	MYRS_CACHELINE_64K	= 0x8,
+} __packed;
+
+/*
+ * DAC960 V2 Firmware Get Logical Device Info reply structure.
+ */
+struct myrs_ldev_info {
+	unsigned char ctlr;			/* Byte 0 */
+	unsigned char channel;			/* Byte 1 */
+	unsigned char target;			/* Byte 2 */
+	unsigned char lun;			/* Byte 3 */
+	enum myrs_devstate dev_state;		/* Byte 4 */
+	unsigned char raid_level;		/* Byte 5 */
+	enum myrs_stripe_size stripe_size;	/* Byte 6 */
+	enum myrs_cacheline_size cacheline_size; /* Byte 7 */
+	struct {
+		enum {
+			MYRS_READCACHE_DISABLED		= 0x0,
+			MYRS_READCACHE_ENABLED		= 0x1,
+			MYRS_READAHEAD_ENABLED		= 0x2,
+			MYRS_INTELLIGENT_READAHEAD_ENABLED = 0x3,
+			MYRS_READCACHE_LAST		= 0x7,
+		} __packed rce:3; /* Byte 8 Bits 0-2 */
+		enum {
+			MYRS_WRITECACHE_DISABLED	= 0x0,
+			MYRS_LOGICALDEVICE_RO		= 0x1,
+			MYRS_WRITECACHE_ENABLED		= 0x2,
+			MYRS_INTELLIGENT_WRITECACHE_ENABLED = 0x3,
+			MYRS_WRITECACHE_LAST		= 0x7,
+		} __packed wce:3; /* Byte 8 Bits 3-5 */
+		unsigned rsvd1:1;		/* Byte 8 Bit 6 */
+		unsigned ldev_init_done:1;	/* Byte 8 Bit 7 */
+	} ldev_control;				/* Byte 8 */
+	/* Logical Device Operations Status */
+	unsigned char cc_active:1;		/* Byte 9 Bit 0 */
+	unsigned char rbld_active:1;		/* Byte 9 Bit 1 */
+	unsigned char bg_init_active:1;		/* Byte 9 Bit 2 */
+	unsigned char fg_init_active:1;		/* Byte 9 Bit 3 */
+	unsigned char migration_active:1;	/* Byte 9 Bit 4 */
+	unsigned char patrol_active:1;		/* Byte 9 Bit 5 */
+	unsigned char rsvd2:2;			/* Byte 9 Bits 6-7 */
+	unsigned char raid5_writeupdate;	/* Byte 10 */
+	unsigned char raid5_algo;		/* Byte 11 */
+	unsigned short ldev_num;		/* Bytes 12-13 */
+	/* BIOS Info */
+	unsigned char bios_disabled:1;		/* Byte 14 Bit 0 */
+	unsigned char cdrom_boot:1;		/* Byte 14 Bit 1 */
+	unsigned char drv_coercion:1;		/* Byte 14 Bit 2 */
+	unsigned char write_same_disabled:1;	/* Byte 14 Bit 3 */
+	unsigned char hba_mode:1;		/* Byte 14 Bit 4 */
+	enum {
+		MYRS_GEOMETRY_128_32	= 0x0,
+		MYRS_GEOMETRY_255_63	= 0x1,
+		MYRS_GEOMETRY_RSVD1	= 0x2,
+		MYRS_GEOMETRY_RSVD2	= 0x3
+	} __packed drv_geom:2;	/* Byte 14 Bits 5-6 */
+	unsigned char super_ra_enabled:1;	/* Byte 14 Bit 7 */
+	unsigned char rsvd3;			/* Byte 15 */
+	/* Error Counters */
+	unsigned short soft_errs;		/* Bytes 16-17 */
+	unsigned short cmds_failed;		/* Bytes 18-19 */
+	unsigned short cmds_aborted;		/* Bytes 20-21 */
+	unsigned short deferred_write_errs;	/* Bytes 22-23 */
+	unsigned int rsvd4;			/* Bytes 24-27 */
+	unsigned int rsvd5;			/* Bytes 28-31 */
+	/* Device Size Information */
+	unsigned short rsvd6;			/* Bytes 32-33 */
+	unsigned short devsize_bytes;		/* Bytes 34-35 */
+	unsigned int orig_devsize;		/* Bytes 36-39 */
+	unsigned int cfg_devsize;		/* Bytes 40-43 */
+	unsigned int rsvd7;			/* Bytes 44-47 */
+	unsigned char ldev_name[32];		/* Bytes 48-79 */
+	unsigned char inquiry[36];		/* Bytes 80-115 */
+	unsigned char rsvd8[12];		/* Bytes 116-127 */
+	u64 last_read_lba;			/* Bytes 128-135 */
+	u64 last_write_lba;			/* Bytes 136-143 */
+	u64 cc_lba;				/* Bytes 144-151 */
+	u64 rbld_lba;				/* Bytes 152-159 */
+	u64 bg_init_lba;			/* Bytes 160-167 */
+	u64 fg_init_lba;			/* Bytes 168-175 */
+	u64 migration_lba;			/* Bytes 176-183 */
+	u64 patrol_lba;				/* Bytes 184-191 */
+	unsigned char rsvd9[64];		/* Bytes 192-255 */
+};
+
+/*
+ * DAC960 V2 Firmware Get Physical Device Info reply structure.
+ */
+struct myrs_pdev_info {
+	unsigned char rsvd1;			/* Byte 0 */
+	unsigned char channel;			/* Byte 1 */
+	unsigned char target;			/* Byte 2 */
+	unsigned char lun;			/* Byte 3 */
+	/* Configuration Status Bits */
+	unsigned char pdev_fault_tolerant:1;	/* Byte 4 Bit 0 */
+	unsigned char pdev_connected:1;		/* Byte 4 Bit 1 */
+	unsigned char pdev_local_to_ctlr:1;	/* Byte 4 Bit 2 */
+	unsigned char rsvd2:5;			/* Byte 4 Bits 3-7 */
+	/* Multiple Host/Controller Status Bits */
+	unsigned char remote_host_dead:1;	/* Byte 5 Bit 0 */
+	unsigned char remove_ctlr_dead:1;	/* Byte 5 Bit 1 */
+	unsigned char rsvd3:6;			/* Byte 5 Bits 2-7 */
+	enum myrs_devstate dev_state;		/* Byte 6 */
+	unsigned char nego_data_width;		/* Byte 7 */
+	unsigned short nego_sync_rate;		/* Bytes 8-9 */
+	/* Multiported Physical Device Information */
+	unsigned char num_ports;		/* Byte 10 */
+	unsigned char drv_access_bitmap;	/* Byte 11 */
+	unsigned int rsvd4;			/* Bytes 12-15 */
+	unsigned char ip_address[16];		/* Bytes 16-31 */
+	unsigned short max_tags;		/* Bytes 32-33 */
+	/* Physical Device Operations Status */
+	unsigned char cc_in_progress:1;		/* Byte 34 Bit 0 */
+	unsigned char rbld_in_progress:1;	/* Byte 34 Bit 1 */
+	unsigned char makecc_in_progress:1;	/* Byte 34 Bit 2 */
+	unsigned char pdevinit_in_progress:1;	/* Byte 34 Bit 3 */
+	unsigned char migration_in_progress:1;	/* Byte 34 Bit 4 */
+	unsigned char patrol_in_progress:1;	/* Byte 34 Bit 5 */
+	unsigned char rsvd5:2;			/* Byte 34 Bits 6-7 */
+	unsigned char long_op_status;		/* Byte 35 */
+	unsigned char parity_errs;		/* Byte 36 */
+	unsigned char soft_errs;		/* Byte 37 */
+	unsigned char hard_errs;		/* Byte 38 */
+	unsigned char misc_errs;		/* Byte 39 */
+	unsigned char cmd_timeouts;		/* Byte 40 */
+	unsigned char retries;			/* Byte 41 */
+	unsigned char aborts;			/* Byte 42 */
+	unsigned char pred_failures;		/* Byte 43 */
+	unsigned int rsvd6;			/* Bytes 44-47 */
+	unsigned short rsvd7;			/* Bytes 48-49 */
+	unsigned short devsize_bytes;		/* Bytes 50-51 */
+	unsigned int orig_devsize;		/* Bytes 52-55 */
+	unsigned int cfg_devsize;		/* Bytes 56-59 */
+	unsigned int rsvd8;			/* Bytes 60-63 */
+	unsigned char pdev_name[16];		/* Bytes 64-79 */
+	unsigned char rsvd9[16];		/* Bytes 80-95 */
+	unsigned char rsvd10[32];		/* Bytes 96-127 */
+	unsigned char inquiry[36];		/* Bytes 128-163 */
+	unsigned char rsvd11[20];		/* Bytes 164-183 */
+	unsigned char rsvd12[8];		/* Bytes 184-191 */
+	u64 last_read_lba;			/* Bytes 192-199 */
+	u64 last_write_lba;			/* Bytes 200-207 */
+	u64 cc_lba;				/* Bytes 208-215 */
+	u64 rbld_lba;				/* Bytes 216-223 */
+	u64 makecc_lba;				/* Bytes 224-231 */
+	u64 devinit_lba;			/* Bytes 232-239 */
+	u64 migration_lba;			/* Bytes 240-247 */
+	u64 patrol_lba;				/* Bytes 248-255 */
+	unsigned char rsvd13[256];		/* Bytes 256-511 */
+};
+
+/*
+ * DAC960 V2 Firmware Health Status Buffer structure.
+ */
+struct myrs_fwstat {
+	unsigned int uptime_usecs;		/* Bytes 0-3 */
+	unsigned int uptime_msecs;		/* Bytes 4-7 */
+	unsigned int seconds;			/* Bytes 8-11 */
+	unsigned char rsvd1[4];			/* Bytes 12-15 */
+	unsigned int epoch;			/* Bytes 16-19 */
+	unsigned char rsvd2[4];			/* Bytes 20-23 */
+	unsigned int dbg_msgbuf_idx;		/* Bytes 24-27 */
+	unsigned int coded_msgbuf_idx;		/* Bytes 28-31 */
+	unsigned int cur_timetrace_page;	/* Bytes 32-35 */
+	unsigned int cur_prof_page;		/* Bytes 36-39 */
+	unsigned int next_evseq;		/* Bytes 40-43 */
+	unsigned char rsvd3[4];			/* Bytes 44-47 */
+	unsigned char rsvd4[16];		/* Bytes 48-63 */
+	unsigned char rsvd5[64];		/* Bytes 64-127 */
+};
+
+/*
+ * DAC960 V2 Firmware Get Event reply structure.
+ */
+struct myrs_event {
+	unsigned int ev_seq;			/* Bytes 0-3 */
+	unsigned int ev_time;			/* Bytes 4-7 */
+	unsigned int ev_code;			/* Bytes 8-11 */
+	unsigned char rsvd1;			/* Byte 12 */
+	unsigned char channel;			/* Byte 13 */
+	unsigned char target;			/* Byte 14 */
+	unsigned char lun;			/* Byte 15 */
+	unsigned int rsvd2;			/* Bytes 16-19 */
+	unsigned int ev_parm;			/* Bytes 20-23 */
+	unsigned char sense_data[40];		/* Bytes 24-63 */
+};
+
+/*
+ * DAC960 V2 Firmware Command Control Bits structure.
+ */
+struct myrs_cmd_ctrl {
+	unsigned char fua:1;			/* Byte 0 Bit 0 */
+	unsigned char disable_pgout:1;		/* Byte 0 Bit 1 */
+	unsigned char rsvd1:1;			/* Byte 0 Bit 2 */
+	unsigned char add_sge_mem:1;		/* Byte 0 Bit 3 */
+	unsigned char dma_ctrl_to_host:1;	/* Byte 0 Bit 4 */
+	unsigned char rsvd2:1;			/* Byte 0 Bit 5 */
+	unsigned char no_autosense:1;		/* Byte 0 Bit 6 */
+	unsigned char disc_prohibited:1;	/* Byte 0 Bit 7 */
+};
+
+/*
+ * DAC960 V2 Firmware Command Timeout structure.
+ */
+struct myrs_cmd_tmo {
+	unsigned char tmo_val:6;			/* Byte 0 Bits 0-5 */
+	enum {
+		MYRS_TMO_SCALE_SECONDS	= 0,
+		MYRS_TMO_SCALE_MINUTES	= 1,
+		MYRS_TMO_SCALE_HOURS	= 2,
+		MYRS_TMO_SCALE_RESERVED = 3
+	} __packed tmo_scale:2;		/* Byte 0 Bits 6-7 */
+};
+
+/*
+ * DAC960 V2 Firmware Physical Device structure.
+ */
+struct myrs_pdev {
+	unsigned char lun;			/* Byte 0 */
+	unsigned char target;			/* Byte 1 */
+	unsigned char channel:3;		/* Byte 2 Bits 0-2 */
+	unsigned char ctlr:5;			/* Byte 2 Bits 3-7 */
+} __packed;
+
+/*
+ * DAC960 V2 Firmware Logical Device structure.
+ */
+struct myrs_ldev {
+	unsigned short ldev_num;		/* Bytes 0-1 */
+	unsigned char rsvd:3;			/* Byte 2 Bits 0-2 */
+	unsigned char ctlr:5;			/* Byte 2 Bits 3-7 */
+} __packed;
+
+/*
+ * DAC960 V2 Firmware Operation Device type.
+ */
+enum myrs_opdev {
+	MYRS_PHYSICAL_DEVICE	= 0x00,
+	MYRS_RAID_DEVICE	= 0x01,
+	MYRS_PHYSICAL_CHANNEL	= 0x02,
+	MYRS_RAID_CHANNEL	= 0x03,
+	MYRS_PHYSICAL_CONTROLLER = 0x04,
+	MYRS_RAID_CONTROLLER	= 0x05,
+	MYRS_CONFIGURATION_GROUP = 0x10,
+	MYRS_ENCLOSURE		= 0x11,
+} __packed;
+
+/*
+ * DAC960 V2 Firmware Translate Physical To Logical Device structure.
+ */
+struct myrs_devmap {
+	unsigned short ldev_num;		/* Bytes 0-1 */
+	unsigned short rsvd;			/* Bytes 2-3 */
+	unsigned char prev_boot_ctlr;		/* Byte 4 */
+	unsigned char prev_boot_channel;	/* Byte 5 */
+	unsigned char prev_boot_target;		/* Byte 6 */
+	unsigned char prev_boot_lun;		/* Byte 7 */
+};
+
+/*
+ * DAC960 V2 Firmware Scatter/Gather List Entry structure.
+ */
+struct myrs_sge {
+	u64 sge_addr;			/* Bytes 0-7 */
+	u64 sge_count;			/* Bytes 8-15 */
+};
+
+/*
+ * DAC960 V2 Firmware Data Transfer Memory Address structure.
+ */
+union myrs_sgl {
+	struct myrs_sge sge[2]; /* Bytes 0-31 */
+	struct {
+		unsigned short sge0_len;	/* Bytes 0-1 */
+		unsigned short sge1_len;	/* Bytes 2-3 */
+		unsigned short sge2_len;	/* Bytes 4-5 */
+		unsigned short rsvd;		/* Bytes 6-7 */
+		u64 sge0_addr;			/* Bytes 8-15 */
+		u64 sge1_addr;			/* Bytes 16-23 */
+		u64 sge2_addr;			/* Bytes 24-31 */
+	} ext;
+};
+
+/*
+ * 64 Byte DAC960 V2 Firmware Command Mailbox structure.
+ */
+union myrs_cmd_mbox {
+	unsigned int words[16];				/* Words 0-15 */
+	struct {
+		unsigned short id;			/* Bytes 0-1 */
+		enum myrs_cmd_opcode opcode;		/* Byte 2 */
+		struct myrs_cmd_ctrl control;		/* Byte 3 */
+		u32 dma_size:24;			/* Bytes 4-6 */
+		unsigned char dma_num;			/* Byte 7 */
+		u64 sense_addr;				/* Bytes 8-15 */
+		unsigned int rsvd1:24;			/* Bytes 16-18 */
+		struct myrs_cmd_tmo tmo;		/* Byte 19 */
+		unsigned char sense_len;		/* Byte 20 */
+		enum myrs_ioctl_opcode ioctl_opcode;	/* Byte 21 */
+		unsigned char rsvd2[10];		/* Bytes 22-31 */
+		union myrs_sgl dma_addr;		/* Bytes 32-63 */
+	} common;
+	struct {
+		unsigned short id;			/* Bytes 0-1 */
+		enum myrs_cmd_opcode opcode;		/* Byte 2 */
+		struct myrs_cmd_ctrl control;		/* Byte 3 */
+		u32 dma_size;				/* Bytes 4-7 */
+		u64 sense_addr;				/* Bytes 8-15 */
+		struct myrs_pdev pdev;			/* Bytes 16-18 */
+		struct myrs_cmd_tmo tmo;		/* Byte 19 */
+		unsigned char sense_len;		/* Byte 20 */
+		unsigned char cdb_len;			/* Byte 21 */
+		unsigned char cdb[10];			/* Bytes 22-31 */
+		union myrs_sgl dma_addr;		/* Bytes 32-63 */
+	} SCSI_10;
+	struct {
+		unsigned short id;			/* Bytes 0-1 */
+		enum myrs_cmd_opcode opcode;		/* Byte 2 */
+		struct myrs_cmd_ctrl control;		/* Byte 3 */
+		u32 dma_size;				/* Bytes 4-7 */
+		u64 sense_addr;				/* Bytes 8-15 */
+		struct myrs_pdev pdev;			/* Bytes 16-18 */
+		struct myrs_cmd_tmo tmo;		/* Byte 19 */
+		unsigned char sense_len;		/* Byte 20 */
+		unsigned char cdb_len;			/* Byte 21 */
+		unsigned short rsvd;			/* Bytes 22-23 */
+		u64 cdb_addr;				/* Bytes 24-31 */
+		union myrs_sgl dma_addr;		/* Bytes 32-63 */
+	} SCSI_255;
+	struct {
+		unsigned short id;			/* Bytes 0-1 */
+		enum myrs_cmd_opcode opcode;		/* Byte 2 */
+		struct myrs_cmd_ctrl control;		/* Byte 3 */
+		u32 dma_size:24;			/* Bytes 4-6 */
+		unsigned char dma_num;			/* Byte 7 */
+		u64 sense_addr;				/* Bytes 8-15 */
+		unsigned short rsvd1;			/* Bytes 16-17 */
+		unsigned char ctlr_num;			/* Byte 18 */
+		struct myrs_cmd_tmo tmo;		/* Byte 19 */
+		unsigned char sense_len;		/* Byte 20 */
+		enum myrs_ioctl_opcode ioctl_opcode;	/* Byte 21 */
+		unsigned char rsvd2[10];		/* Bytes 22-31 */
+		union myrs_sgl dma_addr;		/* Bytes 32-63 */
+	} ctlr_info;
+	struct {
+		unsigned short id;			/* Bytes 0-1 */
+		enum myrs_cmd_opcode opcode;		/* Byte 2 */
+		struct myrs_cmd_ctrl control;		/* Byte 3 */
+		u32 dma_size:24;			/* Bytes 4-6 */
+		unsigned char dma_num;			/* Byte 7 */
+		u64 sense_addr;				/* Bytes 8-15 */
+		struct myrs_ldev ldev;			/* Bytes 16-18 */
+		struct myrs_cmd_tmo tmo;		/* Byte 19 */
+		unsigned char sense_len;		/* Byte 20 */
+		enum myrs_ioctl_opcode ioctl_opcode;	/* Byte 21 */
+		unsigned char rsvd[10];			/* Bytes 22-31 */
+		union myrs_sgl dma_addr;		/* Bytes 32-63 */
+	} ldev_info;
+	struct {
+		unsigned short id;			/* Bytes 0-1 */
+		enum myrs_cmd_opcode opcode;		/* Byte 2 */
+		struct myrs_cmd_ctrl control;		/* Byte 3 */
+		u32 dma_size:24;			/* Bytes 4-6 */
+		unsigned char dma_num;			/* Byte 7 */
+		u64 sense_addr;				/* Bytes 8-15 */
+		struct myrs_pdev pdev;			/* Bytes 16-18 */
+		struct myrs_cmd_tmo tmo;		/* Byte 19 */
+		unsigned char sense_len;		/* Byte 20 */
+		enum myrs_ioctl_opcode ioctl_opcode;	/* Byte 21 */
+		unsigned char rsvd[10];			/* Bytes 22-31 */
+		union myrs_sgl dma_addr;		/* Bytes 32-63 */
+	} pdev_info;
+	struct {
+		unsigned short id;			/* Bytes 0-1 */
+		enum myrs_cmd_opcode opcode;		/* Byte 2 */
+		struct myrs_cmd_ctrl control;		/* Byte 3 */
+		u32 dma_size:24;			/* Bytes 4-6 */
+		unsigned char dma_num;			/* Byte 7 */
+		u64 sense_addr;				/* Bytes 8-15 */
+		unsigned short evnum_upper;		/* Bytes 16-17 */
+		unsigned char ctlr_num;			/* Byte 18 */
+		struct myrs_cmd_tmo tmo;		/* Byte 19 */
+		unsigned char sense_len;		/* Byte 20 */
+		enum myrs_ioctl_opcode ioctl_opcode;	/* Byte 21 */
+		unsigned short evnum_lower;		/* Bytes 22-23 */
+		unsigned char rsvd[8];			/* Bytes 24-31 */
+		union myrs_sgl dma_addr;		/* Bytes 32-63 */
+	} get_event;
+	struct {
+		unsigned short id;			/* Bytes 0-1 */
+		enum myrs_cmd_opcode opcode;		/* Byte 2 */
+		struct myrs_cmd_ctrl control;		/* Byte 3 */
+		u32 dma_size:24;			/* Bytes 4-6 */
+		unsigned char dma_num;			/* Byte 7 */
+		u64 sense_addr;				/* Bytes 8-15 */
+		union {
+			struct myrs_ldev ldev;		/* Bytes 16-18 */
+			struct myrs_pdev pdev;		/* Bytes 16-18 */
+		};
+		struct myrs_cmd_tmo tmo;		/* Byte 19 */
+		unsigned char sense_len;		/* Byte 20 */
+		enum myrs_ioctl_opcode ioctl_opcode;	/* Byte 21 */
+		enum myrs_devstate state;		/* Byte 22 */
+		unsigned char rsvd[9];			/* Bytes 23-31 */
+		union myrs_sgl dma_addr;		/* Bytes 32-63 */
+	} set_devstate;
+	struct {
+		unsigned short id;			/* Bytes 0-1 */
+		enum myrs_cmd_opcode opcode;		/* Byte 2 */
+		struct myrs_cmd_ctrl control;		/* Byte 3 */
+		u32 dma_size:24;			/* Bytes 4-6 */
+		unsigned char dma_num;			/* Byte 7 */
+		u64 sense_addr;				/* Bytes 8-15 */
+		struct myrs_ldev ldev;			/* Bytes 16-18 */
+		struct myrs_cmd_tmo tmo;		/* Byte 19 */
+		unsigned char sense_len;		/* Byte 20 */
+		enum myrs_ioctl_opcode ioctl_opcode;	/* Byte 21 */
+		unsigned char restore_consistency:1;	/* Byte 22 Bit 0 */
+		unsigned char initialized_area_only:1;	/* Byte 22 Bit 1 */
+		unsigned char rsvd1:6;			/* Byte 22 Bits 2-7 */
+		unsigned char rsvd2[9];			/* Bytes 23-31 */
+		union myrs_sgl dma_addr;		/* Bytes 32-63 */
+	} cc;
+	struct {
+		unsigned short id;			/* Bytes 0-1 */
+		enum myrs_cmd_opcode opcode;		/* Byte 2 */
+		struct myrs_cmd_ctrl control;		/* Byte 3 */
+		unsigned char first_cmd_mbox_size_kb;	/* Byte 4 */
+		unsigned char first_stat_mbox_size_kb;	/* Byte 5 */
+		unsigned char second_cmd_mbox_size_kb;	/* Byte 6 */
+		unsigned char second_stat_mbox_size_kb;	/* Byte 7 */
+		u64 sense_addr;				/* Bytes 8-15 */
+		unsigned int rsvd1:24;			/* Bytes 16-18 */
+		struct myrs_cmd_tmo tmo;		/* Byte 19 */
+		unsigned char sense_len;		/* Byte 20 */
+		enum myrs_ioctl_opcode ioctl_opcode;	/* Byte 21 */
+		unsigned char fwstat_buf_size_kb;	/* Byte 22 */
+		unsigned char rsvd2;			/* Byte 23 */
+		u64 fwstat_buf_addr;			/* Bytes 24-31 */
+		u64 first_cmd_mbox_addr;		/* Bytes 32-39 */
+		u64 first_stat_mbox_addr;		/* Bytes 40-47 */
+		u64 second_cmd_mbox_addr;		/* Bytes 48-55 */
+		u64 second_stat_mbox_addr;		/* Bytes 56-63 */
+	} set_mbox;
+	struct {
+		unsigned short id;			/* Bytes 0-1 */
+		enum myrs_cmd_opcode opcode;		/* Byte 2 */
+		struct myrs_cmd_ctrl control;		/* Byte 3 */
+		u32 dma_size:24;			/* Bytes 4-6 */
+		unsigned char dma_num;			/* Byte 7 */
+		u64 sense_addr;				/* Bytes 8-15 */
+		struct myrs_pdev pdev;			/* Bytes 16-18 */
+		struct myrs_cmd_tmo tmo;		/* Byte 19 */
+		unsigned char sense_len;		/* Byte 20 */
+		enum myrs_ioctl_opcode ioctl_opcode;	/* Byte 21 */
+		enum myrs_opdev opdev;			/* Byte 22 */
+		unsigned char rsvd[9];			/* Bytes 23-31 */
+		union myrs_sgl dma_addr;		/* Bytes 32-63 */
+	} dev_op;
+};
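+
+/*
+ * Sketch of one possible encoding (illustrative; assumes the command
+ * and ioctl opcode enums defined earlier in this header, and a
+ * DMA-mapped struct myrs_ctlr_info buffer at ctlr_info_addr):
+ *
+ *	union myrs_cmd_mbox mbox = { };
+ *
+ *	mbox.ctlr_info.opcode = MYRS_CMD_OP_IOCTL;
+ *	mbox.ctlr_info.ioctl_opcode = MYRS_IOCTL_GET_CTLR_INFO;
+ *	mbox.ctlr_info.control.dma_ctrl_to_host = true;
+ *	mbox.ctlr_info.control.no_autosense = true;
+ *	mbox.ctlr_info.dma_size = sizeof(struct myrs_ctlr_info);
+ *	mbox.ctlr_info.dma_addr.sge[0].sge_addr = ctlr_info_addr;
+ *	mbox.ctlr_info.dma_addr.sge[0].sge_count = mbox.ctlr_info.dma_size;
+ */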
+
+/*
+ * DAC960 V2 Firmware Controller Status Mailbox structure.
+ */
+struct myrs_stat_mbox {
+	unsigned short id;		/* Bytes 0-1 */
+	unsigned char status;		/* Byte 2 */
+	unsigned char sense_len;	/* Byte 3 */
+	int residual;			/* Bytes 4-7 */
+};
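+
+/*
+ * Completion flow (sketch): the controller posts one status mailbox per
+ * finished command; the driver matches it back to the submitting
+ * command block (struct myrs_cmdblk below) by id and copies the
+ * status, sense_len and residual fields over.
+ */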
+
+struct myrs_cmdblk {
+	union myrs_cmd_mbox mbox;
+	unsigned char status;
+	unsigned char sense_len;
+	int residual;
+	struct completion *complete;
+	struct myrs_sge *sgl;
+	dma_addr_t sgl_addr;
+	unsigned char *dcdb;
+	dma_addr_t dcdb_dma;
+	unsigned char *sense;
+	dma_addr_t sense_addr;
+};
+
+/*
+ * DAC960 Driver Controller structure.
+ */
+struct myrs_hba {
+	void __iomem *io_base;
+	void __iomem *mmio_base;
+	phys_addr_t io_addr;
+	phys_addr_t pci_addr;
+	unsigned int irq;
+
+	unsigned char model_name[28];
+	unsigned char fw_version[12];
+
+	struct Scsi_Host *host;
+	struct pci_dev *pdev;
+
+	unsigned int epoch;
+	unsigned int next_evseq;
+	/* Monitor flags */
+	bool needs_update;
+	bool disable_enc_msg;
+
+	struct workqueue_struct *work_q;
+	char work_q_name[20];
+	struct delayed_work monitor_work;
+	unsigned long primary_monitor_time;
+	unsigned long secondary_monitor_time;
+
+	spinlock_t queue_lock;
+
+	struct dma_pool *sg_pool;
+	struct dma_pool *sense_pool;
+	struct dma_pool *dcdb_pool;
+
+	void (*write_cmd_mbox)(union myrs_cmd_mbox *next_mbox,
+			       union myrs_cmd_mbox *cmd_mbox);
+	void (*get_cmd_mbox)(void __iomem *base);
+	void (*disable_intr)(void __iomem *base);
+	void (*reset)(void __iomem *base);
+
+	dma_addr_t cmd_mbox_addr;
+	size_t cmd_mbox_size;
+	union myrs_cmd_mbox *first_cmd_mbox;
+	union myrs_cmd_mbox *last_cmd_mbox;
+	union myrs_cmd_mbox *next_cmd_mbox;
+	union myrs_cmd_mbox *prev_cmd_mbox1;
+	union myrs_cmd_mbox *prev_cmd_mbox2;
+
+	dma_addr_t stat_mbox_addr;
+	size_t stat_mbox_size;
+	struct myrs_stat_mbox *first_stat_mbox;
+	struct myrs_stat_mbox *last_stat_mbox;
+	struct myrs_stat_mbox *next_stat_mbox;
+
+	struct myrs_cmdblk dcmd_blk;
+	struct myrs_cmdblk mcmd_blk;
+	struct mutex dcmd_mutex;
+
+	struct myrs_fwstat *fwstat_buf;
+	dma_addr_t fwstat_addr;
+
+	struct myrs_ctlr_info *ctlr_info;
+	struct mutex cinfo_mutex;
+
+	struct myrs_event *event_buf;
+};
+
+typedef unsigned char (*enable_mbox_t)(void __iomem *base, dma_addr_t addr);
+typedef int (*myrs_hwinit_t)(struct pci_dev *pdev,
+			     struct myrs_hba *c, void __iomem *base);
+
+struct myrs_privdata {
+	myrs_hwinit_t		hw_init;
+	irq_handler_t		irq_handler;
+	unsigned int		mmio_size;
+};
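+
+/*
+ * Note: each controller family (GEM, BA and LP, whose register layouts
+ * follow) is expected to supply its own hardware init hook, interrupt
+ * handler and MMIO window size through myrs_privdata.
+ */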
+
+/*
+ * DAC960 GEM Series Controller Interface Register Offsets.
+ */
+
+#define DAC960_GEM_mmio_size	0x600
+
+enum DAC960_GEM_reg_offset {
+	DAC960_GEM_IDB_READ_OFFSET	= 0x214,
+	DAC960_GEM_IDB_CLEAR_OFFSET	= 0x218,
+	DAC960_GEM_ODB_READ_OFFSET	= 0x224,
+	DAC960_GEM_ODB_CLEAR_OFFSET	= 0x228,
+	DAC960_GEM_IRQSTS_OFFSET	= 0x208,
+	DAC960_GEM_IRQMASK_READ_OFFSET	= 0x22C,
+	DAC960_GEM_IRQMASK_CLEAR_OFFSET	= 0x230,
+	DAC960_GEM_CMDMBX_OFFSET	= 0x510,
+	DAC960_GEM_CMDSTS_OFFSET	= 0x518,
+	DAC960_GEM_ERRSTS_READ_OFFSET	= 0x224,
+	DAC960_GEM_ERRSTS_CLEAR_OFFSET	= 0x228,
+};
+
+/*
+ * DAC960 GEM Series Inbound Door Bell Register.
+ */
+#define DAC960_GEM_IDB_HWMBOX_NEW_CMD	0x01
+#define DAC960_GEM_IDB_HWMBOX_ACK_STS	0x02
+#define DAC960_GEM_IDB_GEN_IRQ		0x04
+#define DAC960_GEM_IDB_CTRL_RESET	0x08
+#define DAC960_GEM_IDB_MMBOX_NEW_CMD	0x10
+
+#define DAC960_GEM_IDB_HWMBOX_FULL	0x01
+#define DAC960_GEM_IDB_INIT_IN_PROGRESS	0x02
+
+/*
+ * DAC960 GEM Series Outbound Door Bell Register.
+ */
+#define DAC960_GEM_ODB_HWMBOX_ACK_IRQ	0x01
+#define DAC960_GEM_ODB_MMBOX_ACK_IRQ	0x02
+#define DAC960_GEM_ODB_HWMBOX_STS_AVAIL 0x01
+#define DAC960_GEM_ODB_MMBOX_STS_AVAIL	0x02
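+
+/*
+ * Hardware mailbox handshake (sketch, inferred from the bit names
+ * above): the host writes a command mailbox, sets
+ * DAC960_GEM_IDB_HWMBOX_NEW_CMD, waits for
+ * DAC960_GEM_ODB_HWMBOX_STS_AVAIL, reads the status, and then acks it
+ * with DAC960_GEM_IDB_HWMBOX_ACK_STS.
+ */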
+
+/*
+ * DAC960 GEM Series Interrupt Mask Register.
+ */
+#define DAC960_GEM_IRQMASK_HWMBOX_IRQ	0x01
+#define DAC960_GEM_IRQMASK_MMBOX_IRQ	0x02
+
+/*
+ * DAC960 GEM Series Error Status Register.
+ */
+#define DAC960_GEM_ERRSTS_PENDING	0x20
+
+/*
+ * dma_addr_writeql is provided to write dma_addr_t types
+ * to a 64-bit PCI address space register.  The controller
+ * will accept having the register written as two 32-bit
+ * values.
+ *
+ * In HIGHMEM kernels, dma_addr_t is a 64-bit value;
+ * without HIGHMEM, dma_addr_t is a 32-bit value.
+ *
+ * The compiler should always fix up the assignment
+ * to u.wq appropriately, depending upon the size of
+ * dma_addr_t.
+ */
+static inline
+void dma_addr_writeql(dma_addr_t addr, void __iomem *write_address)
+{
+	union {
+		u64 wq;
+		uint wl[2];
+	} u;
+
+	u.wq = addr;
+
+	writel(u.wl[0], write_address);
+	writel(u.wl[1], write_address + 4);
+}
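+
+/*
+ * Typical use (illustrative): posting a 64-bit mailbox address into a
+ * command mailbox register, e.g.
+ *
+ *	dma_addr_writeql(cmd_mbox_addr, base + DAC960_GEM_CMDMBX_OFFSET);
+ */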
+
+/*
+ * DAC960 BA Series Controller Interface Register Offsets.
+ */
+
+#define DAC960_BA_mmio_size		0x80
+
+enum DAC960_BA_reg_offset {
+	DAC960_BA_IRQSTS_OFFSET	= 0x30,
+	DAC960_BA_IRQMASK_OFFSET = 0x34,
+	DAC960_BA_CMDMBX_OFFSET = 0x50,
+	DAC960_BA_CMDSTS_OFFSET = 0x58,
+	DAC960_BA_IDB_OFFSET	= 0x60,
+	DAC960_BA_ODB_OFFSET	= 0x61,
+	DAC960_BA_ERRSTS_OFFSET = 0x63,
+};
+
+/*
+ * DAC960 BA Series Inbound Door Bell Register.
+ */
+#define DAC960_BA_IDB_HWMBOX_NEW_CMD	0x01
+#define DAC960_BA_IDB_HWMBOX_ACK_STS	0x02
+#define DAC960_BA_IDB_GEN_IRQ		0x04
+#define DAC960_BA_IDB_CTRL_RESET	0x08
+#define DAC960_BA_IDB_MMBOX_NEW_CMD	0x10
+
+#define DAC960_BA_IDB_HWMBOX_EMPTY	0x01
+#define DAC960_BA_IDB_INIT_DONE		0x02
+
+/*
+ * DAC960 BA Series Outbound Door Bell Register.
+ */
+#define DAC960_BA_ODB_HWMBOX_ACK_IRQ	0x01
+#define DAC960_BA_ODB_MMBOX_ACK_IRQ	0x02
+
+#define DAC960_BA_ODB_HWMBOX_STS_AVAIL	0x01
+#define DAC960_BA_ODB_MMBOX_STS_AVAIL	0x02
+
+/*
+ * DAC960 BA Series Interrupt Mask Register.
+ */
+#define DAC960_BA_IRQMASK_DISABLE_IRQ	0x04
+#define DAC960_BA_IRQMASK_DISABLEW_I2O	0x08
+
+/*
+ * DAC960 BA Series Error Status Register.
+ */
+#define DAC960_BA_ERRSTS_PENDING	0x04
+
+/*
+ * DAC960 LP Series Controller Interface Register Offsets.
+ */
+
+#define DAC960_LP_mmio_size		0x80
+
+enum DAC960_LP_reg_offset {
+	DAC960_LP_CMDMBX_OFFSET = 0x10,
+	DAC960_LP_CMDSTS_OFFSET = 0x18,
+	DAC960_LP_IDB_OFFSET	= 0x20,
+	DAC960_LP_ODB_OFFSET	= 0x2C,
+	DAC960_LP_ERRSTS_OFFSET = 0x2E,
+	DAC960_LP_IRQSTS_OFFSET	= 0x30,
+	DAC960_LP_IRQMASK_OFFSET = 0x34,
+};
+
+/*
+ * DAC960 LP Series Inbound Door Bell Register.
+ */
+#define DAC960_LP_IDB_HWMBOX_NEW_CMD	0x01
+#define DAC960_LP_IDB_HWMBOX_ACK_STS	0x02
+#define DAC960_LP_IDB_GEN_IRQ		0x04
+#define DAC960_LP_IDB_CTRL_RESET	0x08
+#define DAC960_LP_IDB_MMBOX_NEW_CMD	0x10
+
+#define DAC960_LP_IDB_HWMBOX_FULL	0x01
+#define DAC960_LP_IDB_INIT_IN_PROGRESS	0x02
+
+/*
+ * DAC960 LP Series Outbound Door Bell Register.
+ */
+#define DAC960_LP_ODB_HWMBOX_ACK_IRQ	0x01
+#define DAC960_LP_ODB_MMBOX_ACK_IRQ	0x02
+
+#define DAC960_LP_ODB_HWMBOX_STS_AVAIL	0x01
+#define DAC960_LP_ODB_MMBOX_STS_AVAIL	0x02
+
+/*
+ * DAC960 LP Series Interrupt Mask Register.
+ */
+#define DAC960_LP_IRQMASK_DISABLE_IRQ	0x04
+
+/*
+ * DAC960 LP Series Error Status Register.
+ */
+#define DAC960_LP_ERRSTS_PENDING	0x04
+
+#endif /* _MYRS_H */
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index 6cd3e28..e0b427f 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -1,21 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /******************************************************************************
 **  Device driver for the PCI-SCSI NCR538XX controller family.
 **
 **  Copyright (C) 1994  Wolfgang Stanglmeier
 **
-**  This program is free software; you can redistribute it and/or modify
-**  it under the terms of the GNU General Public License as published by
-**  the Free Software Foundation; either version 2 of the License, or
-**  (at your option) any later version.
-**
-**  This program is distributed in the hope that it will be useful,
-**  but WITHOUT ANY WARRANTY; without even the implied warranty of
-**  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-**  GNU General Public License for more details.
-**
-**  You should have received a copy of the GNU General Public License
-**  along with this program; if not, write to the Free Software
-**  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 **
 **-----------------------------------------------------------------------------
 **
@@ -3922,11 +3910,14 @@
 					np->scsi_mode = SMODE_HVD;
 				break;
 			}
+			/* fall through */
 		case 3:	/* SYMBIOS controllers report HVD through GPIO3 */
 			if (INB(nc_gpreg) & 0x08)
 				break;
+			/* fall through */
 		case 2:	/* Set HVD unconditionally */
 			np->scsi_mode = SMODE_HVD;
+			/* fall through */
 		case 1:	/* Trust previous settings for HVD */
 			if (np->sv_stest2 & 0x20)
 				np->scsi_mode = SMODE_HVD;
@@ -6726,6 +6717,7 @@
 			OUTL_DSP (scr_to_cpu(tp->lp[0]->jump_ccb[0]));
 			return;
 		}
+		/* fall through */
 	case SIR_RESEL_BAD_TARGET:	/* Will send a TARGET RESET message */
 	case SIR_RESEL_BAD_LUN:		/* Will send a TARGET RESET message */
 	case SIR_RESEL_BAD_I_T_L_Q:	/* Will send an ABORT TAG message   */
@@ -8313,7 +8305,6 @@
 	tpnt->this_id		= 7;
 	tpnt->sg_tablesize	= SCSI_NCR_SG_TABLESIZE;
 	tpnt->cmd_per_lun	= SCSI_NCR_CMD_PER_LUN;
-	tpnt->use_clustering	= ENABLE_CLUSTERING;
 
 	if (device->differential)
 		driver_setup.diff_support = device->differential;
diff --git a/drivers/scsi/ncr53c8xx.h b/drivers/scsi/ncr53c8xx.h
index 02901c5..8326f5f 100644
--- a/drivers/scsi/ncr53c8xx.h
+++ b/drivers/scsi/ncr53c8xx.h
@@ -1,22 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /******************************************************************************
 **  Device driver for the PCI-SCSI NCR538XX controller family.
 **
 **  Copyright (C) 1994  Wolfgang Stanglmeier
 **  Copyright (C) 1998-2001  Gerard Roudier <groudier@free.fr>
 **
-**  This program is free software; you can redistribute it and/or modify
-**  it under the terms of the GNU General Public License as published by
-**  the Free Software Foundation; either version 2 of the License, or
-**  (at your option) any later version.
-**
-**  This program is distributed in the hope that it will be useful,
-**  but WITHOUT ANY WARRANTY; without even the implied warranty of
-**  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-**  GNU General Public License for more details.
-**
-**  You should have received a copy of the GNU General Public License
-**  along with this program; if not, write to the Free Software
-**  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 **
 **-----------------------------------------------------------------------------
 **
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 8620ac5..70db792 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -1,20 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * NinjaSCSI-32Bi Cardbus, NinjaSCSI-32UDE PCI/CardBus SCSI driver
  * Copyright (C) 2001, 2002, 2003
  *      YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp>
  *      GOTO Masanori <gotom@debian.or.jp>, <gotom@debian.org>
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- *
  * Revision History:
  *   1.0: Initial Release.
  *   1.1: Add /proc SDTR status.
@@ -274,7 +264,7 @@
 	.sg_tablesize			= NSP32_SG_SIZE,
 	.max_sectors			= 128,
 	.this_id			= NSP32_HOST_SCSIID,
-	.use_clustering			= DISABLE_CLUSTERING,
+	.dma_boundary			= PAGE_SIZE - 1,
 	.eh_abort_handler		= nsp32_eh_abort,
 	.eh_host_reset_handler		= nsp32_eh_host_reset,
 /*	.highmem_io			= 1, */
@@ -2441,7 +2431,6 @@
 
 	period      = data->synct[entry].period_num;
 	ackwidth    = data->synct[entry].ackwidth;
-	offset      = offset;
 	sample_rate = data->synct[entry].sample_rate;
 
 	target->syncreg    = TO_SYNCREG(period, offset);
@@ -2638,7 +2627,7 @@
 	/*
 	 * setup DMA 
 	 */
-	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
+	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
 		nsp32_msg (KERN_ERR, "failed to set PCI DMA mask");
 		goto scsi_unregister;
 	}
@@ -2646,7 +2635,9 @@
 	/*
 	 * allocate autoparam DMA resource.
 	 */
-	data->autoparam = pci_alloc_consistent(pdev, sizeof(nsp32_autoparam), &(data->auto_paddr));
+	data->autoparam = dma_alloc_coherent(&pdev->dev,
+			sizeof(nsp32_autoparam), &(data->auto_paddr),
+			GFP_KERNEL);
 	if (data->autoparam == NULL) {
 		nsp32_msg(KERN_ERR, "failed to allocate DMA memory");
 		goto scsi_unregister;
@@ -2655,8 +2646,8 @@
 	/*
 	 * allocate scatter-gather DMA resource.
 	 */
-	data->sg_list = pci_alloc_consistent(pdev, NSP32_SG_TABLE_SIZE,
-					     &(data->sg_paddr));
+	data->sg_list = dma_alloc_coherent(&pdev->dev, NSP32_SG_TABLE_SIZE,
+			&data->sg_paddr, GFP_KERNEL);
 	if (data->sg_list == NULL) {
 		nsp32_msg(KERN_ERR, "failed to allocate DMA memory");
 		goto free_autoparam;
@@ -2761,11 +2752,11 @@
 	free_irq(host->irq, data);
 
  free_sg_list:
-	pci_free_consistent(pdev, NSP32_SG_TABLE_SIZE,
+	dma_free_coherent(&pdev->dev, NSP32_SG_TABLE_SIZE,
 			    data->sg_list, data->sg_paddr);
 
  free_autoparam:
-	pci_free_consistent(pdev, sizeof(nsp32_autoparam),
+	dma_free_coherent(&pdev->dev, sizeof(nsp32_autoparam),
 			    data->autoparam, data->auto_paddr);
 	
  scsi_unregister:
@@ -2780,12 +2771,12 @@
 	nsp32_hw_data *data = (nsp32_hw_data *)host->hostdata;
 
 	if (data->autoparam) {
-		pci_free_consistent(data->Pci, sizeof(nsp32_autoparam),
+		dma_free_coherent(&data->Pci->dev, sizeof(nsp32_autoparam),
 				    data->autoparam, data->auto_paddr);
 	}
 
 	if (data->sg_list) {
-		pci_free_consistent(data->Pci, NSP32_SG_TABLE_SIZE,
+		dma_free_coherent(&data->Pci->dev, NSP32_SG_TABLE_SIZE,
 				    data->sg_list, data->sg_paddr);
 	}
 
diff --git a/drivers/scsi/nsp32.h b/drivers/scsi/nsp32.h
index c022182..ab0726c 100644
--- a/drivers/scsi/nsp32.h
+++ b/drivers/scsi/nsp32.h
@@ -1,16 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * Workbit NinjaSCSI-32Bi/UDE PCI/CardBus SCSI Host Bus Adapter driver
  * Basic data header
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
 */
 
 #ifndef _NSP32_H
diff --git a/drivers/scsi/osd/Kbuild b/drivers/scsi/osd/Kbuild
deleted file mode 100644
index 58cecd4..0000000
--- a/drivers/scsi/osd/Kbuild
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# Kbuild for the OSD modules
-#
-# Copyright (C) 2008 Panasas Inc.  All rights reserved.
-#
-# Authors:
-#   Boaz Harrosh <ooo@electrozaur.com>
-#   Benny Halevy <bhalevy@panasas.com>
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2
-#
-
-# libosd.ko - osd-initiator library
-libosd-y := osd_initiator.o
-obj-$(CONFIG_SCSI_OSD_INITIATOR) += libosd.o
-
-# osd.ko - SCSI ULD and char-device
-osd-y := osd_uld.o
-obj-$(CONFIG_SCSI_OSD_ULD) += osd.o
diff --git a/drivers/scsi/osd/Kconfig b/drivers/scsi/osd/Kconfig
deleted file mode 100644
index 347cc5e..0000000
--- a/drivers/scsi/osd/Kconfig
+++ /dev/null
@@ -1,49 +0,0 @@
-#
-# Kernel configuration file for the OSD scsi protocol
-#
-# Copyright (C) 2008 Panasas Inc.  All rights reserved.
-#
-# Authors:
-#   Boaz Harrosh <ooo@electrozaur.com>
-#   Benny Halevy <bhalevy@panasas.com>
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public version 2 License as
-# published by the Free Software Foundation
-#
-config SCSI_OSD_INITIATOR
-	tristate "OSD-Initiator library"
-	depends on SCSI
-	help
-		Enable the OSD-Initiator library (libosd.ko).
-		NOTE: You must also select CRYPTO_SHA1 + CRYPTO_HMAC and their
-		dependencies
-
-config SCSI_OSD_ULD
-	tristate "OSD Upper Level driver"
-	depends on SCSI_OSD_INITIATOR
-	help
-		Build a SCSI upper layer driver that exports /dev/osdX devices
-		to user-mode for testing and controlling OSD devices. It is also
-		needed by exofs, for mounting an OSD based file system.
-
-config SCSI_OSD_DPRINT_SENSE
-    int "(0-2) When sense is returned, DEBUG print all sense descriptors"
-    default 1
-    depends on SCSI_OSD_INITIATOR
-    help
-        When a CHECK_CONDITION status is returned from a target, and a
-        sense-buffer is retrieved, turning this on will dump a full
-        sense-decoding message. Setting to 2 will also print recoverable
-        errors that might be regularly returned for some filesystem
-        operations.
-
-config SCSI_OSD_DEBUG
-	bool "Compile All OSD modules with lots of DEBUG prints"
-	default n
-	depends on SCSI_OSD_INITIATOR
-	help
-		OSD Code is populated with lots of OSD_DEBUG(..) printouts to
-		dmesg. Enable this if you found a bug and you want to help us
-		track the problem (see also MAINTAINERS). Setting this will also
-		force SCSI_OSD_DPRINT_SENSE=2.
diff --git a/drivers/scsi/osd/osd_debug.h b/drivers/scsi/osd/osd_debug.h
deleted file mode 100644
index 2634126..0000000
--- a/drivers/scsi/osd/osd_debug.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * osd_debug.h - Some kprintf macros
- *
- * Copyright (C) 2008 Panasas Inc.  All rights reserved.
- *
- * Authors:
- *   Boaz Harrosh <ooo@electrozaur.com>
- *   Benny Halevy <bhalevy@panasas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- *
- */
-#ifndef __OSD_DEBUG_H__
-#define __OSD_DEBUG_H__
-
-#define OSD_ERR(fmt, a...) printk(KERN_ERR "osd: " fmt, ##a)
-#define OSD_INFO(fmt, a...) printk(KERN_NOTICE "osd: " fmt, ##a)
-
-#ifdef CONFIG_SCSI_OSD_DEBUG
-#define OSD_DEBUG(fmt, a...) \
-	printk(KERN_NOTICE "osd @%s:%d: " fmt, __func__, __LINE__, ##a)
-#else
-#define OSD_DEBUG(fmt, a...) do {} while (0)
-#endif
-
-/* u64 has problems with printk this will cast it to unsigned long long */
-#define _LLU(x) (unsigned long long)(x)
-
-#endif /* ndef __OSD_DEBUG_H__ */
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
deleted file mode 100644
index 67b1457..0000000
--- a/drivers/scsi/osd/osd_initiator.c
+++ /dev/null
@@ -1,2076 +0,0 @@
-/*
- * osd_initiator - Main body of the osd initiator library.
- *
- * Note: The file does not contain the advanced security functionality which
- * is only needed by the security_manager's initiators.
- *
- * Copyright (C) 2008 Panasas Inc.  All rights reserved.
- *
- * Authors:
- *   Boaz Harrosh <ooo@electrozaur.com>
- *   Benny Halevy <bhalevy@panasas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  1. Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in the
- *     documentation and/or other materials provided with the distribution.
- *  3. Neither the name of the Panasas company nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/slab.h>
-#include <linux/module.h>
-
-#include <scsi/osd_initiator.h>
-#include <scsi/osd_sec.h>
-#include <scsi/osd_attributes.h>
-#include <scsi/osd_sense.h>
-
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_request.h>
-
-#include "osd_debug.h"
-
-#ifndef __unused
-#    define __unused			__attribute__((unused))
-#endif
-
-enum { OSD_REQ_RETRIES = 1 };
-
-MODULE_AUTHOR("Boaz Harrosh <ooo@electrozaur.com>");
-MODULE_DESCRIPTION("open-osd initiator library libosd.ko");
-MODULE_LICENSE("GPL");
-
-static inline void build_test(void)
-{
-	/* structures were not packed */
-	BUILD_BUG_ON(sizeof(struct osd_capability) != OSD_CAP_LEN);
-	BUILD_BUG_ON(sizeof(struct osdv2_cdb) != OSD_TOTAL_CDB_LEN);
-	BUILD_BUG_ON(sizeof(struct osdv1_cdb) != OSDv1_TOTAL_CDB_LEN);
-}
-
-static const char *_osd_ver_desc(struct osd_request *or)
-{
-	return osd_req_is_ver1(or) ? "OSD1" : "OSD2";
-}
-
-#define ATTR_DEF_RI(id, len) ATTR_DEF(OSD_APAGE_ROOT_INFORMATION, id, len)
-
-static int _osd_get_print_system_info(struct osd_dev *od,
-	void *caps, struct osd_dev_info *odi)
-{
-	struct osd_request *or;
-	struct osd_attr get_attrs[] = {
-		ATTR_DEF_RI(OSD_ATTR_RI_VENDOR_IDENTIFICATION, 8),
-		ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_IDENTIFICATION, 16),
-		ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_MODEL, 32),
-		ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_REVISION_LEVEL, 4),
-		ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_SERIAL_NUMBER, 64 /*variable*/),
-		ATTR_DEF_RI(OSD_ATTR_RI_OSD_NAME, 64 /*variable*/),
-		ATTR_DEF_RI(OSD_ATTR_RI_TOTAL_CAPACITY, 8),
-		ATTR_DEF_RI(OSD_ATTR_RI_USED_CAPACITY, 8),
-		ATTR_DEF_RI(OSD_ATTR_RI_NUMBER_OF_PARTITIONS, 8),
-		ATTR_DEF_RI(OSD_ATTR_RI_CLOCK, 6),
-		/* IBM-OSD-SIM Has a bug with this one put it last */
-		ATTR_DEF_RI(OSD_ATTR_RI_OSD_SYSTEM_ID, 20),
-	};
-	void *iter = NULL, *pFirst;
-	int nelem = ARRAY_SIZE(get_attrs), a = 0;
-	int ret;
-
-	or = osd_start_request(od);
-	if (!or)
-		return -ENOMEM;
-
-	/* get attrs */
-	osd_req_get_attributes(or, &osd_root_object);
-	osd_req_add_get_attr_list(or, get_attrs, ARRAY_SIZE(get_attrs));
-
-	ret = osd_finalize_request(or, 0, caps, NULL);
-	if (ret)
-		goto out;
-
-	ret = osd_execute_request(or);
-	if (ret) {
-		OSD_ERR("Failed to detect %s => %d\n", _osd_ver_desc(or), ret);
-		goto out;
-	}
-
-	osd_req_decode_get_attr_list(or, get_attrs, &nelem, &iter);
-
-	OSD_INFO("Detected %s device\n",
-		_osd_ver_desc(or));
-
-	pFirst = get_attrs[a++].val_ptr;
-	OSD_INFO("VENDOR_IDENTIFICATION  [%s]\n",
-		(char *)pFirst);
-
-	pFirst = get_attrs[a++].val_ptr;
-	OSD_INFO("PRODUCT_IDENTIFICATION [%s]\n",
-		(char *)pFirst);
-
-	pFirst = get_attrs[a++].val_ptr;
-	OSD_INFO("PRODUCT_MODEL          [%s]\n",
-		(char *)pFirst);
-
-	pFirst = get_attrs[a++].val_ptr;
-	OSD_INFO("PRODUCT_REVISION_LEVEL [%u]\n",
-		pFirst ? get_unaligned_be32(pFirst) : ~0U);
-
-	pFirst = get_attrs[a++].val_ptr;
-	OSD_INFO("PRODUCT_SERIAL_NUMBER  [%s]\n",
-		(char *)pFirst);
-
-	odi->osdname_len = get_attrs[a].len;
-	/* Avoid NULL for memcmp optimization 0-length is good enough */
-	odi->osdname = kzalloc(odi->osdname_len + 1, GFP_KERNEL);
-	if (!odi->osdname) {
-		ret = -ENOMEM;
-		goto out;
-	}
-	if (odi->osdname_len)
-		memcpy(odi->osdname, get_attrs[a].val_ptr, odi->osdname_len);
-	OSD_INFO("OSD_NAME               [%s]\n", odi->osdname);
-	a++;
-
-	pFirst = get_attrs[a++].val_ptr;
-	OSD_INFO("TOTAL_CAPACITY         [0x%llx]\n",
-		pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
-
-	pFirst = get_attrs[a++].val_ptr;
-	OSD_INFO("USED_CAPACITY          [0x%llx]\n",
-		pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
-
-	pFirst = get_attrs[a++].val_ptr;
-	OSD_INFO("NUMBER_OF_PARTITIONS   [%llu]\n",
-		pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
-
-	if (a >= nelem)
-		goto out;
-
-	/* FIXME: Where are the time utilities */
-	pFirst = get_attrs[a++].val_ptr;
-	OSD_INFO("CLOCK                  [0x%6phN]\n", pFirst);
-
-	if (a < nelem) { /* IBM-OSD-SIM bug, Might not have it */
-		unsigned len = get_attrs[a].len;
-		char sid_dump[32*4 + 2]; /* 2nibbles+space+ASCII */
-
-		hex_dump_to_buffer(get_attrs[a].val_ptr, len, 32, 1,
-				   sid_dump, sizeof(sid_dump), true);
-		OSD_INFO("OSD_SYSTEM_ID(%d)\n"
-			 "        [%s]\n", len, sid_dump);
-
-		if (unlikely(len > sizeof(odi->systemid))) {
-			OSD_ERR("OSD Target error: OSD_SYSTEM_ID too long(%d). "
-				"device identification might not work\n", len);
-			len = sizeof(odi->systemid);
-		}
-		odi->systemid_len = len;
-		memcpy(odi->systemid, get_attrs[a].val_ptr, len);
-		a++;
-	}
-out:
-	osd_end_request(or);
-	return ret;
-}
-
-int osd_auto_detect_ver(struct osd_dev *od,
-	void *caps, struct osd_dev_info *odi)
-{
-	int ret;
-
-	/* Auto-detect the osd version */
-	ret = _osd_get_print_system_info(od, caps, odi);
-	if (ret) {
-		osd_dev_set_ver(od, OSD_VER1);
-		OSD_DEBUG("converting to OSD1\n");
-		ret = _osd_get_print_system_info(od, caps, odi);
-	}
-
-	return ret;
-}
-EXPORT_SYMBOL(osd_auto_detect_ver);
-
-static unsigned _osd_req_cdb_len(struct osd_request *or)
-{
-	return osd_req_is_ver1(or) ? OSDv1_TOTAL_CDB_LEN : OSD_TOTAL_CDB_LEN;
-}
-
-static unsigned _osd_req_alist_elem_size(struct osd_request *or, unsigned len)
-{
-	return osd_req_is_ver1(or) ?
-		osdv1_attr_list_elem_size(len) :
-		osdv2_attr_list_elem_size(len);
-}
-
-static void _osd_req_alist_elem_encode(struct osd_request *or,
-	void *attr_last, const struct osd_attr *oa)
-{
-	if (osd_req_is_ver1(or)) {
-		struct osdv1_attributes_list_element *attr = attr_last;
-
-		attr->attr_page = cpu_to_be32(oa->attr_page);
-		attr->attr_id = cpu_to_be32(oa->attr_id);
-		attr->attr_bytes = cpu_to_be16(oa->len);
-		memcpy(attr->attr_val, oa->val_ptr, oa->len);
-	} else {
-		struct osdv2_attributes_list_element *attr = attr_last;
-
-		attr->attr_page = cpu_to_be32(oa->attr_page);
-		attr->attr_id = cpu_to_be32(oa->attr_id);
-		attr->attr_bytes = cpu_to_be16(oa->len);
-		memcpy(attr->attr_val, oa->val_ptr, oa->len);
-	}
-}
-
-static int _osd_req_alist_elem_decode(struct osd_request *or,
-	void *cur_p, struct osd_attr *oa, unsigned max_bytes)
-{
-	unsigned inc;
-	if (osd_req_is_ver1(or)) {
-		struct osdv1_attributes_list_element *attr = cur_p;
-
-		if (max_bytes < sizeof(*attr))
-			return -1;
-
-		oa->len = be16_to_cpu(attr->attr_bytes);
-		inc = _osd_req_alist_elem_size(or, oa->len);
-		if (inc > max_bytes)
-			return -1;
-
-		oa->attr_page = be32_to_cpu(attr->attr_page);
-		oa->attr_id = be32_to_cpu(attr->attr_id);
-
-		/* OSD1: On empty attributes we return a pointer to 2 bytes
-		 * of zeros. This keeps similar behaviour with OSD2.
-		 * (See below)
-		 */
-		oa->val_ptr = likely(oa->len) ? attr->attr_val :
-						(u8 *)&attr->attr_bytes;
-	} else {
-		struct osdv2_attributes_list_element *attr = cur_p;
-
-		if (max_bytes < sizeof(*attr))
-			return -1;
-
-		oa->len = be16_to_cpu(attr->attr_bytes);
-		inc = _osd_req_alist_elem_size(or, oa->len);
-		if (inc > max_bytes)
-			return -1;
-
-		oa->attr_page = be32_to_cpu(attr->attr_page);
-		oa->attr_id = be32_to_cpu(attr->attr_id);
-
-		/* OSD2: For convenience, on empty attributes, we return 8 bytes
-		 * of zeros here. This keeps the same behaviour with OSD2r04,
-		 * and is nice with null terminating ASCII fields.
-		 * oa->val_ptr == NULL marks the end-of-list, or error.
-		 */
-		oa->val_ptr = likely(oa->len) ? attr->attr_val : attr->reserved;
-	}
-	return inc;
-}
-
-static unsigned _osd_req_alist_size(struct osd_request *or, void *list_head)
-{
-	return osd_req_is_ver1(or) ?
-		osdv1_list_size(list_head) :
-		osdv2_list_size(list_head);
-}
-
-static unsigned _osd_req_sizeof_alist_header(struct osd_request *or)
-{
-	return osd_req_is_ver1(or) ?
-		sizeof(struct osdv1_attributes_list_header) :
-		sizeof(struct osdv2_attributes_list_header);
-}
-
-static void _osd_req_set_alist_type(struct osd_request *or,
-	void *list, int list_type)
-{
-	if (osd_req_is_ver1(or)) {
-		struct osdv1_attributes_list_header *attr_list = list;
-
-		memset(attr_list, 0, sizeof(*attr_list));
-		attr_list->type = list_type;
-	} else {
-		struct osdv2_attributes_list_header *attr_list = list;
-
-		memset(attr_list, 0, sizeof(*attr_list));
-		attr_list->type = list_type;
-	}
-}
-
-static bool _osd_req_is_alist_type(struct osd_request *or,
-	void *list, int list_type)
-{
-	if (!list)
-		return false;
-
-	if (osd_req_is_ver1(or)) {
-		struct osdv1_attributes_list_header *attr_list = list;
-
-		return attr_list->type == list_type;
-	} else {
-		struct osdv2_attributes_list_header *attr_list = list;
-
-		return attr_list->type == list_type;
-	}
-}
-
-/* This is for List-objects not Attributes-Lists */
-static void _osd_req_encode_olist(struct osd_request *or,
-	struct osd_obj_id_list *list)
-{
-	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
-
-	if (osd_req_is_ver1(or)) {
-		cdbh->v1.list_identifier = list->list_identifier;
-		cdbh->v1.start_address = list->continuation_id;
-	} else {
-		cdbh->v2.list_identifier = list->list_identifier;
-		cdbh->v2.start_address = list->continuation_id;
-	}
-}
-
-static osd_cdb_offset osd_req_encode_offset(struct osd_request *or,
-	u64 offset, unsigned *padding)
-{
-	return __osd_encode_offset(offset, padding,
-			osd_req_is_ver1(or) ?
-				OSDv1_OFFSET_MIN_SHIFT : OSD_OFFSET_MIN_SHIFT,
-			OSD_OFFSET_MAX_SHIFT);
-}
-
-static struct osd_security_parameters *
-_osd_req_sec_params(struct osd_request *or)
-{
-	struct osd_cdb *ocdb = &or->cdb;
-
-	if (osd_req_is_ver1(or))
-		return (struct osd_security_parameters *)&ocdb->v1.sec_params;
-	else
-		return (struct osd_security_parameters *)&ocdb->v2.sec_params;
-}
-
-void osd_dev_init(struct osd_dev *osdd, struct scsi_device *scsi_device)
-{
-	memset(osdd, 0, sizeof(*osdd));
-	osdd->scsi_device = scsi_device;
-	osdd->def_timeout = BLK_DEFAULT_SG_TIMEOUT;
-#ifdef OSD_VER1_SUPPORT
-	osdd->version = OSD_VER2;
-#endif
-	/* TODO: Allocate pools for osd_request attributes ... */
-}
-EXPORT_SYMBOL(osd_dev_init);
-
-void osd_dev_fini(struct osd_dev *osdd)
-{
-	/* TODO: De-allocate pools */
-
-	osdd->scsi_device = NULL;
-}
-EXPORT_SYMBOL(osd_dev_fini);
-
-static struct osd_request *_osd_request_alloc(gfp_t gfp)
-{
-	struct osd_request *or;
-
-	/* TODO: Use mempool with one saved request */
-	or = kzalloc(sizeof(*or), gfp);
-	return or;
-}
-
-static void _osd_request_free(struct osd_request *or)
-{
-	kfree(or);
-}
-
-struct osd_request *osd_start_request(struct osd_dev *dev)
-{
-	struct osd_request *or;
-
-	or = _osd_request_alloc(GFP_KERNEL);
-	if (!or)
-		return NULL;
-
-	or->osd_dev = dev;
-	or->timeout = dev->def_timeout;
-	or->retries = OSD_REQ_RETRIES;
-
-	return or;
-}
-EXPORT_SYMBOL(osd_start_request);
-
-static void _osd_free_seg(struct osd_request *or __unused,
-	struct _osd_req_data_segment *seg)
-{
-	if (!seg->buff || !seg->alloc_size)
-		return;
-
-	kfree(seg->buff);
-	seg->buff = NULL;
-	seg->alloc_size = 0;
-}
-
-static void _put_request(struct request *rq)
-{
-	/*
-	 * If osd_finalize_request() was called but the request was not
-	 * executed through the block layer, then we must release BIOs.
-	 * TODO: Keep error code in or->async_error. Need to audit all
-	 *       code paths.
-	 */
-	if (unlikely(rq->bio))
-		blk_end_request(rq, BLK_STS_IOERR, blk_rq_bytes(rq));
-	else
-		blk_put_request(rq);
-}
-
-void osd_end_request(struct osd_request *or)
-{
-	struct request *rq = or->request;
-
-	if (rq) {
-		if (rq->next_rq) {
-			_put_request(rq->next_rq);
-			rq->next_rq = NULL;
-		}
-
-		_put_request(rq);
-	}
-
-	_osd_free_seg(or, &or->get_attr);
-	_osd_free_seg(or, &or->enc_get_attr);
-	_osd_free_seg(or, &or->set_attr);
-	_osd_free_seg(or, &or->cdb_cont);
-
-	_osd_request_free(or);
-}
-EXPORT_SYMBOL(osd_end_request);
-
-static void _set_error_resid(struct osd_request *or, struct request *req,
-			     blk_status_t error)
-{
-	or->async_error = error;
-	or->req_errors = scsi_req(req)->result;
-	or->sense_len = scsi_req(req)->sense_len;
-	if (or->sense_len)
-		memcpy(or->sense, scsi_req(req)->sense, or->sense_len);
-	if (or->out.req)
-		or->out.residual = scsi_req(or->out.req)->resid_len;
-	if (or->in.req)
-		or->in.residual = scsi_req(or->in.req)->resid_len;
-}
-
-int osd_execute_request(struct osd_request *or)
-{
-	blk_execute_rq(or->request->q, NULL, or->request, 0);
-
-	if (scsi_req(or->request)->result) {
-		_set_error_resid(or, or->request, BLK_STS_IOERR);
-		return -EIO;
-	}
-
-	_set_error_resid(or, or->request, BLK_STS_OK);
-	return 0;
-}
-EXPORT_SYMBOL(osd_execute_request);
-
-static void osd_request_async_done(struct request *req, blk_status_t error)
-{
-	struct osd_request *or = req->end_io_data;
-
-	_set_error_resid(or, req, error);
-	if (req->next_rq) {
-		__blk_put_request(req->q, req->next_rq);
-		req->next_rq = NULL;
-	}
-
-	__blk_put_request(req->q, req);
-	or->request = NULL;
-	or->in.req = NULL;
-	or->out.req = NULL;
-
-	if (or->async_done)
-		or->async_done(or, or->async_private);
-	else
-		osd_end_request(or);
-}
-
-int osd_execute_request_async(struct osd_request *or,
-	osd_req_done_fn *done, void *private)
-{
-	or->request->end_io_data = or;
-	or->async_private = private;
-	or->async_done = done;
-
-	blk_execute_rq_nowait(or->request->q, NULL, or->request, 0,
-			      osd_request_async_done);
-	return 0;
-}
-EXPORT_SYMBOL(osd_execute_request_async);
-
-u8 sg_out_pad_buffer[1 << OSDv1_OFFSET_MIN_SHIFT];
-u8 sg_in_pad_buffer[1 << OSDv1_OFFSET_MIN_SHIFT];
-
-static int _osd_realloc_seg(struct osd_request *or,
-	struct _osd_req_data_segment *seg, unsigned max_bytes)
-{
-	void *buff;
-
-	if (seg->alloc_size >= max_bytes)
-		return 0;
-
-	buff = krealloc(seg->buff, max_bytes, GFP_KERNEL);
-	if (!buff) {
-		OSD_ERR("Failed to Realloc %d-bytes was-%d\n", max_bytes,
-			seg->alloc_size);
-		return -ENOMEM;
-	}
-
-	memset(buff + seg->alloc_size, 0, max_bytes - seg->alloc_size);
-	seg->buff = buff;
-	seg->alloc_size = max_bytes;
-	return 0;
-}
-
-static int _alloc_cdb_cont(struct osd_request *or, unsigned total_bytes)
-{
-	OSD_DEBUG("total_bytes=%d\n", total_bytes);
-	return _osd_realloc_seg(or, &or->cdb_cont, total_bytes);
-}
-
-static int _alloc_set_attr_list(struct osd_request *or,
-	const struct osd_attr *oa, unsigned nelem, unsigned add_bytes)
-{
-	unsigned total_bytes = add_bytes;
-
-	for (; nelem; --nelem, ++oa)
-		total_bytes += _osd_req_alist_elem_size(or, oa->len);
-
-	OSD_DEBUG("total_bytes=%d\n", total_bytes);
-	return _osd_realloc_seg(or, &or->set_attr, total_bytes);
-}
-
-static int _alloc_get_attr_desc(struct osd_request *or, unsigned max_bytes)
-{
-	OSD_DEBUG("total_bytes=%d\n", max_bytes);
-	return _osd_realloc_seg(or, &or->enc_get_attr, max_bytes);
-}
-
-static int _alloc_get_attr_list(struct osd_request *or)
-{
-	OSD_DEBUG("total_bytes=%d\n", or->get_attr.total_bytes);
-	return _osd_realloc_seg(or, &or->get_attr, or->get_attr.total_bytes);
-}
-
-/*
- * Common to all OSD commands
- */
-
-static void _osdv1_req_encode_common(struct osd_request *or,
-	__be16 act, const struct osd_obj_id *obj, u64 offset, u64 len)
-{
-	struct osdv1_cdb *ocdb = &or->cdb.v1;
-
-	/*
-	 * For speed, the commands
-	 *	OSD_ACT_PERFORM_SCSI_COMMAND	, V1 0x8F7E, V2 0x8F7C
-	 *	OSD_ACT_SCSI_TASK_MANAGEMENT	, V1 0x8F7F, V2 0x8F7D
-	 * are not supported here. Should pass zero and set after the call
-	 */
-	act &= cpu_to_be16(~0x0080); /* V1 action code */
-
-	OSD_DEBUG("OSDv1 execute opcode 0x%x\n", be16_to_cpu(act));
-
-	ocdb->h.varlen_cdb.opcode = VARIABLE_LENGTH_CMD;
-	ocdb->h.varlen_cdb.additional_cdb_length = OSD_ADDITIONAL_CDB_LENGTH;
-	ocdb->h.varlen_cdb.service_action = act;
-
-	ocdb->h.partition = cpu_to_be64(obj->partition);
-	ocdb->h.object = cpu_to_be64(obj->id);
-	ocdb->h.v1.length = cpu_to_be64(len);
-	ocdb->h.v1.start_address = cpu_to_be64(offset);
-}
-
-static void _osdv2_req_encode_common(struct osd_request *or,
-	 __be16 act, const struct osd_obj_id *obj, u64 offset, u64 len)
-{
-	struct osdv2_cdb *ocdb = &or->cdb.v2;
-
-	OSD_DEBUG("OSDv2 execute opcode 0x%x\n", be16_to_cpu(act));
-
-	ocdb->h.varlen_cdb.opcode = VARIABLE_LENGTH_CMD;
-	ocdb->h.varlen_cdb.additional_cdb_length = OSD_ADDITIONAL_CDB_LENGTH;
-	ocdb->h.varlen_cdb.service_action = act;
-
-	ocdb->h.partition = cpu_to_be64(obj->partition);
-	ocdb->h.object = cpu_to_be64(obj->id);
-	ocdb->h.v2.length = cpu_to_be64(len);
-	ocdb->h.v2.start_address = cpu_to_be64(offset);
-}
-
-static void _osd_req_encode_common(struct osd_request *or,
-	__be16 act, const struct osd_obj_id *obj, u64 offset, u64 len)
-{
-	if (osd_req_is_ver1(or))
-		_osdv1_req_encode_common(or, act, obj, offset, len);
-	else
-		_osdv2_req_encode_common(or, act, obj, offset, len);
-}
-
-/*
- * Device commands
- */
-/*TODO: void osd_req_set_master_seed_xchg(struct osd_request *, ...); */
-/*TODO: void osd_req_set_master_key(struct osd_request *, ...); */
-
-void osd_req_format(struct osd_request *or, u64 tot_capacity)
-{
-	_osd_req_encode_common(or, OSD_ACT_FORMAT_OSD, &osd_root_object, 0,
-				tot_capacity);
-}
-EXPORT_SYMBOL(osd_req_format);
-
-int osd_req_list_dev_partitions(struct osd_request *or,
-	osd_id initial_id, struct osd_obj_id_list *list, unsigned nelem)
-{
-	return osd_req_list_partition_objects(or, 0, initial_id, list, nelem);
-}
-EXPORT_SYMBOL(osd_req_list_dev_partitions);
-
-static void _osd_req_encode_flush(struct osd_request *or,
-	enum osd_options_flush_scope_values op)
-{
-	struct osd_cdb_head *ocdb = osd_cdb_head(&or->cdb);
-
-	ocdb->command_specific_options = op;
-}
-
-void osd_req_flush_obsd(struct osd_request *or,
-	enum osd_options_flush_scope_values op)
-{
-	_osd_req_encode_common(or, OSD_ACT_FLUSH_OSD, &osd_root_object, 0, 0);
-	_osd_req_encode_flush(or, op);
-}
-EXPORT_SYMBOL(osd_req_flush_obsd);
-
-/*TODO: void osd_req_perform_scsi_command(struct osd_request *,
-	const u8 *cdb, ...); */
-/*TODO: void osd_req_task_management(struct osd_request *, ...); */
-
-/*
- * Partition commands
- */
-static void _osd_req_encode_partition(struct osd_request *or,
-	__be16 act, osd_id partition)
-{
-	struct osd_obj_id par = {
-		.partition = partition,
-		.id = 0,
-	};
-
-	_osd_req_encode_common(or, act, &par, 0, 0);
-}
-
-void osd_req_create_partition(struct osd_request *or, osd_id partition)
-{
-	_osd_req_encode_partition(or, OSD_ACT_CREATE_PARTITION, partition);
-}
-EXPORT_SYMBOL(osd_req_create_partition);
-
-void osd_req_remove_partition(struct osd_request *or, osd_id partition)
-{
-	_osd_req_encode_partition(or, OSD_ACT_REMOVE_PARTITION, partition);
-}
-EXPORT_SYMBOL(osd_req_remove_partition);
-
-/*TODO: void osd_req_set_partition_key(struct osd_request *,
-	osd_id partition, u8 new_key_id[OSD_CRYPTO_KEYID_SIZE],
-	u8 seed[OSD_CRYPTO_SEED_SIZE]); */
-
-static int _osd_req_list_objects(struct osd_request *or,
-	__be16 action, const struct osd_obj_id *obj, osd_id initial_id,
-	struct osd_obj_id_list *list, unsigned nelem)
-{
-	struct request_queue *q = osd_request_queue(or->osd_dev);
-	u64 len = nelem * sizeof(osd_id) + sizeof(*list);
-	struct bio *bio;
-
-	_osd_req_encode_common(or, action, obj, (u64)initial_id, len);
-
-	if (list->list_identifier)
-		_osd_req_encode_olist(or, list);
-
-	WARN_ON(or->in.bio);
-	bio = bio_map_kern(q, list, len, GFP_KERNEL);
-	if (IS_ERR(bio)) {
-		OSD_ERR("!!! Failed to allocate list_objects BIO\n");
-		return PTR_ERR(bio);
-	}
-
-	bio_set_op_attrs(bio, REQ_OP_READ, 0);
-	or->in.bio = bio;
-	or->in.total_bytes = bio->bi_iter.bi_size;
-	return 0;
-}
-
-int osd_req_list_partition_collections(struct osd_request *or,
-	osd_id partition, osd_id initial_id, struct osd_obj_id_list *list,
-	unsigned nelem)
-{
-	struct osd_obj_id par = {
-		.partition = partition,
-		.id = 0,
-	};
-
-	return osd_req_list_collection_objects(or, &par, initial_id, list,
-					       nelem);
-}
-EXPORT_SYMBOL(osd_req_list_partition_collections);
-
-int osd_req_list_partition_objects(struct osd_request *or,
-	osd_id partition, osd_id initial_id, struct osd_obj_id_list *list,
-	unsigned nelem)
-{
-	struct osd_obj_id par = {
-		.partition = partition,
-		.id = 0,
-	};
-
-	return _osd_req_list_objects(or, OSD_ACT_LIST, &par, initial_id, list,
-				     nelem);
-}
-EXPORT_SYMBOL(osd_req_list_partition_objects);
-
-void osd_req_flush_partition(struct osd_request *or,
-	osd_id partition, enum osd_options_flush_scope_values op)
-{
-	_osd_req_encode_partition(or, OSD_ACT_FLUSH_PARTITION, partition);
-	_osd_req_encode_flush(or, op);
-}
-EXPORT_SYMBOL(osd_req_flush_partition);
-
-/*
- * Collection commands
- */
-/*TODO: void osd_req_create_collection(struct osd_request *,
-	const struct osd_obj_id *); */
-/*TODO: void osd_req_remove_collection(struct osd_request *,
-	const struct osd_obj_id *); */
-
-int osd_req_list_collection_objects(struct osd_request *or,
-	const struct osd_obj_id *obj, osd_id initial_id,
-	struct osd_obj_id_list *list, unsigned nelem)
-{
-	return _osd_req_list_objects(or, OSD_ACT_LIST_COLLECTION, obj,
-				     initial_id, list, nelem);
-}
-EXPORT_SYMBOL(osd_req_list_collection_objects);
-
-/*TODO: void query(struct osd_request *, ...); V2 */
-
-void osd_req_flush_collection(struct osd_request *or,
-	const struct osd_obj_id *obj, enum osd_options_flush_scope_values op)
-{
-	_osd_req_encode_common(or, OSD_ACT_FLUSH_PARTITION, obj, 0, 0);
-	_osd_req_encode_flush(or, op);
-}
-EXPORT_SYMBOL(osd_req_flush_collection);
-
-/*TODO: void get_member_attrs(struct osd_request *, ...); V2 */
-/*TODO: void set_member_attrs(struct osd_request *, ...); V2 */
-
-/*
- * Object commands
- */
-void osd_req_create_object(struct osd_request *or, struct osd_obj_id *obj)
-{
-	_osd_req_encode_common(or, OSD_ACT_CREATE, obj, 0, 0);
-}
-EXPORT_SYMBOL(osd_req_create_object);
-
-void osd_req_remove_object(struct osd_request *or, struct osd_obj_id *obj)
-{
-	_osd_req_encode_common(or, OSD_ACT_REMOVE, obj, 0, 0);
-}
-EXPORT_SYMBOL(osd_req_remove_object);
-
-
-/*TODO: void osd_req_create_multi(struct osd_request *or,
-	struct osd_obj_id *first, struct osd_obj_id_list *list, unsigned nelem);
-*/
-
-void osd_req_write(struct osd_request *or,
-	const struct osd_obj_id *obj, u64 offset,
-	struct bio *bio, u64 len)
-{
-	_osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, len);
-	WARN_ON(or->out.bio || or->out.total_bytes);
-	WARN_ON(!op_is_write(bio_op(bio)));
-	or->out.bio = bio;
-	or->out.total_bytes = len;
-}
-EXPORT_SYMBOL(osd_req_write);
-
-int osd_req_write_kern(struct osd_request *or,
-	const struct osd_obj_id *obj, u64 offset, void* buff, u64 len)
-{
-	struct request_queue *req_q = osd_request_queue(or->osd_dev);
-	struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL);
-
-	if (IS_ERR(bio))
-		return PTR_ERR(bio);
-
-	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
-	osd_req_write(or, obj, offset, bio, len);
-	return 0;
-}
-EXPORT_SYMBOL(osd_req_write_kern);
-
-/*TODO: void osd_req_append(struct osd_request *,
-	const struct osd_obj_id *, struct bio *data_out); */
-/*TODO: void osd_req_create_write(struct osd_request *,
-	const struct osd_obj_id *, struct bio *data_out, u64 offset); */
-/*TODO: void osd_req_clear(struct osd_request *,
-	const struct osd_obj_id *, u64 offset, u64 len); */
-/*TODO: void osd_req_punch(struct osd_request *,
-	const struct osd_obj_id *, u64 offset, u64 len); V2 */
-
-void osd_req_flush_object(struct osd_request *or,
-	const struct osd_obj_id *obj, enum osd_options_flush_scope_values op,
-	/*V2*/ u64 offset, /*V2*/ u64 len)
-{
-	if (unlikely(osd_req_is_ver1(or) && (offset || len))) {
-		OSD_DEBUG("OSD Ver1 flush on specific range ignored\n");
-		offset = 0;
-		len = 0;
-	}
-
-	_osd_req_encode_common(or, OSD_ACT_FLUSH, obj, offset, len);
-	_osd_req_encode_flush(or, op);
-}
-EXPORT_SYMBOL(osd_req_flush_object);
-
-void osd_req_read(struct osd_request *or,
-	const struct osd_obj_id *obj, u64 offset,
-	struct bio *bio, u64 len)
-{
-	_osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len);
-	WARN_ON(or->in.bio || or->in.total_bytes);
-	WARN_ON(op_is_write(bio_op(bio)));
-	or->in.bio = bio;
-	or->in.total_bytes = len;
-}
-EXPORT_SYMBOL(osd_req_read);
-
-int osd_req_read_kern(struct osd_request *or,
-	const struct osd_obj_id *obj, u64 offset, void* buff, u64 len)
-{
-	struct request_queue *req_q = osd_request_queue(or->osd_dev);
-	struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL);
-
-	if (IS_ERR(bio))
-		return PTR_ERR(bio);
-
-	osd_req_read(or, obj, offset, bio, len);
-	return 0;
-}
-EXPORT_SYMBOL(osd_req_read_kern);
-
-static int _add_sg_continuation_descriptor(struct osd_request *or,
-	const struct osd_sg_entry *sglist, unsigned numentries, u64 *len)
-{
-	struct osd_sg_continuation_descriptor *oscd;
-	u32 oscd_size;
-	unsigned i;
-	int ret;
-
-	oscd_size = sizeof(*oscd) + numentries * sizeof(oscd->entries[0]);
-
-	if (!or->cdb_cont.total_bytes) {
-		/* First time, jump over the header, we will write to:
-		 *	cdb_cont.buff + cdb_cont.total_bytes
-		 */
-		or->cdb_cont.total_bytes =
-				sizeof(struct osd_continuation_segment_header);
-	}
-
-	ret = _alloc_cdb_cont(or, or->cdb_cont.total_bytes + oscd_size);
-	if (unlikely(ret))
-		return ret;
-
-	oscd = or->cdb_cont.buff + or->cdb_cont.total_bytes;
-	oscd->hdr.type = cpu_to_be16(SCATTER_GATHER_LIST);
-	oscd->hdr.pad_length = 0;
-	oscd->hdr.length = cpu_to_be32(oscd_size - sizeof(*oscd));
-
-	*len = 0;
-	/* copy the sg entries and convert to network byte order */
-	for (i = 0; i < numentries; i++) {
-		oscd->entries[i].offset = cpu_to_be64(sglist[i].offset);
-		oscd->entries[i].len    = cpu_to_be64(sglist[i].len);
-		*len += sglist[i].len;
-	}
-
-	or->cdb_cont.total_bytes += oscd_size;
-	OSD_DEBUG("total_bytes=%d oscd_size=%d numentries=%d\n",
-		  or->cdb_cont.total_bytes, oscd_size, numentries);
-	return 0;
-}
-
-static int _osd_req_finalize_cdb_cont(struct osd_request *or, const u8 *cap_key)
-{
-	struct request_queue *req_q = osd_request_queue(or->osd_dev);
-	struct bio *bio;
-	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
-	struct osd_continuation_segment_header *cont_seg_hdr;
-
-	if (!or->cdb_cont.total_bytes)
-		return 0;
-
-	cont_seg_hdr = or->cdb_cont.buff;
-	cont_seg_hdr->format = CDB_CONTINUATION_FORMAT_V2;
-	cont_seg_hdr->service_action = cdbh->varlen_cdb.service_action;
-
-	/* create a bio for continuation segment */
-	bio = bio_map_kern(req_q, or->cdb_cont.buff, or->cdb_cont.total_bytes,
-			   GFP_KERNEL);
-	if (IS_ERR(bio))
-		return PTR_ERR(bio);
-
-	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
-
-	/* integrity check the continuation before the bio is linked
-	 * with the other data segments since the continuation
-	 * integrity is separate from the other data segments.
-	 */
-	osd_sec_sign_data(cont_seg_hdr->integrity_check, bio, cap_key);
-
-	cdbh->v2.cdb_continuation_length = cpu_to_be32(or->cdb_cont.total_bytes);
-
-	/* we can't use _req_append_segment, because we need to link in the
-	 * continuation bio to the head of the bio list - the
-	 * continuation segment (if it exists) is always the first segment in
-	 * the out data buffer.
-	 */
-	bio->bi_next = or->out.bio;
-	or->out.bio = bio;
-	or->out.total_bytes += or->cdb_cont.total_bytes;
-
-	return 0;
-}
-
-/* osd_req_write_sg: Takes a @bio that points to the data out buffer and an
- * @sglist that has the scatter gather entries. Scatter-gather enables a write
- * of multiple none-contiguous areas of an object, in a single call. The extents
- * may overlap and/or be in any order. The only constrain is that:
- *	total_bytes(sglist) >= total_bytes(bio)
- */
-int osd_req_write_sg(struct osd_request *or,
-	const struct osd_obj_id *obj, struct bio *bio,
-	const struct osd_sg_entry *sglist, unsigned numentries)
-{
-	u64 len;
-	int ret = _add_sg_continuation_descriptor(or, sglist, numentries, &len);
-
-	if (ret)
-		return ret;
-	osd_req_write(or, obj, 0, bio, len);
-
-	return 0;
-}
-EXPORT_SYMBOL(osd_req_write_sg);
-
-/* osd_req_read_sg: Read multiple extents of an object into @bio
- * See osd_req_write_sg
- */
-int osd_req_read_sg(struct osd_request *or,
-	const struct osd_obj_id *obj, struct bio *bio,
-	const struct osd_sg_entry *sglist, unsigned numentries)
-{
-	u64 len;
-	u64 off;
-	int ret;
-
-	if (numentries > 1) {
-		off = 0;
-		ret = _add_sg_continuation_descriptor(or, sglist, numentries,
-						      &len);
-		if (ret)
-			return ret;
-	} else {
-		/* Optimize the case of single segment, read_sg is a
-		 * bidi operation.
-		 */
-		len = sglist->len;
-		off = sglist->offset;
-	}
-	osd_req_read(or, obj, off, bio, len);
-
-	return 0;
-}
-EXPORT_SYMBOL(osd_req_read_sg);
-
-/* SG-list write/read Kern API
- *
- * osd_req_{write,read}_sg_kern takes an array of @buff pointers and an array
- * of sg_entries. @numentries indicates how many pointers and sg_entries there
- * are.  By requiring an array of buff pointers. This allows a caller to do a
- * single write/read and scatter into multiple buffers.
- * NOTE: Each buffer + len should not cross a page boundary.
- */
-static struct bio *_create_sg_bios(struct osd_request *or,
-	void **buff, const struct osd_sg_entry *sglist, unsigned numentries)
-{
-	struct request_queue *q = osd_request_queue(or->osd_dev);
-	struct bio *bio;
-	unsigned i;
-
-	bio = bio_kmalloc(GFP_KERNEL, numentries);
-	if (unlikely(!bio)) {
-		OSD_DEBUG("Failed to allocate BIO size=%u\n", numentries);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	for (i = 0; i < numentries; i++) {
-		unsigned offset = offset_in_page(buff[i]);
-		struct page *page = virt_to_page(buff[i]);
-		unsigned len = sglist[i].len;
-		unsigned added_len;
-
-		BUG_ON(offset + len > PAGE_SIZE);
-		added_len = bio_add_pc_page(q, bio, page, len, offset);
-		if (unlikely(len != added_len)) {
-			OSD_DEBUG("bio_add_pc_page len(%d) != added_len(%d)\n",
-				  len, added_len);
-			bio_put(bio);
-			return ERR_PTR(-ENOMEM);
-		}
-	}
-
-	return bio;
-}
-
-int osd_req_write_sg_kern(struct osd_request *or,
-	const struct osd_obj_id *obj, void **buff,
-	const struct osd_sg_entry *sglist, unsigned numentries)
-{
-	struct bio *bio = _create_sg_bios(or, buff, sglist, numentries);
-	if (IS_ERR(bio))
-		return PTR_ERR(bio);
-
-	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
-	osd_req_write_sg(or, obj, bio, sglist, numentries);
-
-	return 0;
-}
-EXPORT_SYMBOL(osd_req_write_sg_kern);
-
-int osd_req_read_sg_kern(struct osd_request *or,
-	const struct osd_obj_id *obj, void **buff,
-	const struct osd_sg_entry *sglist, unsigned numentries)
-{
-	struct bio *bio = _create_sg_bios(or, buff, sglist, numentries);
-	if (IS_ERR(bio))
-		return PTR_ERR(bio);
-
-	osd_req_read_sg(or, obj, bio, sglist, numentries);
-
-	return 0;
-}
-EXPORT_SYMBOL(osd_req_read_sg_kern);
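-
-/* A minimal sketch of the kern variant (hypothetical buffers and extents;
- * each buffer + len must stay within one page): read two 512-byte extents
- * of the object into two separate kernel buffers with a single call:
- *
- *	static int example_read_two_buffers(struct osd_request *or,
- *		const struct osd_obj_id *obj, void *b0, void *b1)
- *	{
- *		struct osd_sg_entry sglist[2] = {
- *			{ .offset = 0,    .len = 512 },
- *			{ .offset = 8192, .len = 512 },
- *		};
- *		void *buff[2] = { b0, b1 };
- *
- *		return osd_req_read_sg_kern(or, obj, buff, sglist, 2);
- *	}
- */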
-
-
-
-void osd_req_get_attributes(struct osd_request *or,
-	const struct osd_obj_id *obj)
-{
-	_osd_req_encode_common(or, OSD_ACT_GET_ATTRIBUTES, obj, 0, 0);
-}
-EXPORT_SYMBOL(osd_req_get_attributes);
-
-void osd_req_set_attributes(struct osd_request *or,
-	const struct osd_obj_id *obj)
-{
-	_osd_req_encode_common(or, OSD_ACT_SET_ATTRIBUTES, obj, 0, 0);
-}
-EXPORT_SYMBOL(osd_req_set_attributes);
-
-/*
- * Attributes List-mode
- */
-
-int osd_req_add_set_attr_list(struct osd_request *or,
-	const struct osd_attr *oa, unsigned nelem)
-{
-	unsigned total_bytes = or->set_attr.total_bytes;
-	void *attr_last;
-	int ret;
-
-	if (or->attributes_mode &&
-	    or->attributes_mode != OSD_CDB_GET_SET_ATTR_LISTS) {
-		WARN_ON(1);
-		return -EINVAL;
-	}
-	or->attributes_mode = OSD_CDB_GET_SET_ATTR_LISTS;
-
-	if (!total_bytes) { /* first-time: allocate and put list header */
-		total_bytes = _osd_req_sizeof_alist_header(or);
-		ret = _alloc_set_attr_list(or, oa, nelem, total_bytes);
-		if (ret)
-			return ret;
-		_osd_req_set_alist_type(or, or->set_attr.buff,
-					OSD_ATTR_LIST_SET_RETRIEVE);
-	}
-	attr_last = or->set_attr.buff + total_bytes;
-
-	for (; nelem; --nelem) {
-		unsigned elem_size = _osd_req_alist_elem_size(or, oa->len);
-
-		total_bytes += elem_size;
-		if (unlikely(or->set_attr.alloc_size < total_bytes)) {
-			or->set_attr.total_bytes = total_bytes - elem_size;
-			ret = _alloc_set_attr_list(or, oa, nelem, total_bytes);
-			if (ret)
-				return ret;
-			attr_last =
-				or->set_attr.buff + or->set_attr.total_bytes;
-		}
-
-		_osd_req_alist_elem_encode(or, attr_last, oa);
-
-		attr_last += elem_size;
-		++oa;
-	}
-
-	or->set_attr.total_bytes = total_bytes;
-	return 0;
-}
-EXPORT_SYMBOL(osd_req_add_set_attr_list);
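-
-/* A minimal list-mode sketch (the page and id values are hypothetical):
- * queue one attribute value to be set on the object; the encoded list is
- * appended to the data-out buffer when the request is finalized:
- *
- *	static int example_set_one_attr(struct osd_request *or, __be64 *val)
- *	{
- *		struct osd_attr attr = {
- *			.attr_page = 0x30000,
- *			.attr_id   = 0x1,
- *			.len       = sizeof(*val),
- *			.val_ptr   = val,
- *		};
- *
- *		return osd_req_add_set_attr_list(or, &attr, 1);
- *	}
- */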
-
-static int _req_append_segment(struct osd_request *or,
-	unsigned padding, struct _osd_req_data_segment *seg,
-	struct _osd_req_data_segment *last_seg, struct _osd_io_info *io)
-{
-	void *pad_buff;
-	int ret;
-
-	if (padding) {
-		/* check if we can just add it to last buffer */
-		if (last_seg &&
-		    (padding <= last_seg->alloc_size - last_seg->total_bytes))
-			pad_buff = last_seg->buff + last_seg->total_bytes;
-		else
-			pad_buff = io->pad_buff;
-
-		ret = blk_rq_map_kern(io->req->q, io->req, pad_buff, padding,
-				       GFP_KERNEL);
-		if (ret)
-			return ret;
-		io->total_bytes += padding;
-	}
-
-	ret = blk_rq_map_kern(io->req->q, io->req, seg->buff, seg->total_bytes,
-			       GFP_KERNEL);
-	if (ret)
-		return ret;
-
-	io->total_bytes += seg->total_bytes;
-	OSD_DEBUG("padding=%d buff=%p total_bytes=%d\n", padding, seg->buff,
-		  seg->total_bytes);
-	return 0;
-}
-
-static int _osd_req_finalize_set_attr_list(struct osd_request *or)
-{
-	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
-	unsigned padding;
-	int ret;
-
-	if (!or->set_attr.total_bytes) {
-		cdbh->attrs_list.set_attr_offset = OSD_OFFSET_UNUSED;
-		return 0;
-	}
-
-	cdbh->attrs_list.set_attr_bytes = cpu_to_be32(or->set_attr.total_bytes);
-	cdbh->attrs_list.set_attr_offset =
-		osd_req_encode_offset(or, or->out.total_bytes, &padding);
-
-	ret = _req_append_segment(or, padding, &or->set_attr,
-				  or->out.last_seg, &or->out);
-	if (ret)
-		return ret;
-
-	or->out.last_seg = &or->set_attr;
-	return 0;
-}
-
-int osd_req_add_get_attr_list(struct osd_request *or,
-	const struct osd_attr *oa, unsigned nelem)
-{
-	unsigned total_bytes = or->enc_get_attr.total_bytes;
-	void *attr_last;
-	int ret;
-
-	if (or->attributes_mode &&
-	    or->attributes_mode != OSD_CDB_GET_SET_ATTR_LISTS) {
-		WARN_ON(1);
-		return -EINVAL;
-	}
-	or->attributes_mode = OSD_CDB_GET_SET_ATTR_LISTS;
-
-	/* first time calc data-in list header size */
-	if (!or->get_attr.total_bytes)
-		or->get_attr.total_bytes = _osd_req_sizeof_alist_header(or);
-
-	/* calc data-out info */
-	if (!total_bytes) { /* first-time: allocate and put list header */
-		unsigned max_bytes;
-
-		total_bytes = _osd_req_sizeof_alist_header(or);
-		max_bytes = total_bytes +
-			nelem * sizeof(struct osd_attributes_list_attrid);
-		ret = _alloc_get_attr_desc(or, max_bytes);
-		if (ret)
-			return ret;
-
-		_osd_req_set_alist_type(or, or->enc_get_attr.buff,
-					OSD_ATTR_LIST_GET);
-	}
-	attr_last = or->enc_get_attr.buff + total_bytes;
-
-	for (; nelem; --nelem) {
-		struct osd_attributes_list_attrid *attrid;
-		const unsigned cur_size = sizeof(*attrid);
-
-		total_bytes += cur_size;
-		if (unlikely(or->enc_get_attr.alloc_size < total_bytes)) {
-			or->enc_get_attr.total_bytes = total_bytes - cur_size;
-			ret = _alloc_get_attr_desc(or,
-					total_bytes + nelem * sizeof(*attrid));
-			if (ret)
-				return ret;
-			attr_last = or->enc_get_attr.buff +
-				or->enc_get_attr.total_bytes;
-		}
-
-		attrid = attr_last;
-		attrid->attr_page = cpu_to_be32(oa->attr_page);
-		attrid->attr_id = cpu_to_be32(oa->attr_id);
-
-		attr_last += cur_size;
-
-		/* calc data-in size */
-		or->get_attr.total_bytes +=
-			_osd_req_alist_elem_size(or, oa->len);
-		++oa;
-	}
-
-	or->enc_get_attr.total_bytes = total_bytes;
-
-	OSD_DEBUG(
-	       "get_attr.total_bytes=%u(%u) enc_get_attr.total_bytes=%u(%zu)\n",
-	       or->get_attr.total_bytes,
-	       or->get_attr.total_bytes - _osd_req_sizeof_alist_header(or),
-	       or->enc_get_attr.total_bytes,
-	       (or->enc_get_attr.total_bytes - _osd_req_sizeof_alist_header(or))
-			/ sizeof(struct osd_attributes_list_attrid));
-
-	return 0;
-}
-EXPORT_SYMBOL(osd_req_add_get_attr_list);
-
-static int _osd_req_finalize_get_attr_list(struct osd_request *or)
-{
-	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
-	unsigned out_padding;
-	unsigned in_padding;
-	int ret;
-
-	if (!or->enc_get_attr.total_bytes) {
-		cdbh->attrs_list.get_attr_desc_offset = OSD_OFFSET_UNUSED;
-		cdbh->attrs_list.get_attr_offset = OSD_OFFSET_UNUSED;
-		return 0;
-	}
-
-	ret = _alloc_get_attr_list(or);
-	if (ret)
-		return ret;
-
-	/* The out-going buffer info update */
-	OSD_DEBUG("out-going\n");
-	cdbh->attrs_list.get_attr_desc_bytes =
-		cpu_to_be32(or->enc_get_attr.total_bytes);
-
-	cdbh->attrs_list.get_attr_desc_offset =
-		osd_req_encode_offset(or, or->out.total_bytes, &out_padding);
-
-	ret = _req_append_segment(or, out_padding, &or->enc_get_attr,
-				  or->out.last_seg, &or->out);
-	if (ret)
-		return ret;
-	or->out.last_seg = &or->enc_get_attr;
-
-	/* The incoming buffer info update */
-	OSD_DEBUG("in-coming\n");
-	cdbh->attrs_list.get_attr_alloc_length =
-		cpu_to_be32(or->get_attr.total_bytes);
-
-	cdbh->attrs_list.get_attr_offset =
-		osd_req_encode_offset(or, or->in.total_bytes, &in_padding);
-
-	ret = _req_append_segment(or, in_padding, &or->get_attr, NULL,
-				  &or->in);
-	if (ret)
-		return ret;
-	or->in.last_seg = &or->get_attr;
-
-	return 0;
-}
-
-int osd_req_decode_get_attr_list(struct osd_request *or,
-	struct osd_attr *oa, int *nelem, void **iterator)
-{
-	unsigned cur_bytes, returned_bytes;
-	int n;
-	const unsigned sizeof_attr_list = _osd_req_sizeof_alist_header(or);
-	void *cur_p;
-
-	if (!_osd_req_is_alist_type(or, or->get_attr.buff,
-				    OSD_ATTR_LIST_SET_RETRIEVE)) {
-		oa->attr_page = 0;
-		oa->attr_id = 0;
-		oa->val_ptr = NULL;
-		oa->len = 0;
-		*iterator = NULL;
-		return 0;
-	}
-
-	if (*iterator) {
-		BUG_ON((*iterator < or->get_attr.buff) ||
-		     (or->get_attr.buff + or->get_attr.alloc_size < *iterator));
-		cur_p = *iterator;
-		cur_bytes = (*iterator - or->get_attr.buff) - sizeof_attr_list;
-		returned_bytes = or->get_attr.total_bytes;
-	} else { /* first time decode the list header */
-		cur_bytes = sizeof_attr_list;
-		returned_bytes = _osd_req_alist_size(or, or->get_attr.buff) +
-					sizeof_attr_list;
-
-		cur_p = or->get_attr.buff + sizeof_attr_list;
-
-		if (returned_bytes > or->get_attr.alloc_size) {
-			OSD_DEBUG("target report: space was not big enough! "
-				  "Allocate=%u Needed=%u\n",
-				  or->get_attr.alloc_size,
-				  returned_bytes + sizeof_attr_list);
-
-			returned_bytes =
-				or->get_attr.alloc_size - sizeof_attr_list;
-		}
-		or->get_attr.total_bytes = returned_bytes;
-	}
-
-	for (n = 0; (n < *nelem) && (cur_bytes < returned_bytes); ++n) {
-		int inc = _osd_req_alist_elem_decode(or, cur_p, oa,
-						 returned_bytes - cur_bytes);
-
-		if (inc < 0) {
-			OSD_ERR("BAD FOOD from target. list not valid!"
-				"c=%d r=%d n=%d\n",
-				cur_bytes, returned_bytes, n);
-			oa->val_ptr = NULL;
-			cur_bytes = returned_bytes; /* break the caller loop */
-			break;
-		}
-
-		cur_bytes += inc;
-		cur_p += inc;
-		++oa;
-	}
-
-	*iterator = (returned_bytes - cur_bytes) ? cur_p : NULL;
-	*nelem = n;
-	return returned_bytes - cur_bytes;
-}
-EXPORT_SYMBOL(osd_req_decode_get_attr_list);
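-
-/* A decode-loop sketch (array size is arbitrary): after the request has
- * executed, walk the returned list in chunks until the iterator is
- * exhausted:
- *
- *	static void example_decode_all(struct osd_request *or)
- *	{
- *		struct osd_attr attrs[8];
- *		void *iter = NULL;
- *		int nelem, i;
- *
- *		do {
- *			nelem = ARRAY_SIZE(attrs);
- *			osd_req_decode_get_attr_list(or, attrs, &nelem, &iter);
- *			for (i = 0; i < nelem; ++i)
- *				OSD_DEBUG("page=0x%x id=0x%x len=%u\n",
- *					  attrs[i].attr_page,
- *					  attrs[i].attr_id, attrs[i].len);
- *		} while (iter);
- *	}
- */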
-
-/*
- * Attributes Page-mode
- */
-
-int osd_req_add_get_attr_page(struct osd_request *or,
-	u32 page_id, void *attar_page, unsigned max_page_len,
-	const struct osd_attr *set_one_attr)
-{
-	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
-
-	if (or->attributes_mode &&
-	    or->attributes_mode != OSD_CDB_GET_ATTR_PAGE_SET_ONE) {
-		WARN_ON(1);
-		return -EINVAL;
-	}
-	or->attributes_mode = OSD_CDB_GET_ATTR_PAGE_SET_ONE;
-
-	or->get_attr.buff = attar_page;
-	or->get_attr.total_bytes = max_page_len;
-
-	cdbh->attrs_page.get_attr_page = cpu_to_be32(page_id);
-	cdbh->attrs_page.get_attr_alloc_length = cpu_to_be32(max_page_len);
-
-	if (!set_one_attr || !set_one_attr->attr_page)
-		return 0; /* The set is optional */
-
-	or->set_attr.buff = set_one_attr->val_ptr;
-	or->set_attr.total_bytes = set_one_attr->len;
-
-	cdbh->attrs_page.set_attr_page = cpu_to_be32(set_one_attr->attr_page);
-	cdbh->attrs_page.set_attr_id = cpu_to_be32(set_one_attr->attr_id);
-	cdbh->attrs_page.set_attr_length = cpu_to_be32(set_one_attr->len);
-	return 0;
-}
-EXPORT_SYMBOL(osd_req_add_get_attr_page);
-
-static int _osd_req_finalize_attr_page(struct osd_request *or)
-{
-	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
-	unsigned in_padding, out_padding;
-	int ret;
-
-	/* returned page */
-	cdbh->attrs_page.get_attr_offset =
-		osd_req_encode_offset(or, or->in.total_bytes, &in_padding);
-
-	ret = _req_append_segment(or, in_padding, &or->get_attr, NULL,
-				  &or->in);
-	if (ret)
-		return ret;
-
-	if (or->set_attr.total_bytes == 0)
-		return 0;
-
-	/* set one value */
-	cdbh->attrs_page.set_attr_offset =
-		osd_req_encode_offset(or, or->out.total_bytes, &out_padding);
-
-	ret = _req_append_segment(or, out_padding, &or->set_attr, NULL,
-				  &or->out);
-	return ret;
-}
-
-static inline void osd_sec_parms_set_out_offset(bool is_v1,
-	struct osd_security_parameters *sec_parms, osd_cdb_offset offset)
-{
-	if (is_v1)
-		sec_parms->v1.data_out_integrity_check_offset = offset;
-	else
-		sec_parms->v2.data_out_integrity_check_offset = offset;
-}
-
-static inline void osd_sec_parms_set_in_offset(bool is_v1,
-	struct osd_security_parameters *sec_parms, osd_cdb_offset offset)
-{
-	if (is_v1)
-		sec_parms->v1.data_in_integrity_check_offset = offset;
-	else
-		sec_parms->v2.data_in_integrity_check_offset = offset;
-}
-
-static int _osd_req_finalize_data_integrity(struct osd_request *or,
-	bool has_in, bool has_out, struct bio *out_data_bio, u64 out_data_bytes,
-	const u8 *cap_key)
-{
-	struct osd_security_parameters *sec_parms = _osd_req_sec_params(or);
-	int ret;
-
-	if (!osd_is_sec_alldata(sec_parms))
-		return 0;
-
-	if (has_out) {
-		struct _osd_req_data_segment seg = {
-			.buff = &or->out_data_integ,
-			.total_bytes = sizeof(or->out_data_integ),
-		};
-		unsigned pad;
-
-		or->out_data_integ.data_bytes = cpu_to_be64(out_data_bytes);
-		or->out_data_integ.set_attributes_bytes = cpu_to_be64(
-			or->set_attr.total_bytes);
-		or->out_data_integ.get_attributes_bytes = cpu_to_be64(
-			or->enc_get_attr.total_bytes);
-
-		osd_sec_parms_set_out_offset(osd_req_is_ver1(or), sec_parms,
-			osd_req_encode_offset(or, or->out.total_bytes, &pad));
-
-		ret = _req_append_segment(or, pad, &seg, or->out.last_seg,
-					  &or->out);
-		if (ret)
-			return ret;
-		or->out.last_seg = NULL;
-
-		/* they are now all chained to the request; sign them all together */
-		osd_sec_sign_data(&or->out_data_integ, out_data_bio,
-				  cap_key);
-	}
-
-	if (has_in) {
-		struct _osd_req_data_segment seg = {
-			.buff = &or->in_data_integ,
-			.total_bytes = sizeof(or->in_data_integ),
-		};
-		unsigned pad;
-
-		osd_sec_parms_set_in_offset(osd_req_is_ver1(or), sec_parms,
-			osd_req_encode_offset(or, or->in.total_bytes, &pad));
-
-		ret = _req_append_segment(or, pad, &seg, or->in.last_seg,
-					  &or->in);
-		if (ret)
-			return ret;
-
-		or->in.last_seg = NULL;
-	}
-
-	return 0;
-}
-
-/*
- * osd_finalize_request and helpers
- */
-static struct request *_make_request(struct request_queue *q, bool has_write,
-			      struct _osd_io_info *oii)
-{
-	struct request *req;
-	struct bio *bio = oii->bio;
-	int ret;
-
-	req = blk_get_request(q, has_write ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN,
-			0);
-	if (IS_ERR(req))
-		return req;
-
-	for_each_bio(bio) {
-		struct bio *bounce_bio = bio;
-
-		ret = blk_rq_append_bio(req, &bounce_bio);
-		if (ret) {
-			blk_put_request(req);
-			return ERR_PTR(ret);
-		}
-	}
-
-	return req;
-}
-
-static int _init_blk_request(struct osd_request *or,
-	bool has_in, bool has_out)
-{
-	struct scsi_device *scsi_device = or->osd_dev->scsi_device;
-	struct request_queue *q = scsi_device->request_queue;
-	struct request *req;
-	int ret;
-
-	req = _make_request(q, has_out, has_out ? &or->out : &or->in);
-	if (IS_ERR(req)) {
-		ret = PTR_ERR(req);
-		goto out;
-	}
-
-	or->request = req;
-	req->rq_flags |= RQF_QUIET;
-
-	req->timeout = or->timeout;
-	scsi_req(req)->retries = or->retries;
-
-	if (has_out) {
-		or->out.req = req;
-		if (has_in) {
-			/* allocate bidi request */
-			req = _make_request(q, false, &or->in);
-			if (IS_ERR(req)) {
-				OSD_DEBUG("blk_get_request for bidi failed\n");
-				ret = PTR_ERR(req);
-				goto out;
-			}
-			or->in.req = or->request->next_rq = req;
-		}
-	} else if (has_in)
-		or->in.req = req;
-
-	ret = 0;
-out:
-	OSD_DEBUG("or=%p has_in=%d has_out=%d => %d, %p\n",
-			or, has_in, has_out, ret, or->request);
-	return ret;
-}
-
-int osd_finalize_request(struct osd_request *or,
-	u8 options, const void *cap, const u8 *cap_key)
-{
-	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
-	bool has_in, has_out;
-	 /* Save for data_integrity without the cdb_continuation */
-	struct bio *out_data_bio = or->out.bio;
-	u64 out_data_bytes = or->out.total_bytes;
-	int ret;
-
-	if (options & OSD_REQ_FUA)
-		cdbh->options |= OSD_CDB_FUA;
-
-	if (options & OSD_REQ_DPO)
-		cdbh->options |= OSD_CDB_DPO;
-
-	if (options & OSD_REQ_BYPASS_TIMESTAMPS)
-		cdbh->timestamp_control = OSD_CDB_BYPASS_TIMESTAMPS;
-
-	osd_set_caps(&or->cdb, cap);
-
-	has_in = or->in.bio || or->get_attr.total_bytes;
-	has_out = or->out.bio || or->cdb_cont.total_bytes ||
-		or->set_attr.total_bytes || or->enc_get_attr.total_bytes;
-
-	ret = _osd_req_finalize_cdb_cont(or, cap_key);
-	if (ret) {
-		OSD_DEBUG("_osd_req_finalize_cdb_cont failed\n");
-		return ret;
-	}
-	ret = _init_blk_request(or, has_in, has_out);
-	if (ret) {
-		OSD_DEBUG("_init_blk_request failed\n");
-		return ret;
-	}
-
-	or->out.pad_buff = sg_out_pad_buffer;
-	or->in.pad_buff = sg_in_pad_buffer;
-
-	if (!or->attributes_mode)
-		or->attributes_mode = OSD_CDB_GET_SET_ATTR_LISTS;
-	cdbh->command_specific_options |= or->attributes_mode;
-	if (or->attributes_mode == OSD_CDB_GET_ATTR_PAGE_SET_ONE) {
-		ret = _osd_req_finalize_attr_page(or);
-		if (ret) {
-			OSD_DEBUG("_osd_req_finalize_attr_page failed\n");
-			return ret;
-		}
-	} else {
-		/* TODO: I think that for the GET_ATTR command these 2 should
-		 * be reversed to keep them in execution order (for embedded
-		 * targets with low memory footprint)
-		 */
-		ret = _osd_req_finalize_set_attr_list(or);
-		if (ret) {
-			OSD_DEBUG("_osd_req_finalize_set_attr_list failed\n");
-			return ret;
-		}
-
-		ret = _osd_req_finalize_get_attr_list(or);
-		if (ret) {
-			OSD_DEBUG("_osd_req_finalize_get_attr_list failed\n");
-			return ret;
-		}
-	}
-
-	ret = _osd_req_finalize_data_integrity(or, has_in, has_out,
-					       out_data_bio, out_data_bytes,
-					       cap_key);
-	if (ret)
-		return ret;
-
-	osd_sec_sign_cdb(&or->cdb, cap_key);
-
-	scsi_req(or->request)->cmd = or->cdb.buff;
-	scsi_req(or->request)->cmd_len = _osd_req_cdb_len(or);
-
-	return 0;
-}
-EXPORT_SYMBOL(osd_finalize_request);
-
-static bool _is_osd_security_code(int code)
-{
-	return	(code == osd_security_audit_value_frozen) ||
-		(code == osd_security_working_key_frozen) ||
-		(code == osd_nonce_not_unique) ||
-		(code == osd_nonce_timestamp_out_of_range) ||
-		(code == osd_invalid_dataout_buffer_integrity_check_value);
-}
-
-#define OSD_SENSE_PRINT1(fmt, a...) \
-	do { \
-		if (__cur_sense_need_output) \
-			OSD_ERR(fmt, ##a); \
-	} while (0)
-
-#define OSD_SENSE_PRINT2(fmt, a...) OSD_SENSE_PRINT1("    " fmt, ##a)
-
-int osd_req_decode_sense_full(struct osd_request *or,
-	struct osd_sense_info *osi, bool silent,
-	struct osd_obj_id *bad_obj_list __unused, int max_obj __unused,
-	struct osd_attr *bad_attr_list, int max_attr)
-{
-	int sense_len, original_sense_len;
-	struct osd_sense_info local_osi;
-	struct scsi_sense_descriptor_based *ssdb;
-	void *cur_descriptor;
-#if (CONFIG_SCSI_OSD_DPRINT_SENSE == 0)
-	const bool __cur_sense_need_output = false;
-#else
-	bool __cur_sense_need_output = !silent;
-#endif
-	int ret;
-
-	if (likely(!or->req_errors))
-		return 0;
-
-	osi = osi ? : &local_osi;
-	memset(osi, 0, sizeof(*osi));
-
-	ssdb = (typeof(ssdb))or->sense;
-	sense_len = or->sense_len;
-	if (sense_len < (int)sizeof(*ssdb) || !ssdb->sense_key) {
-		OSD_ERR("Block-layer returned error(0x%x) but "
-			"sense_len(%d) or key(%d) is empty\n",
-			or->req_errors, sense_len, ssdb->sense_key);
-		goto analyze;
-	}
-
-	if ((ssdb->response_code != 0x72) && (ssdb->response_code != 0x73)) {
-		OSD_ERR("Unrecognized scsi sense: rcode=%x length=%d\n",
-			ssdb->response_code, sense_len);
-		goto analyze;
-	}
-
-	osi->key = ssdb->sense_key;
-	osi->additional_code = be16_to_cpu(ssdb->additional_sense_code);
-	original_sense_len = ssdb->additional_sense_length + 8;
-
-#if (CONFIG_SCSI_OSD_DPRINT_SENSE == 1)
-	if (__cur_sense_need_output)
-		__cur_sense_need_output = (osi->key > scsi_sk_recovered_error);
-#endif
-	OSD_SENSE_PRINT1("Main Sense information key=0x%x length(%d, %d) "
-			"additional_code=0x%x async_error=%d errors=0x%x\n",
-			osi->key, original_sense_len, sense_len,
-			osi->additional_code, or->async_error,
-			or->req_errors);
-
-	if (original_sense_len < sense_len)
-		sense_len = original_sense_len;
-
-	cur_descriptor = ssdb->ssd;
-	sense_len -= sizeof(*ssdb);
-	while (sense_len > 0) {
-		struct scsi_sense_descriptor *ssd = cur_descriptor;
-		int cur_len = ssd->additional_length + 2;
-
-		sense_len -= cur_len;
-
-		if (sense_len < 0)
-			break; /* sense was truncated */
-
-		switch (ssd->descriptor_type) {
-		case scsi_sense_information:
-		case scsi_sense_command_specific_information:
-		{
-			struct scsi_sense_command_specific_data_descriptor
-				*sscd = cur_descriptor;
-
-			osi->command_info =
-				get_unaligned_be64(&sscd->information);
-			OSD_SENSE_PRINT2(
-				"command_specific_information 0x%llx\n",
-				_LLU(osi->command_info));
-			break;
-		}
-		case scsi_sense_key_specific:
-		{
-			struct scsi_sense_key_specific_data_descriptor
-				*ssks = cur_descriptor;
-
-			osi->sense_info = get_unaligned_be16(&ssks->value);
-			OSD_SENSE_PRINT2(
-				"sense_key_specific_information %u"
-				"sksv_cd_bpv_bp (0x%x)\n",
-				osi->sense_info, ssks->sksv_cd_bpv_bp);
-			break;
-		}
-		case osd_sense_object_identification:
-		{ /*FIXME: Keep first not last, Store in array*/
-			struct osd_sense_identification_data_descriptor
-				*osidd = cur_descriptor;
-
-			osi->not_initiated_command_functions =
-				le32_to_cpu(osidd->not_initiated_functions);
-			osi->completed_command_functions =
-				le32_to_cpu(osidd->completed_functions);
-			osi->obj.partition = be64_to_cpu(osidd->partition_id);
-			osi->obj.id = be64_to_cpu(osidd->object_id);
-			OSD_SENSE_PRINT2(
-				"object_identification pid=0x%llx oid=0x%llx\n",
-				_LLU(osi->obj.partition), _LLU(osi->obj.id));
-			OSD_SENSE_PRINT2(
-				"not_initiated_bits(%x) "
-				"completed_command_bits(%x)\n",
-				osi->not_initiated_command_functions,
-				osi->completed_command_functions);
-			break;
-		}
-		case osd_sense_response_integrity_check:
-		{
-			struct osd_sense_response_integrity_check_descriptor
-				*d = cur_descriptor;
-			/* 2 nibbles + space + ASCII */
-			char dump[sizeof(d->integrity_check_value) * 4 + 2];
-
-			hex_dump_to_buffer(d->integrity_check_value,
-					sizeof(d->integrity_check_value),
-					32, 1, dump, sizeof(dump), true);
-			OSD_SENSE_PRINT2("response_integrity [%s]\n", dump);
-		}
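-		/* fall through */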
-		case osd_sense_attribute_identification:
-		{
-			struct osd_sense_attributes_data_descriptor
-				*osadd = cur_descriptor;
-			unsigned len = min(cur_len, sense_len);
-			struct osd_sense_attr *pattr = osadd->sense_attrs;
-
-			while (len >= sizeof(*pattr)) {
-				u32 attr_page = be32_to_cpu(pattr->attr_page);
-				u32 attr_id = be32_to_cpu(pattr->attr_id);
-
-				if (!osi->attr.attr_page) {
-					osi->attr.attr_page = attr_page;
-					osi->attr.attr_id = attr_id;
-				}
-
-				if (bad_attr_list && max_attr) {
-					bad_attr_list->attr_page = attr_page;
-					bad_attr_list->attr_id = attr_id;
-					bad_attr_list++;
-					max_attr--;
-				}
-
-				len -= sizeof(*pattr);
-				pattr++;
-				OSD_SENSE_PRINT2(
-					"osd_sense_attribute_identification "
-					"attr_page=0x%x attr_id=0x%x\n",
-					attr_page, attr_id);
-			}
-		}
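-		/* fall through */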
-		/*These are not legal for OSD*/
-		case scsi_sense_field_replaceable_unit:
-			OSD_SENSE_PRINT2("scsi_sense_field_replaceable_unit\n");
-			break;
-		case scsi_sense_stream_commands:
-			OSD_SENSE_PRINT2("scsi_sense_stream_commands\n");
-			break;
-		case scsi_sense_block_commands:
-			OSD_SENSE_PRINT2("scsi_sense_block_commands\n");
-			break;
-		case scsi_sense_ata_return:
-			OSD_SENSE_PRINT2("scsi_sense_ata_return\n");
-			break;
-		default:
-			if (ssd->descriptor_type <= scsi_sense_Reserved_last)
-				OSD_SENSE_PRINT2(
-					"scsi_sense Reserved descriptor (0x%x)",
-					ssd->descriptor_type);
-			else
-				OSD_SENSE_PRINT2(
-					"scsi_sense Vendor descriptor (0x%x)",
-					ssd->descriptor_type);
-		}
-
-		cur_descriptor += cur_len;
-	}
-
-analyze:
-	if (!osi->key) {
-		/* The scsi sense is empty; the request was never issued to the
-		 * target. The Linux return code might tell us what happened.
-		 */
-		if (or->async_error == BLK_STS_RESOURCE)
-			osi->osd_err_pri = OSD_ERR_PRI_RESOURCE;
-		else
-			osi->osd_err_pri = OSD_ERR_PRI_UNREACHABLE;
-		ret = or->async_error;
-	} else if (osi->key <= scsi_sk_recovered_error) {
-		osi->osd_err_pri = 0;
-		ret = 0;
-	} else if (osi->additional_code == scsi_invalid_field_in_cdb) {
-		if (osi->cdb_field_offset == OSD_CFO_STARTING_BYTE) {
-			osi->osd_err_pri = OSD_ERR_PRI_CLEAR_PAGES;
-			ret = -EFAULT; /* caller should recover from this */
-		} else if (osi->cdb_field_offset == OSD_CFO_OBJECT_ID) {
-			osi->osd_err_pri = OSD_ERR_PRI_NOT_FOUND;
-			ret = -ENOENT;
-		} else if (osi->cdb_field_offset == OSD_CFO_PERMISSIONS) {
-			osi->osd_err_pri = OSD_ERR_PRI_NO_ACCESS;
-			ret = -EACCES;
-		} else {
-			osi->osd_err_pri = OSD_ERR_PRI_BAD_CRED;
-			ret = -EINVAL;
-		}
-	} else if (osi->additional_code == osd_quota_error) {
-		osi->osd_err_pri = OSD_ERR_PRI_NO_SPACE;
-		ret = -ENOSPC;
-	} else if (_is_osd_security_code(osi->additional_code)) {
-		osi->osd_err_pri = OSD_ERR_PRI_BAD_CRED;
-		ret = -EINVAL;
-	} else {
-		osi->osd_err_pri = OSD_ERR_PRI_EIO;
-		ret = -EIO;
-	}
-
-	if (!or->out.residual)
-		or->out.residual = or->out.total_bytes;
-	if (!or->in.residual)
-		or->in.residual = or->in.total_bytes;
-
-	return ret;
-}
-EXPORT_SYMBOL(osd_req_decode_sense_full);
-
-/*
- * Implementation of osd_sec.h API
- * TODO: Move to a separate osd_sec.c file at a later stage.
- */
-
-enum { OSD_SEC_CAP_V1_ALL_CAPS =
-	OSD_SEC_CAP_APPEND | OSD_SEC_CAP_OBJ_MGMT | OSD_SEC_CAP_REMOVE   |
-	OSD_SEC_CAP_CREATE | OSD_SEC_CAP_SET_ATTR | OSD_SEC_CAP_GET_ATTR |
-	OSD_SEC_CAP_WRITE  | OSD_SEC_CAP_READ     | OSD_SEC_CAP_POL_SEC  |
-	OSD_SEC_CAP_GLOBAL | OSD_SEC_CAP_DEV_MGMT
-};
-
-enum { OSD_SEC_CAP_V2_ALL_CAPS =
-	OSD_SEC_CAP_V1_ALL_CAPS | OSD_SEC_CAP_QUERY | OSD_SEC_CAP_M_OBJECT
-};
-
-void osd_sec_init_nosec_doall_caps(void *caps,
-	const struct osd_obj_id *obj, bool is_collection, const bool is_v1)
-{
-	struct osd_capability *cap = caps;
-	u8 type;
-	u8 descriptor_type;
-
-	if (likely(obj->id)) {
-		if (unlikely(is_collection)) {
-			type = OSD_SEC_OBJ_COLLECTION;
-			descriptor_type = is_v1 ? OSD_SEC_OBJ_DESC_OBJ :
-						  OSD_SEC_OBJ_DESC_COL;
-		} else {
-			type = OSD_SEC_OBJ_USER;
-			descriptor_type = OSD_SEC_OBJ_DESC_OBJ;
-		}
-		WARN_ON(!obj->partition);
-	} else {
-		type = obj->partition ? OSD_SEC_OBJ_PARTITION :
-					OSD_SEC_OBJ_ROOT;
-		descriptor_type = OSD_SEC_OBJ_DESC_PAR;
-	}
-
-	memset(cap, 0, sizeof(*cap));
-
-	cap->h.format = OSD_SEC_CAP_FORMAT_VER1;
-	cap->h.integrity_algorithm__key_version = 0; /* MAKE_BYTE(0, 0); */
-	cap->h.security_method = OSD_SEC_NOSEC;
-/*	cap->expiration_time;
-	cap->AUDIT[30-10];
-	cap->discriminator[42-30];
-	cap->object_created_time; */
-	cap->h.object_type = type;
-	osd_sec_set_caps(&cap->h, OSD_SEC_CAP_V1_ALL_CAPS);
-	cap->h.object_descriptor_type = descriptor_type;
-	cap->od.obj_desc.policy_access_tag = 0;
-	cap->od.obj_desc.allowed_partition_id = cpu_to_be64(obj->partition);
-	cap->od.obj_desc.allowed_object_id = cpu_to_be64(obj->id);
-}
-EXPORT_SYMBOL(osd_sec_init_nosec_doall_caps);
-
-/* FIXME: Extract version from caps pointer.
- *        Also Pete's target only supports caps from OSDv1 for now
- */
-void osd_set_caps(struct osd_cdb *cdb, const void *caps)
-{
-	/* NOTE: They start at same address */
-	memcpy(&cdb->v1.caps, caps, OSDv1_CAP_LEN);
-}
-
-bool osd_is_sec_alldata(struct osd_security_parameters *sec_parms __unused)
-{
-	return false;
-}
-
-void osd_sec_sign_cdb(struct osd_cdb *ocdb __unused, const u8 *cap_key __unused)
-{
-}
-
-void osd_sec_sign_data(void *data_integ __unused,
-		       struct bio *bio __unused, const u8 *cap_key __unused)
-{
-}
-
-/*
- * Declared in osd_protocol.h
- * 4.12.5 Data-In and Data-Out buffer offsets
- * byte offset = mantissa * (2^(exponent+8))
- * Returns the smallest allowed encoded offset that contains the given @offset.
- * The actual encoded offset returned is @offset + *@padding.
- */
-osd_cdb_offset __osd_encode_offset(
-	u64 offset, unsigned *padding, int min_shift, int max_shift)
-{
-	u64 try_offset = -1, mod, align;
-	osd_cdb_offset be32_offset;
-	int shift;
-
-	*padding = 0;
-	if (!offset)
-		return 0;
-
-	for (shift = min_shift; shift < max_shift; ++shift) {
-		try_offset = offset >> shift;
-		if (try_offset < (1 << OSD_OFFSET_MAX_BITS))
-			break;
-	}
-
-	BUG_ON(shift == max_shift);
-
-	align = 1 << shift;
-	mod = offset & (align - 1);
-	if (mod) {
-		*padding = align - mod;
-		try_offset += 1;
-	}
-
-	try_offset |= ((shift - 8) & 0xf) << 28;
-	be32_offset = cpu_to_be32((u32)try_offset);
-
-	OSD_DEBUG("offset=%llu mantissa=%llu exp=%d encoded=%x pad=%d\n",
-		 _LLU(offset), _LLU(try_offset & 0x0FFFFFFF), shift,
-		 be32_offset, *padding);
-	return be32_offset;
-}
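-
-/* Worked example (assuming min_shift == 8): for offset == 1000,
- * try_offset = 1000 >> 8 = 3 with remainder 1000 & 255 = 232, so
- * *padding = 256 - 232 = 24 and the mantissa is rounded up to 4.
- * The encoded offset thus represents 4 * 2^8 = 1024 = 1000 + 24 bytes,
- * with the exponent nibble (shift - 8) == 0 in the top four bits.
- */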
diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c
deleted file mode 100644
index eaf36cc..0000000
--- a/drivers/scsi/osd/osd_uld.c
+++ /dev/null
@@ -1,571 +0,0 @@
-/*
- * osd_uld.c - OSD Upper Layer Driver
- *
- * A Linux driver module that registers as a SCSI ULD and probes
- * for OSD type SCSI devices.
- * Its main function is to export osd devices to in-kernel users like
- * osdfs and pNFS-objects-LD. It also provides one ioctl for running
- * in-kernel tests.
- *
- * Copyright (C) 2008 Panasas Inc.  All rights reserved.
- *
- * Authors:
- *   Boaz Harrosh <ooo@electrozaur.com>
- *   Benny Halevy <bhalevy@panasas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  1. Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in the
- *     documentation and/or other materials provided with the distribution.
- *  3. Neither the name of the Panasas company nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/namei.h>
-#include <linux/cdev.h>
-#include <linux/fs.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/idr.h>
-#include <linux/major.h>
-#include <linux/file.h>
-#include <linux/slab.h>
-
-#include <scsi/scsi.h>
-#include <scsi/scsi_driver.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_ioctl.h>
-
-#include <scsi/osd_initiator.h>
-#include <scsi/osd_sec.h>
-
-#include "osd_debug.h"
-
-#ifndef TYPE_OSD
-#  define TYPE_OSD 0x11
-#endif
-
-#ifndef SCSI_OSD_MAJOR
-#  define SCSI_OSD_MAJOR 260
-#endif
-#define SCSI_OSD_MAX_MINOR MINORMASK
-
-static const char osd_name[] = "osd";
-static const char *osd_version_string = "open-osd 0.2.1";
-
-MODULE_AUTHOR("Boaz Harrosh <ooo@electrozaur.com>");
-MODULE_DESCRIPTION("open-osd Upper-Layer-Driver osd.ko");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_CHARDEV_MAJOR(SCSI_OSD_MAJOR);
-MODULE_ALIAS_SCSI_DEVICE(TYPE_OSD);
-
-struct osd_uld_device {
-	int minor;
-	struct device class_dev;
-	struct cdev cdev;
-	struct osd_dev od;
-	struct osd_dev_info odi;
-	struct gendisk *disk;
-};
-
-struct osd_dev_handle {
-	struct osd_dev od;
-	struct file *file;
-	struct osd_uld_device *oud;
-};
-
-static DEFINE_IDA(osd_minor_ida);
-
-/*
- * scsi sysfs attribute operations
- */
-static ssize_t osdname_show(struct device *dev, struct device_attribute *attr,
-			    char *buf)
-{
-	struct osd_uld_device *ould = container_of(dev, struct osd_uld_device,
-						   class_dev);
-	return sprintf(buf, "%s\n", ould->odi.osdname);
-}
-static DEVICE_ATTR_RO(osdname);
-
-static ssize_t systemid_show(struct device *dev, struct device_attribute *attr,
-			    char *buf)
-{
-	struct osd_uld_device *ould = container_of(dev, struct osd_uld_device,
-						   class_dev);
-
-	memcpy(buf, ould->odi.systemid, ould->odi.systemid_len);
-	return ould->odi.systemid_len;
-}
-static DEVICE_ATTR_RO(systemid);
-
-static struct attribute *osd_uld_attrs[] = {
-	&dev_attr_osdname.attr,
-	&dev_attr_systemid.attr,
-	NULL,
-};
-ATTRIBUTE_GROUPS(osd_uld);
-
-static struct class osd_uld_class = {
-	.owner		= THIS_MODULE,
-	.name		= "scsi_osd",
-	.dev_groups	= osd_uld_groups,
-};
-
-/*
- * Char Device operations
- */
-
-static int osd_uld_open(struct inode *inode, struct file *file)
-{
-	struct osd_uld_device *oud = container_of(inode->i_cdev,
-					struct osd_uld_device, cdev);
-
-	get_device(&oud->class_dev);
-	/* cache osd_uld_device on file handle */
-	file->private_data = oud;
-	OSD_DEBUG("osd_uld_open %p\n", oud);
-	return 0;
-}
-
-static int osd_uld_release(struct inode *inode, struct file *file)
-{
-	struct osd_uld_device *oud = file->private_data;
-
-	OSD_DEBUG("osd_uld_release %p\n", file->private_data);
-	file->private_data = NULL;
-	put_device(&oud->class_dev);
-	return 0;
-}
-
-/* FIXME: Only one vector for now */
-unsigned g_test_ioctl;
-do_test_fn *g_do_test;
-
-int osduld_register_test(unsigned ioctl, do_test_fn *do_test)
-{
-	if (g_test_ioctl)
-		return -EINVAL;
-
-	g_test_ioctl = ioctl;
-	g_do_test = do_test;
-	return 0;
-}
-EXPORT_SYMBOL(osduld_register_test);
-
-void osduld_unregister_test(unsigned ioctl)
-{
-	if (ioctl == g_test_ioctl) {
-		g_test_ioctl = 0;
-		g_do_test = NULL;
-	}
-}
-EXPORT_SYMBOL(osduld_unregister_test);
-
-static do_test_fn *_find_ioctl(unsigned cmd)
-{
-	if (g_test_ioctl == cmd)
-		return g_do_test;
-	else
-		return NULL;
-}
-
-static long osd_uld_ioctl(struct file *file, unsigned int cmd,
-	unsigned long arg)
-{
-	struct osd_uld_device *oud = file->private_data;
-	int ret;
-	do_test_fn *do_test;
-
-	do_test = _find_ioctl(cmd);
-	if (do_test)
-		ret = do_test(&oud->od, cmd, arg);
-	else {
-		OSD_ERR("Unknown ioctl %d: osd_uld_device=%p\n", cmd, oud);
-		ret = -ENOIOCTLCMD;
-	}
-	return ret;
-}
-
-static const struct file_operations osd_fops = {
-	.owner          = THIS_MODULE,
-	.open           = osd_uld_open,
-	.release        = osd_uld_release,
-	.unlocked_ioctl = osd_uld_ioctl,
-	.llseek		= noop_llseek,
-};
-
-struct osd_dev *osduld_path_lookup(const char *name)
-{
-	struct osd_uld_device *oud;
-	struct osd_dev_handle *odh;
-	struct file *file;
-	int error;
-
-	if (!name || !*name) {
-		OSD_ERR("Mount with !path || !*path\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	odh = kzalloc(sizeof(*odh), GFP_KERNEL);
-	if (unlikely(!odh))
-		return ERR_PTR(-ENOMEM);
-
-	file = filp_open(name, O_RDWR, 0);
-	if (IS_ERR(file)) {
-		error = PTR_ERR(file);
-		goto free_od;
-	}
-
-	if (file->f_op != &osd_fops) {
-		error = -EINVAL;
-		goto close_file;
-	}
-
-	oud = file->private_data;
-
-	odh->od = oud->od;
-	odh->file = file;
-	odh->oud = oud;
-
-	return &odh->od;
-
-close_file:
-	fput(file);
-free_od:
-	kfree(odh);
-	return ERR_PTR(error);
-}
-EXPORT_SYMBOL(osduld_path_lookup);
-
-static inline bool _the_same_or_null(const u8 *a1, unsigned a1_len,
-				     const u8 *a2, unsigned a2_len)
-{
-	if (!a2_len) /* An empty user string means don't care */
-		return true;
-
-	if (a1_len != a2_len)
-		return false;
-
-	return 0 == memcmp(a1, a2, a1_len);
-}
-
-static int _match_odi(struct device *dev, const void *find_data)
-{
-	struct osd_uld_device *oud = container_of(dev, struct osd_uld_device,
-						  class_dev);
-	const struct osd_dev_info *odi = find_data;
-
-	if (_the_same_or_null(oud->odi.systemid, oud->odi.systemid_len,
-			      odi->systemid, odi->systemid_len) &&
-	    _the_same_or_null(oud->odi.osdname, oud->odi.osdname_len,
-			      odi->osdname, odi->osdname_len)) {
-		OSD_DEBUG("found device sysid_len=%d osdname=%d\n",
-			  odi->systemid_len, odi->osdname_len);
-		return 1;
-	} else {
-		return 0;
-	}
-}
-
-/* osduld_info_lookup - Loop through all devices, return the requested osd_dev.
- *
- * If @odi->systemid_len and/or @odi->osdname_len are zero, they act as a
- * don't-care, e.g. if both are zero /dev/osd0 is returned.
- */
-struct osd_dev *osduld_info_lookup(const struct osd_dev_info *odi)
-{
-	struct device *dev = class_find_device(&osd_uld_class, NULL, odi, _match_odi);
-	if (likely(dev)) {
-		struct osd_dev_handle *odh = kzalloc(sizeof(*odh), GFP_KERNEL);
-		struct osd_uld_device *oud = container_of(dev,
-			struct osd_uld_device, class_dev);
-
-		if (unlikely(!odh)) {
-			put_device(dev);
-			return ERR_PTR(-ENOMEM);
-		}
-
-		odh->od = oud->od;
-		odh->oud = oud;
-
-		return &odh->od;
-	}
-
-	return ERR_PTR(-ENODEV);
-}
-EXPORT_SYMBOL(osduld_info_lookup);
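-
-/* A lookup sketch (hypothetical name buffer): find a device by osdname
- * only, leaving systemid as a don't-care; the returned handle must later
- * be dropped with osduld_put_device():
- *
- *	static struct osd_dev *example_lookup(u8 *name, unsigned name_len)
- *	{
- *		struct osd_dev_info odi = {
- *			.osdname     = name,
- *			.osdname_len = name_len,
- *		};
- *
- *		return osduld_info_lookup(&odi);
- *	}
- */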
-
-void osduld_put_device(struct osd_dev *od)
-{
-	if (od && !IS_ERR(od)) {
-		struct osd_dev_handle *odh =
-				container_of(od, struct osd_dev_handle, od);
-		struct osd_uld_device *oud = odh->oud;
-
-		BUG_ON(od->scsi_device != oud->od.scsi_device);
-
-		/* If scsi has released the device (logout), and exofs holds
-		 * the last reference on oud, it will be freed by the above
-		 * osd_uld_release within the fput below. But that would oops
-		 * in cdev_release, which is called after fops->release. A
-		 * get_/put_ pair makes sure we have a cdev for the duration
-		 * of the fput.
-		 */
-		if (odh->file) {
-			get_device(&oud->class_dev);
-			fput(odh->file);
-		}
-		put_device(&oud->class_dev);
-		kfree(odh);
-	}
-}
-EXPORT_SYMBOL(osduld_put_device);
-
-const struct osd_dev_info *osduld_device_info(struct osd_dev *od)
-{
-	struct osd_dev_handle *odh =
-				container_of(od, struct osd_dev_handle, od);
-	return &odh->oud->odi;
-}
-EXPORT_SYMBOL(osduld_device_info);
-
-bool osduld_device_same(struct osd_dev *od, const struct osd_dev_info *odi)
-{
-	struct osd_dev_handle *odh =
-				container_of(od, struct osd_dev_handle, od);
-	struct osd_uld_device *oud = odh->oud;
-
-	return (oud->odi.systemid_len == odi->systemid_len) &&
-		_the_same_or_null(oud->odi.systemid, oud->odi.systemid_len,
-				 odi->systemid, odi->systemid_len) &&
-		(oud->odi.osdname_len == odi->osdname_len) &&
-		_the_same_or_null(oud->odi.osdname, oud->odi.osdname_len,
-				  odi->osdname, odi->osdname_len);
-}
-EXPORT_SYMBOL(osduld_device_same);
-
-/*
- * Scsi Device operations
- */
-
-static int __detect_osd(struct osd_uld_device *oud)
-{
-	struct scsi_device *scsi_device = oud->od.scsi_device;
-	struct scsi_sense_hdr sense_hdr;
-	char caps[OSD_CAP_LEN];
-	int error;
-
-	/* Sending a test_unit_ready as the first command seems to be
-	 * needed by some targets.
-	 */
-	OSD_DEBUG("start scsi_test_unit_ready %p %p %p\n",
-			oud, scsi_device, scsi_device->request_queue);
-	error = scsi_test_unit_ready(scsi_device, 10*HZ, 5, &sense_hdr);
-	if (error)
-		OSD_ERR("warning: scsi_test_unit_ready failed\n");
-
-	osd_sec_init_nosec_doall_caps(caps, &osd_root_object, false, true);
-	if (osd_auto_detect_ver(&oud->od, caps, &oud->odi))
-		return -ENODEV;
-
-	return 0;
-}
-
-static void __remove(struct device *dev)
-{
-	struct osd_uld_device *oud = container_of(dev, struct osd_uld_device,
-						  class_dev);
-	struct scsi_device *scsi_device = oud->od.scsi_device;
-
-	kfree(oud->odi.osdname);
-
-	osd_dev_fini(&oud->od);
-	scsi_device_put(scsi_device);
-
-	OSD_INFO("osd_remove %s\n",
-		 oud->disk ? oud->disk->disk_name : NULL);
-
-	if (oud->disk)
-		put_disk(oud->disk);
-
-	kfree(oud);
-}
-
-static int osd_probe(struct device *dev)
-{
-	struct scsi_device *scsi_device = to_scsi_device(dev);
-	struct gendisk *disk;
-	struct osd_uld_device *oud;
-	int minor;
-	int error;
-
-	if (scsi_device->type != TYPE_OSD)
-		return -ENODEV;
-
-	minor = ida_alloc_max(&osd_minor_ida, SCSI_OSD_MAX_MINOR, GFP_KERNEL);
-	if (minor == -ENOSPC)
-		return -EBUSY;
-	if (minor < 0)
-		return -ENODEV;
-
-	error = -ENOMEM;
-	oud = kzalloc(sizeof(*oud), GFP_KERNEL);
-	if (NULL == oud)
-		goto err_retract_minor;
-
-	/* class device member */
-	device_initialize(&oud->class_dev);
-	dev_set_drvdata(dev, oud);
-	oud->minor = minor;
-	oud->class_dev.devt = MKDEV(SCSI_OSD_MAJOR, oud->minor);
-	oud->class_dev.class = &osd_uld_class;
-	oud->class_dev.parent = dev;
-	oud->class_dev.release = __remove;
-
-	/* hold one more reference to the scsi_device that will get released
-	 * in __remove, in case a logout happens while a fs is mounted
-	 */
-	if (scsi_device_get(scsi_device))
-		goto err_retract_minor;
-	osd_dev_init(&oud->od, scsi_device);
-
-	/* allocate a disk and set it up */
-	/* FIXME: do we need this since sg has already done that */
-	disk = alloc_disk(1);
-	if (!disk) {
-		OSD_ERR("alloc_disk failed\n");
-		goto err_free_osd;
-	}
-	disk->major = SCSI_OSD_MAJOR;
-	disk->first_minor = oud->minor;
-	sprintf(disk->disk_name, "osd%d", oud->minor);
-	oud->disk = disk;
-
-	/* Detect the OSD Version */
-	error = __detect_osd(oud);
-	if (error) {
-		OSD_ERR("osd detection failed, non-compatible OSD device\n");
-		goto err_free_osd;
-	}
-
-	/* init the char-device for communication with user-mode */
-	cdev_init(&oud->cdev, &osd_fops);
-	oud->cdev.owner = THIS_MODULE;
-
-	error = dev_set_name(&oud->class_dev, "%s", disk->disk_name);
-	if (error) {
-		OSD_ERR("dev_set_name failed => %d\n", error);
-		goto err_free_osd;
-	}
-
-	error = cdev_device_add(&oud->cdev, &oud->class_dev);
-	if (error) {
-		OSD_ERR("device_register failed => %d\n", error);
-		goto err_free_osd;
-	}
-
-	OSD_INFO("osd_probe %s\n", disk->disk_name);
-	return 0;
-
-err_free_osd:
-	put_device(&oud->class_dev);
-err_retract_minor:
-	ida_free(&osd_minor_ida, minor);
-	return error;
-}
-
-static int osd_remove(struct device *dev)
-{
-	struct scsi_device *scsi_device = to_scsi_device(dev);
-	struct osd_uld_device *oud = dev_get_drvdata(dev);
-
-	if (oud->od.scsi_device != scsi_device) {
-		OSD_ERR("Half cooked osd-device %p, || %p!=%p",
-			dev, oud->od.scsi_device, scsi_device);
-	}
-
-	cdev_device_del(&oud->cdev, &oud->class_dev);
-	ida_free(&osd_minor_ida, oud->minor);
-	put_device(&oud->class_dev);
-
-	return 0;
-}
-
-/*
- * Global driver and scsi registration
- */
-
-static struct scsi_driver osd_driver = {
-	.gendrv = {
-		.name		= osd_name,
-		.owner		= THIS_MODULE,
-		.probe		= osd_probe,
-		.remove		= osd_remove,
-	}
-};
-
-static int __init osd_uld_init(void)
-{
-	int err;
-
-	err = class_register(&osd_uld_class);
-	if (err) {
-		OSD_ERR("Unable to register sysfs class => %d\n", err);
-		return err;
-	}
-
-	err = register_chrdev_region(MKDEV(SCSI_OSD_MAJOR, 0),
-				     SCSI_OSD_MAX_MINOR, osd_name);
-	if (err) {
-		OSD_ERR("Unable to register major %d for osd ULD => %d\n",
-			SCSI_OSD_MAJOR, err);
-		goto err_out;
-	}
-
-	err = scsi_register_driver(&osd_driver.gendrv);
-	if (err) {
-		OSD_ERR("scsi_register_driver failed => %d\n", err);
-		goto err_out_chrdev;
-	}
-
-	OSD_INFO("LOADED %s\n", osd_version_string);
-	return 0;
-
-err_out_chrdev:
-	unregister_chrdev_region(MKDEV(SCSI_OSD_MAJOR, 0), SCSI_OSD_MAX_MINOR);
-err_out:
-	class_unregister(&osd_uld_class);
-	return err;
-}
-
-static void __exit osd_uld_exit(void)
-{
-	scsi_unregister_driver(&osd_driver.gendrv);
-	unregister_chrdev_region(MKDEV(SCSI_OSD_MAJOR, 0), SCSI_OSD_MAX_MINOR);
-	class_unregister(&osd_uld_class);
-	OSD_INFO("UNLOADED %s\n", osd_version_string);
-}
-
-module_init(osd_uld_init);
-module_exit(osd_uld_exit);
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
deleted file mode 100644
index 7a1a1ed..0000000
--- a/drivers/scsi/osst.c
+++ /dev/null
@@ -1,6101 +0,0 @@
-/*
-  SCSI Tape Driver for Linux version 1.1 and newer. See the accompanying
-  file Documentation/scsi/st.txt for more information.
-
-  History:
-
-  OnStream SCSI Tape support (osst) cloned from st.c by
-  Willem Riede (osst@riede.org) Feb 2000
-  Fixes ... Kurt Garloff <garloff@suse.de> Mar 2000
-
-  Rewritten from Dwayne Forsyth's SCSI tape driver by Kai Makisara.
-  Contribution and ideas from several people including (in alphabetical
-  order) Klaus Ehrenfried, Wolfgang Denk, Steve Hirsch, Andreas Koppenhöfer,
-  Michael Leodolter, Eyal Lebedinsky, Jörg Weule, and Eric Youngdale.
-
-  Copyright 1992 - 2002 Kai Makisara / 2000 - 2006 Willem Riede
-	 email osst@riede.org
-
-  $Header: /cvsroot/osst/Driver/osst.c,v 1.73 2005/01/01 21:13:34 wriede Exp $
-
-  Microscopic alterations - Rik Ling, 2000/12/21
-  Last st.c sync: Tue Oct 15 22:01:04 2002 by makisara
-  Some small formal changes - aeb, 950809
-*/
-
-static const char * cvsid = "$Id: osst.c,v 1.73 2005/01/01 21:13:34 wriede Exp $";
-static const char * osst_version = "0.99.4";
-
-/* The "failure to reconnect" firmware bug */
-#define OSST_FW_NEED_POLL_MIN 10601 /*(107A)*/
-#define OSST_FW_NEED_POLL_MAX 10704 /*(108D)*/
-#define OSST_FW_NEED_POLL(x,d) ((x) >= OSST_FW_NEED_POLL_MIN && (x) <= OSST_FW_NEED_POLL_MAX && d->host->this_id != 7)
-
-#include <linux/module.h>
-
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/sched/signal.h>
-#include <linux/proc_fs.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/mtio.h>
-#include <linux/ioctl.h>
-#include <linux/fcntl.h>
-#include <linux/spinlock.h>
-#include <linux/vmalloc.h>
-#include <linux/blkdev.h>
-#include <linux/moduleparam.h>
-#include <linux/delay.h>
-#include <linux/jiffies.h>
-#include <linux/mutex.h>
-#include <linux/uaccess.h>
-#include <asm/dma.h>
-
-/* The driver prints some debugging information on the console if DEBUG
-   is defined and non-zero. */
-#define DEBUG 0
-
-/* The message level for the debug messages is currently set to KERN_NOTICE
-   so that people can easily see the messages. Later when the debugging messages
-   in the drivers are more widely classified, this may be changed to KERN_DEBUG. */
-#define OSST_DEB_MSG  KERN_NOTICE
-
-#include <scsi/scsi.h>
-#include <scsi/scsi_dbg.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_driver.h>
-#include <scsi/scsi_eh.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_ioctl.h>
-
-#define ST_KILOBYTE 1024
-
-#include "st.h"
-#include "osst.h"
-#include "osst_options.h"
-#include "osst_detect.h"
-
-static DEFINE_MUTEX(osst_int_mutex);
-static int max_dev = 0;
-static int write_threshold_kbs = 0;
-static int max_sg_segs = 0;
-
-#ifdef MODULE
-MODULE_AUTHOR("Willem Riede");
-MODULE_DESCRIPTION("OnStream {DI-|FW-|SC-|USB}{30|50} Tape Driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_CHARDEV_MAJOR(OSST_MAJOR);
-MODULE_ALIAS_SCSI_DEVICE(TYPE_TAPE);
-
-module_param(max_dev, int, 0444);
-MODULE_PARM_DESC(max_dev, "Maximum number of OnStream Tape Drives to attach (4)");
-
-module_param(write_threshold_kbs, int, 0644);
-MODULE_PARM_DESC(write_threshold_kbs, "Asynchronous write threshold (KB; 32)");
-
-module_param(max_sg_segs, int, 0644);
-MODULE_PARM_DESC(max_sg_segs, "Maximum number of scatter/gather segments to use (9)");
-#else
-static struct osst_dev_parm {
-       char   *name;
-       int    *val;
-} parms[] __initdata = {
-       { "max_dev",             &max_dev             },
-       { "write_threshold_kbs", &write_threshold_kbs },
-       { "max_sg_segs",         &max_sg_segs         }
-};
-#endif
-
-/* Some default definitions have been moved to osst_options.h */
-#define OSST_BUFFER_SIZE (OSST_BUFFER_BLOCKS * ST_KILOBYTE)
-#define OSST_WRITE_THRESHOLD (OSST_WRITE_THRESHOLD_BLOCKS * ST_KILOBYTE)
-
-/* The buffer size should fit into the 24 bits for length in the
-   6-byte SCSI read and write commands. */
-#if OSST_BUFFER_SIZE >= (1 << 24)
-#error "Buffer size must fit in 24 bits (be less than 1 << 24 bytes)!"
-#endif
-
-#if DEBUG
-static int debugging = 1;
-/* uncomment define below to test error recovery */
-// #define OSST_INJECT_ERRORS 1 
-#endif
-
-/* Do not retry! The drive firmware already retries when appropriate,
-   and when it tries to tell us something, we had better listen... */
-#define MAX_RETRIES 0
-
-#define NO_TAPE  NOT_READY
-
-#define OSST_WAIT_POSITION_COMPLETE   (HZ > 200 ? HZ / 200 : 1)
-#define OSST_WAIT_WRITE_COMPLETE      (HZ / 12)
-#define OSST_WAIT_LONG_WRITE_COMPLETE (HZ / 2)
-	
-#define OSST_TIMEOUT (200 * HZ)
-#define OSST_LONG_TIMEOUT (1800 * HZ)
-
-#define TAPE_NR(x) (iminor(x) & ~(-1 << ST_MODE_SHIFT))
-#define TAPE_MODE(x) ((iminor(x) & ST_MODE_MASK) >> ST_MODE_SHIFT)
-#define TAPE_REWIND(x) ((iminor(x) & 0x80) == 0)
-#define TAPE_IS_RAW(x) (TAPE_MODE(x) & (ST_NBR_MODES >> 1))
-
-/* Internal ioctl to set both density (uppermost 8 bits) and blocksize (lower
-   24 bits) */
-#define SET_DENS_AND_BLK 0x10001
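-/* For example (hypothetical values): density 0x45 with a 32 KB blocksize
-   would be passed as arg = (0x45 << 24) | 0x8000. */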
-
-static int osst_buffer_size       = OSST_BUFFER_SIZE;
-static int osst_write_threshold   = OSST_WRITE_THRESHOLD;
-static int osst_max_sg_segs       = OSST_MAX_SG;
-static int osst_max_dev           = OSST_MAX_TAPES;
-static int osst_nr_dev;
-
-static struct osst_tape **os_scsi_tapes = NULL;
-static DEFINE_RWLOCK(os_scsi_tapes_lock);
-
-static int modes_defined = 0;
-
-static struct osst_buffer *new_tape_buffer(int, int, int);
-static int enlarge_buffer(struct osst_buffer *, int);
-static void normalize_buffer(struct osst_buffer *);
-static int append_to_buffer(const char __user *, struct osst_buffer *, int);
-static int from_buffer(struct osst_buffer *, char __user *, int);
-static int osst_zero_buffer_tail(struct osst_buffer *);
-static int osst_copy_to_buffer(struct osst_buffer *, unsigned char *);
-static int osst_copy_from_buffer(struct osst_buffer *, unsigned char *);
-
-static int osst_probe(struct device *);
-static int osst_remove(struct device *);
-
-static struct scsi_driver osst_template = {
-	.gendrv = {
-		.name		=  "osst",
-		.owner		= THIS_MODULE,
-		.probe		= osst_probe,
-		.remove		= osst_remove,
-	}
-};
-
-static int osst_int_ioctl(struct osst_tape *STp, struct osst_request ** aSRpnt,
-			    unsigned int cmd_in, unsigned long arg);
-
-static int osst_set_frame_position(struct osst_tape *STp, struct osst_request ** aSRpnt, int frame, int skip);
-
-static int osst_get_frame_position(struct osst_tape *STp, struct osst_request ** aSRpnt);
-
-static int osst_flush_write_buffer(struct osst_tape *STp, struct osst_request ** aSRpnt);
-
-static int osst_write_error_recovery(struct osst_tape * STp, struct osst_request ** aSRpnt, int pending);
-
-static inline char *tape_name(struct osst_tape *tape)
-{
-	return tape->drive->disk_name;
-}
-
-/* Routines that handle the interaction with mid-layer SCSI routines */
-
-
-/* Normalize Sense */
-static void osst_analyze_sense(struct osst_request *SRpnt, struct st_cmdstatus *s)
-{
-	const u8 *ucp;
-	const u8 *sense = SRpnt->sense;
-
-	s->have_sense = scsi_normalize_sense(SRpnt->sense,
-				SCSI_SENSE_BUFFERSIZE, &s->sense_hdr);
-	s->flags = 0;
-
-	if (s->have_sense) {
-		s->deferred = 0;
-		s->remainder_valid =
-			scsi_get_sense_info_fld(sense, SCSI_SENSE_BUFFERSIZE, &s->uremainder64);
-		switch (sense[0] & 0x7f) {
-		case 0x71:
-			s->deferred = 1;
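-			/* fall through */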
-		case 0x70:
-			s->fixed_format = 1;
-			s->flags = sense[2] & 0xe0;
-			break;
-		case 0x73:
-			s->deferred = 1;
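-			/* fall through */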
-		case 0x72:
-			s->fixed_format = 0;
-			ucp = scsi_sense_desc_find(sense, SCSI_SENSE_BUFFERSIZE, 4);
-			s->flags = ucp ? (ucp[3] & 0xe0) : 0;
-			break;
-		}
-	}
-}
-
-/* Convert the result to success code */
-static int osst_chk_result(struct osst_tape * STp, struct osst_request * SRpnt)
-{
-	char *name = tape_name(STp);
-	int result = SRpnt->result;
-	u8 * sense = SRpnt->sense, scode;
-#if DEBUG
-	const char *stp;
-#endif
-	struct st_cmdstatus *cmdstatp;
-
-	if (!result)
-		return 0;
-
-	cmdstatp = &STp->buffer->cmdstat;
-	osst_analyze_sense(SRpnt, cmdstatp);
-
-	if (cmdstatp->have_sense)
-		scode = STp->buffer->cmdstat.sense_hdr.sense_key;
-	else
-		scode = 0;
-#if DEBUG
-	if (debugging) {
-		printk(OSST_DEB_MSG "%s:D: Error: %x, cmd: %x %x %x %x %x %x\n",
-		   name, result,
-		   SRpnt->cmd[0], SRpnt->cmd[1], SRpnt->cmd[2],
-		   SRpnt->cmd[3], SRpnt->cmd[4], SRpnt->cmd[5]);
-		if (scode) printk(OSST_DEB_MSG "%s:D: Sense: %02x, ASC: %02x, ASCQ: %02x\n",
-				  name, scode, sense[12], sense[13]);
-		if (cmdstatp->have_sense)
-			__scsi_print_sense(STp->device, name,
-					   SRpnt->sense, SCSI_SENSE_BUFFERSIZE);
-	}
-	else
-#endif
-	if (cmdstatp->have_sense && (
-		 scode != NO_SENSE &&
-		 scode != RECOVERED_ERROR &&
-/*      	 scode != UNIT_ATTENTION && */
-		 scode != BLANK_CHECK &&
-		 scode != VOLUME_OVERFLOW &&
-		 SRpnt->cmd[0] != MODE_SENSE &&
-		 SRpnt->cmd[0] != TEST_UNIT_READY)) { /* Abnormal conditions for tape */
-		if (cmdstatp->have_sense) {
-			printk(KERN_WARNING "%s:W: Command with sense data:\n", name);
-			__scsi_print_sense(STp->device, name,
-					   SRpnt->sense, SCSI_SENSE_BUFFERSIZE);
-		}
-		else {
-			static	int	notyetprinted = 1;
-
-			printk(KERN_WARNING
-			     "%s:W: Warning %x (driver bt 0x%x, host bt 0x%x).\n",
-			     name, result, driver_byte(result),
-			     host_byte(result));
-			if (notyetprinted) {
-				notyetprinted = 0;
-				printk(KERN_INFO
-					"%s:I: This warning may be caused by your scsi controller,\n", name);
-				printk(KERN_INFO
-					"%s:I: it has been reported with some Buslogic cards.\n", name);
-			}
-		}
-	}
-	STp->pos_unknown |= STp->device->was_reset;
-
-	if (cmdstatp->have_sense && scode == RECOVERED_ERROR) {
-		STp->recover_count++;
-		STp->recover_erreg++;
-#if DEBUG
-		if (debugging) {
-			if (SRpnt->cmd[0] == READ_6)
-				stp = "read";
-			else if (SRpnt->cmd[0] == WRITE_6)
-				stp = "write";
-			else
-				stp = "ioctl";
-			printk(OSST_DEB_MSG "%s:D: Recovered %s error (%d).\n", name, stp,
-					     STp->recover_count);
-		}
-#endif
-		if ((sense[2] & 0xe0) == 0)
-			return 0;
-	}
-	return (-EIO);
-}
-
-
-/* Wakeup from interrupt */
-static void osst_end_async(struct request *req, blk_status_t status)
-{
-	struct scsi_request *rq = scsi_req(req);
-	struct osst_request *SRpnt = req->end_io_data;
-	struct osst_tape *STp = SRpnt->stp;
-	struct rq_map_data *mdata = &SRpnt->stp->buffer->map_data;
-
-	STp->buffer->cmdstat.midlevel_result = SRpnt->result = rq->result;
-#if DEBUG
-	STp->write_pending = 0;
-#endif
-	if (rq->sense_len)
-		memcpy(SRpnt->sense, rq->sense, SCSI_SENSE_BUFFERSIZE);
-	if (SRpnt->waiting)
-		complete(SRpnt->waiting);
-
-	if (SRpnt->bio) {
-		kfree(mdata->pages);
-		blk_rq_unmap_user(SRpnt->bio);
-	}
-
-	__blk_put_request(req->q, req);
-}
-
-/* osst_request memory management */
-static struct osst_request *osst_allocate_request(void)
-{
-	return kzalloc(sizeof(struct osst_request), GFP_KERNEL);
-}
-
-static void osst_release_request(struct osst_request *streq)
-{
-	kfree(streq);
-}
-
-static int osst_execute(struct osst_request *SRpnt, const unsigned char *cmd,
-			int cmd_len, int data_direction, void *buffer, unsigned bufflen,
-			int use_sg, int timeout, int retries)
-{
-	struct request *req;
-	struct scsi_request *rq;
-	struct page **pages = NULL;
-	struct rq_map_data *mdata = &SRpnt->stp->buffer->map_data;
-
-	int err = 0;
-	int write = (data_direction == DMA_TO_DEVICE);
-
-	req = blk_get_request(SRpnt->stp->device->request_queue,
-			write ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
-	if (IS_ERR(req))
-		return DRIVER_ERROR << 24;
-
-	rq = scsi_req(req);
-	req->rq_flags |= RQF_QUIET;
-
-	SRpnt->bio = NULL;
-
-	if (use_sg) {
-		struct scatterlist *sg, *sgl = (struct scatterlist *)buffer;
-		int i;
-
-		pages = kcalloc(use_sg, sizeof(struct page *), GFP_KERNEL);
-		if (!pages)
-			goto free_req;
-
-		for_each_sg(sgl, sg, use_sg, i)
-			pages[i] = sg_page(sg);
-
-		mdata->null_mapped = 1;
-
-		mdata->page_order = get_order(sgl[0].length);
-		mdata->nr_entries =
-			DIV_ROUND_UP(bufflen, PAGE_SIZE << mdata->page_order);
-		mdata->offset = 0;
-
-		err = blk_rq_map_user(req->q, req, mdata, NULL, bufflen, GFP_KERNEL);
-		if (err) {
-			kfree(pages);
-			goto free_req;
-		}
-		SRpnt->bio = req->bio;
-		mdata->pages = pages;
-
-	} else if (bufflen) {
-		err = blk_rq_map_kern(req->q, req, buffer, bufflen, GFP_KERNEL);
-		if (err)
-			goto free_req;
-	}
-
-	rq->cmd_len = cmd_len;
-	memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
-	memcpy(rq->cmd, cmd, rq->cmd_len);
-	req->timeout = timeout;
-	rq->retries = retries;
-	req->end_io_data = SRpnt;
-
-	blk_execute_rq_nowait(req->q, NULL, req, 1, osst_end_async);
-	return 0;
-free_req:
-	blk_put_request(req);
-	return DRIVER_ERROR << 24;
-}
-
-/* Do the scsi command. Waits until the command has completed if do_wait
-   is true. Otherwise osst_write_behind_check() is used to check that the
-   command has finished. */
-static	struct osst_request * osst_do_scsi(struct osst_request *SRpnt, struct osst_tape *STp, 
-	unsigned char *cmd, int bytes, int direction, int timeout, int retries, int do_wait)
-{
-	unsigned char *bp;
-	unsigned short use_sg;
-#ifdef OSST_INJECT_ERRORS
-	static   int   inject = 0;
-	static   int   repeat = 0;
-#endif
-	struct completion *waiting;
-
-	/* if async, make sure there's no command outstanding */
-	if (!do_wait && ((STp->buffer)->last_SRpnt)) {
-		printk(KERN_ERR "%s: Async command already active.\n",
-		       tape_name(STp));
-		if (signal_pending(current))
-			(STp->buffer)->syscall_result = (-EINTR);
-		else
-			(STp->buffer)->syscall_result = (-EBUSY);
-		return NULL;
-	}
-
-	if (SRpnt == NULL) {
-		SRpnt = osst_allocate_request();
-		if (SRpnt == NULL) {
-			printk(KERN_ERR "%s: Can't allocate SCSI request.\n",
-				     tape_name(STp));
-			if (signal_pending(current))
-				(STp->buffer)->syscall_result = (-EINTR);
-			else
-				(STp->buffer)->syscall_result = (-EBUSY);
-			return NULL;
-		}
-		SRpnt->stp = STp;
-	}
-
-	/* If async IO, set last_SRpnt. This ptr tells write_behind_check
-	   which IO is outstanding. It's nulled out when the IO completes. */
-	if (!do_wait)
-		(STp->buffer)->last_SRpnt = SRpnt;
-
-	waiting = &STp->wait;
-	init_completion(waiting);
-	SRpnt->waiting = waiting;
-
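-	/*
-	 * Use the scatter-gather list only when the transfer exceeds the
-	 * first segment, and never more segments than were allocated.
-	 */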
-	use_sg = (bytes > STp->buffer->sg[0].length) ? STp->buffer->use_sg : 0;
-	if (use_sg) {
-		bp = (unsigned char *)&(STp->buffer->sg[0]);
-		if (STp->buffer->sg_segs < use_sg)
-			use_sg = STp->buffer->sg_segs;
-	}
-	else
-		bp = (STp->buffer)->b_data;
-
-	memcpy(SRpnt->cmd, cmd, sizeof(SRpnt->cmd));
-	STp->buffer->cmdstat.have_sense = 0;
-	STp->buffer->syscall_result = 0;
-
-	if (osst_execute(SRpnt, cmd, COMMAND_SIZE(cmd[0]), direction, bp, bytes,
-			 use_sg, timeout, retries))
-		/* could not allocate the buffer or request was too large */
-		(STp->buffer)->syscall_result = (-EBUSY);
-	else if (do_wait) {
-		wait_for_completion(waiting);
-		SRpnt->waiting = NULL;
-		STp->buffer->syscall_result = osst_chk_result(STp, SRpnt);
-#ifdef OSST_INJECT_ERRORS
-		if (STp->buffer->syscall_result == 0 &&
-		    cmd[0] == READ_6 &&
-		    cmd[4] && 
-		    ( (++ inject % 83) == 29  ||
-		      (STp->first_frame_position == 240 
-			         /* or STp->read_error_frame to fail again on the block calculated above */ &&
-				 ++repeat < 3))) {
-			printk(OSST_DEB_MSG "%s:D: Injecting read error\n", tape_name(STp));
-			STp->buffer->last_result_fatal = 1;
-		}
-#endif
-	}
-	return SRpnt;
-}
-
-
-/* Handle the write-behind checking (waits on the completion) */
-static void osst_write_behind_check(struct osst_tape *STp)
-{
-	struct osst_buffer * STbuffer;
-
-	STbuffer = STp->buffer;
-
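-	/*
-	 * There is at most one outstanding asynchronous write, started by
-	 * osst_do_scsi() with do_wait == 0; wait for it and fold its
-	 * status back into the buffer state.
-	 */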
-#if DEBUG
-	if (STp->write_pending)
-		STp->nbr_waits++;
-	else
-		STp->nbr_finished++;
-#endif
-	wait_for_completion(&(STp->wait));
-	STp->buffer->last_SRpnt->waiting = NULL;
-
-	STp->buffer->syscall_result = osst_chk_result(STp, STp->buffer->last_SRpnt);
-
-	if (STp->buffer->syscall_result)
-		STp->buffer->syscall_result =
-			osst_write_error_recovery(STp, &(STp->buffer->last_SRpnt), 1);
-	else
-		STp->first_frame_position++;
-
-	osst_release_request(STp->buffer->last_SRpnt);
-
-	if (STbuffer->writing < STbuffer->buffer_bytes)
-		printk(KERN_WARNING "osst :A: write_behind_check: something left in buffer!\n");
-
-	STbuffer->last_SRpnt = NULL;
-	STbuffer->buffer_bytes -= STbuffer->writing;
-	STbuffer->writing = 0;
-
-	return;
-}
-
-
-
-/* Onstream specific Routines */
-/*
- * Initialize the OnStream AUX
- */
-static void osst_init_aux(struct osst_tape * STp, int frame_type, int frame_seq_number,
-					 int logical_blk_num, int blk_sz, int blk_cnt)
-{
-	os_aux_t       *aux = STp->buffer->aux;
-	os_partition_t *par = &aux->partition;
-	os_dat_t       *dat = &aux->dat;
-
-	if (STp->raw) return;
-
-	memset(aux, 0, sizeof(*aux));
-	aux->format_id = htonl(0);
-	memcpy(aux->application_sig, "LIN4", 4);
-	aux->hdwr = htonl(0);
-	aux->frame_type = frame_type;
-
-	switch (frame_type) {
-	  case	OS_FRAME_TYPE_HEADER:
-		aux->update_frame_cntr    = htonl(STp->update_frame_cntr);
-		par->partition_num        = OS_CONFIG_PARTITION;
-		par->par_desc_ver         = OS_PARTITION_VERSION;
-		par->wrt_pass_cntr        = htons(0xffff);
-		/* 0-4 = reserved, 5-9 = header, 2990-2994 = header, 2995-2999 = reserved */
-		par->first_frame_ppos     = htonl(0);
-		par->last_frame_ppos      = htonl(0xbb7);
-		aux->frame_seq_num        = htonl(0);
-		aux->logical_blk_num_high = htonl(0);
-		aux->logical_blk_num      = htonl(0);
-		aux->next_mark_ppos       = htonl(STp->first_mark_ppos);
-		break;
-	  case	OS_FRAME_TYPE_DATA:
-	  case	OS_FRAME_TYPE_MARKER:
-		dat->dat_sz = 8;
-		dat->reserved1 = 0;
-		dat->entry_cnt = 1;
-		dat->reserved3 = 0;
-		dat->dat_list[0].blk_sz   = htonl(blk_sz);
-		dat->dat_list[0].blk_cnt  = htons(blk_cnt);
-		dat->dat_list[0].flags    = frame_type==OS_FRAME_TYPE_MARKER?
-							OS_DAT_FLAGS_MARK:OS_DAT_FLAGS_DATA;
-		dat->dat_list[0].reserved = 0;
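-		/* fall through - DATA and MARKER frames share the data partition fields below */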
-	  case	OS_FRAME_TYPE_EOD:
-		aux->update_frame_cntr    = htonl(0);
-		par->partition_num        = OS_DATA_PARTITION;
-		par->par_desc_ver         = OS_PARTITION_VERSION;
-		par->wrt_pass_cntr        = htons(STp->wrt_pass_cntr);
-		par->first_frame_ppos     = htonl(STp->first_data_ppos);
-		par->last_frame_ppos      = htonl(STp->capacity);
-		aux->frame_seq_num        = htonl(frame_seq_number);
-		aux->logical_blk_num_high = htonl(0);
-		aux->logical_blk_num      = htonl(logical_blk_num);
-		break;
-	  default: ; /* probably FILL */
-	}
-	aux->filemark_cnt = htonl(STp->filemark_cnt);
-	aux->phys_fm = htonl(0xffffffff);
-	aux->last_mark_ppos = htonl(STp->last_mark_ppos);
-	aux->last_mark_lbn  = htonl(STp->last_mark_lbn);
-}
-
-/*
- * Verify that we have the correct tape frame
- */
-static int osst_verify_frame(struct osst_tape * STp, int frame_seq_number, int quiet)
-{
-	char               * name = tape_name(STp);
-	os_aux_t           * aux  = STp->buffer->aux;
-	os_partition_t     * par  = &(aux->partition);
-	struct st_partstat * STps = &(STp->ps[STp->partition]);
-	unsigned int	     blk_cnt, blk_sz, i;
-
-	if (STp->raw) {
-		if (STp->buffer->syscall_result) {
-			for (i=0; i < STp->buffer->sg_segs; i++)
-				memset(page_address(sg_page(&STp->buffer->sg[i])),
-				       0, STp->buffer->sg[i].length);
-			strcpy(STp->buffer->b_data, "READ ERROR ON FRAME");
-		} else
-			STp->buffer->buffer_bytes = OS_FRAME_SIZE;
-		return 1;
-	}
-	if (STp->buffer->syscall_result) {
-#if DEBUG
-		printk(OSST_DEB_MSG "%s:D: Skipping frame, read error\n", name);
-#endif
-		return 0;
-	}
-	if (ntohl(aux->format_id) != 0) {
-#if DEBUG
-		printk(OSST_DEB_MSG "%s:D: Skipping frame, format_id %u\n", name, ntohl(aux->format_id));
-#endif
-		goto err_out;
-	}
-	if (memcmp(aux->application_sig, STp->application_sig, 4) != 0 &&
-	    (memcmp(aux->application_sig, "LIN3", 4) != 0 || STp->linux_media_version != 4)) {
-#if DEBUG
-		printk(OSST_DEB_MSG "%s:D: Skipping frame, incorrect application signature\n", name);
-#endif
-		goto err_out;
-	}
-	if (par->partition_num != OS_DATA_PARTITION) {
-		if (!STp->linux_media || STp->linux_media_version != 2) {
-#if DEBUG
-			printk(OSST_DEB_MSG "%s:D: Skipping frame, partition num %d\n",
-					    name, par->partition_num);
-#endif
-			goto err_out;
-		}
-	}
-	if (par->par_desc_ver != OS_PARTITION_VERSION) {
-#if DEBUG
-		printk(OSST_DEB_MSG "%s:D: Skipping frame, partition version %d\n", name, par->par_desc_ver);
-#endif
-		goto err_out;
-	}
-	if (ntohs(par->wrt_pass_cntr) != STp->wrt_pass_cntr) {
-#if DEBUG
-		printk(OSST_DEB_MSG "%s:D: Skipping frame, wrt_pass_cntr %d (expected %d)\n", 
-				    name, ntohs(par->wrt_pass_cntr), STp->wrt_pass_cntr);
-#endif
-		goto err_out;
-	}
-	if (aux->frame_type != OS_FRAME_TYPE_DATA &&
-	    aux->frame_type != OS_FRAME_TYPE_EOD &&
-	    aux->frame_type != OS_FRAME_TYPE_MARKER) {
-		if (!quiet) {
-#if DEBUG
-			printk(OSST_DEB_MSG "%s:D: Skipping frame, frame type %x\n", name, aux->frame_type);
-#endif
-		}
-		goto err_out;
-	}
-	if (aux->frame_type == OS_FRAME_TYPE_EOD &&
-	    STp->first_frame_position < STp->eod_frame_ppos) {
-		printk(KERN_INFO "%s:I: Skipping premature EOD frame %d\n", name,
-				 STp->first_frame_position);
-		goto err_out;
-	}
-	if (frame_seq_number != -1 && ntohl(aux->frame_seq_num) != frame_seq_number) {
-		if (!quiet) {
-#if DEBUG
-			printk(OSST_DEB_MSG "%s:D: Skipping frame, sequence number %u (expected %d)\n", 
-					    name, ntohl(aux->frame_seq_num), frame_seq_number);
-#endif
-		}
-		goto err_out;
-	}
-	if (aux->frame_type == OS_FRAME_TYPE_MARKER) {
-		STps->eof = ST_FM_HIT;
-
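-		/*
-		 * Cross-check this marker against the cached header filemark
-		 * table and learn or correct the entry if it disagrees.
-		 */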
-		i = ntohl(aux->filemark_cnt);
-		if (STp->header_cache != NULL && i < OS_FM_TAB_MAX && (i > STp->filemark_cnt ||
-		    STp->first_frame_position - 1 != ntohl(STp->header_cache->dat_fm_tab.fm_tab_ent[i]))) {
-#if DEBUG
-			printk(OSST_DEB_MSG "%s:D: %s filemark %d at frame pos %d\n", name,
-				  STp->header_cache->dat_fm_tab.fm_tab_ent[i] == 0?"Learned":"Corrected",
-				  i, STp->first_frame_position - 1);
-#endif
-			STp->header_cache->dat_fm_tab.fm_tab_ent[i] = htonl(STp->first_frame_position - 1);
-			if (i >= STp->filemark_cnt)
-				 STp->filemark_cnt = i+1;
-		}
-	}
-	if (aux->frame_type == OS_FRAME_TYPE_EOD) {
-		STps->eof = ST_EOD_1;
-		STp->frame_in_buffer = 1;
-	}
-	if (aux->frame_type == OS_FRAME_TYPE_DATA) {
-		blk_cnt = ntohs(aux->dat.dat_list[0].blk_cnt);
-		blk_sz  = ntohl(aux->dat.dat_list[0].blk_sz);
-		STp->buffer->buffer_bytes = blk_cnt * blk_sz;
-		STp->buffer->read_pointer = 0;
-		STp->frame_in_buffer = 1;
-
-		/* See what block size was used to write file */
-		if (STp->block_size != blk_sz && blk_sz > 0) {
-			printk(KERN_INFO
-	    	"%s:I: File was written with block size %d%c, currently %d%c, adjusted to match.\n",
-       				name, blk_sz<1024?blk_sz:blk_sz/1024,blk_sz<1024?'b':'k',
-				STp->block_size<1024?STp->block_size:STp->block_size/1024,
-				STp->block_size<1024?'b':'k');
-			STp->block_size            = blk_sz;
-			STp->buffer->buffer_blocks = OS_DATA_SIZE / blk_sz;
-		}
-		STps->eof = ST_NOEOF;
-	}
-	STp->frame_seq_number = ntohl(aux->frame_seq_num);
-	STp->logical_blk_num  = ntohl(aux->logical_blk_num);
-	return 1;
-
-err_out:
-	if (STp->read_error_frame == 0)
-		STp->read_error_frame = STp->first_frame_position - 1;
-	return 0;
-}
-
-/*
- * Wait for the unit to become Ready
- */
-static int osst_wait_ready(struct osst_tape * STp, struct osst_request ** aSRpnt,
-				 unsigned timeout, int initial_delay)
-{
-	unsigned char		cmd[MAX_COMMAND_SIZE];
-	struct osst_request   * SRpnt;
-	unsigned long		startwait = jiffies;
-#if DEBUG
-	int			dbg  = debugging;
-	char    	      * name = tape_name(STp);
-
-	printk(OSST_DEB_MSG "%s:D: Reached onstream wait ready\n", name);
-#endif
-
-	if (initial_delay > 0)
-		msleep(jiffies_to_msecs(initial_delay));
-
-	memset(cmd, 0, MAX_COMMAND_SIZE);
-	cmd[0] = TEST_UNIT_READY;
-
-	SRpnt = osst_do_scsi(*aSRpnt, STp, cmd, 0, DMA_NONE, STp->timeout, MAX_RETRIES, 1);
-	*aSRpnt = SRpnt;
-	if (!SRpnt) return (-EBUSY);
-
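-	/*
-	 * Keep polling while the drive reports "not ready, becoming ready"
-	 * (02/04/01), "not ready, long write in progress" (02/04/08) or a
-	 * "not ready to ready" unit attention (06/28/00).
-	 */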
-	while ( STp->buffer->syscall_result && time_before(jiffies, startwait + timeout*HZ) &&
-	       (( SRpnt->sense[2]  == 2 && SRpnt->sense[12] == 4    &&
-		 (SRpnt->sense[13] == 1 || SRpnt->sense[13] == 8)    ) ||
-		( SRpnt->sense[2]  == 6 && SRpnt->sense[12] == 0x28 &&
-		  SRpnt->sense[13] == 0                                        )  )) {
-#if DEBUG
-	    if (debugging) {
-		printk(OSST_DEB_MSG "%s:D: Sleeping in onstream wait ready\n", name);
-		printk(OSST_DEB_MSG "%s:D: Turning off debugging for a while\n", name);
-		debugging = 0;
-	    }
-#endif
-	    msleep(100);
-
-	    memset(cmd, 0, MAX_COMMAND_SIZE);
-	    cmd[0] = TEST_UNIT_READY;
-
-	    SRpnt = osst_do_scsi(SRpnt, STp, cmd, 0, DMA_NONE, STp->timeout, MAX_RETRIES, 1);
-	}
-	*aSRpnt = SRpnt;
-#if DEBUG
-	debugging = dbg;
-#endif
-	if ( STp->buffer->syscall_result &&
-	     osst_write_error_recovery(STp, aSRpnt, 0) ) {
-#if DEBUG
-	    printk(OSST_DEB_MSG "%s:D: Abnormal exit from onstream wait ready\n", name);
-	    printk(OSST_DEB_MSG "%s:D: Result = %d, Sense: 0=%02x, 2=%02x, 12=%02x, 13=%02x\n", name,
-			STp->buffer->syscall_result, SRpnt->sense[0], SRpnt->sense[2],
-			SRpnt->sense[12], SRpnt->sense[13]);
-#endif
-	    return (-EIO);
-	}
-#if DEBUG
-	printk(OSST_DEB_MSG "%s:D: Normal exit from onstream wait ready\n", name);
-#endif
-	return 0;
-}
-
-/*
- * Wait for a tape to be inserted in the unit
- */
-static int osst_wait_for_medium(struct osst_tape * STp, struct osst_request ** aSRpnt, unsigned timeout)
-{
-	unsigned char		cmd[MAX_COMMAND_SIZE];
-	struct osst_request   * SRpnt;
-	unsigned long		startwait = jiffies;
-#if DEBUG
-	int			dbg = debugging;
-	char    	      * name = tape_name(STp);
-
-	printk(OSST_DEB_MSG "%s:D: Reached onstream wait for medium\n", name);
-#endif
-
-	memset(cmd, 0, MAX_COMMAND_SIZE);
-	cmd[0] = TEST_UNIT_READY;
-
-	SRpnt = osst_do_scsi(*aSRpnt, STp, cmd, 0, DMA_NONE, STp->timeout, MAX_RETRIES, 1);
-	*aSRpnt = SRpnt;
-	if (!SRpnt) return (-EBUSY);
-
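-	/* 02/3a/00 means "medium not present" - poll until a tape arrives or the timeout expires */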
-	while ( STp->buffer->syscall_result && time_before(jiffies, startwait + timeout*HZ) &&
-		SRpnt->sense[2] == 2 && SRpnt->sense[12] == 0x3a && SRpnt->sense[13] == 0  ) {
-#if DEBUG
-	    if (debugging) {
-		printk(OSST_DEB_MSG "%s:D: Sleeping in onstream wait medium\n", name);
-		printk(OSST_DEB_MSG "%s:D: Turning off debugging for a while\n", name);
-		debugging = 0;
-	    }
-#endif
-	    msleep(100);
-
-	    memset(cmd, 0, MAX_COMMAND_SIZE);
-	    cmd[0] = TEST_UNIT_READY;
-
-	    SRpnt = osst_do_scsi(SRpnt, STp, cmd, 0, DMA_NONE, STp->timeout, MAX_RETRIES, 1);
-	}
-	*aSRpnt = SRpnt;
-#if DEBUG
-	debugging = dbg;
-#endif
-	if ( STp->buffer->syscall_result     && SRpnt->sense[2]  != 2 &&
-	     SRpnt->sense[12] != 4 && SRpnt->sense[13] == 1) {
-#if DEBUG
-	    printk(OSST_DEB_MSG "%s:D: Abnormal exit from onstream wait medium\n", name);
-	    printk(OSST_DEB_MSG "%s:D: Result = %d, Sense: 0=%02x, 2=%02x, 12=%02x, 13=%02x\n", name,
-			STp->buffer->syscall_result, SRpnt->sense[0], SRpnt->sense[2],
-			SRpnt->sense[12], SRpnt->sense[13]);
-#endif
-	    return 0;
-	}
-#if DEBUG
-	printk(OSST_DEB_MSG "%s:D: Normal exit from onstream wait medium\n", name);
-#endif
-	return 1;
-}
-
-static int osst_position_tape_and_confirm(struct osst_tape * STp, struct osst_request ** aSRpnt, int frame)
-{
-	int	retval;
-
-	osst_wait_ready(STp, aSRpnt, 15 * 60, 0);			/* TODO - can this catch a write error? */
-	retval = osst_set_frame_position(STp, aSRpnt, frame, 0);
-	if (retval) return (retval);
-	osst_wait_ready(STp, aSRpnt, 15 * 60, OSST_WAIT_POSITION_COMPLETE);
-	return (osst_get_frame_position(STp, aSRpnt));
-}
-
-/*
- * Wait for write(s) to complete
- */
-static int osst_flush_drive_buffer(struct osst_tape * STp, struct osst_request ** aSRpnt)
-{
-	unsigned char		cmd[MAX_COMMAND_SIZE];
-	struct osst_request   * SRpnt;
-	int			result = 0;
-	int			delay  = OSST_WAIT_WRITE_COMPLETE;
-#if DEBUG
-	char		      * name = tape_name(STp);
-
-	printk(OSST_DEB_MSG "%s:D: Reached onstream flush drive buffer (write filemark)\n", name);
-#endif
-
-	memset(cmd, 0, MAX_COMMAND_SIZE);
-	cmd[0] = WRITE_FILEMARKS;
-	cmd[1] = 1;
-
-	SRpnt = osst_do_scsi(*aSRpnt, STp, cmd, 0, DMA_NONE, STp->timeout, MAX_RETRIES, 1);
-	*aSRpnt = SRpnt;
-	if (!SRpnt) return (-EBUSY);
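-	/*
-	 * 02/04/08 ("long write in progress") only means the drive is still
-	 * flushing, so allow the longer wait; anything else goes through
-	 * write error recovery.
-	 */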
-	if (STp->buffer->syscall_result) {
-		if ((SRpnt->sense[2] & 0x0f) == 2 && SRpnt->sense[12] == 4) {
-			if (SRpnt->sense[13] == 8) {
-				delay = OSST_WAIT_LONG_WRITE_COMPLETE;
-			}
-		} else
-			result = osst_write_error_recovery(STp, aSRpnt, 0);
-	}
-	result |= osst_wait_ready(STp, aSRpnt, 5 * 60, delay);
-	STp->ps[STp->partition].rw = OS_WRITING_COMPLETE;
-
-	return (result);
-}
-
-#define OSST_POLL_PER_SEC 10
-static int osst_wait_frame(struct osst_tape * STp, struct osst_request ** aSRpnt, int curr, int minlast, int to)
-{
-	unsigned long	startwait = jiffies;
-	char	      * name      = tape_name(STp);
-#if DEBUG
-	char	   notyetprinted  = 1;
-#endif
-	if (minlast >= 0 && STp->ps[STp->partition].rw != ST_READING)
-		printk(KERN_ERR "%s:A: Waiting for frame without having initialized read!\n", name);
-
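-	/*
-	 * Poll the drive's reported positions until the host side is at
-	 * curr and either (reads, minlast >= 0) more than minlast frames
-	 * are buffered, or (writes, minlast < 0) the drive's last frame
-	 * position has advanced past curr + minlast.
-	 */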
-	while (time_before (jiffies, startwait + to*HZ))
-	{ 
-		int result;
-		result = osst_get_frame_position(STp, aSRpnt);
-		if (result == -EIO)
-			if ((result = osst_write_error_recovery(STp, aSRpnt, 0)) == 0)
-				return 0;	/* successful recovery leaves drive ready for frame */
-		if (result < 0) break;
-		if (STp->first_frame_position == curr &&
-		    ((minlast < 0 &&
-		      (signed)STp->last_frame_position > (signed)curr + minlast) ||
-		     (minlast >= 0 && STp->cur_frames > minlast)
-		    ) && result >= 0)
-		{
-#if DEBUG
-			if (debugging || time_after_eq(jiffies, startwait + 2*HZ/OSST_POLL_PER_SEC))
-				printk (OSST_DEB_MSG
-					"%s:D: Succ wait f fr %i (>%i): %i-%i %i (%i): %3li.%li s\n",
-					name, curr, curr+minlast, STp->first_frame_position,
-					STp->last_frame_position, STp->cur_frames,
-					result, (jiffies-startwait)/HZ, 
-					(((jiffies-startwait)%HZ)*10)/HZ);
-#endif
-			return 0;
-		}
-#if DEBUG
-		if (time_after_eq(jiffies, startwait + 2*HZ/OSST_POLL_PER_SEC) && notyetprinted)
-		{
-			printk (OSST_DEB_MSG "%s:D: Wait for frame %i (>%i): %i-%i %i (%i)\n",
-				name, curr, curr+minlast, STp->first_frame_position,
-				STp->last_frame_position, STp->cur_frames, result);
-			notyetprinted--;
-		}
-#endif
-		msleep(1000 / OSST_POLL_PER_SEC);
-	}
-#if DEBUG
-	printk (OSST_DEB_MSG "%s:D: Fail wait f fr %i (>%i): %i-%i %i: %3li.%li s\n",
-		name, curr, curr+minlast, STp->first_frame_position,
-		STp->last_frame_position, STp->cur_frames,
-		(jiffies-startwait)/HZ, (((jiffies-startwait)%HZ)*10)/HZ);
-#endif
-	return -EBUSY;
-}
-
-static int osst_recover_wait_frame(struct osst_tape * STp, struct osst_request ** aSRpnt, int writing)
-{
-	struct osst_request   * SRpnt;
-	unsigned char		cmd[MAX_COMMAND_SIZE];
-	unsigned long   	startwait = jiffies;
-	int			retval    = 1;
-        char		      * name      = tape_name(STp);
-
-	if (writing) {
-		char	mybuf[24];
-		char  * olddata = STp->buffer->b_data;
-		int	oldsize = STp->buffer->buffer_size;
-
-		/*
-		 * Write a zero-count filemark, then read the position: if that
-		 * shows a write error try to recover; if no progress, wait.
-		 */
-
-		memset(cmd, 0, MAX_COMMAND_SIZE);
-		cmd[0] = WRITE_FILEMARKS;
-		cmd[1] = 1;
-		SRpnt = osst_do_scsi(*aSRpnt, STp, cmd, 0, DMA_NONE, STp->timeout,
-								MAX_RETRIES, 1);
-
-		while (retval && time_before (jiffies, startwait + 5*60*HZ)) {
-
-			if (STp->buffer->syscall_result && (SRpnt->sense[2] & 0x0f) != 2) {
-
-				/* some failure - not just not-ready */
-				retval = osst_write_error_recovery(STp, aSRpnt, 0);
-				break;
-			}
-			schedule_timeout_interruptible(HZ / OSST_POLL_PER_SEC);
-
-			STp->buffer->b_data = mybuf; STp->buffer->buffer_size = 24;
-			memset(cmd, 0, MAX_COMMAND_SIZE);
-			cmd[0] = READ_POSITION;
-
-			SRpnt = osst_do_scsi(SRpnt, STp, cmd, 20, DMA_FROM_DEVICE, STp->timeout,
-										MAX_RETRIES, 1);
-
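-			/*
-			 * Byte 15 of the READ POSITION data is the low byte
-			 * of "blocks in buffer": keep waiting while more
-			 * than 25 frames remain unwritten in the drive.
-			 */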
-			retval = ( STp->buffer->syscall_result || (STp->buffer)->b_data[15] > 25 );
-			STp->buffer->b_data = olddata; STp->buffer->buffer_size = oldsize;
-		}
-		if (retval)
-			printk(KERN_ERR "%s:E: Device did not succeed to write buffered data\n", name);
-	} else
-		/* TODO - figure out which error conditions can be handled */
-		if (STp->buffer->syscall_result)
-			printk(KERN_WARNING
-				"%s:W: Recover_wait_frame(read) cannot handle %02x:%02x:%02x\n", name,
-					(*aSRpnt)->sense[ 2] & 0x0f,
-					(*aSRpnt)->sense[12],
-					(*aSRpnt)->sense[13]);
-
-	return retval;
-}
-
-/*
- * Read the next OnStream tape frame at the current location
- */
-static int osst_read_frame(struct osst_tape * STp, struct osst_request ** aSRpnt, int timeout)
-{
-	unsigned char		cmd[MAX_COMMAND_SIZE];
-	struct osst_request   * SRpnt;
-	int			retval = 0;
-#if DEBUG
-	os_aux_t	      * aux    = STp->buffer->aux;
-	char		      * name   = tape_name(STp);
-#endif
-
-	if (STp->poll)
-		if (osst_wait_frame (STp, aSRpnt, STp->first_frame_position, 0, timeout))
-			retval = osst_recover_wait_frame(STp, aSRpnt, 0);
-
-	memset(cmd, 0, MAX_COMMAND_SIZE);
-	cmd[0] = READ_6;
-	cmd[1] = 1;
-	cmd[4] = 1;
-
-#if DEBUG
-	if (debugging)
-		printk(OSST_DEB_MSG "%s:D: Reading frame from OnStream tape\n", name);
-#endif
-	SRpnt = osst_do_scsi(*aSRpnt, STp, cmd, OS_FRAME_SIZE, DMA_FROM_DEVICE,
-				      STp->timeout, MAX_RETRIES, 1);
-	*aSRpnt = SRpnt;
-	if (!SRpnt)
-		return (-EBUSY);
-
-	if ((STp->buffer)->syscall_result) {
-	    retval = 1;
-	    if (STp->read_error_frame == 0) {
-		STp->read_error_frame = STp->first_frame_position;
-#if DEBUG
-		printk(OSST_DEB_MSG "%s:D: Recording read error at %d\n", name, STp->read_error_frame);
-#endif
-	    }
-#if DEBUG
-	    if (debugging)
-		printk(OSST_DEB_MSG "%s:D: Sense: %2x %2x %2x %2x %2x %2x %2x %2x\n",
-		   name,
-		   SRpnt->sense[0], SRpnt->sense[1],
-		   SRpnt->sense[2], SRpnt->sense[3],
-		   SRpnt->sense[4], SRpnt->sense[5],
-		   SRpnt->sense[6], SRpnt->sense[7]);
-#endif
-	}
-	else
-	    STp->first_frame_position++;
-#if DEBUG
-	if (debugging) {
-	   char sig[8]; int i;
-	   for (i=0;i<4;i++)
-		   sig[i] = aux->application_sig[i]<32?'^':aux->application_sig[i];
-	   sig[4] = '\0';
-	   printk(OSST_DEB_MSG 
-		"%s:D: AUX: %s UpdFrCt#%d Wpass#%d %s FrSeq#%d LogBlk#%d Qty=%d Sz=%d\n", name, sig,
-			ntohl(aux->update_frame_cntr), ntohs(aux->partition.wrt_pass_cntr),
-			aux->frame_type==1?"EOD":aux->frame_type==2?"MARK":
-			aux->frame_type==8?"HEADR":aux->frame_type==0x80?"DATA":"FILL", 
-			ntohl(aux->frame_seq_num), ntohl(aux->logical_blk_num),
-			ntohs(aux->dat.dat_list[0].blk_cnt), ntohl(aux->dat.dat_list[0].blk_sz) );
-	   if (aux->frame_type==2)
-		printk(OSST_DEB_MSG "%s:D: mark_cnt=%d, last_mark_ppos=%d, last_mark_lbn=%d\n", name,
-			ntohl(aux->filemark_cnt), ntohl(aux->last_mark_ppos), ntohl(aux->last_mark_lbn));
-	   printk(OSST_DEB_MSG "%s:D: Exit read frame from OnStream tape with code %d\n", name, retval);
-	}
-#endif
-	return (retval);
-}
-
-static int osst_initiate_read(struct osst_tape * STp, struct osst_request ** aSRpnt)
-{
-	struct st_partstat    * STps   = &(STp->ps[STp->partition]);
-	struct osst_request   * SRpnt;
-	unsigned char		cmd[MAX_COMMAND_SIZE];
-	int			retval = 0;
-	char		      * name   = tape_name(STp);
-
-	if (STps->rw != ST_READING) {         /* Initialize read operation */
-		if (STps->rw == ST_WRITING || STp->dirty) {
-			STp->write_type = OS_WRITE_DATA;
-			osst_flush_write_buffer(STp, aSRpnt);
-			osst_flush_drive_buffer(STp, aSRpnt);
-		}
-		STps->rw = ST_READING;
-		STp->frame_in_buffer = 0;
-
-		/*
-		 * Issue a zero-length read command to get the OnStream
-		 * drive to start reading frames into its buffer.
-		 */
-		memset(cmd, 0, MAX_COMMAND_SIZE);
-		cmd[0] = READ_6;
-		cmd[1] = 1;
-
-#if DEBUG
-		printk(OSST_DEB_MSG "%s:D: Start Read Ahead on OnStream tape\n", name);
-#endif
-		SRpnt   = osst_do_scsi(*aSRpnt, STp, cmd, 0, DMA_NONE, STp->timeout, MAX_RETRIES, 1);
-		*aSRpnt = SRpnt;
-		if ((retval = STp->buffer->syscall_result))
-			printk(KERN_WARNING "%s:W: Error starting read ahead\n", name);
-	}
-
-	return retval;
-}
-
-static int osst_get_logical_frame(struct osst_tape * STp, struct osst_request ** aSRpnt,
-						int frame_seq_number, int quiet)
-{
-	struct st_partstat * STps  = &(STp->ps[STp->partition]);
-	char		   * name  = tape_name(STp);
-	int		     cnt   = 0,
-			     bad   = 0,
-			     past  = 0,
-			     x,
-			     position;
-
-	/*
-	 * If we want just any frame (-1) and there is a frame in the buffer, return it
-	 */
-	if (frame_seq_number == -1 && STp->frame_in_buffer) {
-#if DEBUG
-		printk(OSST_DEB_MSG "%s:D: Frame %d still in buffer\n", name, STp->frame_seq_number);
-#endif
-		return (STps->eof);
-	}
-	/*
-	 * Search and wait for the next logical tape frame
-	 */
-	while (1) {
-		if (cnt++ > 400) {
-			printk(KERN_ERR "%s:E: Couldn't find logical frame %d, aborting\n",
-					    name, frame_seq_number);
-			if (STp->read_error_frame) {
-				osst_set_frame_position(STp, aSRpnt, STp->read_error_frame, 0);
-#if DEBUG
-				printk(OSST_DEB_MSG "%s:D: Repositioning tape to bad frame %d\n",
-						    name, STp->read_error_frame);
-#endif
-				STp->read_error_frame = 0;
-				STp->abort_count++;
-			}
-			return (-EIO);
-		}
-#if DEBUG
-		if (debugging)
-			printk(OSST_DEB_MSG "%s:D: Looking for frame %d, attempt %d\n",
-					  name, frame_seq_number, cnt);
-#endif
-		if (osst_initiate_read(STp, aSRpnt) ||
-		    ((!STp->frame_in_buffer) && osst_read_frame(STp, aSRpnt, 30))) {
-			if (STp->raw)
-				return (-EIO);
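-			/*
-			 * Read failed: an error inside the config partition
-			 * (0xbae-0xbb7) skips ahead to the data partition at
-			 * 0xbb8; otherwise retry further down the tape, or
-			 * back up to just before the first bad frame after
-			 * ten attempts or when past EOD.
-			 */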
-			position = osst_get_frame_position(STp, aSRpnt);
-			if (position >= 0xbae && position < 0xbb8)
-				position = 0xbb8;
-			else if (position > STp->eod_frame_ppos || ++bad == 10) {
-				position = STp->read_error_frame - 1;
-				bad = 0;
-			}
-			else {
-				position += 29;
-				cnt      += 19;
-			}
-#if DEBUG
-			printk(OSST_DEB_MSG "%s:D: Bad frame detected, positioning tape to block %d\n",
-					 name, position);
-#endif
-			osst_set_frame_position(STp, aSRpnt, position, 0);
-			continue;
-		}
-		if (osst_verify_frame(STp, frame_seq_number, quiet))
-			break;
-		if (osst_verify_frame(STp, -1, quiet)) {
-			x = ntohl(STp->buffer->aux->frame_seq_num);
-			if (STp->fast_open) {
-				printk(KERN_WARNING
-				       "%s:W: Found logical frame %d instead of %d after fast open\n",
-				       name, x, frame_seq_number);
-				STp->header_ok = 0;
-				STp->read_error_frame = 0;
-				return (-EIO);
-			}
-			if (x > frame_seq_number) {
-				if (++past > 3) {
-					/* positioning backwards did not bring us to the desired frame */
-					position = STp->read_error_frame - 1;
-				}
-				else {
-					position = osst_get_frame_position(STp, aSRpnt)
-						   + frame_seq_number - x - 1;
-
-					if (STp->first_frame_position >= 3000 && position < 3000)
-						position -= 10;
-				}
-#if DEBUG
-				printk(OSST_DEB_MSG
-				       "%s:D: Found logical frame %d while looking for %d: back up %d\n",
-				       name, x, frame_seq_number,
-				       STp->first_frame_position - position);
-#endif
-				osst_set_frame_position(STp, aSRpnt, position, 0);
-				cnt += 10;
-			}
-			else
-				past = 0;
-		}
-		if (osst_get_frame_position(STp, aSRpnt) == 0xbaf) {
-#if DEBUG
-			printk(OSST_DEB_MSG "%s:D: Skipping config partition\n", name);
-#endif
-			osst_set_frame_position(STp, aSRpnt, 0xbb8, 0);
-			cnt--;
-		}
-		STp->frame_in_buffer = 0;
-	}
-	if (cnt > 1) {
-		STp->recover_count++;
-		STp->recover_erreg++;
-		printk(KERN_WARNING "%s:I: Don't worry, read error at position %d recovered\n",
-				    name, STp->read_error_frame);
-	}
-	STp->read_count++;
-
-#if DEBUG
-	if (debugging || STps->eof)
-		printk(OSST_DEB_MSG
-			"%s:D: Exit get logical frame (%d=>%d) from OnStream tape with code %d\n",
-			name, frame_seq_number, STp->frame_seq_number, STps->eof);
-#endif
-	STp->fast_open = 0;
-	STp->read_error_frame = 0;
-	return (STps->eof);
-}
-
-static int osst_seek_logical_blk(struct osst_tape * STp, struct osst_request ** aSRpnt, int logical_blk_num)
-{
-	struct st_partstat * STps = &(STp->ps[STp->partition]);
-	char		   * name = tape_name(STp);
-	int	retries    = 0;
-	int	frame_seq_estimate, ppos_estimate, move;
-	
-	if (logical_blk_num < 0) logical_blk_num = 0;
-#if DEBUG
-	printk(OSST_DEB_MSG "%s:D: Seeking logical block %d (now at %d, size %d%c)\n",
-				name, logical_blk_num, STp->logical_blk_num, 
-				STp->block_size<1024?STp->block_size:STp->block_size/1024,
-				STp->block_size<1024?'b':'k');
-#endif
-	/* Do we know where we are? */
-	if (STps->drv_block >= 0) {
-		move                = logical_blk_num - STp->logical_blk_num;
-		if (move < 0) move -= (OS_DATA_SIZE / STp->block_size) - 1;
-		move               /= (OS_DATA_SIZE / STp->block_size);
-		frame_seq_estimate  = STp->frame_seq_number + move;
-	} else
-		frame_seq_estimate  = logical_blk_num * STp->block_size / OS_DATA_SIZE;
-
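-	/*
-	 * Estimate the physical position: data frames start 10 positions
-	 * into the tape (0-4 reserved, 5-9 header), and frames past the
-	 * second header copy at 2990-2999 are shifted by a further 10.
-	 */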
-	if (frame_seq_estimate < 2980) ppos_estimate = frame_seq_estimate + 10;
-	else			       ppos_estimate = frame_seq_estimate + 20;
-	while (++retries < 10) {
-	   if (ppos_estimate > STp->eod_frame_ppos-2) {
-	       frame_seq_estimate += STp->eod_frame_ppos - 2 - ppos_estimate;
-	       ppos_estimate       = STp->eod_frame_ppos - 2;
-	   }
-	   if (frame_seq_estimate < 0) {
-	       frame_seq_estimate = 0;
-	       ppos_estimate      = 10;
-	   }
-	   osst_set_frame_position(STp, aSRpnt, ppos_estimate, 0);
-	   if (osst_get_logical_frame(STp, aSRpnt, frame_seq_estimate, 1) >= 0) {
-	      /* we've located the estimated frame, now does it have our block? */
-	      if (logical_blk_num <  STp->logical_blk_num ||
-	          logical_blk_num >= STp->logical_blk_num + ntohs(STp->buffer->aux->dat.dat_list[0].blk_cnt)) {
-		 if (STps->eof == ST_FM_HIT)
-		    move = logical_blk_num < STp->logical_blk_num? -2 : 1;
-		 else {
-		    move                = logical_blk_num - STp->logical_blk_num;
-		    if (move < 0) move -= (OS_DATA_SIZE / STp->block_size) - 1;
-		    move               /= (OS_DATA_SIZE / STp->block_size);
-		 }
-		 if (!move) move = logical_blk_num > STp->logical_blk_num ? 1 : -1;
-#if DEBUG
-		 printk(OSST_DEB_MSG
-			"%s:D: Seek retry %d at ppos %d fsq %d (est %d) lbn %d (need %d) move %d\n",
-				name, retries, ppos_estimate, STp->frame_seq_number, frame_seq_estimate, 
-				STp->logical_blk_num, logical_blk_num, move);
-#endif
-		 frame_seq_estimate += move;
-		 ppos_estimate      += move;
-		 continue;
-	      } else {
-		 STp->buffer->read_pointer  = (logical_blk_num - STp->logical_blk_num) * STp->block_size;
-		 STp->buffer->buffer_bytes -= STp->buffer->read_pointer;
-		 STp->logical_blk_num       =  logical_blk_num;
-#if DEBUG
-		 printk(OSST_DEB_MSG 
-			"%s:D: Seek success at ppos %d fsq %d in_buf %d, bytes %d, ptr %d*%d\n",
-				name, ppos_estimate, STp->frame_seq_number, STp->frame_in_buffer, 
-				STp->buffer->buffer_bytes, STp->buffer->read_pointer / STp->block_size, 
-				STp->block_size);
-#endif
-		 STps->drv_file = ntohl(STp->buffer->aux->filemark_cnt);
-		 if (STps->eof == ST_FM_HIT) {
-		     STps->drv_file++;
-		     STps->drv_block = 0;
-		 } else {
-		     STps->drv_block = ntohl(STp->buffer->aux->last_mark_lbn)?
-					  STp->logical_blk_num -
-					     (STps->drv_file ? ntohl(STp->buffer->aux->last_mark_lbn) + 1 : 0):
-					-1;
-		 }
-		 STps->eof = (STp->first_frame_position >= STp->eod_frame_ppos)?ST_EOD:ST_NOEOF;
-		 return 0;
-	      }
-	   }
-	   if (osst_get_logical_frame(STp, aSRpnt, -1, 1) < 0)
-	      goto error;
-	   /* we are not yet at the estimated frame, adjust our estimate of its physical position */
-#if DEBUG
-	   printk(OSST_DEB_MSG "%s:D: Seek retry %d at ppos %d fsq %d (est %d) lbn %d (need %d)\n", 
-			   name, retries, ppos_estimate, STp->frame_seq_number, frame_seq_estimate, 
-			   STp->logical_blk_num, logical_blk_num);
-#endif
-	   if (frame_seq_estimate != STp->frame_seq_number)
-	      ppos_estimate += frame_seq_estimate - STp->frame_seq_number;
-	   else
-	      break;
-	}
-error:
-	printk(KERN_ERR "%s:E: Couldn't seek to logical block %d (at %d), %d retries\n", 
-			    name, logical_blk_num, STp->logical_blk_num, retries);
-	return (-EIO);
-}
-
-/* The values below are based on the OnStream frame payload size of 32K == 2**15,
- * that is, OSST_FRAME_SHIFT + OSST_SECTOR_SHIFT must be 15. With a minimum block
- * size of 512 bytes, we need to be able to resolve 32K/512 == 64 == 2**6 positions
- * inside each frame. Finally, OSST_SECTOR_MASK == 2**OSST_FRAME_SHIFT - 1.
- */
-#define OSST_FRAME_SHIFT  6
-#define OSST_SECTOR_SHIFT 9
-#define OSST_SECTOR_MASK  0x03F
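-/*
- * Example: frame 100 with a 1536-byte offset into the frame encodes as
- * (100 << OSST_FRAME_SHIFT) | (1536 >> OSST_SECTOR_SHIFT) == 6400 | 3 ==
- * 6403; osst_seek_sector() splits such a value back into frame and offset.
- */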
-
-static int osst_get_sector(struct osst_tape * STp, struct osst_request ** aSRpnt)
-{
-	int	sector;
-#if DEBUG
-	char  * name = tape_name(STp);
-	
-	printk(OSST_DEB_MSG 
-		"%s:D: Positioned at ppos %d, frame %d, lbn %d, file %d, blk %d, %cptr %d, eof %d\n",
-		name, STp->first_frame_position, STp->frame_seq_number, STp->logical_blk_num,
-		STp->ps[STp->partition].drv_file, STp->ps[STp->partition].drv_block, 
-		STp->ps[STp->partition].rw == ST_WRITING?'w':'r',
-		STp->ps[STp->partition].rw == ST_WRITING?STp->buffer->buffer_bytes:
-		STp->buffer->read_pointer, STp->ps[STp->partition].eof);
-#endif
-	/* do we know where we are inside a file? */
-	if (STp->ps[STp->partition].drv_block >= 0) {
-		sector = (STp->frame_in_buffer ? STp->first_frame_position-1 :
-				STp->first_frame_position) << OSST_FRAME_SHIFT;
-		if (STp->ps[STp->partition].rw == ST_WRITING)
-		       	sector |= (STp->buffer->buffer_bytes >> OSST_SECTOR_SHIFT) & OSST_SECTOR_MASK;
-		else
-	       		sector |= (STp->buffer->read_pointer >> OSST_SECTOR_SHIFT) & OSST_SECTOR_MASK;
-	} else {
-		sector = osst_get_frame_position(STp, aSRpnt);
-		if (sector > 0)
-			sector <<= OSST_FRAME_SHIFT;
-	}
-	return sector;
-}
-
-static int osst_seek_sector(struct osst_tape * STp, struct osst_request ** aSRpnt, int sector)
-{
-	struct st_partstat * STps   = &(STp->ps[STp->partition]);
-	int		     frame  = sector >> OSST_FRAME_SHIFT,
-			     offset = (sector & OSST_SECTOR_MASK) << OSST_SECTOR_SHIFT, 
-			     r;
-#if DEBUG
-	char          * name = tape_name(STp);
-
-	printk(OSST_DEB_MSG "%s:D: Seeking sector %d in frame %d at offset %d\n",
-				name, sector, frame, offset);
-#endif
-	if (frame < 0 || frame >= STp->capacity) return (-ENXIO);
-
-	if (frame <= STp->first_data_ppos) {
-		STp->frame_seq_number = STp->logical_blk_num = STps->drv_file = STps->drv_block = 0;
-		return (osst_set_frame_position(STp, aSRpnt, frame, 0));
-	}
-	r = osst_set_frame_position(STp, aSRpnt, offset?frame:frame-1, 0);
-	if (r < 0) return r;
-
-	r = osst_get_logical_frame(STp, aSRpnt, -1, 1);
-	if (r < 0) return r;
-
-	if (osst_get_frame_position(STp, aSRpnt) != (offset?frame+1:frame)) return (-EIO);
-
-	if (offset) {
-		STp->logical_blk_num      += offset / STp->block_size;
-		STp->buffer->read_pointer  = offset;
-		STp->buffer->buffer_bytes -= offset;
-	} else {
-		STp->frame_seq_number++;
-		STp->frame_in_buffer       = 0;
-		STp->logical_blk_num      += ntohs(STp->buffer->aux->dat.dat_list[0].blk_cnt);
-		STp->buffer->buffer_bytes  = STp->buffer->read_pointer = 0;
-	}
-	STps->drv_file = ntohl(STp->buffer->aux->filemark_cnt);
-	if (STps->eof == ST_FM_HIT) {
-		STps->drv_file++;
-		STps->drv_block = 0;
-	} else {
-		STps->drv_block = ntohl(STp->buffer->aux->last_mark_lbn)?
-				    STp->logical_blk_num -
-					(STps->drv_file ? ntohl(STp->buffer->aux->last_mark_lbn) + 1 : 0):
-				  -1;
-	}
-	STps->eof       = (STp->first_frame_position >= STp->eod_frame_ppos)?ST_EOD:ST_NOEOF;
-#if DEBUG
-	printk(OSST_DEB_MSG 
-		"%s:D: Now positioned at ppos %d, frame %d, lbn %d, file %d, blk %d, rptr %d, eof %d\n",
-		name, STp->first_frame_position, STp->frame_seq_number, STp->logical_blk_num,
-		STps->drv_file, STps->drv_block, STp->buffer->read_pointer, STps->eof);
-#endif
-	return 0;
-}
-
-/*
- * Read back the drive's internal buffer contents, as part of
- * the write error recovery mechanism for old OnStream
- * firmware revisions.
- * Precondition for this function to work: all frames in the
- * drive's buffer must be of one type (DATA, MARK or EOD)!
- */
-static int osst_read_back_buffer_and_rewrite(struct osst_tape * STp, struct osst_request ** aSRpnt,
-						unsigned int frame, unsigned int skip, int pending)
-{
-	struct osst_request   * SRpnt = * aSRpnt;
-	unsigned char	      * buffer, * p;
-	unsigned char		cmd[MAX_COMMAND_SIZE];
-	int			flag, new_frame, i;
-	int			nframes          = STp->cur_frames;
-	int			blks_per_frame   = ntohs(STp->buffer->aux->dat.dat_list[0].blk_cnt);
-	int			frame_seq_number = ntohl(STp->buffer->aux->frame_seq_num)
-						- (nframes + pending - 1);
-	int			logical_blk_num  = ntohl(STp->buffer->aux->logical_blk_num) 
-						- (nframes + pending - 1) * blks_per_frame;
-	char		      * name             = tape_name(STp);
-	unsigned long		startwait        = jiffies;
-#if DEBUG
-	int			dbg              = debugging;
-#endif
-
-	if ((buffer = vmalloc(array_size((nframes + 1), OS_DATA_SIZE))) == NULL)
-		return (-EIO);
-
-	printk(KERN_INFO "%s:I: Reading back %d frames from drive buffer%s\n",
-			 name, nframes, pending?" and one that was pending":"");
-
-	osst_copy_from_buffer(STp->buffer, (p = &buffer[nframes * OS_DATA_SIZE]));
-#if DEBUG
-	if (pending && debugging)
-		printk(OSST_DEB_MSG "%s:D: Pending frame %d (lblk %d), data %02x %02x %02x %02x\n",
-				name, frame_seq_number + nframes,
-			       	logical_blk_num + nframes * blks_per_frame,
-			       	p[0], p[1], p[2], p[3]);
-#endif
-	for (i = 0, p = buffer; i < nframes; i++, p += OS_DATA_SIZE) {
-
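-		/*
-		 * READ BUFFER (0x3C) in the drive's "retrieve faulty block"
-		 * mode returns one 32KB frame from the drive's internal
-		 * buffer per call.
-		 */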
-		memset(cmd, 0, MAX_COMMAND_SIZE);
-		cmd[0] = 0x3C;		/* Buffer Read           */
-		cmd[1] = 6;		/* Retrieve Faulty Block */
-		cmd[7] = 32768 >> 8;
-		cmd[8] = 32768 & 0xff;
-
-		SRpnt = osst_do_scsi(SRpnt, STp, cmd, OS_FRAME_SIZE, DMA_FROM_DEVICE,
-					    STp->timeout, MAX_RETRIES, 1);
-	
-		if ((STp->buffer)->syscall_result || !SRpnt) {
-			printk(KERN_ERR "%s:E: Failed to read frame back from OnStream buffer\n", name);
-			vfree(buffer);
-			*aSRpnt = SRpnt;
-			return (-EIO);
-		}
-		osst_copy_from_buffer(STp->buffer, p);
-#if DEBUG
-		if (debugging)
-			printk(OSST_DEB_MSG "%s:D: Read back logical frame %d, data %02x %02x %02x %02x\n",
-					  name, frame_seq_number + i, p[0], p[1], p[2], p[3]);
-#endif
-	}
-	*aSRpnt = SRpnt;
-	osst_get_frame_position(STp, aSRpnt);
-
-#if DEBUG
-	printk(OSST_DEB_MSG "%s:D: Frames left in buffer: %d\n", name, STp->cur_frames);
-#endif
-	/* Write synchronously so we can be sure we're OK again and don't have to recover recursively */
-	/* In the header we don't actually re-write the frames that fail, just the ones after them */
-
-	for (flag=1, new_frame=frame, p=buffer, i=0; i < nframes + pending; ) {
-
-		if (flag) {
-			if (STp->write_type == OS_WRITE_HEADER) {
-				i += skip;
-				p += skip * OS_DATA_SIZE;
-			}
-			else if (new_frame < 2990 && new_frame+skip+nframes+pending >= 2990)
-				new_frame = 3000-i;
-			else
-				new_frame += skip;
-#if DEBUG
-			printk(OSST_DEB_MSG "%s:D: Position to frame %d, write fseq %d\n",
-						name, new_frame+i, frame_seq_number+i);
-#endif
-			osst_set_frame_position(STp, aSRpnt, new_frame + i, 0);
-			osst_wait_ready(STp, aSRpnt, 60, OSST_WAIT_POSITION_COMPLETE);
-			osst_get_frame_position(STp, aSRpnt);
-			SRpnt = * aSRpnt;
-
-			if (new_frame > frame + 1000) {
-				printk(KERN_ERR "%s:E: Failed to find writable tape media\n", name);
-				vfree(buffer);
-				return (-EIO);
-			}
-			if ( i >= nframes + pending ) break;
-			flag = 0;
-		}
-		osst_copy_to_buffer(STp->buffer, p);
-		/*
-		 * IMPORTANT: for error recovery to work, _never_ queue frames with mixed frame type!
-		 */
-		osst_init_aux(STp, STp->buffer->aux->frame_type, frame_seq_number+i,
-			       	logical_blk_num + i*blks_per_frame,
-			       	ntohl(STp->buffer->aux->dat.dat_list[0].blk_sz), blks_per_frame);
-		memset(cmd, 0, MAX_COMMAND_SIZE);
-		cmd[0] = WRITE_6;
-		cmd[1] = 1;
-		cmd[4] = 1;
-
-#if DEBUG
-		if (debugging)
-			printk(OSST_DEB_MSG
-				"%s:D: About to write frame %d, seq %d, lbn %d, data %02x %02x %02x %02x\n",
-				name, new_frame+i, frame_seq_number+i, logical_blk_num + i*blks_per_frame,
-				p[0], p[1], p[2], p[3]);
-#endif
-		SRpnt = osst_do_scsi(SRpnt, STp, cmd, OS_FRAME_SIZE, DMA_TO_DEVICE,
-					    STp->timeout, MAX_RETRIES, 1);
-
-		if (STp->buffer->syscall_result)
-			flag = 1;
-		else {
-			p += OS_DATA_SIZE; i++;
-
-			/* if we just sent the last frame, wait till all successfully written */
-			if ( i == nframes + pending ) {
-#if DEBUG
-				printk(OSST_DEB_MSG "%s:D: Check re-write successful\n", name);
-#endif
-				memset(cmd, 0, MAX_COMMAND_SIZE);
-				cmd[0] = WRITE_FILEMARKS;
-				cmd[1] = 1;
-				SRpnt = osst_do_scsi(SRpnt, STp, cmd, 0, DMA_NONE,
-							    STp->timeout, MAX_RETRIES, 1);
-#if DEBUG
-				if (debugging) {
-					printk(OSST_DEB_MSG "%s:D: Sleeping in re-write wait ready\n", name);
-					printk(OSST_DEB_MSG "%s:D: Turning off debugging for a while\n", name);
-					debugging = 0;
-				}
-#endif
-				flag = STp->buffer->syscall_result;
-				while ( !flag && time_before(jiffies, startwait + 60*HZ) ) {
-
-					memset(cmd, 0, MAX_COMMAND_SIZE);
-					cmd[0] = TEST_UNIT_READY;
-
-					SRpnt = osst_do_scsi(SRpnt, STp, cmd, 0, DMA_NONE, STp->timeout,
-												MAX_RETRIES, 1);
-
-					if (SRpnt->sense[2] == 2 && SRpnt->sense[12] == 4 &&
-					    (SRpnt->sense[13] == 1 || SRpnt->sense[13] == 8)) {
-						/* in the process of becoming ready */
-						msleep(100);
-						continue;
-					}
-					if (STp->buffer->syscall_result)
-						flag = 1;
-					break;
-				}
-#if DEBUG
-				debugging = dbg;
-				printk(OSST_DEB_MSG "%s:D: Wait re-write finished\n", name);
-#endif
-			}
-		}
-		*aSRpnt = SRpnt;
-		if (flag) {
-			if ((SRpnt->sense[ 2] & 0x0f) == 13 &&
-			     SRpnt->sense[12]         ==  0 &&
-			     SRpnt->sense[13]         ==  2) {
-				printk(KERN_ERR "%s:E: Volume overflow in write error recovery\n", name);
-				vfree(buffer);
-				return (-EIO);			/* hit end of tape = fail */
-			}
-			i = ((SRpnt->sense[3] << 24) |
-			     (SRpnt->sense[4] << 16) |
-			     (SRpnt->sense[5] <<  8) |
-			      SRpnt->sense[6]        ) - new_frame;
-			p = &buffer[i * OS_DATA_SIZE];
-#if DEBUG
-			printk(OSST_DEB_MSG "%s:D: Additional write error at %d\n", name, new_frame+i);
-#endif
-			osst_get_frame_position(STp, aSRpnt);
-#if DEBUG
-			printk(OSST_DEB_MSG "%s:D: reported frame positions: host = %d, tape = %d, buffer = %d\n",
-					  name, STp->first_frame_position, STp->last_frame_position, STp->cur_frames);
-#endif
-		}
-	}
-	if (flag) {
-		/* error recovery did not successfully complete */
-		printk(KERN_ERR "%s:E: Write error recovery failed in %s\n", name,
-				STp->write_type == OS_WRITE_HEADER?"header":"body");
-	}
-	if (!pending)
-		osst_copy_to_buffer(STp->buffer, p);	/* so buffer content == at entry in all cases */
-	vfree(buffer);
-	return 0;
-}
-
-static int osst_reposition_and_retry(struct osst_tape * STp, struct osst_request ** aSRpnt,
-					unsigned int frame, unsigned int skip, int pending)
-{
-	unsigned char		cmd[MAX_COMMAND_SIZE];
-	struct osst_request   * SRpnt;
-	char		      * name      = tape_name(STp);
-	int			expected  = 0;
-	int			attempts  = 1000 / skip;
-	int			flag      = 1;
-	unsigned long		startwait = jiffies;
-#if DEBUG
-	int			dbg       = debugging;
-#endif
-
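-	/*
-	 * Newer firmware (rev >= 10600) can replay its own buffer:
-	 * reposition past the bad spot, rewrite any pending frame, and
-	 * poll until the drive's buffer drains at the expected position.
-	 */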
-	while (attempts && time_before(jiffies, startwait + 60*HZ)) {
-		if (flag) {
-#if DEBUG
-			debugging = dbg;
-#endif
-			if (frame < 2990 && frame+skip+STp->cur_frames+pending >= 2990)
-				frame = 3000-skip;
-			expected = frame+skip+STp->cur_frames+pending;
-#if DEBUG
-			printk(OSST_DEB_MSG "%s:D: Position to fppos %d, re-write from fseq %d\n",
-					  name, frame+skip, STp->frame_seq_number-STp->cur_frames-pending);
-#endif
-			osst_set_frame_position(STp, aSRpnt, frame + skip, 1);
-			flag = 0;
-			attempts--;
-			schedule_timeout_interruptible(msecs_to_jiffies(100));
-		}
-		if (osst_get_frame_position(STp, aSRpnt) < 0) {		/* additional write error */
-#if DEBUG
-			printk(OSST_DEB_MSG "%s:D: Addl error, host %d, tape %d, buffer %d\n",
-					  name, STp->first_frame_position,
-					  STp->last_frame_position, STp->cur_frames);
-#endif
-			frame = STp->last_frame_position;
-			flag = 1;
-			continue;
-		}
-		if (pending && STp->cur_frames < 50) {
-
-			memset(cmd, 0, MAX_COMMAND_SIZE);
-			cmd[0] = WRITE_6;
-			cmd[1] = 1;
-			cmd[4] = 1;
-#if DEBUG
-			printk(OSST_DEB_MSG "%s:D: About to write pending fseq %d at fppos %d\n",
-					  name, STp->frame_seq_number-1, STp->first_frame_position);
-#endif
-			SRpnt = osst_do_scsi(*aSRpnt, STp, cmd, OS_FRAME_SIZE, DMA_TO_DEVICE,
-						      STp->timeout, MAX_RETRIES, 1);
-			*aSRpnt = SRpnt;
-
-			if (STp->buffer->syscall_result) {		/* additional write error */
-				if ((SRpnt->sense[ 2] & 0x0f) == 13 &&
-				     SRpnt->sense[12]         ==  0 &&
-				     SRpnt->sense[13]         ==  2) {
-					printk(KERN_ERR
-					       "%s:E: Volume overflow in write error recovery\n",
-					       name);
-					break;				/* hit end of tape = fail */
-				}
-				flag = 1;
-			}
-			else
-				pending = 0;
-
-			continue;
-		}
-		if (STp->cur_frames == 0) {
-#if DEBUG
-			debugging = dbg;
-			printk(OSST_DEB_MSG "%s:D: Wait re-write finished\n", name);
-#endif
-			if (STp->first_frame_position != expected) {
-				printk(KERN_ERR "%s:A: Actual position %d - expected %d\n", 
-						name, STp->first_frame_position, expected);
-				return (-EIO);
-			}
-			return 0;
-		}
-#if DEBUG
-		if (debugging) {
-			printk(OSST_DEB_MSG "%s:D: Sleeping in re-write wait ready\n", name);
-			printk(OSST_DEB_MSG "%s:D: Turning off debugging for a while\n", name);
-			debugging = 0;
-		}
-#endif
-		schedule_timeout_interruptible(msecs_to_jiffies(100));
-	}
-	printk(KERN_ERR "%s:E: Failed to find valid tape media\n", name);
-#if DEBUG
-	debugging = dbg;
-#endif
-	return (-EIO);
-}
-
-/*
- * Error recovery algorithm for the OnStream tape.
- */
-
-static int osst_write_error_recovery(struct osst_tape * STp, struct osst_request ** aSRpnt, int pending)
-{
-	struct osst_request * SRpnt  = * aSRpnt;
-	struct st_partstat  * STps   = & STp->ps[STp->partition];
-	char		    * name   = tape_name(STp);
-	int		      retval = 0;
-	int		      rw_state;
-	unsigned int	      frame, skip;
-
-	rw_state = STps->rw;
-
-	if ((SRpnt->sense[ 2] & 0x0f) != 3
-	  || SRpnt->sense[12]         != 12
-	  || SRpnt->sense[13]         != 0) {
-#if DEBUG
-		printk(OSST_DEB_MSG "%s:D: Write error recovery cannot handle %02x:%02x:%02x\n", name,
-			SRpnt->sense[2], SRpnt->sense[12], SRpnt->sense[13]);
-#endif
-		return (-EIO);
-	}
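-	/*
-	 * Fixed-format sense data: bytes 3-6 (the information field) hold
-	 * the physical position of the failing frame; byte 9 carries the
-	 * drive's advised skip count.
-	 */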
-	frame =	(SRpnt->sense[3] << 24) |
-		(SRpnt->sense[4] << 16) |
-		(SRpnt->sense[5] <<  8) |
-		 SRpnt->sense[6];
-	skip  =  SRpnt->sense[9];
-
-#if DEBUG
-	printk(OSST_DEB_MSG "%s:D: Detected physical bad frame at %u, advised to skip %d\n", name, frame, skip);
-#endif
-	osst_get_frame_position(STp, aSRpnt);
-#if DEBUG
-	printk(OSST_DEB_MSG "%s:D: reported frame positions: host = %d, tape = %d\n",
-			name, STp->first_frame_position, STp->last_frame_position);
-#endif
-	switch (STp->write_type) {
-	   case OS_WRITE_DATA:
-	   case OS_WRITE_EOD:
-	   case OS_WRITE_NEW_MARK:
-		printk(KERN_WARNING 
-			"%s:I: Relocating %d buffered logical frames from position %u to %u\n",
-			name, STp->cur_frames, frame, (frame + skip > 3000 && frame < 3000)?3000:frame + skip);
-		if (STp->os_fw_rev >= 10600)
-			retval = osst_reposition_and_retry(STp, aSRpnt, frame, skip, pending);
-		else
-			retval = osst_read_back_buffer_and_rewrite(STp, aSRpnt, frame, skip, pending);
-		printk(KERN_WARNING "%s:%s: %sWrite error%srecovered\n", name,
-			       	retval?"E"    :"I",
-			       	retval?""     :"Don't worry, ",
-			       	retval?" not ":" ");
-		break;
-	   case OS_WRITE_LAST_MARK:
-		printk(KERN_ERR "%s:E: Bad frame in update last marker, fatal\n", name);
-		osst_set_frame_position(STp, aSRpnt, frame + STp->cur_frames + pending, 0);
-		retval = -EIO;
-		break;
-	   case OS_WRITE_HEADER:
-		printk(KERN_WARNING "%s:I: Bad frame in header partition, skipped\n", name);
-		retval = osst_read_back_buffer_and_rewrite(STp, aSRpnt, frame, 1, pending);
-		break;
-	   default:
-		printk(KERN_INFO "%s:I: Bad frame in filler, ignored\n", name);
-		osst_set_frame_position(STp, aSRpnt, frame + STp->cur_frames + pending, 0);
-	}
-	osst_get_frame_position(STp, aSRpnt);
-#if DEBUG
-	printk(OSST_DEB_MSG "%s:D: Positioning complete, cur_frames %d, pos %d, tape pos %d\n", 
-			name, STp->cur_frames, STp->first_frame_position, STp->last_frame_position);
-	printk(OSST_DEB_MSG "%s:D: next logical frame to write: %d\n", name, STp->logical_blk_num);
-#endif
-	if (retval == 0) {
-		STp->recover_count++;
-		STp->recover_erreg++;
-	} else
-		STp->abort_count++;
-
-	STps->rw = rw_state;
-	return retval;
-}
-
-static int osst_space_over_filemarks_backward(struct osst_tape * STp, struct osst_request ** aSRpnt,
-								 int mt_op, int mt_count)
-{
-	char  * name = tape_name(STp);
-	int     cnt;
-	int     last_mark_ppos = -1;
-
-#if DEBUG
-	printk(OSST_DEB_MSG "%s:D: Reached space_over_filemarks_backwards %d %d\n", name, mt_op, mt_count);
-#endif
-	if (osst_get_logical_frame(STp, aSRpnt, -1, 0) < 0) {
-#if DEBUG
-		printk(OSST_DEB_MSG "%s:D: Couldn't get logical blk num in space_filemarks_bwd\n", name);
-#endif
-		return -EIO;
-	}
-	if (STp->linux_media_version >= 4) {
-		/*
-		 * direct lookup in header filemark list
-		 */
-		cnt = ntohl(STp->buffer->aux->filemark_cnt);
-		if (STp->header_ok                         && 
-		    STp->header_cache != NULL              &&
-		    (cnt - mt_count)  >= 0                 &&
-		    (cnt - mt_count)   < OS_FM_TAB_MAX     &&
-		    (cnt - mt_count)   < STp->filemark_cnt &&
-		    STp->header_cache->dat_fm_tab.fm_tab_ent[cnt-1] == STp->buffer->aux->last_mark_ppos)
-
-			last_mark_ppos = ntohl(STp->header_cache->dat_fm_tab.fm_tab_ent[cnt - mt_count]);
-#if DEBUG
-		if (STp->header_cache == NULL || (cnt - mt_count) < 0 || (cnt - mt_count) >= OS_FM_TAB_MAX)
-			printk(OSST_DEB_MSG "%s:D: Filemark lookup fail due to %s\n", name,
-			       STp->header_cache == NULL?"lack of header cache":"count out of range");
-		else
-			printk(OSST_DEB_MSG "%s:D: Filemark lookup: prev mark %d (%s), skip %d to %d\n",
-				name, cnt,
-				((cnt == -1 && ntohl(STp->buffer->aux->last_mark_ppos) == -1) ||
-				 (STp->header_cache->dat_fm_tab.fm_tab_ent[cnt-1] ==
-					 STp->buffer->aux->last_mark_ppos))?"match":"error",
-			       mt_count, last_mark_ppos);
-#endif
-		if (last_mark_ppos > 10 && last_mark_ppos < STp->eod_frame_ppos) {
-			osst_position_tape_and_confirm(STp, aSRpnt, last_mark_ppos);
-			if (osst_get_logical_frame(STp, aSRpnt, -1, 0) < 0) {
-#if DEBUG
-				printk(OSST_DEB_MSG 
-					"%s:D: Couldn't get logical blk num in space_filemarks\n", name);
-#endif
-				return (-EIO);
-			}
-			if (STp->buffer->aux->frame_type != OS_FRAME_TYPE_MARKER) {
-				printk(KERN_WARNING "%s:W: Expected to find marker at ppos %d, not found\n",
-						 name, last_mark_ppos);
-				return (-EIO);
-			}
-			goto found;
-		}
-#if DEBUG
-		printk(OSST_DEB_MSG "%s:D: Reverting to scan filemark backwards\n", name);
-#endif
-	}
-	cnt = 0;
-	while (cnt != mt_count) {
-		last_mark_ppos = ntohl(STp->buffer->aux->last_mark_ppos);
-		if (last_mark_ppos == -1)
-			return (-EIO);
-#if DEBUG
-		printk(OSST_DEB_MSG "%s:D: Positioning to last mark at %d\n", name, last_mark_ppos);
-#endif
-		osst_position_tape_and_confirm(STp, aSRpnt, last_mark_ppos);
-		cnt++;
-		if (osst_get_logical_frame(STp, aSRpnt, -1, 0) < 0) {
-#if DEBUG
-			printk(OSST_DEB_MSG "%s:D: Couldn't get logical blk num in space_filemarks\n", name);
-#endif
-			return (-EIO);
-		}
-		if (STp->buffer->aux->frame_type != OS_FRAME_TYPE_MARKER) {
-			printk(KERN_WARNING "%s:W: Expected to find marker at ppos %d, not found\n",
-					 name, last_mark_ppos);
-			return (-EIO);
-		}
-	}
-found:
-	if (mt_op == MTBSFM) {
-		STp->frame_seq_number++;
-		STp->frame_in_buffer      = 0;
-		STp->buffer->buffer_bytes = 0;
-		STp->buffer->read_pointer = 0;
-		STp->logical_blk_num     += ntohs(STp->buffer->aux->dat.dat_list[0].blk_cnt);
-	}
-	return 0;
-}
-
-/*
- * ADRL 1.1 compatible "slow" space-over-filemarks forward version
- *
- * Just scans for the filemark sequentially.
- */
-static int osst_space_over_filemarks_forward_slow(struct osst_tape * STp, struct osst_request ** aSRpnt,
-								     int mt_op, int mt_count)
-{
-	int	cnt = 0;
-#if DEBUG
-	char  * name = tape_name(STp);
-
-	printk(OSST_DEB_MSG "%s:D: Reached space_over_filemarks_forward_slow %d %d\n", name, mt_op, mt_count);
-#endif
-	if (osst_get_logical_frame(STp, aSRpnt, -1, 0) < 0) {
-#if DEBUG
-		printk(OSST_DEB_MSG "%s:D: Couldn't get logical blk num in space_filemarks_fwd\n", name);
-#endif
-		return (-EIO);
-	}
-	while (1) {
-		if (osst_get_logical_frame(STp, aSRpnt, -1, 0) < 0) {
-#if DEBUG
-			printk(OSST_DEB_MSG "%s:D: Couldn't get logical blk num in space_filemarks\n", name);
-#endif
-			return (-EIO);
-		}
-		if (STp->buffer->aux->frame_type == OS_FRAME_TYPE_MARKER)
-			cnt++;
-		if (STp->buffer->aux->frame_type == OS_FRAME_TYPE_EOD) {
-#if DEBUG
-			printk(OSST_DEB_MSG "%s:D: space_fwd: EOD reached\n", name);
-#endif
-			if (STp->first_frame_position > STp->eod_frame_ppos+1) {
-#if DEBUG
-				printk(OSST_DEB_MSG "%s:D: EOD position corrected (%d=>%d)\n",
-					       	name, STp->eod_frame_ppos, STp->first_frame_position-1);
-#endif
-				STp->eod_frame_ppos = STp->first_frame_position-1;
-			}
-			return (-EIO);
-		}
-		if (cnt == mt_count)
-			break;
-		STp->frame_in_buffer = 0;
-	}
-	if (mt_op == MTFSF) {
-		STp->frame_seq_number++;
-		STp->frame_in_buffer      = 0;
-		STp->buffer->buffer_bytes = 0;
-		STp->buffer->read_pointer = 0;
-		STp->logical_blk_num     += ntohs(STp->buffer->aux->dat.dat_list[0].blk_cnt);
-	}
-	return 0;
-}
-
-/*
- * Fast Linux-specific version of OnStream FSF
- */
-static int osst_space_over_filemarks_forward_fast(struct osst_tape * STp, struct osst_request ** aSRpnt,
-								     int mt_op, int mt_count)
-{
-	char  * name = tape_name(STp);
-	int	cnt  = 0,
-		next_mark_ppos = -1;
-
-#if DEBUG
-	printk(OSST_DEB_MSG "%s:D: Reached space_over_filemarks_forward_fast %d %d\n", name, mt_op, mt_count);
-#endif
-	if (osst_get_logical_frame(STp, aSRpnt, -1, 0) < 0) {
-#if DEBUG
-		printk(OSST_DEB_MSG "%s:D: Couldn't get logical blk num in space_filemarks_fwd\n", name);
-#endif
-		return (-EIO);
-	}
-
-	if (STp->linux_media_version >= 4) {
-		/*
-		 * direct lookup in header filemark list
-		 */
-		cnt = ntohl(STp->buffer->aux->filemark_cnt) - 1;
-		if (STp->header_ok                         && 
-		    STp->header_cache != NULL              &&
-		    (cnt + mt_count)   < OS_FM_TAB_MAX     &&
-		    (cnt + mt_count)   < STp->filemark_cnt &&
-		    ((cnt == -1 && ntohl(STp->buffer->aux->last_mark_ppos) == -1) ||
-		     (STp->header_cache->dat_fm_tab.fm_tab_ent[cnt] == STp->buffer->aux->last_mark_ppos)))
-
-			next_mark_ppos = ntohl(STp->header_cache->dat_fm_tab.fm_tab_ent[cnt + mt_count]);
-#if DEBUG
-		if (STp->header_cache == NULL || (cnt + mt_count) >= OS_FM_TAB_MAX)
-			printk(OSST_DEB_MSG "%s:D: Filemark lookup fail due to %s\n", name,
-			       STp->header_cache == NULL?"lack of header cache":"count out of range");
-		else
-			printk(OSST_DEB_MSG "%s:D: Filemark lookup: prev mark %d (%s), skip %d to %d\n",
-			       name, cnt,
-			       ((cnt == -1 && ntohl(STp->buffer->aux->last_mark_ppos) == -1) ||
-				(STp->header_cache->dat_fm_tab.fm_tab_ent[cnt] ==
-					 STp->buffer->aux->last_mark_ppos))?"match":"error",
-			       mt_count, next_mark_ppos);
-#endif
-		if (next_mark_ppos <= 10 || next_mark_ppos > STp->eod_frame_ppos) {
-#if DEBUG
-			printk(OSST_DEB_MSG "%s:D: Reverting to slow filemark space\n", name);
-#endif
-			return osst_space_over_filemarks_forward_slow(STp, aSRpnt, mt_op, mt_count);
-		} else {
-			osst_position_tape_and_confirm(STp, aSRpnt, next_mark_ppos);
-			if (osst_get_logical_frame(STp, aSRpnt, -1, 0) < 0) {
-#if DEBUG
-				printk(OSST_DEB_MSG "%s:D: Couldn't get logical blk num in space_filemarks\n",
-						 name);
-#endif
-				return (-EIO);
-			}
-			if (STp->buffer->aux->frame_type != OS_FRAME_TYPE_MARKER) {
-				printk(KERN_WARNING "%s:W: Expected to find marker at ppos %d, not found\n",
-						 name, next_mark_ppos);
-				return (-EIO);
-			}
-			if (ntohl(STp->buffer->aux->filemark_cnt) != cnt + mt_count) {
-				printk(KERN_WARNING "%s:W: Expected to find marker %d at ppos %d, not %d\n",
-						 name, cnt+mt_count, next_mark_ppos,
-						 ntohl(STp->buffer->aux->filemark_cnt));
-       				return (-EIO);
-			}
-		}
-	} else {
-		/*
-		 * Find nearest (usually previous) marker, then jump from marker to marker
-		 */
-		while (1) {
-			if (STp->buffer->aux->frame_type == OS_FRAME_TYPE_MARKER)
-				break;
-			if (STp->buffer->aux->frame_type == OS_FRAME_TYPE_EOD) {
-#if DEBUG
-				printk(OSST_DEB_MSG "%s:D: space_fwd: EOD reached\n", name);
-#endif
-				return (-EIO);
-			}
-			if (ntohl(STp->buffer->aux->filemark_cnt) == 0) {
-				if (STp->first_mark_ppos == -1) {
-#if DEBUG
-					printk(OSST_DEB_MSG "%s:D: Reverting to slow filemark space\n", name);
-#endif
-					return osst_space_over_filemarks_forward_slow(STp, aSRpnt, mt_op, mt_count);
-				}
-				osst_position_tape_and_confirm(STp, aSRpnt, STp->first_mark_ppos);
-				if (osst_get_logical_frame(STp, aSRpnt, -1, 0) < 0) {
-#if DEBUG
-					printk(OSST_DEB_MSG
-					       "%s:D: Couldn't get logical blk num in space_filemarks_fwd_fast\n",
-					       name);
-#endif
-					return (-EIO);
-				}
-				if (STp->buffer->aux->frame_type != OS_FRAME_TYPE_MARKER) {
-					printk(KERN_WARNING "%s:W: Expected to find filemark at %d\n",
-							 name, STp->first_mark_ppos);
-					return (-EIO);
-				}
-			} else {
-				if (osst_space_over_filemarks_backward(STp, aSRpnt, MTBSF, 1) < 0)
-					return (-EIO);
-				mt_count++;
-			}
-		}
-		cnt++;
-		while (cnt != mt_count) {
-			next_mark_ppos = ntohl(STp->buffer->aux->next_mark_ppos);
-			if (!next_mark_ppos || next_mark_ppos > STp->eod_frame_ppos) {
-#if DEBUG
-				printk(OSST_DEB_MSG "%s:D: Reverting to slow filemark space\n", name);
-#endif
-				return osst_space_over_filemarks_forward_slow(STp, aSRpnt, mt_op, mt_count - cnt);
-			}
-#if DEBUG
-			else printk(OSST_DEB_MSG "%s:D: Positioning to next mark at %d\n", name, next_mark_ppos);
-#endif
-			osst_position_tape_and_confirm(STp, aSRpnt, next_mark_ppos);
-			cnt++;
-			if (osst_get_logical_frame(STp, aSRpnt, -1, 0) < 0) {
-#if DEBUG
-				printk(OSST_DEB_MSG "%s:D: Couldn't get logical blk num in space_filemarks\n",
-						 name);
-#endif
-				return (-EIO);
-			}
-			if (STp->buffer->aux->frame_type != OS_FRAME_TYPE_MARKER) {
-				printk(KERN_WARNING "%s:W: Expected to find marker at ppos %d, not found\n",
-						 name, next_mark_ppos);
-				return (-EIO);
-			}
-		}
-	}
-	if (mt_op == MTFSF) {
-		STp->frame_seq_number++;
-		STp->frame_in_buffer      = 0;
-		STp->buffer->buffer_bytes = 0;
-		STp->buffer->read_pointer = 0;
-		STp->logical_blk_num     += ntohs(STp->buffer->aux->dat.dat_list[0].blk_cnt);
-	}
-	return 0;
-}
-
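The fast-spacing code above works because every marker frame's auxiliary header records the physical position of the next marker, letting the driver hop from marker to marker instead of reading each frame. A minimal sketch of that chain walk, with read_marker_aux() as a hypothetical stand-in for the position-and-read sequence (osst_position_tape_and_confirm() plus osst_get_logical_frame()) that the real code interleaves with error recovery:

	/* Illustrative only -- not part of the driver. */
	static int walk_marker_chain(struct osst_tape *STp, int ppos, int hops)
	{
		while (hops--) {
			os_aux_t *aux = read_marker_aux(STp, ppos);	/* hypothetical helper */

			if (!aux || aux->frame_type != OS_FRAME_TYPE_MARKER)
				return -EIO;
			ppos = ntohl(aux->next_mark_ppos);	/* stored big endian on tape */
			if (ppos <= 0 || ppos > STp->eod_frame_ppos)
				return -EIO;		/* chain broken, fall back to slow scan */
		}
		return ppos;
	}
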
-/*
- * In debug mode, we want to see as many errors as possible
- * to test the error recovery mechanism.
- */
-#if DEBUG
-static void osst_set_retries(struct osst_tape * STp, struct osst_request ** aSRpnt, int retries)
-{
-	unsigned char		cmd[MAX_COMMAND_SIZE];
-	struct osst_request   * SRpnt  = * aSRpnt;
-	char		      * name   = tape_name(STp);
-
-	memset(cmd, 0, MAX_COMMAND_SIZE);
-	cmd[0] = MODE_SELECT;
-	cmd[1] = 0x10;
-	cmd[4] = NUMBER_RETRIES_PAGE_LENGTH + MODE_HEADER_LENGTH;
-
-	(STp->buffer)->b_data[0] = cmd[4] - 1;
-	(STp->buffer)->b_data[1] = 0;			/* Medium Type - ignoring */
-	(STp->buffer)->b_data[2] = 0;			/* Reserved */
-	(STp->buffer)->b_data[3] = 0;			/* Block Descriptor Length */
-	(STp->buffer)->b_data[MODE_HEADER_LENGTH + 0] = NUMBER_RETRIES_PAGE | (1 << 7);
-	(STp->buffer)->b_data[MODE_HEADER_LENGTH + 1] = 2;
-	(STp->buffer)->b_data[MODE_HEADER_LENGTH + 2] = 4;
-	(STp->buffer)->b_data[MODE_HEADER_LENGTH + 3] = retries;
-
-	if (debugging)
-	    printk(OSST_DEB_MSG "%s:D: Setting number of retries on OnStream tape to %d\n", name, retries);
-
-	SRpnt = osst_do_scsi(SRpnt, STp, cmd, cmd[4], DMA_TO_DEVICE, STp->timeout, 0, 1);
-	*aSRpnt = SRpnt;
-
-	if ((STp->buffer)->syscall_result)
-	    printk (KERN_ERR "%s:D: Couldn't set retries to %d\n", name, retries);
-}
-#endif
-
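For reference, the buffer osst_set_retries() builds follows the standard parameter-list layout for the 6-byte MODE SELECT: a 4-byte mode parameter header, no block descriptors, then the page itself. A sketch of what lands in b_data, assuming MODE_HEADER_LENGTH is 4 as the indexing above implies:

	/*
	 * b_data[0]  mode data length (cmd[4] - 1)
	 * b_data[1]  medium type             (0, ignored)
	 * b_data[2]  device-specific byte    (0)
	 * b_data[3]  block descriptor length (0 -- none)
	 * b_data[4]  NUMBER_RETRIES_PAGE | (1 << 7)  -- page code (+ PS bit)
	 * b_data[5]  2                               -- page length
	 * b_data[6]  4
	 * b_data[7]  retries                         -- vendor retry count
	 */
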
-
-static int osst_write_filemark(struct osst_tape * STp, struct osst_request ** aSRpnt)
-{
-	int	result;
-	int	this_mark_ppos = STp->first_frame_position;
-	int	this_mark_lbn  = STp->logical_blk_num;
-#if DEBUG
-	char  * name = tape_name(STp);
-#endif
-
-	if (STp->raw) return 0;
-
-	STp->write_type = OS_WRITE_NEW_MARK;
-#if DEBUG
-	printk(OSST_DEB_MSG "%s:D: Writing Filemark %i at fppos %d (fseq %d, lblk %d)\n", 
-	       name, STp->filemark_cnt, this_mark_ppos, STp->frame_seq_number, this_mark_lbn);
-#endif
-	STp->dirty = 1;
-	result  = osst_flush_write_buffer(STp, aSRpnt);
-	result |= osst_flush_drive_buffer(STp, aSRpnt);
-	STp->last_mark_ppos = this_mark_ppos;
-	STp->last_mark_lbn  = this_mark_lbn;
-	if (STp->header_cache != NULL && STp->filemark_cnt < OS_FM_TAB_MAX)
-		STp->header_cache->dat_fm_tab.fm_tab_ent[STp->filemark_cnt] = htonl(this_mark_ppos);
-	if (STp->filemark_cnt++ == 0)
-		STp->first_mark_ppos = this_mark_ppos;
-	return result;
-}
-
-static int osst_write_eod(struct osst_tape * STp, struct osst_request ** aSRpnt)
-{
-	int	result;
-#if DEBUG
-	char  * name = tape_name(STp);
-#endif
-
-	if (STp->raw) return 0;
-
-	STp->write_type = OS_WRITE_EOD;
-	STp->eod_frame_ppos = STp->first_frame_position;
-#if DEBUG
-	printk(OSST_DEB_MSG "%s:D: Writing EOD at fppos %d (fseq %d, lblk %d)\n", name,
-			STp->eod_frame_ppos, STp->frame_seq_number, STp->logical_blk_num);
-#endif
-	STp->dirty = 1;
-
-	result  = osst_flush_write_buffer(STp, aSRpnt);	
-	result |= osst_flush_drive_buffer(STp, aSRpnt);
-	STp->eod_frame_lfa = --(STp->frame_seq_number);
-	return result;
-}
-
-static int osst_write_filler(struct osst_tape * STp, struct osst_request ** aSRpnt, int where, int count)
-{
-	char * name = tape_name(STp);
-
-#if DEBUG
-	printk(OSST_DEB_MSG "%s:D: Reached onstream write filler group %d\n", name, where);
-#endif
-	osst_wait_ready(STp, aSRpnt, 60 * 5, 0);
-	osst_set_frame_position(STp, aSRpnt, where, 0);
-	STp->write_type = OS_WRITE_FILLER;
-	while (count--) {
-		memcpy(STp->buffer->b_data, "Filler", 6);
-		STp->buffer->buffer_bytes = 6;
-		STp->dirty = 1;
-		if (osst_flush_write_buffer(STp, aSRpnt)) {
-			printk(KERN_INFO "%s:I: Couldn't write filler frame\n", name);
-			return (-EIO);
-		}
-	}
-#if DEBUG
-	printk(OSST_DEB_MSG "%s:D: Exiting onstream write filler group\n", name);
-#endif
-	return osst_flush_drive_buffer(STp, aSRpnt);
-}
-
-static int __osst_write_header(struct osst_tape * STp, struct osst_request ** aSRpnt, int where, int count)
-{
-	char * name = tape_name(STp);
-	int     result;
-
-#if DEBUG
-	printk(OSST_DEB_MSG "%s:D: Reached onstream write header group %d\n", name, where);
-#endif
-	osst_wait_ready(STp, aSRpnt, 60 * 5, 0);
-	osst_set_frame_position(STp, aSRpnt, where, 0);
-	STp->write_type = OS_WRITE_HEADER;
-	while (count--) {
-		osst_copy_to_buffer(STp->buffer, (unsigned char *)STp->header_cache);
-		STp->buffer->buffer_bytes = sizeof(os_header_t);
-		STp->dirty = 1;
-		if (osst_flush_write_buffer(STp, aSRpnt)) {
-			printk(KERN_INFO "%s:I: Couldn't write header frame\n", name);
-			return (-EIO);
-		}
-	}
-	result = osst_flush_drive_buffer(STp, aSRpnt);
-#if DEBUG
-	printk(OSST_DEB_MSG "%s:D: Write onstream header group %s\n", name, result?"failed":"done");
-#endif
-	return result;
-}
-
-static int osst_write_header(struct osst_tape * STp, struct osst_request ** aSRpnt, int locate_eod)
-{
-	os_header_t * header;
-	int	      result;
-	char        * name = tape_name(STp);
-
-#if DEBUG
-	printk(OSST_DEB_MSG "%s:D: Writing tape header\n", name);
-#endif
-	if (STp->raw) return 0;
-
-	if (STp->header_cache == NULL) {
-		if ((STp->header_cache = vmalloc(sizeof(os_header_t))) == NULL) {
-			printk(KERN_ERR "%s:E: Failed to allocate header cache\n", name);
-			return (-ENOMEM);
-		}
-		memset(STp->header_cache, 0, sizeof(os_header_t));
-#if DEBUG
-		printk(OSST_DEB_MSG "%s:D: Allocated and cleared memory for header cache\n", name);
-#endif
-	}
-	if (STp->header_ok) STp->update_frame_cntr++;
-	else                STp->update_frame_cntr = 0;
-
-	header = STp->header_cache;
-	strcpy(header->ident_str, "ADR_SEQ");
-	header->major_rev      = 1;
-	header->minor_rev      = 4;
-	header->ext_trk_tb_off = htons(17192);
-	header->pt_par_num     = 1;
-	header->partition[0].partition_num              = OS_DATA_PARTITION;
-	header->partition[0].par_desc_ver               = OS_PARTITION_VERSION;
-	header->partition[0].wrt_pass_cntr              = htons(STp->wrt_pass_cntr);
-	header->partition[0].first_frame_ppos           = htonl(STp->first_data_ppos);
-	header->partition[0].last_frame_ppos            = htonl(STp->capacity);
-	header->partition[0].eod_frame_ppos             = htonl(STp->eod_frame_ppos);
-	header->cfg_col_width                           = htonl(20);
-	header->dat_col_width                           = htonl(1500);
-	header->qfa_col_width                           = htonl(0);
-	header->ext_track_tb.nr_stream_part             = 1;
-	header->ext_track_tb.et_ent_sz                  = 32;
-	header->ext_track_tb.dat_ext_trk_ey.et_part_num = 0;
-	header->ext_track_tb.dat_ext_trk_ey.fmt         = 1;
-	header->ext_track_tb.dat_ext_trk_ey.fm_tab_off  = htons(17736);
-	header->ext_track_tb.dat_ext_trk_ey.last_hlb_hi = 0;
-	header->ext_track_tb.dat_ext_trk_ey.last_hlb    = htonl(STp->eod_frame_lfa);
-	header->ext_track_tb.dat_ext_trk_ey.last_pp	= htonl(STp->eod_frame_ppos);
-	header->dat_fm_tab.fm_part_num                  = 0;
-	header->dat_fm_tab.fm_tab_ent_sz                = 4;
-	header->dat_fm_tab.fm_tab_ent_cnt               = htons(STp->filemark_cnt<OS_FM_TAB_MAX?
-								STp->filemark_cnt:OS_FM_TAB_MAX);
-
-	result  = __osst_write_header(STp, aSRpnt, 0xbae, 5);
-	if (STp->update_frame_cntr == 0)
-		    osst_write_filler(STp, aSRpnt, 0xbb3, 5);
-	result &= __osst_write_header(STp, aSRpnt,     5, 5);
-
-	if (locate_eod) {
-#if DEBUG
-		printk(OSST_DEB_MSG "%s:D: Locating back to eod frame addr %d\n", name, STp->eod_frame_ppos);
-#endif
-		osst_set_frame_position(STp, aSRpnt, STp->eod_frame_ppos, 0);
-	}
-	if (result)
-		printk(KERN_ERR "%s:E: Write header failed\n", name);
-	else {
-		memcpy(STp->application_sig, "LIN4", 4);
-		STp->linux_media         = 1;
-		STp->linux_media_version = 4;
-		STp->header_ok           = 1;
-	}
-	return result;
-}
-
-static int osst_reset_header(struct osst_tape * STp, struct osst_request ** aSRpnt)
-{
-	if (STp->header_cache != NULL)
-		memset(STp->header_cache, 0, sizeof(os_header_t));
-
-	STp->logical_blk_num = STp->frame_seq_number = 0;
-	STp->frame_in_buffer = 0;
-	STp->eod_frame_ppos = STp->first_data_ppos = 0x0000000A;
-	STp->filemark_cnt = 0;
-	STp->first_mark_ppos = STp->last_mark_ppos = STp->last_mark_lbn = -1;
-	return osst_write_header(STp, aSRpnt, 1);
-}
-
-static int __osst_analyze_headers(struct osst_tape * STp, struct osst_request ** aSRpnt, int ppos)
-{
-	char        * name = tape_name(STp);
-	os_header_t * header;
-	os_aux_t    * aux;
-	char          id_string[8];
-	int	      linux_media_version,
-		      update_frame_cntr;
-
-	if (STp->raw)
-		return 1;
-
-	if (ppos == 5 || ppos == 0xbae || STp->buffer->syscall_result) {
-		if (osst_set_frame_position(STp, aSRpnt, ppos, 0))
-			printk(KERN_WARNING "%s:W: Couldn't position tape\n", name);
-		osst_wait_ready(STp, aSRpnt, 60 * 15, 0);
-		if (osst_initiate_read (STp, aSRpnt)) {
-			printk(KERN_WARNING "%s:W: Couldn't initiate read\n", name);
-			return 0;
-		}
-	}
-	if (osst_read_frame(STp, aSRpnt, 180)) {
-#if DEBUG
-		printk(OSST_DEB_MSG "%s:D: Couldn't read header frame\n", name);
-#endif
-		return 0;
-	}
-	header = (os_header_t *) STp->buffer->b_data;	/* warning: only first segment addressable */
-	aux = STp->buffer->aux;
-	if (aux->frame_type != OS_FRAME_TYPE_HEADER) {
-#if DEBUG
-		printk(OSST_DEB_MSG "%s:D: Skipping non-header frame (%d)\n", name, ppos);
-#endif
-		return 0;
-	}
-	if (ntohl(aux->frame_seq_num)              != 0                   ||
-	    ntohl(aux->logical_blk_num)            != 0                   ||
-	          aux->partition.partition_num     != OS_CONFIG_PARTITION ||
-	    ntohl(aux->partition.first_frame_ppos) != 0                   ||
-	    ntohl(aux->partition.last_frame_ppos)  != 0xbb7               ) {
-#if DEBUG
-		printk(OSST_DEB_MSG "%s:D: Invalid header frame (%d,%d,%d,%d,%d)\n", name,
-				ntohl(aux->frame_seq_num), ntohl(aux->logical_blk_num),
-			       	aux->partition.partition_num, ntohl(aux->partition.first_frame_ppos),
-			       	ntohl(aux->partition.last_frame_ppos));
-#endif
-		return 0;
-	}
-	if (strncmp(header->ident_str, "ADR_SEQ", 7) != 0 &&
-	    strncmp(header->ident_str, "ADR-SEQ", 7) != 0) {
-		strlcpy(id_string, header->ident_str, 8);
-#if DEBUG
-		printk(OSST_DEB_MSG "%s:D: Invalid header identification string %s\n", name, id_string);
-#endif
-		return 0;
-	}
-	update_frame_cntr = ntohl(aux->update_frame_cntr);
-	if (update_frame_cntr < STp->update_frame_cntr) {
-#if DEBUG
-		printk(OSST_DEB_MSG "%s:D: Skipping frame %d with update_frame_counter %d<%d\n",
-				   name, ppos, update_frame_cntr, STp->update_frame_cntr);
-#endif
-		return 0;
-	}
-	if (header->major_rev != 1 || header->minor_rev != 4 ) {
-#if DEBUG
-		printk(OSST_DEB_MSG "%s:D: %s revision %d.%d detected (1.4 supported)\n", 
-				 name, (header->major_rev != 1 || header->minor_rev < 2 || 
-				       header->minor_rev  > 4 )? "Invalid" : "Warning:",
-				 header->major_rev, header->minor_rev);
-#endif
-		if (header->major_rev != 1 || header->minor_rev < 2 || header->minor_rev > 4)
-			return 0;
-	}
-#if DEBUG
-	if (header->pt_par_num != 1)
-		printk(KERN_INFO "%s:W: %d partitions defined, only one supported\n", 
-				 name, header->pt_par_num);
-#endif
-	memcpy(id_string, aux->application_sig, 4);
-	id_string[4] = 0;
-	if (memcmp(id_string, "LIN", 3) == 0) {
-		STp->linux_media = 1;
-		linux_media_version = id_string[3] - '0';
-		if (linux_media_version != 4)
-			printk(KERN_INFO "%s:I: Linux media version %d detected (current 4)\n",
-					 name, linux_media_version);
-	} else {
-		printk(KERN_WARNING "%s:W: Non Linux media detected (%s)\n", name, id_string);
-		return 0;
-	}
-	if (linux_media_version < STp->linux_media_version) {
-#if DEBUG
-		printk(OSST_DEB_MSG "%s:D: Skipping frame %d with linux_media_version %d\n",
-				  name, ppos, linux_media_version);
-#endif
-		return 0;
-	}
-	if (linux_media_version > STp->linux_media_version) {
-#if DEBUG
-		printk(OSST_DEB_MSG "%s:D: Frame %d sets linux_media_version to %d\n",
-				   name, ppos, linux_media_version);
-#endif
-		memcpy(STp->application_sig, id_string, 5);
-		STp->linux_media_version = linux_media_version;
-		STp->update_frame_cntr = -1;
-	}
-	if (update_frame_cntr > STp->update_frame_cntr) {
-#if DEBUG
-		printk(OSST_DEB_MSG "%s:D: Frame %d sets update_frame_counter to %d\n",
-				   name, ppos, update_frame_cntr);
-#endif
-		if (STp->header_cache == NULL) {
-			if ((STp->header_cache = vmalloc(sizeof(os_header_t))) == NULL) {
-				printk(KERN_ERR "%s:E: Failed to allocate header cache\n", name);
-				return 0;
-			}
-#if DEBUG
-			printk(OSST_DEB_MSG "%s:D: Allocated memory for header cache\n", name);
-#endif
-		}
-		osst_copy_from_buffer(STp->buffer, (unsigned char *)STp->header_cache);
-		header = STp->header_cache;	/* further accesses from cached (full) copy */
-
-		STp->wrt_pass_cntr     = ntohs(header->partition[0].wrt_pass_cntr);
-		STp->first_data_ppos   = ntohl(header->partition[0].first_frame_ppos);
-		STp->eod_frame_ppos    = ntohl(header->partition[0].eod_frame_ppos);
-		STp->eod_frame_lfa     = ntohl(header->ext_track_tb.dat_ext_trk_ey.last_hlb);
-		STp->filemark_cnt      = ntohl(aux->filemark_cnt);
-		STp->first_mark_ppos   = ntohl(aux->next_mark_ppos);
-		STp->last_mark_ppos    = ntohl(aux->last_mark_ppos);
-		STp->last_mark_lbn     = ntohl(aux->last_mark_lbn);
-		STp->update_frame_cntr = update_frame_cntr;
-#if DEBUG
-		printk(OSST_DEB_MSG "%s:D: Detected write pass %d, update frame counter %d, filemark counter %d\n",
-				name, STp->wrt_pass_cntr, STp->update_frame_cntr, STp->filemark_cnt);
-		printk(OSST_DEB_MSG "%s:D: first data frame on tape = %d, last = %d, eod frame = %d\n", name,
-				STp->first_data_ppos,
-				ntohl(header->partition[0].last_frame_ppos),
-				ntohl(header->partition[0].eod_frame_ppos));
-		printk(OSST_DEB_MSG "%s:D: first mark on tape = %d, last = %d, eod frame = %d\n",
-				name, STp->first_mark_ppos, STp->last_mark_ppos, STp->eod_frame_ppos);
-#endif
-		if (header->minor_rev < 4 && STp->linux_media_version == 4) {
-#if DEBUG
-			printk(OSST_DEB_MSG "%s:D: Moving filemark list to ADR 1.4 location\n", name);
-#endif
-			memcpy((void *)header->dat_fm_tab.fm_tab_ent, 
-			       (void *)header->old_filemark_list, sizeof(header->dat_fm_tab.fm_tab_ent));
-			memset((void *)header->old_filemark_list, 0, sizeof(header->old_filemark_list));
-		}
-		if (header->minor_rev == 4   &&
-		    (header->ext_trk_tb_off                          != htons(17192)               ||
-		     header->partition[0].partition_num              != OS_DATA_PARTITION          ||
-		     header->partition[0].par_desc_ver               != OS_PARTITION_VERSION       ||
-		     header->partition[0].last_frame_ppos            != htonl(STp->capacity)       ||
-		     header->cfg_col_width                           != htonl(20)                  ||
-		     header->dat_col_width                           != htonl(1500)                ||
-		     header->qfa_col_width                           != htonl(0)                   ||
-		     header->ext_track_tb.nr_stream_part             != 1                          ||
-		     header->ext_track_tb.et_ent_sz                  != 32                         ||
-		     header->ext_track_tb.dat_ext_trk_ey.et_part_num != OS_DATA_PARTITION          ||
-		     header->ext_track_tb.dat_ext_trk_ey.fmt         != 1                          ||
-		     header->ext_track_tb.dat_ext_trk_ey.fm_tab_off  != htons(17736)               ||
-		     header->ext_track_tb.dat_ext_trk_ey.last_hlb_hi != 0                          ||
-		     header->ext_track_tb.dat_ext_trk_ey.last_pp     != htonl(STp->eod_frame_ppos) ||
-		     header->dat_fm_tab.fm_part_num                  != OS_DATA_PARTITION          ||
-		     header->dat_fm_tab.fm_tab_ent_sz                != 4                          ||
-		     header->dat_fm_tab.fm_tab_ent_cnt               !=
-			     htons(STp->filemark_cnt<OS_FM_TAB_MAX?STp->filemark_cnt:OS_FM_TAB_MAX)))
-			printk(KERN_WARNING "%s:W: Failed consistency check ADR 1.4 format\n", name);
-
-	}
-
-	return 1;
-}
-
-static int osst_analyze_headers(struct osst_tape * STp, struct osst_request ** aSRpnt)
-{
-	int	position, ppos;
-	int	first, last;
-	int	valid = 0;
-	char  * name  = tape_name(STp);
-
-	position = osst_get_frame_position(STp, aSRpnt);
-
-	if (STp->raw) {
-		STp->header_ok = STp->linux_media = 1;
-		STp->linux_media_version = 0;
-		return 1;
-	}
-	STp->header_ok = STp->linux_media = STp->linux_media_version = 0;
-	STp->wrt_pass_cntr = STp->update_frame_cntr = -1;
-	STp->eod_frame_ppos = STp->first_data_ppos = -1;
-	STp->first_mark_ppos = STp->last_mark_ppos = STp->last_mark_lbn = -1;
-#if DEBUG
-	printk(OSST_DEB_MSG "%s:D: Reading header\n", name);
-#endif
-
-	/* optimization for speed - if we are positioned at ppos 10, read second group first  */	
-	/* TODO try the ADR 1.1 locations for the second group if we have no valid one yet... */
-
-	first = position==10?0xbae: 5;
-	last  = position==10?0xbb3:10;
-
-	for (ppos = first; ppos < last; ppos++)
-		if (__osst_analyze_headers(STp, aSRpnt, ppos))
-			valid = 1;
-
-	first = position==10? 5:0xbae;
-	last  = position==10?10:0xbb3;
-
-	for (ppos = first; ppos < last; ppos++)
-		if (__osst_analyze_headers(STp, aSRpnt, ppos))
-			valid = 1;
-
-	if (!valid) {
-		printk(KERN_ERR "%s:E: Failed to find valid ADRL header, new media?\n", name);
-		STp->eod_frame_ppos = STp->first_data_ppos = 0;
-		osst_set_frame_position(STp, aSRpnt, 10, 0);
-		return 0;
-	}
-	if (position <= STp->first_data_ppos) {
-		position = STp->first_data_ppos;
-		STp->ps[0].drv_file = STp->ps[0].drv_block = STp->frame_seq_number = STp->logical_blk_num = 0;
-	}
-	osst_set_frame_position(STp, aSRpnt, position, 0);
-	STp->header_ok = 1;
-
-	return 1;
-}
-
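As the two loops above encode, an ADR tape carries two header groups of five identical frames each, and osst_analyze_headers() accepts the medium if any copy in either group passes __osst_analyze_headers(). The positions match the literals used by osst_write_header() and osst_write_filler(); the names below are purely illustrative:

	/* Illustrative names -- the driver itself uses the bare literals. */
	#define OSST_HDR_GROUP_1	0x005	/* frames 5..9: five header copies        */
	#define OSST_HDR_GROUP_2	0xbae	/* frames 0xbae..0xbb2: five more copies  */
	#define OSST_FILLER_START	0xbb3	/* filler frames through 0xbb7            */
	#define OSST_DATA_START		0xbb8	/* first frame of the data partition      */
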
-static int osst_verify_position(struct osst_tape * STp, struct osst_request ** aSRpnt)
-{
-	int	frame_position  = STp->first_frame_position;
-	int	frame_seq_numbr = STp->frame_seq_number;
-	int	logical_blk_num = STp->logical_blk_num;
-	int	halfway_frame   = STp->frame_in_buffer;
-	int	read_pointer    = STp->buffer->read_pointer;
-	int	prev_mark_ppos  = -1;
-	int	actual_mark_ppos, i, n;
-#if DEBUG
-	char  * name = tape_name(STp);
-
-	printk(OSST_DEB_MSG "%s:D: Verify that the tape is really the one we think before writing\n", name);
-#endif
-	osst_set_frame_position(STp, aSRpnt, frame_position - 1, 0);
-	if (osst_get_logical_frame(STp, aSRpnt, -1, 0) < 0) {
-#if DEBUG
-		printk(OSST_DEB_MSG "%s:D: Couldn't get logical blk num in verify_position\n", name);
-#endif
-		return (-EIO);
-	}
-	if (STp->linux_media_version >= 4) {
-		for (i=0; i<STp->filemark_cnt; i++)
-			if ((n=ntohl(STp->header_cache->dat_fm_tab.fm_tab_ent[i])) < frame_position)
-				prev_mark_ppos = n;
-	} else
-		prev_mark_ppos = frame_position - 1;  /* usually - we don't really know */
-	actual_mark_ppos = STp->buffer->aux->frame_type == OS_FRAME_TYPE_MARKER ?
-				frame_position - 1 : ntohl(STp->buffer->aux->last_mark_ppos);
-	if (frame_position  != STp->first_frame_position                   ||
-	    frame_seq_numbr != STp->frame_seq_number + (halfway_frame?0:1) ||
-	    prev_mark_ppos  != actual_mark_ppos                            ) {
-#if DEBUG
-		printk(OSST_DEB_MSG "%s:D: Block mismatch: fppos %d-%d, fseq %d-%d, mark %d-%d\n", name,
-				  STp->first_frame_position, frame_position, 
-				  STp->frame_seq_number + (halfway_frame?0:1),
-				  frame_seq_numbr, actual_mark_ppos, prev_mark_ppos);
-#endif
-		return (-EIO);
-	}
-	if (halfway_frame) {
-		/* prepare buffer for append and rewrite on top of original */
-		osst_set_frame_position(STp, aSRpnt, frame_position - 1, 0);
-		STp->buffer->buffer_bytes  = read_pointer;
-		STp->ps[STp->partition].rw = ST_WRITING;
-		STp->dirty                 = 1;
-	}
-	STp->frame_in_buffer  = halfway_frame;
-	STp->frame_seq_number = frame_seq_numbr;
-	STp->logical_blk_num  = logical_blk_num;
-	return 0;
-}
-
-/* According to OnStream, the version numbering is as follows:
- * X.XX for released versions (X = digit),
- * XXXY for unreleased versions (Y = letter).
- * Ordering: 1.05 < 106A < 106B < ... < 106a < ... < 1.06
- * This function maps the scheme to monotonically increasing numbers.
- */
-static unsigned int osst_parse_firmware_rev (const char * str)
-{
-	if (str[1] == '.') {
-		return (str[0]-'0')*10000
-			+(str[2]-'0')*1000
-			+(str[3]-'0')*100;
-	} else {
-		return (str[0]-'0')*10000
-			+(str[1]-'0')*1000
-			+(str[2]-'0')*100 - 100
-			+(str[3]-'@');
-	}
-}
-
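Worked examples of the mapping, consistent with the os_fw_rev < 10600 check in osst_configure_onstream() below:

	/*
	 * osst_parse_firmware_rev("1.05") == 1*10000 + 0*1000 + 5*100           == 10500
	 * osst_parse_firmware_rev("106A") == 1*10000 + 0*1000 + 6*100 - 100 + 1 == 10501
	 * osst_parse_firmware_rev("106B") == 10502, ... ("106a" == 10533), ...
	 * osst_parse_firmware_rev("1.06") == 1*10000 + 0*1000 + 6*100           == 10600
	 *
	 * so released 1.06 sorts above every 106x prerelease, as the comment requires.
	 */
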
-/*
- * Configure the OnStream SCSI tape drive for default operation
- */
-static int osst_configure_onstream(struct osst_tape *STp, struct osst_request ** aSRpnt)
-{
-	unsigned char                  cmd[MAX_COMMAND_SIZE];
-	char                         * name = tape_name(STp);
-	struct osst_request          * SRpnt = * aSRpnt;
-	osst_mode_parameter_header_t * header;
-	osst_block_size_page_t       * bs;
-	osst_capabilities_page_t     * cp;
-	osst_tape_paramtr_page_t     * prm;
-	int                            drive_buffer_size;
-
-	if (STp->ready != ST_READY) {
-#if DEBUG
-	    printk(OSST_DEB_MSG "%s:D: Not Ready\n", name);
-#endif
-	    return (-EIO);
-	}
-	
-	if (STp->os_fw_rev < 10600) {
-	    printk(KERN_INFO "%s:I: Old OnStream firmware revision detected (%s),\n", name, STp->device->rev);
-	    printk(KERN_INFO "%s:I: an upgrade to version 1.06 or above is recommended\n", name);
-	}
-
-	/*
-	 * Configure 32.5KB (data+aux) frame size.
-	 * Get the current frame size from the block size mode page.
-	 */
-	memset(cmd, 0, MAX_COMMAND_SIZE);
-	cmd[0] = MODE_SENSE;
-	cmd[1] = 8;
-	cmd[2] = BLOCK_SIZE_PAGE;
-	cmd[4] = BLOCK_SIZE_PAGE_LENGTH + MODE_HEADER_LENGTH;
-
-	SRpnt = osst_do_scsi(SRpnt, STp, cmd, cmd[4], DMA_FROM_DEVICE, STp->timeout, 0, 1);
-	if (SRpnt == NULL) {
-#if DEBUG
-	    printk(OSST_DEB_MSG "osst :D: Busy\n");
-#endif
-	    return (-EBUSY);
-	}
-	*aSRpnt = SRpnt;
-	if ((STp->buffer)->syscall_result != 0) {
-	    printk (KERN_ERR "%s:E: Can't get tape block size mode page\n", name);
-	    return (-EIO);
-	}
-
-	header = (osst_mode_parameter_header_t *) (STp->buffer)->b_data;
-	bs = (osst_block_size_page_t *) ((STp->buffer)->b_data + sizeof(osst_mode_parameter_header_t) + header->bdl);
-
-#if DEBUG
-	printk(OSST_DEB_MSG "%s:D: 32KB play back: %s\n",   name, bs->play32     ? "Yes" : "No");
-	printk(OSST_DEB_MSG "%s:D: 32.5KB play back: %s\n", name, bs->play32_5   ? "Yes" : "No");
-	printk(OSST_DEB_MSG "%s:D: 32KB record: %s\n",      name, bs->record32   ? "Yes" : "No");
-	printk(OSST_DEB_MSG "%s:D: 32.5KB record: %s\n",    name, bs->record32_5 ? "Yes" : "No");
-#endif
-
-	/*
-	 * Configure default auto columns mode, 32.5KB transfer mode
-	 */ 
-	bs->one = 1;
-	bs->play32 = 0;
-	bs->play32_5 = 1;
-	bs->record32 = 0;
-	bs->record32_5 = 1;
-
-	memset(cmd, 0, MAX_COMMAND_SIZE);
-	cmd[0] = MODE_SELECT;
-	cmd[1] = 0x10;
-	cmd[4] = BLOCK_SIZE_PAGE_LENGTH + MODE_HEADER_LENGTH;
-
-	SRpnt = osst_do_scsi(SRpnt, STp, cmd, cmd[4], DMA_TO_DEVICE, STp->timeout, 0, 1);
-	*aSRpnt = SRpnt;
-	if ((STp->buffer)->syscall_result != 0) {
-	    printk (KERN_ERR "%s:E: Couldn't set tape block size mode page\n", name);
-	    return (-EIO);
-	}
-
-#if DEBUG
-	printk(KERN_INFO "%s:D: Drive Block Size changed to 32.5K\n", name);
-	/*
-	 * In debug mode, we want to see as many errors as possible
-	 * to test the error recovery mechanism.
-	 */
-	osst_set_retries(STp, aSRpnt, 0);
-	SRpnt = * aSRpnt;
-#endif
-
-	/*
-	 * Set vendor name to 'LIN4' for "Linux support version 4".
-	 */
-
-	memset(cmd, 0, MAX_COMMAND_SIZE);
-	cmd[0] = MODE_SELECT;
-	cmd[1] = 0x10;
-	cmd[4] = VENDOR_IDENT_PAGE_LENGTH + MODE_HEADER_LENGTH;
-
-	header->mode_data_length = VENDOR_IDENT_PAGE_LENGTH + MODE_HEADER_LENGTH - 1;
-	header->medium_type      = 0;	/* Medium Type - ignoring */
-	header->dsp              = 0;	/* Reserved */
-	header->bdl              = 0;	/* Block Descriptor Length */
-	
-	(STp->buffer)->b_data[MODE_HEADER_LENGTH + 0] = VENDOR_IDENT_PAGE | (1 << 7);
-	(STp->buffer)->b_data[MODE_HEADER_LENGTH + 1] = 6;
-	(STp->buffer)->b_data[MODE_HEADER_LENGTH + 2] = 'L';
-	(STp->buffer)->b_data[MODE_HEADER_LENGTH + 3] = 'I';
-	(STp->buffer)->b_data[MODE_HEADER_LENGTH + 4] = 'N';
-	(STp->buffer)->b_data[MODE_HEADER_LENGTH + 5] = '4';
-	(STp->buffer)->b_data[MODE_HEADER_LENGTH + 6] = 0;
-	(STp->buffer)->b_data[MODE_HEADER_LENGTH + 7] = 0;
-
-	SRpnt = osst_do_scsi(SRpnt, STp, cmd, cmd[4], DMA_TO_DEVICE, STp->timeout, 0, 1);
-	*aSRpnt = SRpnt;
-
-	if ((STp->buffer)->syscall_result != 0) {
-	    printk (KERN_ERR "%s:E: Couldn't set vendor name to %s\n", name, 
-			(char *) ((STp->buffer)->b_data + MODE_HEADER_LENGTH + 2));
-	    return (-EIO);
-	}
-
-	memset(cmd, 0, MAX_COMMAND_SIZE);
-	cmd[0] = MODE_SENSE;
-	cmd[1] = 8;
-	cmd[2] = CAPABILITIES_PAGE;
-	cmd[4] = CAPABILITIES_PAGE_LENGTH + MODE_HEADER_LENGTH;
-
-	SRpnt = osst_do_scsi(SRpnt, STp, cmd, cmd[4], DMA_FROM_DEVICE, STp->timeout, 0, 1);
-	*aSRpnt = SRpnt;
-
-	if ((STp->buffer)->syscall_result != 0) {
-	    printk (KERN_ERR "%s:E: Can't get capabilities page\n", name);
-	    return (-EIO);
-	}
-
-	header = (osst_mode_parameter_header_t *) (STp->buffer)->b_data;
-	cp     = (osst_capabilities_page_t    *) ((STp->buffer)->b_data +
-		 sizeof(osst_mode_parameter_header_t) + header->bdl);
-
-	drive_buffer_size = ntohs(cp->buffer_size) / 2;
-
-	memset(cmd, 0, MAX_COMMAND_SIZE);
-	cmd[0] = MODE_SENSE;
-	cmd[1] = 8;
-	cmd[2] = TAPE_PARAMTR_PAGE;
-	cmd[4] = TAPE_PARAMTR_PAGE_LENGTH + MODE_HEADER_LENGTH;
-
-	SRpnt = osst_do_scsi(SRpnt, STp, cmd, cmd[4], DMA_FROM_DEVICE, STp->timeout, 0, 1);
-	*aSRpnt = SRpnt;
-
-	if ((STp->buffer)->syscall_result != 0) {
-	    printk (KERN_ERR "%s:E: Can't get tape parameter page\n", name);
-	    return (-EIO);
-	}
-
-	header = (osst_mode_parameter_header_t *) (STp->buffer)->b_data;
-	prm    = (osst_tape_paramtr_page_t    *) ((STp->buffer)->b_data +
-		 sizeof(osst_mode_parameter_header_t) + header->bdl);
-
-	STp->density  = prm->density;
-	STp->capacity = ntohs(prm->segtrk) * ntohs(prm->trks);
-#if DEBUG
-	printk(OSST_DEB_MSG "%s:D: Density %d, tape length: %dMB, drive buffer size: %dKB\n",
-			  name, STp->density, STp->capacity / 32, drive_buffer_size);
-#endif
-
-	return 0;
-	
-}
-
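The 32.5KB transfer mode selected above matches the driver's fixed frame geometry (per the definitions in osst.h): each on-tape frame is 32KB of user data followed by a 512-byte auxiliary header, the os_aux_t seen throughout this file:

	/*
	 * OS_FRAME_SIZE == 32 * 1024 + 512   -- one on-tape frame (32.5KB)
	 * OS_DATA_SIZE  == 32 * 1024         -- user data portion
	 * OS_AUX_SIZE   == 512               -- trailing os_aux_t header
	 */
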
-
-/* Step over EOF if it has been inadvertently crossed (ioctl not used because
-   it messes up the block number). */
-static int cross_eof(struct osst_tape *STp, struct osst_request ** aSRpnt, int forward)
-{
-	int	result;
-	char  * name = tape_name(STp);
-
-#if DEBUG
-	if (debugging)
-		printk(OSST_DEB_MSG "%s:D: Stepping over filemark %s.\n",
-	   			  name, forward ? "forward" : "backward");
-#endif
-
-	if (forward) {
-	   /* assumes that the filemark is already read by the drive, so this is low cost */
-	   result = osst_space_over_filemarks_forward_slow(STp, aSRpnt, MTFSF, 1);
-	}
-	else
-	   /* assumes this is only called if we just read the filemark! */
-	   result = osst_seek_logical_blk(STp, aSRpnt, STp->logical_blk_num - 1);
-
-	if (result < 0)
-	   printk(KERN_WARNING "%s:W: Stepping over filemark %s failed.\n",
-				name, forward ? "forward" : "backward");
-
-	return result;
-}
-
-
-/* Get the tape position. */
-
-static int osst_get_frame_position(struct osst_tape *STp, struct osst_request ** aSRpnt)
-{
-	unsigned char		scmd[MAX_COMMAND_SIZE];
-	struct osst_request   * SRpnt;
-	int			result = 0;
-	char    	      * name   = tape_name(STp);
-
-	/* KG: We want to be able to use it for checking Write Buffer availability
-	 *  and thus don't want to risk overwriting anything. Exchange buffers ... */
-	char		mybuf[24];
-	char	      * olddata = STp->buffer->b_data;
-	int		oldsize = STp->buffer->buffer_size;
-
-	if (STp->ready != ST_READY) return (-EIO);
-
-	memset (scmd, 0, MAX_COMMAND_SIZE);
-	scmd[0] = READ_POSITION;
-
-	STp->buffer->b_data = mybuf; STp->buffer->buffer_size = 24;
-	SRpnt = osst_do_scsi(*aSRpnt, STp, scmd, 20, DMA_FROM_DEVICE,
-				      STp->timeout, MAX_RETRIES, 1);
-	if (!SRpnt) {
-		STp->buffer->b_data = olddata; STp->buffer->buffer_size = oldsize;
-		return (-EBUSY);
-	}
-	*aSRpnt = SRpnt;
-
-	if (STp->buffer->syscall_result)
-		result = ((SRpnt->sense[2] & 0x0f) == 3) ? -EIO : -EINVAL;	/* sense key 3: Medium Error */
-
-	if (result == -EINVAL)
-		printk(KERN_ERR "%s:E: Can't read tape position.\n", name);
-	else {
-		if (result == -EIO) {	/* re-read position - this needs to preserve media errors */
-			unsigned char mysense[16];
-			memcpy (mysense, SRpnt->sense, 16);
-			memset (scmd, 0, MAX_COMMAND_SIZE);
-			scmd[0] = READ_POSITION;
-			STp->buffer->b_data = mybuf; STp->buffer->buffer_size = 24;
-			SRpnt = osst_do_scsi(SRpnt, STp, scmd, 20, DMA_FROM_DEVICE,
-						    STp->timeout, MAX_RETRIES, 1);
-#if DEBUG
-			printk(OSST_DEB_MSG "%s:D: Reread position, reason=[%02x:%02x:%02x], result=[%s%02x:%02x:%02x]\n",
-					name, mysense[2], mysense[12], mysense[13], STp->buffer->syscall_result?"":"ok:",
-					SRpnt->sense[2],SRpnt->sense[12],SRpnt->sense[13]);
-#endif
-			if (!STp->buffer->syscall_result)
-				memcpy (SRpnt->sense, mysense, 16);
-			else
-				printk(KERN_WARNING "%s:W: Double error in get position\n", name);
-		}
-		STp->first_frame_position = ((STp->buffer)->b_data[4] << 24)
-					  + ((STp->buffer)->b_data[5] << 16)
-					  + ((STp->buffer)->b_data[6] << 8)
-					  +  (STp->buffer)->b_data[7];
-		STp->last_frame_position  = ((STp->buffer)->b_data[ 8] << 24)
-					  + ((STp->buffer)->b_data[ 9] << 16)
-					  + ((STp->buffer)->b_data[10] <<  8)
-					  +  (STp->buffer)->b_data[11];
-		STp->cur_frames           =  (STp->buffer)->b_data[15];
-#if DEBUG
-		if (debugging) {
-			printk(OSST_DEB_MSG "%s:D: Drive Positions: host %d, tape %d%s, buffer %d\n", name,
-					    STp->first_frame_position, STp->last_frame_position,
-					    ((STp->buffer)->b_data[0]&0x80)?" (BOP)":
-					    ((STp->buffer)->b_data[0]&0x40)?" (EOP)":"",
-					    STp->cur_frames);
-		}
-#endif
-		if (STp->cur_frames == 0 && STp->first_frame_position != STp->last_frame_position) {
-#if DEBUG
-			printk(OSST_DEB_MSG "%s:D: Correcting read position %d, %d, %d\n", name,
-					STp->first_frame_position, STp->last_frame_position, STp->cur_frames);
-#endif
-			STp->first_frame_position = STp->last_frame_position;
-		}
-	}
-	STp->buffer->b_data = olddata; STp->buffer->buffer_size = oldsize;
-
-	return (result == 0 ? STp->first_frame_position : result);
-}
-
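The byte shifting above assembles big-endian 32-bit fields from the 20-byte READ POSITION payload: first frame position at bytes 4-7, last at 8-11, frames buffered in the drive at byte 15. An equivalent decode using the kernel's unaligned helpers (a sketch, not what the driver does); on platforms where char is signed, using u8 also sidesteps the sign extension that the shifted addition above risks for bytes >= 0x80:

	#include <asm/unaligned.h>

	/* Equivalent decode of the 20-byte READ POSITION data in mybuf[]. */
	static void decode_read_position(struct osst_tape *STp, const u8 *data)
	{
		STp->first_frame_position = get_unaligned_be32(&data[4]);
		STp->last_frame_position  = get_unaligned_be32(&data[8]);
		STp->cur_frames           = data[15];
	}
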
-
-/* Set the tape frame position */
-static int osst_set_frame_position(struct osst_tape *STp, struct osst_request ** aSRpnt, int ppos, int skip)
-{
-	unsigned char		scmd[MAX_COMMAND_SIZE];
-	struct osst_request   * SRpnt;
-	struct st_partstat    * STps;
-	int			result = 0;
-	int			pp     = (ppos == 3000 && !skip)? 0 : ppos;
-	char		      * name   = tape_name(STp);
-
-	if (STp->ready != ST_READY) return (-EIO);
-
-	STps = &(STp->ps[STp->partition]);
-
-	if (ppos < 0 || ppos > STp->capacity) {
-		printk(KERN_WARNING "%s:W: Reposition request %d out of range\n", name, ppos);
-		pp = ppos = ppos < 0 ? 0 : (STp->capacity - 1);
-		result = (-EINVAL);
-	}
-
-	do {
-#if DEBUG
-		if (debugging)
-			printk(OSST_DEB_MSG "%s:D: Setting ppos to %d.\n", name, pp);
-#endif
-		memset (scmd, 0, MAX_COMMAND_SIZE);
-		scmd[0] = SEEK_10;
-		scmd[1] = 1;
-		scmd[3] = (pp >> 24);
-		scmd[4] = (pp >> 16);
-		scmd[5] = (pp >> 8);
-		scmd[6] =  pp;
-		if (skip)
-			scmd[9] = 0x80;
-
-		SRpnt = osst_do_scsi(*aSRpnt, STp, scmd, 0, DMA_NONE, STp->long_timeout,
-								MAX_RETRIES, 1);
-		if (!SRpnt)
-			return (-EBUSY);
-		*aSRpnt  = SRpnt;
-
-		if ((STp->buffer)->syscall_result != 0) {
-#if DEBUG
-			printk(OSST_DEB_MSG "%s:D: SEEK command from %d to %d failed.\n",
-					name, STp->first_frame_position, pp);
-#endif
-			result = (-EIO);
-		}
-		if (pp != ppos)
-			osst_wait_ready(STp, aSRpnt, 5 * 60, OSST_WAIT_POSITION_COMPLETE);
-	} while ((pp != ppos) && (pp = ppos));
-	STp->first_frame_position = STp->last_frame_position = ppos;
-	STps->eof = ST_NOEOF;
-	STps->at_sm = 0;
-	STps->rw = ST_IDLE;
-	STp->frame_in_buffer = 0;
-	return result;
-}
-
-static int osst_write_trailer(struct osst_tape *STp, struct osst_request ** aSRpnt, int leave_at_EOT)
-{
-	struct st_partstat * STps = &(STp->ps[STp->partition]);
-	int result = 0;
-
-	if (STp->write_type != OS_WRITE_NEW_MARK) {
-		/* true unless the user wrote the filemark for us */
-		result = osst_flush_drive_buffer(STp, aSRpnt);
-		if (result < 0) goto out;
-		result = osst_write_filemark(STp, aSRpnt);
-		if (result < 0) goto out;
-
-		if (STps->drv_file >= 0)
-			STps->drv_file++ ;
-		STps->drv_block = 0;
-	}
-	result = osst_write_eod(STp, aSRpnt);
-	osst_write_header(STp, aSRpnt, leave_at_EOT);
-
-	STps->eof = ST_FM;
-out:
-	return result;
-}
-
-/* osst versions of st functions - augmented and stripped to suit OnStream only */
-
-/* Flush the write buffer (never need to write if variable blocksize). */
-static int osst_flush_write_buffer(struct osst_tape *STp, struct osst_request ** aSRpnt)
-{
-	int			offset, transfer, blks = 0;
-	int			result = 0;
-	unsigned char		cmd[MAX_COMMAND_SIZE];
-	struct osst_request   * SRpnt = *aSRpnt;
-	struct st_partstat    * STps;
-	char		      * name = tape_name(STp);
-
-	if ((STp->buffer)->writing) {
-		if (SRpnt == (STp->buffer)->last_SRpnt) {
-#if DEBUG
-			printk(OSST_DEB_MSG
-	 "%s:D: aSRpnt points to osst_request that write_behind_check will release -- cleared\n", name);
-#endif
-			*aSRpnt = SRpnt = NULL;
-		}
-#if DEBUG
-		else if (SRpnt)
-			printk(OSST_DEB_MSG
-	 "%s:D: aSRpnt does not point to osst_request that write_behind_check will release -- strange\n", name);
-#endif
-		osst_write_behind_check(STp);
-		if ((STp->buffer)->syscall_result) {
-#if DEBUG
-			if (debugging)
-				printk(OSST_DEB_MSG "%s:D: Async write error (flush) %x.\n",
-				       name, (STp->buffer)->midlevel_result);
-#endif
-			if ((STp->buffer)->midlevel_result == INT_MAX)
-				return (-ENOSPC);
-			return (-EIO);
-		}
-	}
-
-	result = 0;
-	if (STp->dirty == 1) {
-
-		STp->write_count++;
-		STps     = &(STp->ps[STp->partition]);
-		STps->rw = ST_WRITING;
-		offset   = STp->buffer->buffer_bytes;
-		blks     = (offset + STp->block_size - 1) / STp->block_size;
-		transfer = OS_FRAME_SIZE;
-		
-		if (offset < OS_DATA_SIZE)
-			osst_zero_buffer_tail(STp->buffer);
-
-		if (STp->poll)
-			if (osst_wait_frame (STp, aSRpnt, STp->first_frame_position, -50, 120))
-				result = osst_recover_wait_frame(STp, aSRpnt, 1);
-
-		memset(cmd, 0, MAX_COMMAND_SIZE);
-		cmd[0] = WRITE_6;
-		cmd[1] = 1;
-		cmd[4] = 1;
-
-		switch	(STp->write_type) {
-		   case OS_WRITE_DATA:
-#if DEBUG
-   			if (debugging)
-				printk(OSST_DEB_MSG "%s:D: Writing %d blocks to frame %d, lblks %d-%d\n",
-					name, blks, STp->frame_seq_number, 
-					STp->logical_blk_num - blks, STp->logical_blk_num - 1);
-#endif
-			osst_init_aux(STp, OS_FRAME_TYPE_DATA, STp->frame_seq_number++,
-				      STp->logical_blk_num - blks, STp->block_size, blks);
-			break;
-		   case OS_WRITE_EOD:
-			osst_init_aux(STp, OS_FRAME_TYPE_EOD, STp->frame_seq_number++,
-				      STp->logical_blk_num, 0, 0);
-			break;
-		   case OS_WRITE_NEW_MARK:
-			osst_init_aux(STp, OS_FRAME_TYPE_MARKER, STp->frame_seq_number++,
-				      STp->logical_blk_num++, 0, blks=1);
-			break;
-		   case OS_WRITE_HEADER:
-			osst_init_aux(STp, OS_FRAME_TYPE_HEADER, 0, 0, 0, blks=0);
-			break;
-		default: /* probably FILLER */
-			osst_init_aux(STp, OS_FRAME_TYPE_FILL, 0, 0, 0, 0);
-		}
-#if DEBUG
-		if (debugging)
-			printk(OSST_DEB_MSG "%s:D: Flushing %d bytes, Transferring %d bytes in %d lblocks.\n",
-			  			 name, offset, transfer, blks);
-#endif
-
-		SRpnt = osst_do_scsi(*aSRpnt, STp, cmd, transfer, DMA_TO_DEVICE,
-					      STp->timeout, MAX_RETRIES, 1);
-		*aSRpnt = SRpnt;
-		if (!SRpnt)
-			return (-EBUSY);
-
-		if ((STp->buffer)->syscall_result != 0) {
-#if DEBUG
-			printk(OSST_DEB_MSG
-				"%s:D: write sense [0]=0x%02x [2]=%02x [12]=%02x [13]=%02x\n",
-				name, SRpnt->sense[0], SRpnt->sense[2],
-				SRpnt->sense[12], SRpnt->sense[13]);
-#endif
-			if ((SRpnt->sense[0] & 0x70) == 0x70 &&
-			    (SRpnt->sense[2] & 0x40) && /* FIXME - SC-30 drive doesn't assert EOM bit */
-			    (SRpnt->sense[2] & 0x0f) == NO_SENSE) {
-				STp->dirty = 0;
-				(STp->buffer)->buffer_bytes = 0;
-				result = (-ENOSPC);
-			}
-			else {
-				if (osst_write_error_recovery(STp, aSRpnt, 1)) {
-					printk(KERN_ERR "%s:E: Error on flush write.\n", name);
-					result = (-EIO);
-				}
-			}
-			STps->drv_block = (-1);		/* FIXME - even if write recovery succeeds? */
-		}
-		else {
-			STp->first_frame_position++;
-			STp->dirty = 0;
-			(STp->buffer)->buffer_bytes = 0;
-		}
-	}
-#if DEBUG
-	printk(OSST_DEB_MSG "%s:D: Exit flush write buffer with code %d\n", name, result);
-#endif
-	return result;
-}
-
-
-/* Flush the tape buffer. The tape will be positioned correctly unless
-   seek_next is true. */
-static int osst_flush_buffer(struct osst_tape * STp, struct osst_request ** aSRpnt, int seek_next)
-{
-	struct st_partstat * STps;
-	int    backspace = 0, result = 0;
-#if DEBUG
-	char * name = tape_name(STp);
-#endif
-
-	/*
-	 * If there was a bus reset, block further access
-	 * to this device.
-	 */
-	if( STp->pos_unknown)
-		return (-EIO);
-
-	if (STp->ready != ST_READY)
-		return 0;
-
-	STps = &(STp->ps[STp->partition]);
-	if (STps->rw == ST_WRITING || STp->dirty) {	/* Writing */
-		STp->write_type = OS_WRITE_DATA;
-		return osst_flush_write_buffer(STp, aSRpnt);
-	}
-	if (STp->block_size == 0)
-		return 0;
-
-#if DEBUG
-	printk(OSST_DEB_MSG "%s:D: Reached flush (read) buffer\n", name);
-#endif
-
-	if (!STp->can_bsr) {
-		backspace = ((STp->buffer)->buffer_bytes + (STp->buffer)->read_pointer) / STp->block_size -
-			    ((STp->buffer)->read_pointer + STp->block_size - 1        ) / STp->block_size ;
-		(STp->buffer)->buffer_bytes = 0;
-		(STp->buffer)->read_pointer = 0;
-		STp->frame_in_buffer = 0;		/* FIXME is this relevant w. OSST? */
-	}
-
-	if (!seek_next) {
-		if (STps->eof == ST_FM_HIT) {
-			result = cross_eof(STp, aSRpnt, 0); /* Back over the EOF hit */
-			if (!result)
-				STps->eof = ST_NOEOF;
-			else {
-				if (STps->drv_file >= 0)
-					STps->drv_file++;
-				STps->drv_block = 0;
-			}
-		}
-		if (!result && backspace > 0)	/* TODO -- design and run a test case for this */
-			result = osst_seek_logical_blk(STp, aSRpnt, STp->logical_blk_num - backspace);
-	}
-	else if (STps->eof == ST_FM_HIT) {
-		if (STps->drv_file >= 0)
-			STps->drv_file++;
-		STps->drv_block = 0;
-		STps->eof = ST_NOEOF;
-	}
-
-	return result;
-}
-
-static int osst_write_frame(struct osst_tape * STp, struct osst_request ** aSRpnt, int synchronous)
-{
-	unsigned char		cmd[MAX_COMMAND_SIZE];
-	struct osst_request   * SRpnt;
-	int			blks;
-#if DEBUG
-	char		      * name = tape_name(STp);
-#endif
-
-	if ((!STp->raw) && (STp->first_frame_position == 0xbae)) { /* _must_ preserve buffer! */
-#if DEBUG
-		printk(OSST_DEB_MSG "%s:D: Reaching config partition.\n", name);
-#endif
-		if (osst_flush_drive_buffer(STp, aSRpnt) < 0) {
-			return (-EIO);
-		}
-		/* error recovery may have bumped us past the header partition */
-		if (osst_get_frame_position(STp, aSRpnt) < 0xbb8) {
-#if DEBUG
-			printk(OSST_DEB_MSG "%s:D: Skipping over config partition.\n", name);
-#endif
-			osst_position_tape_and_confirm(STp, aSRpnt, 0xbb8);
-		}
-	}
-
-	if (STp->poll)
-		if (osst_wait_frame (STp, aSRpnt, STp->first_frame_position, -48, 120))
-			if (osst_recover_wait_frame(STp, aSRpnt, 1))
-				return (-EIO);
-
-//	osst_build_stats(STp, &SRpnt);
-
-	STp->ps[STp->partition].rw = ST_WRITING;
-	STp->write_type            = OS_WRITE_DATA;
-			
-	memset(cmd, 0, MAX_COMMAND_SIZE);
-	cmd[0]   = WRITE_6;
-	cmd[1]   = 1;
-	cmd[4]   = 1;						/* one frame at a time... */
-	blks     = STp->buffer->buffer_bytes / STp->block_size;
-#if DEBUG
-	if (debugging)
-		printk(OSST_DEB_MSG "%s:D: Writing %d blocks to frame %d, lblks %d-%d\n", name, blks, 
-			STp->frame_seq_number, STp->logical_blk_num - blks, STp->logical_blk_num - 1);
-#endif
-	osst_init_aux(STp, OS_FRAME_TYPE_DATA, STp->frame_seq_number++,
-		      STp->logical_blk_num - blks, STp->block_size, blks);
-
-#if DEBUG
-	if (!synchronous)
-		STp->write_pending = 1;
-#endif
-	SRpnt = osst_do_scsi(*aSRpnt, STp, cmd, OS_FRAME_SIZE, DMA_TO_DEVICE, STp->timeout,
-									MAX_RETRIES, synchronous);
-	if (!SRpnt)
-		return (-EBUSY);
-	*aSRpnt = SRpnt;
-
-	if (synchronous) {
-		if (STp->buffer->syscall_result != 0) {
-#if DEBUG
-			if (debugging)
-				printk(OSST_DEB_MSG "%s:D: Error on write:\n", name);
-#endif
-			if ((SRpnt->sense[0] & 0x70) == 0x70 &&
-			    (SRpnt->sense[2] & 0x40)) {
-				if ((SRpnt->sense[2] & 0x0f) == VOLUME_OVERFLOW)
-					return (-ENOSPC);
-			}
-			else {
-				if (osst_write_error_recovery(STp, aSRpnt, 1))
-					return (-EIO);
-			}
-		}
-		else
-			STp->first_frame_position++;
-	}
-
-	STp->write_count++;
-
-	return 0;
-}
-
-/* Lock or unlock the drive door. Don't use while a struct osst_request is allocated. */
-static int do_door_lock(struct osst_tape * STp, int do_lock)
-{
-	int retval;
-
-#if DEBUG
-	printk(OSST_DEB_MSG "%s:D: %socking drive door.\n", tape_name(STp), do_lock ? "L" : "Unl");
-#endif
-
-	retval = scsi_set_medium_removal(STp->device,
-			do_lock ? SCSI_REMOVAL_PREVENT : SCSI_REMOVAL_ALLOW);
-	if (!retval)
-		STp->door_locked = do_lock ? ST_LOCKED_EXPLICIT : ST_UNLOCKED;
-	else
-		STp->door_locked = ST_LOCK_FAILS;
-	return retval;
-}
-
-/* Set the internal state after reset */
-static void reset_state(struct osst_tape *STp)
-{
-	int i;
-	struct st_partstat *STps;
-
-	STp->pos_unknown = 0;
-	for (i = 0; i < ST_NBR_PARTITIONS; i++) {
-		STps = &(STp->ps[i]);
-		STps->rw = ST_IDLE;
-		STps->eof = ST_NOEOF;
-		STps->at_sm = 0;
-		STps->last_block_valid = 0;
-		STps->drv_block = -1;
-		STps->drv_file = -1;
-	}
-}
-				
-
-/* Entry points to osst */
-
-/* Write command */
-static ssize_t osst_write(struct file * filp, const char __user * buf, size_t count, loff_t *ppos)
-{
-	ssize_t		      total, retval = 0;
-	ssize_t		      i, do_count, blks, transfer;
-	int		      write_threshold;
-	int		      doing_write = 0;
-	const char   __user * b_point;
-	struct osst_request * SRpnt = NULL;
-	struct st_modedef   * STm;
-	struct st_partstat  * STps;
-	struct osst_tape    * STp  = filp->private_data;
-	char		    * name = tape_name(STp);
-
-
-	if (mutex_lock_interruptible(&STp->lock))
-		return (-ERESTARTSYS);
-
-	/*
-	 * If we are in the middle of error recovery, don't let anyone
-	 * else try and use this device.  Also, if error recovery fails, it
-	 * may try and take the device offline, in which case all further
-	 * access to the device is prohibited.
-	 */
-	if( !scsi_block_when_processing_errors(STp->device) ) {
-		retval = (-ENXIO);
-		goto out;
-	}
-	
-	if (STp->ready != ST_READY) {
-		if (STp->ready == ST_NO_TAPE)
-			retval = (-ENOMEDIUM);
-		else
-			retval = (-EIO);
-		goto out;
-	}
-	STm = &(STp->modes[STp->current_mode]);
-	if (!STm->defined) {
-		retval = (-ENXIO);
-		goto out;
-	}
-	if (count == 0)
-		goto out;
-
-	/*
-	 * If there was a bus reset, block further access
-	 * to this device.
-	 */
-	if (STp->pos_unknown) {
-		retval = (-EIO);
-		goto out;
-	}
-
-#if DEBUG
-	if (!STp->in_use) {
-		printk(OSST_DEB_MSG "%s:D: Incorrect device.\n", name);
-		retval = (-EIO);
-		goto out;
-	}
-#endif
-
-	if (STp->write_prot) {
-		retval = (-EACCES);
-		goto out;
-	}
-
-	/* Write must be integral number of blocks */
-	if (STp->block_size != 0 && (count % STp->block_size) != 0) {
-		printk(KERN_ERR "%s:E: Write (%zd bytes) not multiple of tape block size (%d%c).\n",
-				       name, count, STp->block_size<1024?
-				       STp->block_size:STp->block_size/1024, STp->block_size<1024?'b':'k');
-		retval = (-EINVAL);
-		goto out;
-	}
-
-	if (STp->first_frame_position >= STp->capacity - OSST_EOM_RESERVE) {
-		printk(KERN_ERR "%s:E: Write truncated at EOM early warning (frame %d).\n",
-				       name, STp->first_frame_position);
-		retval = (-ENOSPC);
-		goto out;
-	}
-
-	if (STp->do_auto_lock && STp->door_locked == ST_UNLOCKED && !do_door_lock(STp, 1))
-		STp->door_locked = ST_LOCKED_AUTO;
-
-	STps = &(STp->ps[STp->partition]);
-
-	if (STps->rw == ST_READING) {
-#if DEBUG
-		printk(OSST_DEB_MSG "%s:D: Switching from read to write at file %d, block %d\n", name, 
-					STps->drv_file, STps->drv_block);
-#endif
-		retval = osst_flush_buffer(STp, &SRpnt, 0);
-		if (retval)
-			goto out;
-		STps->rw = ST_IDLE;
-	}
-	if (STps->rw != ST_WRITING) {
-		/* Are we totally rewriting this tape? */
-		if (!STp->header_ok ||
-		    (STp->first_frame_position == STp->first_data_ppos && STps->drv_block < 0) ||
-		    (STps->drv_file == 0 && STps->drv_block == 0)) {
-			STp->wrt_pass_cntr++;
-#if DEBUG
-			printk(OSST_DEB_MSG "%s:D: Allocating next write pass counter: %d\n",
-						  name, STp->wrt_pass_cntr);
-#endif
-			osst_reset_header(STp, &SRpnt);
-			STps->drv_file = STps->drv_block = 0;
-		}
-		/* Do we know where we'll be writing on the tape? */
-		else {
-			if ((STp->fast_open && osst_verify_position(STp, &SRpnt)) ||
-			  		STps->drv_file < 0 || STps->drv_block < 0) {
-				if (STp->first_frame_position == STp->eod_frame_ppos) {	/* at EOD */
-			  		STps->drv_file = STp->filemark_cnt;
-			  		STps->drv_block = 0;
-				}
-				else {
-					/* We have no idea where the tape is positioned - give up */
-#if DEBUG
-					printk(OSST_DEB_MSG
-						"%s:D: Cannot write at indeterminate position.\n", name);
-#endif
-					retval = (-EIO);
-					goto out;
-				}
-			}
-			if ((STps->drv_file + STps->drv_block) > 0 && STps->drv_file < STp->filemark_cnt) {
-				STp->filemark_cnt = STps->drv_file;
-				STp->last_mark_ppos =
-				       	ntohl(STp->header_cache->dat_fm_tab.fm_tab_ent[STp->filemark_cnt-1]);
-				printk(KERN_WARNING
-					"%s:W: Overwriting file %d with old write pass counter %d\n",
-						name, STps->drv_file, STp->wrt_pass_cntr);
-				printk(KERN_WARNING
-					"%s:W: may lead to stale data being accepted on reading back!\n",
-						name);
-#if DEBUG
-				printk(OSST_DEB_MSG
-				  "%s:D: resetting filemark count to %d and last mark ppos,lbn to %d,%d\n",
-					name, STp->filemark_cnt, STp->last_mark_ppos, STp->last_mark_lbn);
-#endif
-			}
-		}
-		STp->fast_open = 0;
-	}
-	if (!STp->header_ok) {
-#if DEBUG
-		printk(OSST_DEB_MSG "%s:D: Write cannot proceed without valid headers\n", name);
-#endif
-		retval = (-EIO);
-		goto out;
-	}
-
-	if ((STp->buffer)->writing) {
-		if (SRpnt)
-			printk(KERN_ERR "%s:A: Not supposed to have SRpnt at line %d\n", name, __LINE__);
-		osst_write_behind_check(STp);
-		if ((STp->buffer)->syscall_result) {
-#if DEBUG
-			if (debugging)
-				printk(OSST_DEB_MSG "%s:D: Async write error (write) %x.\n", name,
-							 (STp->buffer)->midlevel_result);
-#endif
-			if ((STp->buffer)->midlevel_result == INT_MAX)
-				STps->eof = ST_EOM_OK;
-			else
-				STps->eof = ST_EOM_ERROR;
-		}
-	}
-	if (STps->eof == ST_EOM_OK) {
-		retval = (-ENOSPC);
-		goto out;
-	}
-	else if (STps->eof == ST_EOM_ERROR) {
-		retval = (-EIO);
-		goto out;
-	}
-
-	/* Check the buffer readability in cases where copy_from_user might
-	   catch the problems only after some tape movement. */
-	if ((copy_from_user(&i, buf, 1) != 0 ||
-	     copy_from_user(&i, buf + count - 1, 1) != 0)) {
-		retval = (-EFAULT);
-		goto out;
-	}
-
-	if (!STm->do_buffer_writes) {
-		write_threshold = 1;
-	}
-	else
-		write_threshold = (STp->buffer)->buffer_blocks * STp->block_size;
-	if (!STm->do_async_writes)
-		write_threshold--;
-
-	total = count;
-#if DEBUG
-	if (debugging)
-		printk(OSST_DEB_MSG "%s:D: Writing %d bytes to file %d block %d lblk %d fseq %d fppos %d\n",
-				name, (int) count, STps->drv_file, STps->drv_block,
-				STp->logical_blk_num, STp->frame_seq_number, STp->first_frame_position);
-#endif
-	b_point = buf;
-	while ((STp->buffer)->buffer_bytes + count > write_threshold)
-	{
-		doing_write = 1;
-		do_count = (STp->buffer)->buffer_blocks * STp->block_size -
-			   (STp->buffer)->buffer_bytes;
-		if (do_count > count)
-			do_count = count;
-
-		i = append_to_buffer(b_point, STp->buffer, do_count);
-		if (i) {
-			retval = i;
-			goto out;
-		}
-
-		blks = do_count / STp->block_size;
-		STp->logical_blk_num += blks;  /* logical_blk_num is incremented as data is moved from user */
-  
-		i = osst_write_frame(STp, &SRpnt, 1);
-
-		if (i == (-ENOSPC)) {
-			transfer = STp->buffer->writing;	/* FIXME -- check this logic */
-			if (transfer <= do_count) {
-				*ppos += do_count - transfer;
-				count -= do_count - transfer;
-				if (STps->drv_block >= 0) {
-					STps->drv_block += (do_count - transfer) / STp->block_size;
-				}
-				STps->eof = ST_EOM_OK;
-				retval = (-ENOSPC);		/* EOM within current request */
-#if DEBUG
-				if (debugging)
-				      printk(OSST_DEB_MSG "%s:D: EOM with %d bytes unwritten.\n",
-							     name, (int) transfer);
-#endif
-			}
-			else {
-				STps->eof = ST_EOM_ERROR;
-				STps->drv_block = (-1);		/* Too cautious? */
-				retval = (-EIO);		/* EOM for old data */
-#if DEBUG
-				if (debugging)
-				      printk(OSST_DEB_MSG "%s:D: EOM with lost data.\n", name);
-#endif
-			}
-		}
-		else
-			retval = i;
-			
-		if (retval < 0) {
-			if (SRpnt != NULL) {
-				osst_release_request(SRpnt);
-				SRpnt = NULL;
-			}
-			STp->buffer->buffer_bytes = 0;
-			STp->dirty = 0;
-			if (count < total)
-				retval = total - count;
-			goto out;
-		}
-
-		*ppos += do_count;
-		b_point += do_count;
-		count -= do_count;
-		if (STps->drv_block >= 0) {
-			STps->drv_block += blks;
-		}
-		STp->buffer->buffer_bytes = 0;
-		STp->dirty = 0;
-	}  /* end while write threshold exceeded */
-
-	if (count != 0) {
-		STp->dirty = 1;
-		i = append_to_buffer(b_point, STp->buffer, count);
-		if (i) {
-			retval = i;
-			goto out;
-		}
-		blks = count / STp->block_size;
-		STp->logical_blk_num += blks;
-		if (STps->drv_block >= 0) {
-			STps->drv_block += blks;
-		}
-		*ppos += count;
-		count = 0;
-	}
-
-	if (doing_write && (STp->buffer)->syscall_result != 0) {
-		retval = (STp->buffer)->syscall_result;
-		goto out;
-	}
-
-	if (STm->do_async_writes && ((STp->buffer)->buffer_bytes >= STp->write_threshold)) { 
-		/* Schedule an asynchronous write */
-		(STp->buffer)->writing = ((STp->buffer)->buffer_bytes /
-					   STp->block_size) * STp->block_size;
-		STp->dirty = !((STp->buffer)->writing ==
-				          (STp->buffer)->buffer_bytes);
-
-		i = osst_write_frame(STp, &SRpnt, 0);
-		if (i < 0) {
-			retval = (-EIO);
-			goto out;
-		}
-		SRpnt = NULL;			/* Prevent releasing this request! */
-	}
-	STps->at_sm &= (total == 0);
-	if (total > 0)
-		STps->eof = ST_NOEOF;
-
-	retval = total;
-
-out:
-	if (SRpnt != NULL) osst_release_request(SRpnt);
-
-	mutex_unlock(&STp->lock);
-
-	return retval;
-}
-
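To make the buffering policy above concrete: write_threshold is buffer_blocks * block_size bytes when buffered writes are enabled, lowered by one byte when async writes are off so that an exactly full frame still triggers the synchronous flush inside the loop. With hypothetical numbers:

	/*
	 * Assume block_size == 512 and buffer_blocks == 64 (one 32KB data frame):
	 *
	 *   buffered + async writes : threshold = 64 * 512     == 32768
	 *   buffered, sync only     : threshold = 64 * 512 - 1 == 32767
	 *   unbuffered, sync        : threshold = 1 - 1        == 0  (flush every write)
	 *
	 * With sync writes, 32768 pending bytes exceed 32767 and are written via
	 * osst_write_frame(STp, &SRpnt, 1) in the loop; with async writes they are
	 * left for the deferred osst_write_frame(STp, &SRpnt, 0) path at the end.
	 */
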
-
-/* Read command */
-static ssize_t osst_read(struct file * filp, char __user * buf, size_t count, loff_t *ppos)
-{
-	ssize_t		      total, retval = 0;
-	ssize_t		      i, transfer;
-	int		      special;
-	struct st_modedef   * STm;
-	struct st_partstat  * STps;
-	struct osst_request * SRpnt = NULL;
-	struct osst_tape    * STp   = filp->private_data;
-	char		    * name  = tape_name(STp);
-
-
-	if (mutex_lock_interruptible(&STp->lock))
-		return (-ERESTARTSYS);
-
-	/*
-	 * If we are in the middle of error recovery, don't let anyone
-	 * else try and use this device.  Also, if error recovery fails, it
-	 * may try and take the device offline, in which case all further
-	 * access to the device is prohibited.
-	 */
-	if( !scsi_block_when_processing_errors(STp->device) ) {
-		retval = (-ENXIO);
-		goto out;
-	}
-	
-	if (STp->ready != ST_READY) {
-		if (STp->ready == ST_NO_TAPE)
-			retval = (-ENOMEDIUM);
-		else
-			retval = (-EIO);
-		goto out;
-	}
-	STm = &(STp->modes[STp->current_mode]);
-	if (!STm->defined) {
-		retval = (-ENXIO);
-		goto out;
-	}
-#if DEBUG
-	if (!STp->in_use) {
-		printk(OSST_DEB_MSG "%s:D: Incorrect device.\n", name);
-		retval = (-EIO);
-		goto out;
-	}
-#endif
-	/* Must have initialized medium */
-	if (!STp->header_ok) {
-		retval = (-EIO);
-		goto out;
-	}
-
-	if (STp->do_auto_lock && STp->door_locked == ST_UNLOCKED && !do_door_lock(STp, 1))
-		STp->door_locked = ST_LOCKED_AUTO;
-
-	STps = &(STp->ps[STp->partition]);
-	if (STps->rw == ST_WRITING) {
-		retval = osst_flush_buffer(STp, &SRpnt, 0);
-		if (retval)
-			goto out;
-		STps->rw = ST_IDLE;
-		/* FIXME -- this may leave the tape without EOD and up2date headers */
-	}
-
-	if ((count % STp->block_size) != 0) {
-		printk(KERN_WARNING
-		    "%s:W: Read (%zd bytes) not multiple of tape block size (%d%c).\n", name, count,
-		    STp->block_size<1024?STp->block_size:STp->block_size/1024, STp->block_size<1024?'b':'k');
-	}
-
-#if DEBUG
-	if (debugging && STps->eof != ST_NOEOF)
-		printk(OSST_DEB_MSG "%s:D: EOF/EOM flag up (%d). Bytes %d\n", name,
-				     STps->eof, (STp->buffer)->buffer_bytes);
-#endif
-	if ((STp->buffer)->buffer_bytes == 0 &&
-	     STps->eof >= ST_EOD_1) {
-		if (STps->eof < ST_EOD) {
-			STps->eof += 1;
-			retval = 0;
-			goto out;
-		}
-		retval = (-EIO);  /* EOM or Blank Check */
-		goto out;
-	}
-
-	/* Check the buffer writability before any tape movement. Don't alter
-	   buffer data. */
-	if (copy_from_user(&i, buf, 1)             != 0 ||
-	    copy_to_user  (buf, &i, 1)             != 0 ||
-	    copy_from_user(&i, buf + count - 1, 1) != 0 ||
-	    copy_to_user  (buf + count - 1, &i, 1) != 0) {
-		retval = (-EFAULT);
-		goto out;
-	}
-
-	/* Loop until enough data in buffer or a special condition found */
-	for (total = 0, special = 0; total < count - STp->block_size + 1 && !special; ) {
-
-		/* Get new data if the buffer is empty */
-		if ((STp->buffer)->buffer_bytes == 0) {
-			if (STps->eof == ST_FM_HIT)
-				break;
-			special = osst_get_logical_frame(STp, &SRpnt, STp->frame_seq_number, 0);
-			if (special < 0) { 			/* No need to continue read */
-				STp->frame_in_buffer = 0;
-				retval = special;
-				goto out;
-			}
-		}
-
-		/* Move the data from driver buffer to user buffer */
-		if ((STp->buffer)->buffer_bytes > 0) {
-#if DEBUG
-			if (debugging && STps->eof != ST_NOEOF)
-			    printk(OSST_DEB_MSG "%s:D: EOF up (%d). Left %d, needed %d.\n", name,
-						 STps->eof, (STp->buffer)->buffer_bytes, (int) (count - total));
-#endif
-			/* force multiple of block size, note block_size may have been adjusted */
-			transfer = (((STp->buffer)->buffer_bytes < count - total ?
-				     (STp->buffer)->buffer_bytes : count - total)/
-					STp->block_size) * STp->block_size;
-
-			if (transfer == 0) {
-				printk(KERN_WARNING
-				  "%s:W: Nothing can be transferred, requested %zd, tape block size (%d%c).\n",
-			   		name, count, STp->block_size < 1024?
-					STp->block_size:STp->block_size/1024,
-				       	STp->block_size<1024?'b':'k');
-				break;
-			}
-			i = from_buffer(STp->buffer, buf, transfer);
-			if (i)  {
-				retval = i;
-				goto out;
-			}
-			STp->logical_blk_num += transfer / STp->block_size;
-			STps->drv_block      += transfer / STp->block_size;
-			*ppos          += transfer;
-			buf                  += transfer;
-			total                += transfer;
-		}
- 
-		if ((STp->buffer)->buffer_bytes == 0) {
-#if DEBUG
-			if (debugging)
-				printk(OSST_DEB_MSG "%s:D: Finished with frame %d\n",
-					       	name, STp->frame_seq_number);
-#endif
-			STp->frame_in_buffer = 0;
-			STp->frame_seq_number++;              /* frame to look for next time */
-		}
-	} /* for (total = 0, special = 0; total < count && !special; ) */
-
-	/* Change the eof state if no data from tape or buffer */
-	if (total == 0) {
-		if (STps->eof == ST_FM_HIT) {
-			STps->eof = (STp->first_frame_position >= STp->eod_frame_ppos)?ST_EOD_2:ST_FM;
-			STps->drv_block = 0;
-			if (STps->drv_file >= 0)
-				STps->drv_file++;
-		}
-		else if (STps->eof == ST_EOD_1) {
-			STps->eof = ST_EOD_2;
-			if (STps->drv_block > 0 && STps->drv_file >= 0)
-				STps->drv_file++;
-			STps->drv_block = 0;
-		}
-		else if (STps->eof == ST_EOD_2)
-			STps->eof = ST_EOD;
-	}
-	else if (STps->eof == ST_FM)
-		STps->eof = ST_NOEOF;
-
-	retval = total;
-
-out:
-	if (SRpnt != NULL) osst_release_request(SRpnt);
-
-	mutex_unlock(&STp->lock);
-
-	return retval;
-}
-
-
-/* Set the driver options */
-static void osst_log_options(struct osst_tape *STp, struct st_modedef *STm, char *name)
-{
-  printk(KERN_INFO
-"%s:I: Mode %d options: buffer writes: %d, async writes: %d, read ahead: %d\n",
-	 name, STp->current_mode, STm->do_buffer_writes, STm->do_async_writes,
-	 STm->do_read_ahead);
-  printk(KERN_INFO
-"%s:I:    can bsr: %d, two FMs: %d, fast mteom: %d, auto lock: %d,\n",
-	 name, STp->can_bsr, STp->two_fm, STp->fast_mteom, STp->do_auto_lock);
-  printk(KERN_INFO
-"%s:I:    defs for wr: %d, no block limits: %d, partitions: %d, s2 log: %d\n",
-	 name, STm->defaults_for_writes, STp->omit_blklims, STp->can_partitions,
-	 STp->scsi2_logical);
-  printk(KERN_INFO
-"%s:I:    sysv: %d\n", name, STm->sysv);
-#if DEBUG
-  printk(KERN_INFO
-	 "%s:D:    debugging: %d\n",
-	 name, debugging);
-#endif
-}
-
-
-static int osst_set_options(struct osst_tape *STp, long options)
-{
-	int		    value;
-	long		    code;
-	struct st_modedef * STm;
-	char		  * name = tape_name(STp);
-
-	STm = &(STp->modes[STp->current_mode]);
-	if (!STm->defined) {
-		memcpy(STm, &(STp->modes[0]), sizeof(*STm));
-		modes_defined = 1;
-#if DEBUG
-		if (debugging)
-			printk(OSST_DEB_MSG "%s:D: Initialized mode %d definition from mode 0\n",
-					     name, STp->current_mode);
-#endif
-	}
-
-	code = options & MT_ST_OPTIONS;
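-	/* The MT_ST_OPTIONS bits of the argument select the option class
-	 * (booleans, thresholds, defaults, ...); the remaining low-order
-	 * bits carry the value for that class. */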
-	if (code == MT_ST_BOOLEANS) {
-		STm->do_buffer_writes = (options & MT_ST_BUFFER_WRITES) != 0;
-		STm->do_async_writes  = (options & MT_ST_ASYNC_WRITES) != 0;
-		STm->defaults_for_writes = (options & MT_ST_DEF_WRITES) != 0;
-		STm->do_read_ahead    = (options & MT_ST_READ_AHEAD) != 0;
-		STp->two_fm	      = (options & MT_ST_TWO_FM) != 0;
-		STp->fast_mteom	      = (options & MT_ST_FAST_MTEOM) != 0;
-		STp->do_auto_lock     = (options & MT_ST_AUTO_LOCK) != 0;
-		STp->can_bsr          = (options & MT_ST_CAN_BSR) != 0;
-		STp->omit_blklims     = (options & MT_ST_NO_BLKLIMS) != 0;
-		if ((STp->device)->scsi_level >= SCSI_2)
-			STp->can_partitions = (options & MT_ST_CAN_PARTITIONS) != 0;
-		STp->scsi2_logical    = (options & MT_ST_SCSI2LOGICAL) != 0;
-		STm->sysv	      = (options & MT_ST_SYSV) != 0;
-#if DEBUG
-		debugging = (options & MT_ST_DEBUGGING) != 0;
-#endif
-		osst_log_options(STp, STm, name);
-	}
-	else if (code == MT_ST_SETBOOLEANS || code == MT_ST_CLEARBOOLEANS) {
-		value = (code == MT_ST_SETBOOLEANS);
-		if ((options & MT_ST_BUFFER_WRITES) != 0)
-			STm->do_buffer_writes = value;
-		if ((options & MT_ST_ASYNC_WRITES) != 0)
-			STm->do_async_writes = value;
-		if ((options & MT_ST_DEF_WRITES) != 0)
-			STm->defaults_for_writes = value;
-		if ((options & MT_ST_READ_AHEAD) != 0)
-			STm->do_read_ahead = value;
-		if ((options & MT_ST_TWO_FM) != 0)
-			STp->two_fm = value;
-		if ((options & MT_ST_FAST_MTEOM) != 0)
-			STp->fast_mteom = value;
-		if ((options & MT_ST_AUTO_LOCK) != 0)
-			STp->do_auto_lock = value;
-		if ((options & MT_ST_CAN_BSR) != 0)
-			STp->can_bsr = value;
-		if ((options & MT_ST_NO_BLKLIMS) != 0)
-			STp->omit_blklims = value;
-		if ((STp->device)->scsi_level >= SCSI_2 &&
-		    (options & MT_ST_CAN_PARTITIONS) != 0)
-			STp->can_partitions = value;
-		if ((options & MT_ST_SCSI2LOGICAL) != 0)
-			STp->scsi2_logical = value;
-		if ((options & MT_ST_SYSV) != 0)
-			STm->sysv = value;
-#if DEBUG
-		if ((options & MT_ST_DEBUGGING) != 0)
-			debugging = value;
-#endif
-		osst_log_options(STp, STm, name);
-	}
-	else if (code == MT_ST_WRITE_THRESHOLD) {
-		value = (options & ~MT_ST_OPTIONS) * ST_KILOBYTE;
-		if (value < 1 || value > osst_buffer_size) {
-			printk(KERN_WARNING "%s:W: Write threshold %d too small or too large.\n",
-					     name, value);
-			return (-EIO);
-		}
-		STp->write_threshold = value;
-		printk(KERN_INFO "%s:I: Write threshold set to %d bytes.\n",
-				  name, value);
-	}
-	else if (code == MT_ST_DEF_BLKSIZE) {
-		value = (options & ~MT_ST_OPTIONS);
-		if (value == ~MT_ST_OPTIONS) {
-			STm->default_blksize = (-1);
-			printk(KERN_INFO "%s:I: Default block size disabled.\n", name);
-		}
-		else {
-			if (value < 512 || value > OS_DATA_SIZE || OS_DATA_SIZE % value) {
-				printk(KERN_WARNING "%s:W: Default block size cannot be set to %d.\n",
-							 name, value);
-				return (-EINVAL);
-			}
-			STm->default_blksize = value;
-			printk(KERN_INFO "%s:I: Default block size set to %d bytes.\n",
-					  name, STm->default_blksize);
-		}
-	}
-	else if (code == MT_ST_TIMEOUTS) {
-		value = (options & ~MT_ST_OPTIONS);
-		if ((value & MT_ST_SET_LONG_TIMEOUT) != 0) {
-			STp->long_timeout = (value & ~MT_ST_SET_LONG_TIMEOUT) * HZ;
-			printk(KERN_INFO "%s:I: Long timeout set to %d seconds.\n", name,
-					     (value & ~MT_ST_SET_LONG_TIMEOUT));
-		}
-		else {
-			STp->timeout = value * HZ;
-			printk(KERN_INFO "%s:I: Normal timeout set to %d seconds.\n", name, value);
-		}
-	}
-	else if (code == MT_ST_DEF_OPTIONS) {
-		code = (options & ~MT_ST_CLEAR_DEFAULT);
-		value = (options & MT_ST_CLEAR_DEFAULT);
-		if (code == MT_ST_DEF_DENSITY) {
-			if (value == MT_ST_CLEAR_DEFAULT) {
-				STm->default_density = (-1);
-				printk(KERN_INFO "%s:I: Density default disabled.\n", name);
-			}
-			else {
-				STm->default_density = value & 0xff;
-				printk(KERN_INFO "%s:I: Density default set to %x\n",
-						  name, STm->default_density);
-			}
-		}
-		else if (code == MT_ST_DEF_DRVBUFFER) {
-			if (value == MT_ST_CLEAR_DEFAULT) {
-				STp->default_drvbuffer = 0xff;
-				printk(KERN_INFO "%s:I: Drive buffer default disabled.\n", name);
-			}
-			else {
-				STp->default_drvbuffer = value & 7;
-				printk(KERN_INFO "%s:I: Drive buffer default set to %x\n",
-						  name, STp->default_drvbuffer);
-			}
-		}
-		else if (code == MT_ST_DEF_COMPRESSION) {
-			if (value == MT_ST_CLEAR_DEFAULT) {
-				STm->default_compression = ST_DONT_TOUCH;
-				printk(KERN_INFO "%s:I: Compression default disabled.\n", name);
-			}
-			else {
-				STm->default_compression = (value & 1 ? ST_YES : ST_NO);
-				printk(KERN_INFO "%s:I: Compression default set to %x\n",
-						  name, (value & 1));
-			}
-		}
-	}
-	else
-		return (-EIO);
-
-	return 0;
-}
-
-
-/* Internal ioctl function */
-static int osst_int_ioctl(struct osst_tape * STp, struct osst_request ** aSRpnt,
-			     unsigned int cmd_in, unsigned long arg)
-{
-	int			timeout;
-	long			ltmp;
-	int			i, ioctl_result;
-	int			chg_eof = 1;
-	unsigned char		cmd[MAX_COMMAND_SIZE];
-	struct osst_request   * SRpnt = * aSRpnt;
-	struct st_partstat    * STps;
-	int			fileno, blkno, at_sm, frame_seq_numbr, logical_blk_num;
-	int			datalen = 0, direction = DMA_NONE;
-	char		      * name = tape_name(STp);
-
-	if (STp->ready != ST_READY && cmd_in != MTLOAD) {
-		if (STp->ready == ST_NO_TAPE)
-			return (-ENOMEDIUM);
-		else
-			return (-EIO);
-	}
-	timeout = STp->long_timeout;
-	STps = &(STp->ps[STp->partition]);
-	fileno = STps->drv_file;
-	blkno = STps->drv_block;
-	at_sm = STps->at_sm;
-	frame_seq_numbr = STp->frame_seq_number;
-	logical_blk_num = STp->logical_blk_num;
-
-	memset(cmd, 0, MAX_COMMAND_SIZE);
-	switch (cmd_in) {
-	 case MTFSFM:
-		chg_eof = 0; /* Like MTFSF below, but leave the EOF state unchanged */
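-		/* fall through */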
-	 case MTFSF:
-		if (STp->raw)
-		   return (-EIO);
-		if (STp->linux_media)
-		   ioctl_result = osst_space_over_filemarks_forward_fast(STp, &SRpnt, cmd_in, arg);
-		else
-		   ioctl_result = osst_space_over_filemarks_forward_slow(STp, &SRpnt, cmd_in, arg);
-		if (fileno >= 0)
-		   fileno += arg;
-		blkno = 0;
-		at_sm &= (arg == 0);
-		goto os_bypass;
-
-	 case MTBSF:
-		chg_eof = 0; /* Like MTBSFM below, but leave the EOF state unchanged */
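-		/* fall through */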
-	 case MTBSFM:
-		if (STp->raw)
-		   return (-EIO);
-		ioctl_result = osst_space_over_filemarks_backward(STp, &SRpnt, cmd_in, arg);
-		if (fileno >= 0)
-		   fileno -= arg;
-		blkno = (-1);  /* We can't know the block number */
-		at_sm &= (arg == 0);
-		goto os_bypass;
-
-	 case MTFSR:
-	 case MTBSR:
-#if DEBUG
-		if (debugging)
-		   printk(OSST_DEB_MSG "%s:D: Skipping %lu blocks %s from logical block %d\n",
-				name, arg, cmd_in==MTFSR?"forward":"backward", logical_blk_num);
-#endif
-		if (cmd_in == MTFSR) {
-		   logical_blk_num += arg;
-		   if (blkno >= 0) blkno += arg;
-		}
-		else {
-		   logical_blk_num -= arg;
-		   if (blkno >= 0) blkno -= arg;
-		}
-		ioctl_result = osst_seek_logical_blk(STp, &SRpnt, logical_blk_num);
-		fileno = STps->drv_file;
-		blkno  = STps->drv_block;
-		at_sm &= (arg == 0);
-		goto os_bypass;
-
-	 case MTFSS:
-		cmd[0] = SPACE;
-		cmd[1] = 0x04; /* Space Setmarks */   /* FIXME -- OS can't do this? */
-		cmd[2] = (arg >> 16);
-		cmd[3] = (arg >> 8);
-		cmd[4] = arg;
-#if DEBUG
-		if (debugging)
-			printk(OSST_DEB_MSG "%s:D: Spacing tape forward %d setmarks.\n", name,
-				cmd[2] * 65536 + cmd[3] * 256 + cmd[4]);
-#endif
-		if (arg != 0) {
-			blkno = fileno = (-1);
-			at_sm = 1;
-		}
-		break;
-	 case MTBSS:
-		cmd[0] = SPACE;
-		cmd[1] = 0x04; /* Space Setmarks */   /* FIXME -- OS can't do this? */
-		ltmp = (-arg);
-		cmd[2] = (ltmp >> 16);
-		cmd[3] = (ltmp >> 8);
-		cmd[4] = ltmp;
-#if DEBUG
-		if (debugging) {
-			if (cmd[2] & 0x80)
-			   ltmp = 0xff000000;
-			ltmp = ltmp | (cmd[2] << 16) | (cmd[3] << 8) | cmd[4];
-			printk(OSST_DEB_MSG "%s:D: Spacing tape backward %ld setmarks.\n",
-						name, (-ltmp));
-		 }
-#endif
-		 if (arg != 0) {
-			blkno = fileno = (-1);
-			at_sm = 1;
-		 }
-		 break;
-	 case MTWEOF:
-		 if ((STps->rw == ST_WRITING || STp->dirty) && !STp->pos_unknown) {
-			STp->write_type = OS_WRITE_DATA;
-			ioctl_result = osst_flush_write_buffer(STp, &SRpnt);
-		 } else
-			ioctl_result = 0;
-#if DEBUG
-		 if (debugging)
-			printk(OSST_DEB_MSG "%s:D: Writing %ld filemark(s).\n", name, arg);
-#endif
-		 for (i=0; i<arg; i++)
-			ioctl_result |= osst_write_filemark(STp, &SRpnt);
-		 if (fileno >= 0) fileno += arg;
-		 if (blkno  >= 0) blkno   = 0;
-		 goto os_bypass;
-
-	 case MTWSM:
-		 if (STp->write_prot)
-			return (-EACCES);
-		 if (!STp->raw)
-			return 0;
-		 cmd[0] = WRITE_FILEMARKS;   /* FIXME -- need OS version */
-		 if (cmd_in == MTWSM)
-			 cmd[1] = 2;
-		 cmd[2] = (arg >> 16);
-		 cmd[3] = (arg >> 8);
-		 cmd[4] = arg;
-		 timeout = STp->timeout;
-#if DEBUG
-		 if (debugging)
-			printk(OSST_DEB_MSG "%s:D: Writing %d setmark(s).\n", name,
-				cmd[2] * 65536 + cmd[3] * 256 + cmd[4]);
-#endif
-		 if (fileno >= 0)
-			fileno += arg;
-		 blkno = 0;
-		 at_sm = (cmd_in == MTWSM);
-		 break;
-	 case MTOFFL:
-	 case MTLOAD:
-	 case MTUNLOAD:
-	 case MTRETEN:
-		 cmd[0] = START_STOP;
-		 cmd[1] = 1;			/* Don't wait for completion */
-		 if (cmd_in == MTLOAD) {
-		     if (STp->ready == ST_NO_TAPE)
-			 cmd[4] = 4;		/* open tray */
-		      else
-			 cmd[4] = 1;		/* load */
-		 }
-		 if (cmd_in == MTRETEN)
-			 cmd[4] = 3;		/* retension then mount */
-		 if (cmd_in == MTOFFL)
-			 cmd[4] = 4;		/* rewind then eject */
-		 timeout = STp->timeout;
-#if DEBUG
-		 if (debugging) {
-			 switch (cmd_in) {
-				 case MTUNLOAD:
-					 printk(OSST_DEB_MSG "%s:D: Unloading tape.\n", name);
-					 break;
-				 case MTLOAD:
-					 printk(OSST_DEB_MSG "%s:D: Loading tape.\n", name);
-					 break;
-				 case MTRETEN:
-					 printk(OSST_DEB_MSG "%s:D: Retensioning tape.\n", name);
-					 break;
-				 case MTOFFL:
-					 printk(OSST_DEB_MSG "%s:D: Ejecting tape.\n", name);
-					 break;
-			 }
-		 }
-#endif
-		 fileno = blkno = at_sm = frame_seq_numbr = logical_blk_num = 0;
-		 break;
-	 case MTNOP:
-#if DEBUG
-		 if (debugging)
-			 printk(OSST_DEB_MSG "%s:D: No-op on tape.\n", name);
-#endif
-		 return 0;	/* Should this do something? */
-	 case MTEOM:
-#if DEBUG
-		if (debugging)
-		   printk(OSST_DEB_MSG "%s:D: Spacing to end of recorded medium.\n", name);
-#endif
-		if ((osst_position_tape_and_confirm(STp, &SRpnt, STp->eod_frame_ppos) < 0) ||
-			    (osst_get_logical_frame(STp, &SRpnt, -1, 0)               < 0)) {
-		   ioctl_result = -EIO;
-		   goto os_bypass;
-		}
-		if (STp->buffer->aux->frame_type != OS_FRAME_TYPE_EOD) {
-#if DEBUG
-		   printk(OSST_DEB_MSG "%s:D: No EOD frame found where expected.\n", name);
-#endif
-		   ioctl_result = -EIO;
-		   goto os_bypass;
-		}
-		ioctl_result = osst_set_frame_position(STp, &SRpnt, STp->eod_frame_ppos, 0);
-		fileno = STp->filemark_cnt;
-		blkno  = at_sm = 0;
-		goto os_bypass;
-
-	 case MTERASE:
-		if (STp->write_prot)
-		   return (-EACCES);
-		ioctl_result = osst_reset_header(STp, &SRpnt);
-		i = osst_write_eod(STp, &SRpnt);
-		if (i < ioctl_result) ioctl_result = i;
-		i = osst_position_tape_and_confirm(STp, &SRpnt, STp->eod_frame_ppos);
-		if (i < ioctl_result) ioctl_result = i;
-		fileno = blkno = at_sm = 0;
-		goto os_bypass;
-
-	 case MTREW:
-		cmd[0] = REZERO_UNIT; /* rewind */
-		cmd[1] = 1;
-#if DEBUG
-		if (debugging)
-		   printk(OSST_DEB_MSG "%s:D: Rewinding tape, Immed=%d.\n", name, cmd[1]);
-#endif
-		fileno = blkno = at_sm = frame_seq_numbr = logical_blk_num = 0;
-		break;
-
-	 case MTSETBLK:           /* Set block length */
-		 if ((STps->drv_block == 0 )			  &&
-		     !STp->dirty				  &&
-		     ((STp->buffer)->buffer_bytes == 0)		  &&
-		     ((arg & MT_ST_BLKSIZE_MASK) >= 512 )	  && 
-		     ((arg & MT_ST_BLKSIZE_MASK) <= OS_DATA_SIZE) &&
-		     !(OS_DATA_SIZE % (arg & MT_ST_BLKSIZE_MASK))  ) {
-			 /*
-			  * Only allowed to change the block size if you opened the
-			  * device at the beginning of a file before writing anything.
-			  * Note, that when reading, changing block_size is futile,
-			  * as the size used when writing overrides it.
-			  */
-			 STp->block_size = (arg & MT_ST_BLKSIZE_MASK);
-			 printk(KERN_INFO "%s:I: Block size set to %d bytes.\n",
-					   name, STp->block_size);
-			 return 0;
-		 }
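-		 /* fall through */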
-	 case MTSETDENSITY:       /* Set tape density */
-	 case MTSETDRVBUFFER:     /* Set drive buffering */
-	 case SET_DENS_AND_BLK:   /* Set density and block size */
-		 chg_eof = 0;
-		 if (STp->dirty || (STp->buffer)->buffer_bytes != 0)
-			 return (-EIO);       /* Not allowed if data in buffer */
-		 if ((cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK) &&
-		     (arg & MT_ST_BLKSIZE_MASK) != 0                    &&
-		     (arg & MT_ST_BLKSIZE_MASK) != STp->block_size       ) {
-			 printk(KERN_WARNING "%s:W: Illegal to set block size to %d%s.\n",
-						name, (int)(arg & MT_ST_BLKSIZE_MASK),
-						(OS_DATA_SIZE % (arg & MT_ST_BLKSIZE_MASK))?"":" now");
-			 return (-EINVAL);
-		 }
-		 return 0;  /* FIXME silently ignore if block size didn't change */
-
-	 default:
-		return (-ENOSYS);
-	}
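-	/* Cases that jumped to os_bypass have already done their work via
-	 * driver-internal frame operations; only the cases that fell out of
-	 * the switch with a prepared CDB reach the SCSI dispatch below. */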
-
-	SRpnt = osst_do_scsi(SRpnt, STp, cmd, datalen, direction, timeout, MAX_RETRIES, 1);
-
-	ioctl_result = (STp->buffer)->syscall_result;
-
-	if (!SRpnt) {
-#if DEBUG
-		printk(OSST_DEB_MSG "%s:D: Couldn't exec scsi cmd for IOCTL\n", name);
-#endif
-		return ioctl_result;
-	}
-
-	if (!ioctl_result) {  /* SCSI command successful */
-		STp->frame_seq_number = frame_seq_numbr;
-		STp->logical_blk_num  = logical_blk_num;
-	}
-
-os_bypass:
-#if DEBUG
-	if (debugging)
-		printk(OSST_DEB_MSG "%s:D: IOCTL (%d) Result=%d\n", name, cmd_in, ioctl_result);
-#endif
-
-	if (!ioctl_result) {				/* success */
-
-		if (cmd_in == MTFSFM) {
-			 fileno--;
-			 blkno--;
-		}
-		if (cmd_in == MTBSFM) {
-			 fileno++;
-			 blkno++;
-		}
-		STps->drv_block = blkno;
-		STps->drv_file = fileno;
-		STps->at_sm = at_sm;
-
-		if (cmd_in == MTEOM)
-			STps->eof = ST_EOD;
-		else if ((cmd_in == MTFSFM || cmd_in == MTBSF) && STps->eof == ST_FM_HIT) {
-			ioctl_result = osst_seek_logical_blk(STp, &SRpnt, STp->logical_blk_num-1);
-			STps->drv_block++;
-			STp->logical_blk_num++;
-			STp->frame_seq_number++;
-			STp->frame_in_buffer = 0;
-			STp->buffer->read_pointer = 0;
-		}
-		else if (cmd_in == MTFSF)
-			STps->eof = (STp->first_frame_position >= STp->eod_frame_ppos)?ST_EOD:ST_FM;
-		else if (chg_eof)
-			STps->eof = ST_NOEOF;
-
-		if (cmd_in == MTOFFL || cmd_in == MTUNLOAD)
-			STp->rew_at_close = 0;
-		else if (cmd_in == MTLOAD) {
-			for (i=0; i < ST_NBR_PARTITIONS; i++) {
-			    STp->ps[i].rw = ST_IDLE;
-			    STp->ps[i].last_block_valid = 0;/* FIXME - where else is this field maintained? */
-			}
-			STp->partition = 0;
-		}
-
-		if (cmd_in == MTREW) {
-			ioctl_result = osst_position_tape_and_confirm(STp, &SRpnt, STp->first_data_ppos); 
-			if (ioctl_result > 0)
-				ioctl_result = 0;
-		}
-
-	} else if (cmd_in == MTBSF || cmd_in == MTBSFM ) {
-		if (osst_position_tape_and_confirm(STp, &SRpnt, STp->first_data_ppos) < 0)
-			STps->drv_file = STps->drv_block = -1;
-		else
-			STps->drv_file = STps->drv_block = 0;
-		STps->eof = ST_NOEOF;
-	} else if (cmd_in == MTFSF || cmd_in == MTFSFM) {
-		if (osst_position_tape_and_confirm(STp, &SRpnt, STp->eod_frame_ppos) < 0)
-			STps->drv_file = STps->drv_block = -1;
-		else {
-			STps->drv_file  = STp->filemark_cnt;
-			STps->drv_block = 0;
-		}
-		STps->eof = ST_EOD;
-	} else if (cmd_in == MTBSR || cmd_in == MTFSR || cmd_in == MTWEOF || cmd_in == MTEOM) {
-		STps->drv_file = STps->drv_block = (-1);
-		STps->eof = ST_NOEOF;
-		STp->header_ok = 0;
-	} else if (cmd_in == MTERASE) {
-		STp->header_ok = 0;
-	} else if (SRpnt) {  /* SCSI command was not completely successful. */
-		if (SRpnt->sense[2] & 0x40) {
-			STps->eof = ST_EOM_OK;
-			STps->drv_block = 0;
-		}
-		if (chg_eof)
-			STps->eof = ST_NOEOF;
-
-		if ((SRpnt->sense[2] & 0x0f) == BLANK_CHECK)
-			STps->eof = ST_EOD;
-
-		if (cmd_in == MTLOAD && osst_wait_for_medium(STp, &SRpnt, 60))
-			ioctl_result = osst_wait_ready(STp, &SRpnt, 5 * 60, OSST_WAIT_POSITION_COMPLETE);
-	}
-	*aSRpnt = SRpnt;
-
-	return ioctl_result;
-}
-
-
-/* Open the device */
-static int __os_scsi_tape_open(struct inode * inode, struct file * filp)
-{
-	unsigned short	      flags;
-	int		      i, b_size, new_session = 0, retval = 0;
-	unsigned char	      cmd[MAX_COMMAND_SIZE];
-	struct osst_request * SRpnt = NULL;
-	struct osst_tape    * STp;
-	struct st_modedef   * STm;
-	struct st_partstat  * STps;
-	char		    * name;
-	int		      dev  = TAPE_NR(inode);
-	int		      mode = TAPE_MODE(inode);
-
-	/*
-	 * We really want to do nonseekable_open(inode, filp); here, but some
-	 * versions of tar incorrectly call lseek on tapes and bail out if that
-	 * fails.  So we disallow pread() and pwrite(), but permit lseeks.
-	 */
-	filp->f_mode &= ~(FMODE_PREAD | FMODE_PWRITE);
-
-	write_lock(&os_scsi_tapes_lock);
-	if (dev >= osst_max_dev || os_scsi_tapes == NULL ||
-	    (STp = os_scsi_tapes[dev]) == NULL || !STp->device) {
-		write_unlock(&os_scsi_tapes_lock);
-		return (-ENXIO);
-	}
-
-	name = tape_name(STp);
-
-	if (STp->in_use) {
-		write_unlock(&os_scsi_tapes_lock);
-#if DEBUG
-		printk(OSST_DEB_MSG "%s:D: Device already in use.\n", name);
-#endif
-		return (-EBUSY);
-	}
-	if (scsi_device_get(STp->device)) {
-		write_unlock(&os_scsi_tapes_lock);
-#if DEBUG
-                printk(OSST_DEB_MSG "%s:D: Failed scsi_device_get.\n", name);
-#endif
-		return (-ENXIO);
-	}
-	filp->private_data = STp;
-	STp->in_use = 1;
-	write_unlock(&os_scsi_tapes_lock);
-	STp->rew_at_close = TAPE_REWIND(inode);
-
-	if( !scsi_block_when_processing_errors(STp->device) ) {
-		return -ENXIO;
-	}
-
-	if (mode != STp->current_mode) {
-#if DEBUG
-		if (debugging)
-			printk(OSST_DEB_MSG "%s:D: Mode change from %d to %d.\n",
-					       name, STp->current_mode, mode);
-#endif
-		new_session = 1;
-		STp->current_mode = mode;
-	}
-	STm = &(STp->modes[STp->current_mode]);
-
-	flags = filp->f_flags;
-	STp->write_prot = ((flags & O_ACCMODE) == O_RDONLY);
-
-	STp->raw = TAPE_IS_RAW(inode);
-	if (STp->raw)
-		STp->header_ok = 0;
-
-	/* Allocate data segments for this device's tape buffer */
-	if (!enlarge_buffer(STp->buffer, STp->restr_dma)) {
-		printk(KERN_ERR "%s:E: Unable to allocate memory segments for tape buffer.\n", name);
-		retval = (-EOVERFLOW);
-		goto err_out;
-	}
-	if (STp->buffer->buffer_size >= OS_FRAME_SIZE) {
-		for (i = 0, b_size = 0; 
-		     (i < STp->buffer->sg_segs) && ((b_size + STp->buffer->sg[i].length) <= OS_DATA_SIZE); 
-		     b_size += STp->buffer->sg[i++].length);
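-		/* The AUX header lives at offset OS_DATA_SIZE within the
-		 * frame; point aux into the segment spanning that boundary. */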
-		STp->buffer->aux = (os_aux_t *) (page_address(sg_page(&STp->buffer->sg[i])) + OS_DATA_SIZE - b_size);
-#if DEBUG
-		printk(OSST_DEB_MSG "%s:D: b_data points to %p in segment 0 at %p\n", name,
-			STp->buffer->b_data, page_address(sg_page(&STp->buffer->sg[0])));
-		printk(OSST_DEB_MSG "%s:D: AUX points to %p in segment %d at %p\n", name,
-			STp->buffer->aux, i, page_address(sg_page(&STp->buffer->sg[i])));
-#endif
-	} else {
-		STp->buffer->aux = NULL; /* this had better never happen! */
-		printk(KERN_NOTICE "%s:A: Framesize %d too large for buffer.\n", name, OS_FRAME_SIZE);
-		retval = (-EIO);
-		goto err_out;
-	}
-	STp->buffer->writing = 0;
-	STp->buffer->syscall_result = 0;
-	STp->dirty = 0;
-	for (i=0; i < ST_NBR_PARTITIONS; i++) {
-		STps = &(STp->ps[i]);
-		STps->rw = ST_IDLE;
-	}
-	STp->ready = ST_READY;
-#if DEBUG
-	STp->nbr_waits = STp->nbr_finished = 0;
-#endif
-
-	memset (cmd, 0, MAX_COMMAND_SIZE);
-	cmd[0] = TEST_UNIT_READY;
-
-	SRpnt = osst_do_scsi(NULL, STp, cmd, 0, DMA_NONE, STp->timeout, MAX_RETRIES, 1);
-	if (!SRpnt) {
-		retval = (STp->buffer)->syscall_result;		/* FIXME - valid? */
-		goto err_out;
-	}
-	if ((SRpnt->sense[0] & 0x70) == 0x70      &&
-	    (SRpnt->sense[2] & 0x0f) == NOT_READY &&
-	     SRpnt->sense[12]        == 4         ) {
-#if DEBUG
-		printk(OSST_DEB_MSG "%s:D: Unit not ready, cause %x\n", name, SRpnt->sense[13]);
-#endif
-		if (filp->f_flags & O_NONBLOCK) {
-			retval = -EAGAIN;
-			goto err_out;
-		}
-		if (SRpnt->sense[13] == 2) {	/* initialize command required (LOAD) */
-			memset (cmd, 0, MAX_COMMAND_SIZE);
-        		cmd[0] = START_STOP;
-			cmd[1] = 1;
-			cmd[4] = 1;
-			SRpnt = osst_do_scsi(SRpnt, STp, cmd, 0, DMA_NONE,
-					     STp->timeout, MAX_RETRIES, 1);
-		}
-		osst_wait_ready(STp, &SRpnt, (SRpnt->sense[13]==1?15:3) * 60, 0);
-	}
-	if ((SRpnt->sense[0] & 0x70) == 0x70 &&
-	    (SRpnt->sense[2] & 0x0f) == UNIT_ATTENTION) { /* New media? */
-#if DEBUG
-		printk(OSST_DEB_MSG "%s:D: Unit wants attention\n", name);
-#endif
-		STp->header_ok = 0;
-
-		for (i=0; i < 10; i++) {
-
-			memset (cmd, 0, MAX_COMMAND_SIZE);
-			cmd[0] = TEST_UNIT_READY;
-
-			SRpnt = osst_do_scsi(SRpnt, STp, cmd, 0, DMA_NONE,
-					     STp->timeout, MAX_RETRIES, 1);
-			if ((SRpnt->sense[0] & 0x70) != 0x70 ||
-			    (SRpnt->sense[2] & 0x0f) != UNIT_ATTENTION)
-				break;
-		}
-
-		STp->pos_unknown = 0;
-		STp->partition = STp->new_partition = 0;
-		if (STp->can_partitions)
-			STp->nbr_partitions = 1;  /* This guess will be updated later if necessary */
-		for (i=0; i < ST_NBR_PARTITIONS; i++) {
-			STps = &(STp->ps[i]);
-			STps->rw = ST_IDLE;		/* FIXME - seems to be redundant... */
-			STps->eof = ST_NOEOF;
-			STps->at_sm = 0;
-			STps->last_block_valid = 0;
-			STps->drv_block = 0;
-			STps->drv_file = 0 ;
-		}
-		new_session = 1;
-		STp->recover_count = 0;
-		STp->abort_count = 0;
-	}
-	/*
-	 * if we have valid headers from before, and the drive/tape seem untouched,
-	 * open without reconfiguring and re-reading the headers
-	 */
-	if (!STp->buffer->syscall_result && STp->header_ok &&
-	    !SRpnt->result && SRpnt->sense[0] == 0) {
-
-		memset(cmd, 0, MAX_COMMAND_SIZE);
-		cmd[0] = MODE_SENSE;
-		cmd[1] = 8;
-		cmd[2] = VENDOR_IDENT_PAGE;
-		cmd[4] = VENDOR_IDENT_PAGE_LENGTH + MODE_HEADER_LENGTH;
-
-		SRpnt = osst_do_scsi(SRpnt, STp, cmd, cmd[4], DMA_FROM_DEVICE, STp->timeout, 0, 1);
-
-		if (STp->buffer->syscall_result                     ||
-		    STp->buffer->b_data[MODE_HEADER_LENGTH + 2] != 'L' ||
-		    STp->buffer->b_data[MODE_HEADER_LENGTH + 3] != 'I' ||
-		    STp->buffer->b_data[MODE_HEADER_LENGTH + 4] != 'N' ||
-		    STp->buffer->b_data[MODE_HEADER_LENGTH + 5] != '4'  ) {
-#if DEBUG
-			printk(OSST_DEB_MSG "%s:D: Signature was changed to %c%c%c%c\n", name,
-			  STp->buffer->b_data[MODE_HEADER_LENGTH + 2],
-			  STp->buffer->b_data[MODE_HEADER_LENGTH + 3],
-			  STp->buffer->b_data[MODE_HEADER_LENGTH + 4],
-			  STp->buffer->b_data[MODE_HEADER_LENGTH + 5]);
-#endif
-			STp->header_ok = 0;
-		}
-		i = STp->first_frame_position;
-		if (STp->header_ok && i == osst_get_frame_position(STp, &SRpnt)) {
-			if (STp->door_locked == ST_UNLOCKED) {
-				if (do_door_lock(STp, 1))
-					printk(KERN_INFO "%s:I: Can't lock drive door\n", name);
-				else
-					STp->door_locked = ST_LOCKED_AUTO;
-			}
-			if (!STp->frame_in_buffer) {
-				STp->block_size = (STm->default_blksize > 0) ?
-							STm->default_blksize : OS_DATA_SIZE;
-				STp->buffer->buffer_bytes = STp->buffer->read_pointer = 0;
-			}
-			STp->buffer->buffer_blocks = OS_DATA_SIZE / STp->block_size;
-			STp->fast_open = 1;
-			osst_release_request(SRpnt);
-			return 0;
-		}
-#if DEBUG
-		if (i != STp->first_frame_position)
-			printk(OSST_DEB_MSG "%s:D: Tape position changed from %d to %d\n",
-						name, i, STp->first_frame_position);
-#endif
-		STp->header_ok = 0;
-	}
-	STp->fast_open = 0;
-
-	if ((STp->buffer)->syscall_result != 0 &&   /* in all error conditions except no medium */ 
-	    (SRpnt->sense[2] != 2 || SRpnt->sense[12] != 0x3A) ) {
-
-		memset(cmd, 0, MAX_COMMAND_SIZE);
-		cmd[0] = MODE_SELECT;
-		cmd[1] = 0x10;
-		cmd[4] = 4 + MODE_HEADER_LENGTH;
-
-		(STp->buffer)->b_data[0] = cmd[4] - 1;
-		(STp->buffer)->b_data[1] = 0;			/* Medium Type - ignoring */
-		(STp->buffer)->b_data[2] = 0;			/* Reserved */
-		(STp->buffer)->b_data[3] = 0;			/* Block Descriptor Length */
-		(STp->buffer)->b_data[MODE_HEADER_LENGTH + 0] = 0x3f;
-		(STp->buffer)->b_data[MODE_HEADER_LENGTH + 1] = 1;
-		(STp->buffer)->b_data[MODE_HEADER_LENGTH + 2] = 2;
-		(STp->buffer)->b_data[MODE_HEADER_LENGTH + 3] = 3;
-
-#if DEBUG
-		printk(OSST_DEB_MSG "%s:D: Applying soft reset\n", name);
-#endif
-		SRpnt = osst_do_scsi(SRpnt, STp, cmd, cmd[4], DMA_TO_DEVICE, STp->timeout, 0, 1);
-
-		STp->header_ok = 0;
-
-		for (i=0; i < 10; i++) {
-
-			memset (cmd, 0, MAX_COMMAND_SIZE);
-			cmd[0] = TEST_UNIT_READY;
-
-			SRpnt = osst_do_scsi(SRpnt, STp, cmd, 0, DMA_NONE,
-						    STp->timeout, MAX_RETRIES, 1);
-			if ((SRpnt->sense[0] & 0x70) != 0x70 ||
-			    (SRpnt->sense[2] & 0x0f) == NOT_READY)
-				break;
-
-			if ((SRpnt->sense[2] & 0x0f) == UNIT_ATTENTION) {
-				int j;
-
-				STp->pos_unknown = 0;
-				STp->partition = STp->new_partition = 0;
-				if (STp->can_partitions)
-					STp->nbr_partitions = 1;  /* This guess will be updated later if necessary */
-				for (j = 0; j < ST_NBR_PARTITIONS; j++) {
-					STps = &(STp->ps[j]);
-					STps->rw = ST_IDLE;
-					STps->eof = ST_NOEOF;
-					STps->at_sm = 0;
-					STps->last_block_valid = 0;
-					STps->drv_block = 0;
-					STps->drv_file = 0 ;
-				}
-				new_session = 1;
-			}
-		}
-	}
-
-	if (osst_wait_ready(STp, &SRpnt, 15 * 60, 0))		/* FIXME - not allowed with NOBLOCK */
-		 printk(KERN_INFO "%s:I: Device did not become Ready in open\n", name);
-
-	if ((STp->buffer)->syscall_result != 0) {
-		if ((STp->device)->scsi_level >= SCSI_2 &&
-		    (SRpnt->sense[0] & 0x70) == 0x70 &&
-		    (SRpnt->sense[2] & 0x0f) == NOT_READY &&
-		     SRpnt->sense[12] == 0x3a) { /* Check ASC */
-			STp->ready = ST_NO_TAPE;
-		} else
-			STp->ready = ST_NOT_READY;
-		osst_release_request(SRpnt);
-		SRpnt = NULL;
-		STp->density = 0;   	/* Clear the erroneous "residue" */
-		STp->write_prot = 0;
-		STp->block_size = 0;
-		STp->ps[0].drv_file = STp->ps[0].drv_block = (-1);
-		STp->partition = STp->new_partition = 0;
-		STp->door_locked = ST_UNLOCKED;
-		return 0;
-	}
-
-	osst_configure_onstream(STp, &SRpnt);
-
-	STp->block_size = STp->raw ? OS_FRAME_SIZE : (
-			     (STm->default_blksize > 0) ? STm->default_blksize : OS_DATA_SIZE);
-	STp->buffer->buffer_blocks = STp->raw ? 1 : OS_DATA_SIZE / STp->block_size;
-	STp->buffer->buffer_bytes  =
-	STp->buffer->read_pointer  =
-	STp->frame_in_buffer       = 0;
-
-#if DEBUG
-	if (debugging)
-		printk(OSST_DEB_MSG "%s:D: Block size: %d, frame size: %d, buffer size: %d (%d blocks).\n",
-		     name, STp->block_size, OS_FRAME_SIZE, (STp->buffer)->buffer_size,
-		     (STp->buffer)->buffer_blocks);
-#endif
-
-	if (STp->drv_write_prot) {
-		STp->write_prot = 1;
-#if DEBUG
-		if (debugging)
-			printk(OSST_DEB_MSG "%s:D: Write protected\n", name);
-#endif
-		if ((flags & O_ACCMODE) == O_WRONLY || (flags & O_ACCMODE) == O_RDWR) {
-			retval = (-EROFS);
-			goto err_out;
-		}
-	}
-
-	if (new_session) {  /* Change the drive parameters for the new mode */
-#if DEBUG
-		if (debugging)
-	printk(OSST_DEB_MSG "%s:D: New Session\n", name);
-#endif
-		STp->density_changed = STp->blksize_changed = 0;
-		STp->compression_changed = 0;
-	}
-
-	/*
-	 * properly position the tape and check the ADR headers
-	 */
-	if (STp->door_locked == ST_UNLOCKED) {
-		 if (do_door_lock(STp, 1))
-			printk(KERN_INFO "%s:I: Can't lock drive door\n", name);
-		 else
-			STp->door_locked = ST_LOCKED_AUTO;
-	}
-
-	osst_analyze_headers(STp, &SRpnt);
-
-	osst_release_request(SRpnt);
-	SRpnt = NULL;
-
-	return 0;
-
-err_out:
-	if (SRpnt != NULL)
-		osst_release_request(SRpnt);
-	normalize_buffer(STp->buffer);
-	STp->header_ok = 0;
-	STp->in_use = 0;
-	scsi_device_put(STp->device);
-
-	return retval;
-}
-
-/* BKL pushdown: spaghetti avoidance wrapper */
-static int os_scsi_tape_open(struct inode * inode, struct file * filp)
-{
-	int ret;
-
-	mutex_lock(&osst_int_mutex);
-	ret = __os_scsi_tape_open(inode, filp);
-	mutex_unlock(&osst_int_mutex);
-	return ret;
-}
-
-
-
-/* Flush the tape buffer before close */
-static int os_scsi_tape_flush(struct file * filp, fl_owner_t id)
-{
-	int		      result = 0, result2;
-	struct osst_tape    * STp    = filp->private_data;
-	struct st_modedef   * STm    = &(STp->modes[STp->current_mode]);
-	struct st_partstat  * STps   = &(STp->ps[STp->partition]);
-	struct osst_request * SRpnt  = NULL;
-	char		    * name   = tape_name(STp);
-
-	if (file_count(filp) > 1)
-		return 0;
-
-	if ((STps->rw == ST_WRITING || STp->dirty) && !STp->pos_unknown) {
-		STp->write_type = OS_WRITE_DATA;
-		result = osst_flush_write_buffer(STp, &SRpnt);
-		if (result != 0 && result != (-ENOSPC))
-			goto out;
-	}
-	if ( STps->rw >= ST_WRITING && !STp->pos_unknown) {
-
-#if DEBUG
-		if (debugging) {
-			printk(OSST_DEB_MSG "%s:D: File length %ld bytes.\n",
-					       name, (long)(filp->f_pos));
-			printk(OSST_DEB_MSG "%s:D: Async write waits %d, finished %d.\n",
-					       name, STp->nbr_waits, STp->nbr_finished);
-		}
-#endif
-		result = osst_write_trailer(STp, &SRpnt, !(STp->rew_at_close));
-#if DEBUG
-		if (debugging)
-			printk(OSST_DEB_MSG "%s:D: Buffer flushed, %d EOF(s) written\n",
-					       name, 1+STp->two_fm);
-#endif
-	}
-	else if (!STp->rew_at_close) {
-		STps = &(STp->ps[STp->partition]);
-		if (!STm->sysv || STps->rw != ST_READING) {
-			if (STp->can_bsr)
-				result = osst_flush_buffer(STp, &SRpnt, 0); /* this is the default path */
-			else if (STps->eof == ST_FM_HIT) {
-				result = cross_eof(STp, &SRpnt, 0);
-				if (result) {
-					if (STps->drv_file >= 0)
-						STps->drv_file++;
-					STps->drv_block = 0;
-					STps->eof = ST_FM;
-				}
-				else
-					STps->eof = ST_NOEOF;
-			}
-		}
-		else if ((STps->eof == ST_NOEOF &&
-			  !(result = cross_eof(STp, &SRpnt, 1))) ||
-			 STps->eof == ST_FM_HIT) {
-			if (STps->drv_file >= 0)
-				STps->drv_file++;
-			STps->drv_block = 0;
-			STps->eof = ST_FM;
-		}
-	}
-
-out:
-	if (STp->rew_at_close) {
-		result2 = osst_position_tape_and_confirm(STp, &SRpnt, STp->first_data_ppos);
-		STps->drv_file = STps->drv_block = STp->frame_seq_number = STp->logical_blk_num = 0;
-		if (result == 0 && result2 < 0)
-			result = result2;
-	}
-	if (SRpnt) osst_release_request(SRpnt);
-
-	if (STp->abort_count || STp->recover_count) {
-		printk(KERN_INFO "%s:I:", name);
-		if (STp->abort_count)
-			printk(" %d unrecovered errors", STp->abort_count);
-		if (STp->recover_count)
-			printk(" %d recovered errors", STp->recover_count);
-		if (STp->write_count)
-			printk(" in %d frames written", STp->write_count);
-		if (STp->read_count)
-			printk(" in %d frames read", STp->read_count);
-		printk("\n");
-		STp->recover_count = 0;
-		STp->abort_count   = 0;
-	}
-	STp->write_count = 0;
-	STp->read_count  = 0;
-
-	return result;
-}
-
-
-/* Close the device and release it */
-static int os_scsi_tape_close(struct inode * inode, struct file * filp)
-{
-	int		      result = 0;
-	struct osst_tape    * STp    = filp->private_data;
-
-	if (STp->door_locked == ST_LOCKED_AUTO)
-		do_door_lock(STp, 0);
-
-	if (STp->raw)
-		STp->header_ok = 0;
-	
-	normalize_buffer(STp->buffer);
-	write_lock(&os_scsi_tapes_lock);
-	STp->in_use = 0;
-	write_unlock(&os_scsi_tapes_lock);
-
-	scsi_device_put(STp->device);
-
-	return result;
-}
-
-
-/* The ioctl command */
-static long osst_ioctl(struct file * file,
-	 unsigned int cmd_in, unsigned long arg)
-{
-	int		      i, cmd_nr, cmd_type, blk, retval = 0;
-	struct st_modedef   * STm;
-	struct st_partstat  * STps;
-	struct osst_request * SRpnt = NULL;
-	struct osst_tape    * STp   = file->private_data;
-	char		    * name  = tape_name(STp);
-	void	    __user  * p     = (void __user *)arg;
-
-	mutex_lock(&osst_int_mutex);
-	if (mutex_lock_interruptible(&STp->lock)) {
-		mutex_unlock(&osst_int_mutex);
-		return -ERESTARTSYS;
-	}
-
-#if DEBUG
-	if (debugging && !STp->in_use) {
-		printk(OSST_DEB_MSG "%s:D: Incorrect device.\n", name);
-		retval = (-EIO);
-		goto out;
-	}
-#endif
-	STm = &(STp->modes[STp->current_mode]);
-	STps = &(STp->ps[STp->partition]);
-
-	/*
-	 * If we are in the middle of error recovery, don't let anyone
-	 * else try and use this device.  Also, if error recovery fails, it
-	 * may try and take the device offline, in which case all further
-	 * access to the device is prohibited.
-	 */
-	retval = scsi_ioctl_block_when_processing_errors(STp->device, cmd_in,
-			file->f_flags & O_NDELAY);
-	if (retval)
-		goto out;
-
-	cmd_type = _IOC_TYPE(cmd_in);
-	cmd_nr   = _IOC_NR(cmd_in);
-#if DEBUG
-	printk(OSST_DEB_MSG "%s:D: Ioctl %d,%d in %s mode\n", name,
-			    cmd_type, cmd_nr, STp->raw?"raw":"normal");
-#endif
-	if (cmd_type == _IOC_TYPE(MTIOCTOP) && cmd_nr == _IOC_NR(MTIOCTOP)) {
-		struct mtop mtc;
-		int    auto_weof = 0;
-
-		if (_IOC_SIZE(cmd_in) != sizeof(mtc)) {
-			retval = (-EINVAL);
-			goto out;
-		}
-
-		i = copy_from_user((char *) &mtc, p, sizeof(struct mtop));
-		if (i) {
-			retval = (-EFAULT);
-			goto out;
-		}
-
-		if (mtc.mt_op == MTSETDRVBUFFER && !capable(CAP_SYS_ADMIN)) {
-			printk(KERN_WARNING "%s:W: MTSETDRVBUFFER only allowed for root.\n", name);
-			retval = (-EPERM);
-			goto out;
-		}
-
-		if (!STm->defined && (mtc.mt_op != MTSETDRVBUFFER && (mtc.mt_count & MT_ST_OPTIONS) == 0)) {
-			retval = (-ENXIO);
-			goto out;
-		}
-
-		if (!STp->pos_unknown) {
-
-			if (STps->eof == ST_FM_HIT) {
-				if (mtc.mt_op == MTFSF || mtc.mt_op == MTFSFM || mtc.mt_op == MTEOM) {
-					mtc.mt_count -= 1;
-					if (STps->drv_file >= 0)
-						STps->drv_file += 1;
-				}
-				else if (mtc.mt_op == MTBSF || mtc.mt_op == MTBSFM) {
-					mtc.mt_count += 1;
-					if (STps->drv_file >= 0)
-						STps->drv_file += 1;
-				}
-			}
-
-			if (mtc.mt_op == MTSEEK) {
-				/* Old position must be restored if partition will be changed */
-				i = !STp->can_partitions || (STp->new_partition != STp->partition);
-			}
-			else {
-				i = mtc.mt_op == MTREW   || mtc.mt_op == MTOFFL ||
-				    mtc.mt_op == MTRETEN || mtc.mt_op == MTEOM  ||
-				    mtc.mt_op == MTLOCK  || mtc.mt_op == MTLOAD ||
-				    mtc.mt_op == MTFSF   || mtc.mt_op == MTFSFM ||
-				    mtc.mt_op == MTBSF   || mtc.mt_op == MTBSFM ||
-				    mtc.mt_op == MTCOMPRESSION;
-			}
-			i = osst_flush_buffer(STp, &SRpnt, i);
-			if (i < 0) {
-				retval = i;
-				goto out;
-			}
-		}
-		else {
-			/*
-			 * If there was a bus reset, block further access
-			 * to this device.  If the user wants to rewind the tape,
-			 * then reset the flag and allow access again.
-			 */
-			if(mtc.mt_op != MTREW   &&
-			   mtc.mt_op != MTOFFL  &&
-			   mtc.mt_op != MTRETEN &&
-			   mtc.mt_op != MTERASE &&
-			   mtc.mt_op != MTSEEK  &&
-			   mtc.mt_op != MTEOM)   {
-				retval = (-EIO);
-				goto out;
-			}
-			reset_state(STp);
-			/* remove this when the midlevel properly clears was_reset */
-			STp->device->was_reset = 0;
-		}
-
-		if (mtc.mt_op != MTCOMPRESSION  && mtc.mt_op != MTLOCK         &&
-		    mtc.mt_op != MTNOP          && mtc.mt_op != MTSETBLK       &&
-		    mtc.mt_op != MTSETDENSITY   && mtc.mt_op != MTSETDRVBUFFER && 
-		    mtc.mt_op != MTMKPART       && mtc.mt_op != MTSETPART      &&
-		    mtc.mt_op != MTWEOF         && mtc.mt_op != MTWSM           ) {
-
-			/*
-			 * The user tells us to move to another position on the tape.
-			 * If we were appending to the tape content, that would leave
-			 * the tape without proper end, in that case write EOD and
-			 * update the header to reflect its position.
-			 */
-#if DEBUG
-			printk(KERN_WARNING "%s:D: auto_weof %s at ffp=%d,efp=%d,fsn=%d,lbn=%d,fn=%d,bn=%d\n", name,
-					STps->rw >= ST_WRITING ? "write" : STps->rw == ST_READING ? "read" : "idle",
-					STp->first_frame_position, STp->eod_frame_ppos, STp->frame_seq_number,
-					STp->logical_blk_num, STps->drv_file, STps->drv_block );
-#endif
-			if (STps->rw >= ST_WRITING && STp->first_frame_position >= STp->eod_frame_ppos) {
-				auto_weof = ((STp->write_type != OS_WRITE_NEW_MARK) &&
-							!(mtc.mt_op == MTREW || mtc.mt_op == MTOFFL));
-				i = osst_write_trailer(STp, &SRpnt,
-							!(mtc.mt_op == MTREW || mtc.mt_op == MTOFFL));
-#if DEBUG
-				printk(KERN_WARNING "%s:D: post trailer xeof=%d,ffp=%d,efp=%d,fsn=%d,lbn=%d,fn=%d,bn=%d\n",
-						name, auto_weof, STp->first_frame_position, STp->eod_frame_ppos,
-						STp->frame_seq_number, STp->logical_blk_num, STps->drv_file, STps->drv_block );
-#endif
-				if (i < 0) {
-					retval = i;
-					goto out;
-				}
-			}
-			STps->rw = ST_IDLE;
-		}
-
-		if (mtc.mt_op == MTOFFL && STp->door_locked != ST_UNLOCKED)
-			do_door_lock(STp, 0);  /* Ignore result! */
-
-		if (mtc.mt_op == MTSETDRVBUFFER &&
-		   (mtc.mt_count & MT_ST_OPTIONS) != 0) {
-			retval = osst_set_options(STp, mtc.mt_count);
-			goto out;
-		}
-
-		if (mtc.mt_op == MTSETPART) {
-			if (mtc.mt_count >= STp->nbr_partitions)
-				retval = -EINVAL;
-			else {
-				STp->new_partition = mtc.mt_count;
-				retval = 0;
-			}
-			goto out;
-		}
-
-		if (mtc.mt_op == MTMKPART) {
-			if (!STp->can_partitions) {
-				retval = (-EINVAL);
-				goto out;
-			}
-			if ((i = osst_int_ioctl(STp, &SRpnt, MTREW, 0)) < 0 /*||
-			    (i = partition_tape(inode, mtc.mt_count)) < 0*/) {
-				retval = i;
-				goto out;
-			}
-			for (i=0; i < ST_NBR_PARTITIONS; i++) {
-				STp->ps[i].rw = ST_IDLE;
-				STp->ps[i].at_sm = 0;
-				STp->ps[i].last_block_valid = 0;
-			}
-			STp->partition = STp->new_partition = 0;
-			STp->nbr_partitions = 1;  /* Bad guess ?-) */
-			STps->drv_block = STps->drv_file = 0;
-			retval = 0;
-			goto out;
-		}
-
-		if (mtc.mt_op == MTSEEK) {
-			if (STp->raw)
-				i = osst_set_frame_position(STp, &SRpnt, mtc.mt_count, 0);
-			else
-				i = osst_seek_sector(STp, &SRpnt, mtc.mt_count);
-			if (!STp->can_partitions)
-				STp->ps[0].rw = ST_IDLE;
-			retval = i;
-			goto out;
-		}
- 
-		if (mtc.mt_op == MTLOCK || mtc.mt_op == MTUNLOCK) {
-			retval = do_door_lock(STp, (mtc.mt_op == MTLOCK));
-			goto out;
-		}
-
-		if (auto_weof)
-			cross_eof(STp, &SRpnt, 0);
-
-		if (mtc.mt_op == MTCOMPRESSION)
-			retval = -EINVAL;       /* OnStream drives don't have compression hardware */
-		else
-			/* MTBSF MTBSFM MTBSR MTBSS MTEOM MTERASE MTFSF MTFSFM MTFSR MTFSS
-			 * MTLOAD MTOFFL MTRESET MTRETEN MTREW MTUNLOAD MTWEOF MTWSM */
-			retval = osst_int_ioctl(STp, &SRpnt, mtc.mt_op, mtc.mt_count);
-		goto out;
-	}
-
-	if (!STm->defined) {
-		retval = (-ENXIO);
-		goto out;
-	}
-
-	if ((i = osst_flush_buffer(STp, &SRpnt, 0)) < 0) {
-		retval = i;
-		goto out;
-	}
-
-	if (cmd_type == _IOC_TYPE(MTIOCGET) && cmd_nr == _IOC_NR(MTIOCGET)) {
-		struct mtget mt_status;
-
-		if (_IOC_SIZE(cmd_in) != sizeof(struct mtget)) {
-			 retval = (-EINVAL);
-			 goto out;
-		}
-
-		mt_status.mt_type = MT_ISONSTREAM_SC;
-		mt_status.mt_erreg = STp->recover_erreg << MT_ST_SOFTERR_SHIFT;
-		mt_status.mt_dsreg =
-			((STp->block_size << MT_ST_BLKSIZE_SHIFT) & MT_ST_BLKSIZE_MASK) |
-			((STp->density    << MT_ST_DENSITY_SHIFT) & MT_ST_DENSITY_MASK);
-		mt_status.mt_blkno = STps->drv_block;
-		mt_status.mt_fileno = STps->drv_file;
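-		/* Adjust the reported block number for data still held in the
-		 * driver buffer: buffered but unwritten data counts as written,
-		 * read-ahead data as not yet read. */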
-		if (STp->block_size != 0) {
-			if (STps->rw == ST_WRITING)
-				mt_status.mt_blkno += (STp->buffer)->buffer_bytes / STp->block_size;
-			else if (STps->rw == ST_READING)
-				mt_status.mt_blkno -= ((STp->buffer)->buffer_bytes +
-							STp->block_size - 1) / STp->block_size;
-		}
-
-		mt_status.mt_gstat = 0;
-		if (STp->drv_write_prot)
-			mt_status.mt_gstat |= GMT_WR_PROT(0xffffffff);
-		if (mt_status.mt_blkno == 0) {
-			if (mt_status.mt_fileno == 0)
-				mt_status.mt_gstat |= GMT_BOT(0xffffffff);
-			else
-				mt_status.mt_gstat |= GMT_EOF(0xffffffff);
-		}
-		mt_status.mt_resid = STp->partition;
-		if (STps->eof == ST_EOM_OK || STps->eof == ST_EOM_ERROR)
-			mt_status.mt_gstat |= GMT_EOT(0xffffffff);
-		else if (STps->eof >= ST_EOM_OK)
-			mt_status.mt_gstat |= GMT_EOD(0xffffffff);
-		if (STp->density == 1)
-			mt_status.mt_gstat |= GMT_D_800(0xffffffff);
-		else if (STp->density == 2)
-			mt_status.mt_gstat |= GMT_D_1600(0xffffffff);
-		else if (STp->density == 3)
-			mt_status.mt_gstat |= GMT_D_6250(0xffffffff);
-		if (STp->ready == ST_READY)
-			mt_status.mt_gstat |= GMT_ONLINE(0xffffffff);
-		if (STp->ready == ST_NO_TAPE)
-			mt_status.mt_gstat |= GMT_DR_OPEN(0xffffffff);
-		if (STps->at_sm)
-			mt_status.mt_gstat |= GMT_SM(0xffffffff);
-		if (STm->do_async_writes || (STm->do_buffer_writes && STp->block_size != 0) ||
-		    STp->drv_buffer != 0)
-			mt_status.mt_gstat |= GMT_IM_REP_EN(0xffffffff);
-
-		i = copy_to_user(p, &mt_status, sizeof(struct mtget));
-		if (i) {
-			retval = (-EFAULT);
-			goto out;
-		}
-
-		STp->recover_erreg = 0;  /* Clear after read */
-		retval = 0;
-		goto out;
-	} /* End of MTIOCGET */
-
-	if (cmd_type == _IOC_TYPE(MTIOCPOS) && cmd_nr == _IOC_NR(MTIOCPOS)) {
-		struct mtpos mt_pos;
-
-		if (_IOC_SIZE(cmd_in) != sizeof(struct mtpos)) {
-			retval = (-EINVAL);
-			goto out;
-		}
-		if (STp->raw)
-			blk = osst_get_frame_position(STp, &SRpnt);
-		else
-			blk = osst_get_sector(STp, &SRpnt);
-		if (blk < 0) {
-			retval = blk;
-			goto out;
-		}
-		mt_pos.mt_blkno = blk;
-		i = copy_to_user(p, &mt_pos, sizeof(struct mtpos));
-		if (i)
-			retval = -EFAULT;
-		goto out;
-	}
-	if (SRpnt) osst_release_request(SRpnt);
-
-	mutex_unlock(&STp->lock);
-
-	retval = scsi_ioctl(STp->device, cmd_in, p);
-	mutex_unlock(&osst_int_mutex);
-	return retval;
-
-out:
-	if (SRpnt) osst_release_request(SRpnt);
-
-	mutex_unlock(&STp->lock);
-	mutex_unlock(&osst_int_mutex);
-
-	return retval;
-}
-
-#ifdef CONFIG_COMPAT
-static long osst_compat_ioctl(struct file * file, unsigned int cmd_in, unsigned long arg)
-{
-	struct osst_tape *STp = file->private_data;
-	struct scsi_device *sdev = STp->device;
-	int ret = -ENOIOCTLCMD;
-	if (sdev->host->hostt->compat_ioctl)
-		ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg);
-	return ret;
-}
-#endif
-
-
-
-/* Memory handling routines */
-
-/* Try to allocate a new tape buffer skeleton. Caller must not hold os_scsi_tapes_lock */
-static struct osst_buffer * new_tape_buffer( int from_initialization, int need_dma, int max_sg )
-{
-	int i;
-	gfp_t priority;
-	struct osst_buffer *tb;
-
-	if (from_initialization)
-		priority = GFP_ATOMIC;
-	else
-		priority = GFP_KERNEL;
-
-	i = sizeof(struct osst_buffer) + (osst_max_sg_segs - 1) * sizeof(struct scatterlist);
-	tb = kzalloc(i, priority);
-	if (!tb) {
-		printk(KERN_NOTICE "osst :I: Can't allocate new tape buffer.\n");
-		return NULL;
-	}
-
-	tb->sg_segs = tb->orig_sg_segs = 0;
-	tb->use_sg = max_sg;
-	tb->in_use = 1;
-	tb->dma = need_dma;
-	tb->buffer_size = 0;
-#if DEBUG
-	if (debugging) 
-		printk(OSST_DEB_MSG
-			"osst :D: Allocated tape buffer skeleton (%d bytes, %d segments, dma: %d).\n",
-			   i, max_sg, need_dma);
-#endif
-	return tb;
-}
-
-/* Try to allocate a temporary (while a user has the device open) enlarged tape buffer */
-static int enlarge_buffer(struct osst_buffer *STbuffer, int need_dma)
-{
-	int segs, nbr, max_segs, b_size, order, got;
-	gfp_t priority;
-
-	if (STbuffer->buffer_size >= OS_FRAME_SIZE)
-		return 1;
-
-	if (STbuffer->sg_segs) {
-		printk(KERN_WARNING "osst :A: Buffer not previously normalized.\n");
-		normalize_buffer(STbuffer);
-	}
-	/* See how many segments we can use -- need at least two */
-	nbr = max_segs = STbuffer->use_sg;
-	if (nbr <= 2)
-		return 0;
-
-	priority = GFP_KERNEL /* | __GFP_NOWARN */;
-	if (need_dma)
-		priority |= GFP_DMA;
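-	/* need_dma reflects hosts restricted to ISA DMA, so the pages must
-	 * come from the ISA-reachable DMA zone. */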
-
-	/* Try to allocate the first segment up to OS_DATA_SIZE and the others
-	   big enough to reach the goal (code assumes no segments in place) */
-	for (b_size = OS_DATA_SIZE, order = OSST_FIRST_ORDER; b_size >= PAGE_SIZE; order--, b_size /= 2) {
-		struct page *page = alloc_pages(priority, order);
-
-		STbuffer->sg[0].offset = 0;
-		if (page != NULL) {
-		    sg_set_page(&STbuffer->sg[0], page, b_size, 0);
-		    STbuffer->b_data = page_address(page);
-		    break;
-		}
-	}
-	if (sg_page(&STbuffer->sg[0]) == NULL) {
-		printk(KERN_NOTICE "osst :I: Can't allocate tape buffer main segment.\n");
-		return 0;
-	}
-	/* Got initial segment of 'b_size, order', continue with same size if possible, except for AUX */
-	for (segs=STbuffer->sg_segs=1, got=b_size;
-	     segs < max_segs && got < OS_FRAME_SIZE; ) {
-		struct page *page = alloc_pages(priority, (OS_FRAME_SIZE - got <= PAGE_SIZE) ? 0 : order);
-		STbuffer->sg[segs].offset = 0;
-		if (page == NULL) {
-			printk(KERN_WARNING "osst :W: Failed to enlarge buffer to %d bytes.\n",
-						OS_FRAME_SIZE);
-#if DEBUG
-			STbuffer->buffer_size = got;
-#endif
-			normalize_buffer(STbuffer);
-			return 0;
-		}
-		sg_set_page(&STbuffer->sg[segs], page, (OS_FRAME_SIZE - got <= PAGE_SIZE / 2) ? (OS_FRAME_SIZE - got) : b_size, 0);
-		got += STbuffer->sg[segs].length;
-		STbuffer->buffer_size = got;
-		STbuffer->sg_segs = ++segs;
-	}
-#if DEBUG
-	if (debugging) {
-		printk(OSST_DEB_MSG
-			   "osst :D: Expanded tape buffer (%d bytes, %d->%d segments, dma: %d, at: %p).\n",
-			   got, STbuffer->orig_sg_segs, STbuffer->sg_segs, need_dma, STbuffer->b_data);
-		printk(OSST_DEB_MSG
-			   "osst :D: segment sizes: first %d at %p, last %d bytes at %p.\n",
-			   STbuffer->sg[0].length, page_address(sg_page(&STbuffer->sg[0])),
-			   STbuffer->sg[segs-1].length, page_address(sg_page(&STbuffer->sg[segs-1])));
-	}
-#endif
-
-	return 1;
-}
-
-
-/* Release the segments */
-static void normalize_buffer(struct osst_buffer *STbuffer)
-{
-	int i, order, b_size;
-
-	for (i=0; i < STbuffer->sg_segs; i++) {
-
-		for (b_size = PAGE_SIZE, order = 0;
-		     b_size < STbuffer->sg[i].length;
-		     b_size *= 2, order++);
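-		/* The allocation order was not recorded at allocation time,
-		 * so recompute it from the segment length before freeing. */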
-
-		__free_pages(sg_page(&STbuffer->sg[i]), order);
-		STbuffer->buffer_size -= STbuffer->sg[i].length;
-	}
-#if DEBUG
-	if (debugging && STbuffer->orig_sg_segs < STbuffer->sg_segs)
-		printk(OSST_DEB_MSG "osst :D: Buffer at %p normalized to %d bytes (segs %d).\n",
-			     STbuffer->b_data, STbuffer->buffer_size, STbuffer->sg_segs);
-#endif
-	STbuffer->sg_segs = STbuffer->orig_sg_segs = 0;
-}
-
-
-/* Move data from the user buffer to the tape buffer. Returns zero (success) or
-   negative error code. */
-static int append_to_buffer(const char __user *ubp, struct osst_buffer *st_bp, int do_count)
-{
-	int i, cnt, res, offset;
-
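-	/* Walk the scatterlist to the segment and offset where the buffered
-	 * data currently ends. */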
-	for (i=0, offset=st_bp->buffer_bytes;
-	     i < st_bp->sg_segs && offset >= st_bp->sg[i].length; i++)
-		offset -= st_bp->sg[i].length;
-	if (i == st_bp->sg_segs) {  /* Should never happen */
-		printk(KERN_WARNING "osst :A: Append_to_buffer offset overflow.\n");
-		return (-EIO);
-	}
-	for ( ; i < st_bp->sg_segs && do_count > 0; i++) {
-		cnt = st_bp->sg[i].length - offset < do_count ?
-		      st_bp->sg[i].length - offset : do_count;
-		res = copy_from_user(page_address(sg_page(&st_bp->sg[i])) + offset, ubp, cnt);
-		if (res)
-			return (-EFAULT);
-		do_count -= cnt;
-		st_bp->buffer_bytes += cnt;
-		ubp += cnt;
-		offset = 0;
-	}
-	if (do_count) {  /* Should never happen */
-		printk(KERN_WARNING "osst :A: Append_to_buffer overflow (left %d).\n",
-		       do_count);
-		return (-EIO);
-	}
-	return 0;
-}
-
-
-/* Move data from the tape buffer to the user buffer. Returns zero (success) or
-   negative error code. */
-static int from_buffer(struct osst_buffer *st_bp, char __user *ubp, int do_count)
-{
-	int i, cnt, res, offset;
-
-	for (i=0, offset=st_bp->read_pointer;
-	     i < st_bp->sg_segs && offset >= st_bp->sg[i].length; i++)
-		offset -= st_bp->sg[i].length;
-	if (i == st_bp->sg_segs) {  /* Should never happen */
-		printk(KERN_WARNING "osst :A: From_buffer offset overflow.\n");
-		return (-EIO);
-	}
-	for ( ; i < st_bp->sg_segs && do_count > 0; i++) {
-		cnt = st_bp->sg[i].length - offset < do_count ?
-		      st_bp->sg[i].length - offset : do_count;
-		res = copy_to_user(ubp, page_address(sg_page(&st_bp->sg[i])) + offset, cnt);
-		if (res)
-			return (-EFAULT);
-		do_count -= cnt;
-		st_bp->buffer_bytes -= cnt;
-		st_bp->read_pointer += cnt;
-		ubp += cnt;
-		offset = 0;
-	}
-	if (do_count) {  /* Should never happen */
-		printk(KERN_WARNING "osst :A: From_buffer overflow (left %d).\n", do_count);
-		return (-EIO);
-	}
-	return 0;
-}
-
-/* Sets the tail of the buffer after fill point to zero.
-   Returns zero (success) or negative error code.        */
-static int osst_zero_buffer_tail(struct osst_buffer *st_bp)
-{
-	int	i, offset, do_count, cnt;
-
-	for (i = 0, offset = st_bp->buffer_bytes;
-	     i < st_bp->sg_segs && offset >= st_bp->sg[i].length; i++)
-		offset -= st_bp->sg[i].length;
-	if (i == st_bp->sg_segs) {  /* Should never happen */
-		printk(KERN_WARNING "osst :A: Zero_buffer offset overflow.\n");
-		return (-EIO);
-	}
-	for (do_count = OS_DATA_SIZE - st_bp->buffer_bytes;
-	     i < st_bp->sg_segs && do_count > 0; i++) {
-		cnt = st_bp->sg[i].length - offset < do_count ?
-		      st_bp->sg[i].length - offset : do_count ;
-		memset(page_address(sg_page(&st_bp->sg[i])) + offset, 0, cnt);
-		do_count -= cnt;
-		offset = 0;
-	}
-	if (do_count) {  /* Should never happen */
-		printk(KERN_WARNING "osst :A: Zero_buffer overflow (left %d).\n", do_count);
-		return (-EIO);
-	}
-	return 0;
-}
-
-/* Copy an osst 32K chunk of memory into the buffer.
-   Returns zero (success) or negative error code.  */
-static int osst_copy_to_buffer(struct osst_buffer *st_bp, unsigned char *ptr)
-{
-	int	i, cnt, do_count = OS_DATA_SIZE;
-
-	for (i = 0; i < st_bp->sg_segs && do_count > 0; i++) {
-		cnt = st_bp->sg[i].length < do_count ?
-		      st_bp->sg[i].length : do_count ;
-		memcpy(page_address(sg_page(&st_bp->sg[i])), ptr, cnt);
-		do_count -= cnt;
-		ptr      += cnt;
-	}
-	if (do_count || i != st_bp->sg_segs-1) {  /* Should never happen */
-		printk(KERN_WARNING "osst :A: Copy_to_buffer overflow (left %d at sg %d).\n",
-					 do_count, i);
-		return (-EIO);
-	}
-	return 0;
-}
-
-/* Copy an osst 32K chunk of memory from the buffer.
-   Returns zero (success) or negative error code.  */
-static int osst_copy_from_buffer(struct osst_buffer *st_bp, unsigned char *ptr)
-{
-	int	i, cnt, do_count = OS_DATA_SIZE;
-
-	for (i = 0; i < st_bp->sg_segs && do_count > 0; i++) {
-		cnt = st_bp->sg[i].length < do_count ?
-		      st_bp->sg[i].length : do_count ;
-		memcpy(ptr, page_address(sg_page(&st_bp->sg[i])), cnt);
-		do_count -= cnt;
-		ptr      += cnt;
-	}
-	if (do_count || i != st_bp->sg_segs-1) {  /* Should never happen */
-		printk(KERN_WARNING "osst :A: Copy_from_buffer overflow (left %d at sg %d).\n",
-					 do_count, i);
-		return (-EIO);
-	}
-	return 0;
-}
-
-
-/* Module housekeeping */
-
-static void validate_options (void)
-{
-	if (max_dev > 0)
-		osst_max_dev = max_dev;
-	if (write_threshold_kbs > 0)
-		osst_write_threshold = write_threshold_kbs * ST_KILOBYTE;
-	if (osst_write_threshold > osst_buffer_size)
-		osst_write_threshold = osst_buffer_size;
-	if (max_sg_segs >= OSST_FIRST_SG)
-		osst_max_sg_segs = max_sg_segs;
-#if DEBUG
-	printk(OSST_DEB_MSG "osst :D: max tapes %d, write threshold %d, max s/g segs %d.\n",
-			    osst_max_dev, osst_write_threshold, osst_max_sg_segs);
-#endif
-}
-	
-#ifndef MODULE
-/* Set the boot options. Syntax: osst=xxx,yyy,...
-   where xxx is write threshold in 1024 byte blocks,
-   and   yyy is number of s/g segments to use. */
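-/* Example (following the syntax above): "osst=24,9" requests a 24 KB
-   write threshold and 9 scatter/gather segments. */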
-static int __init osst_setup (char *str)
-{
-  int i, ints[5];
-  char *stp;
-
-  stp = get_options(str, ARRAY_SIZE(ints), ints);
-
-  if (ints[0] > 0) {
-	for (i = 0; i < ints[0] && i < ARRAY_SIZE(parms); i++)
-		  *parms[i].val = ints[i + 1];
-  } else {
-	while (stp != NULL) {
-		for (i = 0; i < ARRAY_SIZE(parms); i++) {
-			int len = strlen(parms[i].name);
-			if (!strncmp(stp, parms[i].name, len) &&
-			    (*(stp + len) == ':' || *(stp + len) == '=')) {
-				*parms[i].val =
-					simple_strtoul(stp + len + 1, NULL, 0);
-				break;
-			}
-		}
-		if (i >= ARRAY_SIZE(parms))
-			printk(KERN_INFO "osst :I: Illegal parameter in '%s'\n",
-			       stp);
-		stp = strchr(stp, ',');
-		if (stp)
-			stp++;
-	}
-  }
-
-  return 1;
-}
-
-__setup("osst=", osst_setup);
-
-#endif
-
-static const struct file_operations osst_fops = {
-	.owner =        THIS_MODULE,
-	.read =         osst_read,
-	.write =        osst_write,
-	.unlocked_ioctl = osst_ioctl,
-#ifdef CONFIG_COMPAT
-	.compat_ioctl = osst_compat_ioctl,
-#endif
-	.open =         os_scsi_tape_open,
-	.flush =        os_scsi_tape_flush,
-	.release =      os_scsi_tape_close,
-	.llseek =	noop_llseek,
-};
-
-static int osst_supports(struct scsi_device * SDp)
-{
-	struct	osst_support_data {
-		char *vendor;
-		char *model;
-		char *rev;
-		char *driver_hint; /* Name of the correct driver, NULL if unknown */
-	};
-
-	static struct osst_support_data support_list[] = {
-		/* {"XXX", "Yy-", "", NULL},  example */
-		SIGS_FROM_OSST,
-		{NULL, }
-	};
-
-	struct	osst_support_data *rp;
-
-	/* We are willing to drive OnStream SC-x0 as well as the
-	 * IDE, ParPort, FireWire, USB variants, if accessible by an
-	 * emulation layer (ide-scsi, usb-storage, ...) */
-
-	for (rp=&(support_list[0]); rp->vendor != NULL; rp++)
-		if (!strncmp(rp->vendor, SDp->vendor, strlen(rp->vendor)) &&
-		    !strncmp(rp->model, SDp->model, strlen(rp->model)) &&
-		    !strncmp(rp->rev, SDp->rev, strlen(rp->rev))) 
-			return 1;
-	return 0;
-}
-
-/*
- * sysfs support for osst driver parameter information
- */
-
-static ssize_t version_show(struct device_driver *ddd, char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "%s\n", osst_version);
-}
-
-static DRIVER_ATTR_RO(version);
-
-static int osst_create_sysfs_files(struct device_driver *sysfs)
-{
-	return driver_create_file(sysfs, &driver_attr_version);
-}
-
-static void osst_remove_sysfs_files(struct device_driver *sysfs)
-{
-	driver_remove_file(sysfs, &driver_attr_version);
-}
-
-/*
- * sysfs support for accessing ADR header information
- */
-
-static ssize_t osst_adr_rev_show(struct device *dev,
-				 struct device_attribute *attr, char *buf)
-{
-	struct osst_tape * STp = (struct osst_tape *) dev_get_drvdata (dev);
-	ssize_t l = 0;
-
-	if (STp && STp->header_ok && STp->linux_media)
-		l = snprintf(buf, PAGE_SIZE, "%d.%d\n", STp->header_cache->major_rev, STp->header_cache->minor_rev);
-	return l;
-}
-
-DEVICE_ATTR(ADR_rev, S_IRUGO, osst_adr_rev_show, NULL);
-
-static ssize_t osst_linux_media_version_show(struct device *dev,
-					     struct device_attribute *attr,
-					     char *buf)
-{
-	struct osst_tape * STp = (struct osst_tape *) dev_get_drvdata (dev);
-	ssize_t l = 0;
-
-	if (STp && STp->header_ok && STp->linux_media)
-		l = snprintf(buf, PAGE_SIZE, "LIN%d\n", STp->linux_media_version);
-	return l;
-}
-
-DEVICE_ATTR(media_version, S_IRUGO, osst_linux_media_version_show, NULL);
-
-static ssize_t osst_capacity_show(struct device *dev,
-				  struct device_attribute *attr, char *buf)
-{
-	struct osst_tape * STp = (struct osst_tape *) dev_get_drvdata (dev);
-	ssize_t l = 0;
-
-	if (STp && STp->header_ok && STp->linux_media)
-		l = snprintf(buf, PAGE_SIZE, "%d\n", STp->capacity);
-	return l;
-}
-
-DEVICE_ATTR(capacity, S_IRUGO, osst_capacity_show, NULL);
-
-static ssize_t osst_first_data_ppos_show(struct device *dev,
-					 struct device_attribute *attr,
-					 char *buf)
-{
-	struct osst_tape * STp = (struct osst_tape *) dev_get_drvdata (dev);
-	ssize_t l = 0;
-
-	if (STp && STp->header_ok && STp->linux_media)
-		l = snprintf(buf, PAGE_SIZE, "%d\n", STp->first_data_ppos);
-	return l;
-}
-
-DEVICE_ATTR(BOT_frame, S_IRUGO, osst_first_data_ppos_show, NULL);
-
-static ssize_t osst_eod_frame_ppos_show(struct device *dev,
-					struct device_attribute *attr,
-					char *buf)
-{
-	struct osst_tape * STp = (struct osst_tape *) dev_get_drvdata (dev);
-	ssize_t l = 0;
-
-	if (STp && STp->header_ok && STp->linux_media)
-		l = snprintf(buf, PAGE_SIZE, "%d\n", STp->eod_frame_ppos);
-	return l;
-}
-
-DEVICE_ATTR(EOD_frame, S_IRUGO, osst_eod_frame_ppos_show, NULL);
-
-static ssize_t osst_filemark_cnt_show(struct device *dev,
-				      struct device_attribute *attr, char *buf)
-{
-	struct osst_tape * STp = (struct osst_tape *) dev_get_drvdata (dev);
-	ssize_t l = 0;
-
-	if (STp && STp->header_ok && STp->linux_media)
-		l = snprintf(buf, PAGE_SIZE, "%d\n", STp->filemark_cnt);
-	return l;
-}
-
-DEVICE_ATTR(file_count, S_IRUGO, osst_filemark_cnt_show, NULL);
-
-static struct class *osst_sysfs_class;
-
-static int osst_sysfs_init(void)
-{
-	osst_sysfs_class = class_create(THIS_MODULE, "onstream_tape");
-	if (IS_ERR(osst_sysfs_class)) {
-		printk(KERN_ERR "osst :W: Unable to register sysfs class\n");
-		return PTR_ERR(osst_sysfs_class);
-	}
-
-	return 0;
-}
-
-static void osst_sysfs_destroy(dev_t dev)
-{
-	device_destroy(osst_sysfs_class, dev);
-}
-
-static int osst_sysfs_add(dev_t dev, struct device *device, struct osst_tape * STp, char * name)
-{
-	struct device *osst_member;
-	int err;
-
-	osst_member = device_create(osst_sysfs_class, device, dev, STp,
-				    "%s", name);
-	if (IS_ERR(osst_member)) {
-		printk(KERN_WARNING "osst :W: Unable to add sysfs class member %s\n", name);
-		return PTR_ERR(osst_member);
-	}
-
-	err = device_create_file(osst_member, &dev_attr_ADR_rev);
-	if (err)
-		goto err_out;
-	err = device_create_file(osst_member, &dev_attr_media_version);
-	if (err)
-		goto err_out;
-	err = device_create_file(osst_member, &dev_attr_capacity);
-	if (err)
-		goto err_out;
-	err = device_create_file(osst_member, &dev_attr_BOT_frame);
-	if (err)
-		goto err_out;
-	err = device_create_file(osst_member, &dev_attr_EOD_frame);
-	if (err)
-		goto err_out;
-	err = device_create_file(osst_member, &dev_attr_file_count);
-	if (err)
-		goto err_out;
-
-	return 0;
-
-err_out:
-	osst_sysfs_destroy(dev);
-	return err;
-}
-
-static void osst_sysfs_cleanup(void)
-{
-	class_destroy(osst_sysfs_class);
-}
-
-/*
- * osst startup / cleanup code
- */
-
-static int osst_probe(struct device *dev)
-{
-	struct scsi_device * SDp = to_scsi_device(dev);
-	struct osst_tape   * tpnt;
-	struct st_modedef  * STm;
-	struct st_partstat * STps;
-	struct osst_buffer * buffer;
-	struct gendisk	   * drive;
-	int		     i, dev_num, err = -ENODEV;
-
-	if (SDp->type != TYPE_TAPE || !osst_supports(SDp))
-		return -ENODEV;
-
-	drive = alloc_disk(1);
-	if (!drive) {
-		printk(KERN_ERR "osst :E: Out of memory. Device not attached.\n");
-		return -ENODEV;
-	}
-
-	/* if this is the first attach, build the infrastructure */
-	write_lock(&os_scsi_tapes_lock);
-	if (os_scsi_tapes == NULL) {
-		os_scsi_tapes = kmalloc_array(osst_max_dev,
-                                              sizeof(struct osst_tape *),
-                                              GFP_ATOMIC);
-		if (os_scsi_tapes == NULL) {
-			write_unlock(&os_scsi_tapes_lock);
-			printk(KERN_ERR "osst :E: Unable to allocate array for OnStream SCSI tapes.\n");
-			goto out_put_disk;
-		}
-		for (i=0; i < osst_max_dev; ++i) os_scsi_tapes[i] = NULL;
-	}
-	
-	if (osst_nr_dev >= osst_max_dev) {
-		write_unlock(&os_scsi_tapes_lock);
-		printk(KERN_ERR "osst :E: Too many tape devices (max. %d).\n", osst_max_dev);
-		goto out_put_disk;
-	}
-
-	/* find a free minor number */
-	for (i = 0; i < osst_max_dev && os_scsi_tapes[i]; i++)
-		;
-	if(i >= osst_max_dev) panic ("Scsi_devices corrupt (osst)");
-	dev_num = i;
-
-	/* allocate a struct osst_tape for this device */
-	tpnt = kzalloc(sizeof(struct osst_tape), GFP_ATOMIC);
-	if (!tpnt) {
-		write_unlock(&os_scsi_tapes_lock);
-		printk(KERN_ERR "osst :E: Can't allocate device descriptor, device not attached.\n");
-		goto out_put_disk;
-	}
-
-	/* allocate a buffer for this device */
-	i = SDp->host->sg_tablesize;
-	if (osst_max_sg_segs < i)
-		i = osst_max_sg_segs;
-	buffer = new_tape_buffer(1, SDp->host->unchecked_isa_dma, i);
-	if (buffer == NULL) {
-		write_unlock(&os_scsi_tapes_lock);
-		printk(KERN_ERR "osst :E: Unable to allocate a tape buffer, device not attached.\n");
-		kfree(tpnt);
-		goto out_put_disk;
-	}
-	os_scsi_tapes[dev_num] = tpnt;
-	tpnt->buffer = buffer;
-	tpnt->device = SDp;
-	drive->private_data = &tpnt->driver;
-	sprintf(drive->disk_name, "osst%d", dev_num);
-	tpnt->driver = &osst_template;
-	tpnt->drive = drive;
-	tpnt->in_use = 0;
-	tpnt->capacity = 0xfffff;
-	tpnt->dirty = 0;
-	tpnt->drv_buffer = 1;  /* Try buffering if no mode sense */
-	tpnt->restr_dma = (SDp->host)->unchecked_isa_dma;
-	tpnt->density = 0;
-	tpnt->do_auto_lock = OSST_AUTO_LOCK;
-	tpnt->can_bsr = OSST_IN_FILE_POS;
-	tpnt->can_partitions = 0;
-	tpnt->two_fm = OSST_TWO_FM;
-	tpnt->fast_mteom = OSST_FAST_MTEOM;
-	tpnt->scsi2_logical = OSST_SCSI2LOGICAL; /* FIXME */
-	tpnt->write_threshold = osst_write_threshold;
-	tpnt->default_drvbuffer = 0xff; /* No forced buffering */
-	tpnt->partition = 0;
-	tpnt->new_partition = 0;
-	tpnt->nbr_partitions = 0;
-	tpnt->min_block = 512;
-	tpnt->max_block = OS_DATA_SIZE;
-	tpnt->timeout = OSST_TIMEOUT;
-	tpnt->long_timeout = OSST_LONG_TIMEOUT;
-
-	/* Recognize OnStream tapes */
-	/* We don't need to test for OnStream, as this has been done in detect () */
-	tpnt->os_fw_rev = osst_parse_firmware_rev (SDp->rev);
-	tpnt->omit_blklims = 1;
-
-	tpnt->poll = (strncmp(SDp->model, "DI-", 3) == 0) || 
-		     (strncmp(SDp->model, "FW-", 3) == 0) || OSST_FW_NEED_POLL(tpnt->os_fw_rev,SDp);
-	tpnt->frame_in_buffer = 0;
-	tpnt->header_ok = 0;
-	tpnt->linux_media = 0;
-	tpnt->header_cache = NULL;
-
-	for (i=0; i < ST_NBR_MODES; i++) {
-		STm = &(tpnt->modes[i]);
-		STm->defined = 0;
-		STm->sysv = OSST_SYSV;
-		STm->defaults_for_writes = 0;
-		STm->do_async_writes = OSST_ASYNC_WRITES;
-		STm->do_buffer_writes = OSST_BUFFER_WRITES;
-		STm->do_read_ahead = OSST_READ_AHEAD;
-		STm->default_compression = ST_DONT_TOUCH;
-		STm->default_blksize = 512;
-		STm->default_density = (-1);  /* No forced density */
-	}
-
-	for (i=0; i < ST_NBR_PARTITIONS; i++) {
-		STps = &(tpnt->ps[i]);
-		STps->rw = ST_IDLE;
-		STps->eof = ST_NOEOF;
-		STps->at_sm = 0;
-		STps->last_block_valid = 0;
-		STps->drv_block = (-1);
-		STps->drv_file = (-1);
-	}
-
-	tpnt->current_mode = 0;
-	tpnt->modes[0].defined = 1;
-	tpnt->modes[2].defined = 1;
-	tpnt->density_changed = tpnt->compression_changed = tpnt->blksize_changed = 0;
-
-	mutex_init(&tpnt->lock);
-	osst_nr_dev++;
-	write_unlock(&os_scsi_tapes_lock);
-
-	{
-		char name[8];
-
-		/*  Rewind entry  */
-		err = osst_sysfs_add(MKDEV(OSST_MAJOR, dev_num), dev, tpnt, tape_name(tpnt));
-		if (err)
-			goto out_free_buffer;
-
-		/*  No-rewind entry  */
-		snprintf(name, 8, "%s%s", "n", tape_name(tpnt));
-		err = osst_sysfs_add(MKDEV(OSST_MAJOR, dev_num + 128), dev, tpnt, name);
-		if (err)
-			goto out_free_sysfs1;
-	}
-
-	sdev_printk(KERN_INFO, SDp,
-		"osst :I: Attached OnStream %.5s tape as %s\n",
-		SDp->model, tape_name(tpnt));
-
-	return 0;
-
-out_free_sysfs1:
-	osst_sysfs_destroy(MKDEV(OSST_MAJOR, dev_num));
-out_free_buffer:
-	kfree(buffer);
-out_put_disk:
-        put_disk(drive);
-        return err;
-};
-
-static int osst_remove(struct device *dev)
-{
-	struct scsi_device * SDp = to_scsi_device(dev);
-	struct osst_tape * tpnt;
-	int i;
-
-	if ((SDp->type != TYPE_TAPE) || (osst_nr_dev <= 0))
-		return 0;
-
-	write_lock(&os_scsi_tapes_lock);
-	for(i=0; i < osst_max_dev; i++) {
-		if((tpnt = os_scsi_tapes[i]) && (tpnt->device == SDp)) {
-			osst_sysfs_destroy(MKDEV(OSST_MAJOR, i));
-			osst_sysfs_destroy(MKDEV(OSST_MAJOR, i+128));
-			tpnt->device = NULL;
-			put_disk(tpnt->drive);
-			os_scsi_tapes[i] = NULL;
-			osst_nr_dev--;
-			write_unlock(&os_scsi_tapes_lock);
-			vfree(tpnt->header_cache);
-			if (tpnt->buffer) {
-				normalize_buffer(tpnt->buffer);
-				kfree(tpnt->buffer);
-			}
-			kfree(tpnt);
-			return 0;
-		}
-	}
-	write_unlock(&os_scsi_tapes_lock);
-	return 0;
-}
-
-static int __init init_osst(void) 
-{
-	int err;
-
-	printk(KERN_INFO "osst :I: Tape driver with OnStream support version %s\nosst :I: %s\n", osst_version, cvsid);
-
-	validate_options();
-
-	err = osst_sysfs_init();
-	if (err)
-		return err;
-
-	err = register_chrdev(OSST_MAJOR, "osst", &osst_fops);
-	if (err < 0) {
-		printk(KERN_ERR "osst :E: Unable to register major %d for OnStream tapes\n", OSST_MAJOR);
-		goto err_out;
-	}
-
-	err = scsi_register_driver(&osst_template.gendrv);
-	if (err)
-		goto err_out_chrdev;
-
-	err = osst_create_sysfs_files(&osst_template.gendrv);
-	if (err)
-		goto err_out_scsidrv;
-
-	return 0;
-
-err_out_scsidrv:
-	scsi_unregister_driver(&osst_template.gendrv);
-err_out_chrdev:
-	unregister_chrdev(OSST_MAJOR, "osst");
-err_out:
-	osst_sysfs_cleanup();
-	return err;
-}
-
-static void __exit exit_osst (void)
-{
-	int i;
-	struct osst_tape * STp;
-
-	osst_remove_sysfs_files(&osst_template.gendrv);
-	scsi_unregister_driver(&osst_template.gendrv);
-	unregister_chrdev(OSST_MAJOR, "osst");
-	osst_sysfs_cleanup();
-
-	if (os_scsi_tapes) {
-		for (i=0; i < osst_max_dev; ++i) {
-			if (!(STp = os_scsi_tapes[i])) continue;
-			/* Defensive: this cleanup is supposed to have happened during detach */
-			vfree(STp->header_cache);
-			if (STp->buffer) {
-				normalize_buffer(STp->buffer);
-				kfree(STp->buffer);
-			}
-			put_disk(STp->drive);
-			kfree(STp);
-		}
-		kfree(os_scsi_tapes);
-	}
-	printk(KERN_INFO "osst :I: Unloaded.\n");
-}
-
-module_init(init_osst);
-module_exit(exit_osst);
diff --git a/drivers/scsi/osst.h b/drivers/scsi/osst.h
deleted file mode 100644
index b90ae28..0000000
--- a/drivers/scsi/osst.h
+++ /dev/null
@@ -1,651 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- *	$Header: /cvsroot/osst/Driver/osst.h,v 1.16 2005/01/01 21:13:35 wriede Exp $
- */
-
-#include <asm/byteorder.h>
-#include <linux/completion.h>
-#include <linux/mutex.h>
-
-/*	FIXME - rename and use the following two types or delete them!
- *              and the types really should go to st.h anyway...
- *	INQUIRY packet command - Data Format (From Table 6-8 of QIC-157C)
- */
-typedef struct {
-	unsigned	device_type	:5;	/* Peripheral Device Type */
-	unsigned	reserved0_765	:3;	/* Peripheral Qualifier - Reserved */
-	unsigned	reserved1_6t0	:7;	/* Reserved */
-	unsigned	rmb		:1;	/* Removable Medium Bit */
-	unsigned	ansi_version	:3;	/* ANSI Version */
-	unsigned	ecma_version	:3;	/* ECMA Version */
-	unsigned	iso_version	:2;	/* ISO Version */
-	unsigned	response_format :4;	/* Response Data Format */
-	unsigned	reserved3_45	:2;	/* Reserved */
-	unsigned	reserved3_6	:1;	/* TrmIOP - Reserved */
-	unsigned	reserved3_7	:1;	/* AENC - Reserved */
-	u8		additional_length;	/* Additional Length (total_length-4) */
-	u8		rsv5, rsv6, rsv7;	/* Reserved */
-	u8		vendor_id[8];		/* Vendor Identification */
-	u8		product_id[16];		/* Product Identification */
-	u8		revision_level[4];	/* Revision Level */
-	u8		vendor_specific[20];	/* Vendor Specific - Optional */
-	u8		reserved56t95[40];	/* Reserved - Optional */
-						/* Additional information may be returned */
-} idetape_inquiry_result_t;
-
-/*
- *	READ POSITION packet command - Data Format (From Table 6-57)
- */
-typedef struct {
-	unsigned	reserved0_10	:2;	/* Reserved */
-	unsigned	bpu		:1;	/* Block Position Unknown */	
-	unsigned	reserved0_543	:3;	/* Reserved */
-	unsigned	eop		:1;	/* End Of Partition */
-	unsigned	bop		:1;	/* Beginning Of Partition */
-	u8		partition;		/* Partition Number */
-	u8		reserved2, reserved3;	/* Reserved */
-	u32		first_block;		/* First Block Location */
-	u32		last_block;		/* Last Block Location (Optional) */
-	u8		reserved12;		/* Reserved */
-	u8		blocks_in_buffer[3];	/* Blocks In Buffer - (Optional) */
-	u32		bytes_in_buffer;	/* Bytes In Buffer (Optional) */
-} idetape_read_position_result_t;
-
-/*
- *      The following structures are related to the MODE SELECT / MODE SENSE
- *      packet commands.
- */
-#define COMPRESSION_PAGE           0x0f
-#define COMPRESSION_PAGE_LENGTH    16
-
-#define CAPABILITIES_PAGE          0x2a
-#define CAPABILITIES_PAGE_LENGTH   20
-
-#define TAPE_PARAMTR_PAGE          0x2b
-#define TAPE_PARAMTR_PAGE_LENGTH   16
-
-#define NUMBER_RETRIES_PAGE        0x2f
-#define NUMBER_RETRIES_PAGE_LENGTH 4
-
-#define BLOCK_SIZE_PAGE            0x30
-#define BLOCK_SIZE_PAGE_LENGTH     4
-
-#define BUFFER_FILLING_PAGE        0x33
-#define BUFFER_FILLING_PAGE_LENGTH 4
-
-#define VENDOR_IDENT_PAGE          0x36
-#define VENDOR_IDENT_PAGE_LENGTH   8
-
-#define LOCATE_STATUS_PAGE         0x37
-#define LOCATE_STATUS_PAGE_LENGTH  0
-
-#define MODE_HEADER_LENGTH         4
-
-
-/*
- *	REQUEST SENSE packet command result - Data Format.
- */
-typedef struct {
-	unsigned	error_code	:7;	/* Current or deferred errors */
-	unsigned	valid		:1;	/* The information field conforms to QIC-157C */
-	u8		reserved1	:8;	/* Segment Number - Reserved */
-	unsigned	sense_key	:4;	/* Sense Key */
-	unsigned	reserved2_4	:1;	/* Reserved */
-	unsigned	ili		:1;	/* Incorrect Length Indicator */
-	unsigned	eom		:1;	/* End Of Medium */
-	unsigned	filemark 	:1;	/* Filemark */
-	u32		information __attribute__ ((packed));
-	u8		asl;			/* Additional sense length (n-7) */
-	u32		command_specific;	/* Additional command specific information */
-	u8		asc;			/* Additional Sense Code */
-	u8		ascq;			/* Additional Sense Code Qualifier */
-	u8		replaceable_unit_code;	/* Field Replaceable Unit Code */
-	unsigned	sk_specific1 	:7;	/* Sense Key Specific */
-	unsigned	sksv		:1;	/* Sense Key Specific information is valid */
-	u8		sk_specific2;		/* Sense Key Specific */
-	u8		sk_specific3;		/* Sense Key Specific */
-	u8		pad[2];			/* Padding to 20 bytes */
-} idetape_request_sense_result_t;
-
-/*
- *      Mode Parameter Header for the MODE SENSE packet command
- */
-typedef struct {
-        u8              mode_data_length;       /* Length of the following data transfer */
-        u8              medium_type;            /* Medium Type */
-        u8              dsp;                    /* Device Specific Parameter */
-        u8              bdl;                    /* Block Descriptor Length */
-} osst_mode_parameter_header_t;
-
-/*
- *      Mode Parameter Block Descriptor the MODE SENSE packet command
- *
- *      Support for block descriptors is optional.
- */
-typedef struct {
-        u8              density_code;           /* Medium density code */
-        u8              blocks[3];              /* Number of blocks */
-        u8              reserved4;              /* Reserved */
-        u8              length[3];              /* Block Length */
-} osst_parameter_block_descriptor_t;
-
-/*
- *      The Data Compression Page, as returned by the MODE SENSE packet command.
- */
-typedef struct {
-#if   defined(__BIG_ENDIAN_BITFIELD)
-        unsigned        ps              :1;
-        unsigned        reserved0       :1;     /* Reserved */
-	unsigned        page_code       :6;     /* Page Code - Should be 0xf */
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
-	unsigned        page_code       :6;     /* Page Code - Should be 0xf */
-        unsigned        reserved0       :1;     /* Reserved */
-        unsigned        ps              :1;
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
-        u8              page_length;            /* Page Length - Should be 14 */
-#if   defined(__BIG_ENDIAN_BITFIELD)
-        unsigned        dce             :1;     /* Data Compression Enable */
-        unsigned        dcc             :1;     /* Data Compression Capable */
-	unsigned        reserved2       :6;     /* Reserved */
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
-	unsigned        reserved2       :6;     /* Reserved */
-        unsigned        dcc             :1;     /* Data Compression Capable */
-        unsigned        dce             :1;     /* Data Compression Enable */
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
-#if   defined(__BIG_ENDIAN_BITFIELD)
-        unsigned        dde             :1;     /* Data Decompression Enable */
-        unsigned        red             :2;     /* Report Exception on Decompression */
-	unsigned        reserved3       :5;     /* Reserved */
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
-	unsigned        reserved3       :5;     /* Reserved */
-        unsigned        red             :2;     /* Report Exception on Decompression */
-        unsigned        dde             :1;     /* Data Decompression Enable */
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
-        u32             ca;                     /* Compression Algorithm */
-        u32             da;                     /* Decompression Algorithm */
-        u8              reserved[4];            /* Reserved */
-} osst_data_compression_page_t;
-
-/*
- *      The Medium Partition Page, as returned by the MODE SENSE packet command.
- */
-typedef struct {
-#if   defined(__BIG_ENDIAN_BITFIELD)
-        unsigned        ps              :1;
-        unsigned        reserved1_6     :1;     /* Reserved */
-	unsigned        page_code       :6;     /* Page Code - Should be 0x11 */
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
-	unsigned        page_code       :6;     /* Page Code - Should be 0x11 */
-        unsigned        reserved1_6     :1;     /* Reserved */
-        unsigned        ps              :1;
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
-        u8              page_length;            /* Page Length - Should be 6 */
-        u8              map;                    /* Maximum Additional Partitions - Should be 0 */
-        u8              apd;                    /* Additional Partitions Defined - Should be 0 */
-#if   defined(__BIG_ENDIAN_BITFIELD)
-        unsigned        fdp             :1;     /* Fixed Data Partitions */
-        unsigned        sdp             :1;     /* Should be 0 */
-        unsigned        idp             :1;     /* Should be 0 */
-        unsigned        psum            :2;     /* Should be 0 */
-	unsigned        reserved4_012   :3;     /* Reserved */
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
-	unsigned        reserved4_012   :3;     /* Reserved */
-        unsigned        psum            :2;     /* Should be 0 */
-        unsigned        idp             :1;     /* Should be 0 */
-        unsigned        sdp             :1;     /* Should be 0 */
-        unsigned        fdp             :1;     /* Fixed Data Partitions */
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
-        u8              mfr;                    /* Medium Format Recognition */
-        u8              reserved[2];            /* Reserved */
-} osst_medium_partition_page_t;
-
-/*
- *      Capabilities and Mechanical Status Page
- */
-typedef struct {
-#if   defined(__BIG_ENDIAN_BITFIELD)
-        unsigned        reserved1_67    :2;
-	unsigned        page_code       :6;     /* Page code - Should be 0x2a */
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
-	unsigned        page_code       :6;     /* Page code - Should be 0x2a */
-        unsigned        reserved1_67    :2;
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
-        u8              page_length;            /* Page Length - Should be 0x12 */
-        u8              reserved2, reserved3;
-#if   defined(__BIG_ENDIAN_BITFIELD)
-        unsigned        reserved4_67    :2;
-        unsigned        sprev           :1;     /* Supports SPACE in the reverse direction */
-        unsigned        reserved4_1234  :4;
-	unsigned        ro              :1;     /* Read Only Mode */
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
-	unsigned        ro              :1;     /* Read Only Mode */
-        unsigned        reserved4_1234  :4;
-        unsigned        sprev           :1;     /* Supports SPACE in the reverse direction */
-        unsigned        reserved4_67    :2;
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
-#if   defined(__BIG_ENDIAN_BITFIELD)
-        unsigned        reserved5_67    :2;
-        unsigned        qfa             :1;     /* Supports the QFA two partition formats */
-        unsigned        reserved5_4     :1;
-        unsigned        efmt            :1;     /* Supports ERASE command initiated formatting */
-	unsigned        reserved5_012   :3;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
-	unsigned        reserved5_012   :3;
-        unsigned        efmt            :1;     /* Supports ERASE command initiated formatting */
-        unsigned        reserved5_4     :1;
-        unsigned        qfa             :1;     /* Supports the QFA two partition formats */
-        unsigned        reserved5_67    :2;
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
-#if   defined(__BIG_ENDIAN_BITFIELD)
-        unsigned        cmprs           :1;     /* Supports data compression */
-        unsigned        ecc             :1;     /* Supports error correction */
-	unsigned        reserved6_45    :2;     /* Reserved */  
-        unsigned        eject           :1;     /* The device can eject the volume */
-        unsigned        prevent         :1;     /* The device defaults in the prevent state after power up */
-        unsigned        locked          :1;     /* The volume is locked */
-	unsigned        lock            :1;     /* Supports locking the volume */
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
-	unsigned        lock            :1;     /* Supports locking the volume */
-        unsigned        locked          :1;     /* The volume is locked */
-        unsigned        prevent         :1;     /* The device defaults in the prevent state after power up */
-        unsigned        eject           :1;     /* The device can eject the volume */
-	unsigned        reserved6_45    :2;     /* Reserved */  
-        unsigned        ecc             :1;     /* Supports error correction */
-        unsigned        cmprs           :1;     /* Supports data compression */
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
-#if   defined(__BIG_ENDIAN_BITFIELD)
-        unsigned        blk32768        :1;     /* slowb - the device restricts the byte count for PIO */
-                                                /* transfers for slow buffer memory ??? */
-                                                /* Also 32768 block size in some cases */
-        unsigned        reserved7_3_6   :4;
-        unsigned        blk1024         :1;     /* Supports 1024 bytes block size */
-        unsigned        blk512          :1;     /* Supports 512 bytes block size */
-	unsigned        reserved7_0     :1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
-	unsigned        reserved7_0     :1;
-        unsigned        blk512          :1;     /* Supports 512 bytes block size */
-        unsigned        blk1024         :1;     /* Supports 1024 bytes block size */
-        unsigned        reserved7_3_6   :4;
-        unsigned        blk32768        :1;     /* slowb - the device restricts the byte count for PIO */
-                                                /* transfers for slow buffer memory ??? */
-                                                /* Also 32768 block size in some cases */
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
-        __be16          max_speed;              /* Maximum speed supported in KBps */
-        u8              reserved10, reserved11;
-        __be16          ctl;                    /* Continuous Transfer Limit in blocks */
-        __be16          speed;                  /* Current Speed, in KBps */
-        __be16          buffer_size;            /* Buffer Size, in 512 bytes */
-        u8              reserved18, reserved19;
-} osst_capabilities_page_t;
-
-/*
- *      Block Size Page
- */
-typedef struct {
-#if   defined(__BIG_ENDIAN_BITFIELD)
-        unsigned        ps              :1;
-        unsigned        reserved1_6     :1;
-	unsigned        page_code       :6;     /* Page code - Should be 0x30 */
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
-	unsigned        page_code       :6;     /* Page code - Should be 0x30 */
-        unsigned        reserved1_6     :1;
-        unsigned        ps              :1;
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
-        u8              page_length;            /* Page Length - Should be 2 */
-        u8              reserved2;
-#if   defined(__BIG_ENDIAN_BITFIELD)
-        unsigned        one             :1;
-        unsigned        reserved2_6     :1;
-        unsigned        record32_5      :1;
-        unsigned        record32        :1;
-        unsigned        reserved2_23    :2;
-        unsigned        play32_5        :1;
-	unsigned        play32          :1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
-	unsigned        play32          :1;
-        unsigned        play32_5        :1;
-        unsigned        reserved2_23    :2;
-        unsigned        record32        :1;
-        unsigned        record32_5      :1;
-        unsigned        reserved2_6     :1;
-        unsigned        one             :1;
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
-} osst_block_size_page_t;
-
-/*
- *	Tape Parameters Page
- */
-typedef struct {
-#if   defined(__BIG_ENDIAN_BITFIELD)
-        unsigned        ps              :1;
-        unsigned        reserved1_6     :1;
-	unsigned        page_code       :6;     /* Page code - Should be 0x2b */
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
-	unsigned        page_code       :6;     /* Page code - Should be 0x2b */
-        unsigned        reserved1_6     :1;
-        unsigned        ps              :1;
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
-	u8		reserved2;
-	u8		density;
-	u8		reserved3,reserved4;
-	__be16		segtrk;
-	__be16		trks;
-	u8		reserved5,reserved6,reserved7,reserved8,reserved9,reserved10;
-} osst_tape_paramtr_page_t;
-
-/* OnStream definitions */
-
-#define OS_CONFIG_PARTITION     (0xff)
-#define OS_DATA_PARTITION       (0)
-#define OS_PARTITION_VERSION    (1)
-
-/*
- * partition
- */
-typedef struct os_partition_s {
-        __u8    partition_num;
-        __u8    par_desc_ver;
-        __be16  wrt_pass_cntr;
-        __be32  first_frame_ppos;
-        __be32  last_frame_ppos;
-        __be32  eod_frame_ppos;
-} os_partition_t;
-
-/*
- * DAT entry
- */
-typedef struct os_dat_entry_s {
-        __be32  blk_sz;
-        __be16  blk_cnt;
-        __u8    flags;
-        __u8    reserved;
-} os_dat_entry_t;
-
-/*
- * DAT
- */
-#define OS_DAT_FLAGS_DATA       (0xc)
-#define OS_DAT_FLAGS_MARK       (0x1)
-
-typedef struct os_dat_s {
-        __u8            dat_sz;
-        __u8            reserved1;
-        __u8            entry_cnt;
-        __u8            reserved3;
-        os_dat_entry_t  dat_list[16];
-} os_dat_t;
-
-/*
- * Frame types
- */
-#define OS_FRAME_TYPE_FILL      (0)
-#define OS_FRAME_TYPE_EOD       (1 << 0)
-#define OS_FRAME_TYPE_MARKER    (1 << 1)
-#define OS_FRAME_TYPE_HEADER    (1 << 3)
-#define OS_FRAME_TYPE_DATA      (1 << 7)
-
-/*
- * AUX
- */
-typedef struct os_aux_s {
-        __be32          format_id;              /* hardware compatibility AUX is based on */
-        char            application_sig[4];     /* driver used to write this media */
-        __be32          hdwr;                   /* reserved */
-        __be32          update_frame_cntr;      /* for configuration frame */
-        __u8            frame_type;
-        __u8            frame_type_reserved;
-        __u8            reserved_18_19[2];
-        os_partition_t  partition;
-        __u8            reserved_36_43[8];
-        __be32          frame_seq_num;
-        __be32          logical_blk_num_high;
-        __be32          logical_blk_num;
-        os_dat_t        dat;
-        __u8            reserved188_191[4];
-        __be32          filemark_cnt;
-        __be32          phys_fm;
-        __be32          last_mark_ppos;
-        __u8            reserved204_223[20];
-
-        /*
-         * __u8         app_specific[32];
-         *
-         * Linux specific fields:
-         */
-         __be32         next_mark_ppos;         /* when known, points to next marker */
-	 __be32		last_mark_lbn;		/* storing log_blk_num of last mark extends the ADR spec */
-         __u8           linux_specific[24];
-
-        __u8            reserved_256_511[256];
-} os_aux_t;
-
-#define OS_FM_TAB_MAX 1024
-
-typedef struct os_fm_tab_s {
-	__u8		fm_part_num;
-	__u8		reserved_1;
-	__u8		fm_tab_ent_sz;
-	__u8		reserved_3;
-	__be16		fm_tab_ent_cnt;
-	__u8		reserved6_15[10];
-	__be32		fm_tab_ent[OS_FM_TAB_MAX];
-} os_fm_tab_t;
-
-typedef struct os_ext_trk_ey_s {
-	__u8		et_part_num;
-	__u8		fmt;
-	__be16		fm_tab_off;
-	__u8		reserved4_7[4];
-	__be32		last_hlb_hi;
-	__be32		last_hlb;
-	__be32		last_pp;
-	__u8		reserved20_31[12];
-} os_ext_trk_ey_t;
-
-typedef struct os_ext_trk_tb_s {
-	__u8		nr_stream_part;
-	__u8		reserved_1;
-	__u8		et_ent_sz;
-	__u8		reserved3_15[13];
-	os_ext_trk_ey_t	dat_ext_trk_ey;
-	os_ext_trk_ey_t	qfa_ext_trk_ey;
-} os_ext_trk_tb_t;
-
-typedef struct os_header_s {
-        char            ident_str[8];
-        __u8            major_rev;
-        __u8            minor_rev;
-	__be16		ext_trk_tb_off;
-        __u8            reserved12_15[4];
-        __u8            pt_par_num;
-        __u8            pt_reserved1_3[3];
-        os_partition_t  partition[16];
-	__be32		cfg_col_width;
-	__be32		dat_col_width;
-	__be32		qfa_col_width;
-	__u8		cartridge[16];
-	__u8		reserved304_511[208];
-	__be32		old_filemark_list[16680/4];		/* in ADR 1.4 __u8 track_table[16680] */
-	os_ext_trk_tb_t	ext_track_tb;
-	__u8		reserved17272_17735[464];
-	os_fm_tab_t	dat_fm_tab;
-	os_fm_tab_t	qfa_fm_tab;
-	__u8		reserved25960_32767[6808];
-} os_header_t;
-
-
-/*
- * OnStream ADRL frame
- */
-#define OS_FRAME_SIZE   (32 * 1024 + 512)
-#define OS_DATA_SIZE    (32 * 1024)
-#define OS_AUX_SIZE     (512)
-//#define OSST_MAX_SG      2
-
-/* The OnStream tape buffer descriptor. */
-struct osst_buffer {
-  unsigned char in_use;
-  unsigned char dma;	/* DMA-able buffer */
-  int buffer_size;
-  int buffer_blocks;
-  int buffer_bytes;
-  int read_pointer;
-  int writing;
-  int midlevel_result;
-  int syscall_result;
-  struct osst_request *last_SRpnt;
-  struct st_cmdstatus cmdstat;
-  struct rq_map_data map_data;
-  unsigned char *b_data;
-  os_aux_t *aux;               /* onstream AUX structure at end of each block     */
-  unsigned short use_sg;       /* zero or number of s/g segments for this adapter */
-  unsigned short sg_segs;      /* number of segments in s/g list                  */
-  unsigned short orig_sg_segs; /* number of segments allocated at first try       */
-  struct scatterlist sg[1];    /* MUST BE last item                               */
-} ;
-
-/* The OnStream tape drive descriptor */
-struct osst_tape {
-  struct scsi_driver *driver;
-  unsigned capacity;
-  struct scsi_device *device;
-  struct mutex lock;           /* for serialization */
-  struct completion wait;      /* for SCSI commands */
-  struct osst_buffer * buffer;
-
-  /* Drive characteristics */
-  unsigned char omit_blklims;
-  unsigned char do_auto_lock;
-  unsigned char can_bsr;
-  unsigned char can_partitions;
-  unsigned char two_fm;
-  unsigned char fast_mteom;
-  unsigned char restr_dma;
-  unsigned char scsi2_logical;
-  unsigned char default_drvbuffer;  /* 0xff = don't touch, value 3 bits */
-  unsigned char pos_unknown;        /* after reset position unknown */
-  int write_threshold;
-  int timeout;			/* timeout for normal commands */
-  int long_timeout;		/* timeout for commands known to take long time*/
-
-  /* Mode characteristics */
-  struct st_modedef modes[ST_NBR_MODES];
-  int current_mode;
-
-  /* Status variables */
-  int partition;
-  int new_partition;
-  int nbr_partitions;    /* zero until partition support enabled */
-  struct st_partstat ps[ST_NBR_PARTITIONS];
-  unsigned char dirty;
-  unsigned char ready;
-  unsigned char write_prot;
-  unsigned char drv_write_prot;
-  unsigned char in_use;
-  unsigned char blksize_changed;
-  unsigned char density_changed;
-  unsigned char compression_changed;
-  unsigned char drv_buffer;
-  unsigned char density;
-  unsigned char door_locked;
-  unsigned char rew_at_close;
-  unsigned char inited;
-  int block_size;
-  int min_block;
-  int max_block;
-  int recover_count;            /* from tape opening */
-  int abort_count;
-  int write_count;
-  int read_count;
-  int recover_erreg;            /* from last status call */
-  /*
-   * OnStream specific data
-   */
-  int	   os_fw_rev;			       /* the firmware revision * 10000 */
-  unsigned char  raw;                          /* flag OnStream raw access (32.5KB block size) */
-  unsigned char  poll;                         /* flag that this drive needs polling (IDE|firmware) */
-  unsigned char  frame_in_buffer;	       /* flag that the frame as per frame_seq_number
-						* has been read into STp->buffer and is valid */
-  int      frame_seq_number;                   /* logical frame number */
-  int      logical_blk_num;                    /* logical block number */
-  unsigned first_frame_position;               /* physical frame to be transferred to/from host */
-  unsigned last_frame_position;                /* physical frame to be transferred to/from tape */
-  int      cur_frames;                         /* current number of frames in internal buffer */
-  int      max_frames;                         /* max number of frames in internal buffer */
-  char     application_sig[5];                 /* application signature */
-  unsigned char  fast_open;                    /* flag that reminds us we didn't check headers at open */
-  unsigned short wrt_pass_cntr;                /* write pass counter */
-  int      update_frame_cntr;                  /* update frame counter */
-  int      onstream_write_error;               /* write error recovery active */
-  int      header_ok;                          /* header frame verified ok */
-  int      linux_media;                        /* reading linux-specific media */
-  int      linux_media_version;
-  os_header_t * header_cache;		       /* cache is kept for filemark positions */
-  int      filemark_cnt;
-  int      first_mark_ppos;
-  int      last_mark_ppos;
-  int      last_mark_lbn;			/* storing log_blk_num of last mark extends the ADR spec */
-  int      first_data_ppos;
-  int      eod_frame_ppos;
-  int      eod_frame_lfa;
-  int      write_type;				/* used in write error recovery */
-  int      read_error_frame;			/* used in read error recovery */
-  unsigned long cmd_start_time;
-  unsigned long max_cmd_time;
-
-#if DEBUG
-  unsigned char write_pending;
-  int nbr_finished;
-  int nbr_waits;
-  unsigned char last_cmnd[6];
-  unsigned char last_sense[16];
-#endif
-  struct gendisk *drive;
-} ;
-
-/* scsi tape command */
-struct osst_request {
-	unsigned char cmd[MAX_COMMAND_SIZE];
-	unsigned char sense[SCSI_SENSE_BUFFERSIZE];
-	int result;
-	struct osst_tape *stp;
-	struct completion *waiting;
-	struct bio *bio;
-};
-
-/* Values of write_type */
-#define OS_WRITE_DATA      0
-#define OS_WRITE_EOD       1
-#define OS_WRITE_NEW_MARK  2
-#define OS_WRITE_LAST_MARK 3
-#define OS_WRITE_HEADER    4
-#define OS_WRITE_FILLER    5
-
-/* Additional rw state */
-#define OS_WRITING_COMPLETE 3
diff --git a/drivers/scsi/osst_detect.h b/drivers/scsi/osst_detect.h
deleted file mode 100644
index 83c1d4f..0000000
--- a/drivers/scsi/osst_detect.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#define SIGS_FROM_OSST \
-       {"OnStream", "SC-", "", "osst"}, \
-       {"OnStream", "DI-", "", "osst"}, \
-       {"OnStream", "DP-", "", "osst"}, \
-       {"OnStream", "FW-", "", "osst"}, \
-       {"OnStream", "USB", "", "osst"}
diff --git a/drivers/scsi/osst_options.h b/drivers/scsi/osst_options.h
deleted file mode 100644
index a6a389b..0000000
--- a/drivers/scsi/osst_options.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
-   The compile-time configurable defaults for the Linux SCSI tape driver.
-
-   Copyright 1995 Kai Makisara.
-   
-   Last modified: Wed Sep  2 21:24:07 1998 by root@home
-   
-   Changed (and renamed) for OnStream SCSI drives garloff@suse.de
-   2000-06-21
-
-   $Header: /cvsroot/osst/Driver/osst_options.h,v 1.6 2003/12/23 14:22:12 wriede Exp $
-*/
-
-#ifndef _OSST_OPTIONS_H
-#define _OSST_OPTIONS_H
-
-/* The minimum limit for the number of SCSI tape devices is determined by
-   OSST_MAX_TAPES. If the number of tape devices plus the "slack" defined by
-   OSST_EXTRA_DEVS exceeds OSST_MAX_TAPES, the larger number is used. */
-#define OSST_MAX_TAPES 4
-
-/* If OSST_IN_FILE_POS is nonzero, the driver positions the tape after the
-   record has been read by the user program even if the tape has moved further
-   because of buffered reads. Set this to zero to also support drives
-   that can't space backwards over records. NOTE: The tape will be
-   spaced backwards over an "accidentally" crossed filemark in any case. */
-#define OSST_IN_FILE_POS 1
-
-/* The tape driver buffer size in kilobytes. */
-/* Don't change, as this is the HW blocksize */
-#define OSST_BUFFER_BLOCKS 32
-
-/* The number of kilobytes of data in the buffer that triggers an
-   asynchronous write in fixed block mode. See also OSST_ASYNC_WRITES
-   below. */
-#define OSST_WRITE_THRESHOLD_BLOCKS 32
-
-/* OSST_EOM_RESERVE defines the number of frames kept in reserve for
-   write error recovery when writing near end of medium. ENOSPC is returned
-   when write() is called and the tape write position is within this number
-   of blocks from the tape capacity. */
-#define OSST_EOM_RESERVE 300
-
-/* The maximum number of tape buffers the driver allocates. The number
-   is also constrained by the number of drives detected. Determines the
-   maximum number of concurrently active tape drives. */
-#define OSST_MAX_BUFFERS OSST_MAX_TAPES 
-
-/* Maximum number of scatter/gather segments */
-/* Fit one buffer in pages and add one for the AUX header */
-#define OSST_MAX_SG      (((OSST_BUFFER_BLOCKS*1024) / PAGE_SIZE) + 1)
-
-/* The number of scatter/gather segments to allocate at first try (must be
-   smaller or equal to the maximum). */
-#define OSST_FIRST_SG    ((OSST_BUFFER_BLOCKS*1024) / PAGE_SIZE)
-
-/* The size of the first scatter/gather segments (determines the maximum block
-   size for SCSI adapters not supporting scatter/gather). The default is set
-   to try to allocate the buffer as one chunk. */
-#define OSST_FIRST_ORDER  (15-PAGE_SHIFT)
-
-
-/* The following lines define defaults for properties that can be set
-   separately for each drive using the MTSTOPTIONS ioctl. */
-
-/* If OSST_TWO_FM is non-zero, the driver writes two filemarks after a
-   file being written. Some drives can't handle two filemarks at the
-   end of data. */
-#define OSST_TWO_FM 0
-
-/* If OSST_BUFFER_WRITES is non-zero, writes in fixed block mode are
-   buffered until the driver buffer is full or asynchronous write is
-   triggered. */
-#define OSST_BUFFER_WRITES 1
-
-/* If OSST_ASYNC_WRITES is non-zero, the SCSI write command may be started
-   without waiting for it to finish. May cause problems in multiple
-   tape backups. */
-#define OSST_ASYNC_WRITES 1
-
-/* If OSST_READ_AHEAD is non-zero, blocks are read ahead in fixed block
-   mode. */
-#define OSST_READ_AHEAD 1
-
-/* If OSST_AUTO_LOCK is non-zero, the drive door is locked at the first
-   read or write command after the device is opened. The door is opened
-   when the device is closed. */
-#define OSST_AUTO_LOCK 0
-
-/* If OSST_FAST_MTEOM is non-zero, the MTEOM ioctl is done using the
-   direct SCSI command. The file number status is lost but this method
-   is fast with some drives. Otherwise MTEOM is done by spacing over
-   files and the file number status is retained. */
-#define OSST_FAST_MTEOM 0
-
-/* If OSST_SCSI2LOGICAL is nonzero, the logical block addresses are used for
-   MTIOCPOS and MTSEEK by default. Vendor addresses are used if OSST_SCSI2LOGICAL
-   is zero. */
-#define OSST_SCSI2LOGICAL 0
-
-/* If OSST_SYSV is non-zero, the tape behaves according to the SYS V semantics.
-   The default is BSD semantics. */
-#define OSST_SYSV 0
-
-
-#endif
diff --git a/drivers/scsi/pcmcia/Kconfig b/drivers/scsi/pcmcia/Kconfig
index 2d435f1..2368f34 100644
--- a/drivers/scsi/pcmcia/Kconfig
+++ b/drivers/scsi/pcmcia/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 #
 # PCMCIA SCSI adapter configuration
 #
@@ -19,6 +20,16 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called aha152x_cs.
 
+config PCMCIA_FDOMAIN
+	tristate "Future Domain PCMCIA support"
+	select SCSI_FDOMAIN
+	help
+	  Say Y here if you intend to attach this type of PCMCIA SCSI host
+	  adapter to your computer.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called fdomain_cs.
+
 config PCMCIA_NINJA_SCSI
 	tristate "NinjaSCSI-3 / NinjaSCSI-32Bi (16bit) PCMCIA support"
 	depends on !64BIT
diff --git a/drivers/scsi/pcmcia/Makefile b/drivers/scsi/pcmcia/Makefile
index faa87a4..02f5b44 100644
--- a/drivers/scsi/pcmcia/Makefile
+++ b/drivers/scsi/pcmcia/Makefile
@@ -1,9 +1,10 @@
 # SPDX-License-Identifier: GPL-2.0
 
-ccflags-y		:= -Idrivers/scsi
+ccflags-y		:= -I $(srctree)/drivers/scsi
 
 # 16-bit client drivers
 obj-$(CONFIG_PCMCIA_QLOGIC)	+= qlogic_cs.o
+obj-$(CONFIG_PCMCIA_FDOMAIN)	+= fdomain_cs.o
 obj-$(CONFIG_PCMCIA_AHA152X)	+= aha152x_cs.o
 obj-$(CONFIG_PCMCIA_NINJA_SCSI)	+= nsp_cs.o
 obj-$(CONFIG_PCMCIA_SYM53C500)	+= sym53c500_cs.o
diff --git a/drivers/scsi/pcmcia/aha152x_core.c b/drivers/scsi/pcmcia/aha152x_core.c
index dba3716..24b8922 100644
--- a/drivers/scsi/pcmcia/aha152x_core.c
+++ b/drivers/scsi/pcmcia/aha152x_core.c
@@ -1,3 +1,3 @@
-#define PCMCIA	1
+#define AHA152X_PCMCIA 1
 #define AHA152X_STAT 1
 #include "aha152x.c"
diff --git a/drivers/scsi/pcmcia/aha152x_stub.c b/drivers/scsi/pcmcia/aha152x_stub.c
index 7d1609f..df82a34 100644
--- a/drivers/scsi/pcmcia/aha152x_stub.c
+++ b/drivers/scsi/pcmcia/aha152x_stub.c
@@ -220,16 +220,4 @@
 	.id_table       = aha152x_ids,
 	.resume		= aha152x_resume,
 };
-
-static int __init init_aha152x_cs(void)
-{
-	return pcmcia_register_driver(&aha152x_cs_driver);
-}
-
-static void __exit exit_aha152x_cs(void)
-{
-	pcmcia_unregister_driver(&aha152x_cs_driver);
-}
-
-module_init(init_aha152x_cs);
-module_exit(exit_aha152x_cs);
+module_pcmcia_driver(aha152x_cs_driver);
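
For reference, module_pcmcia_driver() generates exactly the init/exit pair deleted above. A minimal sketch of the expansion, assuming the macro from <pcmcia/ds.h> and the module_driver() wrapper from <linux/device.h>:

	#define module_pcmcia_driver(__pcmcia_driver) \
		module_driver(__pcmcia_driver, pcmcia_register_driver, \
			      pcmcia_unregister_driver)

	/* which, through module_driver(), expands to roughly: */
	static int __init aha152x_cs_driver_init(void)
	{
		return pcmcia_register_driver(&aha152x_cs_driver);
	}
	module_init(aha152x_cs_driver_init);

	static void __exit aha152x_cs_driver_exit(void)
	{
		pcmcia_unregister_driver(&aha152x_cs_driver);
	}
	module_exit(aha152x_cs_driver_exit);

The same substitution is applied to nsp_cs, qlogic_cs and sym53c500_cs in the hunks below.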
diff --git a/drivers/scsi/pcmcia/fdomain_cs.c b/drivers/scsi/pcmcia/fdomain_cs.c
new file mode 100644
index 0000000..e42acf3
--- /dev/null
+++ b/drivers/scsi/pcmcia/fdomain_cs.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
+/*
+ * Driver for Future Domain-compatible PCMCIA SCSI cards
+ * Copyright 2019 Ondrej Zary
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>.  Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds.  All Rights Reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <scsi/scsi_host.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+#include "fdomain.h"
+
+MODULE_AUTHOR("Ondrej Zary, David Hinds");
+MODULE_DESCRIPTION("Future Domain PCMCIA SCSI driver");
+MODULE_LICENSE("Dual MPL/GPL");
+
+static int fdomain_config_check(struct pcmcia_device *p_dev, void *priv_data)
+{
+	p_dev->io_lines = 10;
+	p_dev->resource[0]->end = FDOMAIN_REGION_SIZE;
+	p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+	p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
+	return pcmcia_request_io(p_dev);
+}
+
+static int fdomain_probe(struct pcmcia_device *link)
+{
+	int ret;
+	struct Scsi_Host *sh;
+
+	link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
+	link->config_regs = PRESENT_OPTION;
+
+	ret = pcmcia_loop_config(link, fdomain_config_check, NULL);
+	if (ret)
+		return ret;
+
+	ret = pcmcia_enable_device(link);
+	if (ret)
+		goto fail_disable;
+
+	if (!request_region(link->resource[0]->start, FDOMAIN_REGION_SIZE,
+			    "fdomain_cs")) {
+		ret = -EBUSY;
+		goto fail_disable;
+	}
+
+	sh = fdomain_create(link->resource[0]->start, link->irq, 7, &link->dev);
+	if (!sh) {
+		dev_err(&link->dev, "Controller initialization failed\n");
+		ret = -ENODEV;
+		goto fail_release;
+	}
+
+	link->priv = sh;
+
+	return 0;
+
+fail_release:
+	release_region(link->resource[0]->start, FDOMAIN_REGION_SIZE);
+fail_disable:
+	pcmcia_disable_device(link);
+	return ret;
+}
+
+static void fdomain_remove(struct pcmcia_device *link)
+{
+	fdomain_destroy(link->priv);
+	release_region(link->resource[0]->start, FDOMAIN_REGION_SIZE);
+	pcmcia_disable_device(link);
+}
+
+static const struct pcmcia_device_id fdomain_ids[] = {
+	PCMCIA_DEVICE_PROD_ID12("IBM Corp.", "SCSI PCMCIA Card", 0xe3736c88,
+				0x859cad20),
+	PCMCIA_DEVICE_PROD_ID1("SCSI PCMCIA Adapter Card", 0x8dacb57e),
+	PCMCIA_DEVICE_PROD_ID12(" SIMPLE TECHNOLOGY Corporation",
+				"SCSI PCMCIA Credit Card Controller",
+				0x182bdafe, 0xc80d106f),
+	PCMCIA_DEVICE_NULL,
+};
+MODULE_DEVICE_TABLE(pcmcia, fdomain_ids);
+
+static struct pcmcia_driver fdomain_cs_driver = {
+	.owner		= THIS_MODULE,
+	.name		= "fdomain_cs",
+	.probe		= fdomain_probe,
+	.remove		= fdomain_remove,
+	.id_table       = fdomain_ids,
+};
+
+module_pcmcia_driver(fdomain_cs_driver);
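
fdomain_probe() above acquires its resources in a fixed order (pcmcia_enable_device(), request_region(), fdomain_create()) and unwinds in reverse on failure, which is also why fdomain_remove() releases the same three in the opposite order of acquisition. A self-contained sketch of that goto-unwind shape; the step names are placeholders, not driver functions:

	#include <stdio.h>

	static int step(const char *name, int fail)
	{
		printf("acquire %s\n", name);
		return fail ? -1 : 0;
	}

	static void undo(const char *name)
	{
		printf("release %s\n", name);
	}

	/* fail_at selects which acquisition fails (0 = none) */
	static int probe_pattern(int fail_at)
	{
		int ret;

		ret = step("device", fail_at == 1);	/* pcmcia_enable_device() */
		if (ret)
			return ret;
		ret = step("region", fail_at == 2);	/* request_region() */
		if (ret)
			goto undo_device;
		ret = step("host", fail_at == 3);	/* fdomain_create() */
		if (ret)
			goto undo_region;
		return 0;

	undo_region:
		undo("region");
	undo_device:
		undo("device");
		return ret;
	}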
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index 5fb6eef..97416e1 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -86,7 +86,7 @@
 	.can_queue		 = 1,
 	.this_id		 = NSP_INITIATOR_ID,
 	.sg_tablesize		 = SG_ALL,
-	.use_clustering		 = DISABLE_CLUSTERING,
+	.dma_boundary		 = PAGE_SIZE - 1,
 };
 
 static nsp_hw_data nsp_data_base; /* attach <-> detect glue */
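
The removed .use_clustering flag and the new .dma_boundary mask express the same constraint: with a boundary of PAGE_SIZE - 1 the block layer will not build a scatter/gather segment that crosses a page boundary, preserving the old no-clustering behaviour. A self-contained sketch of what such a mask tests, under the assumption that "same window" is the merge criterion it encodes:

	#include <stdbool.h>

	#define SKETCH_PAGE_SIZE 4096UL

	/* Two byte addresses may share a DMA segment only if the bits
	 * above the mask agree, i.e. they lie in the same window. */
	static bool same_dma_window(unsigned long a, unsigned long b,
				    unsigned long boundary_mask)
	{
		return (a & ~boundary_mask) == (b & ~boundary_mask);
	}

	/* same_dma_window(0x1000, 0x1ffc, SKETCH_PAGE_SIZE - 1) -> true
	 * same_dma_window(0x1ffc, 0x2000, SKETCH_PAGE_SIZE - 1) -> false */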
@@ -789,7 +789,7 @@
 		    SCpnt->SCp.buffers_residual != 0 ) {
 			//nsp_dbg(NSP_DEBUG_DATA_IO, "scatterlist next timeout=%d", time_out);
 			SCpnt->SCp.buffers_residual--;
-			SCpnt->SCp.buffer++;
+			SCpnt->SCp.buffer = sg_next(SCpnt->SCp.buffer);
 			SCpnt->SCp.ptr		 = BUFFER_ADDR;
 			SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
 			time_out = 1000;
@@ -887,7 +887,7 @@
 		    SCpnt->SCp.buffers_residual != 0 ) {
 			//nsp_dbg(NSP_DEBUG_DATA_IO, "scatterlist next");
 			SCpnt->SCp.buffers_residual--;
-			SCpnt->SCp.buffer++;
+			SCpnt->SCp.buffer = sg_next(SCpnt->SCp.buffer);
 			SCpnt->SCp.ptr		 = BUFFER_ADDR;
 			SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
 			time_out = 1000;
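
Replacing SCp.buffer++ with sg_next() matters because scatterlists can be chained: the last entry of one array may be a link to the next array, so raw pointer arithmetic can step onto a chain entry and misread it as a data segment. The canonical traversal, assuming the for_each_sg() helper from <linux/scatterlist.h>:

	#include <linux/scatterlist.h>

	/* Sum segment lengths over a possibly chained scatterlist;
	 * for_each_sg() advances with sg_next(), which follows chain
	 * links instead of walking off the end of an array. */
	static unsigned int total_sg_len(struct scatterlist *sgl, int nents)
	{
		struct scatterlist *sg;
		unsigned int len = 0;
		int i;

		for_each_sg(sgl, sg, nents, i)
			len += sg->length;
		return len;
	}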
@@ -1134,7 +1134,8 @@
 
 		//*sync_neg       = SYNC_NOT_YET;
 
-		if ((tmpSC->SCp.Message == MSG_COMMAND_COMPLETE)) {     /* all command complete and return status */
+		/* all command complete and return status */
+		if (tmpSC->SCp.Message == MSG_COMMAND_COMPLETE) {
 			tmpSC->result = (DID_OK		             << 16) |
 					((tmpSC->SCp.Message & 0xff) <<  8) |
 					((tmpSC->SCp.Status  & 0xff) <<  0);
@@ -1742,19 +1743,6 @@
 	.suspend	= nsp_cs_suspend,
 	.resume		= nsp_cs_resume,
 };
-
-static int __init nsp_cs_init(void)
-{
-	return pcmcia_register_driver(&nsp_driver);
-}
-
-static void __exit nsp_cs_exit(void)
-{
-	pcmcia_unregister_driver(&nsp_driver);
-}
-
-
-module_init(nsp_cs_init)
-module_exit(nsp_cs_exit)
+module_pcmcia_driver(nsp_driver);
 
 /* end */
diff --git a/drivers/scsi/pcmcia/nsp_cs.h b/drivers/scsi/pcmcia/nsp_cs.h
index afd64f0..ea5122f 100644
--- a/drivers/scsi/pcmcia/nsp_cs.h
+++ b/drivers/scsi/pcmcia/nsp_cs.h
@@ -326,10 +326,6 @@
 /* Interrupt handler */
 //static irqreturn_t nspintr(int irq, void *dev_id);
 
-/* Module entry point*/
-static int  __init nsp_cs_init(void);
-static void __exit nsp_cs_exit(void);
-
 /* Debug */
 #ifdef NSP_DEBUG
 static void show_command (struct scsi_cmnd *SCpnt);
diff --git a/drivers/scsi/pcmcia/qlogic_stub.c b/drivers/scsi/pcmcia/qlogic_stub.c
index 0556054..828d53f 100644
--- a/drivers/scsi/pcmcia/qlogic_stub.c
+++ b/drivers/scsi/pcmcia/qlogic_stub.c
@@ -72,7 +72,7 @@
 	.can_queue		= 1,
 	.this_id		= -1,
 	.sg_tablesize		= SG_ALL,
-	.use_clustering		= DISABLE_CLUSTERING,
+	.dma_boundary		= PAGE_SIZE - 1,
 };
 
 /*====================================================================*/
@@ -254,8 +254,12 @@
 static int qlogic_resume(struct pcmcia_device *link)
 {
 	scsi_info_t *info = link->priv;
+	int ret;
 
-	pcmcia_enable_device(link);
+	ret = pcmcia_enable_device(link);
+	if (ret)
+		return ret;
+
 	if ((info->manf_id == MANFID_MACNICA) ||
 	    (info->manf_id == MANFID_PIONEER) ||
 	    (info->manf_id == 0x0098)) {
@@ -300,18 +304,7 @@
 	.resume		= qlogic_resume,
 };
 
-static int __init init_qlogic_cs(void)
-{
-	return pcmcia_register_driver(&qlogic_cs_driver);
-}
-
-static void __exit exit_qlogic_cs(void)
-{
-	pcmcia_unregister_driver(&qlogic_cs_driver);
-}
-
 MODULE_AUTHOR("Tom Zerucha, Michael Griffith");
 MODULE_DESCRIPTION("Driver for the PCMCIA Qlogic FAS SCSI controllers");
 MODULE_LICENSE("GPL");
-module_init(init_qlogic_cs);
-module_exit(exit_qlogic_cs);
+module_pcmcia_driver(qlogic_cs_driver);
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
index 20011c8..a366ff1 100644
--- a/drivers/scsi/pcmcia/sym53c500_cs.c
+++ b/drivers/scsi/pcmcia/sym53c500_cs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
 *  sym53c500_cs.c	Bob Tracy (rct@frus.com)
 *
@@ -25,16 +26,6 @@
 *	Original by Tom Corner (tcorner@via.at) was adapted from a
 *	driver for the Qlogic SCSI card written by
 *	David Hinds (dhinds@allegro.stanford.edu).
-* 
-*  This program is free software; you can redistribute it and/or modify it
-*  under the terms of the GNU General Public License as published by the
-*  Free Software Foundation; either version 2, or (at your option) any
-*  later version.
-*
-*  This program is distributed in the hope that it will be useful, but
-*  WITHOUT ANY WARRANTY; without even the implied warranty of
-*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-*  General Public License for more details.
 */
 
 #define SYM53C500_DEBUG 0
@@ -680,7 +671,6 @@
      .can_queue			= 1,
      .this_id			= 7,
      .sg_tablesize		= 32,
-     .use_clustering		= ENABLE_CLUSTERING,
      .shost_attrs		= SYM53C500_shost_attrs
 };
 
@@ -880,18 +870,4 @@
 	.id_table       = sym53c500_ids,
 	.resume		= sym53c500_resume,
 };
-
-static int __init
-init_sym53c500_cs(void)
-{
-	return pcmcia_register_driver(&sym53c500_cs_driver);
-}
-
-static void __exit
-exit_sym53c500_cs(void)
-{
-	pcmcia_unregister_driver(&sym53c500_cs_driver);
-}
-
-module_init(init_sym53c500_cs);
-module_exit(exit_sym53c500_cs);
+module_pcmcia_driver(sym53c500_cs_driver);
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index d193961..6b85016 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -462,6 +462,24 @@
 }
 static DEVICE_ATTR(bios_version, S_IRUGO, pm8001_ctl_bios_version_show, NULL);
 /**
+ * event_log_size_show - event log size
+ * @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
+ * @buf: the buffer returned
+ *
+ * A sysfs read shost attribute.
+ */
+static ssize_t event_log_size_show(struct device *cdev,
+	struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+		pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size);
+}
+static DEVICE_ATTR_RO(event_log_size);
+/**
  * pm8001_ctl_iop_log_show - IOP event log
  * @cdev: pointer to embedded class device
  * @buf: the buffer returned
@@ -474,25 +492,26 @@
 	struct Scsi_Host *shost = class_to_shost(cdev);
 	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
 	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
-#define IOP_MEMMAP(r, c) \
-	(*(u32 *)((u8*)pm8001_ha->memoryMap.region[IOP].virt_ptr + (r) * 32 \
-	+ (c)))
-	int i;
 	char *str = buf;
-	int max = 2;
-	for (i = 0; i < max; i++) {
-		str += sprintf(str, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x"
-			       "0x%08x 0x%08x\n",
-			       IOP_MEMMAP(i, 0),
-			       IOP_MEMMAP(i, 4),
-			       IOP_MEMMAP(i, 8),
-			       IOP_MEMMAP(i, 12),
-			       IOP_MEMMAP(i, 16),
-			       IOP_MEMMAP(i, 20),
-			       IOP_MEMMAP(i, 24),
-			       IOP_MEMMAP(i, 28));
+	u32 read_size =
+		pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size / 1024;
+	static u32 start, end, count;
+	u32 max_read_times = 32;
+	u32 max_count = (read_size * 1024) / (max_read_times * 4);
+	u32 *temp = (u32 *)pm8001_ha->memoryMap.region[IOP].virt_ptr;
+
+	if ((count % max_count) == 0) {
+		start = 0;
+		end = max_read_times;
+		count = 0;
+	} else {
+		start = end;
+		end = end + max_read_times;
 	}
 
+	for (; start < end; start++)
+		str += sprintf(str, "%08x ", *(temp+start));
+	count++;
 	return str - buf;
 }
 static DEVICE_ATTR(iop_log, S_IRUGO, pm8001_ctl_iop_log_show, NULL);
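
The rewritten iop_log read handler pages through the whole IOP event log rather than dumping a fixed two rows: each read emits max_read_times (32) dwords, the static start/end/count triple remembers the cursor between reads, and after max_count reads the cursor wraps to the beginning. For example, an 8 KiB log gives read_size = 8 and max_count = 8192 / 128 = 64 windows of 128 bytes each. A sketch of the same cursor arithmetic in closed form:

	/* Window n of a log of log_bytes covers dwords
	 * [n * 32, n * 32 + 32); the cursor wraps every
	 * log_bytes / 128 reads, matching the static
	 * start/end/count logic above. */
	struct log_window { unsigned int start, end; };

	static struct log_window iop_log_window(unsigned int n,
						unsigned int log_bytes)
	{
		const unsigned int per_read = 32;	/* dwords */
		unsigned int max_count = log_bytes / (per_read * 4);
		struct log_window w;

		n %= max_count;				/* wrap */
		w.start = n * per_read;
		w.end = w.start + per_read;
		return w;
	}

Note the cursor lives in static locals, i.e. module-global state, so concurrent readers of the sysfs file would interleave windows.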
@@ -796,6 +815,7 @@
 	&dev_attr_max_sg_list,
 	&dev_attr_sas_spec_support,
 	&dev_attr_logging_level,
+	&dev_attr_event_log_size,
 	&dev_attr_host_sas_address,
 	&dev_attr_bios_version,
 	&dev_attr_ib_log,
diff --git a/drivers/scsi/pm8001/pm8001_defs.h b/drivers/scsi/pm8001/pm8001_defs.h
index 199527d..48e0624 100644
--- a/drivers/scsi/pm8001/pm8001_defs.h
+++ b/drivers/scsi/pm8001/pm8001_defs.h
@@ -132,4 +132,12 @@
 	PM8001F_RUN_TIME	= (1U << 1),
 };
 
+/*
+ * Phy Status
+ */
+#define PHY_LINK_DISABLE	0x00
+#define PHY_LINK_DOWN		0x01
+#define PHY_STATE_LINK_UP_SPCV	0x2
+#define PHY_STATE_LINK_UP_SPC	0x1
+
 #endif
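
These phy-state values consolidate encodings previously split between the SPC and SPCv code (the SPC-only PHY_STATE_LINK_UP_SPC definition is dropped from pm8001_hwi.h further down): both generations share the disable/down codes but report link-up differently, and the numeric overlap between PHY_LINK_DOWN and PHY_STATE_LINK_UP_SPC is real, since the two are read on different controller generations. A hedged sketch of a chip-aware test; phy_link_is_up() is illustrative, not a driver function:

	/* Assumption: callers compare the raw phy_state byte against
	 * the link-up code of the controller generation at hand. */
	#define PHY_LINK_DISABLE	0x00
	#define PHY_LINK_DOWN		0x01
	#define PHY_STATE_LINK_UP_SPC	0x1
	#define PHY_STATE_LINK_UP_SPCV	0x2

	static int phy_link_is_up(unsigned char phy_state, int is_spcv)
	{
		return phy_state == (is_spcv ? PHY_STATE_LINK_UP_SPCV
					     : PHY_STATE_LINK_UP_SPC);
	}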
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 4dd6cad..68a8217 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -960,9 +960,9 @@
 		return -1;
 	}
 	regVal = pm8001_cr32(pm8001_ha, 2, GPIO_GPIO_0_0UTPUT_CTL_OFFSET);
-		PM8001_INIT_DBG(pm8001_ha,
-				pm8001_printk("GPIO Output Control Register:"
-				" = 0x%x\n", regVal));
+	PM8001_INIT_DBG(pm8001_ha,
+			pm8001_printk("GPIO Output Control Register:"
+			" = 0x%x\n", regVal));
 	/* set GPIO-0 output control to tri-state */
 	regVal &= 0xFFFFFFFC;
 	pm8001_cw32(pm8001_ha, 2, GPIO_GPIO_0_0UTPUT_CTL_OFFSET, regVal);
@@ -1204,6 +1204,7 @@
 	}
 }
 
+#ifndef PM8001_USE_MSIX
 /**
  * pm8001_chip_interrupt_enable - enable PM8001 chip interrupt
  * @pm8001_ha: our hba card information
@@ -1225,6 +1226,8 @@
 	pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_MASK_ALL);
 }
 
+#else
+
 /**
  * pm8001_chip_msix_interrupt_enable - enable PM8001 chip interrupt
  * @pm8001_ha: our hba card information
@@ -1256,6 +1259,7 @@
 	msi_index += MSIX_TABLE_BASE;
 	pm8001_cw32(pm8001_ha, 0,  msi_index, MSIX_INTERRUPT_DISABLE);
 }
+#endif
 
 /**
  * pm8001_chip_interrupt_enable - enable PM8001 chip interrupt
@@ -1266,10 +1270,9 @@
 {
 #ifdef PM8001_USE_MSIX
 	pm8001_chip_msix_interrupt_enable(pm8001_ha, 0);
-	return;
-#endif
+#else
 	pm8001_chip_intx_interrupt_enable(pm8001_ha);
-
+#endif
 }
 
 /**
@@ -1281,10 +1284,9 @@
 {
 #ifdef PM8001_USE_MSIX
 	pm8001_chip_msix_interrupt_disable(pm8001_ha, 0);
-	return;
-#endif
+#else
 	pm8001_chip_intx_interrupt_disable(pm8001_ha);
-
+#endif
 }
 
 /**
@@ -1479,6 +1481,12 @@
 		} else {
 			u32 producer_index;
 			void *pi_virt = circularQ->pi_virt;
+			/* Guard against a spurious interrupt during setup:
+			 * when kexec-ing, a doorbell access may still be
+			 * routed through the pre-kexec outbound queue
+			 * interrupt setup before pi_virt is mapped.
+			 */
+			if (!pi_virt)
+				break;
 			/* Update the producer index from SPC */
 			producer_index = pm8001_read_32(pi_virt);
 			circularQ->producer_index = cpu_to_le32(producer_index);
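
The new pi_virt check covers a kexec corner case: the rebooted kernel can field an interrupt raised through the previous kernel's outbound-queue doorbell setup before its own producer-index page is mapped, so the handler must bail out instead of dereferencing a NULL pointer. The defensive shape, as a self-contained sketch:

	#include <stddef.h>

	struct oq_sketch {
		volatile unsigned int *pi_virt;	/* NULL until queues are set up */
	};

	/* Returns -1 (and reads nothing) on a spurious, pre-init interrupt. */
	static int read_producer_index(struct oq_sketch *q, unsigned int *out)
	{
		if (!q->pi_virt)
			return -1;
		*out = *q->pi_virt;
		return 0;
	}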
@@ -2348,7 +2356,7 @@
 	if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
 		(status != IO_UNDERFLOW)) {
 		if (!((t->dev->parent) &&
-			(DEV_IS_EXPANDER(t->dev->parent->dev_type)))) {
+			(dev_is_expander(t->dev->parent->dev_type)))) {
 			for (i = 0 , j = 4; j <= 7 && i <= 3; i++ , j++)
 				sata_addr_low[i] = pm8001_ha->sas_addr[j];
 			for (i = 0 , j = 0; j <= 3 && i <= 3; i++ , j++)
@@ -2414,7 +2422,7 @@
 			sata_resp = &psataPayload->sata_resp[0];
 			resp = (struct ata_task_resp *)ts->buf;
 			if (t->ata_task.dma_xfer == 0 &&
-			t->data_dir == PCI_DMA_FROMDEVICE) {
+			    t->data_dir == DMA_FROM_DEVICE) {
 				len = sizeof(struct pio_setup_fis);
 				PM8001_IO_DBG(pm8001_ha,
 				pm8001_printk("PIO read len = %d\n", len));
@@ -2892,7 +2900,6 @@
 static void
 mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 {
-	u32 param;
 	struct sas_task *t;
 	struct pm8001_ccb_info *ccb;
 	unsigned long flags;
@@ -2907,7 +2914,6 @@
 	tag = le32_to_cpu(psmpPayload->tag);
 
 	ccb = &pm8001_ha->ccb_info[tag];
-	param = le32_to_cpu(psmpPayload->param);
 	t = ccb->task;
 	ts = &t->task_status;
 	pm8001_dev = ccb->device;
@@ -2922,7 +2928,7 @@
 		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
 		ts->resp = SAS_TASK_COMPLETE;
 		ts->stat = SAM_STAT_GOOD;
-	if (pm8001_dev)
+		if (pm8001_dev)
 			pm8001_dev->running_req--;
 		break;
 	case IO_ABORTED:
@@ -3238,11 +3244,9 @@
 {
 	struct pm8001_phy *phy = &pm8001_ha->phy[i];
 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
-	struct sas_ha_struct *sas_ha;
 	if (!phy->phy_attached)
 		return;
 
-	sas_ha = pm8001_ha->sas;
 	if (sas_phy->phy) {
 		struct sas_phy *sphy = sas_phy->phy;
 		sphy->negotiated_linkrate = sas_phy->linkrate;
@@ -3810,7 +3814,8 @@
 			" status = %x\n", status));
 		if (status == 0) {
 			phy->phy_state = 1;
-			if (pm8001_ha->flags == PM8001F_RUN_TIME)
+			if (pm8001_ha->flags == PM8001F_RUN_TIME &&
+					phy->enable_completion != NULL)
 				complete(phy->enable_completion);
 		}
 		break;
@@ -4196,12 +4201,12 @@
 	return ret;
 }
 
-/* PCI_DMA_... to our direction translation. */
+/* DMA_... to our direction translation. */
 static const u8 data_dir_flags[] = {
-	[PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT,/* UNSPECIFIED */
-	[PCI_DMA_TODEVICE]	= DATA_DIR_OUT,/* OUTBOUND */
-	[PCI_DMA_FROMDEVICE]	= DATA_DIR_IN,/* INBOUND */
-	[PCI_DMA_NONE]		= DATA_DIR_NONE,/* NO TRANSFER */
+	[DMA_BIDIRECTIONAL]	= DATA_DIR_BYRECIPIENT,	/* UNSPECIFIED */
+	[DMA_TO_DEVICE]		= DATA_DIR_OUT,		/* OUTBOUND */
+	[DMA_FROM_DEVICE]	= DATA_DIR_IN,		/* INBOUND */
+	[DMA_NONE]		= DATA_DIR_NONE,	/* NO TRANSFER */
 };
 void
 pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd)
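
The table conversion above relies on the generic directions being small consecutive integers: DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE and DMA_NONE are 0..3 in <linux/dma-direction.h>, so task->data_dir can index the array directly. A self-contained sketch of the idiom (the EX_* values are placeholders, not the firmware's real encodings):

    #include <stdint.h>

    enum { EX_DMA_BIDIRECTIONAL, EX_DMA_TO_DEVICE,
           EX_DMA_FROM_DEVICE, EX_DMA_NONE };              /* 0..3 */
    enum { EX_DIR_BYRECIPIENT, EX_DIR_IN, EX_DIR_OUT, EX_DIR_NONE };

    static const uint8_t ex_dir_flags[] = {
            [EX_DMA_BIDIRECTIONAL] = EX_DIR_BYRECIPIENT,   /* unspecified */
            [EX_DMA_TO_DEVICE]     = EX_DIR_OUT,           /* outbound */
            [EX_DMA_FROM_DEVICE]   = EX_DIR_IN,            /* inbound */
            [EX_DMA_NONE]          = EX_DIR_NONE,          /* no transfer */
    };

    static uint8_t ex_to_fw_dir(unsigned int dma_dir)
    {
            return ex_dir_flags[dma_dir];   /* caller guarantees 0..3 */
    }
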
@@ -4248,13 +4253,13 @@
 	 * DMA-map SMP request, response buffers
 	 */
 	sg_req = &task->smp_task.smp_req;
-	elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, PCI_DMA_TODEVICE);
+	elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, DMA_TO_DEVICE);
 	if (!elem)
 		return -ENOMEM;
 	req_len = sg_dma_len(sg_req);
 
 	sg_resp = &task->smp_task.smp_resp;
-	elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
+	elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, DMA_FROM_DEVICE);
 	if (!elem) {
 		rc = -ENOMEM;
 		goto err_out;
@@ -4287,10 +4292,10 @@
 
 err_out_2:
 	dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_resp, 1,
-			PCI_DMA_FROMDEVICE);
+			DMA_FROM_DEVICE);
 err_out:
 	dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_req, 1,
-			PCI_DMA_TODEVICE);
+			DMA_TO_DEVICE);
 	return rc;
 }
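
The hunk above only renames the direction constants, but the surrounding pattern is worth spelling out: dma_map_sg() returns the number of mapped entries (0 on failure), and a failed second mapping must unwind the first. A minimal sketch, assuming the standard <linux/dma-mapping.h> API (ex_map_smp_buffers is a hypothetical helper):

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>
    #include <linux/errno.h>

    /* Map an SMP request to-device and its response from-device;
     * undo the first mapping if the second one fails. */
    static int ex_map_smp_buffers(struct device *dev,
                                  struct scatterlist *req,
                                  struct scatterlist *resp)
    {
            if (!dma_map_sg(dev, req, 1, DMA_TO_DEVICE))
                    return -ENOMEM;
            if (!dma_map_sg(dev, resp, 1, DMA_FROM_DEVICE)) {
                    dma_unmap_sg(dev, req, 1, DMA_TO_DEVICE);
                    return -ENOMEM;
            }
            return 0;
    }
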
 
@@ -4369,7 +4374,7 @@
 	u32  opc = OPC_INB_SATA_HOST_OPSTART;
 	memset(&sata_cmd, 0, sizeof(sata_cmd));
 	circularQ = &pm8001_ha->inbnd_q_tbl[0];
-	if (task->data_dir == PCI_DMA_NONE) {
+	if (task->data_dir == DMA_NONE) {
 		ATAP = 0x04;  /* no data*/
 		PM8001_IO_DBG(pm8001_ha, pm8001_printk("no data\n"));
 	} else if (likely(!task->ata_task.device_control_reg_update)) {
@@ -4555,7 +4560,7 @@
 			pm8001_dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
 			stp_sspsmp_sata = 0x01; /*ssp or smp*/
 	}
-	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
+	if (parent_dev && dev_is_expander(parent_dev->dev_type))
 		phy_id = parent_dev->ex_dev.ex_phy->phy_id;
 	else
 		phy_id = pm8001_dev->attached_phy;
@@ -4620,17 +4625,18 @@
 	return ret;
 }
 
-static u32 pm8001_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha)
+static u32 pm8001_chip_is_our_interrupt(struct pm8001_hba_info *pm8001_ha)
 {
-	u32 value;
 #ifdef PM8001_USE_MSIX
 	return 1;
-#endif
+#else
+	u32 value;
+
 	value = pm8001_cr32(pm8001_ha, 0, MSGU_ODR);
 	if (value)
 		return 1;
 	return 0;
-
+#endif
 }
 
 /**
@@ -5116,7 +5122,7 @@
 	.chip_rst		= pm8001_hw_chip_rst,
 	.chip_iounmap		= pm8001_chip_iounmap,
 	.isr			= pm8001_chip_isr,
-	.is_our_interupt	= pm8001_chip_is_our_interupt,
+	.is_our_interrupt	= pm8001_chip_is_our_interrupt,
 	.isr_process_oq		= process_oq,
 	.interrupt_enable 	= pm8001_chip_interrupt_enable,
 	.interrupt_disable	= pm8001_chip_interrupt_disable,
diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h
index e4867e6..6d91e24 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.h
+++ b/drivers/scsi/pm8001/pm8001_hwi.h
@@ -131,10 +131,6 @@
 #define LINKRATE_30			(0x02 << 8)
 #define LINKRATE_60			(0x04 << 8)
 
-/* for phy state */
-
-#define PHY_STATE_LINK_UP_SPC		0x1
-
 /* for new SPC controllers MEMBASE III is shared between BIOS and DATA */
 #define GSM_SM_BASE			0x4F0000
 struct mpi_msg_hdr{
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 7a697ca..3374f55 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -84,7 +84,6 @@
 	.this_id		= -1,
 	.sg_tablesize		= SG_ALL,
 	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
-	.use_clustering		= ENABLE_CLUSTERING,
 	.eh_device_reset_handler = sas_eh_device_reset_handler,
 	.eh_target_reset_handler = sas_eh_target_reset_handler,
 	.target_destroy		= sas_target_destroy,
@@ -121,7 +120,7 @@
 {
 	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
-	phy->phy_state = 0;
+	phy->phy_state = PHY_LINK_DISABLE;
 	phy->pm8001_ha = pm8001_ha;
 	sas_phy->enabled = (phy_id < pm8001_ha->chip->n_phy) ? 1 : 0;
 	sas_phy->class = SAS;
@@ -152,7 +151,7 @@
 
 	for (i = 0; i < USI_MAX_MEMCNT; i++) {
 		if (pm8001_ha->memoryMap.region[i].virt_ptr != NULL) {
-			pci_free_consistent(pm8001_ha->pdev,
+			dma_free_coherent(&pm8001_ha->pdev->dev,
 				(pm8001_ha->memoryMap.region[i].total_len +
 				pm8001_ha->memoryMap.region[i].alignment),
 				pm8001_ha->memoryMap.region[i].virt_ptr,
@@ -202,7 +201,7 @@
 
 	if (unlikely(!pm8001_ha))
 		return IRQ_NONE;
-	if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha))
+	if (!PM8001_CHIP_DISP->is_our_interrupt(pm8001_ha))
 		return IRQ_NONE;
 #ifdef PM8001_USE_TASKLET
 	tasklet_schedule(&pm8001_ha->tasklet[irq_vector->irq_id]);
@@ -225,7 +224,7 @@
 	pm8001_ha = sha->lldd_ha;
 	if (unlikely(!pm8001_ha))
 		return IRQ_NONE;
-	if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha))
+	if (!PM8001_CHIP_DISP->is_our_interrupt(pm8001_ha))
 		return IRQ_NONE;
 
 #ifdef PM8001_USE_TASKLET
@@ -501,30 +500,12 @@
 {
 	int rc;
 
-	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(44))) {
-		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(44));
-		if (rc) {
-			rc = pci_set_consistent_dma_mask(pdev,
-				DMA_BIT_MASK(32));
-			if (rc) {
-				dev_printk(KERN_ERR, &pdev->dev,
-					"44-bit DMA enable failed\n");
-				return rc;
-			}
-		}
-	} else {
-		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-		if (rc) {
+	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
+	if (rc) {
+		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+		if (rc)
 			dev_printk(KERN_ERR, &pdev->dev,
 				"32-bit DMA enable failed\n");
-			return rc;
-		}
-		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-		if (rc) {
-			dev_printk(KERN_ERR, &pdev->dev,
-				"32-bit consistent DMA enable failed\n");
-			return rc;
-		}
 	}
 	return rc;
 }
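
This is the clearest of the DMA-mask conversions: dma_set_mask_and_coherent() sets the streaming and coherent masks in one call, so the old nested pci_set_dma_mask()/pci_set_consistent_dma_mask() ladder reduces to a simple width fallback. The resulting shape, as a stand-alone sketch (ex_set_dma_masks is hypothetical):

    #include <linux/dma-mapping.h>

    /* Try the device's native 44-bit addressing, fall back to 32-bit. */
    static int ex_set_dma_masks(struct device *dev)
    {
            int rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(44));

            if (rc)
                    rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
            return rc;
    }
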
@@ -1067,6 +1048,7 @@
 	if (rc)
 		goto err_out_shost;
 	scsi_scan_host(pm8001_ha->shost);
+	pm8001_ha->flags = PM8001F_RUN_TIME;
 	return 0;
 
 err_out_shost:
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 947d601..7e48154 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -116,8 +116,8 @@
 	u64 align_offset = 0;
 	if (align)
 		align_offset = (dma_addr_t)align - 1;
-	mem_virt_alloc = pci_zalloc_consistent(pdev, mem_size + align,
-					       &mem_dma_handle);
+	mem_virt_alloc = dma_alloc_coherent(&pdev->dev, mem_size + align,
+					    &mem_dma_handle, GFP_KERNEL);
 	if (!mem_virt_alloc) {
 		pm8001_printk("memory allocation error\n");
 		return -1;
@@ -157,9 +157,12 @@
 	int rc = 0, phy_id = sas_phy->id;
 	struct pm8001_hba_info *pm8001_ha = NULL;
 	struct sas_phy_linkrates *rates;
+	struct sas_ha_struct *sas_ha;
+	struct pm8001_phy *phy;
 	DECLARE_COMPLETION_ONSTACK(completion);
 	unsigned long flags;
 	pm8001_ha = sas_phy->ha->lldd_ha;
+	phy = &pm8001_ha->phy[phy_id];
 	pm8001_ha->phy[phy_id].enable_completion = &completion;
 	switch (func) {
 	case PHY_FUNC_SET_LINK_RATE:
@@ -172,7 +175,7 @@
 			pm8001_ha->phy[phy_id].maximum_linkrate =
 				rates->maximum_linkrate;
 		}
-		if (pm8001_ha->phy[phy_id].phy_state == 0) {
+		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
 			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
 			wait_for_completion(&completion);
 		}
@@ -180,7 +183,7 @@
 					      PHY_LINK_RESET);
 		break;
 	case PHY_FUNC_HARD_RESET:
-		if (pm8001_ha->phy[phy_id].phy_state == 0) {
+		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
 			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
 			wait_for_completion(&completion);
 		}
@@ -188,7 +191,7 @@
 					      PHY_HARD_RESET);
 		break;
 	case PHY_FUNC_LINK_RESET:
-		if (pm8001_ha->phy[phy_id].phy_state == 0) {
+		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
 			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
 			wait_for_completion(&completion);
 		}
@@ -200,6 +203,25 @@
 					      PHY_LINK_RESET);
 		break;
 	case PHY_FUNC_DISABLE:
+		if (pm8001_ha->chip_id != chip_8001) {
+			if (pm8001_ha->phy[phy_id].phy_state ==
+				PHY_STATE_LINK_UP_SPCV) {
+				sas_ha = pm8001_ha->sas;
+				sas_phy_disconnected(&phy->sas_phy);
+				sas_ha->notify_phy_event(&phy->sas_phy,
+					PHYE_LOSS_OF_SIGNAL);
+				phy->phy_attached = 0;
+			}
+		} else {
+			if (pm8001_ha->phy[phy_id].phy_state ==
+				PHY_STATE_LINK_UP_SPC) {
+				sas_ha = pm8001_ha->sas;
+				sas_phy_disconnected(&phy->sas_phy);
+				sas_ha->notify_phy_event(&phy->sas_phy,
+					PHYE_LOSS_OF_SIGNAL);
+				phy->phy_attached = 0;
+			}
+		}
 		PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id);
 		break;
 	case PHY_FUNC_GET_EVENTS:
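
The two new PHY_FUNC_DISABLE branches are identical except for which link-up constant they compare against (PHY_STATE_LINK_UP_SPC on the 8001, PHY_STATE_LINK_UP_SPCV otherwise). An untested follow-up sketch of how they could collapse into one, using only symbols already present in this patch:

    /* Possible de-duplication, not part of this patch: pick the
     * chip-specific link-up state first, then share the teardown. */
    u32 link_up = (pm8001_ha->chip_id == chip_8001) ?
                    PHY_STATE_LINK_UP_SPC : PHY_STATE_LINK_UP_SPCV;

    if (phy->phy_state == link_up) {
            sas_phy_disconnected(&phy->sas_phy);
            pm8001_ha->sas->notify_phy_event(&phy->sas_phy,
                                             PHYE_LOSS_OF_SIGNAL);
            phy->phy_attached = 0;
    }
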
@@ -374,6 +396,13 @@
 		return 0;
 	}
 	pm8001_ha = pm8001_find_ha_by_dev(task->dev);
+	if (pm8001_ha->controller_fatal_error) {
+		struct task_status_struct *ts = &t->task_status;
+
+		ts->resp = SAS_TASK_UNDELIVERED;
+		t->task_done(t);
+		return 0;
+	}
 	PM8001_IO_DBG(pm8001_ha, pm8001_printk("pm8001_task_exec device \n "));
 	spin_lock_irqsave(&pm8001_ha->lock, flags);
 	do {
@@ -466,7 +495,7 @@
 	dev_printk(KERN_ERR, pm8001_ha->dev, "pm8001 exec failed[%d]!\n", rc);
 	if (!sas_protocol_ata(t->task_proto))
 		if (n_elem)
-			dma_unmap_sg(pm8001_ha->dev, t->scatter, n_elem,
+			dma_unmap_sg(pm8001_ha->dev, t->scatter, t->num_scatter,
 				t->data_dir);
 out_done:
 	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
@@ -504,9 +533,9 @@
 	switch (task->task_proto) {
 	case SAS_PROTOCOL_SMP:
 		dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_resp, 1,
-			PCI_DMA_FROMDEVICE);
+			DMA_FROM_DEVICE);
 		dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_req, 1,
-			PCI_DMA_TODEVICE);
+			DMA_TO_DEVICE);
 		break;
 
 	case SAS_PROTOCOL_SATA:
@@ -605,7 +634,7 @@
 	dev->lldd_dev = pm8001_device;
 	pm8001_device->dev_type = dev->dev_type;
 	pm8001_device->dcompletion = &completion;
-	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
+	if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
 		int phy_id;
 		struct ex_phy *phy;
 		for (phy_id = 0; phy_id < parent_dev->ex_dev.num_phys;
@@ -628,7 +657,7 @@
 		if (dev->dev_type == SAS_SATA_DEV) {
 			pm8001_device->attached_phy =
 				dev->rphy->identify.phy_identifier;
-				flag = 1; /* directly sata*/
+			flag = 1; /* directly sata */
 		}
 	} /*register this device to HBA*/
 	PM8001_DISC_DBG(pm8001_ha, pm8001_printk("Found device\n"));
@@ -711,8 +740,8 @@
 		wait_for_completion(&task->slow_task->completion);
 		if (pm8001_ha->chip_id != chip_8001) {
 			pm8001_dev->setds_completion = &completion_setstate;
-				PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
-					pm8001_dev, 0x01);
+			PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
+				pm8001_dev, 0x01);
 			wait_for_completion(&completion_setstate);
 		}
 		res = -TMF_RESP_FUNC_FAILED;
@@ -859,6 +888,8 @@
 			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 			pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
 				dev, 1, 0);
+			while (pm8001_dev->running_req)
+				msleep(20);
 			spin_lock_irqsave(&pm8001_ha->lock, flags);
 		}
 		PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id);
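
On the drain loop just added: it polls running_req with pm8001_ha->lock released (the lock is retaken immediately below), so completion handlers can still retire requests while the dereg path waits; the 20 ms sleep merely bounds the polling rate. The shape, as a hypothetical helper (the driver's counter is a plain integer updated under the lock, not an atomic_t — the atomic here is only for a self-contained sketch):

    #include <linux/atomic.h>
    #include <linux/delay.h>

    /* Illustrative only: wait for the outstanding-request count to drain. */
    static void ex_drain_requests(atomic_t *running)
    {
            while (atomic_read(running))
                    msleep(20);
    }
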
@@ -1020,13 +1051,11 @@
 	struct pm8001_device *pm8001_dev;
 	struct pm8001_hba_info *pm8001_ha;
 	struct sas_phy *phy;
-	u32 device_id = 0;
 
 	if (!dev || !dev->lldd_dev)
 		return -1;
 
 	pm8001_dev = dev->lldd_dev;
-	device_id = pm8001_dev->device_id;
 	pm8001_ha = pm8001_find_ha_by_dev(dev);
 
 	PM8001_EH_DBG(pm8001_ha,
@@ -1154,12 +1183,11 @@
 	return rc;
 }
 
-/*  mandatory SAM-3, still need free task/ccb info, abord the specified task */
+/*  mandatory SAM-3, still need free task/ccb info, abort the specified task */
 int pm8001_abort_task(struct sas_task *task)
 {
 	unsigned long flags;
 	u32 tag;
-	u32 device_id;
 	struct domain_device *dev ;
 	struct pm8001_hba_info *pm8001_ha;
 	struct scsi_lun lun;
@@ -1173,7 +1201,6 @@
 	dev = task->dev;
 	pm8001_dev = dev->lldd_dev;
 	pm8001_ha = pm8001_find_ha_by_dev(dev);
-	device_id = pm8001_dev->device_id;
 	phy_id = pm8001_dev->attached_phy;
 	rc = pm8001_find_tag(task, &tag);
 	if (rc == 0) {
@@ -1231,8 +1258,10 @@
 			PM8001_MSG_DBG(pm8001_ha,
 				pm8001_printk("Waiting for Port reset\n"));
 			wait_for_completion(&completion_reset);
-			if (phy->port_reset_status)
+			if (phy->port_reset_status) {
+				pm8001_dev_gone_notify(dev);
 				goto out;
+			}
 
 			/*
 			 * 4. SATA Abort ALL
@@ -1279,28 +1308,22 @@
 
 int pm8001_abort_task_set(struct domain_device *dev, u8 *lun)
 {
-	int rc = TMF_RESP_FUNC_FAILED;
 	struct pm8001_tmf_task tmf_task;
 
 	tmf_task.tmf = TMF_ABORT_TASK_SET;
-	rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
-	return rc;
+	return pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
 }
 
 int pm8001_clear_aca(struct domain_device *dev, u8 *lun)
 {
-	int rc = TMF_RESP_FUNC_FAILED;
 	struct pm8001_tmf_task tmf_task;
 
 	tmf_task.tmf = TMF_CLEAR_ACA;
-	rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
-
-	return rc;
+	return pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
 }
 
 int pm8001_clear_task_set(struct domain_device *dev, u8 *lun)
 {
-	int rc = TMF_RESP_FUNC_FAILED;
 	struct pm8001_tmf_task tmf_task;
 	struct pm8001_device *pm8001_dev = dev->lldd_dev;
 	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
@@ -1309,7 +1332,6 @@
 		pm8001_printk("I_T_L_Q clear task set[%x]\n",
 		pm8001_dev->device_id));
 	tmf_task.tmf = TMF_CLEAR_TASK_SET;
-	rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
-	return rc;
+	return pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
 }
 
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 80b4dd6..ff17c6a 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -58,7 +58,7 @@
 #include "pm8001_defs.h"
 
 #define DRV_NAME		"pm80xx"
-#define DRV_VERSION		"0.1.38"
+#define DRV_VERSION		"0.1.39"
 #define PM8001_FAIL_LOGGING	0x01 /* Error message logging */
 #define PM8001_INIT_LOGGING	0x02 /* driver init logging */
 #define PM8001_DISC_LOGGING	0x04 /* discovery layer logging */
@@ -103,7 +103,6 @@
 #define PM8001_READ_VPD
 
 
-#define DEV_IS_EXPANDER(type)	((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE))
 #define IS_SPCV_12G(dev)	((dev->device == 0X8074)		\
 				|| (dev->device == 0X8076)		\
 				|| (dev->device == 0X8077)		\
@@ -197,7 +196,7 @@
 	int (*chip_ioremap)(struct pm8001_hba_info *pm8001_ha);
 	void (*chip_iounmap)(struct pm8001_hba_info *pm8001_ha);
 	irqreturn_t (*isr)(struct pm8001_hba_info *pm8001_ha, u8 vec);
-	u32 (*is_our_interupt)(struct pm8001_hba_info *pm8001_ha);
+	u32 (*is_our_interrupt)(struct pm8001_hba_info *pm8001_ha);
 	int (*isr_process_oq)(struct pm8001_hba_info *pm8001_ha, u8 vec);
 	void (*interrupt_enable)(struct pm8001_hba_info *pm8001_ha, u8 vec);
 	void (*interrupt_disable)(struct pm8001_hba_info *pm8001_ha, u8 vec);
@@ -538,6 +537,7 @@
 	u32			logging_level;
 	u32			fw_status;
 	u32			smp_exp_mode;
+	bool			controller_fatal_error;
 	const struct firmware 	*fw_image;
 	struct isr_param irq_vector[PM8001_MAX_MSIX_VEC];
 	u32			reset_in_progress;
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 42f0405..7326190 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -577,6 +577,9 @@
 		pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size);
 	pm8001_mw32(address, MAIN_PCS_EVENT_LOG_OPTION,
 		pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity);
+	/* Update Fatal error interrupt vector */
+	pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt |=
+					((pm8001_ha->number_of_intr - 1) << 8);
 	pm8001_mw32(address, MAIN_FATAL_ERROR_INTERRUPT,
 		pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt);
 	pm8001_mw32(address, MAIN_EVENT_CRC_CHECK,
@@ -601,7 +604,7 @@
 		pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer &=
 					0x0000ffff;
 		pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer |=
-					0x140000;
+					CHIP_8006_PORT_RECOVERY_TIMEOUT;
 	}
 	pm8001_mw32(address, MAIN_PORT_RECOVERY_TIMER,
 			pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer);
@@ -1110,6 +1113,9 @@
 		return -EBUSY;
 	}
 
+	/* Initialize the controller fatal error flag */
+	pm8001_ha->controller_fatal_error = false;
+
 	/* Initialize pci space address eg: mpi offset */
 	init_pci_device_addresses(pm8001_ha);
 	init_default_table_values(pm8001_ha);
@@ -1218,13 +1224,17 @@
 	u32 bootloader_state;
 	u32 ibutton0, ibutton1;
 
-	/* Check if MPI is in ready state to reset */
-	if (mpi_uninit_check(pm8001_ha) != 0) {
-		PM8001_FAIL_DBG(pm8001_ha,
-			pm8001_printk("MPI state is not ready\n"));
-		return -1;
+	/* Process MPI table uninitialization only if FW is ready */
+	if (!pm8001_ha->controller_fatal_error) {
+		/* Check if MPI is in ready state to reset */
+		if (mpi_uninit_check(pm8001_ha) != 0) {
+			regval = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+			PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+				"MPI state is not ready, scratch1: 0x%x\n",
+				regval));
+			return -1;
+		}
 	}
-
 	/* checked for reset register normal state; 0x0 */
 	regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET);
 	PM8001_INIT_DBG(pm8001_ha,
@@ -1306,7 +1316,7 @@
 
 static void pm80xx_hw_chip_rst(struct pm8001_hba_info *pm8001_ha)
 {
-	 u32 i;
+	u32 i;
 
 	PM8001_INIT_DBG(pm8001_ha,
 		pm8001_printk("chip reset start\n"));
@@ -2056,7 +2066,7 @@
 	if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
 		(status != IO_UNDERFLOW)) {
 		if (!((t->dev->parent) &&
-			(DEV_IS_EXPANDER(t->dev->parent->dev_type)))) {
+			(dev_is_expander(t->dev->parent->dev_type)))) {
 			for (i = 0 , j = 4; i <= 3 && j <= 7; i++ , j++)
 				sata_addr_low[i] = pm8001_ha->sas_addr[j];
 			for (i = 0 , j = 0; i <= 3 && j <= 3; i++ , j++)
@@ -2123,7 +2133,7 @@
 			sata_resp = &psataPayload->sata_resp[0];
 			resp = (struct ata_task_resp *)ts->buf;
 			if (t->ata_task.dma_xfer == 0 &&
-			t->data_dir == PCI_DMA_FROMDEVICE) {
+			    t->data_dir == DMA_FROM_DEVICE) {
 				len = sizeof(struct pio_setup_fis);
 				PM8001_IO_DBG(pm8001_ha,
 				pm8001_printk("PIO read len = %d\n", len));
@@ -3118,8 +3128,9 @@
 		pm8001_printk("phy start resp status:0x%x, phyid:0x%x\n",
 				status, phy_id));
 	if (status == 0) {
-		phy->phy_state = 1;
-		if (pm8001_ha->flags == PM8001F_RUN_TIME)
+		phy->phy_state = PHY_LINK_DOWN;
+		if (pm8001_ha->flags == PM8001F_RUN_TIME &&
+				phy->enable_completion != NULL)
 			complete(phy->enable_completion);
 	}
 	return 0;
@@ -3211,7 +3222,7 @@
 			return 0;
 		}
 		phy->phy_attached = 0;
-		phy->phy_state = 0;
+		phy->phy_state = PHY_LINK_DISABLE;
 		break;
 	case HW_EVENT_PORT_INVALID:
 		PM8001_MSG_DBG(pm8001_ha,
@@ -3384,13 +3395,14 @@
 	u32 status =
 		le32_to_cpu(pPayload->status);
 	u32 phyid =
-		le32_to_cpu(pPayload->phyid);
+		le32_to_cpu(pPayload->phyid) & 0xFF;
 	struct pm8001_phy *phy = &pm8001_ha->phy[phyid];
 	PM8001_MSG_DBG(pm8001_ha,
 			pm8001_printk("phy:0x%x status:0x%x\n",
 					phyid, status));
-	if (status == 0)
-		phy->phy_state = 0;
+	if (status == PHY_STOP_SUCCESS ||
+		status == PHY_STOP_ERR_DEVICE_ATTACHED)
+		phy->phy_state = PHY_LINK_DISABLE;
 	return 0;
 }
 
@@ -3752,6 +3764,46 @@
 	}
 }
 
+static void print_scratchpad_registers(struct pm8001_hba_info *pm8001_ha)
+{
+	PM8001_FAIL_DBG(pm8001_ha,
+		pm8001_printk("MSGU_SCRATCH_PAD_0: 0x%x\n",
+			pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0)));
+	PM8001_FAIL_DBG(pm8001_ha,
+		pm8001_printk("MSGU_SCRATCH_PAD_1: 0x%x\n",
+			pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1)));
+	PM8001_FAIL_DBG(pm8001_ha,
+		pm8001_printk("MSGU_SCRATCH_PAD_2: 0x%x\n",
+			pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2)));
+	PM8001_FAIL_DBG(pm8001_ha,
+		pm8001_printk("MSGU_SCRATCH_PAD_3: 0x%x\n",
+			pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3)));
+	PM8001_FAIL_DBG(pm8001_ha,
+		pm8001_printk("MSGU_HOST_SCRATCH_PAD_0: 0x%x\n",
+			pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_0)));
+	PM8001_FAIL_DBG(pm8001_ha,
+		pm8001_printk("MSGU_HOST_SCRATCH_PAD_1: 0x%x\n",
+			pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_1)));
+	PM8001_FAIL_DBG(pm8001_ha,
+		pm8001_printk("MSGU_HOST_SCRATCH_PAD_2: 0x%x\n",
+			pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_2)));
+	PM8001_FAIL_DBG(pm8001_ha,
+		pm8001_printk("MSGU_HOST_SCRATCH_PAD_3: 0x%x\n",
+			pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_3)));
+	PM8001_FAIL_DBG(pm8001_ha,
+		pm8001_printk("MSGU_HOST_SCRATCH_PAD_4: 0x%x\n",
+			pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_4)));
+	PM8001_FAIL_DBG(pm8001_ha,
+		pm8001_printk("MSGU_HOST_SCRATCH_PAD_5: 0x%x\n",
+			pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_5)));
+	PM8001_FAIL_DBG(pm8001_ha,
+		pm8001_printk("MSGU_RSVD_SCRATCH_PAD_0: 0x%x\n",
+			pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_6)));
+	PM8001_FAIL_DBG(pm8001_ha,
+		pm8001_printk("MSGU_RSVD_SCRATCH_PAD_1: 0x%x\n",
+			pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_7)));
+}
+
 static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
 {
 	struct outbound_queue_table *circularQ;
@@ -3759,10 +3811,28 @@
 	u8 uninitialized_var(bc);
 	u32 ret = MPI_IO_STATUS_FAIL;
 	unsigned long flags;
+	u32 regval;
 
+	if (vec == (pm8001_ha->number_of_intr - 1)) {
+		regval = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+		if ((regval & SCRATCH_PAD_MIPSALL_READY) !=
+					SCRATCH_PAD_MIPSALL_READY) {
+			pm8001_ha->controller_fatal_error = true;
+			PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+				"Firmware fatal error! Regval: 0x%x\n", regval));
+			print_scratchpad_registers(pm8001_ha);
+			return ret;
+		}
+	}
 	spin_lock_irqsave(&pm8001_ha->lock, flags);
 	circularQ = &pm8001_ha->outbnd_q_tbl[vec];
 	do {
+		/* A spurious interrupt can occur during setup when
+		 * kexec-ing, if the driver does a doorbell access
+		 * with the pre-kexec oq interrupt setup.
+		 */
+		if (!circularQ->pi_virt)
+			break;
 		ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc);
 		if (MPI_IO_STATUS_SUCCESS == ret) {
 			/* process the outbound message */
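
The fatal-error test above only runs on the last vector, which is consistent with the earlier hunk that ORs (number_of_intr - 1) << 8 into fatal_err_interrupt: the firmware's fatal notification is steered to that vector. The readiness check itself is an all-bits-set test; a sketch (EX_RAAE_READY's value is assumed for illustration, the IOP bits match the new defines in pm80xx_hwi.h):

    #include <stdint.h>
    #include <stdbool.h>

    #define EX_IOP1_READY   0x3000u
    #define EX_IOP0_READY   0x0C00u
    #define EX_RAAE_READY   0x0003u                 /* assumed value */
    #define EX_MIPSALL_READY \
            (EX_IOP1_READY | EX_IOP0_READY | EX_RAAE_READY)

    /* Every processor-ready bit must be set; any subset means the
     * firmware has died and the driver flags a fatal error. */
    static bool ex_fw_alive(uint32_t scratch_pad_1)
    {
            return (scratch_pad_1 & EX_MIPSALL_READY) == EX_MIPSALL_READY;
    }
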
@@ -3785,12 +3855,12 @@
 	return ret;
 }
 
-/* PCI_DMA_... to our direction translation. */
+/* DMA_... to our direction translation. */
 static const u8 data_dir_flags[] = {
-	[PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT,/* UNSPECIFIED */
-	[PCI_DMA_TODEVICE]	= DATA_DIR_OUT,/* OUTBOUND */
-	[PCI_DMA_FROMDEVICE]	= DATA_DIR_IN,/* INBOUND */
-	[PCI_DMA_NONE]		= DATA_DIR_NONE,/* NO TRANSFER */
+	[DMA_BIDIRECTIONAL]	= DATA_DIR_BYRECIPIENT,	/* UNSPECIFIED */
+	[DMA_TO_DEVICE]		= DATA_DIR_OUT,		/* OUTBOUND */
+	[DMA_FROM_DEVICE]	= DATA_DIR_IN,		/* INBOUND */
+	[DMA_NONE]		= DATA_DIR_NONE,	/* NO TRANSFER */
 };
 
 static void build_smp_cmd(u32 deviceID, __le32 hTag,
@@ -3832,13 +3902,13 @@
 	 * DMA-map SMP request, response buffers
 	 */
 	sg_req = &task->smp_task.smp_req;
-	elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, PCI_DMA_TODEVICE);
+	elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, DMA_TO_DEVICE);
 	if (!elem)
 		return -ENOMEM;
 	req_len = sg_dma_len(sg_req);
 
 	sg_resp = &task->smp_task.smp_resp;
-	elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
+	elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, DMA_FROM_DEVICE);
 	if (!elem) {
 		rc = -ENOMEM;
 		goto err_out;
@@ -3929,10 +3999,10 @@
 
 err_out_2:
 	dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_resp, 1,
-			PCI_DMA_FROMDEVICE);
+			DMA_FROM_DEVICE);
 err_out:
 	dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_req, 1,
-			PCI_DMA_TODEVICE);
+			DMA_TO_DEVICE);
 	return rc;
 }
 
@@ -4156,7 +4226,7 @@
 	q_index = (u32) (pm8001_ha_dev->id & 0x00ffffff) % PM8001_MAX_INB_NUM;
 	circularQ = &pm8001_ha->inbnd_q_tbl[q_index];
 
-	if (task->data_dir == PCI_DMA_NONE) {
+	if (task->data_dir == DMA_NONE) {
 		ATAP = 0x04; /* no data*/
 		PM8001_IO_DBG(pm8001_ha, pm8001_printk("no data\n"));
 	} else if (likely(!task->ata_task.device_control_reg_update)) {
@@ -4311,27 +4381,27 @@
 			sata_cmd.len = cpu_to_le32(task->total_xfer_len);
 			sata_cmd.esgl = 0;
 		}
-			/* scsi cdb */
-			sata_cmd.atapi_scsi_cdb[0] =
-				cpu_to_le32(((task->ata_task.atapi_packet[0]) |
-				(task->ata_task.atapi_packet[1] << 8) |
-				(task->ata_task.atapi_packet[2] << 16) |
-				(task->ata_task.atapi_packet[3] << 24)));
-			sata_cmd.atapi_scsi_cdb[1] =
-				cpu_to_le32(((task->ata_task.atapi_packet[4]) |
-				(task->ata_task.atapi_packet[5] << 8) |
-				(task->ata_task.atapi_packet[6] << 16) |
-				(task->ata_task.atapi_packet[7] << 24)));
-			sata_cmd.atapi_scsi_cdb[2] =
-				cpu_to_le32(((task->ata_task.atapi_packet[8]) |
-				(task->ata_task.atapi_packet[9] << 8) |
-				(task->ata_task.atapi_packet[10] << 16) |
-				(task->ata_task.atapi_packet[11] << 24)));
-			sata_cmd.atapi_scsi_cdb[3] =
-				cpu_to_le32(((task->ata_task.atapi_packet[12]) |
-				(task->ata_task.atapi_packet[13] << 8) |
-				(task->ata_task.atapi_packet[14] << 16) |
-				(task->ata_task.atapi_packet[15] << 24)));
+		/* scsi cdb */
+		sata_cmd.atapi_scsi_cdb[0] =
+			cpu_to_le32(((task->ata_task.atapi_packet[0]) |
+			(task->ata_task.atapi_packet[1] << 8) |
+			(task->ata_task.atapi_packet[2] << 16) |
+			(task->ata_task.atapi_packet[3] << 24)));
+		sata_cmd.atapi_scsi_cdb[1] =
+			cpu_to_le32(((task->ata_task.atapi_packet[4]) |
+			(task->ata_task.atapi_packet[5] << 8) |
+			(task->ata_task.atapi_packet[6] << 16) |
+			(task->ata_task.atapi_packet[7] << 24)));
+		sata_cmd.atapi_scsi_cdb[2] =
+			cpu_to_le32(((task->ata_task.atapi_packet[8]) |
+			(task->ata_task.atapi_packet[9] << 8) |
+			(task->ata_task.atapi_packet[10] << 16) |
+			(task->ata_task.atapi_packet[11] << 24)));
+		sata_cmd.atapi_scsi_cdb[3] =
+			cpu_to_le32(((task->ata_task.atapi_packet[12]) |
+			(task->ata_task.atapi_packet[13] << 8) |
+			(task->ata_task.atapi_packet[14] << 16) |
+			(task->ata_task.atapi_packet[15] << 24)));
 	}
 
 	/* Check for read log for failed drive and return */
@@ -4491,7 +4561,7 @@
 			pm8001_dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
 			stp_sspsmp_sata = 0x01; /*ssp or smp*/
 	}
-	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
+	if (parent_dev && dev_is_expander(parent_dev->dev_type))
 		phy_id = parent_dev->ex_dev.ex_phy->phy_id;
 	else
 		phy_id = pm8001_dev->attached_phy;
@@ -4547,17 +4617,18 @@
 	return pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
 }
 
-static u32 pm80xx_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha)
+static u32 pm80xx_chip_is_our_interrupt(struct pm8001_hba_info *pm8001_ha)
 {
-	u32 value;
 #ifdef PM8001_USE_MSIX
 	return 1;
-#endif
+#else
+	u32 value;
+
 	value = pm8001_cr32(pm8001_ha, 0, MSGU_ODR);
 	if (value)
 		return 1;
 	return 0;
-
+#endif
 }
 
 /**
@@ -4606,9 +4677,8 @@
 void pm8001_set_phy_profile(struct pm8001_hba_info *pm8001_ha,
 	u32 length, u8 *buf)
 {
-	u32 page_code, i;
+	u32 i;
 
-	page_code = SAS_PHY_ANALOG_SETTINGS_PAGE;
 	for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
 		mpi_set_phy_profile_req(pm8001_ha,
 			SAS_PHY_ANALOG_SETTINGS_PAGE, i, length, (u32 *)buf);
@@ -4655,7 +4725,7 @@
 	.chip_rst		= pm80xx_hw_chip_rst,
 	.chip_iounmap		= pm8001_chip_iounmap,
 	.isr			= pm80xx_chip_isr,
-	.is_our_interupt	= pm80xx_chip_is_our_interupt,
+	.is_our_interrupt	= pm80xx_chip_is_our_interrupt,
 	.isr_process_oq		= process_oq,
 	.interrupt_enable	= pm80xx_chip_interrupt_enable,
 	.interrupt_disable	= pm80xx_chip_interrupt_disable,
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h
index 889e69c..dc9ab76 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.h
+++ b/drivers/scsi/pm8001/pm80xx_hwi.h
@@ -170,6 +170,10 @@
 #define LINKRATE_60			(0x04 << 8)
 #define LINKRATE_120			(0x08 << 8)
 
+/*phy_stop*/
+#define PHY_STOP_SUCCESS		0x00
+#define PHY_STOP_ERR_DEVICE_ATTACHED	0x1046
+
 /* phy_profile */
 #define SAS_PHY_ANALOG_SETTINGS_PAGE	0x04
 #define PHY_DWORD_LENGTH		0xC
@@ -216,8 +220,6 @@
 #define SAS_DOPNRJT_RTRY_TMO            128
 #define SAS_COPNRJT_RTRY_TMO            128
 
-/* for phy state */
-#define PHY_STATE_LINK_UP_SPCV		0x2
 /*
   Making ORR bigger than IT NEXUS LOSS which is 2000000us = 2 second.
   Assuming a bigger value 3 second, 3000000/128 = 23437.5 where 128
@@ -228,6 +230,8 @@
 #define SAS_MAX_AIP                     0x200000
 #define IT_NEXUS_TIMEOUT       0x7D0
 #define PORT_RECOVERY_TIMEOUT  ((IT_NEXUS_TIMEOUT/100) + 30)
+/* Port recovery timeout, 10000 ms for PM8006 controller */
+#define CHIP_8006_PORT_RECOVERY_TIMEOUT 0x640000
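
Worked out, the constant is self-consistent with its comment: the timer setup in pm80xx_hwi.c keeps the low 16 bits and ORs this value in above them, so 0x640000 writes 0x64 = 100 into bits 16 and up. Reading one unit as 100 ms — the same granularity PORT_RECOVERY_TIMEOUT uses above when it divides IT_NEXUS_TIMEOUT (0x7D0 = 2000) by 100 — gives 100 × 100 ms = 10000 ms, matching the stated PM8006 recovery window.
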
 
 #ifdef __LITTLE_ENDIAN_BITFIELD
 struct sas_identify_frame_local {
@@ -1384,6 +1388,9 @@
 #define SCRATCH_PAD_BOOT_LOAD_SUCCESS	0x0
 #define SCRATCH_PAD_IOP0_READY		0xC00
 #define SCRATCH_PAD_IOP1_READY		0x3000
+#define SCRATCH_PAD_MIPSALL_READY	(SCRATCH_PAD_IOP1_READY | \
+					SCRATCH_PAD_IOP0_READY | \
+					SCRATCH_PAD_RAAE_READY)
 
 /* boot loader state */
 #define SCRATCH_PAD1_BOOTSTATE_MASK		0x70	/* Bit 4-6 */
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 4e86994..398d2af 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * pmcraid.c -- driver for PMC Sierra MaxRAID controller adapters
  *
@@ -5,22 +6,6 @@
  *             PMC-Sierra Inc
  *
  * Copyright (C) 2008, 2009 PMC Sierra Inc
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307,
- * USA
- *
  */
 #include <linux/fs.h>
 #include <linux/init.h>
@@ -846,16 +831,9 @@
 			    cmd->ioa_cb->ioarcb.cdb[0], ioasc);
 	}
 
-	/* if we had allocated sense buffers for request sense, copy the sense
-	 * release the buffers
-	 */
-	if (cmd->sense_buffer != NULL) {
-		memcpy(scsi_cmd->sense_buffer,
-		       cmd->sense_buffer,
-		       SCSI_SENSE_BUFFERSIZE);
-		pci_free_consistent(pinstance->pdev,
-				    SCSI_SENSE_BUFFERSIZE,
-				    cmd->sense_buffer, cmd->sense_buffer_dma);
+	if (cmd->sense_buffer) {
+		dma_unmap_single(&pinstance->pdev->dev, cmd->sense_buffer_dma,
+				 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
 		cmd->sense_buffer = NULL;
 		cmd->sense_buffer_dma = 0;
 	}
@@ -2444,13 +2422,12 @@
 {
 	struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
 	struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
+	struct device *dev = &cmd->drv_inst->pdev->dev;
 
-	/* allocate DMAable memory for sense buffers */
-	cmd->sense_buffer = pci_alloc_consistent(cmd->drv_inst->pdev,
-						 SCSI_SENSE_BUFFERSIZE,
-						 &cmd->sense_buffer_dma);
-
-	if (cmd->sense_buffer == NULL) {
+	cmd->sense_buffer = cmd->scsi_cmd->sense_buffer;
+	cmd->sense_buffer_dma = dma_map_single(dev, cmd->sense_buffer,
+			SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+	if (dma_mapping_error(dev, cmd->sense_buffer_dma)) {
 		pmcraid_err
 			("couldn't allocate sense buffer for request sense\n");
 		pmcraid_erp_done(cmd);
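
Two notes on the conversion above. First, the coherent bounce buffer is gone entirely: the midlayer's own sense_buffer is now streaming-mapped, which is why the completion path unmaps instead of copy-and-free. Second, the untouched error string below still says "couldn't allocate" although the failure is now a mapping error. The map-and-check idiom, as a sketch (ex_map_sense is hypothetical):

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    /* Map a caller-owned sense buffer for device-to-host DMA. */
    static int ex_map_sense(struct device *dev, void *sense, size_t len,
                            dma_addr_t *out)
    {
            *out = dma_map_single(dev, sense, len, DMA_FROM_DEVICE);
            return dma_mapping_error(dev, *out) ? -ENOMEM : 0;
    }
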
@@ -2491,17 +2468,15 @@
 /**
  * pmcraid_cancel_all - cancel all outstanding IOARCBs as part of error recovery
  * @cmd: command that failed
- * @sense: true if request_sense is required after cancel all
+ * @need_sense: true if request_sense is required after cancel all
  *
  * This function sends a cancel all to a device to clear the queue.
  */
-static void pmcraid_cancel_all(struct pmcraid_cmd *cmd, u32 sense)
+static void pmcraid_cancel_all(struct pmcraid_cmd *cmd, bool need_sense)
 {
 	struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
 	struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
 	struct pmcraid_resource_entry *res = scsi_cmd->device->hostdata;
-	void (*cmd_done) (struct pmcraid_cmd *) = sense ? pmcraid_erp_done
-							: pmcraid_request_sense;
 
 	memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN);
 	ioarcb->request_flags0 = SYNC_OVERRIDE;
@@ -2519,7 +2494,8 @@
 	/* writing to IOARRIN must be protected by host_lock, as mid-layer
 	 * schedule queuecommand while we are doing this
 	 */
-	pmcraid_send_cmd(cmd, cmd_done,
+	pmcraid_send_cmd(cmd, need_sense ?
+			 pmcraid_erp_done : pmcraid_request_sense,
 			 PMCRAID_REQUEST_SENSE_TIMEOUT,
 			 pmcraid_timeout_handler);
 }
@@ -2612,7 +2588,7 @@
 	struct pmcraid_ioasa *ioasa = &cmd->ioa_cb->ioasa;
 	u32 ioasc = le32_to_cpu(ioasa->ioasc);
 	u32 masked_ioasc = ioasc & PMCRAID_IOASC_SENSE_MASK;
-	u32 sense_copied = 0;
+	bool sense_copied = false;
 
 	if (!res) {
 		pmcraid_info("resource pointer is NULL\n");
@@ -2684,7 +2660,7 @@
 			memcpy(scsi_cmd->sense_buffer,
 			       ioasa->sense_data,
 			       data_size);
-			sense_copied = 1;
+			sense_copied = true;
 		}
 
 		if (RES_IS_GSCSI(res->cfg_entry))
@@ -3279,7 +3255,7 @@
 	int direction
 )
 {
-	struct scatterlist *scatterlist;
+	struct scatterlist *sg;
 	void *kaddr;
 	int bsize_elem;
 	int i;
@@ -3288,10 +3264,10 @@
 	/* Determine the actual number of bytes per element */
 	bsize_elem = PAGE_SIZE * (1 << sglist->order);
 
-	scatterlist = sglist->scatterlist;
+	sg = sglist->scatterlist;
 
-	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
-		struct page *page = sg_page(&scatterlist[i]);
+	for (i = 0; i < (len / bsize_elem); i++, sg = sg_next(sg), buffer += bsize_elem) {
+		struct page *page = sg_page(sg);
 
 		kaddr = kmap(page);
 		if (direction == DMA_TO_DEVICE)
@@ -3306,11 +3282,11 @@
 			return -EFAULT;
 		}
 
-		scatterlist[i].length = bsize_elem;
+		sg->length = bsize_elem;
 	}
 
 	if (len % bsize_elem) {
-		struct page *page = sg_page(&scatterlist[i]);
+		struct page *page = sg_page(sg);
 
 		kaddr = kmap(page);
 
@@ -3321,7 +3297,7 @@
 
 		kunmap(page);
 
-		scatterlist[i].length = len % bsize_elem;
+		sg->length = len % bsize_elem;
 	}
 
 	if (rc) {
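
The change above is not cosmetic: scatterlists may be chained, so &scatterlist[i] is only valid while every entry lives in one flat array, whereas sg_next() follows the chain correctly. A toy model of the difference (ex_sg is hypothetical; the real API is sg_next() from <linux/scatterlist.h>):

    #include <stddef.h>

    struct ex_sg {
            struct ex_sg *next;     /* chained, not necessarily adjacent */
            size_t length;
    };

    static size_t ex_total_len(struct ex_sg *sg)
    {
            size_t n = 0;

            for (; sg; sg = sg->next)       /* stand-in for sg_next() */
                    n += sg->length;
            return n;
    }
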
@@ -3523,7 +3499,7 @@
 		return -ENOMEM;
 	}
 
-	sglist->num_dma_sg = pci_map_sg(cmd->drv_inst->pdev,
+	sglist->num_dma_sg = dma_map_sg(&cmd->drv_inst->pdev->dev,
 					sglist->scatterlist,
 					sglist->num_sg, direction);
 
@@ -3572,7 +3548,7 @@
 	struct pmcraid_sglist *sglist = cmd->sglist;
 
 	if (buflen > 0) {
-		pci_unmap_sg(cmd->drv_inst->pdev,
+		dma_unmap_sg(&cmd->drv_inst->pdev->dev,
 			     sglist->scatterlist,
 			     sglist->num_sg,
 			     direction);
@@ -3609,7 +3585,7 @@
 	u32 ioasc;
 	int request_size;
 	int buffer_size;
-	u8 access, direction;
+	u8 direction;
 	int rc = 0;
 
 	/* If IOA reset is in progress, wait 10 secs for reset to complete */
@@ -3658,10 +3634,8 @@
 	request_size = le32_to_cpu(buffer->ioarcb.data_transfer_length);
 
 	if (buffer->ioarcb.request_flags0 & TRANSFER_DIR_WRITE) {
-		access = VERIFY_READ;
 		direction = DMA_TO_DEVICE;
 	} else {
-		access = VERIFY_WRITE;
 		direction = DMA_FROM_DEVICE;
 	}
 
@@ -4158,7 +4132,6 @@
 	.max_sectors = PMCRAID_IOA_MAX_SECTORS,
 	.no_write_same = 1,
 	.cmd_per_lun = PMCRAID_MAX_CMD_PER_LUN,
-	.use_clustering = ENABLE_CLUSTERING,
 	.shost_attrs = pmcraid_host_attrs,
 	.proc_name = PMCRAID_DRIVER_NAME,
 };
@@ -4708,9 +4681,9 @@
 pmcraid_release_host_rrqs(struct pmcraid_instance *pinstance, int maxindex)
 {
 	int i;
-	for (i = 0; i < maxindex; i++) {
 
-		pci_free_consistent(pinstance->pdev,
+	for (i = 0; i < maxindex; i++) {
+		dma_free_coherent(&pinstance->pdev->dev,
 				    HRRQ_ENTRY_SIZE * PMCRAID_MAX_CMD,
 				    pinstance->hrrq_start[i],
 				    pinstance->hrrq_start_bus_addr[i]);
@@ -4737,11 +4710,9 @@
 
 	for (i = 0; i < pinstance->num_hrrq; i++) {
 		pinstance->hrrq_start[i] =
-			pci_alloc_consistent(
-					pinstance->pdev,
-					buffer_size,
-					&(pinstance->hrrq_start_bus_addr[i]));
-
+			dma_alloc_coherent(&pinstance->pdev->dev, buffer_size,
+					   &pinstance->hrrq_start_bus_addr[i],
+					   GFP_KERNEL);
 		if (!pinstance->hrrq_start[i]) {
 			pmcraid_err("pci_alloc failed for hrrq vector : %d\n",
 				    i);
@@ -4770,7 +4741,7 @@
 static void pmcraid_release_hcams(struct pmcraid_instance *pinstance)
 {
 	if (pinstance->ccn.msg != NULL) {
-		pci_free_consistent(pinstance->pdev,
+		dma_free_coherent(&pinstance->pdev->dev,
 				    PMCRAID_AEN_HDR_SIZE +
 				    sizeof(struct pmcraid_hcam_ccn_ext),
 				    pinstance->ccn.msg,
@@ -4782,7 +4753,7 @@
 	}
 
 	if (pinstance->ldn.msg != NULL) {
-		pci_free_consistent(pinstance->pdev,
+		dma_free_coherent(&pinstance->pdev->dev,
 				    PMCRAID_AEN_HDR_SIZE +
 				    sizeof(struct pmcraid_hcam_ldn),
 				    pinstance->ldn.msg,
@@ -4803,17 +4774,15 @@
  */
 static int pmcraid_allocate_hcams(struct pmcraid_instance *pinstance)
 {
-	pinstance->ccn.msg = pci_alloc_consistent(
-					pinstance->pdev,
+	pinstance->ccn.msg = dma_alloc_coherent(&pinstance->pdev->dev,
 					PMCRAID_AEN_HDR_SIZE +
 					sizeof(struct pmcraid_hcam_ccn_ext),
-					&(pinstance->ccn.baddr));
+					&pinstance->ccn.baddr, GFP_KERNEL);
 
-	pinstance->ldn.msg = pci_alloc_consistent(
-					pinstance->pdev,
+	pinstance->ldn.msg = dma_alloc_coherent(&pinstance->pdev->dev,
 					PMCRAID_AEN_HDR_SIZE +
 					sizeof(struct pmcraid_hcam_ldn),
-					&(pinstance->ldn.baddr));
+					&pinstance->ldn.baddr, GFP_KERNEL);
 
 	if (pinstance->ldn.msg == NULL || pinstance->ccn.msg == NULL) {
 		pmcraid_release_hcams(pinstance);
@@ -4841,7 +4810,7 @@
 {
 	if (pinstance->cfg_table != NULL &&
 	    pinstance->cfg_table_bus_addr != 0) {
-		pci_free_consistent(pinstance->pdev,
+		dma_free_coherent(&pinstance->pdev->dev,
 				    sizeof(struct pmcraid_config_table),
 				    pinstance->cfg_table,
 				    pinstance->cfg_table_bus_addr);
@@ -4886,10 +4855,10 @@
 		list_add_tail(&pinstance->res_entries[i].queue,
 			      &pinstance->free_res_q);
 
-	pinstance->cfg_table =
-		pci_alloc_consistent(pinstance->pdev,
+	pinstance->cfg_table = dma_alloc_coherent(&pinstance->pdev->dev,
 				     sizeof(struct pmcraid_config_table),
-				     &pinstance->cfg_table_bus_addr);
+				     &pinstance->cfg_table_bus_addr,
+				     GFP_KERNEL);
 
 	if (NULL == pinstance->cfg_table) {
 		pmcraid_err("couldn't alloc DMA memory for config table\n");
@@ -4954,7 +4923,7 @@
 	pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
 
 	if (pinstance->inq_data != NULL) {
-		pci_free_consistent(pinstance->pdev,
+		dma_free_coherent(&pinstance->pdev->dev,
 				    sizeof(struct pmcraid_inquiry_data),
 				    pinstance->inq_data,
 				    pinstance->inq_data_baddr);
@@ -4964,7 +4933,7 @@
 	}
 
 	if (pinstance->timestamp_data != NULL) {
-		pci_free_consistent(pinstance->pdev,
+		dma_free_coherent(&pinstance->pdev->dev,
 				    sizeof(struct pmcraid_timestamp_data),
 				    pinstance->timestamp_data,
 				    pinstance->timestamp_data_baddr);
@@ -4981,8 +4950,8 @@
  * This routine pre-allocates memory based on the type of block as below:
  * cmdblocks(PMCRAID_MAX_CMD): kernel memory using kernel's slab_allocator,
  * IOARCBs(PMCRAID_MAX_CMD)  : DMAable memory, using pci pool allocator
- * config-table entries      : DMAable memory using pci_alloc_consistent
- * HostRRQs                  : DMAable memory, using pci_alloc_consistent
+ * config-table entries      : DMAable memory using dma_alloc_coherent
+ * HostRRQs                  : DMAable memory, using dma_alloc_coherent
  *
  * Return Value
  *	 0 in case all of the blocks are allocated, -ENOMEM otherwise.
@@ -5019,11 +4988,9 @@
 	}
 
 	/* allocate DMAable memory for page D0 INQUIRY buffer */
-	pinstance->inq_data = pci_alloc_consistent(
-					pinstance->pdev,
+	pinstance->inq_data = dma_alloc_coherent(&pinstance->pdev->dev,
 					sizeof(struct pmcraid_inquiry_data),
-					&pinstance->inq_data_baddr);
-
+					&pinstance->inq_data_baddr, GFP_KERNEL);
 	if (pinstance->inq_data == NULL) {
 		pmcraid_err("couldn't allocate DMA memory for INQUIRY\n");
 		pmcraid_release_buffers(pinstance);
@@ -5031,11 +4998,10 @@
 	}
 
 	/* allocate DMAable memory for set timestamp data buffer */
-	pinstance->timestamp_data = pci_alloc_consistent(
-					pinstance->pdev,
+	pinstance->timestamp_data = dma_alloc_coherent(&pinstance->pdev->dev,
 					sizeof(struct pmcraid_timestamp_data),
-					&pinstance->timestamp_data_baddr);
-
+					&pinstance->timestamp_data_baddr,
+					GFP_KERNEL);
 	if (pinstance->timestamp_data == NULL) {
 		pmcraid_err("couldn't allocate DMA memory for \
 				set time_stamp \n");
@@ -5324,12 +5290,12 @@
 
 	pci_set_master(pdev);
 
-	if ((sizeof(dma_addr_t) == 4) ||
-	     pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
-		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+	if (sizeof(dma_addr_t) == 4 ||
+	    dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
+		rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 
 	if (rc == 0)
-		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 
 	if (rc != 0) {
 		dev_err(&pdev->dev, "resume: Failed to set PCI DMA mask\n");
@@ -5733,19 +5699,19 @@
 	/* Firmware requires the system bus address of IOARCB to be within
 	 * 32-bit addressable range though it has 64-bit IOARRIN register.
 	 * However, firmware supports 64-bit streaming DMA buffers, whereas
-	 * coherent buffers are to be 32-bit. Since pci_alloc_consistent always
+	 * coherent buffers are to be 32-bit. Since dma_alloc_coherent always
 	 * returns memory within 4GB (if not, change this logic), coherent
 	 * buffers are within firmware acceptable address ranges.
 	 */
-	if ((sizeof(dma_addr_t) == 4) ||
-	    pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
-		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+	if (sizeof(dma_addr_t) == 4 ||
+	    dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
+		rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 
 	/* firmware expects 32-bit DMA addresses for IOARRIN register; set 32
-	 * bit mask for pci_alloc_consistent to return addresses within 4GB
+	 * bit mask for dma_alloc_coherent to return addresses within 4GB
 	 */
 	if (rc == 0)
-		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 
 	if (rc != 0) {
 		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
@@ -5875,7 +5841,7 @@
 }
 
 /*
- * PCI driver structure of pcmraid driver
+ * PCI driver structure of pmcraid driver
  */
 static struct pci_driver pmcraid_driver = {
 	.name = PMCRAID_DRIVER_NAME,
diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
index 754ef30..a4f7eb8 100644
--- a/drivers/scsi/pmcraid.h
+++ b/drivers/scsi/pmcraid.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * pmcraid.h -- PMC Sierra MaxRAID controller driver header file
  *
@@ -5,20 +6,6 @@
  *             PMC-Sierra Inc
  *
  * Copyright (C) 2008, 2009 PMC Sierra Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 #ifndef _PMCRAID_H
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index ee86a0c..a406cc8 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -590,7 +590,7 @@
 		if (cmd->SCp.buffer && !cmd->SCp.this_residual) {
 			/* if scatter/gather, advance to the next segment */
 			if (cmd->SCp.buffers_residual--) {
-				cmd->SCp.buffer++;
+				cmd->SCp.buffer = sg_next(cmd->SCp.buffer);
 				cmd->SCp.this_residual =
 				    cmd->SCp.buffer->length;
 				cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
@@ -717,6 +717,7 @@
 			}
 			cmd->SCp.phase++;
 		}
+		/* fall through */
 
 	case 2:		/* Phase 2 - We are now talking to the scsi bus */
 		if (!ppa_select(dev, scmd_id(cmd))) {
@@ -978,7 +979,6 @@
 	.bios_param		= ppa_biosparam,
 	.this_id		= -1,
 	.sg_tablesize		= SG_ALL,
-	.use_clustering		= ENABLE_CLUSTERING,
 	.can_queue		= 1,
 	.slave_alloc		= ppa_adjust_queue,
 };
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c
index 4924424..f75c0b5 100644
--- a/drivers/scsi/ps3rom.c
+++ b/drivers/scsi/ps3rom.c
@@ -1,21 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * PS3 BD/DVD/CD-ROM Storage Driver
  *
  * Copyright (C) 2007 Sony Computer Entertainment Inc.
  * Copyright 2007 Sony Corp.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published
- * by the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
  */
 
 #include <linux/cdrom.h>
@@ -349,7 +337,6 @@
 	.sg_tablesize =		SG_ALL,
 	.emulated =             1,		/* only sg driver uses this */
 	.max_sectors =		PS3ROM_MAX_SECTORS,
-	.use_clustering =	ENABLE_CLUSTERING,
 	.module =		THIS_MODULE,
 };
 
diff --git a/drivers/scsi/qedf/Kconfig b/drivers/scsi/qedf/Kconfig
index 943f5ee..7cd993b 100644
--- a/drivers/scsi/qedf/Kconfig
+++ b/drivers/scsi/qedf/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 config QEDF
 	tristate "QLogic QEDF 25/40/100Gb FCoE Initiator Driver Support"
 	depends on PCI && SCSI
diff --git a/drivers/scsi/qedf/Makefile b/drivers/scsi/qedf/Makefile
index 414f2a7..c462878 100644
--- a/drivers/scsi/qedf/Makefile
+++ b/drivers/scsi/qedf/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 obj-$(CONFIG_QEDF) := qedf.o
 qedf-y = qedf_dbg.o qedf_main.o qedf_io.o qedf_fip.o \
 	 qedf_attr.o qedf_els.o drv_scsi_fw_funcs.o drv_fcoe_fw_funcs.o
diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.c b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
index 5bd10b5..747af96 100644
--- a/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
+++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
@@ -1,9 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /* QLogic FCoE Offload Driver
  * Copyright (c) 2016-2018 Cavium Inc.
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
  */
 #include "drv_fcoe_fw_funcs.h"
 #include "drv_scsi_fw_funcs.h"
diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.h b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
index 42fde55..1ee31a5 100644
--- a/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
+++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /* QLogic FCoE Offload Driver
  * Copyright (c) 2016-2018 Cavium Inc.
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
  */
 #ifndef _FCOE_FW_FUNCS_H
 #define _FCOE_FW_FUNCS_H
diff --git a/drivers/scsi/qedf/drv_scsi_fw_funcs.c b/drivers/scsi/qedf/drv_scsi_fw_funcs.c
index 29a5525..3289b71 100644
--- a/drivers/scsi/qedf/drv_scsi_fw_funcs.c
+++ b/drivers/scsi/qedf/drv_scsi_fw_funcs.c
@@ -1,9 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /* QLogic FCoE Offload Driver
  * Copyright (c) 2016-2018 Cavium Inc.
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
  */
 #include "drv_scsi_fw_funcs.h"
 
diff --git a/drivers/scsi/qedf/drv_scsi_fw_funcs.h b/drivers/scsi/qedf/drv_scsi_fw_funcs.h
index bf10220..6195f13 100644
--- a/drivers/scsi/qedf/drv_scsi_fw_funcs.h
+++ b/drivers/scsi/qedf/drv_scsi_fw_funcs.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /* QLogic FCoE Offload Driver
  * Copyright (c) 2016-2018 Cavium Inc.
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
  */
 #ifndef _SCSI_FW_FUNCS_H
 #define _SCSI_FW_FUNCS_H
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index 2c78d8f..f3f399f 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  *  QLogic FCoE Offload Driver
  *  Copyright (c) 2016-2018 Cavium Inc.
- *
- *  This software is available under the terms of the GNU General Public License
- *  (GPL) Version 2, available from the file COPYING in the main directory of
- *  this source tree.
  */
 #ifndef _QEDFC_H_
 #define _QEDFC_H_
@@ -35,9 +32,6 @@
 #define QEDF_DESCR "QLogic FCoE Offload Driver"
 #define QEDF_MODULE_NAME "qedf"
 
-#define QEDF_MIN_XID		0
-#define QEDF_MAX_SCSI_XID	(NUM_TASKS_PER_CONNECTION - 1)
-#define QEDF_MAX_ELS_XID	4095
 #define QEDF_FLOGI_RETRY_CNT	3
 #define QEDF_RPORT_RETRY_CNT	255
 #define QEDF_MAX_SESSIONS	1024
@@ -52,9 +46,10 @@
 	sizeof(struct fc_frame_header))
 #define QEDF_MAX_NPIV		64
 #define QEDF_TM_TIMEOUT		10
-#define QEDF_ABORT_TIMEOUT	10
-#define QEDF_CLEANUP_TIMEOUT	10
+#define QEDF_ABORT_TIMEOUT	(10 * 1000)
+#define QEDF_CLEANUP_TIMEOUT	1
 #define QEDF_MAX_CDB_LEN	16
+#define QEDF_LL2_BUF_SIZE	2500	/* Buffer size required for LL2 Rx */
 
 #define UPSTREAM_REMOVE		1
 #define UPSTREAM_KEEP		1
@@ -85,6 +80,7 @@
 };
 
 enum qedf_ioreq_event {
+	QEDF_IOREQ_EV_NONE,
 	QEDF_IOREQ_EV_ABORT_SUCCESS,
 	QEDF_IOREQ_EV_ABORT_FAILED,
 	QEDF_IOREQ_EV_SEND_RRQ,
@@ -105,7 +101,6 @@
 	struct list_head link;
 	uint16_t xid;
 	struct scsi_cmnd *sc_cmd;
-	bool use_slowpath; /* Use slow SGL for this I/O */
 #define QEDF_SCSI_CMD		1
 #define QEDF_TASK_MGMT_CMD	2
 #define QEDF_ABTS		3
@@ -117,22 +112,43 @@
 #define QEDF_CMD_IN_ABORT		0x1
 #define QEDF_CMD_IN_CLEANUP		0x2
 #define QEDF_CMD_SRR_SENT		0x3
+#define QEDF_CMD_DIRTY			0x4
+#define QEDF_CMD_ERR_SCSI_DONE		0x5
 	u8 io_req_flags;
 	uint8_t tm_flags;
 	struct qedf_rport *fcport;
+#define	QEDF_CMD_ST_INACTIVE		0
+#define	QEDFC_CMD_ST_IO_ACTIVE		1
+#define	QEDFC_CMD_ST_ABORT_ACTIVE	2
+#define	QEDFC_CMD_ST_ABORT_ACTIVE_EH	3
+#define	QEDFC_CMD_ST_CLEANUP_ACTIVE	4
+#define	QEDFC_CMD_ST_CLEANUP_ACTIVE_EH	5
+#define	QEDFC_CMD_ST_RRQ_ACTIVE		6
+#define	QEDFC_CMD_ST_RRQ_WAIT		7
+#define	QEDFC_CMD_ST_OXID_RETIRE_WAIT	8
+#define	QEDFC_CMD_ST_TMF_ACTIVE		9
+#define	QEDFC_CMD_ST_DRAIN_ACTIVE	10
+#define	QEDFC_CMD_ST_CLEANED		11
+#define	QEDFC_CMD_ST_ELS_ACTIVE		12
+	atomic_t state;
 	unsigned long flags;
 	enum qedf_ioreq_event event;
 	size_t data_xfer_len;
+	/* ID: 001: Alloc cmd (qedf_alloc_cmd) */
+	/* ID: 002: Initiate ABTS (qedf_initiate_abts) */
+	/* ID: 003: For RRQ (qedf_process_abts_compl) */
 	struct kref refcount;
 	struct qedf_cmd_mgr *cmd_mgr;
 	struct io_bdt *bd_tbl;
 	struct delayed_work timeout_work;
 	struct completion tm_done;
 	struct completion abts_done;
+	struct completion cleanup_done;
 	struct e4_fcoe_task_context *task;
 	struct fcoe_task_params *task_params;
 	struct scsi_sgl_task_params *sgl_task_params;
 	int idx;
+	int lun;
 /*
  * Need to allocate enough room for both sense data and FCP response data
  * which has a max length of 8 bytes according to spec.
@@ -155,9 +171,9 @@
 	int fp_idx;
 	unsigned int cpu;
 	unsigned int int_cpu;
-#define QEDF_IOREQ_SLOW_SGE		0
-#define QEDF_IOREQ_SINGLE_SGE		1
-#define QEDF_IOREQ_FAST_SGE		2
+#define QEDF_IOREQ_UNKNOWN_SGE		1
+#define QEDF_IOREQ_SLOW_SGE		2
+#define QEDF_IOREQ_FAST_SGE		3
 	u8 sge_type;
 	struct delayed_work rrq_work;
 
@@ -172,6 +188,8 @@
 	 * during some form of error processing.
 	 */
 	bool return_scsi_cmd_on_abts;
+
+	unsigned int alloc;
 };
 
 extern struct workqueue_struct *qedf_io_wq;
@@ -181,7 +199,10 @@
 #define QEDF_RPORT_SESSION_READY 1
 #define QEDF_RPORT_UPLOADING_CONNECTION	2
 #define QEDF_RPORT_IN_RESET 3
+#define QEDF_RPORT_IN_LUN_RESET 4
+#define QEDF_RPORT_IN_TARGET_RESET 5
 	unsigned long flags;
+	int lun_reset_lun;
 	unsigned long retry_delay_timestamp;
 	struct fc_rport *rport;
 	struct fc_rport_priv *rdata;
@@ -191,6 +212,7 @@
 	void __iomem *p_doorbell;
 	/* Send queue management */
 	atomic_t free_sqes;
+	atomic_t ios_to_queue;
 	atomic_t num_active_ios;
 	struct fcoe_wqe *sq;
 	dma_addr_t sq_dma;
@@ -295,8 +317,6 @@
 #define QEDF_DCBX_PENDING	0
 #define QEDF_DCBX_DONE		1
 	atomic_t dcbx;
-	uint16_t max_scsi_xid;
-	uint16_t max_els_xid;
 #define QEDF_NULL_VLAN_ID	-1
 #define QEDF_FALLBACK_VLAN	1002
 #define QEDF_DEFAULT_PRIO	3
@@ -371,7 +391,6 @@
 
 	u32 slow_sge_ios;
 	u32 fast_sge_ios;
-	u32 single_sge_ios;
 
 	uint8_t	*grcdump;
 	uint32_t grcdump_size;
@@ -396,6 +415,8 @@
 	u8 target_resets;
 	u8 task_set_fulls;
 	u8 busy;
+	/* Used for flush routine */
+	struct mutex flush_mutex;
 };
 
 struct io_bdt {
@@ -435,6 +456,12 @@
 /*
  * Externs
  */
+
+/*
+ * (QEDF_LOG_NPIV | QEDF_LOG_SESS | QEDF_LOG_LPORT | QEDF_LOG_ELS | QEDF_LOG_MQ
+ * | QEDF_LOG_IO | QEDF_LOG_UNSOL | QEDF_LOG_SCSI_TM | QEDF_LOG_MP_REQ |
+ * QEDF_LOG_EVT | QEDF_LOG_CONN | QEDF_LOG_DISC | QEDF_LOG_INFO)
+ */
 #define QEDF_DEFAULT_LOG_MASK		0x3CFB6
 extern const struct qed_fcoe_ops *qed_ops;
 extern uint qedf_dump_frames;
@@ -494,7 +521,7 @@
 extern void qedf_create_sysfs_ctx_attr(struct qedf_ctx *qedf);
 extern void qedf_remove_sysfs_ctx_attr(struct qedf_ctx *qedf);
 extern void qedf_capture_grc_dump(struct qedf_ctx *qedf);
-extern void qedf_wait_for_upload(struct qedf_ctx *qedf);
+bool qedf_wait_for_upload(struct qedf_ctx *qedf);
 extern void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
 	struct fcoe_cqe *cqe);
 extern void qedf_restart_rport(struct qedf_rport *fcport);
@@ -508,6 +535,8 @@
 extern void qedf_fp_io_handler(struct work_struct *work);
 extern void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data);
 extern void qedf_wq_grcdump(struct work_struct *work);
+void qedf_stag_change_work(struct work_struct *work);
+void qedf_ctx_soft_reset(struct fc_lport *lport);
 
 #define FCOE_WORD_TO_BYTE  4
 #define QEDF_MAX_TASK_NUM	0xFFFF
diff --git a/drivers/scsi/qedf/qedf_attr.c b/drivers/scsi/qedf/qedf_attr.c
index 0487b72..d995f72 100644
--- a/drivers/scsi/qedf/qedf_attr.c
+++ b/drivers/scsi/qedf/qedf_attr.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  *  QLogic FCoE Offload Driver
  *  Copyright (c) 2016-2018 Cavium Inc.
- *
- *  This software is available under the terms of the GNU General Public License
- *  (GPL) Version 2, available from the file COPYING in the main directory of
- *  this source tree.
  */
 #include "qedf.h"
 
diff --git a/drivers/scsi/qedf/qedf_dbg.c b/drivers/scsi/qedf/qedf_dbg.c
index f2397ee..e0387e4 100644
--- a/drivers/scsi/qedf/qedf_dbg.c
+++ b/drivers/scsi/qedf/qedf_dbg.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  *  QLogic FCoE Offload Driver
  *  Copyright (c) 2016-2018 Cavium Inc.
- *
- *  This software is available under the terms of the GNU General Public License
- *  (GPL) Version 2, available from the file COPYING in the main directory of
- *  this source tree.
  */
 #include "qedf_dbg.h"
 #include <linux/vmalloc.h>
@@ -15,10 +12,6 @@
 {
 	va_list va;
 	struct va_format vaf;
-	char nfunc[32];
-
-	memset(nfunc, 0, sizeof(nfunc));
-	memcpy(nfunc, func, sizeof(nfunc) - 1);
 
 	va_start(va, fmt);
 
@@ -27,9 +20,9 @@
 
 	if (likely(qedf) && likely(qedf->pdev))
 		pr_err("[%s]:[%s:%d]:%d: %pV", dev_name(&(qedf->pdev->dev)),
-			nfunc, line, qedf->host_no, &vaf);
+			func, line, qedf->host_no, &vaf);
 	else
-		pr_err("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+		pr_err("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf);
 
 	va_end(va);
 }
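
The nfunc removal here (and in the three sibling helpers that follow) is more than cleanup: memcpy(nfunc, func, sizeof(nfunc) - 1) always copied 31 bytes, so it could read past the end of a short __func__ literal and it silently truncated longer names; printing func directly through the %s conversion avoids both problems and drops a per-call memset/memcpy.
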
@@ -40,10 +33,6 @@
 {
 	va_list va;
 	struct va_format vaf;
-	char nfunc[32];
-
-	memset(nfunc, 0, sizeof(nfunc));
-	memcpy(nfunc, func, sizeof(nfunc) - 1);
 
 	va_start(va, fmt);
 
@@ -55,9 +44,9 @@
 
 	if (likely(qedf) && likely(qedf->pdev))
 		pr_warn("[%s]:[%s:%d]:%d: %pV", dev_name(&(qedf->pdev->dev)),
-			nfunc, line, qedf->host_no, &vaf);
+			func, line, qedf->host_no, &vaf);
 	else
-		pr_warn("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+		pr_warn("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf);
 
 ret:
 	va_end(va);
@@ -69,10 +58,6 @@
 {
 	va_list va;
 	struct va_format vaf;
-	char nfunc[32];
-
-	memset(nfunc, 0, sizeof(nfunc));
-	memcpy(nfunc, func, sizeof(nfunc) - 1);
 
 	va_start(va, fmt);
 
@@ -84,10 +69,10 @@
 
 	if (likely(qedf) && likely(qedf->pdev))
 		pr_notice("[%s]:[%s:%d]:%d: %pV",
-			  dev_name(&(qedf->pdev->dev)), nfunc, line,
+			  dev_name(&(qedf->pdev->dev)), func, line,
 			  qedf->host_no, &vaf);
 	else
-		pr_notice("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+		pr_notice("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf);
 
 ret:
 	va_end(va);
@@ -99,10 +84,6 @@
 {
 	va_list va;
 	struct va_format vaf;
-	char nfunc[32];
-
-	memset(nfunc, 0, sizeof(nfunc));
-	memcpy(nfunc, func, sizeof(nfunc) - 1);
 
 	va_start(va, fmt);
 
@@ -114,9 +95,9 @@
 
 	if (likely(qedf) && likely(qedf->pdev))
 		pr_info("[%s]:[%s:%d]:%d: %pV", dev_name(&(qedf->pdev->dev)),
-			nfunc, line, qedf->host_no, &vaf);
+			func, line, qedf->host_no, &vaf);
 	else
-		pr_info("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+		pr_info("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf);
 
 ret:
 	va_end(va);
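
Note on the qedf_dbg.c hunks above: the fixed-size nfunc bounce buffer was never needed, since printk's %pV mechanism expands the caller's format in a single pass and the function name can be forwarded as a plain %s. A minimal sketch of that logging shape, independent of the driver (my_dbg_warn and struct my_ctx are illustrative names, not qedf API):

#include <linux/kernel.h>
#include <linux/printk.h>

struct my_ctx { int host_no; };	/* illustrative stand-in */

/* Sketch: forward a variadic message through a single pr_warn() call. */
static void my_dbg_warn(struct my_ctx *ctx, const char *func, int line,
			const char *fmt, ...)
{
	struct va_format vaf;
	va_list va;

	va_start(va, fmt);
	vaf.fmt = fmt;
	vaf.va = &va;

	/* %pV expands the caller's fmt/args; func needs no bounce buffer. */
	pr_warn("[%s:%d]:%d: %pV", func, line, ctx ? ctx->host_no : -1, &vaf);

	va_end(va);
}
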
diff --git a/drivers/scsi/qedf/qedf_dbg.h b/drivers/scsi/qedf/qedf_dbg.h
index dd01096..d979f09 100644
--- a/drivers/scsi/qedf/qedf_dbg.h
+++ b/drivers/scsi/qedf/qedf_dbg.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  *  QLogic FCoE Offload Driver
  *  Copyright (c) 2016-2018 Cavium Inc.
- *
- *  This software is available under the terms of the GNU General Public License
- *  (GPL) Version 2, available from the file COPYING in the main directory of
- *  this source tree.
  */
 #ifndef _QEDF_DBG_H_
 #define _QEDF_DBG_H_
diff --git a/drivers/scsi/qedf/qedf_debugfs.c b/drivers/scsi/qedf/qedf_debugfs.c
index c29c162..b88bed9 100644
--- a/drivers/scsi/qedf/qedf_debugfs.c
+++ b/drivers/scsi/qedf/qedf_debugfs.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  *  QLogic FCoE Offload Driver
  *  Copyright (c) 2016-2018 QLogic Corporation
- *
- *  This software is available under the terms of the GNU General Public License
- *  (GPL) Version 2, available from the file COPYING in the main directory of
- *  this source tree.
  */
 #ifdef CONFIG_DEBUG_FS
 
@@ -27,30 +24,19 @@
 		    const struct file_operations *fops)
 {
 	char host_dirname[32];
-	struct dentry *file_dentry = NULL;
 
 	QEDF_INFO(qedf, QEDF_LOG_DEBUGFS, "Creating debugfs host node\n");
 	/* create pf dir */
 	sprintf(host_dirname, "host%u", qedf->host_no);
 	qedf->bdf_dentry = debugfs_create_dir(host_dirname, qedf_dbg_root);
-	if (!qedf->bdf_dentry)
-		return;
 
 	/* create debugfs files */
 	while (dops) {
 		if (!(dops->name))
 			break;
 
-		file_dentry = debugfs_create_file(dops->name, 0600,
-						  qedf->bdf_dentry, qedf,
-						  fops);
-		if (!file_dentry) {
-			QEDF_INFO(qedf, QEDF_LOG_DEBUGFS,
-				   "Debugfs entry %s creation failed\n",
-				   dops->name);
-			debugfs_remove_recursive(qedf->bdf_dentry);
-			return;
-		}
+		debugfs_create_file(dops->name, 0600, qedf->bdf_dentry, qedf,
+				    fops);
 		dops++;
 		fops++;
 	}
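
The dropped return-value checks follow the current debugfs convention: the creation helpers return ERR_PTR values rather than NULL on failure, debugfs_remove_recursive() tolerates those values, and drivers are expected not to alter behavior when debugfs is unavailable. A minimal sketch under that assumption (my_drv_dbg_init/my_drv_dbg_exit and the node names are illustrative):

#include <linux/debugfs.h>

static struct dentry *my_dbg_root;

/* Sketch: create debugfs nodes without checking return values. */
static void my_drv_dbg_init(void *priv, const struct file_operations *fops)
{
	my_dbg_root = debugfs_create_dir("my_drv", NULL);
	/* Safe even if my_dbg_root is an ERR_PTR; debugfs ignores it. */
	debugfs_create_file("debug", 0600, my_dbg_root, priv, fops);
}

static void my_drv_dbg_exit(void)
{
	debugfs_remove_recursive(my_dbg_root);	/* tolerates ERR_PTR/NULL */
	my_dbg_root = NULL;
}
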
@@ -61,13 +47,13 @@
  * @pf: the pf that is stopping
  **/
 void
-qedf_dbg_host_exit(struct qedf_dbg_ctx *qedf)
+qedf_dbg_host_exit(struct qedf_dbg_ctx *qedf_dbg)
 {
-	QEDF_INFO(qedf, QEDF_LOG_DEBUGFS, "Destroying debugfs host "
+	QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "Destroying debugfs host "
 		   "entry\n");
 	/* remove debugfs  entries of this PF */
-	debugfs_remove_recursive(qedf->bdf_dentry);
-	qedf->bdf_dentry = NULL;
+	debugfs_remove_recursive(qedf_dbg->bdf_dentry);
+	qedf_dbg->bdf_dentry = NULL;
 }
 
 /**
@@ -80,9 +66,6 @@
 
 	/* create qed dir in root of debugfs. NULL means debugfs root */
 	qedf_dbg_root = debugfs_create_dir(drv_name, NULL);
-	if (!qedf_dbg_root)
-		QEDF_INFO(NULL, QEDF_LOG_DEBUGFS, "Init of debugfs "
-			   "failed\n");
 }
 
 /**
@@ -157,10 +140,10 @@
 			loff_t *ppos)
 {
 	int cnt;
-	struct qedf_dbg_ctx *qedf =
+	struct qedf_dbg_ctx *qedf_dbg =
 				(struct qedf_dbg_ctx *)filp->private_data;
 
-	QEDF_INFO(qedf, QEDF_LOG_DEBUGFS, "entered\n");
+	QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "debug mask=0x%x\n", qedf_debug);
 	cnt = sprintf(buffer, "debug mask = 0x%x\n", qedf_debug);
 
 	cnt = min_t(int, count, cnt - *ppos);
@@ -175,7 +158,7 @@
 	uint32_t val;
 	void *kern_buf;
 	int rval;
-	struct qedf_dbg_ctx *qedf =
+	struct qedf_dbg_ctx *qedf_dbg =
 	    (struct qedf_dbg_ctx *)filp->private_data;
 
 	if (!count || *ppos)
@@ -195,7 +178,7 @@
 	else
 		qedf_debug = val;
 
-	QEDF_INFO(qedf, QEDF_LOG_DEBUGFS, "Setting debug=0x%x.\n", val);
+	QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "Setting debug=0x%x.\n", val);
 	return count;
 }
 
@@ -307,6 +290,33 @@
 	return single_open(file, qedf_io_trace_show, qedf);
 }
 
+/* Based on fip_state enum from libfcoe.h */
+static char *fip_state_names[] = {
+	"FIP_ST_DISABLED",
+	"FIP_ST_LINK_WAIT",
+	"FIP_ST_AUTO",
+	"FIP_ST_NON_FIP",
+	"FIP_ST_ENABLED",
+	"FIP_ST_VNMP_START",
+	"FIP_ST_VNMP_PROBE1",
+	"FIP_ST_VNMP_PROBE2",
+	"FIP_ST_VNMP_CLAIM",
+	"FIP_ST_VNMP_UP",
+};
+
+/* Based on fc_rport_state enum from libfc.h */
+static char *fc_rport_state_names[] = {
+	"RPORT_ST_INIT",
+	"RPORT_ST_FLOGI",
+	"RPORT_ST_PLOGI_WAIT",
+	"RPORT_ST_PLOGI",
+	"RPORT_ST_PRLI",
+	"RPORT_ST_RTV",
+	"RPORT_ST_READY",
+	"RPORT_ST_ADISC",
+	"RPORT_ST_DELETE",
+};
+
 static int
 qedf_driver_stats_show(struct seq_file *s, void *unused)
 {
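
The two name tables added above are indexed directly by libfcoe/libfc enum values, so they rely on those enums staying dense and starting at zero. A hedged sketch of a bounds-checked lookup that fails soft if the enums ever grow (rport_state_name() and the shortened table are illustrative, not part of the driver):

#include <linux/kernel.h>	/* ARRAY_SIZE */

static const char * const rport_state_names[] = {
	"INIT", "FLOGI", "PLOGI_WAIT", "PLOGI",
	"PRLI", "RTV", "READY", "ADISC", "DELETE",
};

/* Sketch: never index past the table even if the enum gains states. */
static const char *rport_state_name(unsigned int state)
{
	if (state >= ARRAY_SIZE(rport_state_names))
		return "UNKNOWN";
	return rport_state_names[state];
}
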
@@ -314,10 +324,28 @@
 	struct qedf_rport *fcport;
 	struct fc_rport_priv *rdata;
 
+	seq_printf(s, "Host WWNN/WWPN: %016llx/%016llx\n",
+		   qedf->wwnn, qedf->wwpn);
+	seq_printf(s, "Host NPortID: %06x\n", qedf->lport->port_id);
+	seq_printf(s, "Link State: %s\n", atomic_read(&qedf->link_state) ?
+	    "Up" : "Down");
+	seq_printf(s, "Logical Link State: %s\n", qedf->lport->link_up ?
+	    "Up" : "Down");
+	seq_printf(s, "FIP state: %s\n", fip_state_names[qedf->ctlr.state]);
+	seq_printf(s, "FIP VLAN ID: %d\n", qedf->vlan_id & 0xfff);
+	seq_printf(s, "FIP 802.1Q Priority: %d\n", qedf->prio);
+	if (qedf->ctlr.sel_fcf) {
+		seq_printf(s, "FCF WWPN: %016llx\n",
+			   qedf->ctlr.sel_fcf->switch_name);
+		seq_printf(s, "FCF MAC: %pM\n", qedf->ctlr.sel_fcf->fcf_mac);
+	} else {
+		seq_puts(s, "FCF not selected\n");
+	}
+
+	seq_puts(s, "\nSGE stats:\n\n");
 	seq_printf(s, "cmd_mgr free io_reqs: %d\n",
 	    atomic_read(&qedf->cmd_mgr->free_list_cnt));
 	seq_printf(s, "slow SGEs: %d\n", qedf->slow_sge_ios);
-	seq_printf(s, "single SGEs: %d\n", qedf->single_sge_ios);
 	seq_printf(s, "fast SGEs: %d\n\n", qedf->fast_sge_ios);
 
 	seq_puts(s, "Offloaded ports:\n\n");
@@ -327,9 +355,12 @@
 		rdata = fcport->rdata;
 		if (rdata == NULL)
 			continue;
-		seq_printf(s, "%06x: free_sqes: %d, num_active_ios: %d\n",
-		    rdata->ids.port_id, atomic_read(&fcport->free_sqes),
-		    atomic_read(&fcport->num_active_ios));
+		seq_printf(s, "%016llx/%016llx/%06x: state=%s, free_sqes=%d, num_active_ios=%d\n",
+			   rdata->rport->node_name, rdata->rport->port_name,
+			   rdata->ids.port_id,
+			   fc_rport_state_names[rdata->rp_state],
+			   atomic_read(&fcport->free_sqes),
+			   atomic_read(&fcport->num_active_ios));
 	}
 	rcu_read_unlock();
 
@@ -375,7 +406,6 @@
 
 	/* Clear stat counters exposed by 'stats' node */
 	qedf->slow_sge_ios = 0;
-	qedf->single_sge_ios = 0;
 	qedf->fast_sge_ios = 0;
 
 	return count;
diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c
index 04f0c4d..87e169d 100644
--- a/drivers/scsi/qedf/qedf_els.c
+++ b/drivers/scsi/qedf/qedf_els.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  *  QLogic FCoE Offload Driver
  *  Copyright (c) 2016-2018 Cavium Inc.
- *
- *  This software is available under the terms of the GNU General Public License
- *  (GPL) Version 2, available from the file COPYING in the main directory of
- *  this source tree.
  */
 #include "qedf.h"
 
@@ -23,8 +20,6 @@
 	int rc = 0;
 	uint32_t did, sid;
 	uint16_t xid;
-	uint32_t start_time = jiffies / HZ;
-	uint32_t current_time;
 	struct fcoe_wqe *sqe;
 	unsigned long flags;
 	u16 sqe_idx;
@@ -59,18 +54,12 @@
 		goto els_err;
 	}
 
-retry_els:
 	els_req = qedf_alloc_cmd(fcport, QEDF_ELS);
 	if (!els_req) {
-		current_time = jiffies / HZ;
-		if ((current_time - start_time) > 10) {
-			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
-				   "els: Failed els 0x%x\n", op);
-			rc = -ENOMEM;
-			goto els_err;
-		}
-		mdelay(20 * USEC_PER_MSEC);
-		goto retry_els;
+		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
+			  "Failed to alloc ELS request 0x%x\n", op);
+		rc = -ENOMEM;
+		goto els_err;
 	}
 
 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "initiate_els els_req = "
@@ -143,6 +132,8 @@
 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Ringing doorbell for ELS "
 		   "req\n");
 	qedf_ring_doorbell(fcport);
+	set_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);
+
 	spin_unlock_irqrestore(&fcport->rport_lock, flags);
 els_err:
 	return rc;
@@ -151,21 +142,16 @@
 void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
 	struct qedf_ioreq *els_req)
 {
-	struct fcoe_task_context *task_ctx;
-	struct scsi_cmnd *sc_cmd;
-	uint16_t xid;
 	struct fcoe_cqe_midpath_info *mp_info;
 
 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered with xid = 0x%x"
 		   " cmd_type = %d.\n", els_req->xid, els_req->cmd_type);
 
+	clear_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);
+
 	/* Kill the ELS timer */
 	cancel_delayed_work(&els_req->timeout_work);
 
-	xid = els_req->xid;
-	task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
-	sc_cmd = els_req->sc_cmd;
-
 	/* Get ELS response length from CQE */
 	mp_info = &cqe->cqe_info.midpath_info;
 	els_req->mp_req.resp_len = mp_info->data_placement_size;
@@ -193,8 +179,11 @@
 
 	orig_io_req = cb_arg->aborted_io_req;
 
-	if (!orig_io_req)
+	if (!orig_io_req) {
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "Original io_req is NULL, rrq_req = %p.\n", rrq_req);
 		goto out_free;
+	}
 
 	if (rrq_req->event != QEDF_IOREQ_EV_ELS_TMO &&
 	    rrq_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
@@ -205,8 +194,12 @@
 		   " orig xid = 0x%x, rrq_xid = 0x%x, refcount=%d\n",
 		   orig_io_req, orig_io_req->xid, rrq_req->xid, refcount);
 
-	/* This should return the aborted io_req to the command pool */
-	if (orig_io_req)
+	/*
+	 * This should return the aborted io_req to the command pool. Note that
+	 * we need to check the refcount in case the original request was
+	 * flushed but we get a completion on this xid.
+	 */
+	if (orig_io_req && refcount > 0)
 		kref_put(&orig_io_req->refcount, qedf_release_cmd);
 
 out_free:
@@ -233,6 +226,7 @@
 	uint32_t sid;
 	uint32_t r_a_tov;
 	int rc;
+	int refcount;
 
 	if (!aborted_io_req) {
 		QEDF_ERR(NULL, "abort_io_req is NULL.\n");
@@ -241,6 +235,15 @@
 
 	fcport = aborted_io_req->fcport;
 
+	if (!fcport) {
+		refcount = kref_read(&aborted_io_req->refcount);
+		QEDF_ERR(NULL,
+			 "RRQ work was queued prior to a flush xid=0x%x, refcount=%d.\n",
+			 aborted_io_req->xid, refcount);
+		kref_put(&aborted_io_req->refcount, qedf_release_cmd);
+		return -EINVAL;
+	}
+
 	/* Check that fcport is still offloaded */
 	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
 		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
@@ -253,6 +256,19 @@
 	}
 
 	qedf = fcport->qedf;
+
+	/*
+	 * Sanity check that we can send a RRQ to make sure that refcount isn't
+	 * 0
+	 */
+	refcount = kref_read(&aborted_io_req->refcount);
+	if (refcount != 1) {
+		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
+			  "refcount for xid=%x io_req=%p refcount=%d is not 1.\n",
+			  aborted_io_req->xid, aborted_io_req, refcount);
+		return -EINVAL;
+	}
+
 	lport = qedf->lport;
 	sid = fcport->sid;
 	r_a_tov = lport->r_a_tov;
@@ -335,32 +351,51 @@
 	struct fc_lport *lport;
 	struct fc_rport_priv *rdata;
 	u32 port_id;
+	unsigned long flags;
 
-	if (!fcport)
+	if (!fcport) {
+		QEDF_ERR(NULL, "fcport is NULL.\n");
 		return;
+	}
 
+	spin_lock_irqsave(&fcport->rport_lock, flags);
 	if (test_bit(QEDF_RPORT_IN_RESET, &fcport->flags) ||
 	    !test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
 	    test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
 		QEDF_ERR(&(fcport->qedf->dbg_ctx), "fcport %p already in reset or not offloaded.\n",
 		    fcport);
+		spin_unlock_irqrestore(&fcport->rport_lock, flags);
 		return;
 	}
 
 	/* Set that we are now in reset */
 	set_bit(QEDF_RPORT_IN_RESET, &fcport->flags);
+	spin_unlock_irqrestore(&fcport->rport_lock, flags);
 
 	rdata = fcport->rdata;
-	if (rdata) {
+	if (rdata && !kref_get_unless_zero(&rdata->kref)) {
+		fcport->rdata = NULL;
+		rdata = NULL;
+	}
+
+	if (rdata && rdata->rp_state == RPORT_ST_READY) {
 		lport = fcport->qedf->lport;
 		port_id = rdata->ids.port_id;
 		QEDF_ERR(&(fcport->qedf->dbg_ctx),
 		    "LOGO port_id=%x.\n", port_id);
 		fc_rport_logoff(rdata);
+		kref_put(&rdata->kref, fc_rport_destroy);
+		mutex_lock(&lport->disc.disc_mutex);
 		/* Recreate the rport and log back in */
 		rdata = fc_rport_create(lport, port_id);
-		if (rdata)
+		if (rdata) {
+			mutex_unlock(&lport->disc.disc_mutex);
 			fc_rport_login(rdata);
+			fcport->rdata = rdata;
+		} else {
+			mutex_unlock(&lport->disc.disc_mutex);
+			fcport->rdata = NULL;
+		}
 	}
 	clear_bit(QEDF_RPORT_IN_RESET, &fcport->flags);
 }
@@ -388,8 +423,11 @@
 	 * If we are flushing the command just free the cb_arg as none of the
 	 * response data will be valid.
 	 */
-	if (els_req->event == QEDF_IOREQ_EV_ELS_FLUSH)
+	if (els_req->event == QEDF_IOREQ_EV_ELS_FLUSH) {
+		QEDF_ERR(NULL, "els_req xid=0x%x event is flush.\n",
+			 els_req->xid);
 		goto free_arg;
+	}
 
 	fcport = els_req->fcport;
 	mp_req = &(els_req->mp_req);
@@ -502,8 +540,10 @@
 
 	orig_io_req = cb_arg->aborted_io_req;
 
-	if (!orig_io_req)
+	if (!orig_io_req) {
+		QEDF_ERR(NULL, "orig_io_req is NULL.\n");
 		goto out_free;
+	}
 
 	clear_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);
 
@@ -517,8 +557,11 @@
 		   orig_io_req, orig_io_req->xid, srr_req->xid, refcount);
 
 	/* If a SRR times out, simply free resources */
-	if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO)
+	if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO) {
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "ELS timeout rec_xid=0x%x.\n", srr_req->xid);
 		goto out_put;
+	}
 
 	/* Normalize response data into struct fc_frame */
 	mp_req = &(srr_req->mp_req);
@@ -569,7 +612,7 @@
 	struct qedf_rport *fcport;
 	struct fc_lport *lport;
 	struct qedf_els_cb_arg *cb_arg = NULL;
-	u32 sid, r_a_tov;
+	u32 r_a_tov;
 	int rc;
 
 	if (!orig_io_req) {
@@ -595,7 +638,6 @@
 
 	qedf = fcport->qedf;
 	lport = qedf->lport;
-	sid = fcport->sid;
 	r_a_tov = lport->r_a_tov;
 
 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending SRR orig_io=%p, "
@@ -692,8 +734,11 @@
 	cb_arg = io_req->cb_arg;
 
 	/* If we timed out just free resources */
-	if (io_req->event == QEDF_IOREQ_EV_ELS_TMO || !cqe)
+	if (io_req->event == QEDF_IOREQ_EV_ELS_TMO || !cqe) {
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "cqe is NULL or timeout event (0x%x)", io_req->event);
 		goto free;
+	}
 
 	/* Kill the timer we put on the request */
 	cancel_delayed_work_sync(&io_req->timeout_work);
@@ -796,8 +841,10 @@
 
 	orig_io_req = cb_arg->aborted_io_req;
 
-	if (!orig_io_req)
+	if (!orig_io_req) {
+		QEDF_ERR(NULL, "orig_io_req is NULL.\n");
 		goto out_free;
+	}
 
 	if (rec_req->event != QEDF_IOREQ_EV_ELS_TMO &&
 	    rec_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
@@ -809,8 +856,12 @@
 		   orig_io_req, orig_io_req->xid, rec_req->xid, refcount);
 
 	/* If a REC times out, free resources */
-	if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO)
+	if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO) {
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "Got TMO event, orig_io_req %p orig_io_xid=0x%x.\n",
+			 orig_io_req, orig_io_req->xid);
 		goto out_put;
+	}
 
 	/* Normalize response data into struct fc_frame */
 	mp_req = &(rec_req->mp_req);
diff --git a/drivers/scsi/qedf/qedf_fip.c b/drivers/scsi/qedf/qedf_fip.c
index 3fd3af7..bb82f08 100644
--- a/drivers/scsi/qedf/qedf_fip.c
+++ b/drivers/scsi/qedf/qedf_fip.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  *  QLogic FCoE Offload Driver
  *  Copyright (c) 2016-2018 Cavium Inc.
- *
- *  This software is available under the terms of the GNU General Public License
- *  (GPL) Version 2, available from the file COPYING in the main directory of
- *  this source tree.
  */
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
@@ -19,17 +16,19 @@
 {
 	struct sk_buff *skb;
 	char *eth_fr;
-	int fr_len;
 	struct fip_vlan *vlan;
 #define MY_FIP_ALL_FCF_MACS        ((__u8[6]) { 1, 0x10, 0x18, 1, 0, 2 })
 	static u8 my_fcoe_all_fcfs[ETH_ALEN] = MY_FIP_ALL_FCF_MACS;
 	unsigned long flags = 0;
+	int rc = -1;
 
 	skb = dev_alloc_skb(sizeof(struct fip_vlan));
-	if (!skb)
+	if (!skb) {
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "Failed to allocate skb.\n");
 		return;
+	}
 
-	fr_len = sizeof(*vlan);
 	eth_fr = (char *)skb->data;
 	vlan = (struct fip_vlan *)eth_fr;
 
@@ -68,7 +67,13 @@
 	}
 
 	set_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &flags);
-	qed_ops->ll2->start_xmit(qedf->cdev, skb, flags);
+	rc = qed_ops->ll2->start_xmit(qedf->cdev, skb, flags);
+	if (rc) {
+		QEDF_ERR(&qedf->dbg_ctx, "start_xmit failed rc = %d.\n", rc);
+		kfree_skb(skb);
+		return;
+	}
+
 }
 
 static void qedf_fcoe_process_vlan_resp(struct qedf_ctx *qedf,
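
The new rc handling in both FIP transmit paths reflects skb ownership rules: a successful ->start_xmit consumes the skb, while on failure the caller still owns it and must free it to avoid a leak. A minimal sketch of that pattern (my_ll2_xmit stands in for the qed LL2 hook and is assumed to consume the skb only on success):

#include <linux/skbuff.h>
#include <linux/printk.h>

/* Illustrative transmit hook; assumed to consume skb only on success. */
extern int my_ll2_xmit(void *cdev, struct sk_buff *skb, unsigned long flags);

static void send_frame(void *cdev, struct sk_buff *skb)
{
	int rc = my_ll2_xmit(cdev, skb, 0);

	if (rc) {
		pr_err("xmit failed rc=%d, freeing skb\n", rc);
		kfree_skb(skb);	/* still ours on failure */
	}
}
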
@@ -95,6 +100,12 @@
 		rlen -= dlen;
 	}
 
+	if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
+		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+			  "Dropping VLAN response as link is down.\n");
+		return;
+	}
+
 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "VLAN response, "
 		   "vid=0x%x.\n", vid);
 
@@ -114,6 +125,7 @@
 	struct fip_header *fiph;
 	u16 op, vlan_tci = 0;
 	u8 sub;
+	int rc = -1;
 
 	if (!test_bit(QEDF_LL2_STARTED, &qedf->flags)) {
 		QEDF_WARN(&(qedf->dbg_ctx), "LL2 not started\n");
@@ -142,9 +154,16 @@
 		print_hex_dump(KERN_WARNING, "fip ", DUMP_PREFIX_OFFSET, 16, 1,
 		    skb->data, skb->len, false);
 
-	qed_ops->ll2->start_xmit(qedf->cdev, skb, 0);
+	rc = qed_ops->ll2->start_xmit(qedf->cdev, skb, 0);
+	if (rc) {
+		QEDF_ERR(&qedf->dbg_ctx, "start_xmit failed rc = %d.\n", rc);
+		kfree_skb(skb);
+		return;
+	}
 }
 
+static u8 fcoe_all_enode[ETH_ALEN] = FIP_ALL_ENODE_MACS;
+
 /* Process incoming FIP frames. */
 void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb)
 {
@@ -157,20 +176,37 @@
 	size_t rlen, dlen;
 	u16 op;
 	u8 sub;
-	bool do_reset = false;
+	bool fcf_valid = false;
+	/* Default is to handle CVL regardless of fabric id descriptor */
+	bool fabric_id_valid = true;
+	bool fc_wwpn_valid = false;
+	u64 switch_name;
+	u16 vlan = 0;
 
 	eth_hdr = (struct ethhdr *)skb_mac_header(skb);
 	fiph = (struct fip_header *) ((void *)skb->data + 2 * ETH_ALEN + 2);
 	op = ntohs(fiph->fip_op);
 	sub = fiph->fip_subcode;
 
-	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FIP frame received: "
-	    "skb=%p fiph=%p source=%pM op=%x sub=%x", skb, fiph,
-	    eth_hdr->h_source, op, sub);
+	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
+		  "FIP frame received: skb=%p fiph=%p source=%pM destn=%pM op=%x sub=%x vlan=%04x",
+		  skb, fiph, eth_hdr->h_source, eth_hdr->h_dest, op,
+		  sub, vlan);
 	if (qedf_dump_frames)
 		print_hex_dump(KERN_WARNING, "fip ", DUMP_PREFIX_OFFSET, 16, 1,
 		    skb->data, skb->len, false);
 
+	if (!ether_addr_equal(eth_hdr->h_dest, qedf->mac) &&
+	    !ether_addr_equal(eth_hdr->h_dest, fcoe_all_enode) &&
+		!ether_addr_equal(eth_hdr->h_dest, qedf->data_src_addr)) {
+		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
+			  "Dropping FIP type 0x%x pkt due to destination MAC mismatch dest_mac=%pM ctlr.dest_addr=%pM data_src_addr=%pM.\n",
+			  op, eth_hdr->h_dest, qedf->mac,
+			  qedf->data_src_addr);
+		kfree_skb(skb);
+		return;
+	}
+
 	/* Handle FIP VLAN resp in the driver */
 	if (op == FIP_OP_VLAN && sub == FIP_SC_VL_NOTE) {
 		qedf_fcoe_process_vlan_resp(qedf, skb);
@@ -199,25 +235,42 @@
 			switch (desc->fip_dtype) {
 			case FIP_DT_MAC:
 				mp = (struct fip_mac_desc *)desc;
-				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
-				    "fd_mac=%pM\n", mp->fd_mac);
+				QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+					  "Switch fd_mac=%pM.\n", mp->fd_mac);
 				if (ether_addr_equal(mp->fd_mac,
 				    qedf->ctlr.sel_fcf->fcf_mac))
-					do_reset = true;
+					fcf_valid = true;
 				break;
 			case FIP_DT_NAME:
 				wp = (struct fip_wwn_desc *)desc;
-				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
-				    "fc_wwpn=%016llx.\n",
-				    get_unaligned_be64(&wp->fd_wwn));
+				switch_name = get_unaligned_be64(&wp->fd_wwn);
+				QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+					  "Switch fd_wwn=%016llx fcf_switch_name=%016llx.\n",
+					  switch_name,
+					  qedf->ctlr.sel_fcf->switch_name);
+				if (switch_name ==
+				    qedf->ctlr.sel_fcf->switch_name)
+					fc_wwpn_valid = true;
 				break;
 			case FIP_DT_VN_ID:
+				fabric_id_valid = false;
 				vp = (struct fip_vn_desc *)desc;
-				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
-				    "fd_fc_id=%x.\n", ntoh24(vp->fd_fc_id));
-				if (ntoh24(vp->fd_fc_id) ==
-				    qedf->lport->port_id)
-					do_reset = true;
+
+				QEDF_ERR(&qedf->dbg_ctx,
+					 "CVL vx_port fd_fc_id=0x%x fd_mac=%pM fd_wwpn=%016llx.\n",
+					 ntoh24(vp->fd_fc_id), vp->fd_mac,
+					 get_unaligned_be64(&vp->fd_wwpn));
+				/* Match on the vx_port WWPN, the vx_port
+				 * fabric ID, or the vx_port MAC.
+				 */
+				if ((get_unaligned_be64(&vp->fd_wwpn) ==
+					qedf->wwpn) ||
+				   (ntoh24(vp->fd_fc_id) ==
+					qedf->lport->port_id) ||
+				   (ether_addr_equal(vp->fd_mac,
+					qedf->data_src_addr))) {
+					fabric_id_valid = true;
+				}
 				break;
 			default:
 				/* Ignore anything else */
@@ -227,13 +280,11 @@
 			rlen -= dlen;
 		}
 
-		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
-		    "do_reset=%d.\n", do_reset);
-		if (do_reset) {
-			fcoe_ctlr_link_down(&qedf->ctlr);
-			qedf_wait_for_upload(qedf);
-			fcoe_ctlr_link_up(&qedf->ctlr);
-		}
+		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+			  "fcf_valid=%d fabric_id_valid=%d fc_wwpn_valid=%d.\n",
+			  fcf_valid, fabric_id_valid, fc_wwpn_valid);
+		if (fcf_valid && fabric_id_valid && fc_wwpn_valid)
+			qedf_ctx_soft_reset(qedf->lport);
 		kfree_skb(skb);
 	} else {
 		/* Everything else is handled by libfcoe */
diff --git a/drivers/scsi/qedf/qedf_hsi.h b/drivers/scsi/qedf/qedf_hsi.h
index f6f634e..ecd5cb5 100644
--- a/drivers/scsi/qedf/qedf_hsi.h
+++ b/drivers/scsi/qedf/qedf_hsi.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  *  QLogic FCoE Offload Driver
  *  Copyright (c) 2016-2018 Cavium Inc.
- *
- *  This software is available under the terms of the GNU General Public License
- *  (GPL) Version 2, available from the file COPYING in the main directory of
- *  this source tree.
  */
 #ifndef __QEDF_HSI__
 #define __QEDF_HSI__
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index 6bbc38b..e749a2d 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  *  QLogic FCoE Offload Driver
  *  Copyright (c) 2016-2018 Cavium Inc.
- *
- *  This software is available under the terms of the GNU General Public License
- *  (GPL) Version 2, available from the file COPYING in the main directory of
- *  this source tree.
  */
 #include <linux/spinlock.h>
 #include <linux/vmalloc.h>
@@ -43,8 +40,9 @@
 	switch (io_req->cmd_type) {
 	case QEDF_ABTS:
 		if (qedf == NULL) {
-			QEDF_INFO(NULL, QEDF_LOG_IO, "qedf is NULL for xid=0x%x.\n",
-			    io_req->xid);
+			QEDF_INFO(NULL, QEDF_LOG_IO,
+				  "qedf is NULL for ABTS xid=0x%x.\n",
+				  io_req->xid);
 			return;
 		}
 
@@ -61,6 +59,9 @@
 		 */
 		kref_put(&io_req->refcount, qedf_release_cmd);
 
+		/* Clear the in-abort bit now that we're done with the command */
+		clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
+
 		/*
 		 * Now that the original I/O and the ABTS are complete see
 		 * if we need to reconnect to the target.
@@ -68,6 +69,15 @@
 		qedf_restart_rport(fcport);
 		break;
 	case QEDF_ELS:
+		if (!qedf) {
+			QEDF_INFO(NULL, QEDF_LOG_IO,
+				  "qedf is NULL for ELS xid=0x%x.\n",
+				  io_req->xid);
+			return;
+		}
+		/* ELS request no longer outstanding since it timed out */
+		clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
+
 		kref_get(&io_req->refcount);
 		/*
 		 * Don't attempt to clean an ELS timeout as any subsequent
@@ -94,6 +104,8 @@
 		qedf_process_seq_cleanup_compl(qedf, NULL, io_req);
 		break;
 	default:
+		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+			  "Hit default case, xid=0x%x.\n", io_req->xid);
 		break;
 	}
 }
@@ -103,7 +115,7 @@
 	struct io_bdt *bdt_info;
 	struct qedf_ctx *qedf = cmgr->qedf;
 	size_t bd_tbl_sz;
-	u16 min_xid = QEDF_MIN_XID;
+	u16 min_xid = 0;
 	u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
 	int num_ios;
 	int i;
@@ -112,8 +124,10 @@
 	num_ios = max_xid - min_xid + 1;
 
 	/* Free fcoe_bdt_ctx structures */
-	if (!cmgr->io_bdt_pool)
+	if (!cmgr->io_bdt_pool) {
+		QEDF_ERR(&qedf->dbg_ctx, "io_bdt_pool is NULL.\n");
 		goto free_cmd_pool;
+	}
 
 	bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge);
 	for (i = 0; i < num_ios; i++) {
@@ -157,6 +171,7 @@
 	struct qedf_ioreq *io_req =
 	    container_of(work, struct qedf_ioreq, rrq_work.work);
 
+	atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_ACTIVE);
 	qedf_send_rrq(io_req);
 
 }
@@ -169,7 +184,7 @@
 	u16 xid;
 	int i;
 	int num_ios;
-	u16 min_xid = QEDF_MIN_XID;
+	u16 min_xid = 0;
 	u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
 
 	/* Make sure num_queues is already set before calling this function */
@@ -201,7 +216,7 @@
 	/*
 	 * Initialize I/O request fields.
 	 */
-	xid = QEDF_MIN_XID;
+	xid = 0;
 
 	for (i = 0; i < num_ios; i++) {
 		io_req = &cmgr->cmds[i];
@@ -215,8 +230,11 @@
 		io_req->sense_buffer = dma_alloc_coherent(&qedf->pdev->dev,
 		    QEDF_SCSI_SENSE_BUFFERSIZE, &io_req->sense_buffer_dma,
 		    GFP_KERNEL);
-		if (!io_req->sense_buffer)
+		if (!io_req->sense_buffer) {
+			QEDF_ERR(&qedf->dbg_ctx,
+				 "Failed to alloc sense buffer.\n");
 			goto mem_err;
+		}
 
 		/* Allocate task parameters to pass to f/w init functions */
 		io_req->task_params = kzalloc(sizeof(*io_req->task_params),
@@ -329,7 +347,7 @@
 			cmd_mgr->idx = 0;
 
 		/* Check to make sure command was previously freed */
-		if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags))
+		if (!io_req->alloc)
 			break;
 	}
 
@@ -338,7 +356,14 @@
 		goto out_failed;
 	}
 
-	set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
+	if (test_bit(QEDF_CMD_DIRTY, &io_req->flags))
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "io_req found to be dirty ox_id = 0x%x.\n",
+			 io_req->xid);
+
+	/* Clear any flags now that we've reallocated the xid */
+	io_req->flags = 0;
+	io_req->alloc = 1;
 	spin_unlock_irqrestore(&cmd_mgr->lock, flags);
 
 	atomic_inc(&fcport->num_active_ios);
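
qedf_alloc_cmd now records allocation in a dedicated ->alloc field instead of overloading QEDF_CMD_OUTSTANDING, which frees that flag to mean strictly "submitted to firmware". A minimal sketch of the round-robin claim under the manager lock (struct slot, struct mgr and NUM_SLOTS are illustrative stand-ins for the cmd_mgr internals):

#include <linux/spinlock.h>

#define NUM_SLOTS 16	/* illustrative pool size */

struct slot { unsigned int alloc; };

struct mgr {
	spinlock_t lock;
	unsigned int idx;
	struct slot slots[NUM_SLOTS];
};

/* Sketch: round-robin claim of a free slot; NULL if all are busy. */
static struct slot *claim_slot(struct mgr *m)
{
	struct slot *s = NULL;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&m->lock, flags);
	for (i = 0; i < NUM_SLOTS; i++) {
		struct slot *cand = &m->slots[m->idx];

		m->idx = (m->idx + 1) % NUM_SLOTS;
		if (!cand->alloc) {
			cand->alloc = 1;	/* claimed under the lock */
			s = cand;
			break;
		}
	}
	spin_unlock_irqrestore(&m->lock, flags);
	return s;
}
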
@@ -349,8 +374,13 @@
 	io_req->cmd_mgr = cmd_mgr;
 	io_req->fcport = fcport;
 
+	/* Clear any stale sc_cmd back pointer */
+	io_req->sc_cmd = NULL;
+	io_req->lun = -1;
+
 	/* Hold the io_req against deletion */
-	kref_init(&io_req->refcount);
+	kref_init(&io_req->refcount);	/* ID: 001 */
+	atomic_set(&io_req->state, QEDFC_CMD_ST_IO_ACTIVE);
 
 	/* Bind io_bdt for this io_req */
 	/* Have a static link between io_req and io_bdt_pool */
@@ -412,6 +442,14 @@
 	    container_of(ref, struct qedf_ioreq, refcount);
 	struct qedf_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
 	struct qedf_rport *fcport = io_req->fcport;
+	unsigned long flags;
+
+	if (io_req->cmd_type == QEDF_SCSI_CMD) {
+		QEDF_WARN(&fcport->qedf->dbg_ctx,
+			  "Cmd released called without scsi_done called, io_req %p xid=0x%x.\n",
+			  io_req, io_req->xid);
+		WARN_ON(io_req->sc_cmd);
+	}
 
 	if (io_req->cmd_type == QEDF_ELS ||
 	    io_req->cmd_type == QEDF_TASK_MGMT_CMD)
@@ -419,36 +457,22 @@
 
 	atomic_inc(&cmd_mgr->free_list_cnt);
 	atomic_dec(&fcport->num_active_ios);
-	if (atomic_read(&fcport->num_active_ios) < 0)
+	atomic_set(&io_req->state, QEDF_CMD_ST_INACTIVE);
+	if (atomic_read(&fcport->num_active_ios) < 0) {
 		QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n");
+		WARN_ON(1);
+	}
 
 	/* Increment task retry identifier now that the request is released */
 	io_req->task_retry_identifier++;
+	io_req->fcport = NULL;
 
-	clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
-}
-
-static int qedf_split_bd(struct qedf_ioreq *io_req, u64 addr, int sg_len,
-	int bd_index)
-{
-	struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
-	int frag_size, sg_frags;
-
-	sg_frags = 0;
-	while (sg_len) {
-		if (sg_len > QEDF_BD_SPLIT_SZ)
-			frag_size = QEDF_BD_SPLIT_SZ;
-		else
-			frag_size = sg_len;
-		bd[bd_index + sg_frags].sge_addr.lo = U64_LO(addr);
-		bd[bd_index + sg_frags].sge_addr.hi = U64_HI(addr);
-		bd[bd_index + sg_frags].sge_len = (uint16_t)frag_size;
-
-		addr += (u64)frag_size;
-		sg_frags++;
-		sg_len -= frag_size;
-	}
-	return sg_frags;
+	clear_bit(QEDF_CMD_DIRTY, &io_req->flags);
+	io_req->cpu = 0;
+	spin_lock_irqsave(&cmd_mgr->lock, flags);
+	io_req->fcport = NULL;
+	io_req->alloc = 0;
+	spin_unlock_irqrestore(&cmd_mgr->lock, flags);
 }
 
 static int qedf_map_sg(struct qedf_ioreq *io_req)
@@ -462,75 +486,45 @@
 	int byte_count = 0;
 	int sg_count = 0;
 	int bd_count = 0;
-	int sg_frags;
-	unsigned int sg_len;
+	u32 sg_len;
 	u64 addr, end_addr;
-	int i;
+	int i = 0;
 
 	sg_count = dma_map_sg(&qedf->pdev->dev, scsi_sglist(sc),
 	    scsi_sg_count(sc), sc->sc_data_direction);
-
 	sg = scsi_sglist(sc);
 
-	/*
-	 * New condition to send single SGE as cached-SGL with length less
-	 * than 64k.
-	 */
-	if ((sg_count == 1) && (sg_dma_len(sg) <=
-	    QEDF_MAX_SGLEN_FOR_CACHESGL)) {
-		sg_len = sg_dma_len(sg);
-		addr = (u64)sg_dma_address(sg);
+	io_req->sge_type = QEDF_IOREQ_UNKNOWN_SGE;
 
-		bd[bd_count].sge_addr.lo = (addr & 0xffffffff);
-		bd[bd_count].sge_addr.hi = (addr >> 32);
-		bd[bd_count].sge_len = (u16)sg_len;
-
-		return ++bd_count;
-	}
+	if (sg_count <= 8 || io_req->io_req_flags == QEDF_READ)
+		io_req->sge_type = QEDF_IOREQ_FAST_SGE;
 
 	scsi_for_each_sg(sc, sg, sg_count, i) {
-		sg_len = sg_dma_len(sg);
+		sg_len = (u32)sg_dma_len(sg);
 		addr = (u64)sg_dma_address(sg);
 		end_addr = (u64)(addr + sg_len);
 
 		/*
-		 * First s/g element in the list so check if the end_addr
-		 * is paged aligned. Also check to make sure the length is
-		 * at least page size.
-		 */
-		if ((i == 0) && (sg_count > 1) &&
-		    ((end_addr % QEDF_PAGE_SIZE) ||
-		    sg_len < QEDF_PAGE_SIZE))
-			io_req->use_slowpath = true;
-		/*
-		 * Last s/g element so check if the start address is paged
-		 * aligned.
-		 */
-		else if ((i == (sg_count - 1)) && (sg_count > 1) &&
-		    (addr % QEDF_PAGE_SIZE))
-			io_req->use_slowpath = true;
-		/*
 		 * Intermediate s/g element so check if start and end address
-		 * is page aligned.
+		 * is page aligned.  Only required for writes and only if the
+		 * number of scatter/gather elements is 8 or more.
 		 */
-		else if ((i != 0) && (i != (sg_count - 1)) &&
-		    ((addr % QEDF_PAGE_SIZE) || (end_addr % QEDF_PAGE_SIZE)))
-			io_req->use_slowpath = true;
+		if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE && (i) &&
+		    (i != (sg_count - 1)) && sg_len < QEDF_PAGE_SIZE)
+			io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
 
-		if (sg_len > QEDF_MAX_BD_LEN) {
-			sg_frags = qedf_split_bd(io_req, addr, sg_len,
-			    bd_count);
-		} else {
-			sg_frags = 1;
-			bd[bd_count].sge_addr.lo = U64_LO(addr);
-			bd[bd_count].sge_addr.hi  = U64_HI(addr);
-			bd[bd_count].sge_len = (uint16_t)sg_len;
-		}
+		bd[bd_count].sge_addr.lo = cpu_to_le32(U64_LO(addr));
+		bd[bd_count].sge_addr.hi  = cpu_to_le32(U64_HI(addr));
+		bd[bd_count].sge_len = cpu_to_le32(sg_len);
 
-		bd_count += sg_frags;
+		bd_count++;
 		byte_count += sg_len;
 	}
 
+	/* If neither FAST nor SLOW was set above, default to FAST */
+	if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE)
+		io_req->sge_type = QEDF_IOREQ_FAST_SGE;
+
 	if (byte_count != scsi_bufflen(sc))
 		QEDF_ERR(&(qedf->dbg_ctx), "byte_count = %d != "
 			  "scsi_bufflen = %d, task_id = 0x%x.\n", byte_count,
@@ -655,8 +649,10 @@
 		io_req->sgl_task_params->num_sges = bd_count;
 		io_req->sgl_task_params->total_buffer_size =
 		    scsi_bufflen(io_req->sc_cmd);
-		io_req->sgl_task_params->small_mid_sge =
-			io_req->use_slowpath;
+		if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
+			io_req->sgl_task_params->small_mid_sge = 1;
+		else
+			io_req->sgl_task_params->small_mid_sge = 0;
 	}
 
 	/* Fill in physical address of sense buffer */
@@ -679,16 +675,10 @@
 				    io_req->task_retry_identifier, fcp_cmnd);
 
 	/* Increment SGL type counters */
-	if (bd_count == 1) {
-		qedf->single_sge_ios++;
-		io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
-	} else if (io_req->use_slowpath) {
+	if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
 		qedf->slow_sge_ios++;
-		io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
-	} else {
+	else
 		qedf->fast_sge_ios++;
-		io_req->sge_type = QEDF_IOREQ_FAST_SGE;
-	}
 }
 
 void qedf_init_mp_task(struct qedf_ioreq *io_req,
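
The rewritten qedf_map_sg() drops the single-SGE and BD-splitting special cases, leaving one decision: fast versus slow SGEs, driven by element count, direction, and the size of mid-list elements. A condensed sketch of that classification, assuming the same 8-element and page-size thresholds as the hunks above (the enum and function names are illustrative):

#include <linux/types.h>

enum sge_type { SGE_UNKNOWN, SGE_FAST, SGE_SLOW };

/*
 * Sketch: reads are always fast; short lists are fast; longer writes
 * become slow only if an intermediate element is smaller than a page.
 */
static enum sge_type classify(bool is_read, int sg_count,
			      const u32 *lens, u32 page_size)
{
	int i;

	if (is_read || sg_count <= 8)
		return SGE_FAST;

	for (i = 1; i < sg_count - 1; i++)
		if (lens[i] < page_size)
			return SGE_SLOW;

	return SGE_FAST;
}
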
@@ -770,9 +760,6 @@
 						     &task_fc_hdr,
 						     &tx_sgl_task_params,
 						     &rx_sgl_task_params, 0);
-
-	/* Midpath requests always consume 1 SGE */
-	qedf->single_sge_ios++;
 }
 
 /* Presumed that fcport->rport_lock is held */
@@ -804,10 +791,18 @@
 	    FCOE_DB_DATA_AGG_VAL_SEL_SHIFT;
 
 	dbell.sq_prod = fcport->fw_sq_prod_idx;
-	writel(*(u32 *)&dbell, fcport->p_doorbell);
-	/* Make sure SQ index is updated so f/w prcesses requests in order */
+	/* wmb makes sure that the BDs data is updated before updating the
+	 * producer, otherwise FW may read old data from the BDs.
+	 */
 	wmb();
-	mmiowb();
+	barrier();
+	writel(*(u32 *)&dbell, fcport->p_doorbell);
+	/*
+	 * Fence required to flush the write combined buffer, since another
+	 * CPU may write to the same doorbell address and data may be lost
+	 * due to relaxed order nature of write combined bar.
+	 */
+	wmb();
 }
 
 static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
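
In the doorbell hunk above, mmiowb() is replaced by an explicit wmb() after writel(): the leading barrier publishes the BD and producer stores before the doorbell, and the trailing one flushes the write-combining BAR so a doorbell written by another CPU is not lost to relaxed ordering. A minimal sketch of the ordering (ring_db() and its parameters are illustrative):

#include <linux/kernel.h>
#include <linux/io.h>

/* Sketch: publish descriptors, then ring a write-combined doorbell. */
static void ring_db(void __iomem *db, u32 prod)
{
	/* 1. Make the BD/producer stores visible before the doorbell. */
	wmb();
	writel(prod, db);
	/* 2. Flush the WC buffer; relaxed ordering could drop the write. */
	wmb();
}
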
@@ -872,7 +867,7 @@
 	/* Initialize rest of io_req fields */
 	io_req->data_xfer_len = scsi_bufflen(sc_cmd);
 	sc_cmd->SCp.ptr = (char *)io_req;
-	io_req->use_slowpath = false; /* Assume fast SGL by default */
+	io_req->sge_type = QEDF_IOREQ_FAST_SGE; /* Assume fast SGL by default */
 
 	/* Record which cpu this request is associated with */
 	io_req->cpu = smp_processor_id();
@@ -895,15 +890,24 @@
 	/* Build buffer descriptor list for firmware from sg list */
 	if (qedf_build_bd_list_from_sg(io_req)) {
 		QEDF_ERR(&(qedf->dbg_ctx), "BD list creation failed.\n");
+		/* Release cmd will release io_req, but sc_cmd is assigned */
+		io_req->sc_cmd = NULL;
 		kref_put(&io_req->refcount, qedf_release_cmd);
 		return -EAGAIN;
 	}
 
-	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
+	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
+	    test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
 		QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
+		/* Release cmd will release io_req, but sc_cmd is assigned */
+		io_req->sc_cmd = NULL;
 		kref_put(&io_req->refcount, qedf_release_cmd);
+		return -EINVAL;
 	}
 
+	/* Record the LUN number for later use if we need it */
+	io_req->lun = (int)sc_cmd->device->lun;
+
 	/* Obtain free SQE */
 	sqe_idx = qedf_get_sqe_idx(fcport);
 	sqe = &fcport->sq[sqe_idx];
@@ -914,6 +918,8 @@
 	if (!task_ctx) {
 		QEDF_WARN(&(qedf->dbg_ctx), "task_ctx is NULL, xid=%d.\n",
 			   xid);
+		/* Release cmd will release io_req, but sc_cmd is assigned */
+		io_req->sc_cmd = NULL;
 		kref_put(&io_req->refcount, qedf_release_cmd);
 		return -EINVAL;
 	}
@@ -923,6 +929,9 @@
 	/* Ring doorbell */
 	qedf_ring_doorbell(fcport);
 
+	/* Set that command is with the firmware now */
+	set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
+
 	if (qedf_io_tracing && io_req->sc_cmd)
 		qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ);
 
@@ -941,10 +950,23 @@
 	int rc = 0;
 	int rval;
 	unsigned long flags = 0;
+	int num_sgs = 0;
 
+	num_sgs = scsi_sg_count(sc_cmd);
+	if (scsi_sg_count(sc_cmd) > QEDF_MAX_BDS_PER_CMD) {
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "Number of SG elements %d exceeds the hardware limit of %d.\n",
+			 num_sgs, QEDF_MAX_BDS_PER_CMD);
+		sc_cmd->result = DID_ERROR;
+		sc_cmd->scsi_done(sc_cmd);
+		return 0;
+	}
 
 	if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
 	    test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
+		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+			  "Returning DNC as unloading or stop io, flags 0x%lx.\n",
+			  qedf->flags);
 		sc_cmd->result = DID_NO_CONNECT << 16;
 		sc_cmd->scsi_done(sc_cmd);
 		return 0;
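
Rejecting oversized scatterlists at queuecommand time keeps the firmware from ever seeing a command it cannot describe in BDs. A hedged sketch of the same guard in a generic .queuecommand helper; MY_MAX_BDS is an illustrative limit, and the host byte is set via the standard helper:

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#define MY_MAX_BDS 256	/* illustrative hardware limit */

/* Sketch: fail fast when the scatterlist exceeds the BD table. */
static int my_queuecommand_guard(struct scsi_cmnd *sc_cmd)
{
	if (scsi_sg_count(sc_cmd) > MY_MAX_BDS) {
		set_host_byte(sc_cmd, DID_ERROR);
		sc_cmd->scsi_done(sc_cmd);
		return 0;	/* command completed, with an error */
	}
	return 1;		/* caller proceeds with normal submission */
}

Returning 0 from .queuecommand after calling ->scsi_done() reports the command as completed rather than asking the midlayer to retry.
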
@@ -961,6 +983,9 @@
 
 	rval = fc_remote_port_chkready(rport);
 	if (rval) {
+		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+			  "fc_remote_port_chkready failed=0x%x for port_id=0x%06x.\n",
+			  rval, rport->port_id);
 		sc_cmd->result = rval;
 		sc_cmd->scsi_done(sc_cmd);
 		return 0;
@@ -968,12 +993,14 @@
 
 	/* Retry command if we are doing a qed drain operation */
 	if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
+		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Drain active.\n");
 		rc = SCSI_MLQUEUE_HOST_BUSY;
 		goto exit_qcmd;
 	}
 
 	if (lport->state != LPORT_ST_READY ||
 	    atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
+		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Link down.\n");
 		rc = SCSI_MLQUEUE_HOST_BUSY;
 		goto exit_qcmd;
 	}
@@ -981,7 +1008,8 @@
 	/* rport and tgt are allocated together, so tgt should be non-NULL */
 	fcport = (struct qedf_rport *)&rp[1];
 
-	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
+	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
+	    test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
 		/*
 		 * Session is not offloaded yet. Let SCSI-ml retry
 		 * the command.
@@ -989,12 +1017,16 @@
 		rc = SCSI_MLQUEUE_TARGET_BUSY;
 		goto exit_qcmd;
 	}
+
+	atomic_inc(&fcport->ios_to_queue);
+
 	if (fcport->retry_delay_timestamp) {
 		if (time_after(jiffies, fcport->retry_delay_timestamp)) {
 			fcport->retry_delay_timestamp = 0;
 		} else {
 			/* If retry_delay timer is active, flow off the ML */
 			rc = SCSI_MLQUEUE_TARGET_BUSY;
+			atomic_dec(&fcport->ios_to_queue);
 			goto exit_qcmd;
 		}
 	}
@@ -1002,6 +1034,7 @@
 	io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
 	if (!io_req) {
 		rc = SCSI_MLQUEUE_HOST_BUSY;
+		atomic_dec(&fcport->ios_to_queue);
 		goto exit_qcmd;
 	}
 
@@ -1016,6 +1049,7 @@
 		rc = SCSI_MLQUEUE_HOST_BUSY;
 	}
 	spin_unlock_irqrestore(&fcport->rport_lock, flags);
+	atomic_dec(&fcport->ios_to_queue);
 
 exit_qcmd:
 	return rc;
@@ -1092,7 +1126,7 @@
 void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
 	struct qedf_ioreq *io_req)
 {
-	u16 xid, rval;
+	u16 xid;
 	struct e4_fcoe_task_context *task_ctx;
 	struct scsi_cmnd *sc_cmd;
 	struct fcoe_cqe_rsp_info *fcp_rsp;
@@ -1106,6 +1140,15 @@
 	if (!cqe)
 		return;
 
+	if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
+	    test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
+	    test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "io_req xid=0x%x already in cleanup or abort processing or already completed.\n",
+			 io_req->xid);
+		return;
+	}
+
 	xid = io_req->xid;
 	task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
 	sc_cmd = io_req->sc_cmd;
@@ -1122,15 +1165,15 @@
 		return;
 	}
 
-	if (!sc_cmd->request) {
-		QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd->request is NULL, "
-		    "sc_cmd=%p.\n", sc_cmd);
+	if (!sc_cmd->device) {
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "Device for sc_cmd %p is NULL.\n", sc_cmd);
 		return;
 	}
 
-	if (!sc_cmd->request->special) {
-		QEDF_WARN(&(qedf->dbg_ctx), "request->special is NULL so "
-		    "request not valid, sc_cmd=%p.\n", sc_cmd);
+	if (!sc_cmd->request) {
+		QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd->request is NULL, "
+		    "sc_cmd=%p.\n", sc_cmd);
 		return;
 	}
 
@@ -1142,6 +1185,19 @@
 
 	fcport = io_req->fcport;
 
+	/*
+	 * When flush is active, let the cmds be completed from the cleanup
+	 * context
+	 */
+	if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
+	    (test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags) &&
+	     sc_cmd->device->lun == (u64)fcport->lun_reset_lun)) {
+		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+			  "Dropping good completion xid=0x%x as fcport is flushing",
+			  io_req->xid);
+		return;
+	}
+
 	qedf_parse_fcp_rsp(io_req, fcp_rsp);
 
 	qedf_unmap_sg_list(qedf, io_req);
@@ -1159,25 +1215,18 @@
 	fw_residual_flag = GET_FIELD(cqe->cqe_info.rsp_info.fw_error_flags,
 	    FCOE_CQE_RSP_INFO_FW_UNDERRUN);
 	if (fw_residual_flag) {
-		QEDF_ERR(&(qedf->dbg_ctx),
-		    "Firmware detected underrun: xid=0x%x fcp_rsp.flags=0x%02x "
-		    "fcp_resid=%d fw_residual=0x%x.\n", io_req->xid,
-		    fcp_rsp->rsp_flags.flags, io_req->fcp_resid,
-		    cqe->cqe_info.rsp_info.fw_residual);
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "Firmware detected underrun: xid=0x%x fcp_rsp.flags=0x%02x fcp_resid=%d fw_residual=0x%x lba=%02x%02x%02x%02x.\n",
+			 io_req->xid, fcp_rsp->rsp_flags.flags,
+			 io_req->fcp_resid,
+			 cqe->cqe_info.rsp_info.fw_residual, sc_cmd->cmnd[2],
+			 sc_cmd->cmnd[3], sc_cmd->cmnd[4], sc_cmd->cmnd[5]);
 
 		if (io_req->cdb_status == 0)
 			sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
 		else
 			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
 
-		/* Abort the command since we did not get all the data */
-		init_completion(&io_req->abts_done);
-		rval = qedf_initiate_abts(io_req, true);
-		if (rval) {
-			QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
-			sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
-		}
-
 		/*
 		 * Set resid to the whole buffer length so we won't try to reuse
 		 * any previously received data.
@@ -1249,6 +1298,12 @@
 	if (qedf_io_tracing)
 		qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP);
 
+	/*
+	 * We wait till the end of the function to clear the
+	 * outstanding bit in case we need to send an abort
+	 */
+	clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
+
 	io_req->sc_cmd = NULL;
 	sc_cmd->SCp.ptr =  NULL;
 	sc_cmd->scsi_done(sc_cmd);
@@ -1263,8 +1318,23 @@
 	struct scsi_cmnd *sc_cmd;
 	int refcount;
 
-	if (!io_req)
+	if (!io_req) {
+		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "io_req is NULL\n");
 		return;
+	}
+
+	if (test_and_set_bit(QEDF_CMD_ERR_SCSI_DONE, &io_req->flags)) {
+		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+			  "io_req:%p scsi_done handling already done\n",
+			  io_req);
+		return;
+	}
+
+	/*
+	 * We will be done with this command after this call so clear the
+	 * outstanding bit.
+	 */
+	clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
 
 	xid = io_req->xid;
 	sc_cmd = io_req->sc_cmd;
@@ -1274,12 +1344,50 @@
 		return;
 	}
 
+	if (!virt_addr_valid(sc_cmd)) {
+		QEDF_ERR(&qedf->dbg_ctx, "sc_cmd=%p is not valid.", sc_cmd);
+		goto bad_scsi_ptr;
+	}
+
 	if (!sc_cmd->SCp.ptr) {
 		QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
 		    "another context.\n");
 		return;
 	}
 
+	if (!sc_cmd->device) {
+		QEDF_ERR(&qedf->dbg_ctx, "Device for sc_cmd %p is NULL.\n",
+			 sc_cmd);
+		goto bad_scsi_ptr;
+	}
+
+	if (!virt_addr_valid(sc_cmd->device)) {
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "Device pointer for sc_cmd %p is bad.\n", sc_cmd);
+		goto bad_scsi_ptr;
+	}
+
+	if (!sc_cmd->sense_buffer) {
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "sc_cmd->sense_buffer for sc_cmd %p is NULL.\n",
+			 sc_cmd);
+		goto bad_scsi_ptr;
+	}
+
+	if (!virt_addr_valid(sc_cmd->sense_buffer)) {
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "sc_cmd->sense_buffer for sc_cmd %p is bad.\n",
+			 sc_cmd);
+		goto bad_scsi_ptr;
+	}
+
+	if (!sc_cmd->scsi_done) {
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "sc_cmd->scsi_done for sc_cmd %p is NULL.\n",
+			 sc_cmd);
+		goto bad_scsi_ptr;
+	}
+
 	qedf_unmap_sg_list(qedf, io_req);
 
 	sc_cmd->result = result << 16;
@@ -1306,6 +1414,15 @@
 	sc_cmd->SCp.ptr = NULL;
 	sc_cmd->scsi_done(sc_cmd);
 	kref_put(&io_req->refcount, qedf_release_cmd);
+	return;
+
+bad_scsi_ptr:
+	/*
+	 * Clear the io_req->sc_cmd backpointer so we don't try to process
+	 * this again
+	 */
+	io_req->sc_cmd = NULL;
+	kref_put(&io_req->refcount, qedf_release_cmd);  /* ID: 001 */
 }
 
 /*
@@ -1320,8 +1437,12 @@
 	u64 err_warn_bit_map;
 	u8 err_warn = 0xff;
 
-	if (!cqe)
+	if (!cqe) {
+		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+			  "cqe is NULL for io_req %p xid=0x%x\n",
+			  io_req, io_req->xid);
 		return;
+	}
 
 	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, "
 		  "xid=0x%x\n", io_req->xid);
@@ -1383,8 +1504,11 @@
 {
 	int rval;
 
-	if (!cqe)
+	if (!cqe) {
+		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+			  "cqe is NULL for io_req %p\n", io_req);
 		return;
+	}
 
 	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, "
 		  "xid=0x%x\n", io_req->xid);
@@ -1444,9 +1568,15 @@
 	struct qedf_ctx *qedf;
 	struct qedf_cmd_mgr *cmd_mgr;
 	int i, rc;
+	unsigned long flags;
+	int flush_cnt = 0;
+	int wait_cnt = 100;
+	int refcount = 0;
 
-	if (!fcport)
+	if (!fcport) {
+		QEDF_ERR(NULL, "fcport is NULL\n");
 		return;
+	}
 
 	/* Check that fcport is still offloaded */
 	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
@@ -1455,18 +1585,102 @@
 	}
 
 	qedf = fcport->qedf;
+
+	if (!qedf) {
+		QEDF_ERR(NULL, "qedf is NULL.\n");
+		return;
+	}
+
+	/* Only wait for all commands to be queued in the Upload context */
+	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) &&
+	    (lun == -1)) {
+		while (atomic_read(&fcport->ios_to_queue)) {
+			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+				  "Waiting for %d I/Os to be queued\n",
+				  atomic_read(&fcport->ios_to_queue));
+			if (wait_cnt == 0) {
+					 "%d I/O requests could not be queued\n",
+					 "%d IOs request could not be queued\n",
+					 atomic_read(&fcport->ios_to_queue));
+			}
+			msleep(20);
+			wait_cnt--;
+		}
+	}
+
 	cmd_mgr = qedf->cmd_mgr;
 
-	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Flush active i/o's.\n");
+	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+		  "Flush active i/o's num=0x%x fcport=0x%p port_id=0x%06x scsi_id=%d.\n",
+		  atomic_read(&fcport->num_active_ios), fcport,
+		  fcport->rdata->ids.port_id, fcport->rport->scsi_target_id);
+	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Locking flush mutex.\n");
+
+	mutex_lock(&qedf->flush_mutex);
+	if (lun == -1) {
+		set_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags);
+	} else {
+		set_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags);
+		fcport->lun_reset_lun = lun;
+	}
 
 	for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
 		io_req = &cmd_mgr->cmds[i];
 
 		if (!io_req)
 			continue;
+		if (!io_req->fcport)
+			continue;
+
+		spin_lock_irqsave(&cmd_mgr->lock, flags);
+
+		if (io_req->alloc) {
+			if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
+				if (io_req->cmd_type == QEDF_SCSI_CMD)
+					QEDF_ERR(&qedf->dbg_ctx,
+						 "Allocated but not queued, xid=0x%x\n",
+						 io_req->xid);
+			}
+			spin_unlock_irqrestore(&cmd_mgr->lock, flags);
+		} else {
+			spin_unlock_irqrestore(&cmd_mgr->lock, flags);
+			continue;
+		}
+
 		if (io_req->fcport != fcport)
 			continue;
-		if (io_req->cmd_type == QEDF_ELS) {
+
+		/* In case of ABTS, CMD_OUTSTANDING is cleared on ABTS response,
+		 * but RRQ is still pending.
+		 * Workaround: Within qedf_send_rrq, we check if the fcport is
+		 * NULL, and we drop the ref on the io_req to clean it up.
+		 */
+		if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
+			refcount = kref_read(&io_req->refcount);
+			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+				  "Not outstanding, xid=0x%x, cmd_type=%d refcount=%d.\n",
+				  io_req->xid, io_req->cmd_type, refcount);
+			/* If RRQ work has been queued, try to cancel it and
+			 * free the io_req
+			 */
+			if (atomic_read(&io_req->state) ==
+			    QEDFC_CMD_ST_RRQ_WAIT) {
+				if (cancel_delayed_work_sync
+				    (&io_req->rrq_work)) {
+					QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+						  "Putting reference for pending RRQ work xid=0x%x.\n",
+						  io_req->xid);
+					/* ID: 003 */
+					kref_put(&io_req->refcount,
+						 qedf_release_cmd);
+				}
+			}
+			continue;
+		}
+
+		/* Only consider flushing ELS during target reset */
+		if (io_req->cmd_type == QEDF_ELS &&
+		    lun == -1) {
 			rc = kref_get_unless_zero(&io_req->refcount);
 			if (!rc) {
 				QEDF_ERR(&(qedf->dbg_ctx),
@@ -1474,6 +1688,7 @@
 				    io_req, io_req->xid);
 				continue;
 			}
+			flush_cnt++;
 			qedf_flush_els_req(qedf, io_req);
 			/*
 			 * Release the kref and go back to the top of the
@@ -1483,6 +1698,7 @@
 		}
 
 		if (io_req->cmd_type == QEDF_ABTS) {
+			/* ID: 004 */
 			rc = kref_get_unless_zero(&io_req->refcount);
 			if (!rc) {
 				QEDF_ERR(&(qedf->dbg_ctx),
@@ -1490,28 +1706,50 @@
 				    io_req, io_req->xid);
 				continue;
 			}
+			if (lun != -1 && io_req->lun != lun)
+				goto free_cmd;
+
 			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
 			    "Flushing abort xid=0x%x.\n", io_req->xid);
 
-			clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
-
-			if (io_req->sc_cmd) {
-				if (io_req->return_scsi_cmd_on_abts)
-					qedf_scsi_done(qedf, io_req, DID_ERROR);
+			if (cancel_delayed_work_sync(&io_req->rrq_work)) {
+				QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+					  "Putting ref for cancelled RRQ work xid=0x%x.\n",
+					  io_req->xid);
+				kref_put(&io_req->refcount, qedf_release_cmd);
 			}
 
-			/* Notify eh_abort handler that ABTS is complete */
-			complete(&io_req->abts_done);
-			kref_put(&io_req->refcount, qedf_release_cmd);
-
+			if (cancel_delayed_work_sync(&io_req->timeout_work)) {
+				QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+					  "Putting ref for cancelled tmo work xid=0x%x.\n",
+					  io_req->xid);
+				qedf_initiate_cleanup(io_req, true);
+				/* Notify eh_abort handler that ABTS is
+				 * complete
+				 */
+				complete(&io_req->abts_done);
+				clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
+				/* ID: 002 */
+				kref_put(&io_req->refcount, qedf_release_cmd);
+			}
+			flush_cnt++;
 			goto free_cmd;
 		}
 
 		if (!io_req->sc_cmd)
 			continue;
-		if (lun > 0) {
-			if (io_req->sc_cmd->device->lun !=
-			    (u64)lun)
+		if (!io_req->sc_cmd->device) {
+			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+				  "Device backpointer NULL for sc_cmd=%p.\n",
+				  io_req->sc_cmd);
+			/* Put reference for non-existent scsi_cmnd */
+			io_req->sc_cmd = NULL;
+			qedf_initiate_cleanup(io_req, false);
+			kref_put(&io_req->refcount, qedf_release_cmd);
+			continue;
+		}
+		if (lun > -1) {
+			if (io_req->lun != lun)
 				continue;
 		}
 
@@ -1525,15 +1763,65 @@
 			    "io_req=0x%p xid=0x%x\n", io_req, io_req->xid);
 			continue;
 		}
+
 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
 		    "Cleanup xid=0x%x.\n", io_req->xid);
+		flush_cnt++;
 
 		/* Cleanup task and return I/O mid-layer */
 		qedf_initiate_cleanup(io_req, true);
 
 free_cmd:
-		kref_put(&io_req->refcount, qedf_release_cmd);
+		kref_put(&io_req->refcount, qedf_release_cmd);	/* ID: 004 */
 	}
+
+	wait_cnt = 60;
+	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+		  "Flushed 0x%x I/Os, active=0x%x.\n",
+		  flush_cnt, atomic_read(&fcport->num_active_ios));
+	/* Only wait for all commands to complete in the Upload context */
+	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) &&
+	    (lun == -1)) {
+		while (atomic_read(&fcport->num_active_ios)) {
+			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+				  "Flushed 0x%x I/Os, active=0x%x cnt=%d.\n",
+				  flush_cnt,
+				  atomic_read(&fcport->num_active_ios),
+				  wait_cnt);
+			if (wait_cnt == 0) {
+				QEDF_ERR(&qedf->dbg_ctx,
+					 "Flushed %d I/Os, active=%d.\n",
+					 flush_cnt,
+					 atomic_read(&fcport->num_active_ios));
+				for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
+					io_req = &cmd_mgr->cmds[i];
+					if (io_req->fcport &&
+					    io_req->fcport == fcport) {
+						refcount =
+						kref_read(&io_req->refcount);
+						set_bit(QEDF_CMD_DIRTY,
+							&io_req->flags);
+						QEDF_ERR(&qedf->dbg_ctx,
+							 "Outstanding io_req =%p xid=0x%x flags=0x%lx, sc_cmd=%p refcount=%d cmd_type=%d.\n",
+							 io_req, io_req->xid,
+							 io_req->flags,
+							 io_req->sc_cmd,
+							 refcount,
+							 io_req->cmd_type);
+					}
+				}
+				WARN_ON(1);
+				break;
+			}
+			msleep(500);
+			wait_cnt--;
+		}
+	}
+
+	clear_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags);
+	clear_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags);
+	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Unlocking flush mutex.\n");
+	mutex_unlock(&qedf->flush_mutex);
 }
 
 /*
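
The flush path is now serialized by flush_mutex and bracketed by two bounded waits: one for commands still being queued (ios_to_queue) and one for outstanding commands to drain (num_active_ios), with a dump of stragglers when the budget expires. A minimal sketch of such a bounded drain loop (drain_bounded() is illustrative; the caller decides what to log on exhaustion):

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/delay.h>

/* Sketch: poll a counter down to zero with a hard retry budget. */
static void drain_bounded(atomic_t *pending, int tries, unsigned int ms)
{
	while (atomic_read(pending)) {
		if (tries-- == 0) {
			/* Budget exhausted: dump state and give up. */
			WARN_ON(1);
			break;
		}
		msleep(ms);
	}
}
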
@@ -1552,52 +1840,60 @@
 	unsigned long flags;
 	struct fcoe_wqe *sqe;
 	u16 sqe_idx;
+	int refcount = 0;
 
 	/* Sanity check qedf_rport before dereferencing any pointers */
 	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
 		QEDF_ERR(NULL, "tgt not offloaded\n");
 		rc = 1;
-		goto abts_err;
+		goto out;
 	}
 
-	rdata = fcport->rdata;
-	r_a_tov = rdata->r_a_tov;
 	qedf = fcport->qedf;
+	rdata = fcport->rdata;
+
+	if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
+		QEDF_ERR(&qedf->dbg_ctx, "stale rport\n");
+		rc = 1;
+		goto out;
+	}
+
+	r_a_tov = rdata->r_a_tov;
 	lport = qedf->lport;
 
 	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
 		QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
 		rc = 1;
-		goto abts_err;
+		goto drop_rdata_kref;
 	}
 
 	if (atomic_read(&qedf->link_down_tmo_valid) > 0) {
 		QEDF_ERR(&(qedf->dbg_ctx), "link_down_tmo active.\n");
 		rc = 1;
-		goto abts_err;
+		goto drop_rdata_kref;
 	}
 
 	/* Ensure room on SQ */
 	if (!atomic_read(&fcport->free_sqes)) {
 		QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
 		rc = 1;
-		goto abts_err;
+		goto drop_rdata_kref;
 	}
 
 	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
 		QEDF_ERR(&qedf->dbg_ctx, "fcport is uploading.\n");
 		rc = 1;
-		goto out;
+		goto drop_rdata_kref;
 	}
 
 	if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
 	    test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
 	    test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
-		QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
-			  "cleanup or abort processing or already "
-			  "completed.\n", io_req->xid);
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "io_req xid=0x%x sc_cmd=%p already in cleanup or abort processing or already completed.\n",
+			 io_req->xid, io_req->sc_cmd);
 		rc = 1;
-		goto out;
+		goto drop_rdata_kref;
 	}
 
 	kref_get(&io_req->refcount);
@@ -1606,18 +1902,17 @@
 	qedf->control_requests++;
 	qedf->packet_aborts++;
 
-	/* Set the return CPU to be the same as the request one */
-	io_req->cpu = smp_processor_id();
-
 	/* Set the command type to abort */
 	io_req->cmd_type = QEDF_ABTS;
 	io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
 
 	set_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
-	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "ABTS io_req xid = "
-		   "0x%x\n", xid);
+	refcount = kref_read(&io_req->refcount);
+	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
+		  "ABTS io_req xid = 0x%x refcount=%d\n",
+		  xid, refcount);
 
-	qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT * HZ);
+	qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT);
 
 	spin_lock_irqsave(&fcport->rport_lock, flags);
 
@@ -1631,13 +1926,8 @@
 
 	spin_unlock_irqrestore(&fcport->rport_lock, flags);
 
-	return rc;
-abts_err:
-	/*
-	 * If the ABTS task fails to queue then we need to cleanup the
-	 * task at the firmware.
-	 */
-	qedf_initiate_cleanup(io_req, return_scsi_cmd_on_abts);
+drop_rdata_kref:
+	kref_put(&rdata->kref, fc_rport_destroy);
 out:
 	return rc;
 }
@@ -1647,27 +1937,62 @@
 {
 	uint32_t r_ctl;
 	uint16_t xid;
+	int rc;
+	struct qedf_rport *fcport = io_req->fcport;
 
 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "Entered with xid = "
 		   "0x%x cmd_type = %d\n", io_req->xid, io_req->cmd_type);
 
-	cancel_delayed_work(&io_req->timeout_work);
-
 	xid = io_req->xid;
 	r_ctl = cqe->cqe_info.abts_info.r_ctl;
 
+	/* This was added at a point when we were scheduling abts_compl &
+	 * cleanup_compl on different CPUs and there was a possibility of
+	 * the io_req to be freed from the other context before we got here.
+	 */
+	if (!fcport) {
+		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+			  "Dropping ABTS completion xid=0x%x as fcport is NULL",
+			  io_req->xid);
+		return;
+	}
+
+	/*
+	 * When flush is active, let the cmds be completed from the cleanup
+	 * context
+	 */
+	if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
+	    test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags)) {
+		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+			  "Dropping ABTS completion xid=0x%x as fcport is flushing",
+			  io_req->xid);
+		return;
+	}
+
+	if (!cancel_delayed_work(&io_req->timeout_work)) {
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "Wasn't able to cancel abts timeout work.\n");
+	}
+
 	switch (r_ctl) {
 	case FC_RCTL_BA_ACC:
 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
 		    "ABTS response - ACC Send RRQ after R_A_TOV\n");
 		io_req->event = QEDF_IOREQ_EV_ABORT_SUCCESS;
+		rc = kref_get_unless_zero(&io_req->refcount);	/* ID: 003 */
+		if (!rc) {
+			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
+				  "kref is already zero so ABTS was already completed or flushed xid=0x%x.\n",
+				  io_req->xid);
+			return;
+		}
 		/*
 		 * Dont release this cmd yet. It will be relesed
 		 * after we get RRQ response
 		 */
-		kref_get(&io_req->refcount);
 		queue_delayed_work(qedf->dpc_wq, &io_req->rrq_work,
 		    msecs_to_jiffies(qedf->lport->r_a_tov));
+		atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_WAIT);
 		break;
 	/* For error cases let the cleanup return the command */
 	case FC_RCTL_BA_RJT:
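
On BA_ACC the io_req must survive the R_A_TOV delay until the RRQ fires, so the completion path pins it with kref_get_unless_zero() before queueing the delayed work; whichever side then cancels the work, as the flush path above does, or runs it must drop that reference. A minimal sketch of the handoff (struct req, req_release() and the arm/disarm names are illustrative):

#include <linux/kref.h>
#include <linux/workqueue.h>

struct req {
	struct kref ref;
	struct delayed_work rrq_work;
};

void req_release(struct kref *ref);	/* assumed destructor */

/* Sketch: pin the request for the lifetime of the delayed work. */
static bool arm_rrq(struct req *r, struct workqueue_struct *wq,
		    unsigned long delay)
{
	if (!kref_get_unless_zero(&r->ref))
		return false;	/* already torn down elsewhere */
	queue_delayed_work(wq, &r->rrq_work, delay);
	return true;
}

static void disarm_rrq(struct req *r)
{
	/* Only the side that wins the cancel drops the work's reference. */
	if (cancel_delayed_work_sync(&r->rrq_work))
		kref_put(&r->ref, req_release);
}
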
@@ -1683,6 +2008,10 @@
 	clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
 
 	if (io_req->sc_cmd) {
+		if (!io_req->return_scsi_cmd_on_abts)
+			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
+				  "Not call scsi_done for xid=0x%x.\n",
+				  io_req->xid);
 		if (io_req->return_scsi_cmd_on_abts)
 			qedf_scsi_done(qedf, io_req, DID_ERROR);
 	}
@@ -1809,6 +2138,7 @@
 	unsigned long flags;
 	struct fcoe_wqe *sqe;
 	u16 sqe_idx;
+	int refcount = 0;
 
 	fcport = io_req->fcport;
 	if (!fcport) {
@@ -1830,36 +2160,45 @@
 	}
 
 	if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
-	    test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) {
+	    test_and_set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) {
 		QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
 			  "cleanup processing or already completed.\n",
 			  io_req->xid);
 		return SUCCESS;
 	}
+	set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
 
 	/* Ensure room on SQ */
 	if (!atomic_read(&fcport->free_sqes)) {
 		QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
+		/* Need to make sure we clear the flag since it was set */
+		clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
 		return FAILED;
 	}
 
+	if (io_req->cmd_type == QEDF_CLEANUP) {
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "io_req=0x%x is already a cleanup command cmd_type=%d.\n",
+			 io_req->xid, io_req->cmd_type);
+		clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
+		return SUCCESS;
+	}
 
-	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid=0x%x\n",
-	    io_req->xid);
+	refcount = kref_read(&io_req->refcount);
+
+	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+		  "Entered xid=0x%x sc_cmd=%p cmd_type=%d flags=0x%lx refcount=%d fcport=%p port_id=0x%06x\n",
+		  io_req->xid, io_req->sc_cmd, io_req->cmd_type, io_req->flags,
+		  refcount, fcport, fcport->rdata->ids.port_id);
 
 	/* Cleanup cmds re-use the same TID as the original I/O */
 	xid = io_req->xid;
 	io_req->cmd_type = QEDF_CLEANUP;
 	io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
 
-	/* Set the return CPU to be the same as the request one */
-	io_req->cpu = smp_processor_id();
-
-	set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
-
 	task = qedf_get_task_mem(&qedf->tasks, xid);
 
-	init_completion(&io_req->tm_done);
+	init_completion(&io_req->cleanup_done);
 
 	spin_lock_irqsave(&fcport->rport_lock, flags);
 
@@ -1873,8 +2212,8 @@
 
 	spin_unlock_irqrestore(&fcport->rport_lock, flags);
 
-	tmo = wait_for_completion_timeout(&io_req->tm_done,
-	    QEDF_CLEANUP_TIMEOUT * HZ);
+	tmo = wait_for_completion_timeout(&io_req->cleanup_done,
+					  QEDF_CLEANUP_TIMEOUT * HZ);
 
 	if (!tmo) {
 		rc = FAILED;
@@ -1887,7 +2226,21 @@
 		qedf_drain_request(qedf);
 	}
 
+	/* If it is a task management command, handle it here; the
+	 * reference will be dropped in qedf_execute_tmf().
+	 */
+	if (io_req->tm_flags == FCP_TMF_LUN_RESET ||
+	    io_req->tm_flags == FCP_TMF_TGT_RESET) {
+		clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
+		io_req->sc_cmd = NULL;
+		complete(&io_req->tm_done);
+	}
+
 	if (io_req->sc_cmd) {
+		if (!io_req->return_scsi_cmd_on_abts)
+			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
+				  "Not calling scsi_done for xid=0x%x.\n",
+				  io_req->xid);
 		if (io_req->return_scsi_cmd_on_abts)
 			qedf_scsi_done(qedf, io_req, DID_ERROR);
 	}
@@ -1909,7 +2262,7 @@
 	clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
 
 	/* Complete so we can finish cleaning up the I/O */
-	complete(&io_req->tm_done);
+	complete(&io_req->cleanup_done);
 }
 
 static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
@@ -1922,29 +2275,28 @@
 	int rc = 0;
 	uint16_t xid;
 	int tmo = 0;
+	int lun = 0;
 	unsigned long flags;
 	struct fcoe_wqe *sqe;
 	u16 sqe_idx;
 
 	if (!sc_cmd) {
-		QEDF_ERR(&(qedf->dbg_ctx), "invalid arg\n");
+		QEDF_ERR(&qedf->dbg_ctx, "sc_cmd is NULL\n");
 		return FAILED;
 	}
 
+	lun = (int)sc_cmd->device->lun;
 	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
 		QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n");
 		rc = FAILED;
-		return FAILED;
+		goto no_flush;
 	}
 
-	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "portid = 0x%x "
-		   "tm_flags = %d\n", fcport->rdata->ids.port_id, tm_flags);
-
 	io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD);
 	if (!io_req) {
 		QEDF_ERR(&(qedf->dbg_ctx), "Failed TMF");
 		rc = -EAGAIN;
-		goto reset_tmf_err;
+		goto no_flush;
 	}
 
 	if (tm_flags == FCP_TMF_LUN_RESET)
@@ -1957,7 +2309,7 @@
 	io_req->fcport = fcport;
 	io_req->cmd_type = QEDF_TASK_MGMT_CMD;
 
-	/* Set the return CPU to be the same as the request one */
+	/* Record which cpu this request is associated with */
 	io_req->cpu = smp_processor_id();
 
 	/* Set TM flags */
@@ -1966,7 +2318,7 @@
 	io_req->tm_flags = tm_flags;
 
 	/* Default is to return a SCSI command when an error occurs */
-	io_req->return_scsi_cmd_on_abts = true;
+	io_req->return_scsi_cmd_on_abts = false;
 
 	/* Obtain exchange id */
 	xid = io_req->xid;
@@ -1990,12 +2342,16 @@
 
 	spin_unlock_irqrestore(&fcport->rport_lock, flags);
 
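+	/* Mark the TMF outstanding so the completion path will process it */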
+	set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
 	tmo = wait_for_completion_timeout(&io_req->tm_done,
 	    QEDF_TM_TIMEOUT * HZ);
 
 	if (!tmo) {
 		rc = FAILED;
 		QEDF_ERR(&(qedf->dbg_ctx), "wait for tm_cmpl timeout!\n");
+		/* Clear outstanding bit since command timed out */
+		clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
+		io_req->sc_cmd = NULL;
 	} else {
 		/* Check TMF response code */
 		if (io_req->fcp_rsp_code == 0)
@@ -2003,14 +2359,25 @@
 		else
 			rc = FAILED;
 	}
+	/*
+	 * Double check that fcport has not gone into an uploading state before
+	 * executing the command flush for the LUN/target.
+	 */
+	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "fcport is uploading, not executing flush.\n");
+		goto no_flush;
+	}
+	/* We do not need this io_req any more */
+	kref_put(&io_req->refcount, qedf_release_cmd);
+
 
 	if (tm_flags == FCP_TMF_LUN_RESET)
-		qedf_flush_active_ios(fcport, (int)sc_cmd->device->lun);
+		qedf_flush_active_ios(fcport, lun);
 	else
 		qedf_flush_active_ios(fcport, -1);
 
-	kref_put(&io_req->refcount, qedf_release_cmd);
-
+no_flush:
 	if (rc != SUCCESS) {
 		QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command failed...\n");
 		rc = FAILED;
@@ -2018,7 +2385,6 @@
 		QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command success...\n");
 		rc = SUCCESS;
 	}
-reset_tmf_err:
 	return rc;
 }
 
@@ -2028,26 +2394,65 @@
 	struct fc_rport_libfc_priv *rp = rport->dd_data;
 	struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
 	struct qedf_ctx *qedf;
-	struct fc_lport *lport;
+	struct fc_lport *lport = shost_priv(sc_cmd->device->host);
 	int rc = SUCCESS;
 	int rval;
+	struct qedf_ioreq *io_req = NULL;
+	int ref_cnt = 0;
+	struct fc_rport_priv *rdata = fcport->rdata;
+
+	QEDF_ERR(NULL,
+		 "tm_flags 0x%x sc_cmd %p op = 0x%02x target_id = 0x%x lun=%d\n",
+		 tm_flags, sc_cmd, sc_cmd->cmd_len ? sc_cmd->cmnd[0] : 0xff,
+		 rport->scsi_target_id, (int)sc_cmd->device->lun);
+
+	if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
+		QEDF_ERR(NULL, "stale rport\n");
+		return FAILED;
+	}
+
+	QEDF_ERR(NULL, "portid=%06x tm_flags=%s\n", rdata->ids.port_id,
+		 (tm_flags == FCP_TMF_TGT_RESET) ? "TARGET RESET" :
+		 "LUN RESET");
+
+	if (sc_cmd->SCp.ptr) {
+		io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr;
+		ref_cnt = kref_read(&io_req->refcount);
+		QEDF_ERR(NULL,
+			 "orig io_req = %p xid = 0x%x ref_cnt = %d.\n",
+			 io_req, io_req->xid, ref_cnt);
+	}
 
 	rval = fc_remote_port_chkready(rport);
-
 	if (rval) {
 		QEDF_ERR(NULL, "device_reset rport not ready\n");
 		rc = FAILED;
 		goto tmf_err;
 	}
 
-	if (fcport == NULL) {
+	rc = fc_block_scsi_eh(sc_cmd);
+	if (rc)
+		goto tmf_err;
+
+	if (!fcport) {
 		QEDF_ERR(NULL, "device_reset: rport is NULL\n");
 		rc = FAILED;
 		goto tmf_err;
 	}
 
 	qedf = fcport->qedf;
-	lport = qedf->lport;
+
+	if (!qedf) {
+		QEDF_ERR(NULL, "qedf is NULL.\n");
+		rc = FAILED;
+		goto tmf_err;
+	}
+
+	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
+		QEDF_ERR(&qedf->dbg_ctx, "Connection is getting uploaded.\n");
+		rc = SUCCESS;
+		goto tmf_err;
+	}
 
 	if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
 	    test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
@@ -2061,9 +2466,22 @@
 		goto tmf_err;
 	}
 
+	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
+		if (!fcport->rdata)
+			QEDF_ERR(&qedf->dbg_ctx, "fcport %p is uploading.\n",
+				 fcport);
+		else
+			QEDF_ERR(&qedf->dbg_ctx,
+				 "fcport %p port_id=%06x is uploading.\n",
+				 fcport, fcport->rdata->ids.port_id);
+		rc = FAILED;
+		goto tmf_err;
+	}
+
 	rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags);
 
 tmf_err:
+	kref_put(&rdata->kref, fc_rport_destroy);
 	return rc;
 }
 
@@ -2072,6 +2490,8 @@
 {
 	struct fcoe_cqe_rsp_info *fcp_rsp;
 
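+	/* The TMF response has arrived, so the command is no longer outstanding */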
+	clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
+
 	fcp_rsp = &cqe->cqe_info.rsp_info;
 	qedf_parse_fcp_rsp(io_req, fcp_rsp);
 
@@ -2135,6 +2555,11 @@
 	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
 	memcpy(fh, (void *)bdq_addr, pktlen);
 
+	QEDF_WARN(&qedf->dbg_ctx,
+		  "Processing Unsolicited frame, src=%06x dest=%06x r_ctl=0x%x type=0x%x cmd=%02x\n",
+		  ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl,
+		  fh->fh_type, fc_frame_payload_op(fp));
+
 	/* Initialize the frame so libfc sees it as a valid frame */
 	crc = fcoe_fc_crc(fp);
 	fc_frame_init(fp);
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 0a5dd55..59ca98f 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  *  QLogic FCoE Offload Driver
  *  Copyright (c) 2016-2018 Cavium Inc.
- *
- *  This software is available under the terms of the GNU General Public License
- *  (GPL) Version 2, available from the file COPYING in the main directory of
- *  this source tree.
  */
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -30,6 +27,7 @@
 
 static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id);
 static void qedf_remove(struct pci_dev *pdev);
+static void qedf_shutdown(struct pci_dev *pdev);
 
 /*
  * Driver module parameters.
@@ -113,35 +111,45 @@
 
 void qedf_set_vlan_id(struct qedf_ctx *qedf, int vlan_id)
 {
-	qedf->vlan_id = vlan_id;
-	qedf->vlan_id |= qedf->prio << VLAN_PRIO_SHIFT;
-	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Setting vlan_id=%04x "
-		   "prio=%d.\n", vlan_id, qedf->prio);
+	int vlan_id_tmp = 0;
+
+	vlan_id_tmp = vlan_id | (qedf->prio << VLAN_PRIO_SHIFT);
+	qedf->vlan_id = vlan_id_tmp;
+	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+		  "Setting vlan_id=0x%04x prio=%d.\n",
+		  vlan_id_tmp, qedf->prio);
 }
 
 /* Returns true if we have a valid vlan, false otherwise */
 static bool qedf_initiate_fipvlan_req(struct qedf_ctx *qedf)
 {
-	int rc;
-
-	if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
-		QEDF_ERR(&(qedf->dbg_ctx), "Link not up.\n");
-		return  false;
-	}
 
 	while (qedf->fipvlan_retries--) {
-		if (qedf->vlan_id > 0)
+		/* Catch the case where the link goes down during fipvlan retries */
+		if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
+			QEDF_ERR(&qedf->dbg_ctx, "Link not up.\n");
+			return false;
+		}
+
+		if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
+			QEDF_ERR(&qedf->dbg_ctx, "Driver unloading.\n");
+			return false;
+		}
+
+		if (qedf->vlan_id > 0) {
+			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+				  "vlan = 0x%x already set, calling ctlr_link_up.\n",
+				  qedf->vlan_id);
+			if (atomic_read(&qedf->link_state) == QEDF_LINK_UP)
+				fcoe_ctlr_link_up(&qedf->ctlr);
 			return true;
+		}
+
 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
 			   "Retry %d.\n", qedf->fipvlan_retries);
 		init_completion(&qedf->fipvlan_compl);
 		qedf_fcoe_send_vlan_req(qedf);
-		rc = wait_for_completion_timeout(&qedf->fipvlan_compl,
-		    1 * HZ);
-		if (rc > 0) {
-			fcoe_ctlr_link_up(&qedf->ctlr);
-			return true;
-		}
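+		/* The result is intentionally ignored; the loop re-checks vlan_id */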
+		wait_for_completion_timeout(&qedf->fipvlan_compl, 1 * HZ);
 	}
 
 	return false;
@@ -153,12 +161,21 @@
 	    container_of(work, struct qedf_ctx, link_update.work);
 	int rc;
 
-	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Entered.\n");
+	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Entered. link_state=%d.\n",
+		  atomic_read(&qedf->link_state));
 
 	if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) {
 		rc = qedf_initiate_fipvlan_req(qedf);
 		if (rc)
 			return;
+
+		if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
+			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+				  "Link is down, resetting vlan_id.\n");
+			qedf->vlan_id = 0;
+			return;
+		}
+
 		/*
 		 * If we get here then we never received a response to our
 		 * fip vlan request so set the vlan_id to the default and
@@ -185,7 +202,9 @@
 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
 		    "Calling fcoe_ctlr_link_down().\n");
 		fcoe_ctlr_link_down(&qedf->ctlr);
-		qedf_wait_for_upload(qedf);
+		if (qedf_wait_for_upload(qedf) == false)
+			QEDF_ERR(&qedf->dbg_ctx,
+				 "Could not upload all sessions.\n");
 		/* Reset the number of FIP VLAN retries */
 		qedf->fipvlan_retries = qedf_fipvlan_retries;
 	}
@@ -302,8 +321,10 @@
 
 	lport = qedf->lport;
 
-	if (!lport->tt.elsct_send)
+	if (!lport->tt.elsct_send) {
+		QEDF_ERR(&qedf->dbg_ctx, "tt.elsct_send not set.\n");
 		return -EINVAL;
+	}
 
 	fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
 	if (!fp) {
@@ -321,11 +342,6 @@
 	return 0;
 }
 
-struct qedf_tmp_rdata_item {
-	struct fc_rport_priv *rdata;
-	struct list_head list;
-};
-
 /*
  * This function is called if link_down_tmo is in use.  If we get a link up and
  * link_down_tmo has not expired then use just FLOGI/ADISC to recover our
@@ -335,9 +351,8 @@
 {
 	struct qedf_ctx *qedf =
 	    container_of(work, struct qedf_ctx, link_recovery.work);
-	struct qedf_rport *fcport;
+	struct fc_lport *lport = qedf->lport;
 	struct fc_rport_priv *rdata;
-	struct qedf_tmp_rdata_item *rdata_item, *tmp_rdata_item;
 	bool rc;
 	int retries = 30;
 	int rval, i;
@@ -404,33 +419,14 @@
 	 * Call lport->tt.rport_login which will cause libfc to send an
 	 * ADISC since the rport is in state ready.
 	 */
-	rcu_read_lock();
-	list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
-		rdata = fcport->rdata;
-		if (rdata == NULL)
-			continue;
-		rdata_item = kzalloc(sizeof(struct qedf_tmp_rdata_item),
-		    GFP_ATOMIC);
-		if (!rdata_item)
-			continue;
+	mutex_lock(&lport->disc.disc_mutex);
+	list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) {
 		if (kref_get_unless_zero(&rdata->kref)) {
-			rdata_item->rdata = rdata;
-			list_add(&rdata_item->list, &rdata_login_list);
-		} else
-			kfree(rdata_item);
+			fc_rport_login(rdata);
+			kref_put(&rdata->kref, fc_rport_destroy);
+		}
 	}
-	rcu_read_unlock();
-	/*
-	 * Do the fc_rport_login outside of the rcu lock so we don't take a
-	 * mutex in an atomic context.
-	 */
-	list_for_each_entry_safe(rdata_item, tmp_rdata_item, &rdata_login_list,
-	    list) {
-		list_del(&rdata_item->list);
-		fc_rport_login(rdata_item->rdata);
-		kref_put(&rdata_item->rdata->kref, fc_rport_destroy);
-		kfree(rdata_item);
-	}
+	mutex_unlock(&lport->disc.disc_mutex);
 }
 
 static void qedf_update_link_speed(struct qedf_ctx *qedf,
@@ -458,6 +454,9 @@
 	case 100000:
 		lport->link_speed = FC_PORTSPEED_100GBIT;
 		break;
+	case 20000:
+		lport->link_speed = FC_PORTSPEED_20GBIT;
+		break;
 	default:
 		lport->link_speed = FC_PORTSPEED_UNKNOWN;
 		break;
@@ -467,16 +466,40 @@
 	 * Set supported link speed by querying the supported
 	 * capabilities of the link.
 	 */
-	if (link->supported_caps & SUPPORTED_10000baseKR_Full)
+	if ((link->supported_caps & QED_LM_10000baseT_Full_BIT) ||
+	    (link->supported_caps & QED_LM_10000baseKX4_Full_BIT) ||
+	    (link->supported_caps & QED_LM_10000baseR_FEC_BIT) ||
+	    (link->supported_caps & QED_LM_10000baseCR_Full_BIT) ||
+	    (link->supported_caps & QED_LM_10000baseSR_Full_BIT) ||
+	    (link->supported_caps & QED_LM_10000baseLR_Full_BIT) ||
+	    (link->supported_caps & QED_LM_10000baseLRM_Full_BIT) ||
+	    (link->supported_caps & QED_LM_10000baseKR_Full_BIT)) {
 		lport->link_supported_speeds |= FC_PORTSPEED_10GBIT;
-	if (link->supported_caps & SUPPORTED_25000baseKR_Full)
+	}
+	if ((link->supported_caps & QED_LM_25000baseKR_Full_BIT) ||
+	    (link->supported_caps & QED_LM_25000baseCR_Full_BIT) ||
+	    (link->supported_caps & QED_LM_25000baseSR_Full_BIT)) {
 		lport->link_supported_speeds |= FC_PORTSPEED_25GBIT;
-	if (link->supported_caps & SUPPORTED_40000baseLR4_Full)
+	}
+	if ((link->supported_caps & QED_LM_40000baseLR4_Full_BIT) ||
+	    (link->supported_caps & QED_LM_40000baseKR4_Full_BIT) ||
+	    (link->supported_caps & QED_LM_40000baseCR4_Full_BIT) ||
+	    (link->supported_caps & QED_LM_40000baseSR4_Full_BIT)) {
 		lport->link_supported_speeds |= FC_PORTSPEED_40GBIT;
-	if (link->supported_caps & SUPPORTED_50000baseKR2_Full)
+	}
+	if ((link->supported_caps & QED_LM_50000baseKR2_Full_BIT) ||
+	    (link->supported_caps & QED_LM_50000baseCR2_Full_BIT) ||
+	    (link->supported_caps & QED_LM_50000baseSR2_Full_BIT)) {
 		lport->link_supported_speeds |= FC_PORTSPEED_50GBIT;
-	if (link->supported_caps & SUPPORTED_100000baseKR4_Full)
+	}
+	if ((link->supported_caps & QED_LM_100000baseKR4_Full_BIT) ||
+	    (link->supported_caps & QED_LM_100000baseSR4_Full_BIT) ||
+	    (link->supported_caps & QED_LM_100000baseCR4_Full_BIT) ||
+	    (link->supported_caps & QED_LM_100000baseLR4_ER4_Full_BIT)) {
 		lport->link_supported_speeds |= FC_PORTSPEED_100GBIT;
+	}
+	if (link->supported_caps & QED_LM_20000baseKR2_Full_BIT)
+		lport->link_supported_speeds |= FC_PORTSPEED_20GBIT;
 	fc_host_supported_speeds(lport->host) = lport->link_supported_speeds;
 }
 
@@ -484,6 +507,16 @@
 {
 	struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
 
+	/*
+	 * Prevent a race where the module is being removed while a link
+	 * update arrives from qed.
+	 */
+	if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "Ignoring link update as the driver is unloading.\n");
+		return;
+	}
+
 	if (link->link_up) {
 		if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) {
 			QEDF_INFO((&qedf->dbg_ctx), QEDF_LOG_DISC,
@@ -563,7 +596,7 @@
 		tmp_prio = get->operational.app_prio.fcoe;
 		if (qedf_default_prio > -1)
 			qedf->prio = qedf_default_prio;
-		else if (tmp_prio < 0 || tmp_prio > 7) {
+		else if (tmp_prio > 7) {
 			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
 			    "FIP/FCoE prio %d out of range, setting to %d.\n",
 			    tmp_prio, QEDF_DEFAULT_PRIO);
@@ -615,50 +648,113 @@
 static int qedf_eh_abort(struct scsi_cmnd *sc_cmd)
 {
 	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
-	struct fc_rport_libfc_priv *rp = rport->dd_data;
-	struct qedf_rport *fcport;
 	struct fc_lport *lport;
 	struct qedf_ctx *qedf;
 	struct qedf_ioreq *io_req;
+	struct fc_rport_libfc_priv *rp = rport->dd_data;
+	struct fc_rport_priv *rdata;
+	struct qedf_rport *fcport = NULL;
 	int rc = FAILED;
+	int wait_count = 100;
+	int refcount = 0;
 	int rval;
-
-	if (fc_remote_port_chkready(rport)) {
-		QEDF_ERR(NULL, "rport not ready\n");
-		goto out;
-	}
+	int got_ref = 0;
 
 	lport = shost_priv(sc_cmd->device->host);
 	qedf = (struct qedf_ctx *)lport_priv(lport);
 
-	if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
-		QEDF_ERR(&(qedf->dbg_ctx), "link not ready.\n");
+	/* rport and fcport are allocated together, so fcport should be non-NULL */
+	fcport = (struct qedf_rport *)&rp[1];
+	rdata = fcport->rdata;
+	if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
+		QEDF_ERR(&qedf->dbg_ctx, "stale rport, sc_cmd=%p\n", sc_cmd);
+		rc = 1;
 		goto out;
 	}
 
-	fcport = (struct qedf_rport *)&rp[1];
 
 	io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr;
 	if (!io_req) {
-		QEDF_ERR(&(qedf->dbg_ctx), "io_req is NULL.\n");
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "sc_cmd not queued with lld, sc_cmd=%p op=0x%02x, port_id=%06x\n",
+			 sc_cmd, sc_cmd->cmnd[0],
+			 rdata->ids.port_id);
 		rc = SUCCESS;
-		goto out;
+		goto drop_rdata_kref;
 	}
 
-	QEDF_ERR(&(qedf->dbg_ctx), "Aborting io_req sc_cmd=%p xid=0x%x "
-		  "fp_idx=%d.\n", sc_cmd, io_req->xid, io_req->fp_idx);
+	rval = kref_get_unless_zero(&io_req->refcount);	/* ID: 005 */
+	if (rval)
+		got_ref = 1;
+
+	/* If we got a valid io_req, confirm it belongs to this sc_cmd. */
+	if (!rval || io_req->sc_cmd != sc_cmd) {
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "Freed/Incorrect io_req, io_req->sc_cmd=%p, sc_cmd=%p, port_id=%06x, bailing out.\n",
+			 io_req->sc_cmd, sc_cmd, rdata->ids.port_id);
+
+		goto drop_rdata_kref;
+	}
+
+	if (fc_remote_port_chkready(rport)) {
+		refcount = kref_read(&io_req->refcount);
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "rport not ready, io_req=%p, xid=0x%x sc_cmd=%p op=0x%02x, refcount=%d, port_id=%06x\n",
+			 io_req, io_req->xid, sc_cmd, sc_cmd->cmnd[0],
+			 refcount, rdata->ids.port_id);
+
+		goto drop_rdata_kref;
+	}
+
+	rc = fc_block_scsi_eh(sc_cmd);
+	if (rc)
+		goto drop_rdata_kref;
+
+	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "Connection uploading, xid=0x%x, port_id=%06x\n",
+			 io_req->xid, rdata->ids.port_id);
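+		/* Poll up to 100 * 100ms (~10 seconds) for the flush to return the command */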
+		while (io_req->sc_cmd && (wait_count != 0)) {
+			msleep(100);
+			wait_count--;
+		}
+		if (wait_count) {
+			QEDF_ERR(&qedf->dbg_ctx, "ABTS succeeded\n");
+			rc = SUCCESS;
+		} else {
+			QEDF_ERR(&qedf->dbg_ctx, "ABTS failed\n");
+			rc = FAILED;
+		}
+		goto drop_rdata_kref;
+	}
+
+	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
+		QEDF_ERR(&qedf->dbg_ctx, "link not ready.\n");
+		goto drop_rdata_kref;
+	}
+
+	QEDF_ERR(&qedf->dbg_ctx,
+		 "Aborting io_req=%p sc_cmd=%p xid=0x%x fp_idx=%d, port_id=%06x.\n",
+		 io_req, sc_cmd, io_req->xid, io_req->fp_idx,
+		 rdata->ids.port_id);
 
 	if (qedf->stop_io_on_error) {
 		qedf_stop_all_io(qedf);
 		rc = SUCCESS;
-		goto out;
+		goto drop_rdata_kref;
 	}
 
 	init_completion(&io_req->abts_done);
 	rval = qedf_initiate_abts(io_req, true);
 	if (rval) {
 		QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
-		goto out;
+		/*
+		 * If we fail to queue the ABTS then return this command to
+		 * the SCSI layer as it will own and free the xid
+		 */
+		rc = SUCCESS;
+		qedf_scsi_done(qedf, io_req, DID_ERROR);
+		goto drop_rdata_kref;
 	}
 
 	wait_for_completion(&io_req->abts_done);
@@ -684,38 +780,68 @@
 		QEDF_ERR(&(qedf->dbg_ctx), "ABTS failed, xid=0x%x.\n",
 			  io_req->xid);
 
+drop_rdata_kref:
+	kref_put(&rdata->kref, fc_rport_destroy);
 out:
+	if (got_ref)
+		kref_put(&io_req->refcount, qedf_release_cmd);
 	return rc;
 }
 
 static int qedf_eh_target_reset(struct scsi_cmnd *sc_cmd)
 {
-	QEDF_ERR(NULL, "TARGET RESET Issued...");
+	QEDF_ERR(NULL, "%d:0:%d:%lld: TARGET RESET Issued...",
+		 sc_cmd->device->host->host_no, sc_cmd->device->id,
+		 sc_cmd->device->lun);
 	return qedf_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET);
 }
 
 static int qedf_eh_device_reset(struct scsi_cmnd *sc_cmd)
 {
-	QEDF_ERR(NULL, "LUN RESET Issued...\n");
+	QEDF_ERR(NULL, "%d:0:%d:%lld: LUN RESET Issued... ",
+		 sc_cmd->device->host->host_no, sc_cmd->device->id,
+		 sc_cmd->device->lun);
 	return qedf_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
 }
 
-void qedf_wait_for_upload(struct qedf_ctx *qedf)
+bool qedf_wait_for_upload(struct qedf_ctx *qedf)
 {
-	while (1) {
+	struct qedf_rport *fcport = NULL;
+	int wait_cnt = 120;
+
+	while (wait_cnt--) {
 		if (atomic_read(&qedf->num_offloads))
-			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
-			    "Waiting for all uploads to complete.\n");
+			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+				  "Waiting for all uploads to complete, num_offloads=0x%x.\n",
+				  atomic_read(&qedf->num_offloads));
 		else
-			break;
+			return true;
 		msleep(500);
 	}
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
+		if (fcport && test_bit(QEDF_RPORT_SESSION_READY,
+				       &fcport->flags)) {
+			if (fcport->rdata)
+				QEDF_ERR(&qedf->dbg_ctx,
+					 "Waiting for fcport %p portid=%06x.\n",
+					 fcport, fcport->rdata->ids.port_id);
+		} else {
+			QEDF_ERR(&qedf->dbg_ctx,
+				 "Waiting for fcport %p.\n", fcport);
+		}
+	}
+	rcu_read_unlock();
+	return false;
 }
 
 /* Performs soft reset of qedf_ctx by simulating a link down/up */
-static void qedf_ctx_soft_reset(struct fc_lport *lport)
+void qedf_ctx_soft_reset(struct fc_lport *lport)
 {
 	struct qedf_ctx *qedf;
+	struct qed_link_output if_link;
 
 	if (lport->vport) {
 		QEDF_ERR(NULL, "Cannot issue host reset on NPIV port.\n");
@@ -726,11 +852,32 @@
 
 	/* For host reset, essentially do a soft link up/down */
 	atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
+	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+		  "Queuing link down work.\n");
 	queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
 	    0);
-	qedf_wait_for_upload(qedf);
+
+	if (qedf_wait_for_upload(qedf) == false) {
+		QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n");
+		WARN_ON(atomic_read(&qedf->num_offloads));
+	}
+
+	/* Before setting link up query physical link state */
+	qed_ops->common->get_link(qedf->cdev, &if_link);
+	/* Bail if the physical link is not up */
+	if (!if_link.link_up) {
+		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+			  "Physical link is not up.\n");
+		return;
+	}
+	/* Flush and wait to make sure link down is processed */
+	flush_delayed_work(&qedf->link_update);
+	msleep(500);
+
 	atomic_set(&qedf->link_state, QEDF_LINK_UP);
 	qedf->vlan_id  = 0;
+	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+		  "Queue link up work.\n");
 	queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
 	    0);
 }
@@ -740,22 +887,6 @@
 {
 	struct fc_lport *lport;
 	struct qedf_ctx *qedf;
-	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
-	struct fc_rport_libfc_priv *rp = rport->dd_data;
-	struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
-	int rval;
-
-	rval = fc_remote_port_chkready(rport);
-
-	if (rval) {
-		QEDF_ERR(NULL, "device_reset rport not ready\n");
-		return FAILED;
-	}
-
-	if (fcport == NULL) {
-		QEDF_ERR(NULL, "device_reset: rport is NULL\n");
-		return FAILED;
-	}
 
 	lport = shost_priv(sc_cmd->device->host);
 	qedf = lport_priv(lport);
@@ -785,7 +916,6 @@
 	.name 		= QEDF_MODULE_NAME,
 	.this_id 	= -1,
 	.cmd_per_lun	= 32,
-	.use_clustering = ENABLE_CLUSTERING,
 	.max_sectors 	= 0xffff,
 	.queuecommand 	= qedf_queuecommand,
 	.shost_attrs	= qedf_host_attrs,
@@ -908,8 +1038,10 @@
 		    "Dropping FCoE frame to %06x.\n", ntoh24(fh->fh_d_id));
 		kfree_skb(skb);
 		rdata = fc_rport_lookup(lport, ntoh24(fh->fh_d_id));
-		if (rdata)
+		if (rdata) {
 			rdata->retries = lport->max_rport_retry_count;
+			kref_put(&rdata->kref, fc_rport_destroy);
+		}
 		return -EINVAL;
 	}
 	/* End NPIV filtering */
@@ -969,7 +1101,7 @@
 			return -ENOMEM;
 		}
 		frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
-		cp = kmap_atomic(skb_frag_page(frag)) + frag->page_offset;
+		cp = kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag);
 	} else {
 		cp = skb_put(skb, tlen);
 	}
@@ -1032,7 +1164,12 @@
 	if (qedf_dump_frames)
 		print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16,
 		    1, skb->data, skb->len, false);
-	qed_ops->ll2->start_xmit(qedf->cdev, skb, 0);
+	rc = qed_ops->ll2->start_xmit(qedf->cdev, skb, 0);
+	if (rc) {
+		QEDF_ERR(&qedf->dbg_ctx, "start_xmit failed rc = %d.\n", rc);
+		kfree_skb(skb);
+		return rc;
+	}
 
 	return 0;
 }
@@ -1051,16 +1188,17 @@
 	    sizeof(void *);
 	fcport->sq_pbl_size = fcport->sq_pbl_size + QEDF_PAGE_SIZE;
 
-	fcport->sq = dma_zalloc_coherent(&qedf->pdev->dev,
-	    fcport->sq_mem_size, &fcport->sq_dma, GFP_KERNEL);
+	fcport->sq = dma_alloc_coherent(&qedf->pdev->dev, fcport->sq_mem_size,
+					&fcport->sq_dma, GFP_KERNEL);
 	if (!fcport->sq) {
 		QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue.\n");
 		rval = 1;
 		goto out;
 	}
 
-	fcport->sq_pbl = dma_zalloc_coherent(&qedf->pdev->dev,
-	    fcport->sq_pbl_size, &fcport->sq_pbl_dma, GFP_KERNEL);
+	fcport->sq_pbl = dma_alloc_coherent(&qedf->pdev->dev,
+					    fcport->sq_pbl_size,
+					    &fcport->sq_pbl_dma, GFP_KERNEL);
 	if (!fcport->sq_pbl) {
 		QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue PBL.\n");
 		rval = 1;
@@ -1224,6 +1362,8 @@
 static void qedf_cleanup_fcport(struct qedf_ctx *qedf,
 	struct qedf_rport *fcport)
 {
+	struct fc_rport_priv *rdata = fcport->rdata;
+
 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Cleaning up portid=%06x.\n",
 	    fcport->rdata->ids.port_id);
 
@@ -1235,6 +1375,7 @@
 	qedf_free_sq(qedf, fcport);
 	fcport->rdata = NULL;
 	fcport->qedf = NULL;
+	kref_put(&rdata->kref, fc_rport_destroy);
 }
 
 /**
@@ -1310,6 +1451,8 @@
 			break;
 		}
 
+		/* Initial reference held on entry, so this can't fail */
+		kref_get(&rdata->kref);
 		fcport->rdata = rdata;
 		fcport->rport = rport;
 
@@ -1369,11 +1512,15 @@
 		 */
 		fcport = (struct qedf_rport *)&rp[1];
 
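+		/* rport_lock serializes the SESSION_READY/UPLOADING flag transition */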
+		spin_lock_irqsave(&fcport->rport_lock, flags);
 		/* Only free this fcport if it is offloaded already */
-		if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
-			set_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags);
+		if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) &&
+		    !test_bit(QEDF_RPORT_UPLOADING_CONNECTION,
+		    &fcport->flags)) {
+			set_bit(QEDF_RPORT_UPLOADING_CONNECTION,
+				&fcport->flags);
+			spin_unlock_irqrestore(&fcport->rport_lock, flags);
 			qedf_cleanup_fcport(qedf, fcport);
-
 			/*
 			 * Remove fcport to list of qedf_ctx list of offloaded
 			 * ports
@@ -1385,8 +1532,9 @@
 			clear_bit(QEDF_RPORT_UPLOADING_CONNECTION,
 			    &fcport->flags);
 			atomic_dec(&qedf->num_offloads);
+		} else {
+			spin_unlock_irqrestore(&fcport->rport_lock, flags);
 		}
-
 		break;
 
 	case RPORT_EV_NONE:
@@ -1418,7 +1566,7 @@
 
 static void qedf_fcoe_ctlr_setup(struct qedf_ctx *qedf)
 {
-	fcoe_ctlr_init(&qedf->ctlr, FIP_ST_AUTO);
+	fcoe_ctlr_init(&qedf->ctlr, FIP_MODE_AUTO);
 
 	qedf->ctlr.send = qedf_fip_send;
 	qedf->ctlr.get_src_addr = qedf_get_src_mac;
@@ -1498,11 +1646,15 @@
 	fc_set_wwnn(lport, qedf->wwnn);
 	fc_set_wwpn(lport, qedf->wwpn);
 
-	fcoe_libfc_config(lport, &qedf->ctlr, &qedf_lport_template, 0);
+	if (fcoe_libfc_config(lport, &qedf->ctlr, &qedf_lport_template, 0)) {
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "fcoe_libfc_config failed.\n");
+		return -ENOMEM;
+	}
 
 	/* Allocate the exchange manager */
-	fc_exch_mgr_alloc(lport, FC_CLASS_3, qedf->max_scsi_xid + 1,
-	    qedf->max_els_xid, NULL);
+	fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_PARAMS_NUM_TASKS,
+			  0xfffe, NULL);
 
 	if (fc_lport_init_stats(lport))
 		return -ENOMEM;
@@ -1625,14 +1777,15 @@
 	vport_qedf->wwpn = vn_port->wwpn;
 
 	vn_port->host->transportt = qedf_fc_vport_transport_template;
-	vn_port->host->can_queue = QEDF_MAX_ELS_XID;
+	vn_port->host->can_queue = FCOE_PARAMS_NUM_TASKS;
 	vn_port->host->max_lun = qedf_max_lun;
 	vn_port->host->sg_tablesize = QEDF_MAX_BDS_PER_CMD;
 	vn_port->host->max_cmd_len = QEDF_MAX_CDB_LEN;
 
 	rc = scsi_add_host(vn_port->host, &vport->dev);
 	if (rc) {
-		QEDF_WARN(&(base_qedf->dbg_ctx), "Error adding Scsi_Host.\n");
+		QEDF_WARN(&base_qedf->dbg_ctx,
+			  "Error adding Scsi_Host rc=0x%x.\n", rc);
 		goto err2;
 	}
 
@@ -2086,16 +2239,21 @@
 static void qedf_sync_free_irqs(struct qedf_ctx *qedf)
 {
 	int i;
+	u16 vector_idx = 0;
+	u32 vector;
 
 	if (qedf->int_info.msix_cnt) {
 		for (i = 0; i < qedf->int_info.used_cnt; i++) {
-			synchronize_irq(qedf->int_info.msix[i].vector);
-			irq_set_affinity_hint(qedf->int_info.msix[i].vector,
-			    NULL);
-			irq_set_affinity_notifier(qedf->int_info.msix[i].vector,
-			    NULL);
-			free_irq(qedf->int_info.msix[i].vector,
-			    &qedf->fp_array[i]);
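+			/* Vectors are grouped per queue, one per hwfn; pick the affinitized hwfn's entry */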
+			vector_idx = i * qedf->dev_info.common.num_hwfns +
+				qed_ops->common->get_affin_hwfn_idx(qedf->cdev);
+			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+				  "Freeing IRQ #%d vector_idx=%d.\n",
+				  i, vector_idx);
+			vector = qedf->int_info.msix[vector_idx].vector;
+			synchronize_irq(vector);
+			irq_set_affinity_hint(vector, NULL);
+			irq_set_affinity_notifier(vector, NULL);
+			free_irq(vector, &qedf->fp_array[i]);
 		}
 	} else
 		qed_ops->common->simd_handler_clean(qedf->cdev,
@@ -2108,11 +2266,19 @@
 static int qedf_request_msix_irq(struct qedf_ctx *qedf)
 {
 	int i, rc, cpu;
+	u16 vector_idx = 0;
+	u32 vector;
 
 	cpu = cpumask_first(cpu_online_mask);
 	for (i = 0; i < qedf->num_queues; i++) {
-		rc = request_irq(qedf->int_info.msix[i].vector,
-		    qedf_msix_handler, 0, "qedf", &qedf->fp_array[i]);
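+		/* Same per-hwfn vector layout as in qedf_sync_free_irqs() */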
+		vector_idx = i * qedf->dev_info.common.num_hwfns +
+			qed_ops->common->get_affin_hwfn_idx(qedf->cdev);
+		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+			  "Requesting IRQ #%d vector_idx=%d.\n",
+			  i, vector_idx);
+		vector = qedf->int_info.msix[vector_idx].vector;
+		rc = request_irq(vector, qedf_msix_handler, 0, "qedf",
+				 &qedf->fp_array[i]);
 
 		if (rc) {
 			QEDF_WARN(&(qedf->dbg_ctx), "request_irq failed.\n");
@@ -2121,8 +2287,7 @@
 		}
 
 		qedf->int_info.used_cnt++;
-		rc = irq_set_affinity_hint(qedf->int_info.msix[i].vector,
-		    get_cpu_mask(cpu));
+		rc = irq_set_affinity_hint(vector, get_cpu_mask(cpu));
 		cpu = cpumask_next(cpu, cpu_online_mask);
 	}
 
@@ -2155,7 +2320,8 @@
 	    QEDF_SIMD_HANDLER_NUM, qedf_simd_int_handler);
 	qedf->int_info.used_cnt = 1;
 
-	QEDF_ERR(&qedf->dbg_ctx, "Only MSI-X supported. Failing probe.\n");
+	QEDF_ERR(&qedf->dbg_ctx,
+		 "Cannot load driver due to a lack of MSI-X vectors.\n");
 	return -EINVAL;
 }
 
@@ -2198,12 +2364,14 @@
 	fr_dev(fp) = lport;
 	fr_sof(fp) = hp->fcoe_sof;
 	if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
+		QEDF_INFO(NULL, QEDF_LOG_LL2, "skb_copy_bits failed.\n");
 		kfree_skb(skb);
 		return;
 	}
 	fr_eof(fp) = crc_eof.fcoe_eof;
 	fr_crc(fp) = crc_eof.fcoe_crc32;
 	if (pskb_trim(skb, fr_len)) {
+		QEDF_INFO(NULL, QEDF_LOG_LL2, "pskb_trim failed.\n");
 		kfree_skb(skb);
 		return;
 	}
@@ -2264,9 +2432,9 @@
 	 * empty then this is not addressed to our port so simply drop it.
 	 */
 	if (lport->port_id != ntoh24(fh->fh_d_id) && !vn_port) {
-		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
-		    "Dropping frame due to destination mismatch: lport->port_id=%x fh->d_id=%x.\n",
-		    lport->port_id, ntoh24(fh->fh_d_id));
+		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
+			  "Dropping frame due to destination mismatch: lport->port_id=0x%x fh->d_id=0x%x.\n",
+			  lport->port_id, ntoh24(fh->fh_d_id));
 		kfree_skb(skb);
 		return;
 	}
@@ -2275,6 +2443,8 @@
 	if ((fh->fh_type == FC_TYPE_BLS) && (f_ctl & FC_FC_SEQ_CTX) &&
 	    (f_ctl & FC_FC_EX_CTX)) {
 		/* Drop incoming ABTS response that has both SEQ/EX CTX set */
+		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
+			  "Dropping ABTS response as both SEQ/EX CTX set.\n");
 		kfree_skb(skb);
 		return;
 	}
@@ -2356,6 +2526,13 @@
 	struct qedf_ctx *qedf = (struct qedf_ctx *)cookie;
 	struct qedf_skb_work *skb_work;
 
+	if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
+		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
+			  "Dropping frame as link state is down.\n");
+		kfree_skb(skb);
+		return 0;
+	}
+
 	skb_work = kzalloc(sizeof(struct qedf_skb_work), GFP_ATOMIC);
 	if (!skb_work) {
 		QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate skb_work so "
@@ -2411,8 +2588,9 @@
 	    sizeof(struct status_block_e4), &sb_phys, GFP_KERNEL);
 
 	if (!sb_virt) {
-		QEDF_ERR(&(qedf->dbg_ctx), "Status block allocation failed "
-			  "for id = %d.\n", sb_id);
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "Status block allocation failed for id = %d.\n",
+			 sb_id);
 		return -ENOMEM;
 	}
 
@@ -2420,8 +2598,9 @@
 	    sb_id, QED_SB_TYPE_STORAGE);
 
 	if (ret) {
-		QEDF_ERR(&(qedf->dbg_ctx), "Status block initialization "
-			  "failed for id = %d.\n", sb_id);
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "Status block initialization failed (0x%x) for id = %d.\n",
+			 ret, sb_id);
 		return ret;
 	}
 
@@ -2505,13 +2684,18 @@
 	io_req = &qedf->cmd_mgr->cmds[xid];
 
 	/* Completion not for a valid I/O anymore so just return */
-	if (!io_req)
+	if (!io_req) {
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "io_req is NULL for xid=0x%x.\n", xid);
 		return;
+	}
 
 	fcport = io_req->fcport;
 
 	if (fcport == NULL) {
-		QEDF_ERR(&(qedf->dbg_ctx), "fcport is NULL.\n");
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "fcport is NULL for xid=0x%x io_req=%p.\n",
+			 xid, io_req);
 		return;
 	}
 
@@ -2520,7 +2704,8 @@
 	 * isn't valid and shouldn't be taken. We should just return.
 	 */
 	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
-		QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "Session not offloaded yet, fcport = %p.\n", fcport);
 		return;
 	}
 
@@ -2681,8 +2866,10 @@
 	}
 
 	/* Allocate list of PBL pages */
-	qedf->bdq_pbl_list = dma_zalloc_coherent(&qedf->pdev->dev,
-	    QEDF_PAGE_SIZE, &qedf->bdq_pbl_list_dma, GFP_KERNEL);
+	qedf->bdq_pbl_list = dma_alloc_coherent(&qedf->pdev->dev,
+						QEDF_PAGE_SIZE,
+						&qedf->bdq_pbl_list_dma,
+						GFP_KERNEL);
 	if (!qedf->bdq_pbl_list) {
 		QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate list of PBL pages.\n");
 		return -ENOMEM;
@@ -2730,6 +2917,7 @@
 	 */
 	if (!qedf->p_cpuq) {
 		status = 1;
+		QEDF_ERR(&qedf->dbg_ctx, "p_cpuq is NULL.\n");
 		goto mem_alloc_failure;
 	}
 
@@ -2745,8 +2933,10 @@
 
 	/* Allocate DMA coherent buffers for BDQ */
 	rc = qedf_alloc_bdq(qedf);
-	if (rc)
+	if (rc) {
+		QEDF_ERR(&qedf->dbg_ctx, "Unable to allocate bdq.\n");
 		goto mem_alloc_failure;
+	}
 
 	/* Allocate a CQ and an associated PBL for each MSI-X vector */
 	for (i = 0; i < qedf->num_queues; i++) {
@@ -2771,9 +2961,10 @@
 		    ALIGN(qedf->global_queues[i]->cq_pbl_size, QEDF_PAGE_SIZE);
 
 		qedf->global_queues[i]->cq =
-		    dma_zalloc_coherent(&qedf->pdev->dev,
-			qedf->global_queues[i]->cq_mem_size,
-			&qedf->global_queues[i]->cq_dma, GFP_KERNEL);
+		    dma_alloc_coherent(&qedf->pdev->dev,
+				       qedf->global_queues[i]->cq_mem_size,
+				       &qedf->global_queues[i]->cq_dma,
+				       GFP_KERNEL);
 
 		if (!qedf->global_queues[i]->cq) {
 			QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq.\n");
@@ -2782,9 +2973,10 @@
 		}
 
 		qedf->global_queues[i]->cq_pbl =
-		    dma_zalloc_coherent(&qedf->pdev->dev,
-			qedf->global_queues[i]->cq_pbl_size,
-			&qedf->global_queues[i]->cq_pbl_dma, GFP_KERNEL);
+		    dma_alloc_coherent(&qedf->pdev->dev,
+				       qedf->global_queues[i]->cq_pbl_size,
+				       &qedf->global_queues[i]->cq_pbl_dma,
+				       GFP_KERNEL);
 
 		if (!qedf->global_queues[i]->cq_pbl) {
 			QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq PBL.\n");
@@ -2855,12 +3047,12 @@
 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n",
 		   qedf->num_queues);
 
-	qedf->p_cpuq = pci_alloc_consistent(qedf->pdev,
+	qedf->p_cpuq = dma_alloc_coherent(&qedf->pdev->dev,
 	    qedf->num_queues * sizeof(struct qedf_glbl_q_params),
-	    &qedf->hw_p_cpuq);
+	    &qedf->hw_p_cpuq, GFP_KERNEL);
 
 	if (!qedf->p_cpuq) {
-		QEDF_ERR(&(qedf->dbg_ctx), "pci_alloc_consistent failed.\n");
+		QEDF_ERR(&(qedf->dbg_ctx), "dma_alloc_coherent failed.\n");
 		return 1;
 	}
 
@@ -2929,14 +3121,13 @@
 
 	if (qedf->p_cpuq) {
 		size = qedf->num_queues * sizeof(struct qedf_glbl_q_params);
-		pci_free_consistent(qedf->pdev, size, qedf->p_cpuq,
+		dma_free_coherent(&qedf->pdev->dev, size, qedf->p_cpuq,
 		    qedf->hw_p_cpuq);
 	}
 
 	qedf_free_global_queues(qedf);
 
-	if (qedf->global_queues)
-		kfree(qedf->global_queues);
+	kfree(qedf->global_queues);
 }
 
 /*
@@ -2955,6 +3146,7 @@
 	.id_table = qedf_pci_tbl,
 	.probe = qedf_probe,
 	.remove = qedf_remove,
+	.shutdown = qedf_shutdown,
 };
 
 static int __qedf_probe(struct pci_dev *pdev, int mode)
@@ -2987,6 +3179,8 @@
 			goto err0;
 		}
 
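+		/* Initialize libfc discovery state before the lport is used */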
+		fc_disc_init(lport);
+
 		/* Initialize qedf_ctx */
 		qedf = lport_priv(lport);
 		qedf->lport = lport;
@@ -3002,6 +3196,7 @@
 		pci_set_drvdata(pdev, qedf);
 		init_completion(&qedf->fipvlan_compl);
 		mutex_init(&qedf->stats_mutex);
+		mutex_init(&qedf->flush_mutex);
 
 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO,
 		   "QLogic FastLinQ FCoE Module qedf %s, "
@@ -3054,6 +3249,7 @@
 	qed_params.is_vf = is_vf;
 	qedf->cdev = qed_ops->common->probe(pdev, &qed_params);
 	if (!qedf->cdev) {
+		QEDF_ERR(&qedf->dbg_ctx, "common probe failed.\n");
 		rc = -ENODEV;
 		goto err1;
 	}
@@ -3065,6 +3261,11 @@
 		goto err1;
 	}
 
+	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+		  "dev_info: num_hwfns=%d affin_hwfn_idx=%d.\n",
+		  qedf->dev_info.common.num_hwfns,
+		  qed_ops->common->get_affin_hwfn_idx(qedf->cdev));
+
 	/* queue allocation code should come here
 	 * order should be
 	 * 	slowpath_start
@@ -3117,8 +3318,10 @@
 
 	/* Setup interrupts */
 	rc = qedf_setup_int(qedf);
-	if (rc)
+	if (rc) {
+		QEDF_ERR(&qedf->dbg_ctx, "Setup interrupts failed.\n");
 		goto err3;
+	}
 
 	rc = qed_ops->start(qedf->cdev, &qedf->tasks);
 	if (rc) {
@@ -3178,11 +3381,6 @@
 	sprintf(host_buf, "host_%d", host->host_no);
 	qed_ops->common->set_name(qedf->cdev, host_buf);
 
-
-	/* Set xid max values */
-	qedf->max_scsi_xid = QEDF_MAX_SCSI_XID;
-	qedf->max_els_xid = QEDF_MAX_ELS_XID;
-
 	/* Allocate cmd mgr */
 	qedf->cmd_mgr = qedf_cmd_mgr_alloc(qedf);
 	if (!qedf->cmd_mgr) {
@@ -3193,16 +3391,19 @@
 
 	if (mode != QEDF_MODE_RECOVERY) {
 		host->transportt = qedf_fc_transport_template;
-		host->can_queue = QEDF_MAX_ELS_XID;
 		host->max_lun = qedf_max_lun;
 		host->max_cmd_len = QEDF_MAX_CDB_LEN;
+		host->can_queue = FCOE_PARAMS_NUM_TASKS;
 		rc = scsi_add_host(host, &pdev->dev);
-		if (rc)
+		if (rc) {
+			QEDF_WARN(&qedf->dbg_ctx,
+				  "Error adding Scsi_Host rc=0x%x.\n", rc);
 			goto err6;
+		}
 	}
 
 	memset(&params, 0, sizeof(params));
-	params.mtu = 9000;
+	params.mtu = QEDF_LL2_BUF_SIZE;
 	ether_addr_copy(params.ll2_mac_address, qedf->mac);
 
 	/* Start LL2 processing thread */
@@ -3374,7 +3575,9 @@
 		fcoe_ctlr_link_down(&qedf->ctlr);
 	else
 		fc_fabric_logoff(qedf->lport);
-	qedf_wait_for_upload(qedf);
+
+	if (qedf_wait_for_upload(qedf) == false)
+		QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n");
 
 #ifdef CONFIG_DEBUG_FS
 	qedf_dbg_host_exit(&(qedf->dbg_ctx));
@@ -3559,6 +3762,11 @@
 	fcoe->scsi_tsk_full = qedf->task_set_fulls;
 }
 
+static void qedf_shutdown(struct pci_dev *pdev)
+{
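+	/* Reuse the normal remove path to quiesce the adapter on shutdown */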
+	__qedf_remove(pdev, QEDF_MODE_NORMAL);
+}
+
 /* Generic TLV data callback */
 void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
 {
@@ -3685,7 +3893,7 @@
 }
 
 MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("QLogic QEDF 25/40/50/100Gb FCoE Driver");
+MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx FCoE Module");
 MODULE_AUTHOR("QLogic Corporation");
 MODULE_VERSION(QEDF_VERSION);
 module_init(qedf_init);
diff --git a/drivers/scsi/qedf/qedf_version.h b/drivers/scsi/qedf/qedf_version.h
index 9455faa..b0e37af 100644
--- a/drivers/scsi/qedf/qedf_version.h
+++ b/drivers/scsi/qedf/qedf_version.h
@@ -1,15 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  *  QLogic FCoE Offload Driver
  *  Copyright (c) 2016-2018 Cavium Inc.
- *
- *  This software is available under the terms of the GNU General Public License
- *  (GPL) Version 2, available from the file COPYING in the main directory of
- *  this source tree.
  */
 
-#define QEDF_VERSION		"8.33.16.20"
+#define QEDF_VERSION		"8.42.3.0"
 #define QEDF_DRIVER_MAJOR_VER		8
-#define QEDF_DRIVER_MINOR_VER		33
-#define QEDF_DRIVER_REV_VER		16
-#define QEDF_DRIVER_ENG_VER		20
+#define QEDF_DRIVER_MINOR_VER		42
+#define QEDF_DRIVER_REV_VER		3
+#define QEDF_DRIVER_ENG_VER		0
 
diff --git a/drivers/scsi/qedi/Kconfig b/drivers/scsi/qedi/Kconfig
index d1db92d..7ab07f3 100644
--- a/drivers/scsi/qedi/Kconfig
+++ b/drivers/scsi/qedi/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 config QEDI
 	tristate "QLogic QEDI 25/40/100Gb iSCSI Initiator Driver Support"
 	depends on PCI && SCSI && UIO
diff --git a/drivers/scsi/qedi/Makefile b/drivers/scsi/qedi/Makefile
index 90a6925..d84eedf 100644
--- a/drivers/scsi/qedi/Makefile
+++ b/drivers/scsi/qedi/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 obj-$(CONFIG_QEDI) := qedi.o
 qedi-y := qedi_main.o qedi_iscsi.o qedi_fw.o qedi_sysfs.o \
 	    qedi_dbg.o qedi_fw_api.o
diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h
index a6f96b3..9513fd3 100644
--- a/drivers/scsi/qedi/qedi.h
+++ b/drivers/scsi/qedi/qedi.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * QLogic iSCSI Offload Driver
  * Copyright (c) 2016 Cavium Inc.
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
  */
 
 #ifndef _QEDI_H_
@@ -45,7 +42,7 @@
 #define QEDI_MAX_TASK_NUM		0x0FFF
 #define QEDI_MAX_ISCSI_CONNS_PER_HBA	1024
 #define QEDI_ISCSI_MAX_BDS_PER_CMD	255	/* Firmware max BDs is 255 */
-#define MAX_OUSTANDING_TASKS_PER_CON	1024
+#define MAX_OUTSTANDING_TASKS_PER_CON	1024
 
 #define QEDI_MAX_BD_LEN		0xffff
 #define QEDI_BD_SPLIT_SZ	0x1000
@@ -63,12 +60,9 @@
 #define QEDI_LOCAL_PORT_INVALID	0xffff
 #define TX_RX_RING		16
 #define RX_RING			(TX_RX_RING - 1)
-#define LL2_SINGLE_BUF_SIZE	0x400
-#define QEDI_PAGE_SIZE		4096
 #define QEDI_PAGE_ALIGN(addr)	ALIGN(addr, QEDI_PAGE_SIZE)
 #define QEDI_PAGE_MASK		(~((QEDI_PAGE_SIZE) - 1))
 
-#define QEDI_PAGE_SIZE		4096
 #define QEDI_HW_DMA_BOUNDARY	0xfff
 #define QEDI_PATH_HANDLE	0xFE0000000UL
 
@@ -146,7 +140,7 @@
 };
 
 /* Queue sizes in number of elements */
-#define QEDI_SQ_SIZE		MAX_OUSTANDING_TASKS_PER_CON
+#define QEDI_SQ_SIZE		MAX_OUTSTANDING_TASKS_PER_CON
 #define QEDI_CQ_SIZE		2048
 #define QEDI_CMDQ_SIZE		QEDI_MAX_ISCSI_TASK
 #define QEDI_PROTO_CQ_PROD_IDX	0
diff --git a/drivers/scsi/qedi/qedi_dbg.c b/drivers/scsi/qedi/qedi_dbg.c
index 8fd28b0..2ebef4d 100644
--- a/drivers/scsi/qedi/qedi_dbg.c
+++ b/drivers/scsi/qedi/qedi_dbg.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * QLogic iSCSI Offload Driver
  * Copyright (c) 2016 Cavium Inc.
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
  */
 
 #include "qedi_dbg.h"
@@ -16,10 +13,6 @@
 {
 	va_list va;
 	struct va_format vaf;
-	char nfunc[32];
-
-	memset(nfunc, 0, sizeof(nfunc));
-	memcpy(nfunc, func, sizeof(nfunc) - 1);
 
 	va_start(va, fmt);
 
@@ -28,9 +21,9 @@
 
 	if (likely(qedi) && likely(qedi->pdev))
 		pr_err("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev),
-		       nfunc, line, qedi->host_no, &vaf);
+		       func, line, qedi->host_no, &vaf);
 	else
-		pr_err("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+		pr_err("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf);
 
 	va_end(va);
 }
@@ -41,10 +34,6 @@
 {
 	va_list va;
 	struct va_format vaf;
-	char nfunc[32];
-
-	memset(nfunc, 0, sizeof(nfunc));
-	memcpy(nfunc, func, sizeof(nfunc) - 1);
 
 	va_start(va, fmt);
 
@@ -56,9 +45,9 @@
 
 	if (likely(qedi) && likely(qedi->pdev))
 		pr_warn("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev),
-			nfunc, line, qedi->host_no, &vaf);
+			func, line, qedi->host_no, &vaf);
 	else
-		pr_warn("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+		pr_warn("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf);
 
 ret:
 	va_end(va);
@@ -70,10 +59,6 @@
 {
 	va_list va;
 	struct va_format vaf;
-	char nfunc[32];
-
-	memset(nfunc, 0, sizeof(nfunc));
-	memcpy(nfunc, func, sizeof(nfunc) - 1);
 
 	va_start(va, fmt);
 
@@ -85,10 +70,10 @@
 
 	if (likely(qedi) && likely(qedi->pdev))
 		pr_notice("[%s]:[%s:%d]:%d: %pV",
-			  dev_name(&qedi->pdev->dev), nfunc, line,
+			  dev_name(&qedi->pdev->dev), func, line,
 			  qedi->host_no, &vaf);
 	else
-		pr_notice("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+		pr_notice("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf);
 
 ret:
 	va_end(va);
@@ -100,10 +85,6 @@
 {
 	va_list va;
 	struct va_format vaf;
-	char nfunc[32];
-
-	memset(nfunc, 0, sizeof(nfunc));
-	memcpy(nfunc, func, sizeof(nfunc) - 1);
 
 	va_start(va, fmt);
 
@@ -115,9 +96,9 @@
 
 	if (likely(qedi) && likely(qedi->pdev))
 		pr_info("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev),
-			nfunc, line, qedi->host_no, &vaf);
+			func, line, qedi->host_no, &vaf);
 	else
-		pr_info("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+		pr_info("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf);
 
 ret:
 	va_end(va);
diff --git a/drivers/scsi/qedi/qedi_dbg.h b/drivers/scsi/qedi/qedi_dbg.h
index 0bc9c31..243acc8 100644
--- a/drivers/scsi/qedi/qedi_dbg.h
+++ b/drivers/scsi/qedi/qedi_dbg.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * QLogic iSCSI Offload Driver
  * Copyright (c) 2016 Cavium Inc.
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
  */
 
 #ifndef _QEDI_DBG_H_
diff --git a/drivers/scsi/qedi/qedi_debugfs.c b/drivers/scsi/qedi/qedi_debugfs.c
index fd914ca..42f5afb 100644
--- a/drivers/scsi/qedi/qedi_debugfs.c
+++ b/drivers/scsi/qedi/qedi_debugfs.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * QLogic iSCSI Offload Driver
  * Copyright (c) 2016 Cavium Inc.
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
  */
 
 #include "qedi.h"
@@ -23,27 +20,16 @@
 		   const struct file_operations *fops)
 {
 	char host_dirname[32];
-	struct dentry *file_dentry = NULL;
 
 	sprintf(host_dirname, "host%u", qedi->host_no);
 	qedi->bdf_dentry = debugfs_create_dir(host_dirname, qedi_dbg_root);
-	if (!qedi->bdf_dentry)
-		return;
 
 	while (dops) {
 		if (!(dops->name))
 			break;
 
-		file_dentry = debugfs_create_file(dops->name, 0600,
-						  qedi->bdf_dentry, qedi,
-						  fops);
-		if (!file_dentry) {
-			QEDI_INFO(qedi, QEDI_LOG_DEBUGFS,
-				  "Debugfs entry %s creation failed\n",
-				  dops->name);
-			debugfs_remove_recursive(qedi->bdf_dentry);
-			return;
-		}
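+		/* debugfs creation failures are ignored; the API is best-effort */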
+		debugfs_create_file(dops->name, 0600, qedi->bdf_dentry, qedi,
+				    fops);
 		dops++;
 		fops++;
 	}
@@ -60,8 +46,6 @@
 qedi_dbg_init(char *drv_name)
 {
 	qedi_dbg_root = debugfs_create_dir(drv_name, NULL);
-	if (!qedi_dbg_root)
-		QEDI_INFO(NULL, QEDI_LOG_DEBUGFS, "Init of debugfs failed\n");
 }
 
 void
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index 25d763a..946cebc 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * QLogic iSCSI Offload Driver
  * Copyright (c) 2016 Cavium Inc.
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
  */
 
 #include <linux/blkdev.h>
@@ -155,12 +152,10 @@
 	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
 	struct iscsi_session *session = conn->session;
 	struct iscsi_tm_rsp *resp_hdr_ptr;
-	struct iscsi_cls_session *cls_sess;
 	int rval = 0;
 
 	set_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
 	resp_hdr_ptr =  (struct iscsi_tm_rsp *)qedi_cmd->tmf_resp_buf;
-	cls_sess = iscsi_conn_to_session(qedi_conn->cls_conn);
 
 	iscsi_block_session(session->cls_session);
 	rval = qedi_cleanup_all_io(qedi, qedi_conn, qedi_cmd->task, true);
@@ -616,13 +611,6 @@
 		goto error;
 	}
 
-	if (!sc_cmd->request->special) {
-		QEDI_WARN(&qedi->dbg_ctx,
-			  "request->special is NULL so request not valid, sc_cmd=%p.\n",
-			  sc_cmd);
-		goto error;
-	}
-
 	if (!sc_cmd->request->q) {
 		QEDI_WARN(&qedi->dbg_ctx,
 			  "request->q is NULL so request is not valid, sc_cmd=%p.\n",
@@ -992,7 +980,6 @@
 	 * others they are two different assembly operations.
 	 */
 	wmb();
-	mmiowb();
 	QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_MP_REQ,
 		  "prod_idx=0x%x, fw_prod_idx=0x%x, cid=0x%x\n",
 		  qedi_conn->ep->sq_prod_idx, qedi_conn->ep->fw_sq_prod_idx,
@@ -1374,7 +1361,6 @@
 	struct qedi_conn *qedi_conn = qedi_cmd->conn;
 	struct qedi_ctx *qedi = qedi_conn->qedi;
 	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
-	struct iscsi_cls_session *cls_sess;
 	struct qedi_work_map *list_work = NULL;
 	struct iscsi_task *mtask;
 	struct qedi_cmd *cmd;
@@ -1385,7 +1371,6 @@
 
 	mtask = qedi_cmd->task;
 	tmf_hdr = (struct iscsi_tm *)mtask->hdr;
-	cls_sess = iscsi_conn_to_session(qedi_conn->cls_conn);
 	set_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
 
 	ctask = iscsi_itt_to_task(conn, tmf_hdr->rtt);
diff --git a/drivers/scsi/qedi/qedi_fw_api.c b/drivers/scsi/qedi/qedi_fw_api.c
index 387dc87..5277290 100644
--- a/drivers/scsi/qedi/qedi_fw_api.c
+++ b/drivers/scsi/qedi/qedi_fw_api.c
@@ -1,9 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /* QLogic iSCSI Offload Driver
  * Copyright (c) 2016 Cavium Inc.
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
  */
 
 #include <linux/types.h>
diff --git a/drivers/scsi/qedi/qedi_fw_iscsi.h b/drivers/scsi/qedi/qedi_fw_iscsi.h
index c3deb77..10f19f0 100644
--- a/drivers/scsi/qedi/qedi_fw_iscsi.h
+++ b/drivers/scsi/qedi/qedi_fw_iscsi.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * QLogic iSCSI Offload Driver
  * Copyright (c) 2016 Cavium Inc.
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
  */
 
 #ifndef _QEDI_FW_ISCSI_H_
diff --git a/drivers/scsi/qedi/qedi_fw_scsi.h b/drivers/scsi/qedi/qedi_fw_scsi.h
index cdaf918..2524f9f 100644
--- a/drivers/scsi/qedi/qedi_fw_scsi.h
+++ b/drivers/scsi/qedi/qedi_fw_scsi.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * QLogic iSCSI Offload Driver
  * Copyright (c) 2016 Cavium Inc.
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
  */
 
 #ifndef _QEDI_FW_SCSI_H_
diff --git a/drivers/scsi/qedi/qedi_gbl.h b/drivers/scsi/qedi/qedi_gbl.h
index a2aa06e..8ba7c77 100644
--- a/drivers/scsi/qedi/qedi_gbl.h
+++ b/drivers/scsi/qedi/qedi_gbl.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * QLogic iSCSI Offload Driver
  * Copyright (c) 2016 Cavium Inc.
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
  */
 
 #ifndef _QEDI_GBL_H_
diff --git a/drivers/scsi/qedi/qedi_hsi.h b/drivers/scsi/qedi/qedi_hsi.h
index 8ca44c7..d82ab99 100644
--- a/drivers/scsi/qedi/qedi_hsi.h
+++ b/drivers/scsi/qedi/qedi_hsi.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * QLogic iSCSI Offload Driver
  * Copyright (c) 2016 Cavium Inc.
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
  */
 #ifndef __QEDI_HSI__
 #define __QEDI_HSI__
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 2f0a4f2..8829880 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * QLogic iSCSI Offload Driver
  * Copyright (c) 2016 Cavium Inc.
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
  */
 
 #include <linux/blkdev.h>
@@ -61,7 +58,6 @@
 	.max_sectors = 0xffff,
 	.dma_boundary = QEDI_HW_DMA_BOUNDARY,
 	.cmd_per_lun = 128,
-	.use_clustering = ENABLE_CLUSTERING,
 	.shost_attrs = qedi_shost_attrs,
 };
 
@@ -580,7 +576,7 @@
 	rval = qedi_iscsi_update_conn(qedi, qedi_conn);
 	if (rval) {
 		iscsi_conn_printk(KERN_ALERT, conn,
-				  "conn_start: FW oflload conn failed.\n");
+				  "conn_start: FW offload conn failed.\n");
 		rval = -EINVAL;
 		goto start_err;
 	}
@@ -591,7 +587,7 @@
 	rval = iscsi_conn_start(cls_conn);
 	if (rval) {
 		iscsi_conn_printk(KERN_ALERT, conn,
-				  "iscsi_conn_start: FW oflload conn failed!!\n");
+				  "iscsi_conn_start: FW offload conn failed!!\n");
 	}
 
 start_err:
@@ -810,8 +806,6 @@
 	struct qedi_endpoint *qedi_ep;
 	struct sockaddr_in *addr;
 	struct sockaddr_in6 *addr6;
-	struct qed_dev *cdev  =  NULL;
-	struct qedi_uio_dev *udev = NULL;
 	struct iscsi_path path_req;
 	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
 	u32 iscsi_cid = QEDI_CID_RESERVED;
@@ -831,8 +825,6 @@
 	}
 
 	qedi = iscsi_host_priv(shost);
-	cdev = qedi->cdev;
-	udev = qedi->udev;
 
 	if (test_bit(QEDI_IN_OFFLINE, &qedi->flags) ||
 	    test_bit(QEDI_IN_RECOVERY, &qedi->flags)) {
@@ -954,6 +946,7 @@
 
 	qedi_ep = ep->dd_data;
 	if (qedi_ep->state == EP_STATE_IDLE ||
+	    qedi_ep->state == EP_STATE_OFLDCONN_NONE ||
 	    qedi_ep->state == EP_STATE_OFLDCONN_FAILED)
 		return -1;
 
@@ -993,13 +986,17 @@
 	struct iscsi_conn *conn = NULL;
 	struct qedi_ctx *qedi;
 	int ret = 0;
-	int wait_delay = 20 * HZ;
+	int wait_delay;
 	int abrt_conn = 0;
 	int count = 10;
 
+	wait_delay = 60 * HZ + DEF_MAX_RT_TIME;
 	qedi_ep = ep->dd_data;
 	qedi = qedi_ep->qedi;
 
+	if (qedi_ep->state == EP_STATE_OFLDCONN_START)
+		goto ep_exit_recover;
+
 	flush_work(&qedi_ep->offload_work);
 
 	if (qedi_ep->conn) {
@@ -1036,6 +1033,7 @@
 
 	switch (qedi_ep->state) {
 	case EP_STATE_OFLDCONN_START:
+	case EP_STATE_OFLDCONN_NONE:
 		goto ep_release_conn;
 	case EP_STATE_OFLDCONN_FAILED:
 			break;
@@ -1162,7 +1160,7 @@
 	struct qedi_endpoint *qedi_ep =
 		container_of(work, struct qedi_endpoint, offload_work);
 	struct qedi_ctx *qedi;
-	int wait_delay = 20 * HZ;
+	int wait_delay = 5 * HZ;
 	int ret;
 
 	qedi = qedi_ep->qedi;
@@ -1226,6 +1224,7 @@
 
 	if (!is_valid_ether_addr(&path_data->mac_addr[0])) {
 		QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n");
+		qedi_ep->state = EP_STATE_OFLDCONN_NONE;
 		ret = -EIO;
 		goto set_path_exit;
 	}
diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h
index 1126077..67c3b73 100644
--- a/drivers/scsi/qedi/qedi_iscsi.h
+++ b/drivers/scsi/qedi/qedi_iscsi.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * QLogic iSCSI Offload Driver
  * Copyright (c) 2016 Cavium Inc.
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
  */
 
 #ifndef _QEDI_ISCSI_H_
@@ -59,6 +56,7 @@
 	EP_STATE_OFLDCONN_FAILED        = 0x2000,
 	EP_STATE_CONNECT_FAILED         = 0x4000,
 	EP_STATE_DISCONN_TIMEDOUT       = 0x8000,
+	EP_STATE_OFLDCONN_NONE          = 0x10000,
 };
 
 struct qedi_conn;
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index e5bd035..acb930b 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * QLogic iSCSI Offload Driver
  * Copyright (c) 2016 Cavium Inc.
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
  */
 
 #include <linux/module.h>
@@ -44,6 +41,11 @@
 MODULE_PARM_DESC(qedi_io_tracing,
 		 " Enable logging of SCSI requests/completions into trace buffer. (default off).");
 
+uint qedi_ll2_buf_size = 0x400;
+module_param(qedi_ll2_buf_size, uint, 0644);
+MODULE_PARM_DESC(qedi_ll2_buf_size,
+		 "parameter to set ping packet size, default - 0x400, Jumbo packets - 0x2400.");
+
 const struct qed_iscsi_ops *qedi_ops;
 static struct scsi_transport_template *qedi_scsi_transport;
 static struct pci_driver qedi_pci_driver;
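
qedi_ll2_buf_size makes the per-packet LL2 (ping) buffer size tunable at load time, default 0x400 and 0x2400 for jumbo frames per the parameter description, e.g. qedi_ll2_buf_size=0x2400 on the module command line; the Tx/Rx ring allocation below is derived from it. A sketch of the sizing math, with the ring-slot count and page size as assumed stand-ins for the driver's constants:

    #define TX_RX_RING_SKETCH 16    /* stand-in for the driver's TX_RX_RING */
    #define PAGE_SZ_SKETCH    4096  /* stand-in for QEDI_PAGE_SIZE */

    /* Total ring bytes: one buffer per slot, rounded up to a page, mirroring
     * udev->ll2_buf_size = QEDI_PAGE_ALIGN(TX_RX_RING * qedi_ll2_buf_size). */
    static unsigned int ll2_ring_bytes(unsigned int buf_size)
    {
        unsigned int bytes = TX_RX_RING_SKETCH * buf_size;

        return (bytes + PAGE_SZ_SKETCH - 1) & ~(PAGE_SZ_SKETCH - 1);
    }
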
@@ -228,7 +230,7 @@
 	}
 
 	/* Allocating memory for Tx/Rx pkt buffer */
-	udev->ll2_buf_size = TX_RX_RING * LL2_SINGLE_BUF_SIZE;
+	udev->ll2_buf_size = TX_RX_RING * qedi_ll2_buf_size;
 	udev->ll2_buf_size = QEDI_PAGE_ALIGN(udev->ll2_buf_size);
 	udev->ll2_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_COMP |
 						 __GFP_ZERO, 2);
@@ -283,7 +285,7 @@
 	qedi->udev = udev;
 
 	udev->tx_pkt = udev->ll2_buf;
-	udev->rx_pkt = udev->ll2_buf + LL2_SINGLE_BUF_SIZE;
+	udev->rx_pkt = udev->ll2_buf + qedi_ll2_buf_size;
 	return 0;
 
  err_uctrl:
@@ -644,8 +646,7 @@
 	qedi->max_active_conns = ISCSI_MAX_SESS_PER_HBA;
 	qedi->max_sqes = QEDI_SQ_SIZE;
 
-	if (shost_use_blk_mq(shost))
-		shost->nr_hw_queues = MIN_NUM_CPUS_MSIX(qedi);
+	shost->nr_hw_queues = MIN_NUM_CPUS_MSIX(qedi);
 
 	pci_set_drvdata(pdev, qedi);
 
@@ -659,7 +660,7 @@
 	struct qedi_uio_dev *udev;
 	struct qedi_uio_ctrl *uctrl;
 	struct skb_work_list *work;
-	u32 prod;
+	struct ethhdr *eh;
 
 	if (!qedi) {
 		QEDI_ERR(NULL, "qedi is NULL\n");
@@ -673,6 +674,29 @@
 		return 0;
 	}
 
+	eh = (struct ethhdr *)skb->data;
+	/* Undo VLAN encapsulation */
+	if (eh->h_proto == htons(ETH_P_8021Q)) {
+		memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
+		eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);
+		skb_reset_mac_header(skb);
+	}
+
+	/* Filter out non FIP/FCoE frames here to free them faster */
+	if (eh->h_proto != htons(ETH_P_ARP) &&
+	    eh->h_proto != htons(ETH_P_IP) &&
+	    eh->h_proto != htons(ETH_P_IPV6)) {
+		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_LL2,
+			  "Dropping frame ethertype [0x%x] len [0x%x].\n",
+			  eh->h_proto, skb->len);
+		kfree_skb(skb);
+		return 0;
+	}
+
+	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_LL2,
+		  "Allowed frame ethertype [0x%x] len [0x%x].\n",
+		  eh->h_proto, skb->len);
+
 	udev = qedi->udev;
 	uctrl = udev->uctrl;
 
@@ -695,17 +719,10 @@
 
 	spin_lock_bh(&qedi->ll2_lock);
 	list_add_tail(&work->list, &qedi->ll2_skb_list);
-
-	++uctrl->hw_rx_prod_cnt;
-	prod = (uctrl->hw_rx_prod + 1) % RX_RING;
-	if (prod != uctrl->host_rx_cons) {
-		uctrl->hw_rx_prod = prod;
-		spin_unlock_bh(&qedi->ll2_lock);
-		wake_up_process(qedi->ll2_recv_thread);
-		return 0;
-	}
-
 	spin_unlock_bh(&qedi->ll2_lock);
+
+	wake_up_process(qedi->ll2_recv_thread);
+
 	return 0;
 }
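
Two things now happen to every LL2 frame before it is queued for iscsiuio: an 802.1Q tag, if present, is stripped by sliding the two MAC addresses forward over it, and anything that is not ARP/IPv4/IPv6 is dropped early (the in-tree "non FIP/FCoE" comment appears inherited from the FCoE drivers; the filter actually keeps ARP and IP traffic). A userspace-compilable sketch of the untagging trick:

    #include <stdint.h>
    #include <string.h>

    #define ETH_ALEN  6   /* two MACs = 12 bytes precede the EtherType */
    #define VLAN_HLEN 4   /* 802.1Q tag: TPID + TCI */

    /* Slide dst+src MAC forward by VLAN_HLEN so the tag is overwritten,
     * then treat frame + VLAN_HLEN as the new Ethernet header -- the same
     * memmove()+skb_pull() sequence the driver performs. */
    static uint8_t *vlan_untag(uint8_t *frame)
    {
        memmove(frame + VLAN_HLEN, frame, ETH_ALEN * 2);
        return frame + VLAN_HLEN;
    }
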
 
@@ -720,6 +737,7 @@
 	u32 rx_bd_prod;
 	void *pkt;
 	int len = 0;
+	u32 prod;
 
 	if (!qedi) {
 		QEDI_ERR(NULL, "qedi is NULL\n");
@@ -728,12 +746,16 @@
 
 	udev = qedi->udev;
 	uctrl = udev->uctrl;
-	pkt = udev->rx_pkt + (uctrl->hw_rx_prod * LL2_SINGLE_BUF_SIZE);
-	len = min_t(u32, skb->len, (u32)LL2_SINGLE_BUF_SIZE);
+
+	++uctrl->hw_rx_prod_cnt;
+	prod = (uctrl->hw_rx_prod + 1) % RX_RING;
+
+	pkt = udev->rx_pkt + (prod * qedi_ll2_buf_size);
+	len = min_t(u32, skb->len, (u32)qedi_ll2_buf_size);
 	memcpy(pkt, skb->data, len);
 
 	memset(&rxbd, 0, sizeof(rxbd));
-	rxbd.rx_pkt_index = uctrl->hw_rx_prod;
+	rxbd.rx_pkt_index = prod;
 	rxbd.rx_pkt_len = len;
 	rxbd.vlan_id = vlan_id;
 
@@ -744,6 +766,16 @@
 
 	memcpy(p_rxbd, &rxbd, sizeof(rxbd));
 
+	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_LL2,
+		  "hw_rx_prod [%d] prod [%d] hw_rx_bd_prod [%d] rx_pkt_idx [%d] rx_len [%d].\n",
+		  uctrl->hw_rx_prod, prod, uctrl->hw_rx_bd_prod,
+		  rxbd.rx_pkt_index, rxbd.rx_pkt_len);
+	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_LL2,
+		  "host_rx_cons [%d] hw_rx_bd_cons [%d].\n",
+		  uctrl->host_rx_cons, uctrl->host_rx_bd_cons);
+
+	uctrl->hw_rx_prod = prod;
+
 	/* notify the iscsiuio about new packet */
 	uio_event_notify(&udev->qedi_uinfo);
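
With the producer bookkeeping moved out of qedi_ll2_rx() and into the thread that actually copies the packet, the slot is computed and published in one place: advance hw_rx_prod modulo RX_RING, copy the payload to rx_pkt + prod * buf_size, write the buffer descriptor, and only then store the new producer and notify iscsiuio. The index step in isolation (ring size assumed for the sketch):

    #define RX_RING_SKETCH 16  /* stand-in for the driver's RX_RING */

    /* Compute, but do not yet publish, the next producer slot; the packet
     * and its BD are written at prod * buf_size first, and hw_rx_prod is
     * stored only afterwards, as qedi_ll2_process_skb() now does. */
    static unsigned int next_rx_slot(unsigned int hw_rx_prod)
    {
        return (hw_rx_prod + 1) % RX_RING_SKETCH;
    }
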
 
@@ -796,7 +828,7 @@
 	int rval = 0;
 
 
-	num_sq_pages = (MAX_OUSTANDING_TASKS_PER_CON * 8) / PAGE_SIZE;
+	num_sq_pages = (MAX_OUTSTANDING_TASKS_PER_CON * 8) / QEDI_PAGE_SIZE;
 
 	qedi->num_queues = MIN_NUM_CPUS_MSIX(qedi);
 
@@ -806,11 +838,11 @@
 	memset(&qedi->pf_params.iscsi_pf_params, 0,
 	       sizeof(qedi->pf_params.iscsi_pf_params));
 
-	qedi->p_cpuq = pci_alloc_consistent(qedi->pdev,
+	qedi->p_cpuq = dma_alloc_coherent(&qedi->pdev->dev,
 			qedi->num_queues * sizeof(struct qedi_glbl_q_params),
-			&qedi->hw_p_cpuq);
+			&qedi->hw_p_cpuq, GFP_KERNEL);
 	if (!qedi->p_cpuq) {
-		QEDI_ERR(&qedi->dbg_ctx, "pci_alloc_consistent fail\n");
+		QEDI_ERR(&qedi->dbg_ctx, "dma_alloc_coherent fail\n");
 		rval = -1;
 		goto err_alloc_mem;
 	}
@@ -834,7 +866,7 @@
 	qedi->pf_params.iscsi_pf_params.max_fin_rt = 2;
 
 	for (log_page_size = 0 ; log_page_size < 32 ; log_page_size++) {
-		if ((1 << log_page_size) == PAGE_SIZE)
+		if ((1 << log_page_size) == QEDI_PAGE_SIZE)
 			break;
 	}
 	qedi->pf_params.iscsi_pf_params.log_page_size = log_page_size;
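
This hunk is one instance of a conversion repeated throughout the update: the deprecated pci_alloc_consistent()/pci_free_consistent() wrappers become the generic DMA API, which takes the underlying struct device plus an explicit GFP mask and returns zeroed memory. A sketch of the one-for-one replacement:

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    static void *alloc_coherent_buf(struct pci_dev *pdev, size_t sz,
                                    dma_addr_t *handle)
    {
        /* was: pci_alloc_consistent(pdev, sz, handle); */
        return dma_alloc_coherent(&pdev->dev, sz, handle, GFP_KERNEL);
    }

    static void free_coherent_buf(struct pci_dev *pdev, size_t sz,
                                  void *buf, dma_addr_t handle)
    {
        /* was: pci_free_consistent(pdev, sz, buf, handle); */
        dma_free_coherent(&pdev->dev, sz, buf, handle);
    }
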
@@ -871,7 +903,7 @@
 
 	if (qedi->p_cpuq) {
 		size = qedi->num_queues * sizeof(struct qedi_glbl_q_params);
-		pci_free_consistent(qedi->pdev, size, qedi->p_cpuq,
+		dma_free_coherent(&qedi->pdev->dev, size, qedi->p_cpuq,
 				    qedi->hw_p_cpuq);
 	}
 
@@ -952,6 +984,12 @@
 		cls_sess = iscsi_conn_to_session(cls_conn);
 		sess = cls_sess->dd_data;
 
+		if (!iscsi_is_session_online(cls_sess))
+			continue;
+
+		if (!sess->targetname)
+			continue;
+
 		if (pri_ctrl_flags) {
 			if (!strcmp(pri_tgt->iscsi_name, sess->targetname) &&
 			    !strcmp(pri_tgt->ip_addr, ep_ip_addr)) {
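
The two added continue checks harden the boot-target comparison loop: a session that is offline or has not yet set its target name would otherwise feed a NULL sess->targetname into strcmp(). The guard as a predicate, a sketch assuming the types from scsi_transport_iscsi.h:

    /* True only when the session is safe to compare against the boot
     * target: online, with a non-NULL target name. */
    static bool session_comparable(struct iscsi_cls_session *cls_sess,
                                   struct iscsi_session *sess)
    {
        return iscsi_is_session_online(cls_sess) && sess->targetname;
    }
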
@@ -1275,13 +1313,20 @@
 static void qedi_sync_free_irqs(struct qedi_ctx *qedi)
 {
 	int i;
+	u16 idx;
 
 	if (qedi->int_info.msix_cnt) {
 		for (i = 0; i < qedi->int_info.used_cnt; i++) {
-			synchronize_irq(qedi->int_info.msix[i].vector);
-			irq_set_affinity_hint(qedi->int_info.msix[i].vector,
+			idx = i * qedi->dev_info.common.num_hwfns +
+			qedi_ops->common->get_affin_hwfn_idx(qedi->cdev);
+
+			QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+				  "Freeing IRQ #%d vector_idx=%d.\n", i, idx);
+
+			synchronize_irq(qedi->int_info.msix[idx].vector);
+			irq_set_affinity_hint(qedi->int_info.msix[idx].vector,
 					      NULL);
-			free_irq(qedi->int_info.msix[i].vector,
+			free_irq(qedi->int_info.msix[idx].vector,
 				 &qedi->fp_array[i]);
 		}
 	} else {
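
Both the free and request paths now translate a fastpath queue number into an MSI-X table index instead of using it directly: on multi-function devices the vectors interleave across hardware functions, so queue i owns vector i * num_hwfns + affinity_hwfn_index (the latter from qed's get_affin_hwfn_idx()). The mapping in isolation:

    /* MSI-X slot for fastpath queue 'queue' when vectors interleave
     * across 'num_hwfns' hardware functions and this PF's traffic is
     * affined to hardware function 'affin_idx'. */
    static unsigned int msix_vector_index(unsigned int queue,
                                          unsigned int num_hwfns,
                                          unsigned int affin_idx)
    {
        return queue * num_hwfns + affin_idx;
    }
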
@@ -1296,20 +1341,28 @@
 static int qedi_request_msix_irq(struct qedi_ctx *qedi)
 {
 	int i, rc, cpu;
+	u16 idx;
 
 	cpu = cpumask_first(cpu_online_mask);
 	for (i = 0; i < MIN_NUM_CPUS_MSIX(qedi); i++) {
-		rc = request_irq(qedi->int_info.msix[i].vector,
+		idx = i * qedi->dev_info.common.num_hwfns +
+			  qedi_ops->common->get_affin_hwfn_idx(qedi->cdev);
+
+		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+			  "dev_info: num_hwfns=%d affin_hwfn_idx=%d.\n",
+			  qedi->dev_info.common.num_hwfns,
+			  qedi_ops->common->get_affin_hwfn_idx(qedi->cdev));
+
+		rc = request_irq(qedi->int_info.msix[idx].vector,
 				 qedi_msix_handler, 0, "qedi",
 				 &qedi->fp_array[i]);
-
 		if (rc) {
 			QEDI_WARN(&qedi->dbg_ctx, "request_irq failed.\n");
 			qedi_sync_free_irqs(qedi);
 			return rc;
 		}
 		qedi->int_info.used_cnt++;
-		rc = irq_set_affinity_hint(qedi->int_info.msix[i].vector,
+		rc = irq_set_affinity_hint(qedi->int_info.msix[idx].vector,
 					   get_cpu_mask(cpu));
 		cpu = cpumask_next(cpu, cpu_online_mask);
 	}
@@ -1354,12 +1407,9 @@
 
 static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi)
 {
-	struct qedi_nvm_iscsi_image nvm_image;
-
-	qedi->iscsi_image = dma_zalloc_coherent(&qedi->pdev->dev,
-						sizeof(nvm_image),
-						&qedi->nvm_buf_dma,
-						GFP_KERNEL);
+	qedi->iscsi_image = dma_alloc_coherent(&qedi->pdev->dev,
+					       sizeof(struct qedi_nvm_iscsi_image),
+					       &qedi->nvm_buf_dma, GFP_KERNEL);
 	if (!qedi->iscsi_image) {
 		QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n");
 		return -ENOMEM;
@@ -1376,7 +1426,7 @@
 	int i;
 
 	if (qedi->bdq_pbl_list)
-		dma_free_coherent(&qedi->pdev->dev, PAGE_SIZE,
+		dma_free_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE,
 				  qedi->bdq_pbl_list, qedi->bdq_pbl_list_dma);
 
 	if (qedi->bdq_pbl)
@@ -1437,7 +1487,7 @@
 
 	/* Alloc dma memory for BDQ page buffer list */
 	qedi->bdq_pbl_mem_size = QEDI_BDQ_NUM * sizeof(struct scsi_bd);
-	qedi->bdq_pbl_mem_size = ALIGN(qedi->bdq_pbl_mem_size, PAGE_SIZE);
+	qedi->bdq_pbl_mem_size = ALIGN(qedi->bdq_pbl_mem_size, QEDI_PAGE_SIZE);
 	qedi->rq_num_entries = qedi->bdq_pbl_mem_size / sizeof(struct scsi_bd);
 
 	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "rq_num_entries = %d.\n",
@@ -1472,9 +1522,10 @@
 	}
 
 	/* Allocate list of PBL pages */
-	qedi->bdq_pbl_list = dma_zalloc_coherent(&qedi->pdev->dev, PAGE_SIZE,
-						 &qedi->bdq_pbl_list_dma,
-						 GFP_KERNEL);
+	qedi->bdq_pbl_list = dma_alloc_coherent(&qedi->pdev->dev,
+						QEDI_PAGE_SIZE,
+						&qedi->bdq_pbl_list_dma,
+						GFP_KERNEL);
 	if (!qedi->bdq_pbl_list) {
 		QEDI_ERR(&qedi->dbg_ctx,
 			 "Could not allocate list of PBL pages.\n");
@@ -1485,13 +1536,14 @@
 	 * Now populate PBL list with pages that contain pointers to the
 	 * individual buffers.
 	 */
-	qedi->bdq_pbl_list_num_entries = qedi->bdq_pbl_mem_size / PAGE_SIZE;
+	qedi->bdq_pbl_list_num_entries = qedi->bdq_pbl_mem_size /
+					 QEDI_PAGE_SIZE;
 	list = (u64 *)qedi->bdq_pbl_list;
 	page = qedi->bdq_pbl_list_dma;
 	for (i = 0; i < qedi->bdq_pbl_list_num_entries; i++) {
 		*list = qedi->bdq_pbl_dma;
 		list++;
-		page += PAGE_SIZE;
+		page += QEDI_PAGE_SIZE;
 	}
 
 	return 0;
@@ -1569,10 +1621,10 @@
 		    (qedi->global_queues[i]->cq_pbl_size +
 		    (QEDI_PAGE_SIZE - 1));
 
-		qedi->global_queues[i]->cq = dma_zalloc_coherent(&qedi->pdev->dev,
-								 qedi->global_queues[i]->cq_mem_size,
-								 &qedi->global_queues[i]->cq_dma,
-								 GFP_KERNEL);
+		qedi->global_queues[i]->cq = dma_alloc_coherent(&qedi->pdev->dev,
+								qedi->global_queues[i]->cq_mem_size,
+								&qedi->global_queues[i]->cq_dma,
+								GFP_KERNEL);
 
 		if (!qedi->global_queues[i]->cq) {
 			QEDI_WARN(&qedi->dbg_ctx,
@@ -1580,10 +1632,10 @@
 			status = -ENOMEM;
 			goto mem_alloc_failure;
 		}
-		qedi->global_queues[i]->cq_pbl = dma_zalloc_coherent(&qedi->pdev->dev,
-								     qedi->global_queues[i]->cq_pbl_size,
-								     &qedi->global_queues[i]->cq_pbl_dma,
-								     GFP_KERNEL);
+		qedi->global_queues[i]->cq_pbl = dma_alloc_coherent(&qedi->pdev->dev,
+								    qedi->global_queues[i]->cq_pbl_size,
+								    &qedi->global_queues[i]->cq_pbl_dma,
+								    GFP_KERNEL);
 
 		if (!qedi->global_queues[i]->cq_pbl) {
 			QEDI_WARN(&qedi->dbg_ctx,
@@ -1651,16 +1703,16 @@
 	ep->sq_pbl_size = (ep->sq_mem_size / QEDI_PAGE_SIZE) * sizeof(void *);
 	ep->sq_pbl_size = ep->sq_pbl_size + QEDI_PAGE_SIZE;
 
-	ep->sq = dma_zalloc_coherent(&qedi->pdev->dev, ep->sq_mem_size,
-				     &ep->sq_dma, GFP_KERNEL);
+	ep->sq = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_mem_size,
+				    &ep->sq_dma, GFP_KERNEL);
 	if (!ep->sq) {
 		QEDI_WARN(&qedi->dbg_ctx,
 			  "Could not allocate send queue.\n");
 		rval = -ENOMEM;
 		goto out;
 	}
-	ep->sq_pbl = dma_zalloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size,
-					 &ep->sq_pbl_dma, GFP_KERNEL);
+	ep->sq_pbl = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size,
+					&ep->sq_pbl_dma, GFP_KERNEL);
 	if (!ep->sq_pbl) {
 		QEDI_WARN(&qedi->dbg_ctx,
 			  "Could not allocate send queue PBL.\n");
@@ -2197,14 +2249,13 @@
 static int qedi_get_boot_info(struct qedi_ctx *qedi)
 {
 	int ret = 1;
-	struct qedi_nvm_iscsi_image nvm_image;
 
 	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
 		  "Get NVM iSCSI CFG image\n");
 	ret = qedi_ops->common->nvm_get_image(qedi->cdev,
 					      QED_NVM_IMAGE_ISCSI_CFG,
 					      (char *)qedi->iscsi_image,
-					      sizeof(nvm_image));
+					      sizeof(struct qedi_nvm_iscsi_image));
 	if (ret)
 		QEDI_ERR(&qedi->dbg_ctx,
 			 "Could not get NVM image. ret = %d\n", ret);
@@ -2379,6 +2430,11 @@
 	if (rc)
 		goto free_host;
 
+	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+		  "dev_info: num_hwfns=%d affin_hwfn_idx=%d.\n",
+		  qedi->dev_info.common.num_hwfns,
+		  qedi_ops->common->get_affin_hwfn_idx(qedi->cdev));
+
 	if (mode != QEDI_MODE_RECOVERY) {
 		rc = qedi_set_iscsi_pf_param(qedi);
 		if (rc) {
diff --git a/drivers/scsi/qedi/qedi_nvm_iscsi_cfg.h b/drivers/scsi/qedi/qedi_nvm_iscsi_cfg.h
index df39b69..760864e 100644
--- a/drivers/scsi/qedi/qedi_nvm_iscsi_cfg.h
+++ b/drivers/scsi/qedi/qedi_nvm_iscsi_cfg.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * QLogic iSCSI Offload Driver
  * Copyright (c) 2016 Cavium Inc.
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
  */
 
 #ifndef NVM_ISCSI_CFG_H
diff --git a/drivers/scsi/qedi/qedi_sysfs.c b/drivers/scsi/qedi/qedi_sysfs.c
index b10c48b..04ee68e 100644
--- a/drivers/scsi/qedi/qedi_sysfs.c
+++ b/drivers/scsi/qedi/qedi_sysfs.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * QLogic iSCSI Offload Driver
  * Copyright (c) 2016 Cavium Inc.
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
  */
 
 #include "qedi.h"
diff --git a/drivers/scsi/qedi/qedi_version.h b/drivers/scsi/qedi/qedi_version.h
index 8a0e523..0ac1055 100644
--- a/drivers/scsi/qedi/qedi_version.h
+++ b/drivers/scsi/qedi/qedi_version.h
@@ -1,14 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * QLogic iSCSI Offload Driver
  * Copyright (c) 2016 Cavium Inc.
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
  */
 
-#define QEDI_MODULE_VERSION	"8.33.0.20"
+#define QEDI_MODULE_VERSION	"8.37.0.20"
 #define QEDI_DRIVER_MAJOR_VER		8
-#define QEDI_DRIVER_MINOR_VER		33
+#define QEDI_DRIVER_MINOR_VER		37
 #define QEDI_DRIVER_REV_VER		0
 #define QEDI_DRIVER_ENG_VER		20
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 390775d..832af42 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /******************************************************************************
 *                  QLOGIC LINUX SOFTWARE
 *
@@ -6,16 +7,6 @@
 * Copyright (C) 2001-2004 Jes Sorensen, Wild Open Source Inc.
 * Copyright (C) 2003-2004 Christoph Hellwig
 *
-* This program is free software; you can redistribute it and/or modify it
-* under the terms of the GNU General Public License as published by the
-* Free Software Foundation; either version 2, or (at your option) any
-* later version.
-*
-* This program is distributed in the hope that it will be useful, but
-* WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-* General Public License for more details.
-*
 ******************************************************************************/
 #define QLA1280_VERSION      "3.27.1"
 /*****************************************************************************
@@ -366,10 +357,6 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_tcq.h>
 
-#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
-#include <asm/sn/io.h>
-#endif
-
 
 /*
  * Compile time Options:
@@ -383,27 +370,12 @@
 
 #include "qla1280.h"
 
-#ifndef BITS_PER_LONG
-#error "BITS_PER_LONG not defined!"
-#endif
-#if (BITS_PER_LONG == 64) || defined CONFIG_HIGHMEM
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 #define QLA_64BIT_PTR	1
 #endif
 
-#ifdef QLA_64BIT_PTR
-#define pci_dma_hi32(a)			((a >> 16) >> 16)
-#else
-#define pci_dma_hi32(a)			0
-#endif
-#define pci_dma_lo32(a)			(a & 0xffffffff)
-
 #define NVRAM_DELAY()			udelay(500)	/* 2 microseconds */
 
-#if defined(__ia64__) && !defined(ia64_platform_is)
-#define ia64_platform_is(foo)		(!strcmp(x, platform_name))
-#endif
-
-
 #define IS_ISP1040(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020)
 #define IS_ISP1x40(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020 || \
 			ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1240)
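
The driver-local pci_dma_hi32()/pci_dma_lo32() macros give way to the kernel-wide upper_32_bits()/lower_32_bits() helpers from <linux/kernel.h>, and QLA_64BIT_PTR is keyed off CONFIG_ARCH_DMA_ADDR_T_64BIT rather than BITS_PER_LONG/HIGHMEM guesswork. What the helpers reduce to, as a userspace-compilable sketch:

    #include <stdint.h>

    /* Equivalents of the kernel helpers. The double 16-bit shift avoids
     * undefined behaviour when the argument type is only 32 bits wide. */
    static inline uint32_t upper_32(uint64_t n) { return (uint32_t)((n >> 16) >> 16); }
    static inline uint32_t lower_32(uint64_t n) { return (uint32_t)(n & 0xffffffffu); }
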
@@ -1446,15 +1418,6 @@
 	ha->flags.reset_active = 0;
 	ha->flags.abort_isp_active = 0;
 
-#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
-	if (ia64_platform_is("sn2")) {
-		printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA "
-		       "dual channel lockup workaround\n", ha->host_no);
-		ha->flags.use_pci_vchannel = 1;
-		driver_setup.no_nvram = 1;
-	}
-#endif
-
 	/* TODO: implement support for the 1040 nvram format */
 	if (IS_ISP1040(ha))
 		driver_setup.no_nvram = 1;
@@ -1750,7 +1713,7 @@
 	uint8_t *sp, *tbuf;
 	dma_addr_t p_tbuf;
 
-	tbuf = pci_alloc_consistent(ha->pdev, 8000, &p_tbuf);
+	tbuf = dma_alloc_coherent(&ha->pdev->dev, 8000, &p_tbuf, GFP_KERNEL);
 	if (!tbuf)
 		return -ENOMEM;
 #endif
@@ -1790,8 +1753,8 @@
 		mb[4] = cnt;
 		mb[3] = ha->request_dma & 0xffff;
 		mb[2] = (ha->request_dma >> 16) & 0xffff;
-		mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff;
-		mb[6] = pci_dma_hi32(ha->request_dma) >> 16;
+		mb[7] = upper_32_bits(ha->request_dma) & 0xffff;
+		mb[6] = upper_32_bits(ha->request_dma) >> 16;
 		dprintk(2, "%s: op=%d  0x%p = 0x%4x,0x%4x,0x%4x,0x%4x\n",
 				__func__, mb[0],
 				(void *)(long)ha->request_dma,
@@ -1810,8 +1773,8 @@
 		mb[4] = cnt;
 		mb[3] = p_tbuf & 0xffff;
 		mb[2] = (p_tbuf >> 16) & 0xffff;
-		mb[7] = pci_dma_hi32(p_tbuf) & 0xffff;
-		mb[6] = pci_dma_hi32(p_tbuf) >> 16;
+		mb[7] = upper_32_bits(p_tbuf) & 0xffff;
+		mb[6] = upper_32_bits(p_tbuf) >> 16;
 
 		err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 |
 				BIT_1 | BIT_0, mb);
@@ -1841,7 +1804,7 @@
 
  out:
 #if DUMP_IT_BACK
-	pci_free_consistent(ha->pdev, 8000, tbuf, p_tbuf);
+	dma_free_coherent(&ha->pdev->dev, 8000, tbuf, p_tbuf);
 #endif
 	return err;
 }
@@ -1933,8 +1896,8 @@
 	mb[3] = ha->request_dma & 0xffff;
 	mb[2] = (ha->request_dma >> 16) & 0xffff;
 	mb[4] = 0;
-	mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff;
-	mb[6] = pci_dma_hi32(ha->request_dma) >> 16;
+	mb[7] = upper_32_bits(ha->request_dma) & 0xffff;
+	mb[6] = upper_32_bits(ha->request_dma) >> 16;
 	if (!(status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_4 |
 					       BIT_3 | BIT_2 | BIT_1 | BIT_0,
 					       &mb[0]))) {
@@ -1947,8 +1910,8 @@
 		mb[3] = ha->response_dma & 0xffff;
 		mb[2] = (ha->response_dma >> 16) & 0xffff;
 		mb[5] = 0;
-		mb[7] = pci_dma_hi32(ha->response_dma) & 0xffff;
-		mb[6] = pci_dma_hi32(ha->response_dma) >> 16;
+		mb[7] = upper_32_bits(ha->response_dma) & 0xffff;
+		mb[6] = upper_32_bits(ha->response_dma) >> 16;
 		status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_5 |
 						 BIT_3 | BIT_2 | BIT_1 | BIT_0,
 						 &mb[0]);
@@ -2270,13 +2233,6 @@
 	mb[1] = nv->firmware_feature.f.enable_fast_posting;
 	mb[1] |= nv->firmware_feature.f.report_lvd_bus_transition << 1;
 	mb[1] |= nv->firmware_feature.f.disable_synchronous_backoff << 5;
-#if defined(CONFIG_IA64_GENERIC) || defined (CONFIG_IA64_SGI_SN2)
-	if (ia64_platform_is("sn2")) {
-		printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA "
-		       "workaround\n", ha->host_no);
-		mb[1] |= nv->firmware_feature.f.unused_9 << 9; /* XXX */
-	}
-#endif
 	status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
 
 	/* Retry count and delay. */
@@ -2907,20 +2863,14 @@
 				break;
 
 			dma_handle = sg_dma_address(s);
-#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
-			if (ha->flags.use_pci_vchannel)
-				sn_pci_set_vchan(ha->pdev,
-						 (unsigned long *)&dma_handle,
-						 SCSI_BUS_32(cmd));
-#endif
 			*dword_ptr++ =
-				cpu_to_le32(pci_dma_lo32(dma_handle));
+				cpu_to_le32(lower_32_bits(dma_handle));
 			*dword_ptr++ =
-				cpu_to_le32(pci_dma_hi32(dma_handle));
+				cpu_to_le32(upper_32_bits(dma_handle));
 			*dword_ptr++ = cpu_to_le32(sg_dma_len(s));
 			dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n",
-				cpu_to_le32(pci_dma_hi32(dma_handle)),
-				cpu_to_le32(pci_dma_lo32(dma_handle)),
+				cpu_to_le32(upper_32_bits(dma_handle)),
+				cpu_to_le32(lower_32_bits(dma_handle)),
 				cpu_to_le32(sg_dma_len(sg_next(s))));
 			remseg--;
 		}
@@ -2969,21 +2919,15 @@
 				if (cnt == 5)
 					break;
 				dma_handle = sg_dma_address(s);
-#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
-				if (ha->flags.use_pci_vchannel)
-					sn_pci_set_vchan(ha->pdev,
-							 (unsigned long *)&dma_handle,
-							 SCSI_BUS_32(cmd));
-#endif
 				*dword_ptr++ =
-					cpu_to_le32(pci_dma_lo32(dma_handle));
+					cpu_to_le32(lower_32_bits(dma_handle));
 				*dword_ptr++ =
-					cpu_to_le32(pci_dma_hi32(dma_handle));
+					cpu_to_le32(upper_32_bits(dma_handle));
 				*dword_ptr++ =
 					cpu_to_le32(sg_dma_len(s));
 				dprintk(3, "S/G Segment Cont. phys_addr=%x %x, len=0x%x\n",
-					cpu_to_le32(pci_dma_hi32(dma_handle)),
-					cpu_to_le32(pci_dma_lo32(dma_handle)),
+					cpu_to_le32(upper_32_bits(dma_handle)),
+					cpu_to_le32(lower_32_bits(dma_handle)),
 					cpu_to_le32(sg_dma_len(s)));
 			}
 			remseg -= cnt;
@@ -3014,8 +2958,6 @@
 	sp->flags |= SRB_SENT;
 	ha->actthreads++;
 	WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
-	/* Enforce mmio write ordering; see comment in qla1280_isp_cmd(). */
-	mmiowb();
 
  out:
 	if (status)
@@ -3178,10 +3120,10 @@
 			if (cnt == 4)
 				break;
 			*dword_ptr++ =
-				cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
+				cpu_to_le32(lower_32_bits(sg_dma_address(s)));
 			*dword_ptr++ = cpu_to_le32(sg_dma_len(s));
 			dprintk(3, "S/G Segment phys_addr=0x%lx, len=0x%x\n",
-				(pci_dma_lo32(sg_dma_address(s))),
+				(lower_32_bits(sg_dma_address(s))),
 				(sg_dma_len(s)));
 			remseg--;
 		}
@@ -3224,13 +3166,13 @@
 				if (cnt == 7)
 					break;
 				*dword_ptr++ =
-					cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
+					cpu_to_le32(lower_32_bits(sg_dma_address(s)));
 				*dword_ptr++ =
 					cpu_to_le32(sg_dma_len(s));
 				dprintk(1,
 					"S/G Segment Cont. phys_addr=0x%x, "
 					"len=0x%x\n",
-					cpu_to_le32(pci_dma_lo32(sg_dma_address(s))),
+					cpu_to_le32(lower_32_bits(sg_dma_address(s))),
 					cpu_to_le32(sg_dma_len(s)));
 			}
 			remseg -= cnt;
@@ -3264,8 +3206,6 @@
 	sp->flags |= SRB_SENT;
 	ha->actthreads++;
 	WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
-	/* Enforce mmio write ordering; see comment in qla1280_isp_cmd(). */
-	mmiowb();
 
 out:
 	if (status)
@@ -3377,19 +3317,8 @@
 
 	/*
 	 * Update request index to mailbox4 (Request Queue In).
-	 * The mmiowb() ensures that this write is ordered with writes by other
-	 * CPUs.  Without the mmiowb(), it is possible for the following:
-	 *    CPUA posts write of index 5 to mailbox4
-	 *    CPUA releases host lock
-	 *    CPUB acquires host lock
-	 *    CPUB posts write of index 6 to mailbox4
-	 *    On PCI bus, order reverses and write of 6 posts, then index 5,
-	 *       causing chip to issue full queue of stale commands
-	 * The mmiowb() prevents future writes from crossing the barrier.
-	 * See Documentation/driver-api/device-io.rst for more information.
 	 */
 	WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
-	mmiowb();
 
 	LEAVE("qla1280_isp_cmd");
 }
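
All three mmiowb() calls, and the long ordering comment, go away because the kernel now embeds the needed barrier in spin_unlock() on the architectures that require it (the mmiowb() retirement): the CPUA/CPUB reordering scenario the comment described cannot happen while the ring index is written under the host lock. A hedged sketch of the resulting pattern:

    #include <linux/io.h>
    #include <linux/spinlock.h>

    /* Sketch only: the unlock now provides the ordering mmiowb() used to. */
    static void post_ring_index(spinlock_t *lock, u16 __iomem *mbox4, u16 index)
    {
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        writew(index, mbox4);                 /* posted MMIO write */
        spin_unlock_irqrestore(lock, flags);  /* orders it; no mmiowb() */
    }
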
@@ -4213,7 +4142,6 @@
 	.can_queue		= MAX_OUTSTANDING_COMMANDS,
 	.this_id		= -1,
 	.sg_tablesize		= SG_ALL,
-	.use_clustering		= ENABLE_CLUSTERING,
 };
 
 
@@ -4259,8 +4187,8 @@
 	ha->devnum = devnum;	/* specifies microcode load address */
 
 #ifdef QLA_64BIT_PTR
-	if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
-		if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32))) {
+	if (dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(64))) {
+		if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) {
 			printk(KERN_WARNING "scsi(%li): Unable to set a "
 			       "suitable DMA mask - aborting\n", ha->host_no);
 			error = -ENODEV;
@@ -4270,7 +4198,7 @@
 		dprintk(2, "scsi(%li): 64 Bit PCI Addressing Enabled\n",
 			ha->host_no);
 #else
-	if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32))) {
+	if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) {
 		printk(KERN_WARNING "scsi(%li): Unable to set a "
 		       "suitable DMA mask - aborting\n", ha->host_no);
 		error = -ENODEV;
@@ -4278,17 +4206,17 @@
 	}
 #endif
 
-	ha->request_ring = pci_alloc_consistent(ha->pdev,
+	ha->request_ring = dma_alloc_coherent(&ha->pdev->dev,
 			((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
-			&ha->request_dma);
+			&ha->request_dma, GFP_KERNEL);
 	if (!ha->request_ring) {
 		printk(KERN_INFO "qla1280: Failed to get request memory\n");
 		goto error_put_host;
 	}
 
-	ha->response_ring = pci_alloc_consistent(ha->pdev,
+	ha->response_ring = dma_alloc_coherent(&ha->pdev->dev,
 			((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
-			&ha->response_dma);
+			&ha->response_dma, GFP_KERNEL);
 	if (!ha->response_ring) {
 		printk(KERN_INFO "qla1280: Failed to get response memory\n");
 		goto error_free_request_ring;
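
The probe path swaps pci_set_dma_mask() for the dma_set_mask*() family with the usual negotiation: ask for 64-bit addressing, fall back to 32-bit, and only then abort. A sketch of the pattern (the driver falls back with plain dma_set_mask(); dma_set_mask_and_coherent() shown here is the common modern form):

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    static int negotiate_dma_mask(struct pci_dev *pdev)
    {
        if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
            return 0;                    /* 64-bit addressing OK */
        return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
    }
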
@@ -4370,11 +4298,11 @@
 	release_region(host->io_port, 0xff);
 #endif
  error_free_response_ring:
-	pci_free_consistent(ha->pdev,
+	dma_free_coherent(&ha->pdev->dev,
 			((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
 			ha->response_ring, ha->response_dma);
  error_free_request_ring:
-	pci_free_consistent(ha->pdev,
+	dma_free_coherent(&ha->pdev->dev,
 			((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
 			ha->request_ring, ha->request_dma);
  error_put_host:
@@ -4404,10 +4332,10 @@
 	release_region(host->io_port, 0xff);
 #endif
 
-	pci_free_consistent(ha->pdev,
+	dma_free_coherent(&ha->pdev->dev,
 			((REQUEST_ENTRY_CNT + 1) * (sizeof(request_t))),
 			ha->request_ring, ha->request_dma);
-	pci_free_consistent(ha->pdev,
+	dma_free_coherent(&ha->pdev->dev,
 			((RESPONSE_ENTRY_CNT + 1) * (sizeof(struct response))),
 			ha->response_ring, ha->response_dma);
 
diff --git a/drivers/scsi/qla1280.h b/drivers/scsi/qla1280.h
index 1522aca..a1a8aef 100644
--- a/drivers/scsi/qla1280.h
+++ b/drivers/scsi/qla1280.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /******************************************************************************
 *                  QLOGIC LINUX SOFTWARE
 *
@@ -5,16 +6,6 @@
 * Copyright (C) 2000 Qlogic Corporation
 * (www.qlogic.com)
 *
-* This program is free software; you can redistribute it and/or modify it
-* under the terms of the GNU General Public License as published by the
-* Free Software Foundation; either version 2, or (at your option) any
-* later version.
-*
-* This program is distributed in the hope that it will be useful, but
-* WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-* General Public License for more details.
-*
 ******************************************************************************/
 
 #ifndef	_QLA1280_H
@@ -1064,9 +1055,6 @@
 		uint32_t reset_active:1;		/* 3 */
 		uint32_t abort_isp_active:1;		/* 4 */
 		uint32_t disable_risc_code_load:1;	/* 5 */
-#ifdef __ia64__
-		uint32_t use_pci_vchannel:1;
-#endif
 	} flags;
 
 	struct nvram nvram;
diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig
index 036cc3f..7645018 100644
--- a/drivers/scsi/qla2xxx/Kconfig
+++ b/drivers/scsi/qla2xxx/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 config SCSI_QLA_FC
 	tristate "QLogic QLA2XXX Fibre Channel Support"
 	depends on PCI && SCSI
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 4888b99..7259bce 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -29,24 +29,27 @@
 	if (!(ha->fw_dump_reading || ha->mctp_dump_reading))
 		return 0;
 
+	mutex_lock(&ha->optrom_mutex);
 	if (IS_P3P_TYPE(ha)) {
 		if (off < ha->md_template_size) {
 			rval = memory_read_from_buffer(buf, count,
 			    &off, ha->md_tmplt_hdr, ha->md_template_size);
-			return rval;
+		} else {
+			off -= ha->md_template_size;
+			rval = memory_read_from_buffer(buf, count,
+			    &off, ha->md_dump, ha->md_dump_size);
 		}
-		off -= ha->md_template_size;
-		rval = memory_read_from_buffer(buf, count,
-		    &off, ha->md_dump, ha->md_dump_size);
-		return rval;
-	} else if (ha->mctp_dumped && ha->mctp_dump_reading)
-		return memory_read_from_buffer(buf, count, &off, ha->mctp_dump,
+	} else if (ha->mctp_dumped && ha->mctp_dump_reading) {
+		rval = memory_read_from_buffer(buf, count, &off, ha->mctp_dump,
 		    MCTP_DUMP_SIZE);
-	else if (ha->fw_dump_reading)
-		return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
+	} else if (ha->fw_dump_reading) {
+		rval = memory_read_from_buffer(buf, count, &off, ha->fw_dump,
 					ha->fw_dump_len);
-	else
-		return 0;
+	} else {
+		rval = 0;
+	}
+	mutex_unlock(&ha->optrom_mutex);
+	return rval;
 }
 
 static ssize_t
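
This is the first of many qla2xxx sysfs handlers converted to the same shape in this update: take ha->optrom_mutex, re-check chip state under the lock and bail out (-EAGAIN, -EBUSY, or 0 depending on the handler) if it is down, and funnel every exit through an unlock so no early return leaks the mutex. The skeleton, with do_transfer() as a hypothetical stand-in for the handler body:

    /* Skeleton of the locking shape applied to the handlers below. */
    static ssize_t guarded_sysfs_io(struct scsi_qla_host *vha,
                                    char *buf, size_t count)
    {
        ssize_t rval;

        mutex_lock(&vha->hw->optrom_mutex);
        if (qla2x00_chip_is_down(vha)) {
            mutex_unlock(&vha->hw->optrom_mutex);
            return -EAGAIN;
        }
        rval = do_transfer(vha, buf, count);  /* hypothetical worker */
        mutex_unlock(&vha->hw->optrom_mutex);
        return rval;
    }
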
@@ -154,13 +157,33 @@
 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
 	    struct device, kobj)));
 	struct qla_hw_data *ha = vha->hw;
+	uint32_t faddr;
+	struct active_regions active_regions = { };
 
 	if (!capable(CAP_SYS_ADMIN))
 		return 0;
 
-	if (IS_NOCACHE_VPD_TYPE(ha))
-		ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
-		    ha->nvram_size);
+	mutex_lock(&ha->optrom_mutex);
+	if (qla2x00_chip_is_down(vha)) {
+		mutex_unlock(&ha->optrom_mutex);
+		return -EAGAIN;
+	}
+
+	if (!IS_NOCACHE_VPD_TYPE(ha)) {
+		mutex_unlock(&ha->optrom_mutex);
+		goto skip;
+	}
+
+	faddr = ha->flt_region_nvram;
+	if (IS_QLA28XX(ha)) {
+		if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
+			faddr = ha->flt_region_nvram_sec;
+	}
+	ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size);
+
+	mutex_unlock(&ha->optrom_mutex);
+
+skip:
 	return memory_read_from_buffer(buf, count, &off, ha->nvram,
 					ha->nvram_size);
 }
@@ -208,10 +231,17 @@
 		return -EAGAIN;
 	}
 
+	mutex_lock(&ha->optrom_mutex);
+	if (qla2x00_chip_is_down(vha)) {
+		mutex_unlock(&ha->optrom_mutex);
+		return -EAGAIN;
+	}
+
 	/* Write NVRAM. */
-	ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->nvram_base, count);
-	ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
+	ha->isp_ops->write_nvram(vha, buf, ha->nvram_base, count);
+	ha->isp_ops->read_nvram(vha, ha->nvram, ha->nvram_base,
 	    count);
+	mutex_unlock(&ha->optrom_mutex);
 
 	ql_dbg(ql_dbg_user, vha, 0x7060,
 	    "Setting ISP_ABORT_NEEDED\n");
@@ -322,6 +352,10 @@
 		size = ha->optrom_size - start;
 
 	mutex_lock(&ha->optrom_mutex);
+	if (qla2x00_chip_is_down(vha)) {
+		mutex_unlock(&ha->optrom_mutex);
+		return -EAGAIN;
+	}
 	switch (val) {
 	case 0:
 		if (ha->optrom_state != QLA_SREADING &&
@@ -345,10 +379,10 @@
 		}
 
 		ha->optrom_region_start = start;
-		ha->optrom_region_size = start + size;
+		ha->optrom_region_size = size;
 
 		ha->optrom_state = QLA_SREADING;
-		ha->optrom_buffer = vmalloc(ha->optrom_region_size);
+		ha->optrom_buffer = vzalloc(ha->optrom_region_size);
 		if (ha->optrom_buffer == NULL) {
 			ql_log(ql_log_warn, vha, 0x7062,
 			    "Unable to allocate memory for optrom retrieval "
@@ -370,7 +404,6 @@
 		    "Reading flash region -- 0x%x/0x%x.\n",
 		    ha->optrom_region_start, ha->optrom_region_size);
 
-		memset(ha->optrom_buffer, 0, ha->optrom_region_size);
 		ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
 		    ha->optrom_region_start, ha->optrom_region_size);
 		break;
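
Besides the vmalloc()+memset() to vzalloc() cleanup, this hunk fixes a real bug: optrom_region_size was set to start + size, so every later read, write, and flash operation on the staged region used an inflated length. The corrected staging step, as a sketch:

    #include <linux/vmalloc.h>

    /* Stage 'size' bytes of option ROM starting at flash offset 'start'.
     * The stored length is the region size alone (was: start + size),
     * and vzalloc() returns the buffer already zeroed. */
    static void *stage_optrom(u32 start, u32 size,
                              u32 *region_start, u32 *region_size)
    {
        *region_start = start;
        *region_size = size;
        return vzalloc(size);
    }
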
@@ -399,16 +432,15 @@
 		 * 	0x000000 -> 0x07ffff -- Boot code.
 		 * 	0x080000 -> 0x0fffff -- Firmware.
 		 * 	0x120000 -> 0x12ffff -- VPD and HBA parameters.
+		 *
+		 * > ISP25xx type boards:
+		 *
+		 *      None -- should go through BSG.
 		 */
 		valid = 0;
 		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
 			valid = 1;
-		else if (start == (ha->flt_region_boot * 4) ||
-		    start == (ha->flt_region_fw * 4))
-			valid = 1;
-		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)
-			|| IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)
-			|| IS_QLA27XX(ha))
+		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
 			valid = 1;
 		if (!valid) {
 			ql_log(ql_log_warn, vha, 0x7065,
@@ -418,10 +450,10 @@
 		}
 
 		ha->optrom_region_start = start;
-		ha->optrom_region_size = start + size;
+		ha->optrom_region_size = size;
 
 		ha->optrom_state = QLA_SWRITING;
-		ha->optrom_buffer = vmalloc(ha->optrom_region_size);
+		ha->optrom_buffer = vzalloc(ha->optrom_region_size);
 		if (ha->optrom_buffer == NULL) {
 			ql_log(ql_log_warn, vha, 0x7066,
 			    "Unable to allocate memory for optrom update "
@@ -436,7 +468,6 @@
 		    "Staging flash region write -- 0x%x/0x%x.\n",
 		    ha->optrom_region_start, ha->optrom_region_size);
 
-		memset(ha->optrom_buffer, 0, ha->optrom_region_size);
 		break;
 	case 3:
 		if (ha->optrom_state != QLA_SWRITING) {
@@ -455,8 +486,10 @@
 		    "Writing flash region -- 0x%x/0x%x.\n",
 		    ha->optrom_region_start, ha->optrom_region_size);
 
-		ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
+		rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
 		    ha->optrom_region_start, ha->optrom_region_size);
+		if (rval)
+			rval = -EIO;
 		break;
 	default:
 		rval = -EINVAL;
@@ -485,6 +518,7 @@
 	    struct device, kobj)));
 	struct qla_hw_data *ha = vha->hw;
 	uint32_t faddr;
+	struct active_regions active_regions = { };
 
 	if (unlikely(pci_channel_offline(ha->pdev)))
 		return -EAGAIN;
@@ -492,16 +526,33 @@
 	if (!capable(CAP_SYS_ADMIN))
 		return -EINVAL;
 
-	if (IS_NOCACHE_VPD_TYPE(ha)) {
-		faddr = ha->flt_region_vpd << 2;
+	if (IS_NOCACHE_VPD_TYPE(ha))
+		goto skip;
 
-		if (IS_QLA27XX(ha) &&
-		    qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
+	faddr = ha->flt_region_vpd << 2;
+
+	if (IS_QLA28XX(ha)) {
+		qla28xx_get_aux_images(vha, &active_regions);
+		if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
 			faddr = ha->flt_region_vpd_sec << 2;
 
-		ha->isp_ops->read_optrom(vha, ha->vpd, faddr,
-		    ha->vpd_size);
+		ql_dbg(ql_dbg_init, vha, 0x7070,
+		    "Loading %s nvram image.\n",
+		    active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
+		    "primary" : "secondary");
 	}
+
+	mutex_lock(&ha->optrom_mutex);
+	if (qla2x00_chip_is_down(vha)) {
+		mutex_unlock(&ha->optrom_mutex);
+		return -EAGAIN;
+	}
+
+	ha->isp_ops->read_optrom(vha, ha->vpd, faddr, ha->vpd_size);
+	mutex_unlock(&ha->optrom_mutex);
+
+	ha->isp_ops->read_optrom(vha, ha->vpd, faddr, ha->vpd_size);
+skip:
 	return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
 }
 
@@ -531,16 +582,25 @@
 		return -EAGAIN;
 	}
 
+	mutex_lock(&ha->optrom_mutex);
+	if (qla2x00_chip_is_down(vha)) {
+		mutex_unlock(&ha->optrom_mutex);
+		return -EAGAIN;
+	}
+
 	/* Write NVRAM. */
-	ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->vpd_base, count);
-	ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, ha->vpd_base, count);
+	ha->isp_ops->write_nvram(vha, buf, ha->vpd_base, count);
+	ha->isp_ops->read_nvram(vha, ha->vpd, ha->vpd_base, count);
 
 	/* Update flash version information for 4Gb & above. */
-	if (!IS_FWI2_CAPABLE(ha))
+	if (!IS_FWI2_CAPABLE(ha)) {
+		mutex_unlock(&ha->optrom_mutex);
 		return -EINVAL;
+	}
 
 	tmp_data = vmalloc(256);
 	if (!tmp_data) {
+		mutex_unlock(&ha->optrom_mutex);
 		ql_log(ql_log_warn, vha, 0x706b,
 		    "Unable to allocate memory for VPD information update.\n");
 		return -ENOMEM;
@@ -548,6 +608,8 @@
 	ha->isp_ops->get_flash_version(vha, tmp_data);
 	vfree(tmp_data);
 
+	mutex_unlock(&ha->optrom_mutex);
+
 	return count;
 }
 
@@ -573,10 +635,15 @@
 	if (!capable(CAP_SYS_ADMIN) || off != 0 || count < SFP_DEV_SIZE)
 		return 0;
 
-	if (qla2x00_chip_is_down(vha))
+	mutex_lock(&vha->hw->optrom_mutex);
+	if (qla2x00_chip_is_down(vha)) {
+		mutex_unlock(&vha->hw->optrom_mutex);
 		return 0;
+	}
 
 	rval = qla2x00_read_sfp_dev(vha, buf, count);
+	mutex_unlock(&vha->hw->optrom_mutex);
+
 	if (rval)
 		return -EIO;
 
@@ -604,6 +671,7 @@
 	int type;
 	uint32_t idc_control;
 	uint8_t *tmp_data = NULL;
+
 	if (off != 0)
 		return -EINVAL;
 
@@ -641,7 +709,7 @@
 		ql_log(ql_log_info, vha, 0x706f,
 		    "Issuing MPI reset.\n");
 
-		if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
 			uint32_t idc_control;
 
 			qla83xx_idc_lock(vha, 0);
@@ -655,7 +723,8 @@
 			break;
 		} else {
 			/* Make sure FC side is not in reset */
-			qla2x00_wait_for_hba_online(vha);
+			WARN_ON_ONCE(qla2x00_wait_for_hba_online(vha) !=
+				     QLA_SUCCESS);
 
 			/* Issue MPI reset */
 			scsi_block_requests(vha->host);
@@ -785,9 +854,11 @@
 
 	if (unlikely(pci_channel_offline(ha->pdev)))
 		return 0;
-
-	if (qla2x00_chip_is_down(vha))
+	mutex_lock(&vha->hw->optrom_mutex);
+	if (qla2x00_chip_is_down(vha)) {
+		mutex_unlock(&vha->hw->optrom_mutex);
 		return 0;
+	}
 
 	if (ha->xgmac_data)
 		goto do_read;
@@ -795,6 +866,7 @@
 	ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
 	    &ha->xgmac_data_dma, GFP_KERNEL);
 	if (!ha->xgmac_data) {
+		mutex_unlock(&vha->hw->optrom_mutex);
 		ql_log(ql_log_warn, vha, 0x7076,
 		    "Unable to allocate memory for XGMAC read-data.\n");
 		return 0;
@@ -806,13 +878,15 @@
 
 	rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
 	    XGMAC_DATA_SIZE, &actual_size);
+
+	mutex_unlock(&vha->hw->optrom_mutex);
 	if (rval != QLA_SUCCESS) {
 		ql_log(ql_log_warn, vha, 0x7077,
 		    "Unable to read XGMAC data (%x).\n", rval);
 		count = 0;
 	}
 
-	count = actual_size > count ? count: actual_size;
+	count = actual_size > count ? count : actual_size;
 	memcpy(buf, ha->xgmac_data, count);
 
 	return count;
@@ -842,13 +916,16 @@
 
 	if (ha->dcbx_tlv)
 		goto do_read;
-
-	if (qla2x00_chip_is_down(vha))
+	mutex_lock(&vha->hw->optrom_mutex);
+	if (qla2x00_chip_is_down(vha)) {
+		mutex_unlock(&vha->hw->optrom_mutex);
 		return 0;
+	}
 
 	ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
 	    &ha->dcbx_tlv_dma, GFP_KERNEL);
 	if (!ha->dcbx_tlv) {
+		mutex_unlock(&vha->hw->optrom_mutex);
 		ql_log(ql_log_warn, vha, 0x7078,
 		    "Unable to allocate memory for DCBX TLV read-data.\n");
 		return -ENOMEM;
@@ -859,6 +936,9 @@
 
 	rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
 	    DCBX_TLV_DATA_SIZE);
+
+	mutex_unlock(&vha->hw->optrom_mutex);
+
 	if (rval != QLA_SUCCESS) {
 		ql_log(ql_log_warn, vha, 0x7079,
 		    "Unable to read DCBX TLV (%x).\n", rval);
@@ -882,7 +962,7 @@
 static struct sysfs_entry {
 	char *name;
 	struct bin_attribute *attr;
-	int is4GBp_only;
+	int type;
 } bin_file_entries[] = {
 	{ "fw_dump", &sysfs_fw_dump_attr, },
 	{ "nvram", &sysfs_nvram_attr, },
@@ -905,11 +985,11 @@
 	int ret;
 
 	for (iter = bin_file_entries; iter->name; iter++) {
-		if (iter->is4GBp_only && !IS_FWI2_CAPABLE(vha->hw))
+		if (iter->type && !IS_FWI2_CAPABLE(vha->hw))
 			continue;
-		if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw))
+		if (iter->type == 2 && !IS_QLA25XX(vha->hw))
 			continue;
-		if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))
+		if (iter->type == 3 && !(IS_CNA_CAPABLE(vha->hw)))
 			continue;
 
 		ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
@@ -933,13 +1013,14 @@
 	struct qla_hw_data *ha = vha->hw;
 
 	for (iter = bin_file_entries; iter->name; iter++) {
-		if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha))
+		if (iter->type && !IS_FWI2_CAPABLE(ha))
 			continue;
-		if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha))
+		if (iter->type == 2 && !IS_QLA25XX(ha))
 			continue;
-		if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))
+		if (iter->type == 3 && !(IS_CNA_CAPABLE(ha)))
 			continue;
-		if (iter->is4GBp_only == 0x27 && !IS_QLA27XX(vha->hw))
+		if (iter->type == 0x27 &&
+		    (!IS_QLA27XX(ha) || !IS_QLA28XX(ha)))
 			continue;
 
 		sysfs_remove_bin_file(&host->shost_gendev.kobj,
@@ -953,7 +1034,7 @@
 /* Scsi_Host attributes. */
 
 static ssize_t
-qla2x00_drvr_version_show(struct device *dev,
+qla2x00_driver_version_show(struct device *dev,
 			  struct device_attribute *attr, char *buf)
 {
 	return scnprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str);
@@ -997,6 +1078,7 @@
 		      char *buf)
 {
 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
 	return scnprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
 }
 
@@ -1030,6 +1112,7 @@
 			char *buf)
 {
 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
 	return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_desc);
 }
 
@@ -1041,7 +1124,8 @@
 	char pci_info[30];
 
 	return scnprintf(buf, PAGE_SIZE, "%s\n",
-	    vha->hw->isp_ops->pci_info_str(vha, pci_info));
+			 vha->hw->isp_ops->pci_info_str(vha, pci_info,
+							sizeof(pci_info)));
 }
 
 static ssize_t
@@ -1159,6 +1243,34 @@
 }
 
 static ssize_t
+qla_zio_threshold_show(struct device *dev, struct device_attribute *attr,
+		       char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+	return scnprintf(buf, PAGE_SIZE, "%d exchanges\n",
+	    vha->hw->last_zio_threshold);
+}
+
+static ssize_t
+qla_zio_threshold_store(struct device *dev, struct device_attribute *attr,
+    const char *buf, size_t count)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	int val = 0;
+
+	if (vha->hw->zio_mode != QLA_ZIO_MODE_6)
+		return -EINVAL;
+	if (sscanf(buf, "%d", &val) != 1)
+		return -EINVAL;
+	if (val < 0 || val > 256)
+		return -ERANGE;
+
+	atomic_set(&vha->hw->zio_threshold, val);
+	return strlen(buf);
+}
+
+static ssize_t
 qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,
 		    char *buf)
 {
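
qla_zio_threshold_show/store add a runtime knob for the ZIO exchange threshold: it is writable only while the adapter runs in ZIO mode 6, accepts 0-256, and is applied asynchronously via atomic_set(). Assuming the attribute is exposed as zio_threshold under the host's scsi_host sysfs directory, something like echo 64 > /sys/class/scsi_host/hostN/zio_threshold would set it. The validation in isolation:

    /* Store-side validation mirrored from qla_zio_threshold_store(). */
    static int parse_zio_threshold(const char *buf, int zio_mode, int *out)
    {
        int val;

        if (zio_mode != 6 /* QLA_ZIO_MODE_6 */)
            return -EINVAL;
        if (sscanf(buf, "%d", &val) != 1)
            return -EINVAL;
        if (val < 0 || val > 256)
            return -ERANGE;
        *out = val;
        return 0;
    }
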
@@ -1184,15 +1296,17 @@
 	if (IS_QLA2100(ha) || IS_QLA2200(ha))
 		return -EPERM;
 
+	if (sscanf(buf, "%d", &val) != 1)
+		return -EINVAL;
+
+	mutex_lock(&vha->hw->optrom_mutex);
 	if (qla2x00_chip_is_down(vha)) {
+		mutex_unlock(&vha->hw->optrom_mutex);
 		ql_log(ql_log_warn, vha, 0x707a,
 		    "Abort ISP active -- ignoring beacon request.\n");
 		return -EBUSY;
 	}
 
-	if (sscanf(buf, "%d", &val) != 1)
-		return -EINVAL;
-
 	if (val)
 		rval = ha->isp_ops->beacon_on(vha);
 	else
@@ -1201,6 +1315,8 @@
 	if (rval != QLA_SUCCESS)
 		count = 0;
 
+	mutex_unlock(&vha->hw->optrom_mutex);
+
 	return count;
 }
 
@@ -1210,6 +1326,7 @@
 {
 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
 	struct qla_hw_data *ha = vha->hw;
+
 	return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
 	    ha->bios_revision[0]);
 }
@@ -1220,6 +1337,7 @@
 {
 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
 	struct qla_hw_data *ha = vha->hw;
+
 	return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
 	    ha->efi_revision[0]);
 }
@@ -1230,6 +1348,7 @@
 {
 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
 	struct qla_hw_data *ha = vha->hw;
+
 	return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
 	    ha->fcode_revision[0]);
 }
@@ -1240,6 +1359,7 @@
 {
 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
 	struct qla_hw_data *ha = vha->hw;
+
 	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
 	    ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
 	    ha->fw_revision[3]);
@@ -1252,7 +1372,8 @@
 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
 	struct qla_hw_data *ha = vha->hw;
 
-	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA27XX(ha))
+	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
 		return scnprintf(buf, PAGE_SIZE, "\n");
 
 	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",
@@ -1265,6 +1386,7 @@
 			      struct device_attribute *attr, char *buf)
 {
 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
 	return scnprintf(buf, PAGE_SIZE, "%d\n",
 	    vha->qla_stats.total_isp_aborts);
 }
@@ -1274,24 +1396,40 @@
 	struct device_attribute *attr, char *buf)
 {
 	int rval = QLA_SUCCESS;
-	uint16_t status[2] = {0, 0};
+	uint16_t status[2] = { 0 };
 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
 	struct qla_hw_data *ha = vha->hw;
 
 	if (!IS_QLA84XX(ha))
 		return scnprintf(buf, PAGE_SIZE, "\n");
 
-	if (ha->cs84xx->op_fw_version == 0)
+	if (!ha->cs84xx->op_fw_version) {
 		rval = qla84xx_verify_chip(vha, status);
 
-	if ((rval == QLA_SUCCESS) && (status[0] == 0))
-		return scnprintf(buf, PAGE_SIZE, "%u\n",
-			(uint32_t)ha->cs84xx->op_fw_version);
+		if (!rval && !status[0])
+			return scnprintf(buf, PAGE_SIZE, "%u\n",
+			    (uint32_t)ha->cs84xx->op_fw_version);
+	}
 
 	return scnprintf(buf, PAGE_SIZE, "\n");
 }
 
 static ssize_t
+qla2x00_serdes_version_show(struct device *dev, struct device_attribute *attr,
+    char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+		return scnprintf(buf, PAGE_SIZE, "\n");
+
+	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
+	    ha->serdes_version[0], ha->serdes_version[1],
+	    ha->serdes_version[2]);
+}
+
+static ssize_t
 qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
     char *buf)
 {
@@ -1299,7 +1437,7 @@
 	struct qla_hw_data *ha = vha->hw;
 
 	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha) &&
-	    !IS_QLA27XX(ha))
+	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
 		return scnprintf(buf, PAGE_SIZE, "\n");
 
 	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
@@ -1370,18 +1508,24 @@
 {
 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
 	uint16_t temp = 0;
+	int rc;
 
+	mutex_lock(&vha->hw->optrom_mutex);
 	if (qla2x00_chip_is_down(vha)) {
+		mutex_unlock(&vha->hw->optrom_mutex);
 		ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n");
 		goto done;
 	}
 
 	if (vha->hw->flags.eeh_busy) {
+		mutex_unlock(&vha->hw->optrom_mutex);
 		ql_log(ql_log_warn, vha, 0x70dd, "PCI EEH busy.\n");
 		goto done;
 	}
 
-	if (qla2x00_get_thermal_temp(vha, &temp) == QLA_SUCCESS)
+	rc = qla2x00_get_thermal_temp(vha, &temp);
+	mutex_unlock(&vha->hw->optrom_mutex);
+	if (rc == QLA_SUCCESS)
 		return scnprintf(buf, PAGE_SIZE, "%d\n", temp);
 
 done:
@@ -1402,13 +1546,24 @@
 		return scnprintf(buf, PAGE_SIZE, "0x%x\n", pstate);
 	}
 
-	if (qla2x00_chip_is_down(vha))
+	mutex_lock(&vha->hw->optrom_mutex);
+	if (qla2x00_chip_is_down(vha)) {
+		mutex_unlock(&vha->hw->optrom_mutex);
 		ql_log(ql_log_warn, vha, 0x707c,
 		    "ISP reset active.\n");
-	else if (!vha->hw->flags.eeh_busy)
-		rval = qla2x00_get_firmware_state(vha, state);
-	if (rval != QLA_SUCCESS)
+		goto out;
+	} else if (vha->hw->flags.eeh_busy) {
+		mutex_unlock(&vha->hw->optrom_mutex);
+		goto out;
+	}
+
+	rval = qla2x00_get_firmware_state(vha, state);
+	mutex_unlock(&vha->hw->optrom_mutex);
+out:
+	if (rval != QLA_SUCCESS) {
 		memset(state, -1, sizeof(state));
+		rval = qla2x00_get_firmware_state(vha, state);
+	}
 
 	return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
 	    state[0], state[1], state[2], state[3], state[4], state[5]);
@@ -1495,7 +1650,7 @@
 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
 	struct qla_hw_data *ha = vha->hw;
 
-	if (!IS_QLA27XX(ha))
+	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
 		return scnprintf(buf, PAGE_SIZE, "\n");
 
 	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
@@ -1503,38 +1658,596 @@
 }
 
 static ssize_t
-qla2x00_min_link_speed_show(struct device *dev, struct device_attribute *attr,
-    char *buf)
+qla2x00_min_supported_speed_show(struct device *dev,
+    struct device_attribute *attr, char *buf)
 {
 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
 	struct qla_hw_data *ha = vha->hw;
 
-	if (!IS_QLA27XX(ha))
+	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
 		return scnprintf(buf, PAGE_SIZE, "\n");
 
 	return scnprintf(buf, PAGE_SIZE, "%s\n",
-	    ha->min_link_speed == 5 ? "32Gps" :
-	    ha->min_link_speed == 4 ? "16Gps" :
-	    ha->min_link_speed == 3 ? "8Gps" :
-	    ha->min_link_speed == 2 ? "4Gps" :
-	    ha->min_link_speed != 0 ? "unknown" : "");
+	    ha->min_supported_speed == 6 ? "64Gps" :
+	    ha->min_supported_speed == 5 ? "32Gps" :
+	    ha->min_supported_speed == 4 ? "16Gps" :
+	    ha->min_supported_speed == 3 ? "8Gps" :
+	    ha->min_supported_speed == 2 ? "4Gps" :
+	    ha->min_supported_speed != 0 ? "unknown" : "");
 }
 
 static ssize_t
-qla2x00_max_speed_sup_show(struct device *dev, struct device_attribute *attr,
-    char *buf)
+qla2x00_max_supported_speed_show(struct device *dev,
+    struct device_attribute *attr, char *buf)
 {
 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
 	struct qla_hw_data *ha = vha->hw;
 
-	if (!IS_QLA27XX(ha))
+	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
 		return scnprintf(buf, PAGE_SIZE, "\n");
 
 	return scnprintf(buf, PAGE_SIZE, "%s\n",
-	    ha->max_speed_sup ? "32Gps" : "16Gps");
+	    ha->max_supported_speed  == 2 ? "64Gps" :
+	    ha->max_supported_speed  == 1 ? "32Gps" :
+	    ha->max_supported_speed  == 0 ? "16Gps" : "unknown");
 }
 
-static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
+static ssize_t
+qla2x00_port_speed_store(struct device *dev, struct device_attribute *attr,
+    const char *buf, size_t count)
+{
+	struct scsi_qla_host *vha = shost_priv(dev_to_shost(dev));
+	ulong type, speed;
+	int oldspeed, rval;
+	int mode = QLA_SET_DATA_RATE_LR;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) {
+		ql_log(ql_log_warn, vha, 0x70d8,
+		    "Speed setting not supported \n");
+		return -EINVAL;
+	}
+
+	rval = kstrtol(buf, 10, &type);
+	if (rval)
+		return rval;
+	speed = type;
+	if (type == 40 || type == 80 || type == 160 ||
+	    type == 320) {
+		ql_dbg(ql_dbg_user, vha, 0x70d9,
+		    "Setting will be affected after a loss of sync\n");
+		type = type/10;
+		mode = QLA_SET_DATA_RATE_NOLR;
+	}
+
+	oldspeed = ha->set_data_rate;
+
+	switch (type) {
+	case 0:
+		ha->set_data_rate = PORT_SPEED_AUTO;
+		break;
+	case 4:
+		ha->set_data_rate = PORT_SPEED_4GB;
+		break;
+	case 8:
+		ha->set_data_rate = PORT_SPEED_8GB;
+		break;
+	case 16:
+		ha->set_data_rate = PORT_SPEED_16GB;
+		break;
+	case 32:
+		ha->set_data_rate = PORT_SPEED_32GB;
+		break;
+	default:
+		ql_log(ql_log_warn, vha, 0x1199,
+		    "Unrecognized speed setting:%lx. Setting Autoneg\n",
+		    speed);
+		ha->set_data_rate = PORT_SPEED_AUTO;
+	}
+
+	if (qla2x00_chip_is_down(vha) || (oldspeed == ha->set_data_rate))
+		return -EINVAL;
+
+	ql_log(ql_log_info, vha, 0x70da,
+	    "Setting speed to %lx Gbps \n", type);
+
+	rval = qla2x00_set_data_rate(vha, mode);
+	if (rval != QLA_SUCCESS)
+		return -EIO;
+
+	return strlen(buf);
+}
+
+static ssize_t
+qla2x00_port_speed_show(struct device *dev, struct device_attribute *attr,
+    char *buf)
+{
+	struct scsi_qla_host *vha = shost_priv(dev_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
+	ssize_t rval;
+	char *spd[7] = {"0", "0", "0", "4", "8", "16", "32"};
+
+	rval = qla2x00_get_data_rate(vha);
+	if (rval != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0x70db,
+		    "Unable to get port speed rval:%zd\n", rval);
+		return -EINVAL;
+	}
+
+	ql_log(ql_log_info, vha, 0x70d6,
+	    "port speed:%d\n", ha->link_data_rate);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", spd[ha->link_data_rate]);
+}
+
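
qla2x00_port_speed_store accepts the link rate in Gb/s: plain values (4, 8, 16, 32) program the rate with long-reach behaviour (QLA_SET_DATA_RATE_LR), while ten-times-scaled values (40 through 320) select the same rate with LR disabled (QLA_SET_DATA_RATE_NOLR), taking effect only after the next loss of sync; anything unrecognized falls back to autonegotiation. The decode step in isolation:

    /* Decode the user value: 40/80/160/320 mean "rate/10, no long reach". */
    static unsigned long decode_port_speed(unsigned long type, bool *no_lr)
    {
        *no_lr = (type == 40 || type == 80 || type == 160 || type == 320);
        return *no_lr ? type / 10 : type;
    }
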
+/* ----- */
+
+static ssize_t
+qlini_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	int len = 0;
+
+	len += scnprintf(buf + len, PAGE_SIZE-len,
+	    "Supported options: enabled | disabled | dual | exclusive\n");
+
+	/* --- */
+	len += scnprintf(buf + len, PAGE_SIZE-len, "Current selection: ");
+
+	switch (vha->qlini_mode) {
+	case QLA2XXX_INI_MODE_EXCLUSIVE:
+		len += scnprintf(buf + len, PAGE_SIZE-len,
+		    QLA2XXX_INI_MODE_STR_EXCLUSIVE);
+		break;
+	case QLA2XXX_INI_MODE_DISABLED:
+		len += scnprintf(buf + len, PAGE_SIZE-len,
+		    QLA2XXX_INI_MODE_STR_DISABLED);
+		break;
+	case QLA2XXX_INI_MODE_ENABLED:
+		len += scnprintf(buf + len, PAGE_SIZE-len,
+		    QLA2XXX_INI_MODE_STR_ENABLED);
+		break;
+	case QLA2XXX_INI_MODE_DUAL:
+		len += scnprintf(buf + len, PAGE_SIZE-len,
+		    QLA2XXX_INI_MODE_STR_DUAL);
+		break;
+	}
+	len += scnprintf(buf + len, PAGE_SIZE-len, "\n");
+
+	return len;
+}
+
+static char *mode_to_str[] = {
+	"exclusive",
+	"disabled",
+	"enabled",
+	"dual",
+};
+
+#define NEED_EXCH_OFFLOAD(_exchg) ((_exchg) > FW_DEF_EXCHANGES_CNT)
+static int qla_set_ini_mode(scsi_qla_host_t *vha, int op)
+{
+	int rc = 0;
+	enum {
+		NO_ACTION,
+		MODE_CHANGE_ACCEPT,
+		MODE_CHANGE_NO_ACTION,
+		TARGET_STILL_ACTIVE,
+	};
+	int action = NO_ACTION;
+	int set_mode = 0;
+	u8  eo_toggle = 0;	/* exchange offload flipped */
+
+	switch (vha->qlini_mode) {
+	case QLA2XXX_INI_MODE_DISABLED:
+		switch (op) {
+		case QLA2XXX_INI_MODE_DISABLED:
+			if (qla_tgt_mode_enabled(vha)) {
+				if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
+				    vha->hw->flags.exchoffld_enabled)
+					eo_toggle = 1;
+				if (((vha->ql2xexchoffld !=
+				    vha->u_ql2xexchoffld) &&
+				    NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
+				    eo_toggle) {
+					/*
+					 * The number of exchange to be offload
+					 * was tweaked or offload option was
+					 * flipped
+					 */
+					action = MODE_CHANGE_ACCEPT;
+				} else {
+					action = MODE_CHANGE_NO_ACTION;
+				}
+			} else {
+				action = MODE_CHANGE_NO_ACTION;
+			}
+			break;
+		case QLA2XXX_INI_MODE_EXCLUSIVE:
+			if (qla_tgt_mode_enabled(vha)) {
+				if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
+				    vha->hw->flags.exchoffld_enabled)
+					eo_toggle = 1;
+				if (((vha->ql2xexchoffld !=
+				    vha->u_ql2xexchoffld) &&
+				    NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
+				    eo_toggle) {
+					/*
+					 * The number of exchange to be offload
+					 * was tweaked or offload option was
+					 * flipped
+					 */
+					action = MODE_CHANGE_ACCEPT;
+				} else {
+					action = MODE_CHANGE_NO_ACTION;
+				}
+			} else {
+				action = MODE_CHANGE_ACCEPT;
+			}
+			break;
+		case QLA2XXX_INI_MODE_DUAL:
+			action = MODE_CHANGE_ACCEPT;
+			/* active_mode is target only, reset it to dual */
+			if (qla_tgt_mode_enabled(vha)) {
+				set_mode = 1;
+				action = MODE_CHANGE_ACCEPT;
+			} else {
+				action = MODE_CHANGE_NO_ACTION;
+			}
+			break;
+
+		case QLA2XXX_INI_MODE_ENABLED:
+			if (qla_tgt_mode_enabled(vha))
+				action = TARGET_STILL_ACTIVE;
+			else {
+				action = MODE_CHANGE_ACCEPT;
+				set_mode = 1;
+			}
+			break;
+		}
+		break;
+
+	case QLA2XXX_INI_MODE_EXCLUSIVE:
+		switch (op) {
+		case QLA2XXX_INI_MODE_EXCLUSIVE:
+			if (qla_tgt_mode_enabled(vha)) {
+				if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
+				    vha->hw->flags.exchoffld_enabled)
+					eo_toggle = 1;
+				if (((vha->ql2xexchoffld !=
+				    vha->u_ql2xexchoffld) &&
+				    NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
+				    eo_toggle)
+					/*
+					 * The number of exchanges to be
+					 * offloaded was tweaked, or the
+					 * offload option was flipped.
+					 */
+					action = MODE_CHANGE_ACCEPT;
+				else
+					action = NO_ACTION;
+			} else
+				action = NO_ACTION;
+
+			break;
+
+		case QLA2XXX_INI_MODE_DISABLED:
+			if (qla_tgt_mode_enabled(vha)) {
+				if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
+				    vha->hw->flags.exchoffld_enabled)
+					eo_toggle = 1;
+				if (((vha->ql2xexchoffld !=
+				      vha->u_ql2xexchoffld) &&
+				    NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
+				    eo_toggle)
+					action = MODE_CHANGE_ACCEPT;
+				else
+					action = MODE_CHANGE_NO_ACTION;
+			} else
+				action = MODE_CHANGE_NO_ACTION;
+			break;
+
+		case QLA2XXX_INI_MODE_DUAL: /* exclusive -> dual */
+			action = MODE_CHANGE_ACCEPT;
+			if (qla_tgt_mode_enabled(vha))
+				set_mode = 1;
+			break;
+
+		case QLA2XXX_INI_MODE_ENABLED:
+			if (qla_tgt_mode_enabled(vha))
+				action = TARGET_STILL_ACTIVE;
+			else {
+				if (vha->hw->flags.fw_started)
+					action = MODE_CHANGE_NO_ACTION;
+				else
+					action = MODE_CHANGE_ACCEPT;
+			}
+			break;
+		}
+		break;
+
+	case QLA2XXX_INI_MODE_ENABLED:
+		switch (op) {
+		case QLA2XXX_INI_MODE_ENABLED:
+			if (NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg) !=
+			    vha->hw->flags.exchoffld_enabled)
+				eo_toggle = 1;
+			if (((vha->ql2xiniexchg != vha->u_ql2xiniexchg) &&
+				NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg)) ||
+			    eo_toggle)
+				action = MODE_CHANGE_ACCEPT;
+			else
+				action = NO_ACTION;
+			break;
+		case QLA2XXX_INI_MODE_DUAL:
+		case QLA2XXX_INI_MODE_DISABLED:
+			action = MODE_CHANGE_ACCEPT;
+			break;
+		default:
+			action = MODE_CHANGE_NO_ACTION;
+			break;
+		}
+		break;
+
+	case QLA2XXX_INI_MODE_DUAL:
+		switch (op) {
+		case QLA2XXX_INI_MODE_DUAL:
+			if (qla_tgt_mode_enabled(vha) ||
+			    qla_dual_mode_enabled(vha)) {
+				if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld +
+					vha->u_ql2xiniexchg) !=
+				    vha->hw->flags.exchoffld_enabled)
+					eo_toggle = 1;
+
+				if ((((vha->ql2xexchoffld +
+				       vha->ql2xiniexchg) !=
+				    (vha->u_ql2xiniexchg +
+				     vha->u_ql2xexchoffld)) &&
+				    NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg +
+					vha->u_ql2xexchoffld)) || eo_toggle)
+					action = MODE_CHANGE_ACCEPT;
+				else
+					action = NO_ACTION;
+			} else {
+				if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld +
+					vha->u_ql2xiniexchg) !=
+				    vha->hw->flags.exchoffld_enabled)
+					eo_toggle = 1;
+
+				if ((((vha->ql2xexchoffld + vha->ql2xiniexchg)
+				    != (vha->u_ql2xiniexchg +
+					vha->u_ql2xexchoffld)) &&
+				    NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg +
+					vha->u_ql2xexchoffld)) || eo_toggle)
+					action = MODE_CHANGE_NO_ACTION;
+				else
+					action = NO_ACTION;
+			}
+			break;
+
+		case QLA2XXX_INI_MODE_DISABLED:
+			if (qla_tgt_mode_enabled(vha) ||
+			    qla_dual_mode_enabled(vha)) {
+				/* turning off initiator mode */
+				set_mode = 1;
+				action = MODE_CHANGE_ACCEPT;
+			} else {
+				action = MODE_CHANGE_NO_ACTION;
+			}
+			break;
+
+		case QLA2XXX_INI_MODE_EXCLUSIVE:
+			action = MODE_CHANGE_ACCEPT;
+			if (qla_tgt_mode_enabled(vha) ||
+			    qla_dual_mode_enabled(vha))
+				set_mode = 1;
+			break;
+
+		case QLA2XXX_INI_MODE_ENABLED:
+			if (qla_tgt_mode_enabled(vha) ||
+			    qla_dual_mode_enabled(vha)) {
+				action = TARGET_STILL_ACTIVE;
+			} else {
+				action = MODE_CHANGE_ACCEPT;
+			}
+		}
+		break;
+	}
+
+	switch (action) {
+	case MODE_CHANGE_ACCEPT:
+		ql_log(ql_log_warn, vha, 0xffff,
+		    "Mode change accepted. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n",
+		    mode_to_str[vha->qlini_mode], mode_to_str[op],
+		    vha->ql2xexchoffld, vha->u_ql2xexchoffld,
+		    vha->ql2xiniexchg, vha->u_ql2xiniexchg);
+
+		vha->qlini_mode = op;
+		vha->ql2xexchoffld = vha->u_ql2xexchoffld;
+		vha->ql2xiniexchg = vha->u_ql2xiniexchg;
+		if (set_mode)
+			qlt_set_mode(vha);
+		vha->flags.online = 1;
+		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+		break;
+
+	case MODE_CHANGE_NO_ACTION:
+		ql_log(ql_log_warn, vha, 0xffff,
+		    "Mode is set. No action taken. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n",
+		    mode_to_str[vha->qlini_mode], mode_to_str[op],
+		    vha->ql2xexchoffld, vha->u_ql2xexchoffld,
+		    vha->ql2xiniexchg, vha->u_ql2xiniexchg);
+		vha->qlini_mode = op;
+		vha->ql2xexchoffld = vha->u_ql2xexchoffld;
+		vha->ql2xiniexchg = vha->u_ql2xiniexchg;
+		break;
+
+	case TARGET_STILL_ACTIVE:
+		ql_log(ql_log_warn, vha, 0xffff,
+		    "Target Mode is active. Unable to change Mode.\n");
+		break;
+
+	case NO_ACTION:
+	default:
+		ql_log(ql_log_warn, vha, 0xffff,
+		    "Mode unchanged. No action taken. %d|%d pct %d|%d.\n",
+		    vha->qlini_mode, op,
+		    vha->ql2xexchoffld, vha->u_ql2xexchoffld);
+		break;
+	}
+
+	return rc;
+}
+
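Editor's note: the nested switch above is easier to audit as a table. The following summary is derived purely from the code ("exch?" means the change is accepted only when the relevant exchange count or the offload option changed, otherwise "no action" is logged):

/*
 *  from      -> requested   tgt/dual mode off      tgt/dual mode on
 *  disabled  -> disabled    no action              exch?
 *  disabled  -> exclusive   accept                 exch?
 *  disabled  -> dual        no action              accept + set mode
 *  disabled  -> enabled     accept + set mode      target still active
 *  exclusive -> exclusive   no action              exch?
 *  exclusive -> disabled    no action              exch?
 *  exclusive -> dual        accept                 accept + set mode
 *  exclusive -> enabled     accept if fw stopped   target still active
 *  enabled   -> enabled     exch? (initiator counts; no mode check)
 *  enabled   -> dual        accept
 *  enabled   -> disabled    accept
 *  enabled   -> exclusive   no action
 *  dual      -> dual        no action              exch? (combined counts)
 *  dual      -> disabled    no action              accept + set mode
 *  dual      -> exclusive   accept                 accept + set mode
 *  dual      -> enabled     accept                 target still active
 */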
+static ssize_t
+qlini_mode_store(struct device *dev, struct device_attribute *attr,
+    const char *buf, size_t count)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	int ini;
+
+	if (!buf)
+		return -EINVAL;
+
+	if (strncasecmp(QLA2XXX_INI_MODE_STR_EXCLUSIVE, buf,
+		strlen(QLA2XXX_INI_MODE_STR_EXCLUSIVE)) == 0)
+		ini = QLA2XXX_INI_MODE_EXCLUSIVE;
+	else if (strncasecmp(QLA2XXX_INI_MODE_STR_DISABLED, buf,
+		strlen(QLA2XXX_INI_MODE_STR_DISABLED)) == 0)
+		ini = QLA2XXX_INI_MODE_DISABLED;
+	else if (strncasecmp(QLA2XXX_INI_MODE_STR_ENABLED, buf,
+		  strlen(QLA2XXX_INI_MODE_STR_ENABLED)) == 0)
+		ini = QLA2XXX_INI_MODE_ENABLED;
+	else if (strncasecmp(QLA2XXX_INI_MODE_STR_DUAL, buf,
+		strlen(QLA2XXX_INI_MODE_STR_DUAL)) == 0)
+		ini = QLA2XXX_INI_MODE_DUAL;
+	else
+		return -EINVAL;
+
+	qla_set_ini_mode(vha, ini);
+	return strlen(buf);
+}
+
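As a usage illustration for the handlers above, a minimal userspace sketch, not part of the patch; the host number (host0) is an assumption, and any of "enabled", "disabled", "dual" or "exclusive" is accepted:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* host0 is an assumption; pick the SCSI host that backs the HBA */
	const char *path = "/sys/class/scsi_host/host0/qlini_mode";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* matched case-insensitively by the store handler above */
	if (write(fd, "dual", 4) != 4)
		perror("write");
	close(fd);
	return 0;
}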
+static ssize_t
+ql2xexchoffld_show(struct device *dev, struct device_attribute *attr,
+    char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	int len = 0;
+
+	len += scnprintf(buf + len, PAGE_SIZE-len,
+		"target exchange: new %d : current: %d\n\n",
+		vha->u_ql2xexchoffld, vha->ql2xexchoffld);
+
+	len += scnprintf(buf + len, PAGE_SIZE-len,
+	    "Please (re)set operating mode via \"/sys/class/scsi_host/host%ld/qlini_mode\" to load new setting.\n",
+	    vha->host_no);
+
+	return len;
+}
+
+static ssize_t
+ql2xexchoffld_store(struct device *dev, struct device_attribute *attr,
+    const char *buf, size_t count)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	int val = 0;
+
+	if (sscanf(buf, "%d", &val) != 1)
+		return -EINVAL;
+
+	if (val > FW_MAX_EXCHANGES_CNT)
+		val = FW_MAX_EXCHANGES_CNT;
+	else if (val < 0)
+		val = 0;
+
+	vha->u_ql2xexchoffld = val;
+	return strlen(buf);
+}
+
+static ssize_t
+ql2xiniexchg_show(struct device *dev, struct device_attribute *attr,
+    char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	int len = 0;
+
+	len += scnprintf(buf + len, PAGE_SIZE-len,
+		"initiator exchange: new %d : current: %d\n\n",
+		vha->u_ql2xiniexchg, vha->ql2xiniexchg);
+
+	len += scnprintf(buf + len, PAGE_SIZE-len,
+	    "Please (re)set operating mode via \"/sys/class/scsi_host/host%ld/qlini_mode\" to load new setting.\n",
+	    vha->host_no);
+
+	return len;
+}
+
+static ssize_t
+ql2xiniexchg_store(struct device *dev, struct device_attribute *attr,
+    const char *buf, size_t count)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	int val = 0;
+
+	if (sscanf(buf, "%d", &val) != 1)
+		return -EINVAL;
+
+	if (val > FW_MAX_EXCHANGES_CNT)
+		val = FW_MAX_EXCHANGES_CNT;
+	else if (val < 0)
+		val = 0;
+
+	vha->u_ql2xiniexchg = val;
+	return strlen(buf);
+}
+
+static ssize_t
+qla2x00_dif_bundle_statistics_show(struct device *dev,
+    struct device_attribute *attr, char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
+
+	return scnprintf(buf, PAGE_SIZE,
+	    "cross=%llu read=%llu write=%llu kalloc=%llu dma_alloc=%llu unusable=%u\n",
+	    ha->dif_bundle_crossed_pages, ha->dif_bundle_reads,
+	    ha->dif_bundle_writes, ha->dif_bundle_kallocs,
+	    ha->dif_bundle_dma_allocs, ha->pool.unusable.count);
+}
+
+static ssize_t
+qla2x00_fw_attr_show(struct device *dev,
+    struct device_attribute *attr, char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+		return scnprintf(buf, PAGE_SIZE, "\n");
+
+	return scnprintf(buf, PAGE_SIZE, "%llx\n",
+	    (uint64_t)ha->fw_attributes_ext[1] << 48 |
+	    (uint64_t)ha->fw_attributes_ext[0] << 32 |
+	    (uint64_t)ha->fw_attributes_h << 16 |
+	    (uint64_t)ha->fw_attributes);
+}
+
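The fw_attr value is four 16-bit fields packed high-to-low into one 64-bit word. A worked example with made-up field values:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	/* illustrative values only */
	uint16_t ext1 = 0x0607, ext0 = 0x0405, h = 0x0203, attr = 0x0001;
	uint64_t v = (uint64_t)ext1 << 48 | (uint64_t)ext0 << 32 |
		     (uint64_t)h << 16 | attr;

	printf("%" PRIx64 "\n", v);	/* prints 607040502030001 */
	return 0;
}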
+static ssize_t
+qla2x00_port_no_show(struct device *dev, struct device_attribute *attr,
+    char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n", vha->hw->port_no);
+}
+
+static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_driver_version_show, NULL);
 static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
 static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
 static DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL);
@@ -1562,6 +2275,7 @@
 		   NULL);
 static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
 		   NULL);
+static DEVICE_ATTR(serdes_version, 0444, qla2x00_serdes_version_show, NULL);
 static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
 static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
 static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
@@ -1579,8 +2293,23 @@
 		   qla2x00_allow_cna_fw_dump_show,
 		   qla2x00_allow_cna_fw_dump_store);
 static DEVICE_ATTR(pep_version, S_IRUGO, qla2x00_pep_version_show, NULL);
-static DEVICE_ATTR(min_link_speed, S_IRUGO, qla2x00_min_link_speed_show, NULL);
-static DEVICE_ATTR(max_speed_sup, S_IRUGO, qla2x00_max_speed_sup_show, NULL);
+static DEVICE_ATTR(min_supported_speed, 0444,
+		   qla2x00_min_supported_speed_show, NULL);
+static DEVICE_ATTR(max_supported_speed, 0444,
+		   qla2x00_max_supported_speed_show, NULL);
+static DEVICE_ATTR(zio_threshold, 0644,
+    qla_zio_threshold_show,
+    qla_zio_threshold_store);
+static DEVICE_ATTR_RW(qlini_mode);
+static DEVICE_ATTR_RW(ql2xexchoffld);
+static DEVICE_ATTR_RW(ql2xiniexchg);
+static DEVICE_ATTR(dif_bundle_statistics, 0444,
+    qla2x00_dif_bundle_statistics_show, NULL);
+static DEVICE_ATTR(port_speed, 0644, qla2x00_port_speed_show,
+    qla2x00_port_speed_store);
+static DEVICE_ATTR(port_no, 0444, qla2x00_port_no_show, NULL);
+static DEVICE_ATTR(fw_attr, 0444, qla2x00_fw_attr_show, NULL);
 
 struct device_attribute *qla2x00_host_attrs[] = {
 	&dev_attr_driver_version,
@@ -1601,6 +2330,7 @@
 	&dev_attr_optrom_fw_version,
 	&dev_attr_84xx_fw_version,
 	&dev_attr_total_isp_aborts,
+	&dev_attr_serdes_version,
 	&dev_attr_mpi_version,
 	&dev_attr_phy_version,
 	&dev_attr_flash_block_size,
@@ -1615,11 +2345,34 @@
 	&dev_attr_fw_dump_size,
 	&dev_attr_allow_cna_fw_dump,
 	&dev_attr_pep_version,
-	&dev_attr_min_link_speed,
-	&dev_attr_max_speed_sup,
+	&dev_attr_min_supported_speed,
+	&dev_attr_max_supported_speed,
+	&dev_attr_zio_threshold,
+	&dev_attr_dif_bundle_statistics,
+	&dev_attr_port_speed,
+	&dev_attr_port_no,
+	&dev_attr_fw_attr,
+	NULL, /* reserve for qlini_mode */
+	NULL, /* reserve for ql2xiniexchg */
+	NULL, /* reserve for ql2xexchoffld */
 	NULL,
 };
 
+void qla_insert_tgt_attrs(void)
+{
+	struct device_attribute **attr;
+
+	/* advance to empty slot */
+	for (attr = &qla2x00_host_attrs[0]; *attr; ++attr)
+		continue;
+
+	*attr = &dev_attr_qlini_mode;
+	attr++;
+	*attr = &dev_attr_ql2xiniexchg;
+	attr++;
+	*attr = &dev_attr_ql2xexchoffld;
+}
+
 /* Host attributes. */
 
 static void
@@ -1634,16 +2387,15 @@
 static void
 qla2x00_get_host_speed(struct Scsi_Host *shost)
 {
-	struct qla_hw_data *ha = ((struct scsi_qla_host *)
-					(shost_priv(shost)))->hw;
-	u32 speed = FC_PORTSPEED_UNKNOWN;
+	scsi_qla_host_t *vha = shost_priv(shost);
+	u32 speed;
 
-	if (IS_QLAFX00(ha)) {
+	if (IS_QLAFX00(vha->hw)) {
 		qlafx00_get_host_speed(shost);
 		return;
 	}
 
-	switch (ha->link_data_rate) {
+	switch (vha->hw->link_data_rate) {
 	case PORT_SPEED_1GB:
 		speed = FC_PORTSPEED_1GBIT;
 		break;
@@ -1665,7 +2417,14 @@
 	case PORT_SPEED_32GB:
 		speed = FC_PORTSPEED_32GBIT;
 		break;
+	case PORT_SPEED_64GB:
+		speed = FC_PORTSPEED_64GBIT;
+		break;
+	default:
+		speed = FC_PORTSPEED_UNKNOWN;
+		break;
 	}
+
 	fc_host_speed(shost) = speed;
 }
 
@@ -1673,7 +2432,7 @@
 qla2x00_get_host_port_type(struct Scsi_Host *shost)
 {
 	scsi_qla_host_t *vha = shost_priv(shost);
-	uint32_t port_type = FC_PORTTYPE_UNKNOWN;
+	uint32_t port_type;
 
 	if (vha->vp_idx) {
 		fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
@@ -1692,7 +2451,11 @@
 	case ISP_CFG_F:
 		port_type = FC_PORTTYPE_NPORT;
 		break;
+	default:
+		port_type = FC_PORTTYPE_UNKNOWN;
+		break;
 	}
+
 	fc_host_port_type(shost) = port_type;
 }
 
@@ -1754,13 +2517,10 @@
 	fc_starget_port_id(starget) = port_id;
 }
 
-static void
+static inline void
 qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
 {
-	if (timeout)
-		rport->dev_loss_tmo = timeout;
-	else
-		rport->dev_loss_tmo = 1;
+	rport->dev_loss_tmo = timeout ? timeout : 1;
 }
 
 static void
@@ -1864,8 +2624,8 @@
 	if (qla2x00_chip_is_down(vha))
 		goto done;
 
-	stats = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*stats),
-				    &stats_dma, GFP_KERNEL);
+	stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
+				   GFP_KERNEL);
 	if (!stats) {
 		ql_log(ql_log_warn, vha, 0x707d,
 		    "Failed to allocate memory for stats.\n");
@@ -1970,8 +2730,9 @@
 qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
 {
 	scsi_qla_host_t *vha = shost_priv(shost);
-	uint8_t node_name[WWN_SIZE] = { 0xFF, 0xFF, 0xFF, 0xFF, \
-		0xFF, 0xFF, 0xFF, 0xFF};
+	static const uint8_t node_name[WWN_SIZE] = {
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
+	};
 	u64 fabric_name = wwn_to_u64(node_name);
 
 	if (vha->device_flags & SWITCH_FOUND)
@@ -2049,8 +2810,8 @@
 
 	/* initialized vport states */
 	atomic_set(&vha->loop_state, LOOP_DOWN);
-	vha->vp_err_state=  VP_ERR_PORTDWN;
-	vha->vp_prev_err_state=  VP_ERR_UNKWN;
+	vha->vp_err_state = VP_ERR_PORTDWN;
+	vha->vp_prev_err_state = VP_ERR_UNKWN;
 	/* Check if physical ha port is Up */
 	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
 	    atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
@@ -2065,6 +2826,7 @@
 	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
 		if (ha->fw_attributes & BIT_4) {
 			int prot = 0, guard;
+
 			vha->flags.difdix_supported = 1;
 			ql_dbg(ql_dbg_user, vha, 0x7082,
 			    "Registered for DIF/DIX type 1 and 3 protection.\n");
@@ -2157,10 +2919,14 @@
 	struct qla_hw_data *ha = vha->hw;
 	uint16_t id = vha->vp_idx;
 
+	set_bit(VPORT_DELETE, &vha->dpc_flags);
+
 	while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
 	    test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
 		msleep(1000);
 
+	qla_nvme_delete(vha);
+
 	qla24xx_disable_vp(vha);
 	qla2x00_wait_for_sess_deletion(vha);
 
@@ -2191,6 +2957,8 @@
 	dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
 	    vha->gnl.ldma);
 
+	vha->gnl.l = NULL;
+
 	vfree(vha->scan.l);
 
 	if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) {
@@ -2313,7 +3081,7 @@
 qla2x00_init_host_attr(scsi_qla_host_t *vha)
 {
 	struct qla_hw_data *ha = vha->hw;
-	u32 speed = FC_PORTSPEED_UNKNOWN;
+	u32 speeds = FC_PORTSPEED_UNKNOWN;
 
 	fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
 	fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
@@ -2324,25 +3092,45 @@
 	fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
 
 	if (IS_CNA_CAPABLE(ha))
-		speed = FC_PORTSPEED_10GBIT;
-	else if (IS_QLA2031(ha))
-		speed = FC_PORTSPEED_16GBIT | FC_PORTSPEED_8GBIT |
-		    FC_PORTSPEED_4GBIT;
-	else if (IS_QLA25XX(ha))
-		speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
-		    FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
+		speeds = FC_PORTSPEED_10GBIT;
+	else if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) {
+		if (ha->max_supported_speed == 2) {
+			if (ha->min_supported_speed <= 6)
+				speeds |= FC_PORTSPEED_64GBIT;
+		}
+		if (ha->max_supported_speed == 2 ||
+		    ha->max_supported_speed == 1) {
+			if (ha->min_supported_speed <= 5)
+				speeds |= FC_PORTSPEED_32GBIT;
+		}
+		if (ha->max_supported_speed == 2 ||
+		    ha->max_supported_speed == 1 ||
+		    ha->max_supported_speed == 0) {
+			if (ha->min_supported_speed <= 4)
+				speeds |= FC_PORTSPEED_16GBIT;
+		}
+		if (ha->max_supported_speed == 1 ||
+		    ha->max_supported_speed == 0) {
+			if (ha->min_supported_speed <= 3)
+				speeds |= FC_PORTSPEED_8GBIT;
+		}
+		if (ha->max_supported_speed == 0) {
+			if (ha->min_supported_speed <= 2)
+				speeds |= FC_PORTSPEED_4GBIT;
+		}
+	} else if (IS_QLA2031(ha))
+		speeds = FC_PORTSPEED_16GBIT|FC_PORTSPEED_8GBIT|
+			FC_PORTSPEED_4GBIT;
+	else if (IS_QLA25XX(ha) || IS_QLAFX00(ha))
+		speeds = FC_PORTSPEED_8GBIT|FC_PORTSPEED_4GBIT|
+			FC_PORTSPEED_2GBIT|FC_PORTSPEED_1GBIT;
 	else if (IS_QLA24XX_TYPE(ha))
-		speed = FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
-		    FC_PORTSPEED_1GBIT;
+		speeds = FC_PORTSPEED_4GBIT|FC_PORTSPEED_2GBIT|
+			FC_PORTSPEED_1GBIT;
 	else if (IS_QLA23XX(ha))
-		speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
-	else if (IS_QLAFX00(ha))
-		speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
-		    FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
-	else if (IS_QLA27XX(ha))
-		speed = FC_PORTSPEED_32GBIT | FC_PORTSPEED_16GBIT |
-		    FC_PORTSPEED_8GBIT;
+		speeds = FC_PORTSPEED_2GBIT|FC_PORTSPEED_1GBIT;
 	else
-		speed = FC_PORTSPEED_1GBIT;
-	fc_host_supported_speeds(vha->host) = speed;
+		speeds = FC_PORTSPEED_1GBIT;
+
+	fc_host_supported_speeds(vha->host) = speeds;
 }
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 4a9fd8d..99f0a1a 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1,4 +1,4 @@
-	/*
+/*
  * QLogic Fibre Channel HBA Driver
  * Copyright (c)  2003-2014 QLogic Corporation
  *
@@ -12,10 +12,8 @@
 #include <linux/bsg-lib.h>
 
 /* BSG support for ELS/CT pass through */
-void
-qla2x00_bsg_job_done(void *ptr, int res)
+void qla2x00_bsg_job_done(srb_t *sp, int res)
 {
-	srb_t *sp = ptr;
 	struct bsg_job *bsg_job = sp->u.bsg_job;
 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
 
@@ -25,10 +23,8 @@
 	sp->free(sp);
 }
 
-void
-qla2x00_bsg_sp_free(void *ptr)
+void qla2x00_bsg_sp_free(srb_t *sp)
 {
-	srb_t *sp = ptr;
 	struct qla_hw_data *ha = sp->vha->hw;
 	struct bsg_job *bsg_job = sp->u.bsg_job;
 	struct fc_bsg_request *bsg_request = bsg_job->request;
@@ -84,8 +80,7 @@
 		return 0;
 	}
 
-	if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
-			bcode[3] != 'S') {
+	if (memcmp(bcode, "HQOS", 4)) {
 		/* Invalid FCP priority data header*/
 		ql_dbg(ql_dbg_user, vha, 0x7052,
 		    "Invalid FCP Priority data header. bcode=0x%x.\n",
@@ -258,7 +253,7 @@
 	srb_t *sp;
 	const char *type;
 	int req_sg_cnt, rsp_sg_cnt;
-	int rval =  (DRIVER_ERROR << 16);
+	int rval =  (DID_ERROR << 16);
 	uint16_t nextlid = 0;
 
 	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
@@ -342,6 +337,8 @@
 		dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
 		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
 	if (!req_sg_cnt) {
 		rval = -ENOMEM;
 		goto done_free_fcport;
 	}
@@ -349,6 +346,8 @@
 	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
 		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
         if (!rsp_sg_cnt) {
+		/* undo the successful request-payload mapping above */
+		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
 		rval = -ENOMEM;
 		goto done_free_fcport;
 	}
@@ -433,7 +432,7 @@
 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
 	scsi_qla_host_t *vha = shost_priv(host);
 	struct qla_hw_data *ha = vha->hw;
-	int rval = (DRIVER_ERROR << 16);
+	int rval = (DID_ERROR << 16);
 	int req_sg_cnt, rsp_sg_cnt;
 	uint16_t loop_id;
 	struct fc_port *fcport;
@@ -1044,7 +1043,7 @@
 	}
 
 	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
-	fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));
+	fw_ver = get_unaligned_le32((uint32_t *)fw_buf + 2);
 
 	mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
 	mn->entry_count = 1;
@@ -1057,9 +1056,8 @@
 	mn->fw_ver =  cpu_to_le32(fw_ver);
 	mn->fw_size =  cpu_to_le32(data_len);
 	mn->fw_seq_size =  cpu_to_le32(data_len);
-	mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
-	mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
-	mn->dseg_length = cpu_to_le32(data_len);
+	put_unaligned_le64(fw_dma, &mn->dsd.address);
+	mn->dsd.length = cpu_to_le32(data_len);
 	mn->data_seg_cnt = cpu_to_le16(1);
 
 	rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
@@ -1238,9 +1236,8 @@
 	if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
 		mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
 		mn->dseg_count = cpu_to_le16(1);
-		mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
-		mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
-		mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
+		put_unaligned_le64(mgmt_dma, &mn->dsd.address);
+		mn->dsd.length = cpu_to_le32(ql84_mgmt->mgmt.len);
 	}
 
 	rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);
@@ -1354,7 +1351,7 @@
 
 	if (rval) {
 		ql_log(ql_log_warn, vha, 0x704c,
-		    "iIDMA cmd failed for %8phN -- "
+		    "iiDMA cmd failed for %8phN -- "
 		    "%04x %x %04x %04x.\n", fcport->port_name,
 		    rval, fcport->fp_speed, mb[0], mb[1]);
 		rval = (DID_ERROR << 16);
@@ -1412,7 +1409,8 @@
 		    start == (ha->flt_region_fw * 4))
 			valid = 1;
 		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
-		    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha))
+		    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
+		    IS_QLA28XX(ha))
 			valid = 1;
 		if (!valid) {
 			ql_log(ql_log_warn, vha, 0x7058,
@@ -1534,6 +1532,7 @@
 	uint32_t count;
 	dma_addr_t sfp_dma;
 	void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
+
 	if (!sfp) {
 		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
 		    EXT_STATUS_NO_MEMORY;
@@ -1584,6 +1583,7 @@
 	struct qla_status_reg *sr = (void *)bsg;
 	dma_addr_t sfp_dma;
 	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
+
 	if (!sfp) {
 		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
 		    EXT_STATUS_NO_MEMORY;
@@ -1634,6 +1634,7 @@
 	struct qla_status_reg *sr = (void *)bsg;
 	dma_addr_t sfp_dma;
 	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
+
 	if (!sfp) {
 		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
 		    EXT_STATUS_NO_MEMORY;
@@ -1680,6 +1681,7 @@
 	struct qla_i2c_access *i2c = (void *)bsg;
 	dma_addr_t sfp_dma;
 	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
+
 	if (!sfp) {
 		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
 		    EXT_STATUS_NO_MEMORY;
@@ -1725,6 +1727,7 @@
 	struct qla_i2c_access *i2c = (void *)bsg;
 	dma_addr_t sfp_dma;
 	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
+
 	if (!sfp) {
 		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
 		    EXT_STATUS_NO_MEMORY;
@@ -1775,8 +1778,8 @@
 	uint16_t nextlid = 0;
 	uint32_t tot_dsds;
 	srb_t *sp = NULL;
-	uint32_t req_data_len = 0;
-	uint32_t rsp_data_len = 0;
+	uint32_t req_data_len;
+	uint32_t rsp_data_len;
 
 	/* Check the type of the adapter */
 	if (!IS_BIDI_CAPABLE(ha)) {
@@ -1881,6 +1884,9 @@
 		goto done_unmap_sg;
 	}
 
+	req_data_len = bsg_job->request_payload.payload_len;
+	rsp_data_len = bsg_job->reply_payload.payload_len;
+
 	if (req_data_len != rsp_data_len) {
 		rval = EXT_STATUS_BUSY;
 		ql_log(ql_log_warn, vha, 0x70aa,
@@ -1888,10 +1894,6 @@
 		goto done_unmap_sg;
 	}
 
-	req_data_len = bsg_job->request_payload.payload_len;
-	rsp_data_len = bsg_job->reply_payload.payload_len;
-
-
 	/* Alloc SRB structure */
 	sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
 	if (!sp) {
@@ -1948,7 +1950,7 @@
 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
 	scsi_qla_host_t *vha = shost_priv(host);
 	struct qla_hw_data *ha = vha->hw;
-	int rval = (DRIVER_ERROR << 16);
+	int rval = (DID_ERROR << 16);
 	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
 	srb_t *sp;
 	int req_sg_cnt = 0, rsp_sg_cnt = 0;
@@ -1961,7 +1963,7 @@
 
 	/* Dump the vendor information */
 	ql_dump_buffer(ql_dbg_user + ql_dbg_verbose , vha, 0x70cf,
-	    (uint8_t *)piocb_rqst, sizeof(struct qla_mt_iocb_rqst_fx00));
+	    piocb_rqst, sizeof(*piocb_rqst));
 
 	if (!vha->flags.online) {
 		ql_log(ql_log_warn, vha, 0x70d0,
@@ -2157,7 +2159,7 @@
 	struct qla_hw_data *ha = vha->hw;
 	struct qla_flash_update_caps cap;
 
-	if (!(IS_QLA27XX(ha)))
+	if (!(IS_QLA27XX(ha)) && !IS_QLA28XX(ha))
 		return -EPERM;
 
 	memset(&cap, 0, sizeof(cap));
@@ -2190,7 +2192,7 @@
 	uint64_t online_fw_attr = 0;
 	struct qla_flash_update_caps cap;
 
-	if (!(IS_QLA27XX(ha)))
+	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
 		return -EPERM;
 
 	memset(&cap, 0, sizeof(cap));
@@ -2238,7 +2240,7 @@
 	uint8_t domain, area, al_pa, state;
 	int rval;
 
-	if (!(IS_QLA27XX(ha)))
+	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
 		return -EPERM;
 
 	memset(&bbcr, 0, sizeof(bbcr));
@@ -2312,8 +2314,8 @@
 	if (!IS_FWI2_CAPABLE(ha))
 		return -EPERM;
 
-	stats = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*stats),
-				    &stats_dma, GFP_KERNEL);
+	stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
+				   GFP_KERNEL);
 	if (!stats) {
 		ql_log(ql_log_warn, vha, 0x70e2,
 		    "Failed to allocate memory for stats.\n");
@@ -2323,8 +2325,8 @@
 	rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options);
 
 	if (rval == QLA_SUCCESS) {
-		ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e3,
-		    (uint8_t *)stats, sizeof(*stats));
+		ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e5,
+			stats, sizeof(*stats));
 		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
 			bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats));
 	}
@@ -2353,7 +2355,8 @@
 	int rval;
 	struct qla_dport_diag *dd;
 
-	if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
+	if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
+	    !IS_QLA28XX(vha->hw))
 		return -EPERM;
 
 	dd = kmalloc(sizeof(*dd), GFP_KERNEL);
@@ -2388,6 +2391,45 @@
 }
 
 static int
+qla2x00_get_flash_image_status(struct bsg_job *bsg_job)
+{
+	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_active_regions regions = { };
+	struct active_regions active_regions = { };
+
+	qla27xx_get_active_image(vha, &active_regions);
+	regions.global_image = active_regions.global;
+
+	if (IS_QLA28XX(ha)) {
+		qla28xx_get_aux_images(vha, &active_regions);
+		regions.board_config = active_regions.aux.board_config;
+		regions.vpd_nvram = active_regions.aux.vpd_nvram;
+		regions.npiv_config_0_1 = active_regions.aux.npiv_config_0_1;
+		regions.npiv_config_2_3 = active_regions.aux.npiv_config_2_3;
+	}
+
+	ql_dbg(ql_dbg_user, vha, 0x70e1,
+	    "%s(%lu): FW=%u BCFG=%u VPDNVR=%u NPIV01=%u NPIV02=%u\n",
+	    __func__, vha->host_no, regions.global_image,
+	    regions.board_config, regions.vpd_nvram,
+	    regions.npiv_config_0_1, regions.npiv_config_2_3);
+
+	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+	    bsg_job->reply_payload.sg_cnt, &regions, sizeof(regions));
+
+	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
+	bsg_reply->reply_payload_rcv_len = sizeof(regions);
+	bsg_reply->result = DID_OK << 16;
+	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+	bsg_job_done(bsg_job, bsg_reply->result,
+	    bsg_reply->reply_payload_rcv_len);
+
+	return 0;
+}
+
+static int
 qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
 {
 	struct fc_bsg_request *bsg_request = bsg_job->request;
@@ -2460,6 +2502,9 @@
 	case QL_VND_DPORT_DIAGNOSTICS:
 		return qla2x00_do_dport_diagnostics(bsg_job);
 
+	case QL_VND_SS_GET_FLASH_IMAGE_STATUS:
+		return qla2x00_get_flash_image_status(bsg_job);
+
 	default:
 		return -ENOSYS;
 	}
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
index d97dfd5..7594fad 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -31,6 +31,7 @@
 #define QL_VND_GET_PRIV_STATS	0x18
 #define QL_VND_DPORT_DIAGNOSTICS	0x19
 #define QL_VND_GET_PRIV_STATS_EX	0x1A
+#define QL_VND_SS_GET_FLASH_IMAGE_STATUS	0x1E
 
 /* BSG Vendor specific subcode returns */
 #define EXT_STATUS_OK			0
@@ -279,4 +280,14 @@
 #define QLA_DPORT_RESULT	0x0
 #define QLA_DPORT_START		0x2
 
+/* active images in flash */
+struct qla_active_regions {
+	uint8_t global_image;
+	uint8_t board_config;
+	uint8_t vpd_nvram;
+	uint8_t npiv_config_0_1;
+	uint8_t npiv_config_2_3;
+	uint8_t reserved[32];
+} __packed;
+
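Since this structure is copied verbatim into the BSG reply payload, its size is userspace ABI. A compile-time guard could look like this (a sketch, not part of the patch; 5 status bytes plus 32 reserved bytes give 37 thanks to __packed):

static_assert(sizeof(struct qla_active_regions) == 37,
	      "qla_active_regions is userspace ABI");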
 #endif
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index c7533fa..30afc59 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -111,30 +111,25 @@
 qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
 	uint32_t ram_dwords, void **nxt)
 {
-	int rval;
-	uint32_t cnt, stat, timer, dwords, idx;
-	uint16_t mb0;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 	dma_addr_t dump_dma = ha->gid_list_dma;
-	uint32_t *dump = (uint32_t *)ha->gid_list;
+	uint32_t *chunk = (void *)ha->gid_list;
+	uint32_t dwords = qla2x00_gid_list_size(ha) / 4;
+	uint32_t stat;
+	ulong i, j, timer = 6000000;
+	int rval = QLA_FUNCTION_FAILED;
 
-	rval = QLA_SUCCESS;
-	mb0 = 0;
-
-	WRT_REG_WORD(&reg->mailbox0, MBC_LOAD_DUMP_MPI_RAM);
 	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+	for (i = 0; i < ram_dwords; i += dwords, addr += dwords) {
+		if (i + dwords > ram_dwords)
+			dwords = ram_dwords - i;
 
-	dwords = qla2x00_gid_list_size(ha) / 4;
-	for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
-	    cnt += dwords, addr += dwords) {
-		if (cnt + dwords > ram_dwords)
-			dwords = ram_dwords - cnt;
-
+		WRT_REG_WORD(&reg->mailbox0, MBC_LOAD_DUMP_MPI_RAM);
 		WRT_REG_WORD(&reg->mailbox1, LSW(addr));
 		WRT_REG_WORD(&reg->mailbox8, MSW(addr));
 
-		WRT_REG_WORD(&reg->mailbox2, MSW(dump_dma));
-		WRT_REG_WORD(&reg->mailbox3, LSW(dump_dma));
+		WRT_REG_WORD(&reg->mailbox2, MSW(LSD(dump_dma)));
+		WRT_REG_WORD(&reg->mailbox3, LSW(LSD(dump_dma)));
 		WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
 		WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
 
@@ -145,76 +140,76 @@
 		WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
 
 		ha->flags.mbox_int = 0;
-		for (timer = 6000000; timer; timer--) {
-			/* Check for pending interrupts. */
+		while (timer--) {
+			udelay(5);
+
 			stat = RD_REG_DWORD(&reg->host_status);
-			if (stat & HSRX_RISC_INT) {
-				stat &= 0xff;
+			/* Check for pending interrupts. */
+			if (!(stat & HSRX_RISC_INT))
+				continue;
 
-				if (stat == 0x1 || stat == 0x2 ||
-				    stat == 0x10 || stat == 0x11) {
-					set_bit(MBX_INTERRUPT,
-					    &ha->mbx_cmd_flags);
-
-					mb0 = RD_REG_WORD(&reg->mailbox0);
-					RD_REG_WORD(&reg->mailbox1);
-
-					WRT_REG_DWORD(&reg->hccr,
-					    HCCRX_CLR_RISC_INT);
-					RD_REG_DWORD(&reg->hccr);
-					break;
-				}
+			stat &= 0xff;
+			if (stat != 0x1 && stat != 0x2 &&
+			    stat != 0x10 && stat != 0x11) {
 
 				/* Clear this intr; it wasn't a mailbox intr */
 				WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
 				RD_REG_DWORD(&reg->hccr);
+				continue;
 			}
-			udelay(5);
+
+			set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+			rval = RD_REG_WORD(&reg->mailbox0) & MBS_MASK;
+			WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+			RD_REG_DWORD(&reg->hccr);
+			break;
 		}
 		ha->flags.mbox_int = 1;
+		*nxt = ram + i;
 
-		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
-			rval = mb0 & MBS_MASK;
-			for (idx = 0; idx < dwords; idx++)
-				ram[cnt + idx] = IS_QLA27XX(ha) ?
-				    le32_to_cpu(dump[idx]) : swab32(dump[idx]);
-		} else {
-			rval = QLA_FUNCTION_FAILED;
+		if (!test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
+			/* no interrupt, timed out */
+			return rval;
+		}
+		if (rval) {
+			/* error completion status */
+			return rval;
+		}
+		for (j = 0; j < dwords; j++) {
+			ram[i + j] =
+			    (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ?
+			    chunk[j] : swab32(chunk[j]);
 		}
 	}
 
-	*nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
-	return rval;
+	*nxt = ram + i;
+	return QLA_SUCCESS;
 }
 
 int
 qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
     uint32_t ram_dwords, void **nxt)
 {
-	int rval;
-	uint32_t cnt, stat, timer, dwords, idx;
-	uint16_t mb0;
+	int rval = QLA_FUNCTION_FAILED;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 	dma_addr_t dump_dma = ha->gid_list_dma;
-	uint32_t *dump = (uint32_t *)ha->gid_list;
+	uint32_t *chunk = (void *)ha->gid_list;
+	uint32_t dwords = qla2x00_gid_list_size(ha) / 4;
+	uint32_t stat;
+	ulong i, j, timer = 6000000;
 
-	rval = QLA_SUCCESS;
-	mb0 = 0;
-
-	WRT_REG_WORD(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
 	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
 
-	dwords = qla2x00_gid_list_size(ha) / 4;
-	for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
-	    cnt += dwords, addr += dwords) {
-		if (cnt + dwords > ram_dwords)
-			dwords = ram_dwords - cnt;
+	for (i = 0; i < ram_dwords; i += dwords, addr += dwords) {
+		if (i + dwords > ram_dwords)
+			dwords = ram_dwords - i;
 
+		WRT_REG_WORD(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
 		WRT_REG_WORD(&reg->mailbox1, LSW(addr));
 		WRT_REG_WORD(&reg->mailbox8, MSW(addr));
 
-		WRT_REG_WORD(&reg->mailbox2, MSW(dump_dma));
-		WRT_REG_WORD(&reg->mailbox3, LSW(dump_dma));
+		WRT_REG_WORD(&reg->mailbox2, MSW(LSD(dump_dma)));
+		WRT_REG_WORD(&reg->mailbox3, LSW(LSD(dump_dma)));
 		WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
 		WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
 
@@ -223,45 +218,48 @@
 		WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
 
 		ha->flags.mbox_int = 0;
-		for (timer = 6000000; timer; timer--) {
-			/* Check for pending interrupts. */
+		while (timer--) {
+			udelay(5);
 			stat = RD_REG_DWORD(&reg->host_status);
-			if (stat & HSRX_RISC_INT) {
-				stat &= 0xff;
 
-				if (stat == 0x1 || stat == 0x2 ||
-				    stat == 0x10 || stat == 0x11) {
-					set_bit(MBX_INTERRUPT,
-					    &ha->mbx_cmd_flags);
+			/* Check for pending interrupts. */
+			if (!(stat & HSRX_RISC_INT))
+				continue;
 
-					mb0 = RD_REG_WORD(&reg->mailbox0);
-
-					WRT_REG_DWORD(&reg->hccr,
-					    HCCRX_CLR_RISC_INT);
-					RD_REG_DWORD(&reg->hccr);
-					break;
-				}
-
-				/* Clear this intr; it wasn't a mailbox intr */
+			stat &= 0xff;
+			if (stat != 0x1 && stat != 0x2 &&
+			    stat != 0x10 && stat != 0x11) {
 				WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
 				RD_REG_DWORD(&reg->hccr);
+				continue;
 			}
-			udelay(5);
+
+			set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+			rval = RD_REG_WORD(&reg->mailbox0) & MBS_MASK;
+			WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+			RD_REG_DWORD(&reg->hccr);
+			break;
 		}
 		ha->flags.mbox_int = 1;
+		*nxt = ram + i;
 
-		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
-			rval = mb0 & MBS_MASK;
-			for (idx = 0; idx < dwords; idx++)
-				ram[cnt + idx] = IS_QLA27XX(ha) ?
-				    le32_to_cpu(dump[idx]) : swab32(dump[idx]);
-		} else {
-			rval = QLA_FUNCTION_FAILED;
+		if (!test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
+			/* no interrupt, timed out */
+			return rval;
+		}
+		if (rval) {
+			/* error completion status */
+			return rval;
+		}
+		for (j = 0; j < dwords; j++) {
+			ram[i + j] =
+			    (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ?
+			    chunk[j] : swab32(chunk[j]);
 		}
 	}
 
-	*nxt = rval == QLA_SUCCESS ? &ram[cnt]: NULL;
-	return rval;
+	*nxt = ram + i;
+	return QLA_SUCCESS;
 }
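Both dump routines now follow the same shape. Abstractly (a sketch only; xfer_one_chunk() is a stand-in for the mailbox programming and interrupt polling above, not a driver function):

static int dump_chunked(u32 *dst, u32 total, u32 chunk, void **nxt)
{
	u32 done = 0;

	while (done < total) {
		u32 n = min(chunk, total - done);
		int rc = xfer_one_chunk(dst + done, n);

		*nxt = dst + done;	/* publish progress before bailing */
		if (rc)
			return rc;	/* timeout or mailbox error status */
		done += n;
	}
	*nxt = dst + done;
	return QLA_SUCCESS;
}

Publishing *nxt before the error checks is the point of the rewrite: on a timeout the caller still learns how much RAM made it into the buffer.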
 
 static int
@@ -447,7 +445,7 @@
 		}
 	}
 
-	*nxt = rval == QLA_SUCCESS ? &ram[cnt]: NULL;
+	*nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
 	return rval;
 }
 
@@ -669,7 +667,8 @@
 	struct qla2xxx_mq_chain *mq = ptr;
 	device_reg_t *reg;
 
-	if (!ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
+	if (!ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
+	    IS_QLA28XX(ha))
 		return ptr;
 
 	mq = ptr;
@@ -2521,7 +2520,7 @@
 /****************************************************************************/
 
 static inline int
-ql_mask_match(uint32_t level)
+ql_mask_match(uint level)
 {
 	return (level & ql2xextended_error_logging) == level;
 }
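ql_mask_match() is a strict subset test: a message is emitted only when every bit of its level is enabled in ql2xextended_error_logging. A standalone illustration (userspace, with a stand-in mask):

#include <stdio.h>

int main(void)
{
	unsigned int mask = 0x00ff0000;	/* stand-in for ql2xextended_error_logging */
	unsigned int lvl_in  = 0x00110000;	/* all bits covered    -> match    */
	unsigned int lvl_out = 0x01010000;	/* one bit not in mask -> no match */

	printf("%d %d\n", (lvl_in & mask) == lvl_in,
	       (lvl_out & mask) == lvl_out);	/* prints "1 0" */
	return 0;
}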
@@ -2540,7 +2539,7 @@
  * msg:   The message to be displayed.
  */
 void
-ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
+ql_dbg(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...)
 {
 	va_list va;
 	struct va_format vaf;
@@ -2583,8 +2582,7 @@
  * msg:   The message to be displayed.
  */
 void
-ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
-	   const char *fmt, ...)
+ql_dbg_pci(uint level, struct pci_dev *pdev, uint id, const char *fmt, ...)
 {
 	va_list va;
 	struct va_format vaf;
@@ -2620,7 +2618,7 @@
  * msg:   The message to be displayed.
  */
 void
-ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
+ql_log(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...)
 {
 	va_list va;
 	struct va_format vaf;
@@ -2678,8 +2676,7 @@
  * msg:   The message to be displayed.
  */
 void
-ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
-	   const char *fmt, ...)
+ql_log_pci(uint level, struct pci_dev *pdev, uint id, const char *fmt, ...)
 {
 	va_list va;
 	struct va_format vaf;
@@ -2719,7 +2716,7 @@
 }
 
 void
-ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id)
+ql_dump_regs(uint level, scsi_qla_host_t *vha, uint id)
 {
 	int i;
 	struct qla_hw_data *ha = vha->hw;
@@ -2741,13 +2738,13 @@
 	ql_dbg(level, vha, id, "Mailbox registers:\n");
 	for (i = 0; i < 6; i++, mbx_reg++)
 		ql_dbg(level, vha, id,
-		    "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg));
+		    "mbox[%d] %#04x\n", i, RD_REG_WORD(mbx_reg));
 }
 
 
 void
-ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id,
-	uint8_t *buf, uint size)
+ql_dump_buffer(uint level, scsi_qla_host_t *vha, uint id, const void *buf,
+	       uint size)
 {
 	uint cnt;
 
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 8877aa9..bb01b68 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -318,20 +318,20 @@
 				   * as compared to other log levels.
 				   */
 
-extern int ql_errlev;
+extern uint ql_errlev;
 
 void __attribute__((format (printf, 4, 5)))
-ql_dbg(uint32_t, scsi_qla_host_t *vha, int32_t, const char *fmt, ...);
+ql_dbg(uint, scsi_qla_host_t *vha, uint, const char *fmt, ...);
 void __attribute__((format (printf, 4, 5)))
-ql_dbg_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
+ql_dbg_pci(uint, struct pci_dev *pdev, uint, const char *fmt, ...);
 void __attribute__((format (printf, 4, 5)))
 ql_dbg_qp(uint32_t, struct qla_qpair *, int32_t, const char *fmt, ...);
 
 
 void __attribute__((format (printf, 4, 5)))
-ql_log(uint32_t, scsi_qla_host_t *vha, int32_t, const char *fmt, ...);
+ql_log(uint, scsi_qla_host_t *vha, uint, const char *fmt, ...);
 void __attribute__((format (printf, 4, 5)))
-ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
+ql_log_pci(uint, struct pci_dev *pdev, uint, const char *fmt, ...);
 
 void __attribute__((format (printf, 4, 5)))
 ql_log_qp(uint32_t, struct qla_qpair *, int32_t, const char *fmt, ...);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index a9dc9c4..6ffa987 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -34,7 +34,22 @@
 #include <scsi/scsi_transport_fc.h>
 #include <scsi/scsi_bsg_fc.h>
 
+/* Big endian Fibre Channel S_ID (source ID) or D_ID (destination ID). */
+typedef struct {
+	uint8_t domain;
+	uint8_t area;
+	uint8_t al_pa;
+} be_id_t;
+
+/* Little endian Fibre Channel S_ID (source ID) or D_ID (destination ID). */
+typedef struct {
+	uint8_t al_pa;
+	uint8_t area;
+	uint8_t domain;
+} le_id_t;
+
 #include "qla_bsg.h"
+#include "qla_dsd.h"
 #include "qla_nx.h"
 #include "qla_nx2.h"
 #include "qla_nvme.h"
@@ -116,9 +131,9 @@
 #define RD_REG_BYTE_RELAXED(addr)	readb_relaxed(addr)
 #define RD_REG_WORD_RELAXED(addr)	readw_relaxed(addr)
 #define RD_REG_DWORD_RELAXED(addr)	readl_relaxed(addr)
-#define WRT_REG_BYTE(addr, data)	writeb(data,addr)
-#define WRT_REG_WORD(addr, data)	writew(data,addr)
-#define WRT_REG_DWORD(addr, data)	writel(data,addr)
+#define WRT_REG_BYTE(addr, data)	writeb(data, addr)
+#define WRT_REG_WORD(addr, data)	writew(data, addr)
+#define WRT_REG_DWORD(addr, data)	writel(data, addr)
 
 /*
  * ISP83XX specific remote register addresses
@@ -206,7 +221,7 @@
  * 133Mhz slot.
  */
 #define RD_REG_WORD_PIO(addr)		(inw((unsigned long)addr))
-#define WRT_REG_WORD_PIO(addr, data)	(outw(data,(unsigned long)addr))
+#define WRT_REG_WORD_PIO(addr, data)	(outw(data, (unsigned long)addr))
 
 /*
  * Fibre Channel device definitions.
@@ -262,8 +277,8 @@
 	struct get_name_list_extended *l;
 	dma_addr_t		ldma;
 	struct list_head	fcports;
-	spinlock_t		fcports_lock;
 	u32			size;
+	u8			sent;
 };
 /*
  * Timeout timer counts in seconds
@@ -302,7 +317,8 @@
 	uint32_t request_sense_length;
 	uint32_t fw_sense_length;
 	uint8_t *request_sense_ptr;
-	void *ctx;
+	struct ct6_dsd *ct6_ctx;
+	struct crc_context *crc_ctx;
 };
 
 /*
@@ -314,6 +330,7 @@
 #define SRB_CRC_PROT_DMA_VALID		BIT_4	/* DIF: prot DMA valid */
 #define SRB_CRC_CTX_DSD_VALID		BIT_5	/* DIF: dsd_list valid */
 #define SRB_WAKEUP_ON_COMP		BIT_6
+#define SRB_DIF_BUNDL_DMA_VALID		BIT_7   /* DIF: DMA list valid */
 
 /* To identify if a srb is of T10-CRC type. @sp => srb_t pointer */
 #define IS_PROT_IO(sp)	(sp->flags & SRB_CRC_CTX_DSD_VALID)
@@ -341,6 +358,51 @@
 } port_id_t;
 #define INVALID_PORT_ID	0xFFFFFF
 
+static inline le_id_t be_id_to_le(be_id_t id)
+{
+	le_id_t res;
+
+	res.domain = id.domain;
+	res.area   = id.area;
+	res.al_pa  = id.al_pa;
+
+	return res;
+}
+
+static inline be_id_t le_id_to_be(le_id_t id)
+{
+	be_id_t res;
+
+	res.domain = id.domain;
+	res.area   = id.area;
+	res.al_pa  = id.al_pa;
+
+	return res;
+}
+
+static inline port_id_t be_to_port_id(be_id_t id)
+{
+	port_id_t res;
+
+	res.b.domain = id.domain;
+	res.b.area   = id.area;
+	res.b.al_pa  = id.al_pa;
+	res.b.rsvd_1 = 0;
+
+	return res;
+}
+
+static inline be_id_t port_id_to_be_id(port_id_t port_id)
+{
+	be_id_t res;
+
+	res.domain = port_id.b.domain;
+	res.area   = port_id.b.area;
+	res.al_pa  = port_id.b.al_pa;
+
+	return res;
+}
+
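A small usage sketch of the conversion helpers above (the round-trip property follows directly from the field copies; the helper name is hypothetical and not part of the patch):

static inline bool qla_id_roundtrip_ok(be_id_t id)
{
	port_id_t pid = be_to_port_id(id);
	be_id_t back = port_id_to_be_id(pid);

	return back.domain == id.domain && back.area == id.area &&
	       back.al_pa == id.al_pa;
}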
 struct els_logo_payload {
 	uint8_t opcode;
 	uint8_t rsvd[3];
@@ -393,7 +455,7 @@
 			struct els_logo_payload *els_logo_pyld;
 			dma_addr_t els_logo_pyld_dma;
 		} els_logo;
-		struct {
+		struct els_plogi {
 #define ELS_DCMD_PLOGI 0x3
 			uint32_t flags;
 			uint32_t els_cmd;
@@ -519,6 +581,7 @@
 enum {
 	TYPE_SRB,
 	TYPE_TGT_CMD,
+	TYPE_TGT_TMCMD,		/* task management */
 };
 
 typedef struct srb {
@@ -529,9 +592,12 @@
 	uint8_t cmd_type;
 	uint8_t pad[3];
 	atomic_t ref_count;
+	struct kref cmd_kref;	/* need to migrate ref_count over to this */
+	void *priv;
 	wait_queue_head_t nvme_ls_waitq;
 	struct fc_port *fcport;
 	struct scsi_qla_host *vha;
+	unsigned int start_timer:1;
 	uint32_t handle;
 	uint16_t flags;
 	uint16_t type;
@@ -543,19 +609,28 @@
 	u32 gen2;	/* scratch */
 	int rc;
 	int retry_count;
-	struct completion comp;
+	struct completion *comp;
 	union {
 		struct srb_iocb iocb_cmd;
 		struct bsg_job *bsg_job;
 		struct srb_cmd scmd;
 	} u;
-	void (*done)(void *, int);
-	void (*free)(void *);
+	/*
+	 * Report completion status @res and call sp_put(@sp). @res is
+	 * an NVMe status code, a SCSI result (e.g. DID_OK << 16) or a
+	 * QLA_* status value.
+	 */
+	void (*done)(struct srb *sp, int res);
+	/* Stop the timer and free @sp. Only used by the FCP code. */
+	void (*free)(struct srb *sp);
+	/*
+	 * Call nvme_private->fd->done() and free @sp. Only used by the NVMe
+	 * code.
+	 */
+	void (*put_fn)(struct kref *kref);
 } srb_t;
 
 #define GET_CMD_SP(sp) (sp->u.scmd.cmd)
-#define SET_CMD_SP(sp, cmd) (sp->u.scmd.cmd = cmd)
-#define GET_CMD_CTX_SP(sp) (sp->u.scmd.ctx)
 
 #define GET_CMD_SENSE_LEN(sp) \
 	(sp->u.scmd.request_sense_length)
@@ -915,6 +990,11 @@
 #define MBS_LINK_DOWN_ERROR		0x400B
 #define MBS_DIAG_ECHO_TEST_ERROR	0x400C
 
+static inline bool qla2xxx_is_valid_mbs(unsigned int mbs)
+{
+	return MBS_COMMAND_COMPLETE <= mbs && mbs <= MBS_DIAG_ECHO_TEST_ERROR;
+}
+
 /*
  * ISP mailbox asynchronous event status codes
  */
@@ -1031,6 +1111,7 @@
 #define MBC_GET_FIRMWARE_VERSION	8	/* Get firmware revision. */
 #define MBC_LOAD_RISC_RAM		9	/* Load RAM command. */
 #define MBC_DUMP_RISC_RAM		0xa	/* Dump RAM command. */
+#define MBC_SECURE_FLASH_UPDATE		0xa	/* Secure Flash Update(28xx) */
 #define MBC_LOAD_RISC_RAM_EXTENDED	0xb	/* Load RAM extended. */
 #define MBC_DUMP_RISC_RAM_EXTENDED	0xc	/* Dump RAM extended. */
 #define MBC_WRITE_RAM_WORD_EXTENDED	0xd	/* Write RAM word extended */
@@ -1201,6 +1282,10 @@
 #define QLA27XX_IMG_STATUS_VER_MAJOR   0x01
 #define QLA27XX_IMG_STATUS_VER_MINOR    0x00
 #define QLA27XX_IMG_STATUS_SIGN   0xFACEFADE
+#define QLA28XX_IMG_STATUS_SIGN		0xFACEFADF
+#define QLA28XX_AUX_IMG_STATUS_SIGN	0xFACEFAED
+#define QLA27XX_DEFAULT_IMAGE		0
 #define QLA27XX_PRIMARY_IMAGE  1
 #define QLA27XX_SECONDARY_IMAGE    2
 
@@ -1321,8 +1406,8 @@
 	uint16_t response_q_inpointer;
 	uint16_t request_q_length;
 	uint16_t response_q_length;
-	uint32_t request_q_address[2];
-	uint32_t response_q_address[2];
+	__le64   request_q_address __packed;
+	__le64   response_q_address __packed;
 
 	uint16_t lun_enables;
 	uint8_t  command_resource_count;
@@ -1747,12 +1832,10 @@
 	uint16_t dseg_count;		/* Data segment count. */
 	uint8_t scsi_cdb[MAX_CMDSZ]; 	/* SCSI command words. */
 	uint32_t byte_count;		/* Total byte count. */
-	uint32_t dseg_0_address;	/* Data segment 0 address. */
-	uint32_t dseg_0_length;		/* Data segment 0 length. */
-	uint32_t dseg_1_address;	/* Data segment 1 address. */
-	uint32_t dseg_1_length;		/* Data segment 1 length. */
-	uint32_t dseg_2_address;	/* Data segment 2 address. */
-	uint32_t dseg_2_length;		/* Data segment 2 length. */
+	union {
+		struct dsd32 dsd32[3];
+		struct dsd64 dsd64[2];
+	};
 } cmd_entry_t;
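For reference, the dsd32/dsd64 descriptor types used above come from the newly included qla_dsd.h; their layout in the v5.4 source is the classic address/length pair (reproduced here as a sketch — consult qla_dsd.h for the authoritative definition):

struct dsd32 {
	__le32 address;
	__le32 length;
};

struct dsd64 {
	__le64 address;
	__le32 length;
} __packed;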
 
 /*
@@ -1773,10 +1856,7 @@
 	uint16_t dseg_count;		/* Data segment count. */
 	uint8_t scsi_cdb[MAX_CMDSZ];	/* SCSI command words. */
 	uint32_t byte_count;		/* Total byte count. */
-	uint32_t dseg_0_address[2];	/* Data segment 0 address. */
-	uint32_t dseg_0_length;		/* Data segment 0 length. */
-	uint32_t dseg_1_address[2];	/* Data segment 1 address. */
-	uint32_t dseg_1_length;		/* Data segment 1 length. */
+	struct dsd64 dsd[2];
 } cmd_a64_entry_t, request_t;
 
 /*
@@ -1789,20 +1869,7 @@
 	uint8_t sys_define;		/* System defined. */
 	uint8_t entry_status;		/* Entry Status. */
 	uint32_t reserved;
-	uint32_t dseg_0_address;	/* Data segment 0 address. */
-	uint32_t dseg_0_length;		/* Data segment 0 length. */
-	uint32_t dseg_1_address;	/* Data segment 1 address. */
-	uint32_t dseg_1_length;		/* Data segment 1 length. */
-	uint32_t dseg_2_address;	/* Data segment 2 address. */
-	uint32_t dseg_2_length;		/* Data segment 2 length. */
-	uint32_t dseg_3_address;	/* Data segment 3 address. */
-	uint32_t dseg_3_length;		/* Data segment 3 length. */
-	uint32_t dseg_4_address;	/* Data segment 4 address. */
-	uint32_t dseg_4_length;		/* Data segment 4 length. */
-	uint32_t dseg_5_address;	/* Data segment 5 address. */
-	uint32_t dseg_5_length;		/* Data segment 5 length. */
-	uint32_t dseg_6_address;	/* Data segment 6 address. */
-	uint32_t dseg_6_length;		/* Data segment 6 length. */
+	struct dsd32 dsd[7];
 } cont_entry_t;
 
 /*
@@ -1814,16 +1881,7 @@
 	uint8_t entry_count;		/* Entry count. */
 	uint8_t sys_define;		/* System defined. */
 	uint8_t entry_status;		/* Entry Status. */
-	uint32_t dseg_0_address[2];	/* Data segment 0 address. */
-	uint32_t dseg_0_length;		/* Data segment 0 length. */
-	uint32_t dseg_1_address[2];	/* Data segment 1 address. */
-	uint32_t dseg_1_length;		/* Data segment 1 length. */
-	uint32_t dseg_2_address	[2];	/* Data segment 2 address. */
-	uint32_t dseg_2_length;		/* Data segment 2 length. */
-	uint32_t dseg_3_address[2];	/* Data segment 3 address. */
-	uint32_t dseg_3_length;		/* Data segment 3 length. */
-	uint32_t dseg_4_address[2];	/* Data segment 4 address. */
-	uint32_t dseg_4_length;		/* Data segment 4 length. */
+	struct dsd64 dsd[5];
 } cont_a64_entry_t;
 
 #define PO_MODE_DIF_INSERT	0
@@ -1867,8 +1925,7 @@
 			uint16_t	reserved_2;
 			uint16_t	reserved_3;
 			uint32_t	reserved_4;
-			uint32_t	data_address[2];
-			uint32_t	data_length;
+			struct dsd64	data_dsd[1];
 			uint32_t	reserved_5[2];
 			uint32_t	reserved_6;
 		} nobundling;
@@ -1878,11 +1935,8 @@
 			uint16_t	reserved_1;
 			__le16	dseg_count;	/* Data segment count */
 			uint32_t	reserved_2;
-			uint32_t	data_address[2];
-			uint32_t	data_length;
-			uint32_t	dif_address[2];
-			uint32_t	dif_length;	/* Data segment 0
-							 * length */
+			struct dsd64	data_dsd[1];
+			struct dsd64	dif_dsd;
 		} bundling;
 	} u;
 
@@ -1891,6 +1945,13 @@
 	/* List of DMA context transfers */
 	struct list_head dsd_list;
 
+	/* List of DIF Bundling context DMA address */
+	struct list_head ldif_dsd_list;
+	u8 no_ldif_dsd;
+
+	struct list_head ldif_dma_hndl_list;
+	u32 dif_bundl_len;
+	u8 no_dif_bundl;
 	/* This structure should not exceed 512 bytes */
 };
 
@@ -2074,10 +2135,8 @@
 	uint32_t handle2;
 	uint32_t rsp_bytecount;
 	uint32_t req_bytecount;
-	uint32_t dseg_req_address[2];	/* Data segment 0 address. */
-	uint32_t dseg_req_length;	/* Data segment 0 length. */
-	uint32_t dseg_rsp_address[2];	/* Data segment 1 address. */
-	uint32_t dseg_rsp_length;	/* Data segment 1 length. */
+	struct dsd64 req_dsd;
+	struct dsd64 rsp_dsd;
 } ms_iocb_entry_t;
 
 
@@ -2249,7 +2308,10 @@
 	FCT_BROADCAST,
 	FCT_INITIATOR,
 	FCT_TARGET,
-	FCT_NVME
+	FCT_NVME_INITIATOR = 0x10,
+	FCT_NVME_TARGET = 0x20,
+	FCT_NVME_DISCOVERY = 0x40,
+	FCT_NVME = 0xf0,
 } fc_port_type_t;
 
 enum qla_sess_deletion {
@@ -2280,7 +2342,6 @@
 enum discovery_state {
 	DSC_DELETED,
 	DSC_GNN_ID,
-	DSC_GID_PN,
 	DSC_GNL,
 	DSC_LOGIN_PEND,
 	DSC_LOGIN_FAILED,
@@ -2302,23 +2363,6 @@
 	DSC_LS_LOGO_PEND,
 };
 
-enum fcport_mgt_event {
-	FCME_RELOGIN = 1,
-	FCME_RSCN,
-	FCME_GIDPN_DONE,
-	FCME_PLOGI_DONE,	/* Initiator side sent LLIOCB */
-	FCME_PRLI_DONE,
-	FCME_GNL_DONE,
-	FCME_GPSC_DONE,
-	FCME_GPDB_DONE,
-	FCME_GPNID_DONE,
-	FCME_GFFID_DONE,
-	FCME_ADISC_DONE,
-	FCME_GNNID_DONE,
-	FCME_GFPNID_DONE,
-	FCME_ELS_PLOGI_DONE,
-};
-
 enum rscn_addr_format {
 	RSCN_PORT_ADDR,
 	RSCN_AREA_ADDR,
@@ -2351,16 +2395,18 @@
 	unsigned int login_succ:1;
 	unsigned int query:1;
 	unsigned int id_changed:1;
-	unsigned int rscn_rcvd:1;
+	unsigned int scan_needed:1;
+	unsigned int n2n_flag:1;
 
-	struct work_struct nvme_del_work;
 	struct completion nvme_del_done;
 	uint32_t nvme_prli_service_param;
 #define NVME_PRLI_SP_CONF       BIT_7
 #define NVME_PRLI_SP_INITIATOR  BIT_5
 #define NVME_PRLI_SP_TARGET     BIT_4
 #define NVME_PRLI_SP_DISCOVERY  BIT_3
+#define NVME_PRLI_SP_FIRST_BURST	BIT_0
 	uint8_t nvme_flag;
+	uint32_t nvme_first_burst_size;
 #define NVME_FLAG_REGISTERED 4
 #define NVME_FLAG_DELETING 2
 #define NVME_FLAG_RESETTING 1
@@ -2375,11 +2421,13 @@
 	unsigned long expires;
 	struct list_head del_list_entry;
 	struct work_struct free_work;
-
+	struct work_struct reg_work;
+	uint64_t jiffies_at_registration;
 	struct qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX];
 
 	uint16_t tgt_id;
 	uint16_t old_tgt_id;
+	uint16_t sec_since_registration;
 
 	uint8_t fcp_prio;
 
@@ -2399,7 +2447,6 @@
 	uint8_t fc4_type;
 	uint8_t	fc4f_nvme;
 	uint8_t scan_state;
-	uint8_t n2n_flag;
 
 	unsigned long last_queue_full;
 	unsigned long last_ramp_up;
@@ -2412,6 +2459,7 @@
 	struct qla_tgt_sess *tgt_session;
 	struct ct_sns_desc ct_desc;
 	enum discovery_state disc_state;
+	enum discovery_state next_disc_state;
 	enum login_state fw_login_state;
 	unsigned long dm_login_expire;
 	unsigned long plogi_nack_done_deadline;
@@ -2432,7 +2480,6 @@
 #define QLA_FCPORT_FOUND	2
 
 struct event_arg {
-	enum fcport_mgt_event	event;
 	fc_port_t		*fcport;
 	srb_t			*sp;
 	port_id_t		id;
@@ -2451,13 +2498,7 @@
 #define FCS_DEVICE_LOST		3
 #define FCS_ONLINE		4
 
-static const char * const port_state_str[] = {
-	"Unknown",
-	"UNCONFIGURED",
-	"DEAD",
-	"LOST",
-	"ONLINE"
-};
+extern const char *const port_state_str[5];
 
 /*
  * FC port flags.
@@ -2660,6 +2701,7 @@
 #define FDMI_PORT_SPEED_8GB		0x10
 #define FDMI_PORT_SPEED_16GB		0x20
 #define FDMI_PORT_SPEED_32GB		0x40
+#define FDMI_PORT_SPEED_64GB		0x80
 #define FDMI_PORT_SPEED_UNKNOWN		0x8000
 
 #define FC_CLASS_2	0x04
@@ -2760,7 +2802,7 @@
 		/* GA_NXT, GPN_ID, GNN_ID, GFT_ID, GFPN_ID */
 		struct {
 			uint8_t reserved;
-			uint8_t port_id[3];
+			be_id_t port_id;
 		} port_id;
 
 		struct {
@@ -2779,13 +2821,13 @@
 
 		struct {
 			uint8_t reserved;
-			uint8_t port_id[3];
+			be_id_t port_id;
 			uint8_t fc4_types[32];
 		} rft_id;
 
 		struct {
 			uint8_t reserved;
-			uint8_t port_id[3];
+			be_id_t port_id;
 			uint16_t reserved2;
 			uint8_t fc4_feature;
 			uint8_t fc4_type;
@@ -2793,7 +2835,7 @@
 
 		struct {
 			uint8_t reserved;
-			uint8_t port_id[3];
+			be_id_t port_id;
 			uint8_t node_name[8];
 		} rnn_id;
 
@@ -2880,7 +2922,7 @@
 
 struct ct_sns_gid_pt_data {
 	uint8_t control_byte;
-	uint8_t port_id[3];
+	be_id_t port_id;
 };
 
 /* It's the same for both GPN_FT and GNN_FT */
@@ -2910,7 +2952,7 @@
 	union {
 		struct {
 			uint8_t port_type;
-			uint8_t port_id[3];
+			be_id_t port_id;
 			uint8_t port_name[8];
 			uint8_t sym_port_name_len;
 			uint8_t sym_port_name[255];
@@ -2994,6 +3036,7 @@
 enum fc4type_t {
 	FS_FC4TYPE_FCP	= BIT_0,
 	FS_FC4TYPE_NVME	= BIT_1,
+	FS_FCP_IS_N2N = BIT_7,
 };
 
 struct fab_scan_rp {
@@ -3048,7 +3091,7 @@
 		struct {
 			uint16_t buffer_length;
 			uint16_t reserved_1;
-			uint32_t buffer_address[2];
+			__le64	 buffer_address __packed;
 			uint16_t subcommand_length;
 			uint16_t reserved_2;
 			uint16_t subcommand;
@@ -3118,15 +3161,15 @@
 struct isp_operations {
 
 	int (*pci_config) (struct scsi_qla_host *);
-	void (*reset_chip) (struct scsi_qla_host *);
+	int (*reset_chip)(struct scsi_qla_host *);
 	int (*chip_diag) (struct scsi_qla_host *);
 	void (*config_rings) (struct scsi_qla_host *);
-	void (*reset_adapter) (struct scsi_qla_host *);
+	int (*reset_adapter)(struct scsi_qla_host *);
 	int (*nvram_config) (struct scsi_qla_host *);
 	void (*update_fw_options) (struct scsi_qla_host *);
 	int (*load_risc) (struct scsi_qla_host *, uint32_t *);
 
-	char * (*pci_info_str) (struct scsi_qla_host *, char *);
+	char * (*pci_info_str)(struct scsi_qla_host *, char *, size_t);
 	char * (*fw_version_str)(struct scsi_qla_host *, char *, size_t);
 
 	irq_handler_t intr_handler;
@@ -3147,9 +3190,9 @@
 	void *(*prep_ms_fdmi_iocb) (struct scsi_qla_host *, uint32_t,
 	    uint32_t);
 
-	uint8_t *(*read_nvram) (struct scsi_qla_host *, uint8_t *,
+	uint8_t *(*read_nvram)(struct scsi_qla_host *, void *,
 		uint32_t, uint32_t);
-	int (*write_nvram) (struct scsi_qla_host *, uint8_t *, uint32_t,
+	int (*write_nvram)(struct scsi_qla_host *, void *, uint32_t,
 		uint32_t);
 
 	void (*fw_dump) (struct scsi_qla_host *, int);
@@ -3158,16 +3201,16 @@
 	int (*beacon_off) (struct scsi_qla_host *);
 	void (*beacon_blink) (struct scsi_qla_host *);
 
-	uint8_t * (*read_optrom) (struct scsi_qla_host *, uint8_t *,
+	void *(*read_optrom)(struct scsi_qla_host *, void *,
 		uint32_t, uint32_t);
-	int (*write_optrom) (struct scsi_qla_host *, uint8_t *, uint32_t,
+	int (*write_optrom)(struct scsi_qla_host *, void *, uint32_t,
 		uint32_t);
 
 	int (*get_flash_version) (struct scsi_qla_host *, void *);
 	int (*start_scsi) (srb_t *);
 	int (*start_scsi_mq) (srb_t *);
 	int (*abort_isp) (struct scsi_qla_host *);
-	int (*iospace_config)(struct qla_hw_data*);
+	int (*iospace_config)(struct qla_hw_data *);
 	int (*initialize_adapter)(struct scsi_qla_host *);
 };
 
@@ -3212,17 +3255,14 @@
 	QLA_EVT_ASYNC_LOGOUT,
 	QLA_EVT_ASYNC_LOGOUT_DONE,
 	QLA_EVT_ASYNC_ADISC,
-	QLA_EVT_ASYNC_ADISC_DONE,
 	QLA_EVT_UEVENT,
 	QLA_EVT_AENFX,
-	QLA_EVT_GIDPN,
 	QLA_EVT_GPNID,
 	QLA_EVT_UNMAP,
 	QLA_EVT_NEW_SESS,
 	QLA_EVT_GPDB,
 	QLA_EVT_PRLI,
 	QLA_EVT_GPSC,
-	QLA_EVT_UPD_FCPORT,
 	QLA_EVT_GNL,
 	QLA_EVT_NACK,
 	QLA_EVT_RELOGIN,
@@ -3359,7 +3399,8 @@
 #define QLA_MQ_SIZE 32
 #define QLA_MAX_QUEUES 256
 #define ISP_QUE_REG(ha, id) \
-	((ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ? \
+	((ha->mqenable || IS_QLA83XX(ha) || \
+	  IS_QLA27XX(ha) || IS_QLA28XX(ha)) ? \
 	 ((void __iomem *)ha->mqiobase + (QLA_QUE_PAGE * id)) :\
 	 ((void __iomem *)ha->iobase))
 #define QLA_REQ_QUE_ID(tag) \
@@ -3483,6 +3524,9 @@
 	struct list_head qp_list_elem; /* vha->qp_list */
 	struct list_head hints_list;
 	uint16_t cpuid;
+	uint16_t retry_term_cnt;
+	uint32_t retry_term_exchg_addr;
+	uint64_t retry_term_jiff;
 	struct qla_tgt_counters tgt_counters;
 };
 
@@ -3609,6 +3653,8 @@
 		uint32_t	rida_fmt2:1;
 		uint32_t	purge_mbox:1;
 		uint32_t        n2n_bigger:1;
+		uint32_t	secure_adapter:1;
+		uint32_t	secure_fw:1;
 	} flags;
 
 	uint16_t max_exchg;
@@ -3686,12 +3732,15 @@
 #define PORT_SPEED_UNKNOWN 0xFFFF
 #define PORT_SPEED_1GB  0x00
 #define PORT_SPEED_2GB  0x01
+#define PORT_SPEED_AUTO 0x02
 #define PORT_SPEED_4GB  0x03
 #define PORT_SPEED_8GB  0x04
 #define PORT_SPEED_16GB 0x05
 #define PORT_SPEED_32GB 0x06
+#define PORT_SPEED_64GB 0x07
 #define PORT_SPEED_10GB	0x13
 	uint16_t	link_data_rate;         /* F/W operating speed */
+	uint16_t	set_data_rate;		/* Set by user */
 
 	uint8_t		current_topology;
 	uint8_t		prev_topology;
@@ -3715,6 +3764,11 @@
 #define PCI_DEVICE_ID_QLOGIC_ISP2071	0x2071
 #define PCI_DEVICE_ID_QLOGIC_ISP2271	0x2271
 #define PCI_DEVICE_ID_QLOGIC_ISP2261	0x2261
+#define PCI_DEVICE_ID_QLOGIC_ISP2061	0x2061
+#define PCI_DEVICE_ID_QLOGIC_ISP2081	0x2081
+#define PCI_DEVICE_ID_QLOGIC_ISP2089	0x2089
+#define PCI_DEVICE_ID_QLOGIC_ISP2281	0x2281
+#define PCI_DEVICE_ID_QLOGIC_ISP2289	0x2289
 
 	uint32_t	isp_type;
 #define DT_ISP2100                      BIT_0
@@ -3739,7 +3793,12 @@
 #define DT_ISP2071			BIT_19
 #define DT_ISP2271			BIT_20
 #define DT_ISP2261			BIT_21
-#define DT_ISP_LAST			(DT_ISP2261 << 1)
+#define DT_ISP2061			BIT_22
+#define DT_ISP2081			BIT_23
+#define DT_ISP2089			BIT_24
+#define DT_ISP2281			BIT_25
+#define DT_ISP2289			BIT_26
+#define DT_ISP_LAST			(DT_ISP2289 << 1)
 
 	uint32_t	device_type;
 #define DT_T10_PI                       BIT_25
@@ -3774,6 +3833,8 @@
 #define IS_QLA2071(ha)	(DT_MASK(ha) & DT_ISP2071)
 #define IS_QLA2271(ha)	(DT_MASK(ha) & DT_ISP2271)
 #define IS_QLA2261(ha)	(DT_MASK(ha) & DT_ISP2261)
+#define IS_QLA2081(ha)	(DT_MASK(ha) & DT_ISP2081)
+#define IS_QLA2281(ha)	(DT_MASK(ha) & DT_ISP2281)
 
 #define IS_QLA23XX(ha)  (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
 			IS_QLA6312(ha) || IS_QLA6322(ha))
@@ -3783,6 +3844,7 @@
 #define IS_QLA83XX(ha)	(IS_QLA2031(ha) || IS_QLA8031(ha))
 #define IS_QLA84XX(ha)  (IS_QLA8432(ha))
 #define IS_QLA27XX(ha)  (IS_QLA2071(ha) || IS_QLA2271(ha) || IS_QLA2261(ha))
+#define IS_QLA28XX(ha)	(IS_QLA2081(ha) || IS_QLA2281(ha))
 #define IS_QLA24XX_TYPE(ha)     (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
 				IS_QLA84XX(ha))
 #define IS_CNA_CAPABLE(ha)	(IS_QLA81XX(ha) || IS_QLA82XX(ha) || \
@@ -3791,14 +3853,15 @@
 #define IS_QLA2XXX_MIDTYPE(ha)	(IS_QLA24XX(ha) || IS_QLA84XX(ha) || \
 				IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
 				IS_QLA82XX(ha) || IS_QLA83XX(ha) || \
-				IS_QLA8044(ha) || IS_QLA27XX(ha))
+				IS_QLA8044(ha) || IS_QLA27XX(ha) || \
+				IS_QLA28XX(ha))
 #define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha) || \
-				IS_QLA27XX(ha))
+				IS_QLA27XX(ha) || IS_QLA28XX(ha))
 #define IS_NOPOLLING_TYPE(ha)	(IS_QLA81XX(ha) && (ha)->flags.msix_enabled)
 #define IS_FAC_REQUIRED(ha)	(IS_QLA81XX(ha) || IS_QLA83XX(ha) || \
-				IS_QLA27XX(ha))
+				IS_QLA27XX(ha) || IS_QLA28XX(ha))
 #define IS_NOCACHE_VPD_TYPE(ha)	(IS_QLA81XX(ha) || IS_QLA83XX(ha) || \
-				IS_QLA27XX(ha))
+				IS_QLA27XX(ha) || IS_QLA28XX(ha))
 #define IS_ALOGIO_CAPABLE(ha)	(IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha))
 
 #define IS_T10_PI_CAPABLE(ha)   ((ha)->device_type & DT_T10_PI)
@@ -3809,28 +3872,34 @@
 #define HAS_EXTENDED_IDS(ha)    ((ha)->device_type & DT_EXTENDED_IDS)
 #define IS_CT6_SUPPORTED(ha)	((ha)->device_type & DT_CT6_SUPPORTED)
 #define IS_MQUE_CAPABLE(ha)	((ha)->mqenable || IS_QLA83XX(ha) || \
-				IS_QLA27XX(ha))
-#define IS_BIDI_CAPABLE(ha)	((IS_QLA25XX(ha) || IS_QLA2031(ha)))
+				IS_QLA27XX(ha) || IS_QLA28XX(ha))
+#define IS_BIDI_CAPABLE(ha) \
+    (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
 /* Bit 21 of fw_attributes decides the MCTP capabilities */
 #define IS_MCTP_CAPABLE(ha)	(IS_QLA2031(ha) && \
 				((ha)->fw_attributes_ext[0] & BIT_0))
 #define IS_PI_UNINIT_CAPABLE(ha)	(IS_QLA83XX(ha) || IS_QLA27XX(ha))
 #define IS_PI_IPGUARD_CAPABLE(ha)	(IS_QLA83XX(ha) || IS_QLA27XX(ha))
 #define IS_PI_DIFB_DIX0_CAPABLE(ha)	(0)
-#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha)	(IS_QLA83XX(ha) || IS_QLA27XX(ha))
+#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha)	(IS_QLA83XX(ha) || IS_QLA27XX(ha) || \
+					IS_QLA28XX(ha))
 #define IS_PI_SPLIT_DET_CAPABLE(ha)	(IS_PI_SPLIT_DET_CAPABLE_HBA(ha) && \
     (((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22))
-#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \
+				IS_QLA28XX(ha))
 #define IS_TGT_MODE_CAPABLE(ha)	(ha->tgt.atio_q_length)
-#define IS_SHADOW_REG_CAPABLE(ha)  (IS_QLA27XX(ha))
-#define IS_DPORT_CAPABLE(ha)  (IS_QLA83XX(ha) || IS_QLA27XX(ha))
-#define IS_FAWWN_CAPABLE(ha)	(IS_QLA83XX(ha) || IS_QLA27XX(ha))
+#define IS_SHADOW_REG_CAPABLE(ha)  (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+#define IS_DPORT_CAPABLE(ha)  (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \
+				IS_QLA28XX(ha))
+#define IS_FAWWN_CAPABLE(ha)	(IS_QLA83XX(ha) || IS_QLA27XX(ha) || \
+				IS_QLA28XX(ha))
 #define IS_EXCHG_OFFLD_CAPABLE(ha) \
-	(IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
+	(IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
 #define IS_EXLOGIN_OFFLD_CAPABLE(ha) \
-	(IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
+	(IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) || \
+	 IS_QLA27XX(ha) || IS_QLA28XX(ha))
 #define USE_ASYNC_SCAN(ha) (IS_QLA25XX(ha) || IS_QLA81XX(ha) ||\
-	IS_QLA83XX(ha) || IS_QLA27XX(ha))
+	IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
 
 	/* HBA serial number */
 	uint8_t		serial0;
@@ -3839,7 +3908,7 @@
 
 	/* NVRAM configuration data */
 #define MAX_NVRAM_SIZE  4096
-#define VPD_OFFSET      MAX_NVRAM_SIZE / 2
+#define VPD_OFFSET      (MAX_NVRAM_SIZE / 2)
 	uint16_t	nvram_size;
 	uint16_t	nvram_base;
 	void		*nvram;
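Note: the newly parenthesized VPD_OFFSET is the standard macro-hygiene fix. Macro expansion is textual, so an unparenthesized body can rebind to operators at the use site; a minimal illustration with a hypothetical expression:

	/* With the old definition
	 *	#define VPD_OFFSET      MAX_NVRAM_SIZE / 2
	 * a use such as
	 *	off % VPD_OFFSET
	 * expands to (off % MAX_NVRAM_SIZE) / 2, i.e. (off % 4096) / 2,
	 * not the intended off % 2048.  The parenthesized body makes the
	 * macro behave like the constant 2048 in every context.
	 */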
@@ -3874,6 +3943,9 @@
 	void		*sfp_data;
 	dma_addr_t	sfp_data_dma;
 
+	void		*flt;
+	dma_addr_t	flt_dma;
+
 #define XGMAC_DATA_SIZE	4096
 	void		*xgmac_data;
 	dma_addr_t	xgmac_data_dma;
@@ -3956,6 +4028,10 @@
 	uint16_t	fw_subminor_version;
 	uint16_t	fw_attributes;
 	uint16_t	fw_attributes_h;
+#define FW_ATTR_H_NVME_FBURST	BIT_1
+#define FW_ATTR_H_NVME		BIT_10
+#define FW_ATTR_H_NVME_UPDATED  BIT_14
+
 	uint16_t	fw_attributes_ext[2];
 	uint32_t	fw_memory_size;
 	uint32_t	fw_transfer_size;
@@ -3981,18 +4057,23 @@
 	uint8_t		fw_seriallink_options[4];
 	uint16_t	fw_seriallink_options24[4];
 
+	uint8_t		serdes_version[3];
 	uint8_t		mpi_version[3];
 	uint32_t	mpi_capabilities;
 	uint8_t		phy_version[3];
 	uint8_t		pep_version[3];
 
 	/* Firmware dump template */
-	void		*fw_dump_template;
-	uint32_t	fw_dump_template_len;
-	/* Firmware dump information. */
+	struct fwdt {
+		void *template;
+		ulong length;
+		ulong dump_size;
+	} fwdt[2];
 	struct qla2xxx_fw_dump *fw_dump;
 	uint32_t	fw_dump_len;
-	int		fw_dumped;
+	u32		fw_dump_alloc_len;
+	bool		fw_dumped;
+	bool		fw_dump_mpi;
 	unsigned long	fw_dump_cap_flags;
 #define RISC_PAUSE_CMPL		0
 #define DMA_SHUTDOWN_CMPL	1
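Note: with fwdt[2] the driver keeps one firmware-dump template per flash image (primary and secondary) instead of a single template pointer. Consumers are expected to walk both slots and skip the ones that were never loaded; a sketch of that walk, using only the fields declared above (the loop variable is local):

	struct fwdt *fwdt = ha->fwdt;
	int j;

	for (j = 0; j < 2; j++, fwdt++) {
		if (!fwdt->template)
			continue;	/* this image has no template loaded */
		/* ... validate fwdt->template / fwdt->length and use
		 * fwdt->dump_size when sizing the dump buffer ... */
	}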
@@ -4031,7 +4112,6 @@
 	uint16_t	product_id[4];
 
 	uint8_t		model_number[16+1];
-#define BINZERO		"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
 	char		model_desc[80];
 	uint8_t		adapter_id[16+1];
 
@@ -4071,22 +4151,28 @@
 	uint32_t	fdt_protect_sec_cmd;
 	uint32_t	fdt_wrt_sts_reg_cmd;
 
-	uint32_t        flt_region_flt;
-	uint32_t        flt_region_fdt;
-	uint32_t        flt_region_boot;
-	uint32_t        flt_region_boot_sec;
-	uint32_t        flt_region_fw;
-	uint32_t        flt_region_fw_sec;
-	uint32_t        flt_region_vpd_nvram;
-	uint32_t        flt_region_vpd;
-	uint32_t        flt_region_vpd_sec;
-	uint32_t        flt_region_nvram;
-	uint32_t        flt_region_npiv_conf;
-	uint32_t	flt_region_gold_fw;
-	uint32_t	flt_region_fcp_prio;
-	uint32_t	flt_region_bootload;
-	uint32_t	flt_region_img_status_pri;
-	uint32_t	flt_region_img_status_sec;
+	struct {
+		uint32_t	flt_region_flt;
+		uint32_t	flt_region_fdt;
+		uint32_t	flt_region_boot;
+		uint32_t	flt_region_boot_sec;
+		uint32_t	flt_region_fw;
+		uint32_t	flt_region_fw_sec;
+		uint32_t	flt_region_vpd_nvram;
+		uint32_t	flt_region_vpd_nvram_sec;
+		uint32_t	flt_region_vpd;
+		uint32_t	flt_region_vpd_sec;
+		uint32_t	flt_region_nvram;
+		uint32_t	flt_region_nvram_sec;
+		uint32_t	flt_region_npiv_conf;
+		uint32_t	flt_region_gold_fw;
+		uint32_t	flt_region_fcp_prio;
+		uint32_t	flt_region_bootload;
+		uint32_t	flt_region_img_status_pri;
+		uint32_t	flt_region_img_status_sec;
+		uint32_t	flt_region_aux_img_status_pri;
+		uint32_t	flt_region_aux_img_status_sec;
+	};
 	uint8_t         active_image;
 
 	/* Needed for BEACON */
@@ -4179,11 +4265,46 @@
 	struct qlt_hw_data tgt;
 	int	allow_cna_fw_dump;
 	uint32_t fw_ability_mask;
-	uint16_t min_link_speed;
-	uint16_t max_speed_sup;
+	uint16_t min_supported_speed;
+	uint16_t max_supported_speed;
+
+	/* DMA pool for the DIF bundling buffers */
+	struct dma_pool *dif_bundl_pool;
+	#define DIF_BUNDLING_DMA_POOL_SIZE  1024
+	struct {
+		struct {
+			struct list_head head;
+			uint count;
+		} good;
+		struct {
+			struct list_head head;
+			uint count;
+		} unusable;
+	} pool;
+
+	unsigned long long dif_bundle_crossed_pages;
+	unsigned long long dif_bundle_reads;
+	unsigned long long dif_bundle_writes;
+	unsigned long long dif_bundle_kallocs;
+	unsigned long long dif_bundle_dma_allocs;
 
 	atomic_t        nvme_active_aen_cnt;
 	uint16_t        nvme_last_rptd_aen;             /* Last recorded aen count */
+
+	atomic_t zio_threshold;
+	uint16_t last_zio_threshold;
+
+#define DEFAULT_ZIO_THRESHOLD 5
+};
+
+struct active_regions {
+	uint8_t global;
+	struct {
+		uint8_t board_config;
+		uint8_t vpd_nvram;
+		uint8_t npiv_config_0_1;
+		uint8_t npiv_config_2_3;
+	} aux;
 };
 
 #define FW_ABILITY_MAX_SPEED_MASK	0xFUL
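Note: the good/unusable split in the DIF bundling pool above reflects a DMA constraint: bundling buffers must not cross a 4 GB boundary, and buffers that do are parked on the unusable list so they are never handed to the hardware. A sketch of the classification under that assumption (the buffer and list-node names are hypothetical; MSD() is the driver's existing upper-32-bits macro):

	/* A buffer is unusable if its DMA range crosses a 4 GB boundary,
	 * i.e. the upper 32 address bits differ across the buffer. */
	if (MSD(dma) != MSD(dma + DIF_BUNDLING_DMA_POOL_SIZE - 1)) {
		list_add_tail(&buf->list, &ha->pool.unusable.head);
		ha->pool.unusable.count++;
	} else {
		list_add_tail(&buf->list, &ha->pool.good.head);
		ha->pool.good.count++;
	}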
@@ -4192,6 +4313,10 @@
 #define FW_ABILITY_MAX_SPEED(ha)	\
 	(ha->fw_ability_mask & FW_ABILITY_MAX_SPEED_MASK)
 
+#define QLA_GET_DATA_RATE	0
+#define QLA_SET_DATA_RATE_NOLR	1
+#define QLA_SET_DATA_RATE_LR	2 /* Set speed and initiate LR */
+
 /*
  * Qlogic scsi host structure
  */
@@ -4223,6 +4348,7 @@
 		uint32_t	qpairs_req_created:1;
 		uint32_t	qpairs_rsp_created:1;
 		uint32_t	nvme_enabled:1;
+		uint32_t        nvme_first_burst:1;
 	} flags;
 
 	atomic_t	loop_state;
@@ -4263,10 +4389,13 @@
 #define FX00_CRITEMP_RECOVERY	25
 #define FX00_HOST_INFO_RESEND	26
 #define QPAIR_ONLINE_CHECK_NEEDED	27
-#define SET_ZIO_THRESHOLD_NEEDED	28
+#define SET_NVME_ZIO_THRESHOLD_NEEDED	28
 #define DETECT_SFP_CHANGE	29
 #define N2N_LOGIN_NEEDED	30
 #define IOCB_WORK_ACTIVE	31
+#define SET_ZIO_THRESHOLD_NEEDED 32
+#define ISP_ABORT_TO_ROM	33
+#define VPORT_DELETE		34
 
 	unsigned long	pci_flags;
 #define PFLG_DISCONNECTED	0	/* PCI device removed */
@@ -4308,7 +4437,6 @@
 
 	struct		nvme_fc_local_port *nvme_local_port;
 	struct completion nvme_del_done;
-	struct list_head nvme_rport_list;
 
 	uint16_t	fcoe_vlan_id;
 	uint16_t	fcoe_fcf_idx;
@@ -4369,29 +4497,45 @@
 	atomic_t	vref_count;
 	struct qla8044_reset_template reset_tmplt;
 	uint16_t	bbcr;
+
+	uint16_t u_ql2xexchoffld;
+	uint16_t u_ql2xiniexchg;
+	uint16_t qlini_mode;
+	uint16_t ql2xexchoffld;
+	uint16_t ql2xiniexchg;
+
 	struct name_list_extended gnl;
 	/* Count of active session/fcport */
 	int fcport_count;
 	wait_queue_head_t fcport_waitQ;
 	wait_queue_head_t vref_waitq;
-	uint8_t min_link_speed_feat;
+	uint8_t min_supported_speed;
 	uint8_t n2n_node_name[WWN_SIZE];
 	uint8_t n2n_port_name[WWN_SIZE];
 	uint16_t	n2n_id;
 	struct list_head gpnid_list;
 	struct fab_scan scan;
+
+	unsigned int irq_offset;
 } scsi_qla_host_t;
 
 struct qla27xx_image_status {
 	uint8_t image_status_mask;
-	uint16_t generation_number;
-	uint8_t reserved[3];
-	uint8_t ver_minor;
+	uint16_t generation;
 	uint8_t ver_major;
+	uint8_t ver_minor;
+	uint8_t bitmap;		/* 28xx only */
+	uint8_t reserved[2];
 	uint32_t checksum;
 	uint32_t signature;
 } __packed;
 
+/* 28xx aux image status bitmap values */
+#define QLA28XX_AUX_IMG_BOARD_CONFIG		BIT_0
+#define QLA28XX_AUX_IMG_VPD_NVRAM		BIT_1
+#define QLA28XX_AUX_IMG_NPIV_CONFIG_0_1		BIT_2
+#define QLA28XX_AUX_IMG_NPIV_CONFIG_2_3		BIT_3
+
 #define SET_VP_IDX	1
 #define SET_AL_PA	2
 #define RESET_VP_IDX	3
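Note: the new bitmap field (28xx only) reports, per auxiliary region, whether the secondary flash image is active, using the QLA28XX_AUX_IMG_* bits above. A sketch of how a consumer could fold it into struct active_regions; the decode function itself is hypothetical, and QLA27XX_PRIMARY_IMAGE/QLA27XX_SECONDARY_IMAGE are assumed to be the driver's existing image selectors:

	static void qla28xx_decode_aux_bitmap(uint8_t bitmap,
	    struct active_regions *ar)
	{
		ar->aux.board_config =
		    bitmap & QLA28XX_AUX_IMG_BOARD_CONFIG ?
		    QLA27XX_SECONDARY_IMAGE : QLA27XX_PRIMARY_IMAGE;
		ar->aux.vpd_nvram =
		    bitmap & QLA28XX_AUX_IMG_VPD_NVRAM ?
		    QLA27XX_SECONDARY_IMAGE : QLA27XX_PRIMARY_IMAGE;
		ar->aux.npiv_config_0_1 =
		    bitmap & QLA28XX_AUX_IMG_NPIV_CONFIG_0_1 ?
		    QLA27XX_SECONDARY_IMAGE : QLA27XX_PRIMARY_IMAGE;
		ar->aux.npiv_config_2_3 =
		    bitmap & QLA28XX_AUX_IMG_NPIV_CONFIG_2_3 ?
		    QLA27XX_SECONDARY_IMAGE : QLA27XX_PRIMARY_IMAGE;
	}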
@@ -4438,6 +4582,24 @@
 	}					\
 }
 
+
+#define SFUB_CHECKSUM_SIZE	4
+
+struct secure_flash_update_block {
+	uint32_t	block_info;
+	uint32_t	signature_lo;
+	uint32_t	signature_hi;
+	uint32_t	signature_upper[0x3e];
+};
+
+struct secure_flash_update_block_pk {
+	uint32_t	block_info;
+	uint32_t	signature_lo;
+	uint32_t	signature_hi;
+	uint32_t	signature_upper[0x3e];
+	uint32_t	public_key[0x41];
+};
+
 /*
  * Macros to help code, maintain, etc.
  */
@@ -4525,6 +4687,7 @@
 #define QLA_SUSPENDED			0x106
 #define QLA_BUSY			0x107
 #define QLA_ALREADY_REGISTERED		0x109
+#define QLA_OS_TIMER_EXPIRED		0x10a
 
 #define NVRAM_DELAY()		udelay(10)
 
@@ -4538,6 +4701,7 @@
 #define OPTROM_SIZE_81XX	0x400000
 #define OPTROM_SIZE_82XX	0x800000
 #define OPTROM_SIZE_83XX	0x1000000
+#define OPTROM_SIZE_28XX	0x2000000
 
 #define OPTROM_BURST_SIZE	0x1000
 #define OPTROM_BURST_DWORDS	(OPTROM_BURST_SIZE / 4)
@@ -4634,10 +4798,13 @@
 #define AUTO_DETECT_SFP_SUPPORT(_vha)\
 	(ql2xautodetectsfp && !_vha->vp_idx &&		\
 	(IS_QLA25XX(_vha->hw) || IS_QLA81XX(_vha->hw) ||\
-	IS_QLA83XX(_vha->hw) || IS_QLA27XX(_vha->hw)))
+	IS_QLA83XX(_vha->hw) || IS_QLA27XX(_vha->hw) || \
+	 IS_QLA28XX(_vha->hw)))
+
+#define FLASH_SEMAPHORE_REGISTER_ADDR   0x00101016
 
 #define USER_CTRL_IRQ(_ha) (ql2xuctrlirq && QLA_TGT_MODE_ENABLED() && \
-	(IS_QLA27XX(_ha) || IS_QLA83XX(_ha)))
+	(IS_QLA27XX(_ha) || IS_QLA28XX(_ha) || IS_QLA83XX(_ha)))
 
 #define SAVE_TOPO(_ha) { \
 	if (_ha->current_topology)				\
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 0b19008..0a6fb35 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -41,6 +41,7 @@
 qla2x00_dfs_tgt_sess_open(struct inode *inode, struct file *file)
 {
 	scsi_qla_host_t *vha = inode->i_private;
+
 	return single_open(file, qla2x00_dfs_tgt_sess_show, vha);
 }
 
@@ -56,10 +57,9 @@
 {
 	scsi_qla_host_t *vha = s->private;
 	struct qla_hw_data *ha = vha->hw;
-	struct gid_list_info *gid_list;
+	struct gid_list_info *gid_list, *gid;
 	dma_addr_t gid_list_dma;
 	fc_port_t fc_port;
-	char *id_iter;
 	int rc, i;
 	uint16_t entries, loop_id;
 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
@@ -81,13 +81,11 @@
 		if (rc != QLA_SUCCESS)
 			goto out_free_id_list;
 
-		id_iter = (char *)gid_list;
+		gid = gid_list;
 
 		seq_puts(s, "Port Name	Port ID 	Loop ID\n");
 
 		for (i = 0; i < entries; i++) {
-			struct gid_list_info *gid =
-			    (struct gid_list_info *)id_iter;
 			loop_id = le16_to_cpu(gid->loop_id);
 			memset(&fc_port, 0, sizeof(fc_port_t));
 
@@ -98,7 +96,7 @@
 				fc_port.port_name, fc_port.d_id.b.domain,
 				fc_port.d_id.b.area, fc_port.d_id.b.al_pa,
 				fc_port.loop_id);
-			id_iter += ha->gid_list_info_size;
+			gid = (void *)gid + ha->gid_list_info_size;
 		}
 out_free_id_list:
 		dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
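Note: the rewritten loop still has to advance by ha->gid_list_info_size rather than by sizeof(struct gid_list_info): the firmware returns variable-stride records, so array indexing (gid_list[i]) would land on the wrong entry. The void * cast is what makes byte-granular pointer arithmetic explicit; a sketch of the pattern:

	/* Walking variable-stride firmware records. */
	struct gid_list_info *gid = gid_list;

	for (i = 0; i < entries; i++) {
		loop_id = le16_to_cpu(gid->loop_id);
		/* ... consume the entry ... */
		gid = (void *)gid + ha->gid_list_info_size; /* byte stride */
	}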
@@ -161,6 +159,7 @@
 qla_dfs_fw_resource_cnt_open(struct inode *inode, struct file *file)
 {
 	struct scsi_qla_host *vha = inode->i_private;
+
 	return single_open(file, qla_dfs_fw_resource_cnt_show, vha);
 }
 
@@ -193,6 +192,8 @@
 
 	for (i = 0; i < vha->hw->max_qpairs; i++) {
 		qpair = vha->hw->queue_pair_map[i];
+		if (!qpair)
+			continue;
 		qla_core_sbt_cmd += qpair->tgt_counters.qla_core_sbt_cmd;
 		core_qla_que_buf += qpair->tgt_counters.core_qla_que_buf;
 		qla_core_ret_ctio += qpair->tgt_counters.qla_core_ret_ctio;
@@ -248,6 +249,7 @@
 qla_dfs_tgt_counters_open(struct inode *inode, struct file *file)
 {
 	struct scsi_qla_host *vha = inode->i_private;
+
 	return single_open(file, qla_dfs_tgt_counters_show, vha);
 }
 
@@ -384,7 +386,7 @@
 	int rc = 0;
 	unsigned long num_act_qp;
 
-	if (!(IS_QLA27XX(ha) || IS_QLA83XX(ha))) {
+	if (!(IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))) {
 		pr_err("host%ld: this adapter does not support Multi Q.",
 		    vha->host_no);
 		return -EINVAL;
@@ -436,7 +438,7 @@
 	struct qla_hw_data *ha = vha->hw;
 
 	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
-	    !IS_QLA27XX(ha))
+	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
 		goto out;
 	if (!ha->fce)
 		goto out;
@@ -446,11 +448,6 @@
 
 	atomic_set(&qla2x00_dfs_root_count, 0);
 	qla2x00_dfs_root = debugfs_create_dir(QLA2XXX_DRIVER_NAME, NULL);
-	if (!qla2x00_dfs_root) {
-		ql_log(ql_log_warn, vha, 0x00f7,
-		    "Unable to create debugfs root directory.\n");
-		goto out;
-	}
 
 create_dir:
 	if (ha->dfs_dir)
@@ -458,64 +455,28 @@
 
 	mutex_init(&ha->fce_mutex);
 	ha->dfs_dir = debugfs_create_dir(vha->host_str, qla2x00_dfs_root);
-	if (!ha->dfs_dir) {
-		ql_log(ql_log_warn, vha, 0x00f8,
-		    "Unable to create debugfs ha directory.\n");
-		goto out;
-	}
 
 	atomic_inc(&qla2x00_dfs_root_count);
 
 create_nodes:
 	ha->dfs_fw_resource_cnt = debugfs_create_file("fw_resource_count",
 	    S_IRUSR, ha->dfs_dir, vha, &dfs_fw_resource_cnt_ops);
-	if (!ha->dfs_fw_resource_cnt) {
-		ql_log(ql_log_warn, vha, 0x00fd,
-		    "Unable to create debugFS fw_resource_count node.\n");
-		goto out;
-	}
 
 	ha->dfs_tgt_counters = debugfs_create_file("tgt_counters", S_IRUSR,
 	    ha->dfs_dir, vha, &dfs_tgt_counters_ops);
-	if (!ha->dfs_tgt_counters) {
-		ql_log(ql_log_warn, vha, 0xd301,
-		    "Unable to create debugFS tgt_counters node.\n");
-		goto out;
-	}
 
 	ha->tgt.dfs_tgt_port_database = debugfs_create_file("tgt_port_database",
 	    S_IRUSR,  ha->dfs_dir, vha, &dfs_tgt_port_database_ops);
-	if (!ha->tgt.dfs_tgt_port_database) {
-		ql_log(ql_log_warn, vha, 0xd03f,
-		    "Unable to create debugFS tgt_port_database node.\n");
-		goto out;
-	}
 
 	ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha,
 	    &dfs_fce_ops);
-	if (!ha->dfs_fce) {
-		ql_log(ql_log_warn, vha, 0x00f9,
-		    "Unable to create debugfs fce node.\n");
-		goto out;
-	}
 
 	ha->tgt.dfs_tgt_sess = debugfs_create_file("tgt_sess",
 		S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_sess_ops);
-	if (!ha->tgt.dfs_tgt_sess) {
-		ql_log(ql_log_warn, vha, 0xd040,
-		    "Unable to create debugFS tgt_sess node.\n");
-		goto out;
-	}
 
-	if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) {
+	if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))
 		ha->tgt.dfs_naqp = debugfs_create_file("naqp",
 		    0400, ha->dfs_dir, vha, &dfs_naqp_ops);
-		if (!ha->tgt.dfs_naqp) {
-			ql_log(ql_log_warn, vha, 0xd011,
-			    "Unable to create debugFS naqp node.\n");
-			goto out;
-		}
-	}
 out:
 	return 0;
 }
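Note: the dropped NULL checks follow the debugfs API contract. debugfs_create_dir() and debugfs_create_file() return an ERR_PTR on failure rather than NULL, so the old checks never fired; later debugfs calls accept an error-pointer parent gracefully, and drivers are expected to work regardless of debugfs state, so callers simply ignore the result:

	/* Create unconditionally; failures are absorbed by debugfs. */
	ha->dfs_dir = debugfs_create_dir(vha->host_str, qla2x00_dfs_root);
	ha->dfs_fce = debugfs_create_file("fce", 0400, ha->dfs_dir, vha,
	    &dfs_fce_ops);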
diff --git a/drivers/scsi/qla2xxx/qla_dsd.h b/drivers/scsi/qla2xxx/qla_dsd.h
new file mode 100644
index 0000000..2078805
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_dsd.h
@@ -0,0 +1,32 @@
+#ifndef _QLA_DSD_H_
+#define _QLA_DSD_H_
+
+#include <asm/unaligned.h>
+
+/* 32-bit data segment descriptor (8 bytes) */
+struct dsd32 {
+	__le32 address;
+	__le32 length;
+};
+
+static inline void append_dsd32(struct dsd32 **dsd, struct scatterlist *sg)
+{
+	put_unaligned_le32(sg_dma_address(sg), &(*dsd)->address);
+	put_unaligned_le32(sg_dma_len(sg),     &(*dsd)->length);
+	(*dsd)++;
+}
+
+/* 64-bit data segment descriptor (12 bytes) */
+struct dsd64 {
+	__le64 address;
+	__le32 length;
+} __packed;
+
+static inline void append_dsd64(struct dsd64 **dsd, struct scatterlist *sg)
+{
+	put_unaligned_le64(sg_dma_address(sg), &(*dsd)->address);
+	put_unaligned_le32(sg_dma_len(sg),     &(*dsd)->length);
+	(*dsd)++;
+}
+
+#endif
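Note: append_dsd32()/append_dsd64() fold the address/length stores and the cursor advance into one call, so scatter/gather loops reduce to a single statement per segment. A minimal usage sketch; cmd_pkt, cmd and tot_dsds are assumed to come from a mapped SCSI command:

	struct dsd64 *cur_dsd = &cmd_pkt->fcp_dsd;	/* first DSD slot */
	struct scatterlist *sg;
	int i;

	for_each_sg(scsi_sglist(cmd), sg, tot_dsds, i)
		append_dsd64(&cur_dsd, sg);	/* stores address+length,
						 * then advances cur_dsd */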
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 50c1e6c..732bb87 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -10,6 +10,8 @@
 #include <linux/nvme.h>
 #include <linux/nvme-fc.h>
 
+#include "qla_dsd.h"
+
 #define MBS_CHECKSUM_ERROR	0x4010
 #define MBS_INVALID_PRODUCT_KEY	0x4020
 
@@ -339,9 +341,9 @@
 
 	uint16_t prio_request_q_length;
 
-	uint32_t request_q_address[2];
-	uint32_t response_q_address[2];
-	uint32_t prio_request_q_address[2];
+	__le64	 request_q_address __packed;
+	__le64	 response_q_address __packed;
+	__le64	 prio_request_q_address __packed;
 
 	uint16_t msix;
 	uint16_t msix_atio;
@@ -349,7 +351,7 @@
 
 	uint16_t atio_q_inpointer;
 	uint16_t atio_q_length;
-	uint32_t atio_q_address[2];
+	__le64	 atio_q_address __packed;
 
 	uint16_t interrupt_delay_timer;		/* 100us increments. */
 	uint16_t login_timeout;
@@ -453,7 +455,7 @@
 #define BD_WRITE_DATA			BIT_0
 
 	uint16_t fcp_cmnd_dseg_len;		/* Data segment length. */
-	uint32_t fcp_cmnd_dseg_address[2];	/* Data segment address. */
+	__le64	 fcp_cmnd_dseg_address __packed;/* Data segment address. */
 
 	uint16_t reserved[2];			/* Reserved */
 
@@ -463,8 +465,7 @@
 	uint8_t port_id[3];			/* PortID of destination port.*/
 	uint8_t vp_index;
 
-	uint32_t fcp_data_dseg_address[2];	/* Data segment address. */
-	uint16_t fcp_data_dseg_len;		/* Data segment length. */
+	struct dsd64 fcp_dsd;
 };
 
 #define COMMAND_TYPE_6	0x48		/* Command Type 6 entry */
@@ -491,18 +492,18 @@
 #define CF_READ_DATA			BIT_1
 #define CF_WRITE_DATA			BIT_0
 
-	uint16_t fcp_cmnd_dseg_len;		/* Data segment length. */
-	uint32_t fcp_cmnd_dseg_address[2];	/* Data segment address. */
-
-	uint32_t fcp_rsp_dseg_address[2];	/* Data segment address. */
+	uint16_t fcp_cmnd_dseg_len;	/* Data segment length. */
+					/* Data segment address. */
+	__le64	 fcp_cmnd_dseg_address __packed;
+					/* Data segment address. */
+	__le64	 fcp_rsp_dseg_address __packed;
 
 	uint32_t byte_count;		/* Total byte count. */
 
 	uint8_t port_id[3];		/* PortID of destination port. */
 	uint8_t vp_index;
 
-	uint32_t fcp_data_dseg_address[2];	/* Data segment address. */
-	uint32_t fcp_data_dseg_len;		/* Data segment length. */
+	struct dsd64 fcp_dsd;
 };
 
 #define COMMAND_TYPE_7	0x18		/* Command Type 7 entry */
@@ -548,8 +549,7 @@
 	uint8_t port_id[3];		/* PortID of destination port. */
 	uint8_t vp_index;
 
-	uint32_t dseg_0_address[2];	/* Data segment 0 address. */
-	uint32_t dseg_0_len;		/* Data segment 0 length. */
+	struct dsd64 dsd;
 };
 
 #define COMMAND_TYPE_CRC_2	0x6A	/* Command Type CRC_2 (Type 6)
@@ -573,17 +573,17 @@
 
 	uint16_t control_flags;		/* Control flags. */
 
-	uint16_t fcp_cmnd_dseg_len;		/* Data segment length. */
-	uint32_t fcp_cmnd_dseg_address[2];	/* Data segment address. */
-
-	uint32_t fcp_rsp_dseg_address[2];	/* Data segment address. */
+	uint16_t fcp_cmnd_dseg_len;	/* Data segment length. */
+	__le64	 fcp_cmnd_dseg_address __packed;
+					/* Data segment address. */
+	__le64	 fcp_rsp_dseg_address __packed;
 
 	uint32_t byte_count;		/* Total byte count. */
 
 	uint8_t port_id[3];		/* PortID of destination port. */
 	uint8_t vp_index;
 
-	uint32_t crc_context_address[2];	/* Data segment address. */
+	__le64	 crc_context_address __packed;	/* Data segment address. */
 	uint16_t crc_context_len;		/* Data segment length. */
 	uint16_t reserved_1;			/* MUST be set to 0. */
 };
@@ -717,10 +717,7 @@
 	uint32_t rsp_byte_count;
 	uint32_t cmd_byte_count;
 
-	uint32_t dseg_0_address[2];	/* Data segment 0 address. */
-	uint32_t dseg_0_len;		/* Data segment 0 length. */
-	uint32_t dseg_1_address[2];	/* Data segment 1 address. */
-	uint32_t dseg_1_len;		/* Data segment 1 length. */
+	struct dsd64 dsd[2];
 };
 
 /*
@@ -764,13 +761,13 @@
 #define ECF_CLR_PASSTHRU_PEND	BIT_12
 #define ECF_INCL_FRAME_HDR	BIT_11
 
-	uint32_t rx_byte_count;
-	uint32_t tx_byte_count;
+	__le32	 rx_byte_count;
+	__le32	 tx_byte_count;
 
-	uint32_t tx_address[2];		/* Data segment 0 address. */
-	uint32_t tx_len;		/* Data segment 0 length. */
-	uint32_t rx_address[2];		/* Data segment 1 address. */
-	uint32_t rx_len;		/* Data segment 1 length. */
+	__le64	 tx_address __packed;	/* Data segment 0 address. */
+	__le32	 tx_len;		/* Data segment 0 length. */
+	__le64	 rx_address __packed;	/* Data segment 1 address. */
+	__le32	 rx_len;		/* Data segment 1 length. */
 };
 
 struct els_sts_entry_24xx {
@@ -1422,9 +1419,9 @@
         uint16_t control_flags;
         uint32_t io_parameter_0;
         uint32_t io_parameter_1;
-        uint32_t tx_address[2];         /* Data segment 0 address. */
+	__le64	 tx_address __packed;	/* Data segment 0 address. */
         uint32_t tx_len;                /* Data segment 0 length. */
-        uint32_t rx_address[2];         /* Data segment 1 address. */
+	__le64	 rx_address __packed;	/* Data segment 1 address. */
         uint32_t rx_len;                /* Data segment 1 length. */
 };
 
@@ -1515,13 +1512,31 @@
 #define FLT_REG_VPD_SEC_27XX_2	0xD8
 #define FLT_REG_VPD_SEC_27XX_3	0xDA
 
+/* 28xx */
+#define FLT_REG_AUX_IMG_PRI_28XX	0x125
+#define FLT_REG_AUX_IMG_SEC_28XX	0x126
+#define FLT_REG_VPD_SEC_28XX_0		0x10C
+#define FLT_REG_VPD_SEC_28XX_1		0x10E
+#define FLT_REG_VPD_SEC_28XX_2		0x110
+#define FLT_REG_VPD_SEC_28XX_3		0x112
+#define FLT_REG_NVRAM_SEC_28XX_0	0x10D
+#define FLT_REG_NVRAM_SEC_28XX_1	0x10F
+#define FLT_REG_NVRAM_SEC_28XX_2	0x111
+#define FLT_REG_NVRAM_SEC_28XX_3	0x113
+
 struct qla_flt_region {
-	uint32_t code;
+	uint16_t code;
+	uint8_t attribute;
+	uint8_t reserved;
 	uint32_t size;
 	uint32_t start;
 	uint32_t end;
 };
 
+#define FLT_REGION_SIZE		16
+#define FLT_MAX_REGIONS		0xFF
+#define FLT_REGIONS_SIZE	(FLT_REGION_SIZE * FLT_MAX_REGIONS)
+
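Note: splitting the 32-bit code field into code/attribute/reserved keeps the on-flash descriptor at 16 bytes, which the new FLT_REGION_SIZE constant pins down. A compile-time guard for that invariant might look like this (placed inside any function, e.g. during flash-layout parsing):

	BUILD_BUG_ON(sizeof(struct qla_flt_region) != FLT_REGION_SIZE);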
 /* Flash NPIV Configuration Table ********************************************/
 
 struct qla_npiv_header {
@@ -1588,8 +1603,7 @@
 	uint32_t fw_seq_size;
 	uint32_t relative_offset;
 
-	uint32_t dseg_address[2];
-	uint32_t dseg_length;
+	struct dsd64 dsd;
 };
 
 struct verify_chip_rsp_84xx {
@@ -1646,8 +1660,7 @@
 	uint32_t total_byte_cnt;
 	uint32_t reserved4;
 
-	uint32_t dseg_address[2];
-	uint32_t dseg_length;
+	struct dsd64 dsd;
 };
 
 struct access_chip_rsp_84xx {
@@ -1711,6 +1724,10 @@
 #define LR_DIST_FW_SHIFT	(LR_DIST_FW_POS - LR_DIST_NV_POS)
 #define LR_DIST_FW_FIELD(x)	((x) << LR_DIST_FW_SHIFT & 0xf000)
 
+/* FAC semaphore defines */
+#define FAC_SEMAPHORE_UNLOCK    0
+#define FAC_SEMAPHORE_LOCK      1
+
 struct nvram_81xx {
 	/* NVRAM header. */
 	uint8_t id[4];
@@ -1757,7 +1774,7 @@
 	uint16_t reserved_6_3[14];
 
 	/* Offset 192. */
-	uint8_t min_link_speed;
+	uint8_t min_supported_speed;
 	uint8_t reserved_7_0;
 	uint16_t reserved_7[31];
 
@@ -1911,15 +1928,15 @@
 
 	uint16_t prio_request_q_length;
 
-	uint32_t request_q_address[2];
-	uint32_t response_q_address[2];
-	uint32_t prio_request_q_address[2];
+	__le64	 request_q_address __packed;
+	__le64	 response_q_address __packed;
+	__le64	 prio_request_q_address __packed;
 
 	uint8_t reserved_4[8];
 
 	uint16_t atio_q_inpointer;
 	uint16_t atio_q_length;
-	uint32_t atio_q_address[2];
+	__le64	 atio_q_address __packed;
 
 	uint16_t interrupt_delay_timer;		/* 100us increments. */
 	uint16_t login_timeout;
@@ -2005,6 +2022,8 @@
 
 #define FARX_ACCESS_FLASH_CONF_81XX	0x7FFD0000
 #define FARX_ACCESS_FLASH_DATA_81XX	0x7F800000
+#define FARX_ACCESS_FLASH_CONF_28XX	0x7FFD0000
+#define FARX_ACCESS_FLASH_DATA_28XX	0x7F7D0000
 
 /* FCP priority config defines *************************************/
 /* operations */
@@ -2079,6 +2098,7 @@
 #define FA_NPIV_CONF1_ADDR_81	0xD2000
 
 /* 83XX Flash locations -- occupies second 8MB region. */
-#define FA_FLASH_LAYOUT_ADDR_83	0xFC400
+#define FA_FLASH_LAYOUT_ADDR_83	(0x3F1000/4)
+#define FA_FLASH_LAYOUT_ADDR_28	(0x11000/4)
 
 #endif
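Note: the layout addresses are spelled byte_offset/4 because the FA_* constants, like the rest of the driver's flash accessors, count 32-bit words rather than bytes. A hypothetical helper makes the relationship explicit:

	/* FA_* values are flash word addresses; a byte offset in the
	 * flash map converts by dividing by 4. */
	static inline uint32_t flash_byte_to_word_addr(uint32_t byte_off)
	{
		return byte_off / 4;
	}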
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 1789748..d11416d 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -18,14 +18,14 @@
 extern int qla2300_pci_config(struct scsi_qla_host *);
 extern int qla24xx_pci_config(scsi_qla_host_t *);
 extern int qla25xx_pci_config(scsi_qla_host_t *);
-extern void qla2x00_reset_chip(struct scsi_qla_host *);
-extern void qla24xx_reset_chip(struct scsi_qla_host *);
+extern int qla2x00_reset_chip(struct scsi_qla_host *);
+extern int qla24xx_reset_chip(struct scsi_qla_host *);
 extern int qla2x00_chip_diag(struct scsi_qla_host *);
 extern int qla24xx_chip_diag(struct scsi_qla_host *);
 extern void qla2x00_config_rings(struct scsi_qla_host *);
 extern void qla24xx_config_rings(struct scsi_qla_host *);
-extern void qla2x00_reset_adapter(struct scsi_qla_host *);
-extern void qla24xx_reset_adapter(struct scsi_qla_host *);
+extern int qla2x00_reset_adapter(struct scsi_qla_host *);
+extern int qla24xx_reset_adapter(struct scsi_qla_host *);
 extern int qla2x00_nvram_config(struct scsi_qla_host *);
 extern int qla24xx_nvram_config(struct scsi_qla_host *);
 extern int qla81xx_nvram_config(struct scsi_qla_host *);
@@ -38,14 +38,15 @@
 
 extern int qla2x00_perform_loop_resync(scsi_qla_host_t *);
 extern int qla2x00_loop_resync(scsi_qla_host_t *);
-
-extern int qla2x00_find_new_loop_id(scsi_qla_host_t *, fc_port_t *);
+extern void qla2x00_clear_loop_id(fc_port_t *fcport);
 
 extern int qla2x00_fabric_login(scsi_qla_host_t *, fc_port_t *, uint16_t *);
 extern int qla2x00_local_device_login(scsi_qla_host_t *, fc_port_t *);
 
 extern int qla24xx_els_dcmd_iocb(scsi_qla_host_t *, int, port_id_t);
 extern int qla24xx_els_dcmd2_iocb(scsi_qla_host_t *, int, fc_port_t *, bool);
+extern void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha,
+				   struct els_plogi *els_plogi);
 
 extern void qla2x00_update_fcports(scsi_qla_host_t *);
 
@@ -54,7 +55,7 @@
 extern void qla2x00_quiesce_io(scsi_qla_host_t *);
 
 extern void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *);
-
+void qla_register_fcport_fn(struct work_struct *);
 extern void qla2x00_alloc_fw_dump(scsi_qla_host_t *);
 extern void qla2x00_try_to_stop_firmware(scsi_qla_host_t *);
 
@@ -73,8 +74,6 @@
     uint16_t *);
 extern void qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *,
     uint16_t *);
-extern void qla2x00_async_adisc_done(struct scsi_qla_host *, fc_port_t *,
-    uint16_t *);
 struct qla_work_evt *qla2x00_alloc_work(struct scsi_qla_host *,
     enum qla_work_type);
 extern int qla24xx_async_gnl(struct scsi_qla_host *, fc_port_t *);
@@ -82,6 +81,7 @@
 extern void *qla2x00_alloc_iocbs_ready(struct qla_qpair *, srb_t *);
 extern int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *, fc_port_t *);
 
+extern void qla2x00_set_fcport_state(fc_port_t *fcport, int state);
 extern fc_port_t *
 qla2x00_alloc_fcport(scsi_qla_host_t *, gfp_t );
 
@@ -95,11 +95,14 @@
 extern int
 qla2x00_alloc_outstanding_cmds(struct qla_hw_data *, struct req_que *);
 extern int qla2x00_init_rings(scsi_qla_host_t *);
-extern uint8_t qla27xx_find_valid_image(struct scsi_qla_host *);
 extern struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *,
 	int, int, bool);
 extern int qla2xxx_delete_qpair(struct scsi_qla_host *, struct qla_qpair *);
-void qla2x00_fcport_event_handler(scsi_qla_host_t *, struct event_arg *);
+void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea);
+void qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha,
+				     struct event_arg *ea);
+void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
+				  struct event_arg *ea);
 int qla24xx_async_gpdb(struct scsi_qla_host *, fc_port_t *, u8);
 int qla24xx_async_prli(struct scsi_qla_host *, fc_port_t *);
 int qla24xx_async_notify_ack(scsi_qla_host_t *, fc_port_t *,
@@ -109,6 +112,12 @@
 int qla24xx_fcport_handle_login(struct scsi_qla_host *, fc_port_t *);
 int qla24xx_detect_sfp(scsi_qla_host_t *vha);
 int qla24xx_post_gpdb_work(struct scsi_qla_host *, fc_port_t *, u8);
+
+extern void qla28xx_get_aux_images(struct scsi_qla_host *,
+    struct active_regions *);
+extern void qla27xx_get_active_image(struct scsi_qla_host *,
+    struct active_regions *);
+
 void qla2x00_async_prlo_done(struct scsi_qla_host *, fc_port_t *,
     uint16_t *);
 extern int qla2x00_post_async_prlo_work(struct scsi_qla_host *, fc_port_t *,
@@ -118,6 +127,9 @@
 int qla_post_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport);
 void qla_do_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport);
 int qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *);
+void qla_rscn_replay(fc_port_t *fcport);
+extern bool qla24xx_risc_firmware_invalid(uint32_t *);
+
 /*
  * Global Data in qla_os.c source file.
  */
@@ -158,6 +170,8 @@
 extern int ql2xautodetectsfp;
 extern int ql2xenablemsix;
 extern int qla2xuseresexchforels;
+extern int ql2xexlogins;
+extern int ql2xdifbundlinginternalbuffers;
 
 extern int qla2x00_loop_reset(scsi_qla_host_t *);
 extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -205,14 +219,13 @@
 
 extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
 extern void qla2x00_disable_board_on_pci_error(struct work_struct *);
-extern void qla2x00_sp_compl(void *, int);
-extern void qla2xxx_qpair_sp_free_dma(void *);
-extern void qla2xxx_qpair_sp_compl(void *, int);
-extern int qla24xx_post_upd_fcport_work(struct scsi_qla_host *, fc_port_t *);
+extern void qla2x00_sp_compl(srb_t *sp, int);
+extern void qla2xxx_qpair_sp_free_dma(srb_t *sp);
+extern void qla2xxx_qpair_sp_compl(srb_t *sp, int);
+extern void qla24xx_sched_upd_fcport(fc_port_t *);
 void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
 	uint16_t *);
 int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
-int qla24xx_async_abort_cmd(srb_t *, bool);
 int qla24xx_post_relogin_work(struct scsi_qla_host *vha);
 void qla2x00_wait_for_sess_deletion(scsi_qla_host_t *);
 
@@ -235,9 +248,9 @@
     struct vp_rpt_id_entry_24xx *);
 extern void qla2x00_do_dpc_all_vps(scsi_qla_host_t *);
 extern int qla24xx_vport_create_req_sanity_check(struct fc_vport *);
-extern scsi_qla_host_t * qla24xx_create_vhost(struct fc_vport *);
+extern scsi_qla_host_t *qla24xx_create_vhost(struct fc_vport *);
 
-extern void qla2x00_sp_free_dma(void *);
+extern void qla2x00_sp_free_dma(srb_t *sp);
 extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *);
 
 extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int, int);
@@ -265,29 +278,29 @@
 extern void qla2x00_build_scsi_iocbs_64(srb_t *, cmd_entry_t *, uint16_t);
 extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *,
 	uint16_t, struct req_que *);
+extern uint32_t qla2xxx_get_next_handle(struct req_que *req);
 extern int qla2x00_start_scsi(srb_t *sp);
 extern int qla24xx_start_scsi(srb_t *sp);
-int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
-						uint16_t, uint64_t, uint8_t);
+int qla2x00_marker(struct scsi_qla_host *, struct qla_qpair *,
+    uint16_t, uint64_t, uint8_t);
 extern int qla2x00_start_sp(srb_t *);
 extern int qla24xx_dif_start_scsi(srb_t *);
 extern int qla2x00_start_bidir(srb_t *, struct scsi_qla_host *, uint32_t);
 extern int qla2xxx_dif_start_scsi_mq(srb_t *);
+extern void qla2x00_init_timer(srb_t *sp, unsigned long tmo);
 extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *);
 
 extern void *qla2x00_alloc_iocbs(struct scsi_qla_host *, srb_t *);
 extern void *__qla2x00_alloc_iocbs(struct qla_qpair *, srb_t *);
 extern int qla2x00_issue_marker(scsi_qla_host_t *, int);
 extern int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *, srb_t *,
-	uint32_t *, uint16_t, struct qla_tc_param *);
+	struct dsd64 *, uint16_t, struct qla_tc_param *);
 extern int qla24xx_walk_and_build_sglist(struct qla_hw_data *, srb_t *,
-	uint32_t *, uint16_t, struct qla_tc_param *);
+	struct dsd64 *, uint16_t, struct qla_tc_param *);
 extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *,
-	uint32_t *, uint16_t, struct qla_tc_param *);
+	struct dsd64 *, uint16_t, struct qla_tgt_cmd *);
 extern int qla24xx_get_one_block_sg(uint32_t, struct qla2_sgx *, uint32_t *);
 extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *);
-extern int qla24xx_build_scsi_crc_2_iocbs(srb_t *,
-	struct cmd_type_crc_2 *, uint16_t, uint16_t, uint16_t);
 
 /*
  * Global Function Prototypes in qla_mbx.c source file.
@@ -463,6 +476,8 @@
 extern int
 qla81xx_fac_erase_sector(scsi_qla_host_t *, uint32_t, uint32_t);
 
+extern int qla81xx_fac_semaphore_access(scsi_qla_host_t *, int);
+
 extern int
 qla2x00_get_xgmac_stats(scsi_qla_host_t *, dma_addr_t, uint16_t, uint16_t *);
 
@@ -508,6 +523,14 @@
 extern int qla27xx_set_zio_threshold(scsi_qla_host_t *, uint16_t);
 int qla24xx_res_count_wait(struct scsi_qla_host *, uint16_t *, int);
 
+extern int qla28xx_secure_flash_update(scsi_qla_host_t *, uint16_t, uint16_t,
+    uint32_t, dma_addr_t, uint32_t);
+
+extern int qla2xxx_read_remote_register(scsi_qla_host_t *, uint32_t,
+    uint32_t *);
+extern int qla2xxx_write_remote_register(scsi_qla_host_t *, uint32_t,
+    uint32_t);
+
 /*
  * Global Function Prototypes in qla_isr.c source file.
  */
@@ -538,20 +561,21 @@
  * Global Function Prototypes in qla_sup.c source file.
  */
 extern void qla2x00_release_nvram_protection(scsi_qla_host_t *);
-extern uint32_t *qla24xx_read_flash_data(scsi_qla_host_t *, uint32_t *,
-					 uint32_t, uint32_t);
-extern uint8_t *qla2x00_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
-					uint32_t);
-extern uint8_t *qla24xx_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
-					uint32_t);
-extern int qla2x00_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
-				    uint32_t);
-extern int qla24xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
-				    uint32_t);
-extern uint8_t *qla25xx_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
-					uint32_t);
-extern int qla25xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
-				    uint32_t);
+extern int qla24xx_read_flash_data(scsi_qla_host_t *, uint32_t *,
+    uint32_t, uint32_t);
+extern uint8_t *qla2x00_read_nvram_data(scsi_qla_host_t *, void *, uint32_t,
+    uint32_t);
+extern uint8_t *qla24xx_read_nvram_data(scsi_qla_host_t *, void *, uint32_t,
+    uint32_t);
+extern int qla2x00_write_nvram_data(scsi_qla_host_t *, void *, uint32_t,
+    uint32_t);
+extern int qla24xx_write_nvram_data(scsi_qla_host_t *, void *, uint32_t,
+    uint32_t);
+extern uint8_t *qla25xx_read_nvram_data(scsi_qla_host_t *, void *, uint32_t,
+    uint32_t);
+extern int qla25xx_write_nvram_data(scsi_qla_host_t *, void *, uint32_t,
+    uint32_t);
+
 extern int qla2x00_is_a_vp_did(scsi_qla_host_t *, uint32_t);
 bool qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *, uint32_t);
 bool qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *, uint16_t);
@@ -571,18 +595,18 @@
 extern int qla83xx_access_control(scsi_qla_host_t *, uint16_t, uint32_t,
 				  uint32_t, uint16_t *);
 
-extern uint8_t *qla2x00_read_optrom_data(struct scsi_qla_host *, uint8_t *,
+extern void *qla2x00_read_optrom_data(struct scsi_qla_host *, void *,
 					 uint32_t, uint32_t);
-extern int qla2x00_write_optrom_data(struct scsi_qla_host *, uint8_t *,
+extern int qla2x00_write_optrom_data(struct scsi_qla_host *, void *,
 				     uint32_t, uint32_t);
-extern uint8_t *qla24xx_read_optrom_data(struct scsi_qla_host *, uint8_t *,
+extern void *qla24xx_read_optrom_data(struct scsi_qla_host *, void *,
 					 uint32_t, uint32_t);
-extern int qla24xx_write_optrom_data(struct scsi_qla_host *, uint8_t *,
+extern int qla24xx_write_optrom_data(struct scsi_qla_host *, void *,
 				     uint32_t, uint32_t);
-extern uint8_t *qla25xx_read_optrom_data(struct scsi_qla_host *, uint8_t *,
+extern void *qla25xx_read_optrom_data(struct scsi_qla_host *, void *,
 					 uint32_t, uint32_t);
-extern uint8_t *qla8044_read_optrom_data(struct scsi_qla_host *,
-					 uint8_t *, uint32_t, uint32_t);
+extern void *qla8044_read_optrom_data(struct scsi_qla_host *,
+					 void *, uint32_t, uint32_t);
 extern void qla8044_watchdog(struct scsi_qla_host *vha);
 
 extern int qla2x00_get_flash_version(scsi_qla_host_t *, void *);
@@ -607,20 +631,13 @@
 extern void qla8044_fw_dump(scsi_qla_host_t *, int);
 
 extern void qla27xx_fwdump(scsi_qla_host_t *, int);
-extern ulong qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *);
+extern ulong qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *, void *);
 extern int qla27xx_fwdt_template_valid(void *);
 extern ulong qla27xx_fwdt_template_size(void *);
-extern const void *qla27xx_fwdt_template_default(void);
-extern ulong qla27xx_fwdt_template_default_size(void);
 
-extern void qla2x00_dump_regs(scsi_qla_host_t *);
-extern void qla2x00_dump_buffer(uint8_t *, uint32_t);
-extern void qla2x00_dump_buffer_zipped(uint8_t *, uint32_t);
-extern void ql_dump_regs(uint32_t, scsi_qla_host_t *, int32_t);
-extern void ql_dump_buffer(uint32_t, scsi_qla_host_t *, int32_t,
-			   uint8_t *, uint32_t);
 extern void qla2xxx_dump_post_process(scsi_qla_host_t *, int);
-
+extern void ql_dump_regs(uint, scsi_qla_host_t *, uint);
+extern void ql_dump_buffer(uint, scsi_qla_host_t *, uint, const void *, uint);
 /*
  * Global Function Prototypes in qla_gs.c source file.
  */
@@ -644,9 +661,6 @@
 extern int qla2x00_chk_ms_status(scsi_qla_host_t *, ms_iocb_entry_t *,
 	struct ct_sns_rsp *, const char *);
 extern void qla2x00_async_iocb_timeout(void *data);
-extern int qla24xx_async_gidpn(scsi_qla_host_t *, fc_port_t *);
-int qla24xx_post_gidpn_work(struct scsi_qla_host *, fc_port_t *);
-void qla24xx_handle_gidpn_event(scsi_qla_host_t *, struct event_arg *);
 
 extern void qla2x00_free_fcport(fc_port_t *);
 
@@ -677,6 +691,7 @@
  */
 struct device_attribute;
 extern struct device_attribute *qla2x00_host_attrs[];
+extern struct device_attribute *qla2x00_host_attrs_dm[];
 struct fc_function_template;
 extern struct fc_function_template qla2xxx_transport_functions;
 extern struct fc_function_template qla2xxx_transport_vport_functions;
@@ -690,7 +705,7 @@
 extern int qla24xx_update_all_fcp_prio(scsi_qla_host_t *);
 extern int qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *,
 	struct qla_fcp_prio_cfg *, uint8_t);
-
+void qla_insert_tgt_attrs(void);
 /*
  * Global Function Prototypes in qla_dfs.c source file.
  */
@@ -721,10 +736,10 @@
 /* qlafx00 related functions */
 extern int qlafx00_pci_config(struct scsi_qla_host *);
 extern int qlafx00_initialize_adapter(struct scsi_qla_host *);
-extern void qlafx00_soft_reset(scsi_qla_host_t *);
+extern int qlafx00_soft_reset(scsi_qla_host_t *);
 extern int qlafx00_chip_diag(scsi_qla_host_t *);
 extern void qlafx00_config_rings(struct scsi_qla_host *);
-extern char *qlafx00_pci_info_str(struct scsi_qla_host *, char *);
+extern char *qlafx00_pci_info_str(struct scsi_qla_host *, char *, size_t);
 extern char *qlafx00_fw_version_str(struct scsi_qla_host *, char *, size_t);
 extern irqreturn_t qlafx00_intr_handler(int, void *);
 extern void qlafx00_enable_intrs(struct qla_hw_data *);
@@ -764,16 +779,16 @@
 extern int qla82xx_iospace_config(struct qla_hw_data *);
 
 /* Initialization related functions */
-extern void qla82xx_reset_chip(struct scsi_qla_host *);
+extern int qla82xx_reset_chip(struct scsi_qla_host *);
 extern void qla82xx_config_rings(struct scsi_qla_host *);
 extern void qla82xx_watchdog(scsi_qla_host_t *);
 extern int qla82xx_start_firmware(scsi_qla_host_t *);
 
 /* Firmware and flash related functions */
 extern int qla82xx_load_risc(scsi_qla_host_t *, uint32_t *);
-extern uint8_t *qla82xx_read_optrom_data(struct scsi_qla_host *, uint8_t *,
+extern void *qla82xx_read_optrom_data(struct scsi_qla_host *, void *,
 					 uint32_t, uint32_t);
-extern int qla82xx_write_optrom_data(struct scsi_qla_host *, uint8_t *,
+extern int qla82xx_write_optrom_data(struct scsi_qla_host *, void *,
 				     uint32_t, uint32_t);
 
 /* Mailbox related functions */
@@ -782,10 +797,10 @@
 
 /* IOCB related functions */
 extern int qla82xx_start_scsi(srb_t *);
-extern void qla2x00_sp_free(void *);
+extern void qla2x00_sp_free(srb_t *sp);
 extern void qla2x00_sp_timeout(struct timer_list *);
-extern void qla2x00_bsg_job_done(void *, int);
-extern void qla2x00_bsg_sp_free(void *);
+extern void qla2x00_bsg_job_done(srb_t *sp, int);
+extern void qla2x00_bsg_sp_free(srb_t *sp);
 extern void qla2x00_start_iocbs(struct scsi_qla_host *, struct req_que *);
 
 /* Interrupt related */
@@ -814,8 +829,8 @@
 extern void qla8xxx_dev_failed_handler(scsi_qla_host_t *);
 extern void qla82xx_clear_qsnt_ready(scsi_qla_host_t *);
 
-extern void qla2x00_set_model_info(scsi_qla_host_t *, uint8_t *,
-				   size_t, char *);
+extern void qla2x00_set_model_info(scsi_qla_host_t *, uint8_t *, size_t,
+				   const char *);
 extern int qla82xx_mbx_intr_enable(scsi_qla_host_t *);
 extern int qla82xx_mbx_intr_disable(scsi_qla_host_t *);
 extern void qla82xx_start_iocbs(scsi_qla_host_t *);
@@ -869,7 +884,7 @@
 void qla8044_get_minidump(struct scsi_qla_host *vha);
 int qla8044_collect_md_data(struct scsi_qla_host *vha);
 extern int qla8044_md_get_template(scsi_qla_host_t *);
-extern int qla8044_write_optrom_data(struct scsi_qla_host *, uint8_t *,
+extern int qla8044_write_optrom_data(struct scsi_qla_host *, void *,
 				     uint32_t, uint32_t);
 extern irqreturn_t qla8044_intr_handler(int, void *);
 extern void qla82xx_mbx_completion(scsi_qla_host_t *, uint16_t);
@@ -897,5 +912,9 @@
 void qlt_update_host_map(struct scsi_qla_host *, port_id_t);
 void qlt_remove_target_resources(struct qla_hw_data *);
 void qlt_clr_qp_table(struct scsi_qla_host *vha);
+void qlt_set_mode(struct scsi_qla_host *);
+int qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode);
 
+/* nvme.c */
+void qla_nvme_unregister_remote_port(struct fc_port *fcport);
 #endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index de3f2a0..5298ed1 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -45,13 +45,11 @@
 	ms_pkt->rsp_bytecount = cpu_to_le32(arg->rsp_size);
 	ms_pkt->req_bytecount = cpu_to_le32(arg->req_size);
 
-	ms_pkt->dseg_req_address[0] = cpu_to_le32(LSD(arg->req_dma));
-	ms_pkt->dseg_req_address[1] = cpu_to_le32(MSD(arg->req_dma));
-	ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
+	put_unaligned_le64(arg->req_dma, &ms_pkt->req_dsd.address);
+	ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
 
-	ms_pkt->dseg_rsp_address[0] = cpu_to_le32(LSD(arg->rsp_dma));
-	ms_pkt->dseg_rsp_address[1] = cpu_to_le32(MSD(arg->rsp_dma));
-	ms_pkt->dseg_rsp_length = ms_pkt->rsp_bytecount;
+	put_unaligned_le64(arg->rsp_dma, &ms_pkt->rsp_dsd.address);
+	ms_pkt->rsp_dsd.length = ms_pkt->rsp_bytecount;
 
 	vha->qla_stats.control_requests++;
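Note: a __packed __le64 written with put_unaligned_le64() stores exactly the same bytes as the old LSD/MSD pair, since the low 32-bit word of a little-endian 64-bit value comes first:

	/* Old: split the 64-bit DMA address into two LE 32-bit words. */
	ms_pkt->dseg_req_address[0] = cpu_to_le32(LSD(arg->req_dma));
	ms_pkt->dseg_req_address[1] = cpu_to_le32(MSD(arg->req_dma));

	/* New: one unaligned little-endian 64-bit store. */
	put_unaligned_le64(arg->req_dma, &ms_pkt->req_dsd.address);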
 
@@ -83,13 +81,11 @@
 	ct_pkt->rsp_byte_count = cpu_to_le32(arg->rsp_size);
 	ct_pkt->cmd_byte_count = cpu_to_le32(arg->req_size);
 
-	ct_pkt->dseg_0_address[0] = cpu_to_le32(LSD(arg->req_dma));
-	ct_pkt->dseg_0_address[1] = cpu_to_le32(MSD(arg->req_dma));
-	ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
+	put_unaligned_le64(arg->req_dma, &ct_pkt->dsd[0].address);
+	ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;
 
-	ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(arg->rsp_dma));
-	ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(arg->rsp_dma));
-	ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
+	put_unaligned_le64(arg->rsp_dma, &ct_pkt->dsd[1].address);
+	ct_pkt->dsd[1].length = ct_pkt->rsp_byte_count;
 	ct_pkt->vp_index = vha->vp_idx;
 
 	vha->qla_stats.control_requests++;
@@ -152,8 +148,8 @@
 				    vha->d_id.b.area, vha->d_id.b.al_pa,
 				    comp_status, ct_rsp->header.response);
 				ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha,
-				    0x2078, (uint8_t *)&ct_rsp->header,
-				    sizeof(struct ct_rsp_hdr));
+				    0x2078, ct_rsp,
+				    offsetof(typeof(*ct_rsp), rsp));
 				rval = QLA_INVALID_COMMAND;
 			} else
 				rval = QLA_SUCCESS;
@@ -230,9 +226,7 @@
 	ct_rsp = &ha->ct_sns->p.rsp;
 
 	/* Prepare CT arguments -- port_id */
-	ct_req->req.port_id.port_id[0] = fcport->d_id.b.domain;
-	ct_req->req.port_id.port_id[1] = fcport->d_id.b.area;
-	ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa;
+	ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id);
 
 	/* Execute MS IOCB */
 	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
@@ -246,9 +240,7 @@
 		rval = QLA_FUNCTION_FAILED;
 	} else {
 		/* Populate fc_port_t entry. */
-		fcport->d_id.b.domain = ct_rsp->rsp.ga_nxt.port_id[0];
-		fcport->d_id.b.area = ct_rsp->rsp.ga_nxt.port_id[1];
-		fcport->d_id.b.al_pa = ct_rsp->rsp.ga_nxt.port_id[2];
+		fcport->d_id = be_to_port_id(ct_rsp->rsp.ga_nxt.port_id);
 
 		memcpy(fcport->node_name, ct_rsp->rsp.ga_nxt.node_name,
 		    WWN_SIZE);
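Note: port_id_to_be_id() and be_to_port_id() are introduced elsewhere in the series; consistent with their use here, they are plain field-by-field conversions between the host-order port_id_t union and the 3-byte wire-order be_id_t. A sketch under that assumption:

	static inline be_id_t port_id_to_be_id(port_id_t port_id)
	{
		be_id_t res = {
			.domain = port_id.b.domain,
			.area = port_id.b.area,
			.al_pa = port_id.b.al_pa,
		};

		return res;
	}

	static inline port_id_t be_to_port_id(be_id_t id)
	{
		port_id_t res;

		res.b.domain = id.domain;
		res.b.area = id.area;
		res.b.al_pa = id.al_pa;
		res.b.rsvd_1 = 0;

		return res;
	}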
@@ -341,9 +333,7 @@
 		/* Set port IDs in switch info list. */
 		for (i = 0; i < ha->max_fibre_devices; i++) {
 			gid_data = &ct_rsp->rsp.gid_pt.entries[i];
-			list[i].d_id.b.domain = gid_data->port_id[0];
-			list[i].d_id.b.area = gid_data->port_id[1];
-			list[i].d_id.b.al_pa = gid_data->port_id[2];
+			list[i].d_id = be_to_port_id(gid_data->port_id);
 			memset(list[i].fabric_port_name, 0, WWN_SIZE);
 			list[i].fp_speed = PORT_SPEED_UNKNOWN;
 
@@ -407,9 +397,7 @@
 		ct_rsp = &ha->ct_sns->p.rsp;
 
 		/* Prepare CT arguments -- port_id */
-		ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain;
-		ct_req->req.port_id.port_id[1] = list[i].d_id.b.area;
-		ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
+		ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
 
 		/* Execute MS IOCB */
 		rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
@@ -476,9 +464,7 @@
 		ct_rsp = &ha->ct_sns->p.rsp;
 
 		/* Prepare CT arguments -- port_id */
-		ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain;
-		ct_req->req.port_id.port_id[1] = list[i].d_id.b.area;
-		ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
+		ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
 
 		/* Execute MS IOCB */
 		rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
@@ -513,9 +499,8 @@
 	return (rval);
 }
 
-static void qla2x00_async_sns_sp_done(void *s, int rc)
+static void qla2x00_async_sns_sp_done(srb_t *sp, int rc)
 {
-	struct srb *sp = s;
 	struct scsi_qla_host *vha = sp->vha;
 	struct ct_sns_pkt *ct_sns;
 	struct qla_work_evt *e;
@@ -643,9 +628,7 @@
 	ct_req = qla2x00_prep_ct_req(ct_sns, RFT_ID_CMD, RFT_ID_RSP_SIZE);
 
 	/* Prepare CT arguments -- port_id, FC-4 types */
-	ct_req->req.rft_id.port_id[0] = vha->d_id.b.domain;
-	ct_req->req.rft_id.port_id[1] = vha->d_id.b.area;
-	ct_req->req.rft_id.port_id[2] = vha->d_id.b.al_pa;
+	ct_req->req.rft_id.port_id = port_id_to_be_id(vha->d_id);
 	ct_req->req.rft_id.fc4_types[2] = 0x01;		/* FCP-3 */
 
 	if (vha->flags.nvme_enabled)
@@ -657,15 +640,16 @@
 	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
 	sp->done = qla2x00_async_sns_sp_done;
 
+	ql_dbg(ql_dbg_disc, vha, 0xffff,
+	    "Async-%s - hdl=%x portid %06x.\n",
+	    sp->name, sp->handle, d_id->b24);
+
 	rval = qla2x00_start_sp(sp);
 	if (rval != QLA_SUCCESS) {
 		ql_dbg(ql_dbg_disc, vha, 0x2043,
 		    "RFT_ID issue IOCB failed (%d).\n", rval);
 		goto done_free_sp;
 	}
-	ql_dbg(ql_dbg_disc, vha, 0xffff,
-	    "Async-%s - hdl=%x portid %06x.\n",
-	    sp->name, sp->handle, d_id->b24);
 	return rval;
 done_free_sp:
 	sp->free(sp);
@@ -740,9 +724,7 @@
 	ct_req = qla2x00_prep_ct_req(ct_sns, RFF_ID_CMD, RFF_ID_RSP_SIZE);
 
 	/* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */
-	ct_req->req.rff_id.port_id[0] = d_id->b.domain;
-	ct_req->req.rff_id.port_id[1] = d_id->b.area;
-	ct_req->req.rff_id.port_id[2] = d_id->b.al_pa;
+	ct_req->req.rff_id.port_id = port_id_to_be_id(*d_id);
 	ct_req->req.rff_id.fc4_feature = fc4feature;
 	ct_req->req.rff_id.fc4_type = fc4type;		/* SCSI - FCP */
 
@@ -752,6 +734,10 @@
 	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
 	sp->done = qla2x00_async_sns_sp_done;
 
+	ql_dbg(ql_dbg_disc, vha, 0xffff,
+	    "Async-%s - hdl=%x portid %06x feature %x type %x.\n",
+	    sp->name, sp->handle, d_id->b24, fc4feature, fc4type);
+
 	rval = qla2x00_start_sp(sp);
 	if (rval != QLA_SUCCESS) {
 		ql_dbg(ql_dbg_disc, vha, 0x2047,
@@ -759,9 +745,6 @@
 		goto done_free_sp;
 	}
 
-	ql_dbg(ql_dbg_disc, vha, 0xffff,
-	    "Async-%s - hdl=%x portid %06x feature %x type %x.\n",
-	    sp->name, sp->handle, d_id->b24, fc4feature, fc4type);
 	return rval;
 
 done_free_sp:
@@ -832,9 +815,7 @@
 	ct_req = qla2x00_prep_ct_req(ct_sns, RNN_ID_CMD, RNN_ID_RSP_SIZE);
 
 	/* Prepare CT arguments -- port_id, node_name */
-	ct_req->req.rnn_id.port_id[0] = vha->d_id.b.domain;
-	ct_req->req.rnn_id.port_id[1] = vha->d_id.b.area;
-	ct_req->req.rnn_id.port_id[2] = vha->d_id.b.al_pa;
+	ct_req->req.rnn_id.port_id = port_id_to_be_id(vha->d_id);
 	memcpy(ct_req->req.rnn_id.node_name, vha->node_name, WWN_SIZE);
 
 	sp->u.iocb_cmd.u.ctarg.req_size = RNN_ID_REQ_SIZE;
@@ -844,15 +825,16 @@
 	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
 	sp->done = qla2x00_async_sns_sp_done;
 
+	ql_dbg(ql_dbg_disc, vha, 0xffff,
+	    "Async-%s - hdl=%x portid %06x\n",
+	    sp->name, sp->handle, d_id->b24);
+
 	rval = qla2x00_start_sp(sp);
 	if (rval != QLA_SUCCESS) {
 		ql_dbg(ql_dbg_disc, vha, 0x204d,
 		    "RNN_ID issue IOCB failed (%d).\n", rval);
 		goto done_free_sp;
 	}
-	ql_dbg(ql_dbg_disc, vha, 0xffff,
-	    "Async-%s - hdl=%x portid %06x\n",
-	    sp->name, sp->handle, d_id->b24);
 
 	return rval;
 
@@ -957,15 +939,16 @@
 	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
 	sp->done = qla2x00_async_sns_sp_done;
 
+	ql_dbg(ql_dbg_disc, vha, 0xffff,
+	    "Async-%s - hdl=%x.\n",
+	    sp->name, sp->handle);
+
 	rval = qla2x00_start_sp(sp);
 	if (rval != QLA_SUCCESS) {
 		ql_dbg(ql_dbg_disc, vha, 0x2043,
 		    "RFT_ID issue IOCB failed (%d).\n", rval);
 		goto done_free_sp;
 	}
-	ql_dbg(ql_dbg_disc, vha, 0xffff,
-	    "Async-%s - hdl=%x.\n",
-	    sp->name, sp->handle);
 
 	return rval;
 
@@ -996,8 +979,7 @@
 	memset(sns_cmd, 0, sizeof(struct sns_cmd_pkt));
 	wc = data_size / 2;			/* Size in 16bit words. */
 	sns_cmd->p.cmd.buffer_length = cpu_to_le16(wc);
-	sns_cmd->p.cmd.buffer_address[0] = cpu_to_le32(LSD(ha->sns_cmd_dma));
-	sns_cmd->p.cmd.buffer_address[1] = cpu_to_le32(MSD(ha->sns_cmd_dma));
+	put_unaligned_le64(ha->sns_cmd_dma, &sns_cmd->p.cmd.buffer_address);
 	sns_cmd->p.cmd.subcommand_length = cpu_to_le16(scmd_len);
 	sns_cmd->p.cmd.subcommand = cpu_to_le16(cmd);
 	wc = (data_size - 16) / 4;		/* Size in 32bit words. */
@@ -1381,6 +1363,7 @@
 	int ret, rval;
 	uint16_t mb[MAILBOX_REGISTER_COUNT];
 	struct qla_hw_data *ha = vha->hw;
+
 	ret = QLA_SUCCESS;
 	if (vha->flags.management_server_logged_in)
 		return ret;
@@ -1419,6 +1402,7 @@
 {
 	ms_iocb_entry_t *ms_pkt;
 	struct qla_hw_data *ha = vha->hw;
+
 	ms_pkt = ha->ms_iocb;
 	memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
 
@@ -1432,13 +1416,11 @@
 	ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size);
 	ms_pkt->req_bytecount = cpu_to_le32(req_size);
 
-	ms_pkt->dseg_req_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
-	ms_pkt->dseg_req_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
-	ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
+	put_unaligned_le64(ha->ct_sns_dma, &ms_pkt->req_dsd.address);
+	ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
 
-	ms_pkt->dseg_rsp_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
-	ms_pkt->dseg_rsp_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
-	ms_pkt->dseg_rsp_length = ms_pkt->rsp_bytecount;
+	put_unaligned_le64(ha->ct_sns_dma, &ms_pkt->rsp_dsd.address);
+	ms_pkt->rsp_dsd.length = ms_pkt->rsp_bytecount;
 
 	return ms_pkt;
 }
@@ -1470,19 +1452,17 @@
 	ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
 	ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
 
-	ct_pkt->dseg_0_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
-	ct_pkt->dseg_0_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
-	ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
+	put_unaligned_le64(ha->ct_sns_dma, &ct_pkt->dsd[0].address);
+	ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;
 
-	ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
-	ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
-	ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
+	put_unaligned_le64(ha->ct_sns_dma, &ct_pkt->dsd[1].address);
+	ct_pkt->dsd[1].length = ct_pkt->rsp_byte_count;
 	ct_pkt->vp_index = vha->vp_idx;
 
 	return ct_pkt;
 }
 
-static inline ms_iocb_entry_t *
+static void
 qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size)
 {
 	struct qla_hw_data *ha = vha->hw;
@@ -1491,13 +1471,11 @@
 
 	if (IS_FWI2_CAPABLE(ha)) {
 		ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
-		ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
+		ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;
 	} else {
 		ms_pkt->req_bytecount = cpu_to_le32(req_size);
-		ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
+		ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
 	}
-
-	return ms_pkt;
 }
 
 /**
@@ -1560,7 +1538,7 @@
 	/* Attributes */
 	ct_req->req.rhba.attrs.count =
 	    cpu_to_be32(FDMI_HBA_ATTR_COUNT);
-	entries = ct_req->req.rhba.hba_identifier;
+	entries = &ct_req->req;
 
 	/* Nodename. */
 	eiter = entries + size;
@@ -1769,7 +1747,7 @@
 
 	/* Attributes */
 	ct_req->req.rpa.attrs.count = cpu_to_be32(FDMI_PORT_ATTR_COUNT);
-	entries = ct_req->req.rpa.port_name;
+	entries = &ct_req->req;
 
 	/* FC4 types. */
 	eiter = entries + size;
@@ -1790,7 +1768,7 @@
 	if (IS_CNA_CAPABLE(ha))
 		eiter->a.sup_speed = cpu_to_be32(
 		    FDMI_PORT_SPEED_10GB);
-	else if (IS_QLA27XX(ha))
+	else if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
 		eiter->a.sup_speed = cpu_to_be32(
 		    FDMI_PORT_SPEED_32GB|
 		    FDMI_PORT_SPEED_16GB|
@@ -1982,7 +1960,7 @@
 
 	/* Attributes */
 	ct_req->req.rhba2.attrs.count = cpu_to_be32(FDMIV2_HBA_ATTR_COUNT);
-	entries = ct_req->req.rhba2.hba_identifier;
+	entries = &ct_req->req;
 
 	/* Nodename. */
 	eiter = entries + size;
@@ -2341,7 +2319,7 @@
 
 	/* Attributes */
 	ct_req->req.rpa2.attrs.count = cpu_to_be32(FDMIV2_PORT_ATTR_COUNT);
-	entries = ct_req->req.rpa2.port_name;
+	entries = &ct_req->req;
 
 	/* FC4 types. */
 	eiter = entries + size;
@@ -2369,7 +2347,7 @@
 	if (IS_CNA_CAPABLE(ha))
 		eiter->a.sup_speed = cpu_to_be32(
 		    FDMI_PORT_SPEED_10GB);
-	else if (IS_QLA27XX(ha))
+	else if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
 		eiter->a.sup_speed = cpu_to_be32(
 		    FDMI_PORT_SPEED_32GB|
 		    FDMI_PORT_SPEED_16GB|
@@ -2442,7 +2420,7 @@
 	eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
 	eiter->len = cpu_to_be16(4 + 4);
 	eiter->a.max_frame_size = IS_FWI2_CAPABLE(ha) ?
-	    le16_to_cpu(icb24->frame_payload_size):
+	    le16_to_cpu(icb24->frame_payload_size) :
 	    le16_to_cpu(ha->init_cb->frame_payload_size);
 	eiter->a.max_frame_size = cpu_to_be32(eiter->a.max_frame_size);
 	size += 4 + 4;
@@ -2733,9 +2711,7 @@
 		ct_rsp = &ha->ct_sns->p.rsp;
 
 		/* Prepare CT arguments -- port_id */
-		ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain;
-		ct_req->req.port_id.port_id[1] = list[i].d_id.b.area;
-		ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
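+		/* port_id_to_be_id() packs domain/area/al_pa into the
+		 * big-endian wire format in one step. */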
+		ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
 
 		/* Execute MS IOCB */
 		rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
@@ -2779,6 +2755,31 @@
 	return &p->p.req;
 }
 
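+/*
+ * Map the bit flag returned by a GPSC query onto the driver's
+ * PORT_SPEED_* encoding. Typical use, as in the completion
+ * handlers below:
+ *
+ *	fcport->fp_speed = qla2x00_port_speed_capability(
+ *	    be16_to_cpu(ct_rsp->rsp.gpsc.speed));
+ */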
+static uint16_t
+qla2x00_port_speed_capability(uint16_t speed)
+{
+	switch (speed) {
+	case BIT_15:
+		return PORT_SPEED_1GB;
+	case BIT_14:
+		return PORT_SPEED_2GB;
+	case BIT_13:
+		return PORT_SPEED_4GB;
+	case BIT_12:
+		return PORT_SPEED_10GB;
+	case BIT_11:
+		return PORT_SPEED_8GB;
+	case BIT_10:
+		return PORT_SPEED_16GB;
+	case BIT_8:
+		return PORT_SPEED_32GB;
+	case BIT_7:
+		return PORT_SPEED_64GB;
+	default:
+		return PORT_SPEED_UNKNOWN;
+	}
+}
+
 /**
  * qla2x00_gpsc() - FCS Get Port Speed Capabilities (GPSC) query.
  * @vha: HA context
@@ -2851,31 +2852,8 @@
 			}
 			rval = QLA_FUNCTION_FAILED;
 		} else {
-			/* Save port-speed */
-			switch (be16_to_cpu(ct_rsp->rsp.gpsc.speed)) {
-			case BIT_15:
-				list[i].fp_speed = PORT_SPEED_1GB;
-				break;
-			case BIT_14:
-				list[i].fp_speed = PORT_SPEED_2GB;
-				break;
-			case BIT_13:
-				list[i].fp_speed = PORT_SPEED_4GB;
-				break;
-			case BIT_12:
-				list[i].fp_speed = PORT_SPEED_10GB;
-				break;
-			case BIT_11:
-				list[i].fp_speed = PORT_SPEED_8GB;
-				break;
-			case BIT_10:
-				list[i].fp_speed = PORT_SPEED_16GB;
-				break;
-			case BIT_8:
-				list[i].fp_speed = PORT_SPEED_32GB;
-				break;
-			}
-
+			list[i].fp_speed = qla2x00_port_speed_capability(
+			    be16_to_cpu(ct_rsp->rsp.gpsc.speed));
 			ql_dbg(ql_dbg_disc, vha, 0x205b,
 			    "GPSC ext entry - fpn "
 			    "%8phN speeds=%04x speed=%04x.\n",
@@ -2937,9 +2915,7 @@
 		ct_rsp = &ha->ct_sns->p.rsp;
 
 		/* Prepare CT arguments -- port_id */
-		ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain;
-		ct_req->req.port_id.port_id[1] = list[i].d_id.b.area;
-		ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
+		ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
 
 		/* Execute MS IOCB */
 		rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
@@ -2973,237 +2949,6 @@
 	}
 }
 
-/* GID_PN completion processing. */
-void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
-{
-	fc_port_t *fcport = ea->fcport;
-
-	ql_dbg(ql_dbg_disc, vha, 0x201d,
-	    "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
-	    __func__, fcport->port_name, fcport->disc_state,
-	    fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
-	    fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);
-
-	if (fcport->disc_state == DSC_DELETE_PEND)
-		return;
-
-	if (ea->sp->gen2 != fcport->login_gen) {
-		/* PLOGI/PRLI/LOGO came in while cmd was out.*/
-		ql_dbg(ql_dbg_disc, vha, 0x201e,
-		    "%s %8phC generation changed rscn %d|%d n",
-		    __func__, fcport->port_name, fcport->last_rscn_gen,
-		    fcport->rscn_gen);
-		return;
-	}
-
-	if (!ea->rc) {
-		if (ea->sp->gen1 == fcport->rscn_gen) {
-			fcport->scan_state = QLA_FCPORT_FOUND;
-			fcport->flags |= FCF_FABRIC_DEVICE;
-
-			if (fcport->d_id.b24 == ea->id.b24) {
-				/* cable plugged into the same place */
-				switch (vha->host->active_mode) {
-				case MODE_TARGET:
-					if (fcport->fw_login_state ==
-					    DSC_LS_PRLI_COMP) {
-						u16 data[2];
-						/*
-						 * Late RSCN was delivered.
-						 * Remote port already login'ed.
-						 */
-						ql_dbg(ql_dbg_disc, vha, 0x201f,
-						    "%s %d %8phC post adisc\n",
-						    __func__, __LINE__,
-						    fcport->port_name);
-						data[0] = data[1] = 0;
-						qla2x00_post_async_adisc_work(
-						    vha, fcport, data);
-					}
-					break;
-				case MODE_INITIATOR:
-				case MODE_DUAL:
-				default:
-					ql_dbg(ql_dbg_disc, vha, 0x201f,
-					    "%s %d %8phC post %s\n", __func__,
-					    __LINE__, fcport->port_name,
-					    (atomic_read(&fcport->state) ==
-					    FCS_ONLINE) ? "adisc" : "gnl");
-
-					if (atomic_read(&fcport->state) ==
-					    FCS_ONLINE) {
-						u16 data[2];
-
-						data[0] = data[1] = 0;
-						qla2x00_post_async_adisc_work(
-						    vha, fcport, data);
-					} else {
-						qla24xx_post_gnl_work(vha,
-						    fcport);
-					}
-					break;
-				}
-			} else { /* fcport->d_id.b24 != ea->id.b24 */
-				fcport->d_id.b24 = ea->id.b24;
-				fcport->id_changed = 1;
-				if (fcport->deleted != QLA_SESS_DELETED) {
-					ql_dbg(ql_dbg_disc, vha, 0x2021,
-					    "%s %d %8phC post del sess\n",
-					    __func__, __LINE__, fcport->port_name);
-					qlt_schedule_sess_for_deletion(fcport);
-				}
-			}
-		} else { /* ea->sp->gen1 != fcport->rscn_gen */
-			ql_dbg(ql_dbg_disc, vha, 0x2022,
-			    "%s %d %8phC post gidpn\n",
-			    __func__, __LINE__, fcport->port_name);
-			/* rscn came in while cmd was out */
-			qla24xx_post_gidpn_work(vha, fcport);
-		}
-	} else { /* ea->rc */
-		/* cable pulled */
-		if (ea->sp->gen1 == fcport->rscn_gen) {
-			if (ea->sp->gen2 == fcport->login_gen) {
-				ql_dbg(ql_dbg_disc, vha, 0x2042,
-				    "%s %d %8phC post del sess\n", __func__,
-				    __LINE__, fcport->port_name);
-				qlt_schedule_sess_for_deletion(fcport);
-			} else {
-				ql_dbg(ql_dbg_disc, vha, 0x2045,
-				    "%s %d %8phC login\n", __func__, __LINE__,
-				    fcport->port_name);
-				qla24xx_fcport_handle_login(vha, fcport);
-			}
-		} else {
-			ql_dbg(ql_dbg_disc, vha, 0x2049,
-			    "%s %d %8phC post gidpn\n", __func__, __LINE__,
-			    fcport->port_name);
-			qla24xx_post_gidpn_work(vha, fcport);
-		}
-	}
-} /* gidpn_event */
-
-static void qla2x00_async_gidpn_sp_done(void *s, int res)
-{
-	struct srb *sp = s;
-	struct scsi_qla_host *vha = sp->vha;
-	fc_port_t *fcport = sp->fcport;
-	u8 *id = fcport->ct_desc.ct_sns->p.rsp.rsp.gid_pn.port_id;
-	struct event_arg ea;
-
-	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
-
-	memset(&ea, 0, sizeof(ea));
-	ea.fcport = fcport;
-	ea.id.b.domain = id[0];
-	ea.id.b.area = id[1];
-	ea.id.b.al_pa = id[2];
-	ea.sp = sp;
-	ea.rc = res;
-	ea.event = FCME_GIDPN_DONE;
-
-	if (res == QLA_FUNCTION_TIMEOUT) {
-		ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
-		    "Async done-%s WWPN %8phC timed out.\n",
-		    sp->name, fcport->port_name);
-		qla24xx_post_gidpn_work(sp->vha, fcport);
-		sp->free(sp);
-		return;
-	} else if (res) {
-		ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
-		    "Async done-%s fail res %x, WWPN %8phC\n",
-		    sp->name, res, fcport->port_name);
-	} else {
-		ql_dbg(ql_dbg_disc, vha, 0x204f,
-		    "Async done-%s good WWPN %8phC ID %3phC\n",
-		    sp->name, fcport->port_name, id);
-	}
-
-	qla2x00_fcport_event_handler(vha, &ea);
-
-	sp->free(sp);
-}
-
-int qla24xx_async_gidpn(scsi_qla_host_t *vha, fc_port_t *fcport)
-{
-	int rval = QLA_FUNCTION_FAILED;
-	struct ct_sns_req       *ct_req;
-	srb_t *sp;
-
-	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
-		return rval;
-
-	fcport->disc_state = DSC_GID_PN;
-	fcport->scan_state = QLA_FCPORT_SCAN;
-	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
-	if (!sp)
-		goto done;
-
-	fcport->flags |= FCF_ASYNC_SENT;
-	sp->type = SRB_CT_PTHRU_CMD;
-	sp->name = "gidpn";
-	sp->gen1 = fcport->rscn_gen;
-	sp->gen2 = fcport->login_gen;
-
-	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
-
-	/* CT_IU preamble  */
-	ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GID_PN_CMD,
-		GID_PN_RSP_SIZE);
-
-	/* GIDPN req */
-	memcpy(ct_req->req.gid_pn.port_name, fcport->port_name,
-		WWN_SIZE);
-
-	/* req & rsp use the same buffer */
-	sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
-	sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
-	sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
-	sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
-	sp->u.iocb_cmd.u.ctarg.req_size = GID_PN_REQ_SIZE;
-	sp->u.iocb_cmd.u.ctarg.rsp_size = GID_PN_RSP_SIZE;
-	sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
-
-	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
-	sp->done = qla2x00_async_gidpn_sp_done;
-
-	rval = qla2x00_start_sp(sp);
-	if (rval != QLA_SUCCESS)
-		goto done_free_sp;
-
-	ql_dbg(ql_dbg_disc, vha, 0x20a4,
-	    "Async-%s - %8phC hdl=%x loopid=%x portid %02x%02x%02x.\n",
-	    sp->name, fcport->port_name,
-	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
-	    fcport->d_id.b.area, fcport->d_id.b.al_pa);
-	return rval;
-
-done_free_sp:
-	sp->free(sp);
-done:
-	fcport->flags &= ~FCF_ASYNC_ACTIVE;
-	return rval;
-}
-
-int qla24xx_post_gidpn_work(struct scsi_qla_host *vha, fc_port_t *fcport)
-{
-	struct qla_work_evt *e;
-	int ls;
-
-	ls = atomic_read(&vha->loop_state);
-	if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
-		test_bit(UNLOADING, &vha->dpc_flags))
-		return 0;
-
-	e = qla2x00_alloc_work(vha, QLA_EVT_GIDPN);
-	if (!e)
-		return QLA_FUNCTION_FAILED;
-
-	e->u.fcport.fcport = fcport;
-	fcport->flags |= FCF_ASYNC_ACTIVE;
-	return qla2x00_post_work(vha, e);
-}
-
 int qla24xx_post_gpsc_work(struct scsi_qla_host *vha, fc_port_t *fcport)
 {
 	struct qla_work_evt *e;
@@ -3237,18 +2982,14 @@
 		    __func__, fcport->port_name);
 		return;
 	} else if (ea->sp->gen1 != fcport->rscn_gen) {
-		ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
-		    __func__, __LINE__, fcport->port_name);
-		qla24xx_post_gidpn_work(vha, fcport);
 		return;
 	}
 
 	qla_post_iidma_work(vha, fcport);
 }
 
-static void qla24xx_async_gpsc_sp_done(void *s, int res)
+static void qla24xx_async_gpsc_sp_done(srb_t *sp, int res)
 {
-	struct srb *sp = s;
 	struct scsi_qla_host *vha = sp->vha;
 	struct qla_hw_data *ha = vha->hw;
 	fc_port_t *fcport = sp->fcport;
@@ -3261,6 +3002,8 @@
 	    "Async done-%s res %x, WWPN %8phC \n",
 	    sp->name, res, fcport->port_name);
 
+	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+
 	if (res == QLA_FUNCTION_TIMEOUT)
 		return;
 
@@ -3275,32 +3018,11 @@
 			ql_dbg(ql_dbg_disc, vha, 0x2019,
 			    "GPSC command unsupported, disabling query.\n");
 			ha->flags.gpsc_supported = 0;
-			res = QLA_SUCCESS;
+			goto done;
 		}
 	} else {
-		switch (be16_to_cpu(ct_rsp->rsp.gpsc.speed)) {
-		case BIT_15:
-			fcport->fp_speed = PORT_SPEED_1GB;
-			break;
-		case BIT_14:
-			fcport->fp_speed = PORT_SPEED_2GB;
-			break;
-		case BIT_13:
-			fcport->fp_speed = PORT_SPEED_4GB;
-			break;
-		case BIT_12:
-			fcport->fp_speed = PORT_SPEED_10GB;
-			break;
-		case BIT_11:
-			fcport->fp_speed = PORT_SPEED_8GB;
-			break;
-		case BIT_10:
-			fcport->fp_speed = PORT_SPEED_16GB;
-			break;
-		case BIT_8:
-			fcport->fp_speed = PORT_SPEED_32GB;
-			break;
-		}
+		fcport->fp_speed = qla2x00_port_speed_capability(
+		    be16_to_cpu(ct_rsp->rsp.gpsc.speed));
 
 		ql_dbg(ql_dbg_disc, vha, 0x2054,
 		    "Async-%s OUT WWPN %8phC speeds=%04x speed=%04x.\n",
@@ -3308,14 +3030,13 @@
 		    be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
 		    be16_to_cpu(ct_rsp->rsp.gpsc.speed));
 	}
-done:
 	memset(&ea, 0, sizeof(ea));
-	ea.event = FCME_GPSC_DONE;
 	ea.rc = res;
 	ea.fcport = fcport;
 	ea.sp = sp;
-	qla2x00_fcport_event_handler(vha, &ea);
+	qla24xx_handle_gpsc_event(vha, &ea);
 
+done:
 	sp->free(sp);
 }
 
@@ -3358,15 +3079,15 @@
 	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
 	sp->done = qla24xx_async_gpsc_sp_done;
 
-	rval = qla2x00_start_sp(sp);
-	if (rval != QLA_SUCCESS)
-		goto done_free_sp;
-
 	ql_dbg(ql_dbg_disc, vha, 0x205e,
 	    "Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n",
 	    sp->name, fcport->port_name, sp->handle,
 	    fcport->loop_id, fcport->d_id.b.domain,
 	    fcport->d_id.b.area, fcport->d_id.b.al_pa);
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		goto done_free_sp;
 	return rval;
 
 done_free_sp:
@@ -3381,7 +3102,8 @@
 {
 	struct qla_work_evt *e;
 
-	if (test_bit(UNLOADING, &vha->dpc_flags))
+	if (test_bit(UNLOADING, &vha->dpc_flags) ||
+	    (vha->vp_idx && test_bit(VPORT_DELETE, &vha->dpc_flags)))
 		return 0;
 
 	e = qla2x00_alloc_work(vha, QLA_EVT_GPNID);
@@ -3398,17 +3120,7 @@
 
 	switch (sp->type) {
 	case SRB_ELS_DCMD:
-		if (c->u.els_plogi.els_plogi_pyld)
-			dma_free_coherent(&vha->hw->pdev->dev,
-			    c->u.els_plogi.tx_size,
-			    c->u.els_plogi.els_plogi_pyld,
-			    c->u.els_plogi.els_plogi_pyld_dma);
-
-		if (c->u.els_plogi.els_resp_pyld)
-			dma_free_coherent(&vha->hw->pdev->dev,
-			    c->u.els_plogi.rx_size,
-			    c->u.els_plogi.els_resp_pyld,
-			    c->u.els_plogi.els_resp_pyld_dma);
+		qla2x00_els_dcmd2_free(vha, &c->u.els_plogi);
 		break;
 	case SRB_CT_PTHRU_CMD:
 	default:
@@ -3445,26 +3157,10 @@
 	if (ea->rc) {
 		/* cable is disconnected */
 		list_for_each_entry_safe(fcport, t, &vha->vp_fcports, list) {
-			if (fcport->d_id.b24 == ea->id.b24) {
-				ql_dbg(ql_dbg_disc, vha, 0xffff,
-				    "%s %d %8phC DS %d\n",
-				    __func__, __LINE__,
-				    fcport->port_name,
-				    fcport->disc_state);
+			if (fcport->d_id.b24 == ea->id.b24)
 				fcport->scan_state = QLA_FCPORT_SCAN;
-				switch (fcport->disc_state) {
-				case DSC_DELETED:
-				case DSC_DELETE_PEND:
-					break;
-				default:
-					ql_dbg(ql_dbg_disc, vha, 0xffff,
-					    "%s %d %8phC post del sess\n",
-					    __func__, __LINE__,
-					    fcport->port_name);
-					qlt_schedule_sess_for_deletion(fcport);
-					break;
-				}
-			}
+
+			qlt_schedule_sess_for_deletion(fcport);
 		}
 	} else {
 		/* cable is connected */
@@ -3473,34 +3169,19 @@
 			list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
 			    list) {
 				if ((conflict->d_id.b24 == ea->id.b24) &&
-				    (fcport != conflict)) {
-					/* 2 fcports with conflict Nport ID or
-					 * an existing fcport is having nport ID
-					 * conflict with new fcport.
-					 */
+				    (fcport != conflict))
+					/*
+					 * Two fcports with conflicting Nport
+					 * IDs, or an existing fcport whose
+					 * Nport ID conflicts with the new
+					 * fcport.
+					 */
 
-					ql_dbg(ql_dbg_disc, vha, 0xffff,
-					    "%s %d %8phC DS %d\n",
-					    __func__, __LINE__,
-					    conflict->port_name,
-					    conflict->disc_state);
 					conflict->scan_state = QLA_FCPORT_SCAN;
-					switch (conflict->disc_state) {
-					case DSC_DELETED:
-					case DSC_DELETE_PEND:
-						break;
-					default:
-						ql_dbg(ql_dbg_disc, vha, 0xffff,
-						    "%s %d %8phC post del sess\n",
-						    __func__, __LINE__,
-						    conflict->port_name);
-						qlt_schedule_sess_for_deletion
-							(conflict);
-						break;
-					}
-				}
+
+				qlt_schedule_sess_for_deletion(conflict);
 			}
 
+			fcport->scan_needed = 0;
 			fcport->rscn_gen++;
 			fcport->scan_state = QLA_FCPORT_FOUND;
 			fcport->flags |= FCF_FABRIC_DEVICE;
@@ -3551,19 +3232,7 @@
 					    conflict->disc_state);
 
 					conflict->scan_state = QLA_FCPORT_SCAN;
-					switch (conflict->disc_state) {
-					case DSC_DELETED:
-					case DSC_DELETE_PEND:
-						break;
-					default:
-						ql_dbg(ql_dbg_disc, vha, 0xffff,
-						    "%s %d %8phC post del sess\n",
-						    __func__, __LINE__,
-						    conflict->port_name);
-						qlt_schedule_sess_for_deletion
-							(conflict);
-						break;
-					}
+					qlt_schedule_sess_for_deletion(conflict);
 				}
 			}
 
@@ -3577,9 +3246,8 @@
 	}
 }
 
-static void qla2x00_async_gpnid_sp_done(void *s, int res)
+static void qla2x00_async_gpnid_sp_done(srb_t *sp, int res)
 {
-	struct srb *sp = s;
 	struct scsi_qla_host *vha = sp->vha;
 	struct ct_sns_req *ct_req =
 	    (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
@@ -3592,22 +3260,19 @@
 	if (res)
 		ql_dbg(ql_dbg_disc, vha, 0x2066,
 		    "Async done-%s fail res %x rscn gen %d ID %3phC. %8phC\n",
-		    sp->name, res, sp->gen1, ct_req->req.port_id.port_id,
+		    sp->name, res, sp->gen1, &ct_req->req.port_id.port_id,
 		    ct_rsp->rsp.gpn_id.port_name);
 	else
 		ql_dbg(ql_dbg_disc, vha, 0x2066,
 		    "Async done-%s good rscn gen %d ID %3phC. %8phC\n",
-		    sp->name, sp->gen1, ct_req->req.port_id.port_id,
+		    sp->name, sp->gen1, &ct_req->req.port_id.port_id,
 		    ct_rsp->rsp.gpn_id.port_name);
 
 	memset(&ea, 0, sizeof(ea));
 	memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
 	ea.sp = sp;
-	ea.id.b.domain = ct_req->req.port_id.port_id[0];
-	ea.id.b.area = ct_req->req.port_id.port_id[1];
-	ea.id.b.al_pa = ct_req->req.port_id.port_id[2];
+	ea.id = be_to_port_id(ct_req->req.port_id.port_id);
 	ea.rc = res;
-	ea.event = FCME_GPNID_DONE;
 
 	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
 	list_del(&sp->elem);
@@ -3626,25 +3291,22 @@
 		return;
 	}
 
-	qla2x00_fcport_event_handler(vha, &ea);
+	qla24xx_handle_gpnid_event(vha, &ea);
 
 	e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
 	if (!e) {
-		/* please ignore kernel warning. otherwise, we have mem leak. */
+		/* Please ignore the kernel warning; otherwise we leak memory. */
-		if (sp->u.iocb_cmd.u.ctarg.req) {
-			dma_free_coherent(&vha->hw->pdev->dev,
-				sp->u.iocb_cmd.u.ctarg.req_allocated_size,
-				sp->u.iocb_cmd.u.ctarg.req,
-				sp->u.iocb_cmd.u.ctarg.req_dma);
-			sp->u.iocb_cmd.u.ctarg.req = NULL;
-		}
-		if (sp->u.iocb_cmd.u.ctarg.rsp) {
-			dma_free_coherent(&vha->hw->pdev->dev,
-				sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
-				sp->u.iocb_cmd.u.ctarg.rsp,
-				sp->u.iocb_cmd.u.ctarg.rsp_dma);
-			sp->u.iocb_cmd.u.ctarg.rsp = NULL;
-		}
+		dma_free_coherent(&vha->hw->pdev->dev,
+				  sp->u.iocb_cmd.u.ctarg.req_allocated_size,
+				  sp->u.iocb_cmd.u.ctarg.req,
+				  sp->u.iocb_cmd.u.ctarg.req_dma);
+		sp->u.iocb_cmd.u.ctarg.req = NULL;
+
+		dma_free_coherent(&vha->hw->pdev->dev,
+				  sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
+				  sp->u.iocb_cmd.u.ctarg.rsp,
+				  sp->u.iocb_cmd.u.ctarg.rsp_dma);
+		sp->u.iocb_cmd.u.ctarg.rsp = NULL;
 
 		sp->free(sp);
 		return;
@@ -3716,9 +3378,7 @@
 	ct_req = qla2x00_prep_ct_req(ct_sns, GPN_ID_CMD, GPN_ID_RSP_SIZE);
 
 	/* GPN_ID req */
-	ct_req->req.port_id.port_id[0] = id->b.domain;
-	ct_req->req.port_id.port_id[1] = id->b.area;
-	ct_req->req.port_id.port_id[2] = id->b.al_pa;
+	ct_req->req.port_id.port_id = port_id_to_be_id(*id);
 
 	sp->u.iocb_cmd.u.ctarg.req_size = GPN_ID_REQ_SIZE;
 	sp->u.iocb_cmd.u.ctarg.rsp_size = GPN_ID_RSP_SIZE;
@@ -3727,13 +3387,14 @@
 	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
 	sp->done = qla2x00_async_gpnid_sp_done;
 
+	ql_dbg(ql_dbg_disc, vha, 0x2067,
+	    "Async-%s hdl=%x ID %3phC.\n", sp->name,
+	    sp->handle, &ct_req->req.port_id.port_id);
+
 	rval = qla2x00_start_sp(sp);
 	if (rval != QLA_SUCCESS)
 		goto done_free_sp;
 
-	ql_dbg(ql_dbg_disc, vha, 0x2067,
-	    "Async-%s hdl=%x ID %3phC.\n", sp->name,
-	    sp->handle, ct_req->req.port_id.port_id);
 	return rval;
 
 done_free_sp:
@@ -3763,54 +3424,52 @@
 
 void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea)
 {
-       fc_port_t *fcport = ea->fcport;
+	fc_port_t *fcport = ea->fcport;
 
-       qla24xx_post_gnl_work(vha, fcport);
+	qla24xx_post_gnl_work(vha, fcport);
 }
 
-void qla24xx_async_gffid_sp_done(void *s, int res)
+void qla24xx_async_gffid_sp_done(srb_t *sp, int res)
 {
-       struct srb *sp = s;
-       struct scsi_qla_host *vha = sp->vha;
-       fc_port_t *fcport = sp->fcport;
-       struct ct_sns_rsp *ct_rsp;
-       struct event_arg ea;
+	struct scsi_qla_host *vha = sp->vha;
+	fc_port_t *fcport = sp->fcport;
+	struct ct_sns_rsp *ct_rsp;
+	struct event_arg ea;
 
-       ql_dbg(ql_dbg_disc, vha, 0x2133,
-	   "Async done-%s res %x ID %x. %8phC\n",
-	   sp->name, res, fcport->d_id.b24, fcport->port_name);
+	ql_dbg(ql_dbg_disc, vha, 0x2133,
+	       "Async done-%s res %x ID %x. %8phC\n",
+	       sp->name, res, fcport->d_id.b24, fcport->port_name);
 
-       fcport->flags &= ~FCF_ASYNC_SENT;
-       ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
-       /*
-	* FC-GS-7, 5.2.3.12 FC-4 Features - format
-	* The format of the FC-4 Features object, as defined by the FC-4,
-	* Shall be an array of 4-bit values, one for each type code value
-	*/
-       if (!res) {
-	       if (ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET] & 0xf) {
-		       /* w1 b00:03 */
-		       fcport->fc4_type =
-			   ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
-		       fcport->fc4_type &= 0xf;
+	fcport->flags &= ~FCF_ASYNC_SENT;
+	ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
+	/*
+	 * FC-GS-7, 5.2.3.12 FC-4 Features - format
+	 * The format of the FC-4 Features object, as defined by the FC-4,
+	 * Shall be an array of 4-bit values, one for each type code value
+	 */
+	if (!res) {
+		if (ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET] & 0xf) {
+			/* w1 b00:03 */
+			fcport->fc4_type =
+			    ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
+			fcport->fc4_type &= 0xf;
-	       }
+		}
 
-	       if (ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET] & 0xf) {
-		       /* w5 [00:03]/28h */
-		       fcport->fc4f_nvme =
-			   ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
-		       fcport->fc4f_nvme &= 0xf;
-	       }
-       }
+		if (ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET] & 0xf) {
+			/* w5 [00:03]/28h */
+			fcport->fc4f_nvme =
+			    ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
+			fcport->fc4f_nvme &= 0xf;
+		}
+	}
 
-       memset(&ea, 0, sizeof(ea));
-       ea.sp = sp;
-       ea.fcport = sp->fcport;
-       ea.rc = res;
-       ea.event = FCME_GFFID_DONE;
+	memset(&ea, 0, sizeof(ea));
+	ea.sp = sp;
+	ea.fcport = sp->fcport;
+	ea.rc = res;
 
-       qla2x00_fcport_event_handler(vha, &ea);
-       sp->free(sp);
+	qla24xx_handle_gffid_event(vha, &ea);
+	sp->free(sp);
 }
 
 /* Get FC4 Feature with Nport ID. */
@@ -3854,14 +3513,14 @@
 
 	sp->done = qla24xx_async_gffid_sp_done;
 
-	rval = qla2x00_start_sp(sp);
-	if (rval != QLA_SUCCESS)
-		goto done_free_sp;
-
 	ql_dbg(ql_dbg_disc, vha, 0x2132,
 	    "Async-%s hdl=%x  %8phC.\n", sp->name,
 	    sp->handle, fcport->port_name);
 
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		goto done_free_sp;
+
 	return rval;
 done_free_sp:
 	sp->free(sp);
@@ -3899,9 +3558,10 @@
 	fc_port_t *fcport;
 	u32 i, rc;
 	bool found;
-	struct fab_scan_rp *rp;
+	struct fab_scan_rp *rp, *trp;
 	unsigned long flags;
 	u8 recheck = 0;
+	u16 dup = 0, dup_cnt = 0;
 
 	ql_dbg(ql_dbg_disc, vha, 0xffff,
 	    "%s enter\n", __func__);
@@ -3932,6 +3592,7 @@
 
 	for (i = 0; i < vha->hw->max_fibre_devices; i++) {
 		u64 wwn;
+		int k;
 
 		rp = &vha->scan.l[i];
 		found = false;
@@ -3940,6 +3601,20 @@
 		if (wwn == 0)
 			continue;
 
+		/* Remove duplicate NPORT ID entries from the switch database */
+		for (k = i + 1; k < vha->hw->max_fibre_devices; k++) {
+			trp = &vha->scan.l[k];
+			if (rp->id.b24 == trp->id.b24) {
+				dup = 1;
+				dup_cnt++;
+				ql_dbg(ql_dbg_disc + ql_dbg_verbose,
+				    vha, 0xffff,
+				    "Detected duplicate NPORT ID from switch database: ID %06x WWN %8phN WWN %8phN\n",
+				    rp->id.b24, rp->port_name, trp->port_name);
+				memset(trp, 0, sizeof(*trp));
+			}
+		}
+
 		if (!memcmp(rp->port_name, vha->port_name, WWN_SIZE))
 			continue;
 
@@ -3954,7 +3629,6 @@
 		list_for_each_entry(fcport, &vha->vp_fcports, list) {
 			if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE))
 				continue;
-			fcport->rscn_rcvd = 0;
 			fcport->scan_state = QLA_FCPORT_FOUND;
 			found = true;
 			/*
@@ -3963,10 +3637,12 @@
 			if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
 				qla2x00_clear_loop_id(fcport);
 				fcport->flags |= FCF_FABRIC_DEVICE;
-			} else if (fcport->d_id.b24 != rp->id.b24) {
+			} else if (fcport->d_id.b24 != rp->id.b24 ||
+				fcport->scan_needed) {
 				qlt_schedule_sess_for_deletion(fcport);
 			}
 			fcport->d_id.b24 = rp->id.b24;
+			fcport->scan_needed = 0;
 			break;
 		}
 
@@ -3979,25 +3655,30 @@
 		}
 	}
 
+	if (dup) {
+		ql_log(ql_log_warn, vha, 0xffff,
+		    "Detected %d duplicate NPORT ID(s) from switch database\n",
+		    dup_cnt);
+	}
+
 	/*
 	 * Logout all previous fabric dev marked lost, except FCP2 devices.
 	 */
 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
 		if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
-			fcport->rscn_rcvd = 0;
+			fcport->scan_needed = 0;
 			continue;
 		}
 
 		if (fcport->scan_state != QLA_FCPORT_FOUND) {
-			fcport->rscn_rcvd = 0;
+			fcport->scan_needed = 0;
 			if ((qla_dual_mode_enabled(vha) ||
 				qla_ini_mode_enabled(vha)) &&
 			    atomic_read(&fcport->state) == FCS_ONLINE) {
-				qla2x00_mark_device_lost(vha, fcport,
-				    ql2xplogiabsentdevice, 0);
+				if (fcport->loop_id != FC_NO_LOOP_ID) {
+					if (fcport->flags & FCF_FCP2_DEVICE)
+						fcport->logout_on_delete = 0;
 
-				if (fcport->loop_id != FC_NO_LOOP_ID &&
-				    (fcport->flags & FCF_FCP2_DEVICE) == 0) {
 					ql_dbg(ql_dbg_disc, vha, 0x20f0,
 					    "%s %d %8phC post del sess\n",
 					    __func__, __LINE__,
@@ -4008,7 +3689,7 @@
 				}
 			}
 		} else {
-			if (fcport->rscn_rcvd ||
+			if (fcport->scan_needed ||
 			    fcport->disc_state != DSC_LOGIN_COMPLETE) {
 				if (fcport->login_retry == 0) {
 					fcport->login_retry =
@@ -4018,7 +3699,7 @@
 					    fcport->port_name, fcport->loop_id,
 					    fcport->login_retry);
 				}
-				fcport->rscn_rcvd = 0;
+				fcport->scan_needed = 0;
 				qla24xx_fcport_handle_login(vha, fcport);
 			}
 		}
@@ -4033,7 +3714,7 @@
 
 	if (recheck) {
 		list_for_each_entry(fcport, &vha->vp_fcports, list) {
-			if (fcport->rscn_rcvd) {
+			if (fcport->scan_needed) {
 				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
 				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
 				break;
@@ -4042,6 +3723,41 @@
 	}
 }
 
+static int qla2x00_post_gnnft_gpnft_done_work(struct scsi_qla_host *vha,
+    srb_t *sp, int cmd)
+{
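+	/*
+	 * Defer completion processing to the DPC work queue: the srb and
+	 * its DMA buffers must be freed in process context, not in the
+	 * interrupt-driven sp->done() path.
+	 */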
+	struct qla_work_evt *e;
+
+	if (cmd != QLA_EVT_GPNFT_DONE && cmd != QLA_EVT_GNNFT_DONE)
+		return QLA_PARAMETER_ERROR;
+
+	e = qla2x00_alloc_work(vha, cmd);
+	if (!e)
+		return QLA_FUNCTION_FAILED;
+
+	e->u.iosb.sp = sp;
+
+	return qla2x00_post_work(vha, e);
+}
+
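+/*
+ * After the FCP-SCSI GNN_FT leg of a fabric scan completes on an
+ * NVMe-enabled adapter, queue a follow-up GPN_FT for the NVME FC4
+ * type, reusing the same srb.
+ */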
+static int qla2x00_post_nvme_gpnft_work(struct scsi_qla_host *vha,
+    srb_t *sp, int cmd)
+{
+	struct qla_work_evt *e;
+
+	if (cmd != QLA_EVT_GPNFT)
+		return QLA_PARAMETER_ERROR;
+
+	e = qla2x00_alloc_work(vha, cmd);
+	if (!e)
+		return QLA_FUNCTION_FAILED;
+
+	e->u.gpnft.fc4_type = FC4_TYPE_NVME;
+	e->u.gpnft.sp = sp;
+
+	return qla2x00_post_work(vha, e);
+}
+
 static void qla2x00_find_free_fcp_nvme_slot(struct scsi_qla_host *vha,
 	struct srb *sp)
 {
@@ -4138,124 +3854,88 @@
 	}
 }
 
-static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
+static void qla2x00_async_gpnft_gnnft_sp_done(srb_t *sp, int res)
 {
-	struct srb *sp = s;
 	struct scsi_qla_host *vha = sp->vha;
-	struct qla_work_evt *e;
 	struct ct_sns_req *ct_req =
 		(struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
 	u16 cmd = be16_to_cpu(ct_req->command);
 	u8 fc4_type = sp->gen2;
 	unsigned long flags;
+	int rc;
 
 	/* gen2 field is holding the fc4type */
 	ql_dbg(ql_dbg_disc, vha, 0xffff,
 	    "Async done-%s res %x FC4Type %x\n",
 	    sp->name, res, sp->gen2);
 
+	del_timer(&sp->u.iocb_cmd.timer);
+	sp->rc = res;
 	if (res) {
 		unsigned long flags;
+		const char *name = sp->name;
 
-		sp->free(sp);
-		spin_lock_irqsave(&vha->work_lock, flags);
-		vha->scan.scan_flags &= ~SF_SCANNING;
-		vha->scan.scan_retry++;
-		spin_unlock_irqrestore(&vha->work_lock, flags);
+		/*
+		 * We are in interrupt context; queue this sp for
+		 * GNNFT_DONE work so that all of its resources are
+		 * freed in process context.
+		 */
+		rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
+		    QLA_EVT_GNNFT_DONE);
+		if (rc) {
+			/* Cleanup here to prevent memory leak */
+			qla24xx_sp_unmap(vha, sp);
 
-		if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
-			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
-			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
-			qla2xxx_wake_dpc(vha);
-		} else {
-			ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
-			    "Async done-%s rescan failed on all retries\n",
-			    sp->name);
+			spin_lock_irqsave(&vha->work_lock, flags);
+			vha->scan.scan_flags &= ~SF_SCANNING;
+			vha->scan.scan_retry++;
+			spin_unlock_irqrestore(&vha->work_lock, flags);
+
+			if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
+				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+				qla2xxx_wake_dpc(vha);
+			} else {
+				ql_dbg(ql_dbg_disc, vha, 0xffff,
+				    "Async done-%s rescan failed on all retries.\n",
+				    name);
+			}
 		}
 		return;
 	}
 
-	if (!res)
-		qla2x00_find_free_fcp_nvme_slot(vha, sp);
+	qla2x00_find_free_fcp_nvme_slot(vha, sp);
 
 	if ((fc4_type == FC4_TYPE_FCP_SCSI) && vha->flags.nvme_enabled &&
 	    cmd == GNN_FT_CMD) {
-		del_timer(&sp->u.iocb_cmd.timer);
 		spin_lock_irqsave(&vha->work_lock, flags);
 		vha->scan.scan_flags &= ~SF_SCANNING;
 		spin_unlock_irqrestore(&vha->work_lock, flags);
 
-		e = qla2x00_alloc_work(vha, QLA_EVT_GPNFT);
-		if (!e) {
-			/*
-			 * please ignore kernel warning. Otherwise,
-			 * we have mem leak.
-			 */
-			if (sp->u.iocb_cmd.u.ctarg.req) {
-				dma_free_coherent(&vha->hw->pdev->dev,
-				    sp->u.iocb_cmd.u.ctarg.req_allocated_size,
-				    sp->u.iocb_cmd.u.ctarg.req,
-				    sp->u.iocb_cmd.u.ctarg.req_dma);
-				sp->u.iocb_cmd.u.ctarg.req = NULL;
-			}
-			if (sp->u.iocb_cmd.u.ctarg.rsp) {
-				dma_free_coherent(&vha->hw->pdev->dev,
-				    sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
-				    sp->u.iocb_cmd.u.ctarg.rsp,
-				    sp->u.iocb_cmd.u.ctarg.rsp_dma);
-				sp->u.iocb_cmd.u.ctarg.rsp = NULL;
-			}
-
-			ql_dbg(ql_dbg_disc, vha, 0xffff,
-			    "Async done-%s unable to alloc work element\n",
-			    sp->name);
-			sp->free(sp);
+		sp->rc = res;
+		rc = qla2x00_post_nvme_gpnft_work(vha, sp, QLA_EVT_GPNFT);
+		if (rc) {
+			qla24xx_sp_unmap(vha, sp);
 			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
 			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
-			return;
 		}
-		e->u.gpnft.fc4_type = FC4_TYPE_NVME;
-		sp->rc = res;
-		e->u.gpnft.sp = sp;
-
-		qla2x00_post_work(vha, e);
 		return;
 	}
 
-	if (cmd == GPN_FT_CMD)
-		e = qla2x00_alloc_work(vha, QLA_EVT_GPNFT_DONE);
-	else
-		e = qla2x00_alloc_work(vha, QLA_EVT_GNNFT_DONE);
-	if (!e) {
-		/* please ignore kernel warning. Otherwise, we have mem leak. */
-		if (sp->u.iocb_cmd.u.ctarg.req) {
-			dma_free_coherent(&vha->hw->pdev->dev,
-			    sp->u.iocb_cmd.u.ctarg.req_allocated_size,
-			    sp->u.iocb_cmd.u.ctarg.req,
-			    sp->u.iocb_cmd.u.ctarg.req_dma);
-			sp->u.iocb_cmd.u.ctarg.req = NULL;
-		}
-		if (sp->u.iocb_cmd.u.ctarg.rsp) {
-			dma_free_coherent(&vha->hw->pdev->dev,
-			    sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
-			    sp->u.iocb_cmd.u.ctarg.rsp,
-			    sp->u.iocb_cmd.u.ctarg.rsp_dma);
-			sp->u.iocb_cmd.u.ctarg.rsp = NULL;
-		}
+	if (cmd == GPN_FT_CMD) {
+		rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
+		    QLA_EVT_GPNFT_DONE);
+	} else {
+		rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
+		    QLA_EVT_GNNFT_DONE);
+	}
 
-		ql_dbg(ql_dbg_disc, vha, 0xffff,
-		    "Async done-%s unable to alloc work element\n",
-		    sp->name);
-		sp->free(sp);
+	if (rc) {
+		qla24xx_sp_unmap(vha, sp);
 		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
 		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
 		return;
 	}
-
-	sp->rc = res;
-	e->u.iosb.sp = sp;
-
-	qla2x00_post_work(vha, e);
 }
 
 /*
@@ -4288,11 +3968,13 @@
 		vha->scan.scan_flags &= ~SF_SCANNING;
 		spin_unlock_irqrestore(&vha->work_lock, flags);
 		WARN_ON(1);
+		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
 		goto done_free_sp;
 	}
 
 	ql_dbg(ql_dbg_disc, vha, 0xfffff,
-	    "%s: FC4Type %x, CT-PASSTRHU %s command ctarg rsp size %d, ctarg req size %d\n",
+	    "%s: FC4Type %x, CT-PASSTHRU %s command ctarg rsp size %d, ctarg req size %d\n",
 	    __func__, fc4_type, sp->name, sp->u.iocb_cmd.u.ctarg.rsp_size,
 	     sp->u.iocb_cmd.u.ctarg.req_size);
 
@@ -4320,13 +4002,15 @@
 
 	sp->done = qla2x00_async_gpnft_gnnft_sp_done;
 
-	rval = qla2x00_start_sp(sp);
-	if (rval != QLA_SUCCESS)
-		goto done_free_sp;
-
 	ql_dbg(ql_dbg_disc, vha, 0xffff,
 	    "Async-%s hdl=%x FC4Type %x.\n", sp->name,
 	    sp->handle, ct_req->req.gpn_ft.port_type);
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		goto done_free_sp;
+
 	return rval;
 
 done_free_sp:
@@ -4347,6 +4031,17 @@
 
 	sp->free(sp);
 
+	spin_lock_irqsave(&vha->work_lock, flags);
+	vha->scan.scan_flags &= ~SF_SCANNING;
+	if (vha->scan.scan_flags == 0) {
+		ql_dbg(ql_dbg_disc, vha, 0xffff,
+		    "%s: schedule\n", __func__);
+		vha->scan.scan_flags |= SF_QUEUED;
+		schedule_delayed_work(&vha->scan.scan_work, 5);
+	}
+	spin_unlock_irqrestore(&vha->work_lock, flags);
+
 	return rval;
 } /* GNNFT */
 
@@ -4354,7 +4049,6 @@
 {
 	ql_dbg(ql_dbg_disc, vha, 0xffff,
 	    "%s enter\n", __func__);
-	del_timer(&sp->u.iocb_cmd.timer);
 	qla24xx_async_gnnft(vha, sp, sp->gen2);
 }
 
@@ -4397,9 +4091,10 @@
 			return rval;
 		}
 
-		sp->u.iocb_cmd.u.ctarg.req = dma_zalloc_coherent(
-			&vha->hw->pdev->dev, sizeof(struct ct_sns_pkt),
-			&sp->u.iocb_cmd.u.ctarg.req_dma, GFP_KERNEL);
+		sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
+								sizeof(struct ct_sns_pkt),
+								&sp->u.iocb_cmd.u.ctarg.req_dma,
+								GFP_KERNEL);
 		sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
 		if (!sp->u.iocb_cmd.u.ctarg.req) {
 			ql_log(ql_log_warn, vha, 0xffff,
@@ -4407,7 +4102,8 @@
 			spin_lock_irqsave(&vha->work_lock, flags);
 			vha->scan.scan_flags &= ~SF_SCANNING;
 			spin_unlock_irqrestore(&vha->work_lock, flags);
-			goto done_free_sp;
+			qla2x00_rel_sp(sp);
+			return rval;
 		}
 		sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE;
 
@@ -4415,17 +4111,24 @@
 			((vha->hw->max_fibre_devices - 1) *
 			    sizeof(struct ct_sns_gpn_ft_data));
 
-		sp->u.iocb_cmd.u.ctarg.rsp = dma_zalloc_coherent(
-			&vha->hw->pdev->dev, rspsz,
-			&sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL);
-		sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
+		sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
+								rspsz,
+								&sp->u.iocb_cmd.u.ctarg.rsp_dma,
+								GFP_KERNEL);
+		sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = rspsz;
 		if (!sp->u.iocb_cmd.u.ctarg.rsp) {
 			ql_log(ql_log_warn, vha, 0xffff,
 			    "Failed to allocate ct_sns request.\n");
 			spin_lock_irqsave(&vha->work_lock, flags);
 			vha->scan.scan_flags &= ~SF_SCANNING;
 			spin_unlock_irqrestore(&vha->work_lock, flags);
-			goto done_free_sp;
+			dma_free_coherent(&vha->hw->pdev->dev,
+			    sp->u.iocb_cmd.u.ctarg.req_allocated_size,
+			    sp->u.iocb_cmd.u.ctarg.req,
+			    sp->u.iocb_cmd.u.ctarg.req_dma);
+			sp->u.iocb_cmd.u.ctarg.req = NULL;
+			qla2x00_rel_sp(sp);
+			return rval;
 		}
 		sp->u.iocb_cmd.u.ctarg.rsp_size = rspsz;
 
@@ -4462,17 +4165,15 @@
 
 	sp->done = qla2x00_async_gpnft_gnnft_sp_done;
 
-	rval = qla2x00_start_sp(sp);
-	if (rval != QLA_SUCCESS) {
-		spin_lock_irqsave(&vha->work_lock, flags);
-		vha->scan.scan_flags &= ~SF_SCANNING;
-		spin_unlock_irqrestore(&vha->work_lock, flags);
-		goto done_free_sp;
-	}
-
 	ql_dbg(ql_dbg_disc, vha, 0xffff,
 	    "Async-%s hdl=%x FC4Type %x.\n", sp->name,
 	    sp->handle, ct_req->req.gpn_ft.port_type);
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		goto done_free_sp;
+
 	return rval;
 
 done_free_sp:
@@ -4493,6 +4194,17 @@
 
 	sp->free(sp);
 
+	spin_lock_irqsave(&vha->work_lock, flags);
+	vha->scan.scan_flags &= ~SF_SCANNING;
+	if (vha->scan.scan_flags == 0) {
+		ql_dbg(ql_dbg_disc, vha, 0xffff,
+		    "%s: schedule\n", __func__);
+		vha->scan.scan_flags |= SF_QUEUED;
+		schedule_delayed_work(&vha->scan.scan_work, 5);
+	}
+	spin_unlock_irqrestore(&vha->work_lock, flags);
+
 	return rval;
 }
 
@@ -4520,9 +4232,8 @@
 	qla24xx_post_gnl_work(vha, ea->fcport);
 }
 
-static void qla2x00_async_gnnid_sp_done(void *s, int res)
+static void qla2x00_async_gnnid_sp_done(srb_t *sp, int res)
 {
-	struct srb *sp = s;
 	struct scsi_qla_host *vha = sp->vha;
 	fc_port_t *fcport = sp->fcport;
 	u8 *node_name = fcport->ct_desc.ct_sns->p.rsp.rsp.gnn_id.node_name;
@@ -4538,13 +4249,12 @@
 	ea.fcport = fcport;
 	ea.sp = sp;
 	ea.rc = res;
-	ea.event = FCME_GNNID_DONE;
 
 	ql_dbg(ql_dbg_disc, vha, 0x204f,
 	    "Async done-%s res %x, WWPN %8phC %8phC\n",
 	    sp->name, res, fcport->port_name, fcport->node_name);
 
-	qla2x00_fcport_event_handler(vha, &ea);
+	qla24xx_handle_gnnid_event(vha, &ea);
 
 	sp->free(sp);
 }
@@ -4577,9 +4287,7 @@
 	    GNN_ID_RSP_SIZE);
 
 	/* GNN_ID req */
-	ct_req->req.port_id.port_id[0] = fcport->d_id.b.domain;
-	ct_req->req.port_id.port_id[1] = fcport->d_id.b.area;
-	ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa;
+	ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id);
 
 
 	/* req & rsp use the same buffer */
@@ -4593,17 +4301,19 @@
 
 	sp->done = qla2x00_async_gnnid_sp_done;
 
-	rval = qla2x00_start_sp(sp);
-	if (rval != QLA_SUCCESS)
-		goto done_free_sp;
 	ql_dbg(ql_dbg_disc, vha, 0xffff,
 	    "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
 	    sp->name, fcport->port_name,
 	    sp->handle, fcport->loop_id, fcport->d_id.b24);
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		goto done_free_sp;
 	return rval;
 
 done_free_sp:
 	sp->free(sp);
+	fcport->flags &= ~FCF_ASYNC_SENT;
 done:
 	return rval;
 }
@@ -4647,18 +4357,14 @@
 		    __func__, fcport->port_name);
 		return;
 	} else if (ea->sp->gen1 != fcport->rscn_gen) {
-		ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
-		    __func__, __LINE__, fcport->port_name);
-		qla24xx_post_gidpn_work(vha, fcport);
 		return;
 	}
 
 	qla24xx_post_gpsc_work(vha, fcport);
 }
 
-static void qla2x00_async_gfpnid_sp_done(void *s, int res)
+static void qla2x00_async_gfpnid_sp_done(srb_t *sp, int res)
 {
-	struct srb *sp = s;
 	struct scsi_qla_host *vha = sp->vha;
 	fc_port_t *fcport = sp->fcport;
 	u8 *fpn = fcport->ct_desc.ct_sns->p.rsp.rsp.gfpn_id.port_name;
@@ -4673,13 +4379,12 @@
 	ea.fcport = fcport;
 	ea.sp = sp;
 	ea.rc = res;
-	ea.event = FCME_GFPNID_DONE;
 
 	ql_dbg(ql_dbg_disc, vha, 0x204f,
 	    "Async done-%s res %x, WWPN %8phC %8phC\n",
 	    sp->name, res, fcport->port_name, fcport->fabric_port_name);
 
-	qla2x00_fcport_event_handler(vha, &ea);
+	qla24xx_handle_gfpnid_event(vha, &ea);
 
 	sp->free(sp);
 }
@@ -4710,9 +4415,7 @@
 	    GFPN_ID_RSP_SIZE);
 
 	/* GFPN_ID req */
-	ct_req->req.port_id.port_id[0] = fcport->d_id.b.domain;
-	ct_req->req.port_id.port_id[1] = fcport->d_id.b.area;
-	ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa;
+	ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id);
 
 
 	/* req & rsp use the same buffer */
@@ -4726,14 +4429,15 @@
 
 	sp->done = qla2x00_async_gfpnid_sp_done;
 
-	rval = qla2x00_start_sp(sp);
-	if (rval != QLA_SUCCESS)
-		goto done_free_sp;
-
 	ql_dbg(ql_dbg_disc, vha, 0xffff,
 	    "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
 	    sp->name, fcport->port_name,
 	    sp->handle, fcport->loop_id, fcport->d_id.b24);
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		goto done_free_sp;
+
 	return rval;
 
 done_free_sp:
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 5352c9b..1d04131 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -37,8 +37,8 @@
 static int qla84xx_init_chip(scsi_qla_host_t *);
 static int qla25xx_init_queues(struct qla_hw_data *);
 static int qla24xx_post_prli_work(struct scsi_qla_host*, fc_port_t *);
-static void qla24xx_handle_plogi_done_event(struct scsi_qla_host *,
-    struct event_arg *);
+static void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha,
+				      struct event_arg *ea);
 static void qla24xx_handle_prli_done_event(struct scsi_qla_host *,
     struct event_arg *);
 static void __qla24xx_handle_gpdb_event(scsi_qla_host_t *, struct event_arg *);
@@ -54,7 +54,7 @@
 	unsigned long flags;
 	struct qla_hw_data *ha = sp->vha->hw;
 
-	WARN_ON(irqs_disabled());
+	WARN_ON_ONCE(irqs_disabled());
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	req = sp->qpair->req;
 	req->outstanding_cmds[sp->handle] = NULL;
@@ -63,10 +63,8 @@
 	iocb->timeout(sp);
 }
 
-void
-qla2x00_sp_free(void *ptr)
+void qla2x00_sp_free(srb_t *sp)
 {
-	srb_t *sp = ptr;
 	struct srb_iocb *iocb = &sp->u.iocb_cmd;
 
 	del_timer(&iocb->timer);
@@ -95,6 +93,88 @@
 	return tmo;
 }
 
+static void qla24xx_abort_iocb_timeout(void *data)
+{
+	srb_t *sp = data;
+	struct srb_iocb *abt = &sp->u.iocb_cmd;
+	struct qla_qpair *qpair = sp->qpair;
+	u32 handle;
+	unsigned long flags;
+
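+	/*
+	 * The abort IOCB itself timed out: remove it from the outstanding
+	 * command array and complete it with CS_TIMEOUT so any waiter in
+	 * qla24xx_async_abort_cmd() is released.
+	 */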
+	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+	for (handle = 1; handle < qpair->req->num_outstanding_cmds; handle++) {
+		/* removing the abort */
+		if (qpair->req->outstanding_cmds[handle] == sp) {
+			qpair->req->outstanding_cmds[handle] = NULL;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
+	abt->u.abt.comp_status = CS_TIMEOUT;
+	sp->done(sp, QLA_OS_TIMER_EXPIRED);
+}
+
+static void qla24xx_abort_sp_done(srb_t *sp, int res)
+{
+	struct srb_iocb *abt = &sp->u.iocb_cmd;
+
+	del_timer(&sp->u.iocb_cmd.timer);
+	if (sp->flags & SRB_WAKEUP_ON_COMP)
+		complete(&abt->u.abt.comp);
+	else
+		sp->free(sp);
+}
+
+static int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
+{
+	scsi_qla_host_t *vha = cmd_sp->vha;
+	struct srb_iocb *abt_iocb;
+	srb_t *sp;
+	int rval = QLA_FUNCTION_FAILED;
+
+	sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport,
+				  GFP_ATOMIC);
+	if (!sp)
+		return rval;
+
+	abt_iocb = &sp->u.iocb_cmd;
+	sp->type = SRB_ABT_CMD;
+	sp->name = "abort";
+	sp->qpair = cmd_sp->qpair;
+	if (wait)
+		sp->flags = SRB_WAKEUP_ON_COMP;
+
+	abt_iocb->timeout = qla24xx_abort_iocb_timeout;
+	init_completion(&abt_iocb->u.abt.comp);
+	/* FW may send up to two ABTS's, each with a 20s timeout */
+	qla2x00_init_timer(sp, 42);
+
+	abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
+	abt_iocb->u.abt.req_que_no = cpu_to_le16(cmd_sp->qpair->req->id);
+
+	sp->done = qla24xx_abort_sp_done;
+
+	ql_dbg(ql_dbg_async, vha, 0x507c,
+	       "Abort command issued - hdl=%x, type=%x\n", cmd_sp->handle,
+	       cmd_sp->type);
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS) {
+		sp->free(sp);
+		return rval;
+	}
+
+	if (wait) {
+		wait_for_completion(&abt_iocb->u.abt.comp);
+		rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
+			QLA_SUCCESS : QLA_FUNCTION_FAILED;
+		sp->free(sp);
+	}
+
+	return rval;
+}
+
 void
 qla2x00_async_iocb_timeout(void *data)
 {
@@ -161,13 +241,15 @@
 			sp->done(sp, QLA_FUNCTION_TIMEOUT);
 		}
 		break;
+	default:
+		WARN_ON_ONCE(true);
+		sp->done(sp, QLA_FUNCTION_TIMEOUT);
+		break;
 	}
 }
 
-static void
-qla2x00_async_login_sp_done(void *ptr, int res)
+static void qla2x00_async_login_sp_done(srb_t *sp, int res)
 {
-	srb_t *sp = ptr;
 	struct scsi_qla_host *vha = sp->vha;
 	struct srb_iocb *lio = &sp->u.iocb_cmd;
 	struct event_arg ea;
@@ -179,14 +261,13 @@
 
 	if (!test_bit(UNLOADING, &vha->dpc_flags)) {
 		memset(&ea, 0, sizeof(ea));
-		ea.event = FCME_PLOGI_DONE;
 		ea.fcport = sp->fcport;
 		ea.data[0] = lio->u.logio.data[0];
 		ea.data[1] = lio->u.logio.data[1];
 		ea.iop[0] = lio->u.logio.iop[0];
 		ea.iop[1] = lio->u.logio.iop[1];
 		ea.sp = sp;
-		qla2x00_fcport_event_handler(vha, &ea);
+		qla24xx_handle_plogi_done_event(vha, &ea);
 	}
 
 	sp->free(sp);
@@ -216,8 +297,13 @@
 	struct srb_iocb *lio;
 	int rval = QLA_FUNCTION_FAILED;
 
-	if (!vha->flags.online)
-		goto done;
+	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
+	    fcport->loop_id == FC_NO_LOOP_ID) {
+		ql_log(ql_log_warn, vha, 0xffff,
+		    "%s: %8phC - not sending command.\n",
+		    __func__, fcport->port_name);
+		return rval;
+	}
 
 	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 	if (!sp)
@@ -237,15 +323,19 @@
 	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
 
 	sp->done = qla2x00_async_login_sp_done;
-	if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport)) {
+	if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport))
 		lio->u.logio.flags |= SRB_LOGIN_PRLI_ONLY;
-	} else {
+	else
 		lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
 
-		if (fcport->fc4f_nvme)
-			lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;
+	if (fcport->fc4f_nvme)
+		lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;
 
-	}
+	ql_dbg(ql_dbg_disc, vha, 0x2072,
+	    "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x "
+		"retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id,
+	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
+	    fcport->login_retry);
 
 	rval = qla2x00_start_sp(sp);
 	if (rval != QLA_SUCCESS) {
@@ -254,11 +344,6 @@
 		goto done_free_sp;
 	}
 
-	ql_dbg(ql_dbg_disc, vha, 0x2072,
-	    "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x "
-		"retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id,
-	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
-	    fcport->login_retry);
 	return rval;
 
 done_free_sp:
@@ -269,11 +354,8 @@
 	return rval;
 }
 
-static void
-qla2x00_async_logout_sp_done(void *ptr, int res)
+static void qla2x00_async_logout_sp_done(srb_t *sp, int res)
 {
-	srb_t *sp = ptr;
-
 	sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
 	sp->fcport->login_gen++;
 	qlt_logo_completion_handler(sp->fcport, res);
@@ -287,9 +369,6 @@
 	struct srb_iocb *lio;
 	int rval = QLA_FUNCTION_FAILED;
 
-	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
-		return rval;
-
 	fcport->flags |= FCF_ASYNC_SENT;
 	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 	if (!sp)
@@ -303,15 +382,16 @@
 	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
 
 	sp->done = qla2x00_async_logout_sp_done;
-	rval = qla2x00_start_sp(sp);
-	if (rval != QLA_SUCCESS)
-		goto done_free_sp;
 
 	ql_dbg(ql_dbg_disc, vha, 0x2070,
 	    "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC.\n",
 	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
 		fcport->d_id.b.area, fcport->d_id.b.al_pa,
 		fcport->port_name);
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		goto done_free_sp;
 	return rval;
 
 done_free_sp:
@@ -332,10 +412,8 @@
 	qlt_logo_completion_handler(fcport, data[0]);
 }
 
-static void
-qla2x00_async_prlo_sp_done(void *s, int res)
+static void qla2x00_async_prlo_sp_done(srb_t *sp, int res)
 {
-	srb_t *sp = (srb_t *)s;
 	struct srb_iocb *lio = &sp->u.iocb_cmd;
 	struct scsi_qla_host *vha = sp->vha;
 
@@ -366,14 +444,16 @@
 	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
 
 	sp->done = qla2x00_async_prlo_sp_done;
-	rval = qla2x00_start_sp(sp);
-	if (rval != QLA_SUCCESS)
-		goto done_free_sp;
 
 	ql_dbg(ql_dbg_disc, vha, 0x2070,
 	    "Async-prlo - hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
 	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
 	    fcport->d_id.b.area, fcport->d_id.b.al_pa);
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		goto done_free_sp;
+
 	return rval;
 
 done_free_sp:
@@ -394,10 +474,16 @@
 	    fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
 	    fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);
 
+	WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
+		  ea->data[0]);
+
 	if (ea->data[0] != MBS_COMMAND_COMPLETE) {
 		ql_dbg(ql_dbg_disc, vha, 0x2066,
 		    "%s %8phC: adisc fail: post delete\n",
 		    __func__, ea->fcport->port_name);
+		/* deleted = 0 with logout_on_delete = 1 forces fw cleanup */
+		fcport->deleted = 0;
+		fcport->logout_on_delete = 1;
 		qlt_schedule_sess_for_deletion(ea->fcport);
 		return;
 	}
@@ -412,16 +498,15 @@
 		    __func__, ea->fcport->port_name);
 		return;
 	} else if (ea->sp->gen1 != ea->fcport->rscn_gen) {
-		ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
-		    __func__, __LINE__, ea->fcport->port_name);
-		qla24xx_post_gidpn_work(vha, ea->fcport);
+		qla_rscn_replay(fcport);
+		qlt_schedule_sess_for_deletion(fcport);
 		return;
 	}
 
 	__qla24xx_handle_gpdb_event(vha, ea);
 }
 
-int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+static int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport)
 {
 	struct qla_work_evt *e;
 
@@ -434,10 +519,8 @@
 	return qla2x00_post_work(vha, e);
 }
 
-static void
-qla2x00_async_adisc_sp_done(void *ptr, int res)
+static void qla2x00_async_adisc_sp_done(srb_t *sp, int res)
 {
-	srb_t *sp = ptr;
 	struct scsi_qla_host *vha = sp->vha;
 	struct event_arg ea;
 	struct srb_iocb *lio = &sp->u.iocb_cmd;
@@ -449,7 +532,6 @@
 	sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
 
 	memset(&ea, 0, sizeof(ea));
-	ea.event = FCME_ADISC_DONE;
 	ea.rc = res;
 	ea.data[0] = lio->u.logio.data[0];
 	ea.data[1] = lio->u.logio.data[1];
@@ -458,7 +540,7 @@
 	ea.fcport = sp->fcport;
 	ea.sp = sp;
 
-	qla2x00_fcport_event_handler(vha, &ea);
+	qla24xx_handle_adisc_event(vha, &ea);
 
 	sp->free(sp);
 }
@@ -469,9 +551,11 @@
 {
 	srb_t *sp;
 	struct srb_iocb *lio;
-	int rval;
+	int rval = QLA_FUNCTION_FAILED;
 
-	rval = QLA_FUNCTION_FAILED;
+	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+		return rval;
+
 	fcport->flags |= FCF_ASYNC_SENT;
 	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 	if (!sp)
@@ -489,13 +573,15 @@
 	sp->done = qla2x00_async_adisc_sp_done;
 	if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
 		lio->u.logio.flags |= SRB_LOGIN_RETRIED;
-	rval = qla2x00_start_sp(sp);
-	if (rval != QLA_SUCCESS)
-		goto done_free_sp;
 
 	ql_dbg(ql_dbg_disc, vha, 0x206f,
 	    "Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n",
 	    sp->handle, fcport->loop_id, fcport->d_id.b24, fcport->port_name);
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		goto done_free_sp;
+
 	return rval;
 
 done_free_sp:
@@ -506,6 +592,72 @@
 	return rval;
 }
 
+static bool qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
+{
+	struct qla_hw_data *ha = vha->hw;
+
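+	/*
+	 * FWI2 adapters reserve every handle above NPH_LAST_HANDLE; older
+	 * ISPs reserve the window between max_loop_id and SNS_FIRST_LOOP_ID
+	 * plus the management-server and broadcast handles.
+	 */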
+	if (IS_FWI2_CAPABLE(ha))
+		return loop_id > NPH_LAST_HANDLE;
+
+	return (loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
+		loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST;
+}
+
+/**
+ * qla2x00_find_new_loop_id - scan through our port list and find a new usable loop ID
+ * @vha: adapter state pointer.
+ * @dev: port structure pointer.
+ *
+ * Returns:
+ *	qla2x00 local function return status code.
+ *
+ * Context:
+ *	Kernel context.
+ */
+static int qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
+{
+	int	rval;
+	struct qla_hw_data *ha = vha->hw;
+	unsigned long flags = 0;
+
+	rval = QLA_SUCCESS;
+
+	spin_lock_irqsave(&ha->vport_slock, flags);
+
+	dev->loop_id = find_first_zero_bit(ha->loop_id_map, LOOPID_MAP_SIZE);
+	if (dev->loop_id >= LOOPID_MAP_SIZE ||
+	    qla2x00_is_reserved_id(vha, dev->loop_id)) {
+		dev->loop_id = FC_NO_LOOP_ID;
+		rval = QLA_FUNCTION_FAILED;
+	} else {
+		set_bit(dev->loop_id, ha->loop_id_map);
+	}
+	spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+	if (rval == QLA_SUCCESS)
+		ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
+		       "Assigning new loopid=%x, portid=%x.\n",
+		       dev->loop_id, dev->d_id.b24);
+	else
+		ql_log(ql_log_warn, dev->vha, 0x2087,
+		       "No loop_id's available, portid=%x.\n",
+		       dev->d_id.b24);
+
+	return rval;
+}
+
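+/* Return a previously assigned loop ID to the allocation bitmap. */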
+void qla2x00_clear_loop_id(fc_port_t *fcport)
+{
+	struct qla_hw_data *ha = fcport->vha->hw;
+
+	if (fcport->loop_id == FC_NO_LOOP_ID ||
+	    qla2x00_is_reserved_id(fcport->vha, fcport->loop_id))
+		return;
+
+	clear_bit(fcport->loop_id, ha->loop_id_map);
+	fcport->loop_id = FC_NO_LOOP_ID;
+}
+
 static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
 	struct event_arg *ea)
 {
@@ -538,11 +690,8 @@
 	}
 
 	if (fcport->last_rscn_gen != fcport->rscn_gen) {
-		ql_dbg(ql_dbg_disc, vha, 0x20df,
-		    "%s %8phC rscn gen changed rscn %d|%d \n",
-		    __func__, fcport->port_name,
-		    fcport->last_rscn_gen, fcport->rscn_gen);
-		qla24xx_post_gidpn_work(vha, fcport);
+		qla_rscn_replay(fcport);
+		qlt_schedule_sess_for_deletion(fcport);
 		return;
 	} else if (fcport->last_login_gen != fcport->login_gen) {
 		ql_dbg(ql_dbg_disc, vha, 0x20e0,
@@ -597,12 +746,15 @@
 			break;
 		default:
 			if ((id.b24 != fcport->d_id.b24 &&
-			    fcport->d_id.b24) ||
+			    fcport->d_id.b24 &&
+			    fcport->loop_id != FC_NO_LOOP_ID) ||
 			    (fcport->loop_id != FC_NO_LOOP_ID &&
 				fcport->loop_id != loop_id)) {
 				ql_dbg(ql_dbg_disc, vha, 0x20e3,
 				    "%s %d %8phC post del sess\n",
 				    __func__, __LINE__, fcport->port_name);
+				if (fcport->n2n_flag)
+					fcport->d_id.b24 = 0;
 				qlt_schedule_sess_for_deletion(fcport);
 				return;
 			}
@@ -610,6 +762,8 @@
 		}
 
 		fcport->loop_id = loop_id;
+		if (fcport->n2n_flag)
+			fcport->d_id.b24 = id.b24;
 
 		wwn = wwn_to_u64(fcport->port_name);
 		qlt_find_sess_invalidate_other(vha, wwn,
@@ -643,11 +797,14 @@
 				break;
 			case DSC_LS_PORT_UNAVAIL:
 			default:
-				if (fcport->loop_id != FC_NO_LOOP_ID)
-					qla2x00_clear_loop_id(fcport);
-
-				fcport->loop_id = loop_id;
-				fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
+				if (fcport->loop_id == FC_NO_LOOP_ID) {
+					qla2x00_find_new_loop_id(vha, fcport);
+					fcport->fw_login_state =
+					    DSC_LS_PORT_UNAVAIL;
+				}
+				ql_dbg(ql_dbg_disc, vha, 0x20e5,
+				    "%s %d %8phC\n", __func__, __LINE__,
+				    fcport->port_name);
 				qla24xx_fcport_handle_login(vha, fcport);
 				break;
 			}
@@ -656,6 +813,15 @@
 			fcport->fw_login_state = current_login_state;
 			fcport->d_id = id;
 			switch (current_login_state) {
+			case DSC_LS_PRLI_PEND:
+				/*
+				 * In the middle of PRLI; let it finish.
+				 * Allow the relogin code to recheck the state
+				 * with GNL. Push disc_state back to DELETED
+				 * so GNL can go out again.
+				 */
+				fcport->disc_state = DSC_DELETED;
+				break;
 			case DSC_LS_PRLI_COMP:
 				if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
 					fcport->port_type = FCT_INITIATOR;
@@ -677,7 +843,7 @@
 					    fcport);
 					break;
 				}
-				/* drop through */
+				/* fall through */
 			default:
 				if (fcport_is_smaller(fcport)) {
 					/* local adapter is bigger */
@@ -770,10 +936,8 @@
 	}
 } /* gnl_event */
 
-static void
-qla24xx_async_gnl_sp_done(void *s, int res)
+static void qla24xx_async_gnl_sp_done(srb_t *sp, int res)
 {
-	struct srb *sp = s;
 	struct scsi_qla_host *vha = sp->vha;
 	unsigned long flags;
 	struct fc_port *fcport = NULL, *tf;
@@ -789,10 +953,13 @@
 	    sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1],
 	    sp->u.iocb_cmd.u.mbx.in_mb[2]);
 
+	if (res == QLA_FUNCTION_TIMEOUT)
+		return;
+
+	sp->fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
 	memset(&ea, 0, sizeof(ea));
 	ea.sp = sp;
 	ea.rc = res;
-	ea.event = FCME_GNL_DONE;
 
 	if (sp->u.iocb_cmd.u.mbx.in_mb[1] >=
 	    sizeof(struct get_name_list_extended)) {
@@ -810,31 +977,30 @@
 		wwn = wwn_to_u64(e->port_name);
 
 		ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20e8,
-		    "%s %8phC %02x:%02x:%02x state %d/%d lid %x \n",
+		    "%s %8phC %02x:%02x:%02x CLS %x/%x lid %x\n",
 		    __func__, (void *)&wwn, e->port_id[2], e->port_id[1],
 		    e->port_id[0], e->current_login_state, e->last_login_state,
 		    (loop_id & 0x7fff));
 	}
 
-	spin_lock_irqsave(&vha->gnl.fcports_lock, flags);
+	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
 
 	INIT_LIST_HEAD(&h);
 	fcport = tf = NULL;
 	if (!list_empty(&vha->gnl.fcports))
 		list_splice_init(&vha->gnl.fcports, &h);
+	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
 
 	list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
 		list_del_init(&fcport->gnl_entry);
-		spin_lock(&vha->hw->tgt.sess_lock);
+		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
 		fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
-		spin_unlock(&vha->hw->tgt.sess_lock);
+		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
 		ea.fcport = fcport;
 
-		qla2x00_fcport_event_handler(vha, &ea);
+		qla24xx_handle_gnl_done_event(vha, &ea);
 	}
-	spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags);
 
-	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
 	/* create new fcport if fw has knowledge of new sessions */
 	for (i = 0; i < n; i++) {
 		port_id_t id;
@@ -867,6 +1033,8 @@
 		}
 	}
 
+	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+	vha->gnl.sent = 0;
 	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
 
 	sp->free(sp);
@@ -886,27 +1054,24 @@
 	ql_dbg(ql_dbg_disc, vha, 0x20d9,
 	    "Async-gnlist WWPN %8phC \n", fcport->port_name);
 
-	spin_lock_irqsave(&vha->gnl.fcports_lock, flags);
-	if (!list_empty(&fcport->gnl_entry)) {
-		spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags);
-		rval = QLA_SUCCESS;
-		goto done;
-	}
-
-	spin_lock(&vha->hw->tgt.sess_lock);
+	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+	fcport->flags |= FCF_ASYNC_SENT;
 	fcport->disc_state = DSC_GNL;
 	fcport->last_rscn_gen = fcport->rscn_gen;
 	fcport->last_login_gen = fcport->login_gen;
-	spin_unlock(&vha->hw->tgt.sess_lock);
 
 	list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
-	spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags);
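+	/*
+	 * If a GNL is already in flight, piggyback on it; its completion
+	 * handler drains the entire gnl.fcports list.
+	 */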
+	if (vha->gnl.sent) {
+		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+		return QLA_SUCCESS;
+	}
+	vha->gnl.sent = 1;
+	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
 
 	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 	if (!sp)
 		goto done;
 
-	fcport->flags |= FCF_ASYNC_SENT;
 	sp->type = SRB_MB_IOCB;
 	sp->name = "gnlist";
 	sp->gen1 = fcport->rscn_gen;
@@ -928,14 +1093,14 @@
 
 	sp->done = qla24xx_async_gnl_sp_done;
 
-	rval = qla2x00_start_sp(sp);
-	if (rval != QLA_SUCCESS)
-		goto done_free_sp;
-
 	ql_dbg(ql_dbg_disc, vha, 0x20da,
 	    "Async-%s - OUT WWPN %8phC hndl %x\n",
 	    sp->name, fcport->port_name, sp->handle);
 
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		goto done_free_sp;
+
 	return rval;
 
 done_free_sp:
@@ -958,10 +1123,8 @@
 	return qla2x00_post_work(vha, e);
 }
 
-static
-void qla24xx_async_gpdb_sp_done(void *s, int res)
+static void qla24xx_async_gpdb_sp_done(srb_t *sp, int res)
 {
-	struct srb *sp = s;
 	struct scsi_qla_host *vha = sp->vha;
 	struct qla_hw_data *ha = vha->hw;
 	fc_port_t *fcport = sp->fcport;
@@ -972,23 +1135,18 @@
 	    "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
 	    sp->name, res, fcport->port_name, mb[1], mb[2]);
 
-	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
-
-	if (res == QLA_FUNCTION_TIMEOUT)
-		return;
-
 	if (res == QLA_FUNCTION_TIMEOUT) {
 		dma_pool_free(sp->vha->hw->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
 			sp->u.iocb_cmd.u.mbx.in_dma);
 		return;
 	}
 
+	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
 	memset(&ea, 0, sizeof(ea));
-	ea.event = FCME_GPDB_DONE;
 	ea.fcport = fcport;
 	ea.sp = sp;
 
-	qla2x00_fcport_event_handler(vha, &ea);
+	qla24xx_handle_gpdb_event(vha, &ea);
 
 	dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
 		sp->u.iocb_cmd.u.mbx.in_dma);
@@ -1009,10 +1167,8 @@
 	return qla2x00_post_work(vha, e);
 }
 
-static void
-qla2x00_async_prli_sp_done(void *ptr, int res)
+static void qla2x00_async_prli_sp_done(srb_t *sp, int res)
 {
-	srb_t *sp = ptr;
 	struct scsi_qla_host *vha = sp->vha;
 	struct srb_iocb *lio = &sp->u.iocb_cmd;
 	struct event_arg ea;
@@ -1025,7 +1181,6 @@
 
 	if (!test_bit(UNLOADING, &vha->dpc_flags)) {
 		memset(&ea, 0, sizeof(ea));
-		ea.event = FCME_PRLI_DONE;
 		ea.fcport = sp->fcport;
 		ea.data[0] = lio->u.logio.data[0];
 		ea.data[1] = lio->u.logio.data[1];
@@ -1033,7 +1188,7 @@
 		ea.iop[1] = lio->u.logio.iop[1];
 		ea.sp = sp;
 
-		qla2x00_fcport_event_handler(vha, &ea);
+		qla24xx_handle_prli_done_event(vha, &ea);
 	}
 
 	sp->free(sp);
@@ -1073,6 +1228,11 @@
 	if  (fcport->fc4f_nvme)
 		lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI;
 
+	ql_dbg(ql_dbg_disc, vha, 0x211b,
+	    "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d %s.\n",
+	    fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24,
+	    fcport->login_retry, fcport->fc4f_nvme ? "nvme" : "fc");
+
 	rval = qla2x00_start_sp(sp);
 	if (rval != QLA_SUCCESS) {
 		fcport->flags |= FCF_LOGIN_NEEDED;
@@ -1080,11 +1240,6 @@
 		goto done_free_sp;
 	}
 
-	ql_dbg(ql_dbg_disc, vha, 0x211b,
-	    "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d %s.\n",
-	    fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24,
-	    fcport->login_retry, fcport->fc4f_nvme ? "nvme" : "fc");
-
 	return rval;
 
 done_free_sp:
@@ -1117,8 +1272,13 @@
 	struct port_database_24xx *pd;
 	struct qla_hw_data *ha = vha->hw;
 
-	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
+	    fcport->loop_id == FC_NO_LOOP_ID) {
+		ql_log(ql_log_warn, vha, 0xffff,
+		    "%s: %8phC - not sending command.\n",
+		    __func__, fcport->port_name);
 		return rval;
+	}
 
 	fcport->disc_state = DSC_GPDB;
 
@@ -1158,14 +1318,13 @@
 
 	sp->done = qla24xx_async_gpdb_sp_done;
 
-	rval = qla2x00_start_sp(sp);
-	if (rval != QLA_SUCCESS)
-		goto done_free_sp;
-
 	ql_dbg(ql_dbg_disc, vha, 0x20dc,
 	    "Async-%s %8phC hndl %x opt %x\n",
 	    sp->name, fcport->port_name, sp->handle, opt);
 
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		goto done_free_sp;
 	return rval;
 
 done_free_sp:
@@ -1193,11 +1352,9 @@
 		vha->fcport_count++;
 		ea->fcport->login_succ = 1;
 
-		ql_dbg(ql_dbg_disc, vha, 0x20d6,
-		    "%s %d %8phC post upd_fcport fcp_cnt %d\n",
-		    __func__, __LINE__,  ea->fcport->port_name,
-		    vha->fcport_count);
-		qla24xx_post_upd_fcport_work(vha, ea->fcport);
+		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+		qla24xx_sched_upd_fcport(ea->fcport);
+		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
 	} else if (ea->fcport->login_succ) {
 		/*
 		 * We have an existing session. A late RSCN delivery
@@ -1237,6 +1394,19 @@
 	else
 		ls = pd->current_login_state & 0xf;
 
+	if (ea->sp->gen2 != fcport->login_gen) {
+		/* target side must have changed it. */
+
+		ql_dbg(ql_dbg_disc, vha, 0x20d3,
+		    "%s %8phC generation changed\n",
+		    __func__, fcport->port_name);
+		return;
+	} else if (ea->sp->gen1 != fcport->rscn_gen) {
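+		/* An RSCN arrived mid-command; replay it and tear down the session. */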
+		qla_rscn_replay(fcport);
+		qlt_schedule_sess_for_deletion(fcport);
+		return;
+	}
+
 	switch (ls) {
 	case PDS_PRLI_COMPLETE:
 		__qla24xx_parse_gpdb(vha, fcport, pd);
@@ -1291,7 +1461,8 @@
 		login = 1;
 	}
 
-	if (login) {
+	if (login && fcport->login_retry) {
+		fcport->login_retry--;
 		if (fcport->loop_id == FC_NO_LOOP_ID) {
 			fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
 			rc = qla2x00_find_new_loop_id(vha, fcport);
@@ -1315,24 +1486,26 @@
 {
 	u16 data[2];
 	u64 wwn;
+	u16 sec;
 
 	ql_dbg(ql_dbg_disc, vha, 0x20d8,
-	    "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d retry %d lid %d scan %d\n",
+	    "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d lid %d scan %d\n",
 	    __func__, fcport->port_name, fcport->disc_state,
 	    fcport->fw_login_state, fcport->login_pause, fcport->flags,
 	    fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
-	    fcport->login_gen, fcport->login_retry,
-	    fcport->loop_id, fcport->scan_state);
+	    fcport->login_gen, fcport->loop_id, fcport->scan_state);
 
 	if (fcport->scan_state != QLA_FCPORT_FOUND)
 		return 0;
 
 	if ((fcport->loop_id != FC_NO_LOOP_ID) &&
+	    qla_dual_mode_enabled(vha) &&
 	    ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
 	     (fcport->fw_login_state == DSC_LS_PRLI_PEND)))
 		return 0;
 
-	if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
+	if (fcport->fw_login_state == DSC_LS_PLOGI_COMP &&
+	    !N2N_TOPO(vha->hw)) {
 		if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
 			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
 			return 0;
@@ -1403,8 +1576,9 @@
 				qla24xx_post_gpdb_work(vha, fcport, 0);
 			}  else {
 				ql_dbg(ql_dbg_disc, vha, 0x2118,
-				    "%s %d %8phC post NVMe PRLI\n",
-				    __func__, __LINE__, fcport->port_name);
+				    "%s %d %8phC post %s PRLI\n",
+				    __func__, __LINE__, fcport->port_name,
+				    fcport->fc4f_nvme ? "NVME" : "FC");
 				qla24xx_post_prli_work(vha, fcport);
 			}
 			break;
@@ -1421,22 +1595,14 @@
 		break;
 
 	case DSC_LOGIN_FAILED:
-		fcport->login_retry--;
-		ql_dbg(ql_dbg_disc, vha, 0x20d0,
-		    "%s %d %8phC post gidpn\n",
-		    __func__, __LINE__, fcport->port_name);
 		if (N2N_TOPO(vha->hw))
 			qla_chk_n2n_b4_login(vha, fcport);
 		else
-			qla24xx_post_gidpn_work(vha, fcport);
+			qlt_schedule_sess_for_deletion(fcport);
 		break;
 
 	case DSC_LOGIN_COMPLETE:
 		/* recheck login state */
-		ql_dbg(ql_dbg_disc, vha, 0x20d1,
-		    "%s %d %8phC post adisc\n",
-		    __func__, __LINE__, fcport->port_name);
-		fcport->login_retry--;
 		data[0] = data[1] = 0;
 		qla2x00_post_async_adisc_work(vha, fcport, data);
 		break;
@@ -1446,6 +1612,22 @@
 			qla24xx_post_prli_work(vha, fcport);
 		break;
 
+	case DSC_UPD_FCPORT:
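+		/*
+		 * Registration is still in progress; warn at most once per
+		 * minute while it remains outstanding.
+		 */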
+		sec = jiffies_to_msecs(jiffies -
+		    fcport->jiffies_at_registration)/1000;
+		if (fcport->sec_since_registration < sec && sec &&
+		    !(sec % 60)) {
+			fcport->sec_since_registration = sec;
+			ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
+			    "%s %8phC - Slow Rport registration (%d sec)\n",
+			    __func__, fcport->port_name, sec);
+		}
+
+		if (fcport->next_disc_state != DSC_DELETE_PEND)
+			fcport->next_disc_state = DSC_ADISC;
+		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+		break;
+
 	default:
 		break;
 	}
@@ -1453,33 +1635,11 @@
 	return 0;
 }
 
-static
-void qla24xx_handle_rscn_event(fc_port_t *fcport, struct event_arg *ea)
-{
-	fcport->rscn_gen++;
-
-	ql_dbg(ql_dbg_disc, fcport->vha, 0x210c,
-	    "%s %8phC DS %d LS %d\n",
-	    __func__, fcport->port_name, fcport->disc_state,
-	    fcport->fw_login_state);
-
-	if (fcport->flags & FCF_ASYNC_SENT)
-		return;
-
-	switch (fcport->disc_state) {
-	case DSC_DELETED:
-	case DSC_LOGIN_COMPLETE:
-		qla24xx_post_gpnid_work(fcport->vha, &ea->id);
-		break;
-	default:
-		break;
-	}
-}
-
 int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
     u8 *port_name, u8 *node_name, void *pla, u8 fc4_type)
 {
 	struct qla_work_evt *e;
+
 	e = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS);
 	if (!e)
 		return QLA_FUNCTION_FAILED;
@@ -1494,12 +1654,34 @@
 	return qla2x00_post_work(vha, e);
 }
 
-static
+void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+	fc_port_t *fcport;
+	unsigned long flags;
+
+	fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
+	if (fcport) {
+		fcport->scan_needed = 1;
+		fcport->rscn_gen++;
+	}
+
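+	/* Coalesce RSCN bursts: queue the delayed scan only if none is pending. */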
+	spin_lock_irqsave(&vha->work_lock, flags);
+	if (vha->scan.scan_flags == 0) {
+		ql_dbg(ql_dbg_disc, vha, 0xffff, "%s: schedule\n", __func__);
+		vha->scan.scan_flags |= SF_QUEUED;
+		schedule_delayed_work(&vha->scan.scan_work, 5);
+	}
+	spin_unlock_irqrestore(&vha->work_lock, flags);
+}
+
 void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
 	struct event_arg *ea)
 {
 	fc_port_t *fcport = ea->fcport;
 
+	if (test_bit(UNLOADING, &vha->dpc_flags))
+		return;
+
 	ql_dbg(ql_dbg_disc, vha, 0x2102,
 	    "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n",
 	    __func__, fcport->port_name, fcport->disc_state,
@@ -1509,145 +1691,37 @@
 	    fcport->last_login_gen, fcport->login_gen,
 	    fcport->flags);
 
-	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
-	    (fcport->fw_login_state == DSC_LS_PRLI_PEND))
-		return;
-
-	if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
-		if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
-			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
-			return;
-		}
-	}
-
 	if (fcport->last_rscn_gen != fcport->rscn_gen) {
-		ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gidpn\n",
+		ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gnl\n",
 		    __func__, __LINE__, fcport->port_name);
-
-		qla24xx_post_gidpn_work(vha, fcport);
+		qla24xx_post_gnl_work(vha, fcport);
 		return;
 	}
 
 	qla24xx_fcport_handle_login(vha, fcport);
 }
 
-
-void qla_handle_els_plogi_done(scsi_qla_host_t *vha, struct event_arg *ea)
+/*
+ * RSCN(s) came in for this fcport, but could not be consumed
+ * by the fcport at the time. Replay them now.
+ */
+void qla_rscn_replay(fc_port_t *fcport)
 {
-	ql_dbg(ql_dbg_disc, vha, 0x2118,
-	    "%s %d %8phC post PRLI\n",
-	    __func__, __LINE__, ea->fcport->port_name);
-	qla24xx_post_prli_work(vha, ea->fcport);
-}
+	struct event_arg ea;
 
-void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
-{
-	fc_port_t *f, *tf;
-	uint32_t id = 0, mask, rid;
-	unsigned long flags;
-	fc_port_t *fcport;
-
-	switch (ea->event) {
-	case FCME_RELOGIN:
-		if (test_bit(UNLOADING, &vha->dpc_flags))
-			return;
-
-		qla24xx_handle_relogin_event(vha, ea);
-		break;
-	case FCME_RSCN:
-		if (test_bit(UNLOADING, &vha->dpc_flags))
-			return;
-		switch (ea->id.b.rsvd_1) {
-		case RSCN_PORT_ADDR:
-			fcport = qla2x00_find_fcport_by_nportid
-				(vha, &ea->id, 1);
-			if (fcport)
-				fcport->rscn_rcvd = 1;
-
-			spin_lock_irqsave(&vha->work_lock, flags);
-			if (vha->scan.scan_flags == 0) {
-				ql_dbg(ql_dbg_disc, vha, 0xffff,
-				    "%s: schedule\n", __func__);
-				vha->scan.scan_flags |= SF_QUEUED;
-				schedule_delayed_work(&vha->scan.scan_work, 5);
-			}
-			spin_unlock_irqrestore(&vha->work_lock, flags);
-
-			break;
-		case RSCN_AREA_ADDR:
-		case RSCN_DOM_ADDR:
-			if (ea->id.b.rsvd_1 == RSCN_AREA_ADDR) {
-				mask = 0xffff00;
-				ql_dbg(ql_dbg_async, vha, 0x5044,
-				    "RSCN: Area 0x%06x was affected\n",
-				    ea->id.b24);
-			} else {
-				mask = 0xff0000;
-				ql_dbg(ql_dbg_async, vha, 0x507a,
-				    "RSCN: Domain 0x%06x was affected\n",
-				    ea->id.b24);
-			}
-
-			rid = ea->id.b24 & mask;
-			list_for_each_entry_safe(f, tf, &vha->vp_fcports,
-			    list) {
-				id = f->d_id.b24 & mask;
-				if (rid == id) {
-					ea->fcport = f;
-					qla24xx_handle_rscn_event(f, ea);
-				}
-			}
-			break;
-		case RSCN_FAB_ADDR:
-		default:
-			ql_log(ql_log_warn, vha, 0xd045,
-			    "RSCN: Fabric was affected. Addr format %d\n",
-			    ea->id.b.rsvd_1);
-			qla2x00_mark_all_devices_lost(vha, 1);
-			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
-			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
-		}
-		break;
-	case FCME_GIDPN_DONE:
-		qla24xx_handle_gidpn_event(vha, ea);
-		break;
-	case FCME_GNL_DONE:
-		qla24xx_handle_gnl_done_event(vha, ea);
-		break;
-	case FCME_GPSC_DONE:
-		qla24xx_handle_gpsc_event(vha, ea);
-		break;
-	case FCME_PLOGI_DONE:	/* Initiator side sent LLIOCB */
-		qla24xx_handle_plogi_done_event(vha, ea);
-		break;
-	case FCME_PRLI_DONE:
-		qla24xx_handle_prli_done_event(vha, ea);
-		break;
-	case FCME_GPDB_DONE:
-		qla24xx_handle_gpdb_event(vha, ea);
-		break;
-	case FCME_GPNID_DONE:
-		qla24xx_handle_gpnid_event(vha, ea);
-		break;
-	case FCME_GFFID_DONE:
-		qla24xx_handle_gffid_event(vha, ea);
-		break;
-	case FCME_ADISC_DONE:
-		qla24xx_handle_adisc_event(vha, ea);
-		break;
-	case FCME_GNNID_DONE:
-		qla24xx_handle_gnnid_event(vha, ea);
-		break;
-	case FCME_GFPNID_DONE:
-		qla24xx_handle_gfpnid_event(vha, ea);
-		break;
-	case FCME_ELS_PLOGI_DONE:
-		qla_handle_els_plogi_done(vha, ea);
-		break;
+	switch (fcport->disc_state) {
+	case DSC_DELETE_PEND:
+		return;
 	default:
-		BUG_ON(1);
 		break;
 	}
+
+	if (fcport->scan_needed) {
+		memset(&ea, 0, sizeof(ea));
+		ea.id = fcport->d_id;
+		ea.id.b.rsvd_1 = RSCN_PORT_ADDR;
+		qla2x00_handle_rscn(fcport->vha, &ea);
+	}
 }
 
 static void
@@ -1660,10 +1734,8 @@
 	complete(&tmf->u.tmf.comp);
 }
 
-static void
-qla2x00_tmf_sp_done(void *ptr, int res)
+static void qla2x00_tmf_sp_done(srb_t *sp, int res)
 {
-	srb_t *sp = ptr;
 	struct srb_iocb *tmf = &sp->u.iocb_cmd;
 
 	complete(&tmf->u.tmf.comp);
@@ -1695,15 +1767,14 @@
 	tm_iocb->u.tmf.data = tag;
 	sp->done = qla2x00_tmf_sp_done;
 
-	rval = qla2x00_start_sp(sp);
-	if (rval != QLA_SUCCESS)
-		goto done_free_sp;
-
 	ql_dbg(ql_dbg_taskm, vha, 0x802f,
 	    "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
 	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
 	    fcport->d_id.b.area, fcport->d_id.b.al_pa);
 
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		goto done_free_sp;
 	wait_for_completion(&tm_iocb->u.tmf.comp);
 
 	rval = tm_iocb->u.tmf.data;
@@ -1718,93 +1789,14 @@
 		lun = (uint16_t)tm_iocb->u.tmf.lun;
 
 		/* Issue Marker IOCB */
-		qla2x00_marker(vha, vha->hw->req_q_map[0],
-		    vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun,
+		qla2x00_marker(vha, vha->hw->base_qpair,
+		    fcport->loop_id, lun,
 		    flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
 	}
 
 done_free_sp:
 	sp->free(sp);
-	sp->fcport->flags &= ~FCF_ASYNC_SENT;
-done:
-	return rval;
-}
-
-static void
-qla24xx_abort_iocb_timeout(void *data)
-{
-	srb_t *sp = data;
-	struct srb_iocb *abt = &sp->u.iocb_cmd;
-
-	abt->u.abt.comp_status = CS_TIMEOUT;
-	sp->done(sp, QLA_FUNCTION_TIMEOUT);
-}
-
-static void
-qla24xx_abort_sp_done(void *ptr, int res)
-{
-	srb_t *sp = ptr;
-	struct srb_iocb *abt = &sp->u.iocb_cmd;
-
-	if (del_timer(&sp->u.iocb_cmd.timer)) {
-		if (sp->flags & SRB_WAKEUP_ON_COMP)
-			complete(&abt->u.abt.comp);
-		else
-			sp->free(sp);
-	}
-}
-
-int
-qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
-{
-	scsi_qla_host_t *vha = cmd_sp->vha;
-	fc_port_t *fcport = cmd_sp->fcport;
-	struct srb_iocb *abt_iocb;
-	srb_t *sp;
-	int rval = QLA_FUNCTION_FAILED;
-
-	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
-	if (!sp)
-		goto done;
-
-	abt_iocb = &sp->u.iocb_cmd;
-	sp->type = SRB_ABT_CMD;
-	sp->name = "abort";
-	if (wait)
-		sp->flags = SRB_WAKEUP_ON_COMP;
-
-	abt_iocb->timeout = qla24xx_abort_iocb_timeout;
-	init_completion(&abt_iocb->u.abt.comp);
-	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
-
-	abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
-
-	if (vha->flags.qpairs_available && cmd_sp->qpair)
-		abt_iocb->u.abt.req_que_no =
-		    cpu_to_le16(cmd_sp->qpair->req->id);
-	else
-		abt_iocb->u.abt.req_que_no = cpu_to_le16(vha->req->id);
-
-	sp->done = qla24xx_abort_sp_done;
-
-	rval = qla2x00_start_sp(sp);
-	if (rval != QLA_SUCCESS)
-		goto done_free_sp;
-
-	ql_dbg(ql_dbg_async, vha, 0x507c,
-	    "Abort command issued - hdl=%x, target_id=%x\n",
-	    cmd_sp->handle, fcport->tgt_id);
-
-	if (wait) {
-		wait_for_completion(&abt_iocb->u.abt.comp);
-		rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
-			QLA_SUCCESS : QLA_FUNCTION_FAILED;
-	} else {
-		goto done;
-	}
-
-done_free_sp:
-	sp->free(sp);
+	fcport->flags &= ~FCF_ASYNC_SENT;
 done:
 	return rval;
 }
@@ -1816,19 +1808,17 @@
 
 	uint32_t	handle;
 	fc_port_t	*fcport = sp->fcport;
+	struct qla_qpair *qpair = sp->qpair;
 	struct scsi_qla_host *vha = fcport->vha;
-	struct qla_hw_data *ha = vha->hw;
-	struct req_que *req = vha->req;
+	struct req_que *req = qpair->req;
 
-	if (vha->flags.qpairs_available && sp->qpair)
-		req = sp->qpair->req;
-
-	spin_lock_irqsave(&ha->hardware_lock, flags);
+	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
 	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
 		if (req->outstanding_cmds[handle] == sp)
 			break;
 	}
-	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
 	if (handle == req->num_outstanding_cmds) {
 		/* Command not found. */
 		return QLA_FUNCTION_FAILED;
@@ -1843,6 +1833,9 @@
 static void
 qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
 {
+	WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
+		  ea->data[0]);
+
 	switch (ea->data[0]) {
 	case MBS_COMMAND_COMPLETE:
 		ql_dbg(ql_dbg_disc, vha, 0x2118,
@@ -1851,6 +1844,12 @@
 
 		ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
 		ea->fcport->logout_on_delete = 1;
+		ea->fcport->nvme_prli_service_param = ea->iop[0];
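+		/* iop[1] reports the first burst size in 512-byte units. */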
+		if (ea->iop[0] & NVME_PRLI_SP_FIRST_BURST)
+			ea->fcport->nvme_first_burst_size =
+			    (ea->iop[1] & 0xffff) * 512;
+		else
+			ea->fcport->nvme_first_burst_size = 0;
 		qla24xx_post_gpdb_work(vha, ea->fcport, 0);
 		break;
 	default:
@@ -1861,22 +1860,43 @@
 			break;
 		}
 
-		if (ea->fcport->n2n_flag) {
+		if (ea->fcport->fc4f_nvme) {
 			ql_dbg(ql_dbg_disc, vha, 0x2118,
 				"%s %d %8phC post fc4 prli\n",
 				__func__, __LINE__, ea->fcport->port_name);
 			ea->fcport->fc4f_nvme = 0;
-			ea->fcport->n2n_flag = 0;
 			qla24xx_post_prli_work(vha, ea->fcport);
+			return;
 		}
-		ql_dbg(ql_dbg_disc, vha, 0x2119,
-		    "%s %d %8phC unhandle event of %x\n",
-		    __func__, __LINE__, ea->fcport->port_name, ea->data[0]);
+
+		/* at this point both PRLI NVME & PRLI FCP failed */
+		if (N2N_TOPO(vha->hw)) {
+			if (ea->fcport->n2n_link_reset_cnt < 3) {
+				ea->fcport->n2n_link_reset_cnt++;
+				/*
+				 * Remote port is not sending PLOGI. Reset the
+				 * link to kick-start its state machine.
+				 */
+				set_bit(N2N_LINK_RESET, &vha->dpc_flags);
+			} else {
+				ql_log(ql_log_warn, vha, 0x2119,
+				    "%s %d %8phC Unable to reconnect\n",
+				    __func__, __LINE__, ea->fcport->port_name);
+			}
+		} else {
+			/*
+			 * Switch-connected topology; login failed. Take the
+			 * connection down and allow relogin to retrigger.
+			 */
+			ea->fcport->flags &= ~FCF_ASYNC_SENT;
+			ea->fcport->keep_nport_handle = 0;
+			qlt_schedule_sess_for_deletion(ea->fcport);
+		}
 		break;
 	}
 }
 
-static void
+void
 qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
 {
 	port_id_t cid;	/* conflict Nport id */
@@ -1889,7 +1909,7 @@
 	    "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d data %x|%x iop %x|%x\n",
 	    __func__, fcport->port_name, fcport->disc_state,
 	    fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen,
-	    ea->sp->gen2, fcport->rscn_gen|ea->sp->gen1,
+	    ea->sp->gen1, fcport->rscn_gen,
 	    ea->data[0], ea->data[1], ea->iop[0], ea->iop[1]);
 
 	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
@@ -1900,8 +1920,11 @@
 		return;
 	}
 
-	if (fcport->disc_state == DSC_DELETE_PEND)
+	if ((fcport->disc_state == DSC_DELETE_PEND) ||
+	    (fcport->disc_state == DSC_DELETED)) {
+		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
 		return;
+	}
 
 	if (ea->sp->gen2 != fcport->login_gen) {
 		/* target side must have changed it. */
@@ -1911,12 +1934,17 @@
 		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
 		return;
 	} else if (ea->sp->gen1 != fcport->rscn_gen) {
-		ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
-		    __func__, __LINE__, fcport->port_name);
-		qla24xx_post_gidpn_work(vha, fcport);
+		ql_dbg(ql_dbg_disc, vha, 0x20d3,
+		    "%s %8phC RSCN generation changed\n",
+		    __func__, fcport->port_name);
+		qla_rscn_replay(fcport);
+		qlt_schedule_sess_for_deletion(fcport);
 		return;
 	}
 
+	WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
+		  ea->data[0]);
+
 	switch (ea->data[0]) {
 	case MBS_COMMAND_COMPLETE:
 		/*
@@ -1992,8 +2020,6 @@
 			    "%s %d %8phC NPortId %06x inuse with loopid 0x%x. post gidpn\n",
 			    __func__, __LINE__, ea->fcport->port_name,
 			    ea->fcport->d_id.b24, lid);
-			qla2x00_clear_loop_id(ea->fcport);
-			qla24xx_post_gidpn_work(vha, ea->fcport);
 		} else {
 			ql_dbg(ql_dbg_disc, vha, 0x20ed,
 			    "%s %d %8phC NPortId %06x inuse with loopid 0x%x. sched delete\n",
@@ -2021,26 +2047,6 @@
 	return;
 }
 
-void
-qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
-    uint16_t *data)
-{
-	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
-	if (data[0] == MBS_COMMAND_COMPLETE) {
-		qla2x00_update_fcport(vha, fcport);
-
-		return;
-	}
-
-	/* Retry login. */
-	if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
-		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
-	else
-		qla2x00_mark_device_lost(vha, fcport, 1, 0);
-
-	return;
-}
-
 /****************************************************************************/
 /*                QLogic ISP2x00 Hardware Support Functions.                */
 /****************************************************************************/
@@ -2134,6 +2140,7 @@
 	int	rval;
 	struct qla_hw_data *ha = vha->hw;
 	struct req_que *req = ha->req_q_map[0];
+	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 
 	memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
 	memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));
@@ -2168,6 +2175,15 @@
 
 	ha->isp_ops->reset_chip(vha);
 
+	/* Check for secure flash support */
+	if (IS_QLA28XX(ha)) {
+		if (RD_REG_DWORD(&reg->mailbox12) & BIT_0) {
+			ql_log(ql_log_info, vha, 0xffff, "Adapter is Secure\n");
+			ha->flags.secure_adapter = 1;
+		}
+	}
+
 	rval = qla2xxx_get_flash_info(vha);
 	if (rval) {
 		ql_log(ql_log_fatal, vha, 0x004f,
@@ -2223,6 +2239,10 @@
 	if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha))
 		rval = qla2x00_init_rings(vha);
 
+	/* No point in continuing if firmware initialization failed. */
+	if (rval != QLA_SUCCESS)
+		return rval;
+
 	ha->flags.chip_reset_done = 1;
 
 	if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
@@ -2484,7 +2504,7 @@
  *
  * Returns 0 on success.
  */
-void
+int
 qla2x00_reset_chip(scsi_qla_host_t *vha)
 {
 	unsigned long   flags = 0;
@@ -2492,9 +2512,10 @@
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
 	uint32_t	cnt;
 	uint16_t	cmd;
+	int rval = QLA_FUNCTION_FAILED;
 
 	if (unlikely(pci_channel_offline(ha->pdev)))
-		return;
+		return rval;
 
 	ha->isp_ops->disable_intrs(ha);
 
@@ -2620,6 +2641,8 @@
 	}
 
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	return QLA_SUCCESS;
 }
 
 /**
@@ -2860,14 +2883,15 @@
  *
  * Returns 0 on success.
  */
-void
+int
 qla24xx_reset_chip(scsi_qla_host_t *vha)
 {
 	struct qla_hw_data *ha = vha->hw;
+	int rval = QLA_FUNCTION_FAILED;
 
 	if (pci_channel_offline(ha->pdev) &&
 	    ha->flags.pci_channel_io_perm_failure) {
-		return;
+		return rval;
 	}
 
 	ha->isp_ops->disable_intrs(ha);
@@ -2875,7 +2899,9 @@
 	qla25xx_manipulate_risc_semaphore(vha);
 
 	/* Perform RISC reset. */
-	qla24xx_reset_risc(vha);
+	rval = qla24xx_reset_risc(vha);
+
+	return rval;
 }
 
 /**
@@ -3033,89 +3059,102 @@
 }
 
 static void
-qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
+qla2x00_init_fce_trace(scsi_qla_host_t *vha)
 {
 	int rval;
 	dma_addr_t tc_dma;
 	void *tc;
 	struct qla_hw_data *ha = vha->hw;
 
+	if (!IS_FWI2_CAPABLE(ha))
+		return;
+
+	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+		return;
+
+	if (ha->fce) {
+		ql_dbg(ql_dbg_init, vha, 0x00bd,
+		       "%s: FCE Mem is already allocated.\n",
+		       __func__);
+		return;
+	}
+
+	/* Allocate memory for Fibre Channel Event Buffer. */
+	tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
+				GFP_KERNEL);
+	if (!tc) {
+		ql_log(ql_log_warn, vha, 0x00be,
+		       "Unable to allocate (%d KB) for FCE.\n",
+		       FCE_SIZE / 1024);
+		return;
+	}
+
+	rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
+					ha->fce_mb, &ha->fce_bufs);
+	if (rval) {
+		ql_log(ql_log_warn, vha, 0x00bf,
+		       "Unable to initialize FCE (%d).\n", rval);
+		dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, tc_dma);
+		return;
+	}
+
+	ql_dbg(ql_dbg_init, vha, 0x00c0,
+	       "Allocated (%d KB) for FCE...\n", FCE_SIZE / 1024);
+
+	ha->flags.fce_enabled = 1;
+	ha->fce_dma = tc_dma;
+	ha->fce = tc;
+}
+
+static void
+qla2x00_init_eft_trace(scsi_qla_host_t *vha)
+{
+	int rval;
+	dma_addr_t tc_dma;
+	void *tc;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!IS_FWI2_CAPABLE(ha))
+		return;
+
 	if (ha->eft) {
 		ql_dbg(ql_dbg_init, vha, 0x00bd,
-		    "%s: Offload Mem is already allocated.\n",
+		    "%s: EFT Mem is already allocated.\n",
 		    __func__);
 		return;
 	}
 
-	if (IS_FWI2_CAPABLE(ha)) {
-		/* Allocate memory for Fibre Channel Event Buffer. */
-		if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
-		    !IS_QLA27XX(ha))
-			goto try_eft;
-
-		if (ha->fce)
-			dma_free_coherent(&ha->pdev->dev,
-			    FCE_SIZE, ha->fce, ha->fce_dma);
-
-		/* Allocate memory for Fibre Channel Event Buffer. */
-		tc = dma_zalloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
-					 GFP_KERNEL);
-		if (!tc) {
-			ql_log(ql_log_warn, vha, 0x00be,
-			    "Unable to allocate (%d KB) for FCE.\n",
-			    FCE_SIZE / 1024);
-			goto try_eft;
-		}
-
-		rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
-		    ha->fce_mb, &ha->fce_bufs);
-		if (rval) {
-			ql_log(ql_log_warn, vha, 0x00bf,
-			    "Unable to initialize FCE (%d).\n", rval);
-			dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc,
-			    tc_dma);
-			ha->flags.fce_enabled = 0;
-			goto try_eft;
-		}
-		ql_dbg(ql_dbg_init, vha, 0x00c0,
-		    "Allocate (%d KB) for FCE...\n", FCE_SIZE / 1024);
-
-		ha->flags.fce_enabled = 1;
-		ha->fce_dma = tc_dma;
-		ha->fce = tc;
-
-try_eft:
-		if (ha->eft)
-			dma_free_coherent(&ha->pdev->dev,
-			    EFT_SIZE, ha->eft, ha->eft_dma);
-
-		/* Allocate memory for Extended Trace Buffer. */
-		tc = dma_zalloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
-					 GFP_KERNEL);
-		if (!tc) {
-			ql_log(ql_log_warn, vha, 0x00c1,
-			    "Unable to allocate (%d KB) for EFT.\n",
-			    EFT_SIZE / 1024);
-			goto eft_err;
-		}
-
-		rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
-		if (rval) {
-			ql_log(ql_log_warn, vha, 0x00c2,
-			    "Unable to initialize EFT (%d).\n", rval);
-			dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
-			    tc_dma);
-			goto eft_err;
-		}
-		ql_dbg(ql_dbg_init, vha, 0x00c3,
-		    "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
-
-		ha->eft_dma = tc_dma;
-		ha->eft = tc;
+	/* Allocate memory for Extended Trace Buffer. */
+	tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
+				GFP_KERNEL);
+	if (!tc) {
+		ql_log(ql_log_warn, vha, 0x00c1,
+		       "Unable to allocate (%d KB) for EFT.\n",
+		       EFT_SIZE / 1024);
+		return;
 	}
 
-eft_err:
-	return;
+	rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
+	if (rval) {
+		ql_log(ql_log_warn, vha, 0x00c2,
+		       "Unable to initialize EFT (%d).\n", rval);
+		dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc, tc_dma);
+		return;
+	}
+
+	ql_dbg(ql_dbg_init, vha, 0x00c3,
+	       "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
+
+	ha->eft_dma = tc_dma;
+	ha->eft = tc;
+}
+
+static void
+qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
+{
+	qla2x00_init_fce_trace(vha);
+	qla2x00_init_eft_trace(vha);
 }
 
 void
@@ -3138,7 +3177,7 @@
 		mem_size = (ha->fw_memory_size - 0x11000 + 1) *
 		    sizeof(uint16_t);
 	} else if (IS_FWI2_CAPABLE(ha)) {
-		if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
 			fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
 		else if (IS_QLA81XX(ha))
 			fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
@@ -3150,94 +3189,124 @@
 		mem_size = (ha->fw_memory_size - 0x100000 + 1) *
 		    sizeof(uint32_t);
 		if (ha->mqenable) {
-			if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
+			if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) &&
+			    !IS_QLA28XX(ha))
 				mq_size = sizeof(struct qla2xxx_mq_chain);
 			/*
-			 * Allocate maximum buffer size for all queues.
+			 * Allocate maximum buffer size for all queues except Q0.
 			 * Resizing must be done at end-of-dump processing.
 			 */
-			mq_size += ha->max_req_queues *
+			mq_size += (ha->max_req_queues - 1) *
 			    (req->length * sizeof(request_t));
-			mq_size += ha->max_rsp_queues *
+			mq_size += (ha->max_rsp_queues - 1) *
 			    (rsp->length * sizeof(response_t));
 		}
 		if (ha->tgt.atio_ring)
 			mq_size += ha->tgt.atio_q_length * sizeof(request_t);
-		/* Allocate memory for Fibre Channel Event Buffer. */
-		if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
-		    !IS_QLA27XX(ha))
-			goto try_eft;
 
-		fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
-try_eft:
-		ql_dbg(ql_dbg_init, vha, 0x00c3,
-		    "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
-		eft_size = EFT_SIZE;
+		qla2x00_init_fce_trace(vha);
+		if (ha->fce)
+			fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
+		qla2x00_init_eft_trace(vha);
+		if (ha->eft)
+			eft_size = EFT_SIZE;
 	}
 
-	if (IS_QLA27XX(ha)) {
-		if (!ha->fw_dump_template) {
-			ql_log(ql_log_warn, vha, 0x00ba,
-			    "Failed missing fwdump template\n");
-			return;
+	if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+		struct fwdt *fwdt = ha->fwdt;
+		uint j;
+
+		for (j = 0; j < 2; j++, fwdt++) {
+			if (!fwdt->template) {
+				ql_dbg(ql_dbg_init, vha, 0x00ba,
+				    "-> fwdt%u no template\n", j);
+				continue;
+			}
+			ql_dbg(ql_dbg_init, vha, 0x00fa,
+			    "-> fwdt%u calculating fwdump size...\n", j);
+			fwdt->dump_size = qla27xx_fwdt_calculate_dump_size(
+			    vha, fwdt->template);
+			ql_dbg(ql_dbg_init, vha, 0x00fa,
+			    "-> fwdt%u calculated fwdump size = %#lx bytes\n",
+			    j, fwdt->dump_size);
+			dump_size += fwdt->dump_size;
 		}
-		dump_size = qla27xx_fwdt_calculate_dump_size(vha);
-		ql_dbg(ql_dbg_init, vha, 0x00fa,
-		    "-> allocating fwdump (%x bytes)...\n", dump_size);
-		goto allocate;
+	} else {
+		req_q_size = req->length * sizeof(request_t);
+		rsp_q_size = rsp->length * sizeof(response_t);
+		dump_size = offsetof(struct qla2xxx_fw_dump, isp);
+		dump_size += fixed_size + mem_size + req_q_size + rsp_q_size
+			+ eft_size;
+		ha->chain_offset = dump_size;
+		dump_size += mq_size + fce_size;
+		if (ha->exchoffld_buf)
+			dump_size += sizeof(struct qla2xxx_offld_chain) +
+				ha->exchoffld_size;
+		if (ha->exlogin_buf)
+			dump_size += sizeof(struct qla2xxx_offld_chain) +
+				ha->exlogin_size;
 	}
 
-	req_q_size = req->length * sizeof(request_t);
-	rsp_q_size = rsp->length * sizeof(response_t);
-	dump_size = offsetof(struct qla2xxx_fw_dump, isp);
-	dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size;
-	ha->chain_offset = dump_size;
-	dump_size += mq_size + fce_size;
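+	/*
+	 * Grow the dump buffer only when the computed size exceeds
+	 * the current allocation.
+	 */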
+	if (!ha->fw_dump_len || dump_size > ha->fw_dump_alloc_len) {
 
-	if (ha->exchoffld_buf)
-		dump_size += sizeof(struct qla2xxx_offld_chain) +
-			ha->exchoffld_size;
-	if (ha->exlogin_buf)
-		dump_size += sizeof(struct qla2xxx_offld_chain) +
-			ha->exlogin_size;
+		ql_dbg(ql_dbg_init, vha, 0x00c5,
+		    "%s dump_size %d fw_dump_len %d fw_dump_alloc_len %d\n",
+		    __func__, dump_size, ha->fw_dump_len,
+		    ha->fw_dump_alloc_len);
 
-allocate:
-	if (!ha->fw_dump_len || dump_size != ha->fw_dump_len) {
 		fw_dump = vmalloc(dump_size);
 		if (!fw_dump) {
 			ql_log(ql_log_warn, vha, 0x00c4,
 			    "Unable to allocate (%d KB) for firmware dump.\n",
 			    dump_size / 1024);
 		} else {
-			if (ha->fw_dump)
+			mutex_lock(&ha->optrom_mutex);
+			if (ha->fw_dumped) {
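+				/* Preserve the already-captured dump across the resize. */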
+				memcpy(fw_dump, ha->fw_dump, ha->fw_dump_len);
 				vfree(ha->fw_dump);
-			ha->fw_dump = fw_dump;
+				ha->fw_dump = fw_dump;
+				ha->fw_dump_alloc_len =  dump_size;
+				ql_dbg(ql_dbg_init, vha, 0x00c5,
+				    "Re-allocated (%d KB) and saved firmware dump.\n",
+				    dump_size / 1024);
+			} else {
+				if (ha->fw_dump)
+					vfree(ha->fw_dump);
+				ha->fw_dump = fw_dump;
 
-			ha->fw_dump_len = dump_size;
-			ql_dbg(ql_dbg_init, vha, 0x00c5,
-			    "Allocated (%d KB) for firmware dump.\n",
-			    dump_size / 1024);
+				ha->fw_dump_len = ha->fw_dump_alloc_len =
+				    dump_size;
+				ql_dbg(ql_dbg_init, vha, 0x00c5,
+				    "Allocated (%d KB) for firmware dump.\n",
+				    dump_size / 1024);
 
-			if (IS_QLA27XX(ha))
-				return;
+				if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+					mutex_unlock(&ha->optrom_mutex);
+					return;
+				}
 
-			ha->fw_dump->signature[0] = 'Q';
-			ha->fw_dump->signature[1] = 'L';
-			ha->fw_dump->signature[2] = 'G';
-			ha->fw_dump->signature[3] = 'C';
-			ha->fw_dump->version = htonl(1);
+				ha->fw_dump->signature[0] = 'Q';
+				ha->fw_dump->signature[1] = 'L';
+				ha->fw_dump->signature[2] = 'G';
+				ha->fw_dump->signature[3] = 'C';
+				ha->fw_dump->version = htonl(1);
 
-			ha->fw_dump->fixed_size = htonl(fixed_size);
-			ha->fw_dump->mem_size = htonl(mem_size);
-			ha->fw_dump->req_q_size = htonl(req_q_size);
-			ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
+				ha->fw_dump->fixed_size = htonl(fixed_size);
+				ha->fw_dump->mem_size = htonl(mem_size);
+				ha->fw_dump->req_q_size = htonl(req_q_size);
+				ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
 
-			ha->fw_dump->eft_size = htonl(eft_size);
-			ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma));
-			ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma));
+				ha->fw_dump->eft_size = htonl(eft_size);
+				ha->fw_dump->eft_addr_l =
+				    htonl(LSD(ha->eft_dma));
+				ha->fw_dump->eft_addr_h =
+				    htonl(MSD(ha->eft_dma));
 
-			ha->fw_dump->header_size =
-				htonl(offsetof(struct qla2xxx_fw_dump, isp));
+				ha->fw_dump->header_size =
+					htonl(offsetof
+					    (struct qla2xxx_fw_dump, isp));
+			}
+			mutex_unlock(&ha->optrom_mutex);
 		}
 	}
 }
@@ -3530,6 +3599,12 @@
 			if (rval == QLA_SUCCESS) {
 				qla24xx_detect_sfp(vha);
 
+				if ((IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
+				    IS_QLA28XX(ha)) &&
+				    (ha->zio_mode == QLA_ZIO_MODE_6))
+					qla27xx_set_zio_threshold(vha,
+					    ha->last_zio_threshold);
+
 				rval = qla2x00_set_exlogins_buffer(vha);
 				if (rval != QLA_SUCCESS)
 					goto failed;
@@ -3597,7 +3672,7 @@
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 	}
 
-	if (IS_QLA27XX(ha))
+	if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
 		ha->flags.fac_supported = 1;
 	else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
 		uint32_t size;
@@ -3612,7 +3687,8 @@
 			    ha->fw_major_version, ha->fw_minor_version,
 			    ha->fw_subminor_version);
 
-			if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+			if (IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
+			    IS_QLA28XX(ha)) {
 				ha->flags.fac_supported = 0;
 				rval = QLA_SUCCESS;
 			}
@@ -3674,8 +3750,7 @@
 	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115,
 	    "Serial link options.\n");
 	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109,
-	    (uint8_t *)&ha->fw_seriallink_options,
-	    sizeof(ha->fw_seriallink_options));
+	    ha->fw_seriallink_options, sizeof(ha->fw_seriallink_options));
 
 	ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
 	if (ha->fw_seriallink_options[3] & BIT_2) {
@@ -3765,7 +3840,7 @@
 
 	/* Move PUREX, ABTS RX & RIDA to ATIOQ */
 	if (ql2xmvasynctoatio &&
-	    (IS_QLA83XX(ha) || IS_QLA27XX(ha))) {
+	    (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) {
 		if (qla_tgt_mode_enabled(vha) ||
 		    qla_dual_mode_enabled(vha))
 			ha->fw_options[2] |= BIT_11;
@@ -3773,7 +3848,8 @@
 			ha->fw_options[2] &= ~BIT_11;
 	}
 
-	if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+	if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
+	    IS_QLA28XX(ha)) {
 		/*
 		 * Tell FW to track each exchange to prevent
 		 * driver from using stale exchange.
@@ -3826,10 +3902,8 @@
 	ha->init_cb->response_q_inpointer = cpu_to_le16(0);
 	ha->init_cb->request_q_length = cpu_to_le16(req->length);
 	ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
-	ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
-	ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
-	ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
-	ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
+	put_unaligned_le64(req->dma, &ha->init_cb->request_q_address);
+	put_unaligned_le64(rsp->dma, &ha->init_cb->response_q_address);
 
 	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0);
 	WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0);
@@ -3856,21 +3930,19 @@
 	icb->response_q_inpointer = cpu_to_le16(0);
 	icb->request_q_length = cpu_to_le16(req->length);
 	icb->response_q_length = cpu_to_le16(rsp->length);
-	icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
-	icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
-	icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
-	icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
+	put_unaligned_le64(req->dma, &icb->request_q_address);
+	put_unaligned_le64(rsp->dma, &icb->response_q_address);
 
 	/* Setup ATIO queue dma pointers for target mode */
 	icb->atio_q_inpointer = cpu_to_le16(0);
 	icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
-	icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma));
-	icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma));
+	put_unaligned_le64(ha->tgt.atio_dma, &icb->atio_q_address);
 
 	if (IS_SHADOW_REG_CAPABLE(ha))
 		icb->firmware_options_2 |= cpu_to_le32(BIT_30|BIT_29);
 
-	if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+	if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
+	    IS_QLA28XX(ha)) {
 		icb->qos = cpu_to_le16(QLA_DEFAULT_QUE_QOS);
 		icb->rid = cpu_to_le16(rid);
 		if (ha->flags.msix_enabled) {
@@ -3909,8 +3981,17 @@
 		WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
 		WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
 	}
+
 	qlt_24xx_config_rings(vha);
 
+	/* If the user has configured the speed, set it here */
+	if (ha->set_data_rate) {
+		ql_dbg(ql_dbg_init, vha, 0x00fd,
+		    "Speed set by user: %s Gbps\n",
+		    qla2x00_get_link_speed_str(ha, ha->set_data_rate));
+		icb->firmware_options_3 = (ha->set_data_rate << 13);
+	}
+
 	/* PCI posting */
 	RD_REG_DWORD(&ioreg->hccr);
 }
@@ -4018,6 +4099,7 @@
 		ql_dbg(ql_dbg_init, vha, 0x00d3,
 		    "Init Firmware -- success.\n");
 		QLA_FW_STARTED(ha);
+		vha->u_ql2xexchoffld = vha->u_ql2xiniexchg = 0;
 	}
 
 	return (rval);
@@ -4279,16 +4361,19 @@
 
 inline void
 qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
-	char *def)
+		       const char *def)
 {
 	char *st, *en;
 	uint16_t index;
+	uint64_t zero[2] = { 0 };
 	struct qla_hw_data *ha = vha->hw;
 	int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
 	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha);
 
-	if (memcmp(model, BINZERO, len) != 0) {
-		strncpy(ha->model_number, model, len);
+	if (len > sizeof(zero))
+		len = sizeof(zero);
+	if (memcmp(model, &zero, len) != 0) {
+		memcpy(ha->model_number, model, len);
 		st = en = ha->model_number;
 		en += len - 1;
 		while (en > st) {
@@ -4301,21 +4386,23 @@
 		if (use_tbl &&
 		    ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
 		    index < QLA_MODEL_NAMES)
-			strncpy(ha->model_desc,
+			strlcpy(ha->model_desc,
 			    qla2x00_model_name[index * 2 + 1],
-			    sizeof(ha->model_desc) - 1);
+			    sizeof(ha->model_desc));
 	} else {
 		index = (ha->pdev->subsystem_device & 0xff);
 		if (use_tbl &&
 		    ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
 		    index < QLA_MODEL_NAMES) {
-			strcpy(ha->model_number,
-			    qla2x00_model_name[index * 2]);
-			strncpy(ha->model_desc,
+			strlcpy(ha->model_number,
+				qla2x00_model_name[index * 2],
+				sizeof(ha->model_number));
+			strlcpy(ha->model_desc,
 			    qla2x00_model_name[index * 2 + 1],
-			    sizeof(ha->model_desc) - 1);
+			    sizeof(ha->model_desc));
 		} else {
-			strcpy(ha->model_number, def);
+			strlcpy(ha->model_number, def,
+				sizeof(ha->model_number));
 		}
 	}
 	if (IS_FWI2_CAPABLE(ha))
@@ -4374,7 +4461,7 @@
 	rval = QLA_SUCCESS;
 
 	/* Determine NVRAM starting address. */
-	ha->nvram_size = sizeof(nvram_t);
+	ha->nvram_size = sizeof(*nv);
 	ha->nvram_base = 0;
 	if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
 		if ((RD_REG_WORD(&reg->ctrl_status) >> 14) == 1)
@@ -4388,16 +4475,15 @@
 	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f,
 	    "Contents of NVRAM.\n");
 	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110,
-	    (uint8_t *)nv, ha->nvram_size);
+	    nv, ha->nvram_size);
 
 	/* Bad NVRAM data, set defaults parameters. */
-	if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
-	    nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
+	if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
+	    nv->nvram_version < 1) {
 		/* Reset NVRAM data. */
 		ql_log(ql_log_warn, vha, 0x0064,
-		    "Inconsistent NVRAM "
-		    "detected: checksum=0x%x id=%c version=0x%x.\n",
-		    chksum, nv->id[0], nv->nvram_version);
+		    "Inconsistent NVRAM detected: checksum=%#x id=%.4s version=%#x.\n",
+		    chksum, nv->id, nv->nvram_version);
 		ql_log(ql_log_warn, vha, 0x0065,
 		    "Falling back to "
 		    "functioning (yet invalid -- WWPN) defaults.\n");
@@ -4452,20 +4538,6 @@
 		rval = 1;
 	}
 
-#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
-	/*
-	 * The SN2 does not provide BIOS emulation which means you can't change
-	 * potentially bogus BIOS settings. Force the use of default settings
-	 * for link rate and frame size.  Hope that the rest of the settings
-	 * are valid.
-	 */
-	if (ia64_platform_is("sn2")) {
-		nv->frame_payload_size = 2048;
-		if (IS_QLA23XX(ha))
-			nv->special_options[1] = BIT_7;
-	}
-#endif
-
 	/* Reset Initialization control block */
 	memset(icb, 0, ha->init_cb_size);
 
@@ -4646,7 +4718,7 @@
 			ha->zio_mode = icb->add_firmware_options[0] &
 			    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
 			ha->zio_timer = icb->interrupt_delay_timer ?
-			    icb->interrupt_delay_timer: 2;
+			    icb->interrupt_delay_timer : 2;
 		}
 		icb->add_firmware_options[0] &=
 		    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
@@ -4679,7 +4751,7 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
-	rport = fcport->drport ? fcport->drport: fcport->rport;
+	rport = fcport->drport ? fcport->drport : fcport->rport;
 	fcport->drport = NULL;
 	spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
 	if (rport) {
@@ -4692,6 +4764,23 @@
 	}
 }
 
+void qla2x00_set_fcport_state(fc_port_t *fcport, int state)
+{
+	int old_state;
+
+	old_state = atomic_read(&fcport->state);
+	atomic_set(&fcport->state, state);
+
+	/* Don't print state transitions during initial allocation of fcport */
+	if (old_state && old_state != state) {
+		ql_dbg(ql_dbg_disc, fcport->vha, 0x207d,
+		       "FCPort %8phC state transitioned from %s to %s - portid=%02x%02x%02x.\n",
+		       fcport->port_name, port_state_str[old_state],
+		       port_state_str[state], fcport->d_id.b.domain,
+		       fcport->d_id.b.area, fcport->d_id.b.al_pa);
+	}
+}
+
 /**
  * qla2x00_alloc_fcport() - Allocate a generic fcport.
  * @vha: HA context
@@ -4708,6 +4797,16 @@
 	if (!fcport)
 		return NULL;
 
+	fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev,
+		sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma,
+		flags);
+	if (!fcport->ct_desc.ct_sns) {
+		ql_log(ql_log_warn, vha, 0xd049,
+		    "Failed to allocate ct_sns request.\n");
+		kfree(fcport);
+		return NULL;
+	}
+
 	/* Setup fcport template structure. */
 	fcport->vha = vha;
 	fcport->port_type = FCT_UNKNOWN;
@@ -4716,22 +4815,22 @@
 	fcport->supported_classes = FC_COS_UNSPECIFIED;
 	fcport->fp_speed = PORT_SPEED_UNKNOWN;
 
-	fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev,
-		sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma,
-		flags);
 	fcport->disc_state = DSC_DELETED;
 	fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
 	fcport->deleted = QLA_SESS_DELETED;
 	fcport->login_retry = vha->hw->login_retry_count;
+	fcport->chip_reset = vha->hw->base_qpair->chip_reset;
 	fcport->logout_on_delete = 1;
 
 	if (!fcport->ct_desc.ct_sns) {
 		ql_log(ql_log_warn, vha, 0xd049,
 		    "Failed to allocate ct_sns request.\n");
 		kfree(fcport);
-		fcport = NULL;
+		return NULL;
 	}
+
 	INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
+	INIT_WORK(&fcport->reg_work, qla_register_fcport_fn);
 	INIT_LIST_HEAD(&fcport->gnl_entry);
 	INIT_LIST_HEAD(&fcport->list);
 
@@ -4748,6 +4847,8 @@
 
 		fcport->ct_desc.ct_sns = NULL;
 	}
+	list_del(&fcport->list);
+	qla2x00_clear_loop_id(fcport);
 	kfree(fcport);
 }
 
@@ -4769,6 +4870,7 @@
 	int  rval;
 	unsigned long flags, save_flags;
 	struct qla_hw_data *ha = vha->hw;
+
 	rval = QLA_SUCCESS;
 
 	/* Get Initiator ID */
@@ -4857,19 +4959,10 @@
 			 */
 			if (qla_tgt_mode_enabled(vha) ||
 			    qla_dual_mode_enabled(vha)) {
-				if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) {
-					spin_lock_irqsave(&ha->tgt.atio_lock,
-					    flags);
-					qlt_24xx_process_atio_queue(vha, 0);
-					spin_unlock_irqrestore(
-					    &ha->tgt.atio_lock, flags);
-				} else {
-					spin_lock_irqsave(&ha->hardware_lock,
-					    flags);
-					qlt_24xx_process_atio_queue(vha, 1);
-					spin_unlock_irqrestore(
-					    &ha->hardware_lock, flags);
-				}
+				spin_lock_irqsave(&ha->tgt.atio_lock, flags);
+				qlt_24xx_process_atio_queue(vha, 0);
+				spin_unlock_irqrestore(&ha->tgt.atio_lock,
+				    flags);
 			}
 		}
 	}
@@ -4914,35 +5007,54 @@
 
 	uint16_t	index;
 	uint16_t	entries;
-	char		*id_iter;
+	struct gid_list_info *gid;
 	uint16_t	loop_id;
 	uint8_t		domain, area, al_pa;
 	struct qla_hw_data *ha = vha->hw;
 	unsigned long flags;
 
 	/* Initiate N2N login. */
-	if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
-		/* borrowing */
-		u32 *bp, i, sz;
+	if (N2N_TOPO(ha)) {
+		if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
+			/* borrowing */
+			/* borrowing the init_cb as scratch space */
 
-		memset(ha->init_cb, 0, ha->init_cb_size);
-		sz = min_t(int, sizeof(struct els_plogi_payload),
-		    ha->init_cb_size);
-		rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma,
-		    (void *)ha->init_cb, sz);
-		if (rval == QLA_SUCCESS) {
-			bp = (uint32_t *)ha->init_cb;
-			for (i = 0; i < sz/4 ; i++, bp++)
-				*bp = cpu_to_be32(*bp);
+			memset(ha->init_cb, 0, ha->init_cb_size);
+			sz = min_t(int, sizeof(struct els_plogi_payload),
+			    ha->init_cb_size);
+			rval = qla24xx_get_port_login_templ(vha,
+			    ha->init_cb_dma, (void *)ha->init_cb, sz);
+			if (rval == QLA_SUCCESS) {
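+				/* Swap the template words to big-endian. */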
+				bp = (uint32_t *)ha->init_cb;
+				for (i = 0; i < sz/4 ; i++, bp++)
+					*bp = cpu_to_be32(*bp);
 
-			memcpy(&ha->plogi_els_payld.data, (void *)ha->init_cb,
-			    sizeof(ha->plogi_els_payld.data));
-			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
-		} else {
-			ql_dbg(ql_dbg_init, vha, 0x00d1,
-			    "PLOGI ELS param read fail.\n");
+				memcpy(&ha->plogi_els_payld.data,
+				    (void *)ha->init_cb,
+				    sizeof(ha->plogi_els_payld.data));
+				set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+			} else {
+				ql_dbg(ql_dbg_init, vha, 0x00d1,
+				    "PLOGI ELS param read fail.\n");
+				goto skip_login;
+			}
 		}
-		return QLA_SUCCESS;
+
+		list_for_each_entry(fcport, &vha->vp_fcports, list) {
+			if (fcport->n2n_flag) {
+				qla24xx_fcport_handle_login(vha, fcport);
+				return QLA_SUCCESS;
+			}
+		}
+skip_login:
+		spin_lock_irqsave(&vha->work_lock, flags);
+		vha->scan.scan_retry++;
+		spin_unlock_irqrestore(&vha->work_lock, flags);
+
+		if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
+			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+		}
 	}
 
 	found_devs = 0;
@@ -4959,8 +5071,20 @@
 	ql_dbg(ql_dbg_disc, vha, 0x2011,
 	    "Entries in ID list (%d).\n", entries);
 	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
-	    (uint8_t *)ha->gid_list,
-	    entries * sizeof(struct gid_list_info));
+	    ha->gid_list, entries * sizeof(*ha->gid_list));
+
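+	/* An empty ID list may be transient; allow a bounded rescan. */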
+	if (entries == 0) {
+		spin_lock_irqsave(&vha->work_lock, flags);
+		vha->scan.scan_retry++;
+		spin_unlock_irqrestore(&vha->work_lock, flags);
+
+		if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
+			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+		}
+	} else {
+		vha->scan.scan_retry = 0;
+	}
 
 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
 		fcport->scan_state = QLA_FCPORT_SCAN;
@@ -4977,28 +5101,28 @@
 	new_fcport->flags &= ~FCF_FABRIC_DEVICE;
 
 	/* Add devices to port list. */
-	id_iter = (char *)ha->gid_list;
+	gid = ha->gid_list;
 	for (index = 0; index < entries; index++) {
-		domain = ((struct gid_list_info *)id_iter)->domain;
-		area = ((struct gid_list_info *)id_iter)->area;
-		al_pa = ((struct gid_list_info *)id_iter)->al_pa;
+		domain = gid->domain;
+		area = gid->area;
+		al_pa = gid->al_pa;
 		if (IS_QLA2100(ha) || IS_QLA2200(ha))
-			loop_id = (uint16_t)
-			    ((struct gid_list_info *)id_iter)->loop_id_2100;
+			loop_id = gid->loop_id_2100;
 		else
-			loop_id = le16_to_cpu(
-			    ((struct gid_list_info *)id_iter)->loop_id);
-		id_iter += ha->gid_list_info_size;
+			loop_id = le16_to_cpu(gid->loop_id);
+		gid = (void *)gid + ha->gid_list_info_size;
 
 		/* Bypass reserved domain fields. */
 		if ((domain & 0xf0) == 0xf0)
 			continue;
 
 		/* Bypass if not same domain and area of adapter. */
-		if (area && domain &&
-		    (area != vha->d_id.b.area || domain != vha->d_id.b.domain))
+		if (area && domain && ((area != vha->d_id.b.area) ||
+		    (domain != vha->d_id.b.domain)) &&
+		    (ha->current_topology == ISP_CFG_NL))
 			continue;
 
 		/* Bypass invalid local loop ID. */
 		if (loop_id > LAST_LOCAL_LOOP_ID)
 			continue;
@@ -5195,16 +5319,23 @@
 
 	rport->supported_classes = fcport->supported_classes;
 
-	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
+	rport_ids.roles = FC_PORT_ROLE_UNKNOWN;
 	if (fcport->port_type == FCT_INITIATOR)
-		rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+		rport_ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
 	if (fcport->port_type == FCT_TARGET)
-		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
+		rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET;
+	if (fcport->port_type & FCT_NVME_INITIATOR)
+		rport_ids.roles |= FC_PORT_ROLE_NVME_INITIATOR;
+	if (fcport->port_type & FCT_NVME_TARGET)
+		rport_ids.roles |= FC_PORT_ROLE_NVME_TARGET;
+	if (fcport->port_type & FCT_NVME_DISCOVERY)
+		rport_ids.roles |= FC_PORT_ROLE_NVME_DISCOVERY;
 
 	ql_dbg(ql_dbg_disc, vha, 0x20ee,
 	    "%s %8phN. rport %p is %s mode\n",
 	    __func__, fcport->port_name, rport,
-	    (fcport->port_type == FCT_TARGET) ? "tgt" : "ini");
+	    (fcport->port_type == FCT_TARGET) ? "tgt" :
+	    ((fcport->port_type & FCT_NVME) ? "nvme" : "ini"));
 
 	fc_remote_port_rolechg(rport, rport_ids.roles);
 }
@@ -5227,20 +5358,19 @@
 void
 qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
 {
-	fcport->vha = vha;
-
 	if (IS_SW_RESV_ADDR(fcport->d_id))
 		return;
 
+	ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n",
+	    __func__, fcport->port_name);
+
+	fcport->disc_state = DSC_UPD_FCPORT;
+	fcport->login_retry = vha->hw->login_retry_count;
 	fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
-	fcport->disc_state = DSC_LOGIN_COMPLETE;
 	fcport->deleted = 0;
 	fcport->logout_on_delete = 1;
-	fcport->login_retry = vha->hw->login_retry_count;
 	fcport->n2n_chip_reset = fcport->n2n_link_reset_cnt = 0;
 
-	qla2x00_iidma_fcport(vha, fcport);
-
 	switch (vha->hw->current_topology) {
 	case ISP_CFG_N:
 	case ISP_CFG_NL:
@@ -5250,6 +5380,8 @@
 		break;
 	}
 
+	qla2x00_iidma_fcport(vha, fcport);
+
 	if (fcport->fc4f_nvme) {
 		qla_nvme_register_remote(vha, fcport);
 		fcport->disc_state = DSC_LOGIN_COMPLETE;
@@ -5278,6 +5410,8 @@
 		break;
 	}
 
+	qla2x00_set_fcport_state(fcport, FCS_ONLINE);
+
 	if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) {
 		if (fcport->id_changed) {
 			fcport->id_changed = 0;
@@ -5294,7 +5428,36 @@
 			qla24xx_post_gpsc_work(vha, fcport);
 		}
 	}
-	qla2x00_set_fcport_state(fcport, FCS_ONLINE);
+
+	fcport->disc_state = DSC_LOGIN_COMPLETE;
+}
+
+void qla_register_fcport_fn(struct work_struct *work)
+{
+	fc_port_t *fcport = container_of(work, struct fc_port, reg_work);
+	u32 rscn_gen = fcport->rscn_gen;
+	u16 data[2];
+
+	if (IS_SW_RESV_ADDR(fcport->d_id))
+		return;
+
+	qla2x00_update_fcport(fcport->vha, fcport);
+
+	if (rscn_gen != fcport->rscn_gen) {
+		/* RSCN(s) came in while registration */
+		/* RSCN(s) came in while registration was in progress */
+		case DSC_DELETE_PEND:
+			qlt_schedule_sess_for_deletion(fcport);
+			break;
+		case DSC_ADISC:
+			data[0] = data[1] = 0;
+			qla2x00_post_async_adisc_work(fcport->vha, fcport,
+			    data);
+			break;
+		default:
+			break;
+		}
+	}
 }
 
 /*
@@ -5746,55 +5909,6 @@
 	return (rval);
 }
 
-/*
- * qla2x00_find_new_loop_id
- *	Scan through our port list and find a new usable loop ID.
- *
- * Input:
- *	ha:	adapter state pointer.
- *	dev:	port structure pointer.
- *
- * Returns:
- *	qla2x00 local function return status code.
- *
- * Context:
- *	Kernel context.
- */
-int
-qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
-{
-	int	rval;
-	struct qla_hw_data *ha = vha->hw;
-	unsigned long flags = 0;
-
-	rval = QLA_SUCCESS;
-
-	spin_lock_irqsave(&ha->vport_slock, flags);
-
-	dev->loop_id = find_first_zero_bit(ha->loop_id_map,
-	    LOOPID_MAP_SIZE);
-	if (dev->loop_id >= LOOPID_MAP_SIZE ||
-	    qla2x00_is_reserved_id(vha, dev->loop_id)) {
-		dev->loop_id = FC_NO_LOOP_ID;
-		rval = QLA_FUNCTION_FAILED;
-	} else
-		set_bit(dev->loop_id, ha->loop_id_map);
-
-	spin_unlock_irqrestore(&ha->vport_slock, flags);
-
-	if (rval == QLA_SUCCESS)
-		ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
-		    "Assigning new loopid=%x, portid=%x.\n",
-		    dev->loop_id, dev->d_id.b24);
-	else
-		ql_log(ql_log_warn, dev->vha, 0x2087,
-		    "No loop_id's available, portid=%x.\n",
-		    dev->d_id.b24);
-
-	return (rval);
-}
-
-
 /* FW does not set aside Loop id for MGMT Server/FFFFFAh */
 int
 qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *vha)
@@ -6016,11 +6130,6 @@
 {
 	int rval = QLA_SUCCESS;
 	uint32_t wait_time;
-	struct req_que *req;
-	struct rsp_que *rsp;
-
-	req = vha->req;
-	rsp = req->rsp;
 
 	clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
 	if (vha->flags.online) {
@@ -6033,8 +6142,8 @@
 					 * Issue a marker after FW becomes
 					 * ready.
 					 */
-					qla2x00_marker(vha, req, rsp, 0, 0,
-						MK_SYNC_ALL);
+					qla2x00_marker(vha, vha->hw->base_qpair,
+					    0, 0, MK_SYNC_ALL);
 					vha->marker_needed = 0;
 				}
 
@@ -6291,6 +6400,7 @@
 		qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
 	} else {
 		const char *state = qla83xx_dev_state_to_string(dev_state);
+
 		ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n", state);
 
 		/* SV: XXX: Is timeout required here? */
@@ -6466,7 +6576,8 @@
 					LOOP_DOWN_TIME);
 	}
 	/* Wait for pending cmds to complete */
-	qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST);
+	WARN_ON_ONCE(qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST)
+		     != QLA_SUCCESS);
 }
 
 void
@@ -6498,6 +6609,7 @@
 	if (!(IS_P3P_TYPE(ha)))
 		ha->isp_ops->reset_chip(vha);
 
+	ha->link_data_rate = PORT_SPEED_UNKNOWN;
 	SAVE_TOPO(ha);
 	ha->flags.rida_fmt2 = 0;
 	ha->flags.n2n_ae = 0;
@@ -6553,8 +6665,10 @@
 	}
 
 	/* Clear all async request states across all VPs. */
-	list_for_each_entry(fcport, &vha->vp_fcports, list)
+	list_for_each_entry(fcport, &vha->vp_fcports, list) {
 		fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
+		fcport->scan_state = 0;
+	}
 	spin_lock_irqsave(&ha->vport_slock, flags);
 	list_for_each_entry(vp, &ha->vp_list, list) {
 		atomic_inc(&vp->vref_count);
@@ -6611,6 +6725,14 @@
 	if (vha->flags.online) {
 		qla2x00_abort_isp_cleanup(vha);
 
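+		/* ISP_ABORT_TO_ROM: stop after cleanup, leaving the RISC in ROM code */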
+		if (test_and_clear_bit(ISP_ABORT_TO_ROM, &vha->dpc_flags)) {
+			ha->flags.chip_reset_done = 1;
+			vha->flags.online = 1;
+			status = 0;
+			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
+			return status;
+		}
+
 		if (IS_QLA8031(ha)) {
 			ql_dbg(ql_dbg_p3p, vha, 0xb05c,
 			    "Clearing fcoe driver presence.\n");
@@ -6626,6 +6748,20 @@
 			return status;
 		}
 
+		switch (vha->qlini_mode) {
+		case QLA2XXX_INI_MODE_DISABLED:
+			if (!qla_tgt_mode_enabled(vha))
+				return 0;
+			break;
+		case QLA2XXX_INI_MODE_DUAL:
+			if (!qla_dual_mode_enabled(vha))
+				return 0;
+			break;
+		case QLA2XXX_INI_MODE_ENABLED:
+		default:
+			break;
+		}
+
 		ha->isp_ops->get_flash_version(vha, req->ring);
 
 		ha->isp_ops->nvram_config(vha);
@@ -6757,8 +6893,6 @@
 {
 	int status = 0;
 	struct qla_hw_data *ha = vha->hw;
-	struct req_que *req = ha->req_q_map[0];
-	struct rsp_que *rsp = ha->rsp_q_map[0];
 
 	/* If firmware needs to be loaded */
 	if (qla2x00_isp_firmware(vha)) {
@@ -6778,7 +6912,7 @@
 		status = qla2x00_fw_ready(vha);
 		if (!status) {
 			/* Issue a marker after FW becomes ready. */
-			qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
+			qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
 			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
 		}
 
@@ -6839,7 +6973,7 @@
 * Input:
 *      ha = adapter block pointer.
 */
-void
+int
 qla2x00_reset_adapter(scsi_qla_host_t *vha)
 {
 	unsigned long flags = 0;
@@ -6855,17 +6989,20 @@
 	WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
 	RD_REG_WORD(&reg->hccr);			/* PCI Posting. */
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	return QLA_SUCCESS;
 }
 
-void
+int
 qla24xx_reset_adapter(scsi_qla_host_t *vha)
 {
 	unsigned long flags = 0;
 	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+	int rval = QLA_SUCCESS;
 
 	if (IS_P3P_TYPE(ha))
-		return;
+		return rval;
 
 	vha->flags.online = 0;
 	ha->isp_ops->disable_intrs(ha);
@@ -6879,6 +7016,8 @@
 
 	if (IS_NOPOLLING_TYPE(ha))
 		ha->isp_ops->enable_intrs(ha);
+
+	return rval;
 }
 
 /* On sparc systems, obtain port and node WWN from firmware
@@ -6929,34 +7068,33 @@
 		ha->vpd_base = FA_NVRAM_VPD1_ADDR;
 	}
 
-	ha->nvram_size = sizeof(struct nvram_24xx);
+	ha->nvram_size = sizeof(*nv);
 	ha->vpd_size = FA_NVRAM_VPD_SIZE;
 
 	/* Get VPD data into cache */
 	ha->vpd = ha->nvram + VPD_OFFSET;
-	ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd,
+	ha->isp_ops->read_nvram(vha, ha->vpd,
 	    ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
 
 	/* Get NVRAM data into cache and calculate checksum. */
 	dptr = (uint32_t *)nv;
-	ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base,
-	    ha->nvram_size);
+	ha->isp_ops->read_nvram(vha, dptr, ha->nvram_base, ha->nvram_size);
 	for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
 		chksum += le32_to_cpu(*dptr);
 
 	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a,
 	    "Contents of NVRAM\n");
 	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d,
-	    (uint8_t *)nv, ha->nvram_size);
+	    nv, ha->nvram_size);
 
 	/* Bad NVRAM data, set defaults parameters. */
-	if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
-	    || nv->id[3] != ' ' ||
-	    nv->nvram_version < cpu_to_le16(ICB_VERSION)) {
+	if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
+	    le16_to_cpu(nv->nvram_version) < ICB_VERSION) {
 		/* Reset NVRAM data. */
 		ql_log(ql_log_warn, vha, 0x006b,
-		    "Inconsistent NVRAM detected: checksum=0x%x id=%c "
-		    "version=0x%x.\n", chksum, nv->id[0], nv->nvram_version);
+		    "Inconsistent NVRAM checksum=%#x id=%.4s version=%#x.\n",
+		    chksum, nv->id, le16_to_cpu(nv->nvram_version));
+		ql_dump_buffer(ql_dbg_init, vha, 0x006b, nv, sizeof(*nv));
 		ql_log(ql_log_warn, vha, 0x006c,
 		    "Falling back to functioning (yet invalid -- WWPN) "
 		    "defaults.\n");
@@ -7064,11 +7202,11 @@
 	ha->flags.disable_risc_code_load = 0;
 	ha->flags.enable_lip_reset = 0;
 	ha->flags.enable_lip_full_login =
-	    le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
+	    le32_to_cpu(nv->host_p) & BIT_10 ? 1 : 0;
 	ha->flags.enable_target_reset =
-	    le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
+	    le32_to_cpu(nv->host_p) & BIT_11 ? 1 : 0;
 	ha->flags.enable_led_scheme = 0;
-	ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
+	ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0;
 
 	ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
 	    (BIT_6 | BIT_5 | BIT_4)) >> 4;
@@ -7142,7 +7280,7 @@
 		ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
 		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
 		ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
-		    le16_to_cpu(icb->interrupt_delay_timer): 2;
+		    le16_to_cpu(icb->interrupt_delay_timer) : 2;
 	}
 	icb->firmware_options_2 &= cpu_to_le32(
 	    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
@@ -7165,128 +7303,315 @@
 	return (rval);
 }
 
-uint8_t qla27xx_find_valid_image(struct scsi_qla_host *vha)
+static void
+qla27xx_print_image(struct scsi_qla_host *vha, char *name,
+    struct qla27xx_image_status *image_status)
 {
-	struct qla27xx_image_status pri_image_status, sec_image_status;
-	uint8_t valid_pri_image, valid_sec_image;
-	uint32_t *wptr;
-	uint32_t cnt, chksum, size;
-	struct qla_hw_data *ha = vha->hw;
+	ql_dbg(ql_dbg_init, vha, 0x018b,
+	    "%s %s: mask=%#02x gen=%#04x ver=%u.%u map=%#01x sum=%#08x sig=%#08x\n",
+	    name, "status",
+	    image_status->image_status_mask,
+	    le16_to_cpu(image_status->generation),
+	    image_status->ver_major,
+	    image_status->ver_minor,
+	    image_status->bitmap,
+	    le32_to_cpu(image_status->checksum),
+	    le32_to_cpu(image_status->signature));
+}
 
-	valid_pri_image = valid_sec_image = 1;
-	ha->active_image = 0;
-	size = sizeof(struct qla27xx_image_status) / sizeof(uint32_t);
+static bool
+qla28xx_check_aux_image_status_signature(
+    struct qla27xx_image_status *image_status)
+{
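+	/* note: returns true when the signature does NOT match */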
+	ulong signature = le32_to_cpu(image_status->signature);
+
+	return signature != QLA28XX_AUX_IMG_STATUS_SIGN;
+}
+
+static bool
+qla27xx_check_image_status_signature(struct qla27xx_image_status *image_status)
+{
+	ulong signature = le32_to_cpu(image_status->signature);
+
+	return
+	    signature != QLA27XX_IMG_STATUS_SIGN &&
+	    signature != QLA28XX_IMG_STATUS_SIGN;
+}
+
+static ulong
+qla27xx_image_status_checksum(struct qla27xx_image_status *image_status)
+{
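+	/* a valid status block sums to zero across all of its words */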
+	uint32_t *p = (void *)image_status;
+	uint n = sizeof(*image_status) / sizeof(*p);
+	uint32_t sum = 0;
+
+	for ( ; n--; p++)
+		sum += le32_to_cpup(p);
+
+	return sum;
+}
+
+static inline uint
+qla28xx_component_bitmask(struct qla27xx_image_status *aux, uint bitmask)
+{
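+	/* a set bitmap bit selects the secondary region for the component */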
+	return aux->bitmap & bitmask ?
+	    QLA27XX_SECONDARY_IMAGE : QLA27XX_PRIMARY_IMAGE;
+}
+
+static void
+qla28xx_component_status(
+    struct active_regions *active_regions, struct qla27xx_image_status *aux)
+{
+	active_regions->aux.board_config =
+	    qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_BOARD_CONFIG);
+
+	active_regions->aux.vpd_nvram =
+	    qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_VPD_NVRAM);
+
+	active_regions->aux.npiv_config_0_1 =
+	    qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_0_1);
+
+	active_regions->aux.npiv_config_2_3 =
+	    qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_2_3);
+}
+
+static int
+qla27xx_compare_image_generation(
+    struct qla27xx_image_status *pri_image_status,
+    struct qla27xx_image_status *sec_image_status)
+{
+	/* compute the generation delta in signed 16 bits so counter wrap is handled */
+	int16_t delta =
+	    le16_to_cpu(pri_image_status->generation) -
+	    le16_to_cpu(sec_image_status->generation);
+
+	ql_dbg(ql_dbg_init, NULL, 0x0180, "generation delta = %d\n", delta);
+
+	return delta;
+}
+
+void
+qla28xx_get_aux_images(
+	struct scsi_qla_host *vha, struct active_regions *active_regions)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct qla27xx_image_status pri_aux_image_status, sec_aux_image_status;
+	bool valid_pri_image = false, valid_sec_image = false;
+	bool active_pri_image = false, active_sec_image = false;
+
+	if (!ha->flt_region_aux_img_status_pri) {
+		ql_dbg(ql_dbg_init, vha, 0x018a, "Primary aux image not addressed\n");
+		goto check_sec_image;
+	}
+
+	qla24xx_read_flash_data(vha, (void *)&pri_aux_image_status,
+	    ha->flt_region_aux_img_status_pri,
+	    sizeof(pri_aux_image_status) >> 2);
+	qla27xx_print_image(vha, "Primary aux image", &pri_aux_image_status);
+
+	if (qla28xx_check_aux_image_status_signature(&pri_aux_image_status)) {
+		ql_dbg(ql_dbg_init, vha, 0x018b,
+		    "Primary aux image signature (%#x) not valid\n",
+		    le32_to_cpu(pri_aux_image_status.signature));
+		goto check_sec_image;
+	}
+
+	if (qla27xx_image_status_checksum(&pri_aux_image_status)) {
+		ql_dbg(ql_dbg_init, vha, 0x018c,
+		    "Primary aux image checksum failed\n");
+		goto check_sec_image;
+	}
+
+	valid_pri_image = true;
+
+	if (pri_aux_image_status.image_status_mask & 1) {
+		ql_dbg(ql_dbg_init, vha, 0x018d,
+		    "Primary aux image is active\n");
+		active_pri_image = true;
+	}
+
+check_sec_image:
+	if (!ha->flt_region_aux_img_status_sec) {
+		ql_dbg(ql_dbg_init, vha, 0x018a,
+		    "Secondary aux image not addressed\n");
+		goto check_valid_image;
+	}
+
+	qla24xx_read_flash_data(vha, (void *)&sec_aux_image_status,
+	    ha->flt_region_aux_img_status_sec,
+	    sizeof(sec_aux_image_status) >> 2);
+	qla27xx_print_image(vha, "Secondary aux image", &sec_aux_image_status);
+
+	if (qla28xx_check_aux_image_status_signature(&sec_aux_image_status)) {
+		ql_dbg(ql_dbg_init, vha, 0x018b,
+		    "Secondary aux image signature (%#x) not valid\n",
+		    le32_to_cpu(sec_aux_image_status.signature));
+		goto check_valid_image;
+	}
+
+	if (qla27xx_image_status_checksum(&sec_aux_image_status)) {
+		ql_dbg(ql_dbg_init, vha, 0x018c,
+		    "Secondary aux image checksum failed\n");
+		goto check_valid_image;
+	}
+
+	valid_sec_image = true;
+
+	if (sec_aux_image_status.image_status_mask & 1) {
+		ql_dbg(ql_dbg_init, vha, 0x018d,
+		    "Secondary aux image is active\n");
+		active_sec_image = true;
+	}
+
+check_valid_image:
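+	/* both images valid and active: newer generation wins, ties go primary */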
+	if (valid_pri_image && active_pri_image &&
+	    valid_sec_image && active_sec_image) {
+		if (qla27xx_compare_image_generation(&pri_aux_image_status,
+		    &sec_aux_image_status) >= 0) {
+			qla28xx_component_status(active_regions,
+			    &pri_aux_image_status);
+		} else {
+			qla28xx_component_status(active_regions,
+			    &sec_aux_image_status);
+		}
+	} else if (valid_pri_image && active_pri_image) {
+		qla28xx_component_status(active_regions, &pri_aux_image_status);
+	} else if (valid_sec_image && active_sec_image) {
+		qla28xx_component_status(active_regions, &sec_aux_image_status);
+	}
+
+	ql_dbg(ql_dbg_init, vha, 0x018f,
+	    "aux images active: BCFG=%u VPD/NVR=%u NPIV0/1=%u NPIV2/3=%u\n",
+	    active_regions->aux.board_config,
+	    active_regions->aux.vpd_nvram,
+	    active_regions->aux.npiv_config_0_1,
+	    active_regions->aux.npiv_config_2_3);
+}
+
+void
+qla27xx_get_active_image(struct scsi_qla_host *vha,
+    struct active_regions *active_regions)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct qla27xx_image_status pri_image_status, sec_image_status;
+	bool valid_pri_image = false, valid_sec_image = false;
+	bool active_pri_image = false, active_sec_image = false;
 
 	if (!ha->flt_region_img_status_pri) {
-		valid_pri_image = 0;
+		ql_dbg(ql_dbg_init, vha, 0x018a, "Primary image not addressed\n");
 		goto check_sec_image;
 	}
 
-	qla24xx_read_flash_data(vha, (uint32_t *)(&pri_image_status),
-	    ha->flt_region_img_status_pri, size);
+	if (qla24xx_read_flash_data(vha, (void *)(&pri_image_status),
+	    ha->flt_region_img_status_pri, sizeof(pri_image_status) >> 2) !=
+	    QLA_SUCCESS) {
+		WARN_ON_ONCE(true);
+		goto check_sec_image;
+	}
+	qla27xx_print_image(vha, "Primary image", &pri_image_status);
 
-	if (pri_image_status.signature != QLA27XX_IMG_STATUS_SIGN) {
+	if (qla27xx_check_image_status_signature(&pri_image_status)) {
 		ql_dbg(ql_dbg_init, vha, 0x018b,
-		    "Primary image signature (0x%x) not valid\n",
-		    pri_image_status.signature);
-		valid_pri_image = 0;
+		    "Primary image signature (%#x) not valid\n",
+		    le32_to_cpu(pri_image_status.signature));
 		goto check_sec_image;
 	}
 
-	wptr = (uint32_t *)(&pri_image_status);
-	cnt = size;
-
-	for (chksum = 0; cnt--; wptr++)
-		chksum += le32_to_cpu(*wptr);
-
-	if (chksum) {
+	if (qla27xx_image_status_checksum(&pri_image_status)) {
 		ql_dbg(ql_dbg_init, vha, 0x018c,
-		    "Checksum validation failed for primary image (0x%x)\n",
-		    chksum);
-		valid_pri_image = 0;
+		    "Primary image checksum failed\n");
+		goto check_sec_image;
+	}
+
+	valid_pri_image = true;
+
+	if (pri_image_status.image_status_mask & 1) {
+		ql_dbg(ql_dbg_init, vha, 0x018d,
+		    "Primary image is active\n");
+		active_pri_image = true;
 	}
 
 check_sec_image:
 	if (!ha->flt_region_img_status_sec) {
-		valid_sec_image = 0;
+		ql_dbg(ql_dbg_init, vha, 0x018a, "Secondary image not addressed\n");
 		goto check_valid_image;
 	}
 
 	qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status),
-	    ha->flt_region_img_status_sec, size);
+	    ha->flt_region_img_status_sec, sizeof(sec_image_status) >> 2);
+	qla27xx_print_image(vha, "Secondary image", &sec_image_status);
 
-	if (sec_image_status.signature != QLA27XX_IMG_STATUS_SIGN) {
-		ql_dbg(ql_dbg_init, vha, 0x018d,
-		    "Secondary image signature(0x%x) not valid\n",
-		    sec_image_status.signature);
-		valid_sec_image = 0;
+	if (qla27xx_check_image_status_signature(&sec_image_status)) {
+		ql_dbg(ql_dbg_init, vha, 0x018b,
+		    "Secondary image signature (%#x) not valid\n",
+		    le32_to_cpu(sec_image_status.signature));
 		goto check_valid_image;
 	}
 
-	wptr = (uint32_t *)(&sec_image_status);
-	cnt = size;
-	for (chksum = 0; cnt--; wptr++)
-		chksum += le32_to_cpu(*wptr);
-	if (chksum) {
-		ql_dbg(ql_dbg_init, vha, 0x018e,
-		    "Checksum validation failed for secondary image (0x%x)\n",
-		    chksum);
-		valid_sec_image = 0;
+	if (qla27xx_image_status_checksum(&sec_image_status)) {
+		ql_dbg(ql_dbg_init, vha, 0x018c,
+		    "Secondary image checksum failed\n");
+		goto check_valid_image;
+	}
+
+	valid_sec_image = true;
+
+	if (sec_image_status.image_status_mask & 1) {
+		ql_dbg(ql_dbg_init, vha, 0x018d,
+		    "Secondary image is active\n");
+		active_sec_image = true;
 	}
 
 check_valid_image:
-	if (valid_pri_image && (pri_image_status.image_status_mask & 0x1))
-		ha->active_image = QLA27XX_PRIMARY_IMAGE;
-	if (valid_sec_image && (sec_image_status.image_status_mask & 0x1)) {
-		if (!ha->active_image ||
-		    pri_image_status.generation_number <
-		    sec_image_status.generation_number)
-			ha->active_image = QLA27XX_SECONDARY_IMAGE;
+	if (valid_pri_image && active_pri_image)
+		active_regions->global = QLA27XX_PRIMARY_IMAGE;
+
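+	/* the secondary image wins only if the primary is absent or older */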
+	if (valid_sec_image && active_sec_image) {
+		if (!active_regions->global ||
+		    qla27xx_compare_image_generation(
+			&pri_image_status, &sec_image_status) < 0) {
+			active_regions->global = QLA27XX_SECONDARY_IMAGE;
+		}
 	}
 
-	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x018f, "%s image\n",
-	    ha->active_image == 0 ? "default bootld and fw" :
-	    ha->active_image == 1 ? "primary" :
-	    ha->active_image == 2 ? "secondary" :
-	    "Invalid");
+	ql_dbg(ql_dbg_init, vha, 0x018f, "active image %s (%u)\n",
+	    active_regions->global == QLA27XX_DEFAULT_IMAGE ?
+		"default (boot/fw)" :
+	    active_regions->global == QLA27XX_PRIMARY_IMAGE ?
+		"primary" :
+	    active_regions->global == QLA27XX_SECONDARY_IMAGE ?
+		"secondary" : "invalid",
+	    active_regions->global);
+}
 
-	return ha->active_image;
+bool qla24xx_risc_firmware_invalid(uint32_t *dword)
+{
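+	/* all-zeros or all-ones in words 4-7 marks a blank or erased image */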
+	return
+	    !(dword[4] | dword[5] | dword[6] | dword[7]) ||
+	    !(~dword[4] | ~dword[5] | ~dword[6] | ~dword[7]);
 }
 
 static int
 qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
     uint32_t faddr)
 {
-	int	rval = QLA_SUCCESS;
-	int	segments, fragment;
-	uint32_t *dcode, dlen;
-	uint32_t risc_addr;
-	uint32_t risc_size;
-	uint32_t i;
+	int rval;
+	uint templates, segments, fragment;
+	ulong i;
+	uint j;
+	ulong dlen;
+	uint32_t *dcode;
+	uint32_t risc_addr, risc_size, risc_attr = 0;
 	struct qla_hw_data *ha = vha->hw;
 	struct req_que *req = ha->req_q_map[0];
+	struct fwdt *fwdt = ha->fwdt;
 
 	ql_dbg(ql_dbg_init, vha, 0x008b,
 	    "FW: Loading firmware from flash (%x).\n", faddr);
 
-	rval = QLA_SUCCESS;
-
-	segments = FA_RISC_CODE_SEGMENTS;
-	dcode = (uint32_t *)req->ring;
-	*srisc_addr = 0;
-
-	if (IS_QLA27XX(ha) &&
-	    qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
-		faddr = ha->flt_region_fw_sec;
-
-	/* Validate firmware image by checking version. */
-	qla24xx_read_flash_data(vha, dcode, faddr + 4, 4);
-	for (i = 0; i < 4; i++)
-		dcode[i] = be32_to_cpu(dcode[i]);
-	if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
-	    dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
-	    (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
-		dcode[3] == 0)) {
+	dcode = (void *)req->ring;
+	qla24xx_read_flash_data(vha, dcode, faddr, 8);
+	if (qla24xx_risc_firmware_invalid(dcode)) {
 		ql_log(ql_log_fatal, vha, 0x008c,
 		    "Unable to verify the integrity of flash firmware "
 		    "image.\n");
@@ -7297,34 +7622,36 @@
 		return QLA_FUNCTION_FAILED;
 	}
 
-	while (segments && rval == QLA_SUCCESS) {
-		/* Read segment's load information. */
-		qla24xx_read_flash_data(vha, dcode, faddr, 4);
-
+	dcode = (void *)req->ring;
+	*srisc_addr = 0;
+	segments = FA_RISC_CODE_SEGMENTS;
+	for (j = 0; j < segments; j++) {
+		ql_dbg(ql_dbg_init, vha, 0x008d,
+		    "-> Loading segment %u...\n", j);
+		qla24xx_read_flash_data(vha, dcode, faddr, 10);
 		risc_addr = be32_to_cpu(dcode[2]);
-		*srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
 		risc_size = be32_to_cpu(dcode[3]);
+		if (!*srisc_addr) {
+			*srisc_addr = risc_addr;
+			risc_attr = be32_to_cpu(dcode[9]);
+		}
 
-		fragment = 0;
-		while (risc_size > 0 && rval == QLA_SUCCESS) {
-			dlen = (uint32_t)(ha->fw_transfer_size >> 2);
+		dlen = ha->fw_transfer_size >> 2;
+		for (fragment = 0; risc_size; fragment++) {
 			if (dlen > risc_size)
 				dlen = risc_size;
 
 			ql_dbg(ql_dbg_init, vha, 0x008e,
-			    "Loading risc segment@ risc addr %x "
-			    "number of dwords 0x%x offset 0x%x.\n",
-			    risc_addr, dlen, faddr);
-
+			    "-> Loading fragment %u: %#x <- %#x (%#lx dwords)...\n",
+			    fragment, risc_addr, faddr, dlen);
 			qla24xx_read_flash_data(vha, dcode, faddr, dlen);
 			for (i = 0; i < dlen; i++)
 				dcode[i] = swab32(dcode[i]);
 
-			rval = qla2x00_load_ram(vha, req->dma, risc_addr,
-			    dlen);
+			rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen);
 			if (rval) {
 				ql_log(ql_log_fatal, vha, 0x008f,
-				    "Failed to load segment %d of firmware.\n",
+				    "-> Failed load firmware fragment %u.\n",
 				    fragment);
 				return QLA_FUNCTION_FAILED;
 			}
@@ -7332,107 +7659,82 @@
 			faddr += dlen;
 			risc_addr += dlen;
 			risc_size -= dlen;
-			fragment++;
+		}
+	}
+
+	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+		return QLA_SUCCESS;
+
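+	/* BIT_9 of the RISC attributes advertises a second fwdump template */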
+	templates = (risc_attr & BIT_9) ? 2 : 1;
+	ql_dbg(ql_dbg_init, vha, 0x0160, "-> templates = %u\n", templates);
+	for (j = 0; j < templates; j++, fwdt++) {
+		if (fwdt->template)
+			vfree(fwdt->template);
+		fwdt->template = NULL;
+		fwdt->length = 0;
+
+		dcode = (void *)req->ring;
+		qla24xx_read_flash_data(vha, dcode, faddr, 7);
+		risc_size = be32_to_cpu(dcode[2]);
+		ql_dbg(ql_dbg_init, vha, 0x0161,
+		    "-> fwdt%u template array at %#x (%#x dwords)\n",
+		    j, faddr, risc_size);
+		if (!risc_size || !~risc_size) {
+			ql_dbg(ql_dbg_init, vha, 0x0162,
+			    "-> fwdt%u failed to read array\n", j);
+			goto failed;
 		}
 
-		/* Next segment. */
-		segments--;
+		/* skip header and ignore checksum */
+		faddr += 7;
+		risc_size -= 8;
+
+		ql_dbg(ql_dbg_init, vha, 0x0163,
+		    "-> fwdt%u template allocate template %#x words...\n",
+		    j, risc_size);
+		fwdt->template = vmalloc(risc_size * sizeof(*dcode));
+		if (!fwdt->template) {
+			ql_log(ql_log_warn, vha, 0x0164,
+			    "-> fwdt%u failed allocate template.\n", j);
+			goto failed;
+		}
+
+		dcode = fwdt->template;
+		qla24xx_read_flash_data(vha, dcode, faddr, risc_size);
+
+		if (!qla27xx_fwdt_template_valid(dcode)) {
+			ql_log(ql_log_warn, vha, 0x0165,
+			    "-> fwdt%u failed template validate\n", j);
+			goto failed;
+		}
+
+		dlen = qla27xx_fwdt_template_size(dcode);
+		ql_dbg(ql_dbg_init, vha, 0x0166,
+		    "-> fwdt%u template size %#lx bytes (%#lx words)\n",
+		    j, dlen, dlen / sizeof(*dcode));
+		if (dlen > risc_size * sizeof(*dcode)) {
+			ql_log(ql_log_warn, vha, 0x0167,
+			    "-> fwdt%u template exceeds array (%-lu bytes)\n",
+			    j, dlen - risc_size * sizeof(*dcode));
+			goto failed;
+		}
+
+		fwdt->length = dlen;
+		ql_dbg(ql_dbg_init, vha, 0x0168,
+		    "-> fwdt%u loaded template ok\n", j);
+
+		faddr += risc_size + 1;
 	}
 
-	if (!IS_QLA27XX(ha))
-		return rval;
+	return QLA_SUCCESS;
 
-	if (ha->fw_dump_template)
-		vfree(ha->fw_dump_template);
-	ha->fw_dump_template = NULL;
-	ha->fw_dump_template_len = 0;
+failed:
+	if (fwdt->template)
+		vfree(fwdt->template);
+	fwdt->template = NULL;
+	fwdt->length = 0;
 
-	ql_dbg(ql_dbg_init, vha, 0x0161,
-	    "Loading fwdump template from %x\n", faddr);
-	qla24xx_read_flash_data(vha, dcode, faddr, 7);
-	risc_size = be32_to_cpu(dcode[2]);
-	ql_dbg(ql_dbg_init, vha, 0x0162,
-	    "-> array size %x dwords\n", risc_size);
-	if (risc_size == 0 || risc_size == ~0)
-		goto default_template;
-
-	dlen = (risc_size - 8) * sizeof(*dcode);
-	ql_dbg(ql_dbg_init, vha, 0x0163,
-	    "-> template allocating %x bytes...\n", dlen);
-	ha->fw_dump_template = vmalloc(dlen);
-	if (!ha->fw_dump_template) {
-		ql_log(ql_log_warn, vha, 0x0164,
-		    "Failed fwdump template allocate %x bytes.\n", risc_size);
-		goto default_template;
-	}
-
-	faddr += 7;
-	risc_size -= 8;
-	dcode = ha->fw_dump_template;
-	qla24xx_read_flash_data(vha, dcode, faddr, risc_size);
-	for (i = 0; i < risc_size; i++)
-		dcode[i] = le32_to_cpu(dcode[i]);
-
-	if (!qla27xx_fwdt_template_valid(dcode)) {
-		ql_log(ql_log_warn, vha, 0x0165,
-		    "Failed fwdump template validate\n");
-		goto default_template;
-	}
-
-	dlen = qla27xx_fwdt_template_size(dcode);
-	ql_dbg(ql_dbg_init, vha, 0x0166,
-	    "-> template size %x bytes\n", dlen);
-	if (dlen > risc_size * sizeof(*dcode)) {
-		ql_log(ql_log_warn, vha, 0x0167,
-		    "Failed fwdump template exceeds array by %zx bytes\n",
-		    (size_t)(dlen - risc_size * sizeof(*dcode)));
-		goto default_template;
-	}
-	ha->fw_dump_template_len = dlen;
-	return rval;
-
-default_template:
-	ql_log(ql_log_warn, vha, 0x0168, "Using default fwdump template\n");
-	if (ha->fw_dump_template)
-		vfree(ha->fw_dump_template);
-	ha->fw_dump_template = NULL;
-	ha->fw_dump_template_len = 0;
-
-	dlen = qla27xx_fwdt_template_default_size();
-	ql_dbg(ql_dbg_init, vha, 0x0169,
-	    "-> template allocating %x bytes...\n", dlen);
-	ha->fw_dump_template = vmalloc(dlen);
-	if (!ha->fw_dump_template) {
-		ql_log(ql_log_warn, vha, 0x016a,
-		    "Failed fwdump template allocate %x bytes.\n", risc_size);
-		goto failed_template;
-	}
-
-	dcode = ha->fw_dump_template;
-	risc_size = dlen / sizeof(*dcode);
-	memcpy(dcode, qla27xx_fwdt_template_default(), dlen);
-	for (i = 0; i < risc_size; i++)
-		dcode[i] = be32_to_cpu(dcode[i]);
-
-	if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
-		ql_log(ql_log_warn, vha, 0x016b,
-		    "Failed fwdump template validate\n");
-		goto failed_template;
-	}
-
-	dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
-	ql_dbg(ql_dbg_init, vha, 0x016c,
-	    "-> template size %x bytes\n", dlen);
-	ha->fw_dump_template_len = dlen;
-	return rval;
-
-failed_template:
-	ql_log(ql_log_warn, vha, 0x016d, "Failed default fwdump template\n");
-	if (ha->fw_dump_template)
-		vfree(ha->fw_dump_template);
-	ha->fw_dump_template = NULL;
-	ha->fw_dump_template_len = 0;
-	return rval;
+	return QLA_SUCCESS;
 }
 
 #define QLA_FW_URL "http://ldriver.qlogic.com/firmware/"
@@ -7540,94 +7842,73 @@
 qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
 {
 	int	rval;
-	int	segments, fragment;
-	uint32_t *dcode, dlen;
-	uint32_t risc_addr;
-	uint32_t risc_size;
-	uint32_t i;
+	uint templates, segments, fragment;
+	uint32_t *dcode;
+	ulong dlen;
+	uint32_t risc_addr, risc_size, risc_attr = 0;
+	ulong i;
+	uint j;
 	struct fw_blob *blob;
-	const uint32_t *fwcode;
-	uint32_t fwclen;
+	uint32_t *fwcode;
 	struct qla_hw_data *ha = vha->hw;
 	struct req_que *req = ha->req_q_map[0];
+	struct fwdt *fwdt = ha->fwdt;
 
-	/* Load firmware blob. */
+	ql_dbg(ql_dbg_init, vha, 0x0090,
+	    "-> FW: Loading via request-firmware.\n");
+
 	blob = qla2x00_request_firmware(vha);
 	if (!blob) {
-		ql_log(ql_log_warn, vha, 0x0090,
-		    "Firmware image unavailable.\n");
-		ql_log(ql_log_warn, vha, 0x0091,
-		    "Firmware images can be retrieved from: "
-		    QLA_FW_URL ".\n");
+		ql_log(ql_log_warn, vha, 0x0092,
+		    "-> Firmware file not found.\n");
 
 		return QLA_FUNCTION_FAILED;
 	}
 
-	ql_dbg(ql_dbg_init, vha, 0x0092,
-	    "FW: Loading via request-firmware.\n");
-
-	rval = QLA_SUCCESS;
-
-	segments = FA_RISC_CODE_SEGMENTS;
-	dcode = (uint32_t *)req->ring;
-	*srisc_addr = 0;
-	fwcode = (uint32_t *)blob->fw->data;
-	fwclen = 0;
-
-	/* Validate firmware image by checking version. */
-	if (blob->fw->size < 8 * sizeof(uint32_t)) {
+	fwcode = (void *)blob->fw->data;
+	dcode = fwcode;
+	if (qla24xx_risc_firmware_invalid(dcode)) {
 		ql_log(ql_log_fatal, vha, 0x0093,
 		    "Unable to verify integrity of firmware image (%zd).\n",
 		    blob->fw->size);
-		return QLA_FUNCTION_FAILED;
-	}
-	for (i = 0; i < 4; i++)
-		dcode[i] = be32_to_cpu(fwcode[i + 4]);
-	if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
-	    dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
-	    (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
-		dcode[3] == 0)) {
-		ql_log(ql_log_fatal, vha, 0x0094,
-		    "Unable to verify integrity of firmware image (%zd).\n",
-		    blob->fw->size);
 		ql_log(ql_log_fatal, vha, 0x0095,
 		    "Firmware data: %08x %08x %08x %08x.\n",
 		    dcode[0], dcode[1], dcode[2], dcode[3]);
 		return QLA_FUNCTION_FAILED;
 	}
 
-	while (segments && rval == QLA_SUCCESS) {
+	dcode = (void *)req->ring;
+	*srisc_addr = 0;
+	segments = FA_RISC_CODE_SEGMENTS;
+	for (j = 0; j < segments; j++) {
+		ql_dbg(ql_dbg_init, vha, 0x0096,
+		    "-> Loading segment %u...\n", j);
 		risc_addr = be32_to_cpu(fwcode[2]);
-		*srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
 		risc_size = be32_to_cpu(fwcode[3]);
 
-		/* Validate firmware image size. */
-		fwclen += risc_size * sizeof(uint32_t);
-		if (blob->fw->size < fwclen) {
-			ql_log(ql_log_fatal, vha, 0x0096,
-			    "Unable to verify integrity of firmware image "
-			    "(%zd).\n", blob->fw->size);
-			return QLA_FUNCTION_FAILED;
+		if (!*srisc_addr) {
+			*srisc_addr = risc_addr;
+			risc_attr = be32_to_cpu(fwcode[9]);
 		}
 
-		fragment = 0;
-		while (risc_size > 0 && rval == QLA_SUCCESS) {
-			dlen = (uint32_t)(ha->fw_transfer_size >> 2);
+		dlen = ha->fw_transfer_size >> 2;
+		for (fragment = 0; risc_size; fragment++) {
 			if (dlen > risc_size)
 				dlen = risc_size;
 
 			ql_dbg(ql_dbg_init, vha, 0x0097,
-			    "Loading risc segment@ risc addr %x "
-			    "number of dwords 0x%x.\n", risc_addr, dlen);
+			    "-> Loading fragment %u: %#x <- %#x (%#lx words)...\n",
+			    fragment, risc_addr,
+			    (uint32_t)(fwcode - (typeof(fwcode))blob->fw->data),
+			    dlen);
 
 			for (i = 0; i < dlen; i++)
 				dcode[i] = swab32(fwcode[i]);
 
-			rval = qla2x00_load_ram(vha, req->dma, risc_addr,
-			    dlen);
+			rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen);
 			if (rval) {
 				ql_log(ql_log_fatal, vha, 0x0098,
-				    "Failed to load segment %d of firmware.\n",
+				    "-> Failed load firmware fragment %u.\n",
 				    fragment);
 				return QLA_FUNCTION_FAILED;
 			}
@@ -7635,106 +7916,82 @@
 			fwcode += dlen;
 			risc_addr += dlen;
 			risc_size -= dlen;
-			fragment++;
+		}
+	}
+
+	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+		return QLA_SUCCESS;
+
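+	/* BIT_9 of the RISC attributes advertises a second fwdump template */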
+	templates = (risc_attr & BIT_9) ? 2 : 1;
+	ql_dbg(ql_dbg_init, vha, 0x0170, "-> templates = %u\n", templates);
+	for (j = 0; j < templates; j++, fwdt++) {
+		if (fwdt->template)
+			vfree(fwdt->template);
+		fwdt->template = NULL;
+		fwdt->length = 0;
+
+		risc_size = be32_to_cpu(fwcode[2]);
+		ql_dbg(ql_dbg_init, vha, 0x0171,
+		    "-> fwdt%u template array at %#x (%#x dwords)\n",
+		    j, (uint32_t)((void *)fwcode - (void *)blob->fw->data),
+		    risc_size);
+		if (!risc_size || !~risc_size) {
+			ql_dbg(ql_dbg_init, vha, 0x0172,
+			    "-> fwdt%u failed to read array\n", j);
+			goto failed;
 		}
 
-		/* Next segment. */
-		segments--;
+		/* skip header and ignore checksum */
+		fwcode += 7;
+		risc_size -= 8;
+
+		ql_dbg(ql_dbg_init, vha, 0x0173,
+		    "-> fwdt%u template allocate template %#x words...\n",
+		    j, risc_size);
+		fwdt->template = vmalloc(risc_size * sizeof(*dcode));
+		if (!fwdt->template) {
+			ql_log(ql_log_warn, vha, 0x0174,
+			    "-> fwdt%u failed allocate template.\n", j);
+			goto failed;
+		}
+
+		dcode = fwdt->template;
+		for (i = 0; i < risc_size; i++)
+			dcode[i] = fwcode[i];
+
+		if (!qla27xx_fwdt_template_valid(dcode)) {
+			ql_log(ql_log_warn, vha, 0x0175,
+			    "-> fwdt%u failed template validate\n", j);
+			goto failed;
+		}
+
+		dlen = qla27xx_fwdt_template_size(dcode);
+		ql_dbg(ql_dbg_init, vha, 0x0176,
+		    "-> fwdt%u template size %#lx bytes (%#lx words)\n",
+		    j, dlen, dlen / sizeof(*dcode));
+		if (dlen > risc_size * sizeof(*dcode)) {
+			ql_log(ql_log_warn, vha, 0x0177,
+			    "-> fwdt%u template exceeds array (%-lu bytes)\n",
+			    j, dlen - risc_size * sizeof(*dcode));
+			goto failed;
+		}
+
+		fwdt->length = dlen;
+		ql_dbg(ql_dbg_init, vha, 0x0178,
+		    "-> fwdt%u loaded template ok\n", j);
+
+		fwcode += risc_size + 1;
 	}
 
-	if (!IS_QLA27XX(ha))
-		return rval;
+	return QLA_SUCCESS;
 
-	if (ha->fw_dump_template)
-		vfree(ha->fw_dump_template);
-	ha->fw_dump_template = NULL;
-	ha->fw_dump_template_len = 0;
+failed:
+	if (fwdt->template)
+		vfree(fwdt->template);
+	fwdt->template = NULL;
+	fwdt->length = 0;
 
-	ql_dbg(ql_dbg_init, vha, 0x171,
-	    "Loading fwdump template from %x\n",
-	    (uint32_t)((void *)fwcode - (void *)blob->fw->data));
-	risc_size = be32_to_cpu(fwcode[2]);
-	ql_dbg(ql_dbg_init, vha, 0x172,
-	    "-> array size %x dwords\n", risc_size);
-	if (risc_size == 0 || risc_size == ~0)
-		goto default_template;
-
-	dlen = (risc_size - 8) * sizeof(*fwcode);
-	ql_dbg(ql_dbg_init, vha, 0x0173,
-	    "-> template allocating %x bytes...\n", dlen);
-	ha->fw_dump_template = vmalloc(dlen);
-	if (!ha->fw_dump_template) {
-		ql_log(ql_log_warn, vha, 0x0174,
-		    "Failed fwdump template allocate %x bytes.\n", risc_size);
-		goto default_template;
-	}
-
-	fwcode += 7;
-	risc_size -= 8;
-	dcode = ha->fw_dump_template;
-	for (i = 0; i < risc_size; i++)
-		dcode[i] = le32_to_cpu(fwcode[i]);
-
-	if (!qla27xx_fwdt_template_valid(dcode)) {
-		ql_log(ql_log_warn, vha, 0x0175,
-		    "Failed fwdump template validate\n");
-		goto default_template;
-	}
-
-	dlen = qla27xx_fwdt_template_size(dcode);
-	ql_dbg(ql_dbg_init, vha, 0x0176,
-	    "-> template size %x bytes\n", dlen);
-	if (dlen > risc_size * sizeof(*fwcode)) {
-		ql_log(ql_log_warn, vha, 0x0177,
-		    "Failed fwdump template exceeds array by %zx bytes\n",
-		    (size_t)(dlen - risc_size * sizeof(*fwcode)));
-		goto default_template;
-	}
-	ha->fw_dump_template_len = dlen;
-	return rval;
-
-default_template:
-	ql_log(ql_log_warn, vha, 0x0178, "Using default fwdump template\n");
-	if (ha->fw_dump_template)
-		vfree(ha->fw_dump_template);
-	ha->fw_dump_template = NULL;
-	ha->fw_dump_template_len = 0;
-
-	dlen = qla27xx_fwdt_template_default_size();
-	ql_dbg(ql_dbg_init, vha, 0x0179,
-	    "-> template allocating %x bytes...\n", dlen);
-	ha->fw_dump_template = vmalloc(dlen);
-	if (!ha->fw_dump_template) {
-		ql_log(ql_log_warn, vha, 0x017a,
-		    "Failed fwdump template allocate %x bytes.\n", risc_size);
-		goto failed_template;
-	}
-
-	dcode = ha->fw_dump_template;
-	risc_size = dlen / sizeof(*fwcode);
-	fwcode = qla27xx_fwdt_template_default();
-	for (i = 0; i < risc_size; i++)
-		dcode[i] = be32_to_cpu(fwcode[i]);
-
-	if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
-		ql_log(ql_log_warn, vha, 0x017b,
-		    "Failed fwdump template validate\n");
-		goto failed_template;
-	}
-
-	dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
-	ql_dbg(ql_dbg_init, vha, 0x017c,
-	    "-> template size %x bytes\n", dlen);
-	ha->fw_dump_template_len = dlen;
-	return rval;
-
-failed_template:
-	ql_log(ql_log_warn, vha, 0x017d, "Failed default fwdump template\n");
-	if (ha->fw_dump_template)
-		vfree(ha->fw_dump_template);
-	ha->fw_dump_template = NULL;
-	ha->fw_dump_template_len = 0;
-	return rval;
+	return QLA_SUCCESS;
 }
 
 int
@@ -7763,32 +8020,50 @@
 {
 	int rval;
 	struct qla_hw_data *ha = vha->hw;
+	struct active_regions active_regions = { };
 
 	if (ql2xfwloadbin == 2)
 		goto try_blob_fw;
 
-	/*
-	 * FW Load priority:
+	/* FW Load priority:
 	 * 1) Firmware residing in flash.
 	 * 2) Firmware via request-firmware interface (.bin file).
-	 * 3) Golden-Firmware residing in flash -- limited operation.
+	 * 3) Golden-Firmware residing in flash -- (limited operation).
 	 */
+
+	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+		goto try_primary_fw;
+
+	qla27xx_get_active_image(vha, &active_regions);
+
+	if (active_regions.global != QLA27XX_SECONDARY_IMAGE)
+		goto try_primary_fw;
+
+	ql_dbg(ql_dbg_init, vha, 0x008b,
+	    "Loading secondary firmware image.\n");
+	rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw_sec);
+	if (!rval)
+		return rval;
+
+try_primary_fw:
+	ql_dbg(ql_dbg_init, vha, 0x008b,
+	    "Loading primary firmware image.\n");
 	rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
-	if (rval == QLA_SUCCESS)
+	if (!rval)
 		return rval;
 
 try_blob_fw:
 	rval = qla24xx_load_risc_blob(vha, srisc_addr);
-	if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw)
+	if (!rval || !ha->flt_region_gold_fw)
 		return rval;
 
 	ql_log(ql_log_info, vha, 0x0099,
 	    "Attempting to fallback to golden firmware.\n");
 	rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
-	if (rval != QLA_SUCCESS)
+	if (rval)
 		return rval;
 
-	ql_log(ql_log_info, vha, 0x009a, "Update operational firmware.\n");
+	ql_log(ql_log_info, vha, 0x009a, "Need firmware flash update.\n");
 	ha->flags.running_gold_fw = 1;
 	return rval;
 }
@@ -7833,22 +8108,15 @@
 	uint16_t mb[MAILBOX_REGISTER_COUNT];
 	struct qla_hw_data *ha = vha->hw;
 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
-	struct req_que *req;
-	struct rsp_que *rsp;
 
 	if (!vha->vp_idx)
 		return -EINVAL;
 
 	rval = qla2x00_fw_ready(base_vha);
-	if (vha->qpair)
-		req = vha->qpair->req;
-	else
-		req = ha->req_q_map[0];
-	rsp = req->rsp;
 
 	if (rval == QLA_SUCCESS) {
 		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
-		qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
+		qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
 	}
 
 	vha->flags.management_server_logged_in = 0;
@@ -7930,6 +8198,7 @@
 qla84xx_put_chip(struct scsi_qla_host *vha)
 {
 	struct qla_hw_data *ha = vha->hw;
+
 	if (ha->cs84xx)
 		kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
 }
@@ -7947,7 +8216,7 @@
 
 	mutex_unlock(&ha->cs84xx->fw_update_mutex);
 
-	return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED:
+	return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED :
 	    QLA_SUCCESS;
 }
 
@@ -7964,25 +8233,48 @@
 	uint32_t chksum;
 	uint16_t cnt;
 	struct qla_hw_data *ha = vha->hw;
+	uint32_t faddr;
+	struct active_regions active_regions = { };
 
 	rval = QLA_SUCCESS;
 	icb = (struct init_cb_81xx *)ha->init_cb;
 	nv = ha->nvram;
 
 	/* Determine NVRAM starting address. */
-	ha->nvram_size = sizeof(struct nvram_81xx);
+	ha->nvram_size = sizeof(*nv);
 	ha->vpd_size = FA_NVRAM_VPD_SIZE;
 	if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
 		ha->vpd_size = FA_VPD_SIZE_82XX;
 
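+	/* the aux image status picks primary vs secondary VPD/NVRAM regions */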
+	if (IS_QLA28XX(ha) || IS_QLA27XX(ha))
+		qla28xx_get_aux_images(vha, &active_regions);
+
 	/* Get VPD data into cache */
 	ha->vpd = ha->nvram + VPD_OFFSET;
-	ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
-	    ha->vpd_size);
+
+	faddr = ha->flt_region_vpd;
+	if (IS_QLA28XX(ha)) {
+		if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
+			faddr = ha->flt_region_vpd_sec;
+		ql_dbg(ql_dbg_init, vha, 0x0110,
+		    "Loading %s nvram image.\n",
+		    active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
+		    "primary" : "secondary");
+	}
+	ha->isp_ops->read_optrom(vha, ha->vpd, faddr << 2, ha->vpd_size);
 
 	/* Get NVRAM data into cache and calculate checksum. */
-	ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
-	    ha->nvram_size);
+	faddr = ha->flt_region_nvram;
+	if (IS_QLA28XX(ha)) {
+		if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
+			faddr = ha->flt_region_nvram_sec;
+	}
+	ql_dbg(ql_dbg_init, vha, 0x0110,
+	    "Loading %s nvram image.\n",
+	    active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
+	    "primary" : "secondary");
+	ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size);
+
 	dptr = (uint32_t *)nv;
 	for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
 		chksum += le32_to_cpu(*dptr);
@@ -7990,17 +8282,16 @@
 	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111,
 	    "Contents of NVRAM:\n");
 	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112,
-	    (uint8_t *)nv, ha->nvram_size);
+	    nv, ha->nvram_size);
 
 	/* Bad NVRAM data, set defaults parameters. */
-	if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
-	    || nv->id[3] != ' ' ||
-	    nv->nvram_version < cpu_to_le16(ICB_VERSION)) {
+	if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
+	    le16_to_cpu(nv->nvram_version) < ICB_VERSION) {
 		/* Reset NVRAM data. */
 		ql_log(ql_log_info, vha, 0x0073,
-		    "Inconsistent NVRAM detected: checksum=0x%x id=%c "
-		    "version=0x%x.\n", chksum, nv->id[0],
-		    le16_to_cpu(nv->nvram_version));
+		    "Inconsistent NVRAM checksum=%#x id=%.4s version=%#x.\n",
+		    chksum, nv->id, le16_to_cpu(nv->nvram_version));
+		ql_dump_buffer(ql_dbg_init, vha, 0x0073, nv, sizeof(*nv));
 		ql_log(ql_log_info, vha, 0x0074,
 		    "Falling back to functioning (yet invalid -- WWPN) "
 		    "defaults.\n");
@@ -8121,11 +8412,11 @@
 	ha->flags.disable_risc_code_load = 0;
 	ha->flags.enable_lip_reset = 0;
 	ha->flags.enable_lip_full_login =
-	    le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
+	    le32_to_cpu(nv->host_p) & BIT_10 ? 1 : 0;
 	ha->flags.enable_target_reset =
-	    le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
+	    le32_to_cpu(nv->host_p) & BIT_11 ? 1 : 0;
 	ha->flags.enable_led_scheme = 0;
-	ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
+	ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0;
 
 	ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
 	    (BIT_6 | BIT_5 | BIT_4)) >> 4;
@@ -8189,7 +8480,8 @@
 		ha->login_retry_count = ql2xloginretrycount;
 
 	/* if not running MSI-X we need handshaking on interrupts */
-	if (!vha->hw->flags.msix_enabled && (IS_QLA83XX(ha) || IS_QLA27XX(ha)))
+	if (!vha->hw->flags.msix_enabled &&
+	    (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)))
 		icb->firmware_options_2 |= cpu_to_le32(BIT_22);
 
 	/* Enable ZIO. */
@@ -8197,7 +8489,7 @@
 		ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
 		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
 		ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
-		    le16_to_cpu(icb->interrupt_delay_timer): 2;
+		    le16_to_cpu(icb->interrupt_delay_timer) : 2;
 	}
 	icb->firmware_options_2 &= cpu_to_le32(
 	    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
@@ -8222,12 +8514,6 @@
 	/* N2N: driver will initiate Login instead of FW */
 	icb->firmware_options_3 |= BIT_8;
 
-	if (IS_QLA27XX(ha)) {
-		icb->firmware_options_3 |= BIT_8;
-		ql_dbg(ql_log_info, vha, 0x0075,
-		    "Enabling direct connection.\n");
-	}
-
 	if (rval) {
 		ql_log(ql_log_warn, vha, 0x0076,
 		    "NVRAM configuration failed.\n");
@@ -8240,8 +8526,6 @@
 {
 	int status, rval;
 	struct qla_hw_data *ha = vha->hw;
-	struct req_que *req = ha->req_q_map[0];
-	struct rsp_que *rsp = ha->rsp_q_map[0];
 	struct scsi_qla_host *vp;
 	unsigned long flags;
 
@@ -8253,7 +8537,7 @@
 		status = qla2x00_fw_ready(vha);
 		if (!status) {
 			/* Issue a marker after FW becomes ready. */
-			qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
+			qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
 			vha->flags.online = 1;
 			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
 		}
@@ -8590,7 +8874,6 @@
 			    "Failed to allocate memory for queue pair.\n");
 			return NULL;
 		}
-		memset(qpair, 0, sizeof(struct qla_qpair));
 
 		qpair->hw = vha->hw;
 		qpair->vha = vha;
@@ -8637,7 +8920,7 @@
 		qpair->msix->in_use = 1;
 		list_add_tail(&qpair->qp_list_elem, &vha->qp_list);
 		qpair->pdev = ha->pdev;
-		if (IS_QLA27XX(ha) || IS_QLA83XX(ha))
+		if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))
 			qpair->reqq_start_iocbs = qla_83xx_start_iocbs;
 
 		mutex_unlock(&ha->mq_lock);
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 4351736..0c3d907 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -91,43 +91,6 @@
 }
 
 static inline void
-qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha)
-{
-	int i;
-
-	if (IS_FWI2_CAPABLE(ha))
-		return;
-
-	for (i = 0; i < SNS_FIRST_LOOP_ID; i++)
-		set_bit(i, ha->loop_id_map);
-	set_bit(MANAGEMENT_SERVER, ha->loop_id_map);
-	set_bit(BROADCAST, ha->loop_id_map);
-}
-
-static inline int
-qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
-{
-	struct qla_hw_data *ha = vha->hw;
-	if (IS_FWI2_CAPABLE(ha))
-		return (loop_id > NPH_LAST_HANDLE);
-
-	return ((loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
-	    loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST);
-}
-
-static inline void
-qla2x00_clear_loop_id(fc_port_t *fcport) {
-	struct qla_hw_data *ha = fcport->vha->hw;
-
-	if (fcport->loop_id == FC_NO_LOOP_ID ||
-	    qla2x00_is_reserved_id(fcport->vha, fcport->loop_id))
-		return;
-
-	clear_bit(fcport->loop_id, ha->loop_id_map);
-	fcport->loop_id = FC_NO_LOOP_ID;
-}
-
-static inline void
 qla2x00_clean_dsd_pool(struct qla_hw_data *ha, struct crc_context *ctx)
 {
 	struct dsd_dma *dsd, *tdsd;
@@ -142,25 +105,6 @@
 	INIT_LIST_HEAD(&ctx->dsd_list);
 }
 
-static inline void
-qla2x00_set_fcport_state(fc_port_t *fcport, int state)
-{
-	int old_state;
-
-	old_state = atomic_read(&fcport->state);
-	atomic_set(&fcport->state, state);
-
-	/* Don't print state transitions during initial allocation of fcport */
-	if (old_state && old_state != state) {
-		ql_dbg(ql_dbg_disc, fcport->vha, 0x207d,
-		    "FCPort %8phC state transitioned from %s to %s - "
-			"portid=%02x%02x%02x.\n", fcport->port_name,
-		    port_state_str[old_state], port_state_str[state],
-		    fcport->d_id.b.domain, fcport->d_id.b.area,
-		    fcport->d_id.b.al_pa);
-	}
-}
-
 static inline int
 qla2x00_hba_err_chk_enabled(srb_t *sp)
 {
@@ -208,8 +152,21 @@
 	return (qla2x00_reset_active(vha) || !vha->hw->flags.fw_started);
 }
 
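+/* Common srb_t initialization shared by the qpair and default sp allocators. */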
+static void qla2xxx_init_sp(srb_t *sp, scsi_qla_host_t *vha,
+			    struct qla_qpair *qpair, fc_port_t *fcport)
+{
+	memset(sp, 0, sizeof(*sp));
+	sp->fcport = fcport;
+	sp->iocbs = 1;
+	sp->vha = vha;
+	sp->qpair = qpair;
+	sp->cmd_type = TYPE_SRB;
+	INIT_LIST_HEAD(&sp->elem);
+}
+
 static inline srb_t *
-qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag)
+qla2xxx_get_qpair_sp(scsi_qla_host_t *vha, struct qla_qpair *qpair,
+    fc_port_t *fcport, gfp_t flag)
 {
 	srb_t *sp = NULL;
 	uint8_t bail;
@@ -219,17 +176,9 @@
 		return NULL;
 
 	sp = mempool_alloc(qpair->srb_mempool, flag);
-	if (!sp)
-		goto done;
-
-	memset(sp, 0, sizeof(*sp));
-	sp->fcport = fcport;
-	sp->iocbs = 1;
-	sp->vha = qpair->vha;
-	INIT_LIST_HEAD(&sp->elem);
-
-done:
-	if (!sp)
+	if (sp)
+		qla2xxx_init_sp(sp, vha, qpair, fcport);
+	else
 		QLA_QPAIR_MARK_NOT_BUSY(qpair);
 	return sp;
 }
@@ -237,6 +186,7 @@
 static inline void
 qla2xxx_rel_qpair_sp(struct qla_qpair *qpair, srb_t *sp)
 {
+	sp->qpair = NULL;
 	mempool_free(sp, qpair->srb_mempool);
 	QLA_QPAIR_MARK_NOT_BUSY(qpair);
 }
@@ -246,19 +196,17 @@
 {
 	srb_t *sp = NULL;
 	uint8_t bail;
+	struct qla_qpair *qpair;
 
 	QLA_VHA_MARK_BUSY(vha, bail);
 	if (unlikely(bail))
 		return NULL;
 
-	sp = mempool_alloc(vha->hw->srb_mempool, flag);
+	qpair = vha->hw->base_qpair;
+	sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, flag);
 	if (!sp)
 		goto done;
 
-	memset(sp, 0, sizeof(*sp));
-	sp->fcport = fcport;
-	sp->cmd_type = TYPE_SRB;
-	sp->iocbs = 1;
 	sp->vha = vha;
 done:
 	if (!sp)
@@ -270,19 +218,7 @@
 qla2x00_rel_sp(srb_t *sp)
 {
 	QLA_VHA_MARK_NOT_BUSY(sp->vha);
-	mempool_free(sp, sp->vha->hw->srb_mempool);
-}
-
-static inline void
-qla2x00_init_timer(srb_t *sp, unsigned long tmo)
-{
-	timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0);
-	sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
-	sp->free = qla2x00_sp_free;
-	init_completion(&sp->comp);
-	if (IS_QLAFX00(sp->vha->hw) && (sp->type == SRB_FXIOCB_DCMD))
-		init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
-	add_timer(&sp->u.iocb_cmd.timer);
+	qla2xxx_rel_qpair_sp(sp->qpair, sp);
 }
 
 static inline int
@@ -317,13 +253,13 @@
 qla_is_exch_offld_enabled(struct scsi_qla_host *vha)
 {
 	if (qla_ini_mode_enabled(vha) &&
-	    (ql2xiniexchg > FW_DEF_EXCHANGES_CNT))
+	    (vha->ql2xiniexchg > FW_DEF_EXCHANGES_CNT))
 		return true;
 	else if (qla_tgt_mode_enabled(vha) &&
-	    (ql2xexchoffld > FW_DEF_EXCHANGES_CNT))
+	    (vha->ql2xexchoffld > FW_DEF_EXCHANGES_CNT))
 		return true;
 	else if (qla_dual_mode_enabled(vha) &&
-	    ((ql2xiniexchg + ql2xexchoffld) > FW_DEF_EXCHANGES_CNT))
+	    ((vha->ql2xiniexchg + vha->ql2xexchoffld) > FW_DEF_EXCHANGES_CNT))
 		return true;
 	else
 		return false;
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 1199272..518eb95 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -107,7 +107,7 @@
 	cont_pkt = (cont_entry_t *)req->ring_ptr;
 
 	/* Load packet defaults. */
-	*((uint32_t *)(&cont_pkt->entry_type)) = cpu_to_le32(CONTINUE_TYPE);
+	put_unaligned_le32(CONTINUE_TYPE, &cont_pkt->entry_type);
 
 	return (cont_pkt);
 }
@@ -136,9 +136,8 @@
 	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
 
 	/* Load packet defaults. */
-	*((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
-	    cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
-	    cpu_to_le32(CONTINUE_A64_TYPE);
+	put_unaligned_le32(IS_QLAFX00(vha->hw) ? CONTINUE_A64_TYPE_FX00 :
+			   CONTINUE_A64_TYPE, &cont_pkt->entry_type);
 
 	return (cont_pkt);
 }
@@ -193,7 +192,7 @@
     uint16_t tot_dsds)
 {
 	uint16_t	avail_dsds;
-	uint32_t	*cur_dsd;
+	struct dsd32	*cur_dsd;
 	scsi_qla_host_t	*vha;
 	struct scsi_cmnd *cmd;
 	struct scatterlist *sg;
@@ -202,8 +201,7 @@
 	cmd = GET_CMD_SP(sp);
 
 	/* Update entry type to indicate Command Type 2 IOCB */
-	*((uint32_t *)(&cmd_pkt->entry_type)) =
-	    cpu_to_le32(COMMAND_TYPE);
+	put_unaligned_le32(COMMAND_TYPE, &cmd_pkt->entry_type);
 
 	/* No data transfer */
 	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
@@ -215,8 +213,8 @@
 	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
 
 	/* Three DSDs are available in the Command Type 2 IOCB */
-	avail_dsds = 3;
-	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
+	avail_dsds = ARRAY_SIZE(cmd_pkt->dsd32);
+	cur_dsd = cmd_pkt->dsd32;
 
 	/* Load data segments */
 	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
@@ -229,12 +227,11 @@
 			 * Type 0 IOCB.
 			 */
 			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
-			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
-			avail_dsds = 7;
+			cur_dsd = cont_pkt->dsd;
+			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
 		}
 
-		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
-		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+		append_dsd32(&cur_dsd, sg);
 		avail_dsds--;
 	}
 }
@@ -251,7 +248,7 @@
     uint16_t tot_dsds)
 {
 	uint16_t	avail_dsds;
-	uint32_t	*cur_dsd;
+	struct dsd64	*cur_dsd;
 	scsi_qla_host_t	*vha;
 	struct scsi_cmnd *cmd;
 	struct scatterlist *sg;
@@ -260,7 +257,7 @@
 	cmd = GET_CMD_SP(sp);
 
 	/* Update entry type to indicate Command Type 3 IOCB */
-	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_A64_TYPE);
+	put_unaligned_le32(COMMAND_A64_TYPE, &cmd_pkt->entry_type);
 
 	/* No data transfer */
 	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
@@ -272,12 +269,11 @@
 	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
 
 	/* Two DSDs are available in the Command Type 3 IOCB */
-	avail_dsds = 2;
-	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
+	avail_dsds = ARRAY_SIZE(cmd_pkt->dsd64);
+	cur_dsd = cmd_pkt->dsd64;
 
 	/* Load data segments */
 	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
-		dma_addr_t	sle_dma;
 		cont_a64_entry_t *cont_pkt;
 
 		/* Allocate additional continuation packets? */
@@ -287,18 +283,35 @@
 			 * Type 1 IOCB.
 			 */
 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
-			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
-			avail_dsds = 5;
+			cur_dsd = cont_pkt->dsd;
+			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
 		}
 
-		sle_dma = sg_dma_address(sg);
-		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
-		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
-		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+		append_dsd64(&cur_dsd, sg);
 		avail_dsds--;
 	}
 }
 
+/*
+ * Find the first handle that is not in use, starting from
+ * req->current_outstanding_cmd + 1. The caller must hold the lock that is
+ * associated with @req.
+ */
+uint32_t qla2xxx_get_next_handle(struct req_que *req)
+{
+	uint32_t index, handle = req->current_outstanding_cmd;
+
+	for (index = 1; index < req->num_outstanding_cmds; index++) {
+		handle++;
+		if (handle == req->num_outstanding_cmds)
+			handle = 1;
+		if (!req->outstanding_cmds[handle])
+			return handle;
+	}
+
+	return 0;
+}
+
 /**
  * qla2x00_start_scsi() - Send a SCSI command to the ISP
  * @sp: command to send to the ISP
@@ -313,7 +326,6 @@
 	scsi_qla_host_t	*vha;
 	struct scsi_cmnd *cmd;
 	uint32_t	*clr_ptr;
-	uint32_t        index;
 	uint32_t	handle;
 	cmd_entry_t	*cmd_pkt;
 	uint16_t	cnt;
@@ -336,7 +348,7 @@
 
 	/* Send marker if required */
 	if (vha->marker_needed != 0) {
-		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+		if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
 		    QLA_SUCCESS) {
 			return (QLA_FUNCTION_FAILED);
 		}
@@ -346,16 +358,8 @@
 	/* Acquire ring specific lock */
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 
-	/* Check for room in outstanding command list. */
-	handle = req->current_outstanding_cmd;
-	for (index = 1; index < req->num_outstanding_cmds; index++) {
-		handle++;
-		if (handle == req->num_outstanding_cmds)
-			handle = 1;
-		if (!req->outstanding_cmds[handle])
-			break;
-	}
-	if (index == req->num_outstanding_cmds)
+	handle = qla2xxx_get_next_handle(req);
+	if (handle == 0)
 		goto queuing_error;
 
 	/* Map the sg table so we have an accurate count of sg entries needed */
@@ -467,7 +471,7 @@
 			req->ring_ptr++;
 
 		/* Set chip new ring index. */
-		if (ha->mqenable || IS_QLA27XX(ha)) {
+		if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
 			WRT_REG_DWORD(req->req_q_in, req->ring_index);
 		} else if (IS_QLA83XX(ha)) {
 			WRT_REG_DWORD(req->req_q_in, req->ring_index);
@@ -490,8 +494,7 @@
 /**
  * qla2x00_marker() - Send a marker IOCB to the firmware.
  * @vha: HA context
- * @req: request queue
- * @rsp: response queue
+ * @qpair: queue pair pointer
  * @loop_id: loop ID
  * @lun: LUN
  * @type: marker modifier
@@ -501,18 +504,16 @@
  * Returns non-zero if a failure occurred, else zero.
  */
 static int
-__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
-			struct rsp_que *rsp, uint16_t loop_id,
-			uint64_t lun, uint8_t type)
+__qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
+    uint16_t loop_id, uint64_t lun, uint8_t type)
 {
 	mrk_entry_t *mrk;
 	struct mrk_entry_24xx *mrk24 = NULL;
-
+	struct req_que *req = qpair->req;
 	struct qla_hw_data *ha = vha->hw;
 	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
 
-	req = ha->req_q_map[0];
-	mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
+	mrk = (mrk_entry_t *)__qla2x00_alloc_iocbs(qpair, NULL);
 	if (mrk == NULL) {
 		ql_log(ql_log_warn, base_vha, 0x3026,
 		    "Failed to allocate Marker IOCB.\n");
@@ -543,16 +544,15 @@
 }
 
 int
-qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
-		struct rsp_que *rsp, uint16_t loop_id, uint64_t lun,
-		uint8_t type)
+qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
+    uint16_t loop_id, uint64_t lun, uint8_t type)
 {
 	int ret;
 	unsigned long flags = 0;
 
-	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
-	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
-	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+	ret = __qla2x00_marker(vha, qpair, loop_id, lun, type);
+	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
 
 	return (ret);
 }
@@ -567,11 +567,11 @@
 int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
 {
 	if (ha_locked) {
-		if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
+		if (__qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
 					MK_SYNC_ALL) != QLA_SUCCESS)
 			return QLA_FUNCTION_FAILED;
 	} else {
-		if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
+		if (qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
 					MK_SYNC_ALL) != QLA_SUCCESS)
 			return QLA_FUNCTION_FAILED;
 	}
@@ -584,13 +584,11 @@
 qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
 	uint16_t tot_dsds)
 {
-	uint32_t *cur_dsd = NULL;
+	struct dsd64 *cur_dsd = NULL, *next_dsd;
 	scsi_qla_host_t	*vha;
 	struct qla_hw_data *ha;
 	struct scsi_cmnd *cmd;
 	struct	scatterlist *cur_seg;
-	uint32_t *dsd_seg;
-	void *next_dsd;
 	uint8_t avail_dsds;
 	uint8_t first_iocb = 1;
 	uint32_t dsd_list_len;
@@ -600,7 +598,7 @@
 	cmd = GET_CMD_SP(sp);
 
 	/* Update entry type to indicate Command Type 6 IOCB */
-	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_6);
+	put_unaligned_le32(COMMAND_TYPE_6, &cmd_pkt->entry_type);
 
 	/* No data transfer */
 	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
@@ -623,7 +621,7 @@
 	}
 
 	cur_seg = scsi_sglist(cmd);
-	ctx = GET_CMD_CTX_SP(sp);
+	ctx = sp->u.scmd.ct6_ctx;
 
 	while (tot_dsds) {
 		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
@@ -642,32 +640,27 @@
 
 		if (first_iocb) {
 			first_iocb = 0;
-			dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
-			*dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
-			*dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
-			cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
+			put_unaligned_le64(dsd_ptr->dsd_list_dma,
+					   &cmd_pkt->fcp_dsd.address);
+			cmd_pkt->fcp_dsd.length = cpu_to_le32(dsd_list_len);
 		} else {
-			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
-			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
-			*cur_dsd++ = cpu_to_le32(dsd_list_len);
+			put_unaligned_le64(dsd_ptr->dsd_list_dma,
+					   &cur_dsd->address);
+			cur_dsd->length = cpu_to_le32(dsd_list_len);
+			cur_dsd++;
 		}
-		cur_dsd = (uint32_t *)next_dsd;
+		cur_dsd = next_dsd;
 		while (avail_dsds) {
-			dma_addr_t	sle_dma;
-
-			sle_dma = sg_dma_address(cur_seg);
-			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
-			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
-			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
+			append_dsd64(&cur_dsd, cur_seg);
 			cur_seg = sg_next(cur_seg);
 			avail_dsds--;
 		}
 	}
 
 	/* Null termination */
-	*cur_dsd++ =  0;
-	*cur_dsd++ = 0;
-	*cur_dsd++ = 0;
+	cur_dsd->address = 0;
+	cur_dsd->length = 0;
+	cur_dsd++;
 	cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
 	return 0;
 }
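/*
 * For reference, append_dsd64() used below is assumed to encapsulate the
 * three 32-bit stores it replaces, roughly as in this sketch:
 *
 *	static inline void append_dsd64(struct dsd64 **dsd,
 *					struct scatterlist *sg)
 *	{
 *		put_unaligned_le64(sg_dma_address(sg), &(*dsd)->address);
 *		(*dsd)->length = cpu_to_le32(sg_dma_len(sg));
 *		(*dsd)++;
 *	}
 */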
@@ -706,7 +699,7 @@
 	uint16_t tot_dsds, struct req_que *req)
 {
 	uint16_t	avail_dsds;
-	uint32_t	*cur_dsd;
+	struct dsd64	*cur_dsd;
 	scsi_qla_host_t	*vha;
 	struct scsi_cmnd *cmd;
 	struct scatterlist *sg;
@@ -715,7 +708,7 @@
 	cmd = GET_CMD_SP(sp);
 
 	/* Update entry type to indicate Command Type 7 IOCB */
-	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_7);
+	put_unaligned_le32(COMMAND_TYPE_7, &cmd_pkt->entry_type);
 
 	/* No data transfer */
 	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
@@ -738,12 +731,11 @@
 
 	/* One DSD is available in the Command Type 3 IOCB */
 	avail_dsds = 1;
-	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
+	cur_dsd = &cmd_pkt->dsd;
 
 	/* Load data segments */
 
 	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
-		dma_addr_t	sle_dma;
 		cont_a64_entry_t *cont_pkt;
 
 		/* Allocate additional continuation packets? */
@@ -753,14 +745,11 @@
 			 * Type 1 IOCB.
 			 */
 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
-			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
-			avail_dsds = 5;
+			cur_dsd = cont_pkt->dsd;
+			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
 		}
 
-		sle_dma = sg_dma_address(sg);
-		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
-		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
-		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+		append_dsd64(&cur_dsd, sg);
 		avail_dsds--;
 	}
 }
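/*
 * Assumed layout of the descriptor type introduced in these hunks; a zeroed
 * entry doubles as the null terminator written above:
 *
 *	struct dsd64 {
 *		__le64 address;
 *		__le32 length;
 *	} __packed;
 */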
@@ -896,14 +885,14 @@
 
 int
 qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
-	uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
+	struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
 {
 	void *next_dsd;
 	uint8_t avail_dsds = 0;
 	uint32_t dsd_list_len;
 	struct dsd_dma *dsd_ptr;
 	struct scatterlist *sg_prot;
-	uint32_t *cur_dsd = dsd;
+	struct dsd64 *cur_dsd = dsd;
 	uint16_t	used_dsds = tot_dsds;
 	uint32_t	prot_int; /* protection interval */
 	uint32_t	partial;
@@ -965,8 +954,7 @@
 
 			if (sp) {
 				list_add_tail(&dsd_ptr->list,
-				    &((struct crc_context *)
-					    sp->u.scmd.ctx)->dsd_list);
+					      &sp->u.scmd.crc_ctx->dsd_list);
 
 				sp->flags |= SRB_CRC_CTX_DSD_VALID;
 			} else {
@@ -977,14 +965,14 @@
 
 
 			/* add new list to cmd iocb or last list */
-			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
-			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
-			*cur_dsd++ = dsd_list_len;
-			cur_dsd = (uint32_t *)next_dsd;
+			put_unaligned_le64(dsd_ptr->dsd_list_dma,
+					   &cur_dsd->address);
+			cur_dsd->length = cpu_to_le32(dsd_list_len);
+			cur_dsd = next_dsd;
 		}
-		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
-		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
-		*cur_dsd++ = cpu_to_le32(sle_dma_len);
+		put_unaligned_le64(sle_dma, &cur_dsd->address);
+		cur_dsd->length = cpu_to_le32(sle_dma_len);
+		cur_dsd++;
 		avail_dsds--;
 
 		if (partial == 0) {
@@ -1003,22 +991,22 @@
 		}
 	}
 	/* Null termination */
-	*cur_dsd++ = 0;
-	*cur_dsd++ = 0;
-	*cur_dsd++ = 0;
+	cur_dsd->address = 0;
+	cur_dsd->length = 0;
+	cur_dsd++;
 	return 0;
 }
 
 int
-qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
-	uint16_t tot_dsds, struct qla_tc_param *tc)
+qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp,
+	struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
 {
 	void *next_dsd;
 	uint8_t avail_dsds = 0;
 	uint32_t dsd_list_len;
 	struct dsd_dma *dsd_ptr;
 	struct scatterlist *sg, *sgl;
-	uint32_t *cur_dsd = dsd;
+	struct dsd64 *cur_dsd = dsd;
 	int	i;
 	uint16_t	used_dsds = tot_dsds;
 	struct scsi_cmnd *cmd;
@@ -1035,8 +1023,6 @@
 
 
 	for_each_sg(sgl, sg, tot_dsds, i) {
-		dma_addr_t	sle_dma;
-
 		/* Allocate additional continuation packets? */
 		if (avail_dsds == 0) {
 			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
@@ -1065,8 +1051,7 @@
 
 			if (sp) {
 				list_add_tail(&dsd_ptr->list,
-				    &((struct crc_context *)
-					    sp->u.scmd.ctx)->dsd_list);
+					      &sp->u.scmd.crc_ctx->dsd_list);
 
 				sp->flags |= SRB_CRC_CTX_DSD_VALID;
 			} else {
@@ -1076,115 +1061,316 @@
 			}
 
 			/* add new list to cmd iocb or last list */
-			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
-			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
-			*cur_dsd++ = dsd_list_len;
-			cur_dsd = (uint32_t *)next_dsd;
+			put_unaligned_le64(dsd_ptr->dsd_list_dma,
+					   &cur_dsd->address);
+			cur_dsd->length = cpu_to_le32(dsd_list_len);
+			cur_dsd = next_dsd;
 		}
-		sle_dma = sg_dma_address(sg);
-
-		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
-		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
-		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+		append_dsd64(&cur_dsd, sg);
 		avail_dsds--;
 
 	}
 	/* Null termination */
-	*cur_dsd++ = 0;
-	*cur_dsd++ = 0;
-	*cur_dsd++ = 0;
+	cur_dsd->address = 0;
+	cur_dsd->length = 0;
+	cur_dsd++;
 	return 0;
 }
 
 int
 qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
-	uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
+	struct dsd64 *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
 {
-	void *next_dsd;
-	uint8_t avail_dsds = 0;
-	uint32_t dsd_list_len;
-	struct dsd_dma *dsd_ptr;
+	struct dsd_dma *dsd_ptr = NULL, *dif_dsd, *nxt_dsd;
 	struct scatterlist *sg, *sgl;
-	int	i;
-	struct scsi_cmnd *cmd;
-	uint32_t *cur_dsd = dsd;
-	uint16_t used_dsds = tot_dsds;
+	struct crc_context *difctx = NULL;
 	struct scsi_qla_host *vha;
+	uint dsd_list_len;
+	uint avail_dsds = 0;
+	uint used_dsds = tot_dsds;
+	bool dif_local_dma_alloc = false;
+	bool direction_to_device = false;
+	int i;
 
 	if (sp) {
-		cmd = GET_CMD_SP(sp);
+		struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+
 		sgl = scsi_prot_sglist(cmd);
 		vha = sp->vha;
+		difctx = sp->u.scmd.crc_ctx;
+		direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE;
+		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
+		    "%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n",
+		    __func__, cmd, difctx, sp);
 	} else if (tc) {
 		vha = tc->vha;
 		sgl = tc->prot_sg;
+		difctx = tc->ctx;
+		direction_to_device = tc->dma_data_direction == DMA_TO_DEVICE;
 	} else {
 		BUG();
 		return 1;
 	}
 
-	ql_dbg(ql_dbg_tgt, vha, 0xe021,
-		"%s: enter\n", __func__);
+	ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
+	    "%s: enter (write=%u)\n", __func__, direction_to_device);
 
-	for_each_sg(sgl, sg, tot_dsds, i) {
-		dma_addr_t	sle_dma;
+	/* if initiator doing write or target doing read */
+	if (direction_to_device) {
+		for_each_sg(sgl, sg, tot_dsds, i) {
+			u64 sle_phys = sg_phys(sg);
 
-		/* Allocate additional continuation packets? */
-		if (avail_dsds == 0) {
-			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
-						QLA_DSDS_PER_IOCB : used_dsds;
-			dsd_list_len = (avail_dsds + 1) * 12;
-			used_dsds -= avail_dsds;
+			/* If SGE addr + len flips bits in upper 32-bits */
+			if (MSD(sle_phys + sg->length) ^ MSD(sle_phys)) {
+				ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe022,
+				    "%s: page boundary crossing (phys=%llx len=%x)\n",
+				    __func__, sle_phys, sg->length);
 
-			/* allocate tracking DS */
-			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
-			if (!dsd_ptr)
-				return 1;
-
-			/* allocate new list */
-			dsd_ptr->dsd_addr = next_dsd =
-			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
-				&dsd_ptr->dsd_list_dma);
-
-			if (!next_dsd) {
-				/*
-				 * Need to cleanup only this dsd_ptr, rest
-				 * will be done by sp_free_dma()
-				 */
-				kfree(dsd_ptr);
-				return 1;
+				if (difctx) {
+					ha->dif_bundle_crossed_pages++;
+					dif_local_dma_alloc = true;
+				} else {
+					ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
+					    vha, 0xe022,
+					    "%s: difctx pointer is NULL\n",
+					    __func__);
+				}
+				break;
 			}
-
-			if (sp) {
-				list_add_tail(&dsd_ptr->list,
-				    &((struct crc_context *)
-					    sp->u.scmd.ctx)->dsd_list);
-
-				sp->flags |= SRB_CRC_CTX_DSD_VALID;
-			} else {
-				list_add_tail(&dsd_ptr->list,
-				    &(tc->ctx->dsd_list));
-				*tc->ctx_dsd_alloced = 1;
-			}
-
-			/* add new list to cmd iocb or last list */
-			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
-			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
-			*cur_dsd++ = dsd_list_len;
-			cur_dsd = (uint32_t *)next_dsd;
 		}
-		sle_dma = sg_dma_address(sg);
+		ha->dif_bundle_writes++;
+	} else {
+		ha->dif_bundle_reads++;
+	}
 
-		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
-		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
-		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+	if (ql2xdifbundlinginternalbuffers)
+		dif_local_dma_alloc = direction_to_device;
 
-		avail_dsds--;
+	if (dif_local_dma_alloc) {
+		u32 track_difbundl_buf = 0;
+		u32 ldma_sg_len = 0;
+		u8 ldma_needed = 1;
+
+		difctx->no_dif_bundl = 0;
+		difctx->dif_bundl_len = 0;
+
+		/* Track DSD buffers */
+		INIT_LIST_HEAD(&difctx->ldif_dsd_list);
+		/* Track local DMA buffers */
+		INIT_LIST_HEAD(&difctx->ldif_dma_hndl_list);
+
+		for_each_sg(sgl, sg, tot_dsds, i) {
+			u32 sglen = sg_dma_len(sg);
+
+			ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe023,
+			    "%s: sg[%x] (phys=%llx sglen=%x) ldma_sg_len: %x dif_bundl_len: %x ldma_needed: %x\n",
+			    __func__, i, (u64)sg_phys(sg), sglen, ldma_sg_len,
+			    difctx->dif_bundl_len, ldma_needed);
+
+			while (sglen) {
+				u32 xfrlen = 0;
+
+				if (ldma_needed) {
+					/*
+					 * Allocate list item to store
+					 * the DMA buffers
+					 */
+					dsd_ptr = kzalloc(sizeof(*dsd_ptr),
+					    GFP_ATOMIC);
+					if (!dsd_ptr) {
+						ql_dbg(ql_dbg_tgt, vha, 0xe024,
+						    "%s: failed alloc dsd_ptr\n",
+						    __func__);
+						return 1;
+					}
+					ha->dif_bundle_kallocs++;
+
+					/* allocate dma buffer */
+					dsd_ptr->dsd_addr = dma_pool_alloc
+						(ha->dif_bundl_pool, GFP_ATOMIC,
+						 &dsd_ptr->dsd_list_dma);
+					if (!dsd_ptr->dsd_addr) {
+						ql_dbg(ql_dbg_tgt, vha, 0xe024,
+						    "%s: failed alloc ->dsd_addr\n",
+						    __func__);
+						/*
+						 * Clean up only this dsd_ptr;
+						 * the rest will be done by
+						 * sp_free_dma()
+						 */
+						kfree(dsd_ptr);
+						ha->dif_bundle_kallocs--;
+						return 1;
+					}
+					ha->dif_bundle_dma_allocs++;
+					ldma_needed = 0;
+					difctx->no_dif_bundl++;
+					list_add_tail(&dsd_ptr->list,
+					    &difctx->ldif_dma_hndl_list);
+				}
+
+				/* xfrlen is min of dma pool size and sglen */
+				xfrlen = (sglen >
+				   (DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len)) ?
+				    DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len :
+				    sglen;
+
+				/* replace with local allocated dma buffer */
+				sg_pcopy_to_buffer(sgl, sg_nents(sgl),
+				    dsd_ptr->dsd_addr + ldma_sg_len, xfrlen,
+				    difctx->dif_bundl_len);
+				difctx->dif_bundl_len += xfrlen;
+				sglen -= xfrlen;
+				ldma_sg_len += xfrlen;
+				if (ldma_sg_len == DIF_BUNDLING_DMA_POOL_SIZE ||
+				    sg_is_last(sg)) {
+					ldma_needed = 1;
+					ldma_sg_len = 0;
+				}
+			}
+		}
+
+		track_difbundl_buf = used_dsds = difctx->no_dif_bundl;
+		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe025,
+		    "dif_bundl_len=%x, no_dif_bundl=%x track_difbundl_buf: %x\n",
+		    difctx->dif_bundl_len, difctx->no_dif_bundl,
+		    track_difbundl_buf);
+
+		if (sp)
+			sp->flags |= SRB_DIF_BUNDL_DMA_VALID;
+		else
+			tc->prot_flags = DIF_BUNDL_DMA_VALID;
+
+		list_for_each_entry_safe(dif_dsd, nxt_dsd,
+		    &difctx->ldif_dma_hndl_list, list) {
+			u32 sglen = (difctx->dif_bundl_len >
+			    DIF_BUNDLING_DMA_POOL_SIZE) ?
+			    DIF_BUNDLING_DMA_POOL_SIZE : difctx->dif_bundl_len;
+
+			BUG_ON(track_difbundl_buf == 0);
+
+			/* Allocate additional continuation packets? */
+			if (avail_dsds == 0) {
+				ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha,
+				    0xe024,
+				    "%s: adding continuation IOCBs\n",
+				    __func__);
+				avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
+				    QLA_DSDS_PER_IOCB : used_dsds;
+				dsd_list_len = (avail_dsds + 1) * 12;
+				used_dsds -= avail_dsds;
+
+				/* allocate tracking DS */
+				dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
+				if (!dsd_ptr) {
+					ql_dbg(ql_dbg_tgt, vha, 0xe026,
+					    "%s: failed alloc dsd_ptr\n",
+					    __func__);
+					return 1;
+				}
+				ha->dif_bundle_kallocs++;
+
+				difctx->no_ldif_dsd++;
+				/* allocate new list */
+				dsd_ptr->dsd_addr =
+				    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
+					&dsd_ptr->dsd_list_dma);
+				if (!dsd_ptr->dsd_addr) {
+					ql_dbg(ql_dbg_tgt, vha, 0xe026,
+					    "%s: failed alloc ->dsd_addr\n",
+					    __func__);
+					/*
+					 * Clean up only this dsd_ptr; the
+					 * rest will be done by sp_free_dma()
+					 */
+					kfree(dsd_ptr);
+					ha->dif_bundle_kallocs--;
+					return 1;
+				}
+				ha->dif_bundle_dma_allocs++;
+
+				if (sp) {
+					list_add_tail(&dsd_ptr->list,
+					    &difctx->ldif_dsd_list);
+					sp->flags |= SRB_CRC_CTX_DSD_VALID;
+				} else {
+					list_add_tail(&dsd_ptr->list,
+					    &difctx->ldif_dsd_list);
+					tc->ctx_dsd_alloced = 1;
+				}
+
+				/* add new list to cmd iocb or last list */
+				put_unaligned_le64(dsd_ptr->dsd_list_dma,
+						   &cur_dsd->address);
+				cur_dsd->length = cpu_to_le32(dsd_list_len);
+				cur_dsd = dsd_ptr->dsd_addr;
+			}
+			put_unaligned_le64(dif_dsd->dsd_list_dma,
+					   &cur_dsd->address);
+			cur_dsd->length = cpu_to_le32(sglen);
+			cur_dsd++;
+			avail_dsds--;
+			difctx->dif_bundl_len -= sglen;
+			track_difbundl_buf--;
+		}
+
+		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe026,
+		    "%s: no_ldif_dsd:%x, no_dif_bundl:%x\n", __func__,
+		    difctx->no_ldif_dsd, difctx->no_dif_bundl);
+	} else {
+		for_each_sg(sgl, sg, tot_dsds, i) {
+			/* Allocate additional continuation packets? */
+			if (avail_dsds == 0) {
+				avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
+				    QLA_DSDS_PER_IOCB : used_dsds;
+				dsd_list_len = (avail_dsds + 1) * 12;
+				used_dsds -= avail_dsds;
+
+				/* allocate tracking DS */
+				dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
+				if (!dsd_ptr) {
+					ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
+					    vha, 0xe027,
+					    "%s: failed alloc dsd_dma...\n",
+					    __func__);
+					return 1;
+				}
+
+				/* allocate new list */
+				dsd_ptr->dsd_addr =
+				    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
+					&dsd_ptr->dsd_list_dma);
+				if (!dsd_ptr->dsd_addr) {
+					/*
+					 * Clean up only this dsd_ptr; the
+					 * rest will be done by sp_free_dma()
+					 */
+					kfree(dsd_ptr);
+					return 1;
+				}
+
+				if (sp) {
+					list_add_tail(&dsd_ptr->list,
+					    &difctx->dsd_list);
+					sp->flags |= SRB_CRC_CTX_DSD_VALID;
+				} else {
+					list_add_tail(&dsd_ptr->list,
+					    &difctx->dsd_list);
+					tc->ctx_dsd_alloced = 1;
+				}
+
+				/* add new list to cmd iocb or last list */
+				put_unaligned_le64(dsd_ptr->dsd_list_dma,
+						   &cur_dsd->address);
+				cur_dsd->length = cpu_to_le32(dsd_list_len);
+				cur_dsd = dsd_ptr->dsd_addr;
+			}
+			append_dsd64(&cur_dsd, sg);
+			avail_dsds--;
+		}
 	}
 	/* Null termination */
-	*cur_dsd++ = 0;
-	*cur_dsd++ = 0;
-	*cur_dsd++ = 0;
+	cur_dsd->address = 0;
+	cur_dsd->length = 0;
+	cur_dsd++;
 	return 0;
 }
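/*
 * A minimal sketch of the bounce-buffer chunking above, assuming only that
 * each local DMA buffer holds DIF_BUNDLING_DMA_POOL_SIZE bytes; the helper
 * name is illustrative, not driver API:
 *
 *	static u32 dif_bundle_xfrlen(u32 sglen, u32 ldma_sg_len)
 *	{
 *		// copy at most the space left in the current buffer
 *		return min(sglen, DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len);
 *	}
 */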
 
@@ -1195,14 +1381,15 @@
  * @sp: SRB command to process
  * @cmd_pkt: Command type 3 IOCB
  * @tot_dsds: Total number of segments to transfer
- * @tot_prot_dsds:
- * @fw_prot_opts:
+ * @tot_prot_dsds: Total number of segments with protection information
+ * @fw_prot_opts: Protection options to be passed to firmware
  */
-inline int
+static inline int
 qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
     uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
 {
-	uint32_t		*cur_dsd, *fcp_dl;
+	struct dsd64		*cur_dsd;
+	uint32_t		*fcp_dl;
 	scsi_qla_host_t		*vha;
 	struct scsi_cmnd	*cmd;
 	uint32_t		total_bytes = 0;
@@ -1220,7 +1407,7 @@
 	cmd = GET_CMD_SP(sp);
 
 	/* Update entry type to indicate Command Type CRC_2 IOCB */
-	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_CRC_2);
+	put_unaligned_le32(COMMAND_TYPE_CRC_2, &cmd_pkt->entry_type);
 
 	vha = sp->vha;
 	ha = vha->hw;
@@ -1250,7 +1437,7 @@
 		bundling = 0;
 
 	/* Allocate CRC context from global pool */
-	crc_ctx_pkt = sp->u.scmd.ctx =
+	crc_ctx_pkt = sp->u.scmd.crc_ctx =
 	    dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
 
 	if (!crc_ctx_pkt)
@@ -1268,8 +1455,7 @@
 	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
 	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);
 
-	cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
-	cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
+	put_unaligned_le64(crc_ctx_dma, &cmd_pkt->crc_context_address);
 	cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
 
 	/* Determine SCSI command length -- align to 4 byte boundary */
@@ -1296,10 +1482,8 @@
 	int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
 	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
 	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
-	cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
-	    LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
-	cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
-	    MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
+	put_unaligned_le64(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF,
+			   &cmd_pkt->fcp_cmnd_dseg_address);
 	fcp_cmnd->task_management = 0;
 	fcp_cmnd->task_attribute = TSK_SIMPLE;
 
@@ -1313,18 +1497,18 @@
 	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
 	case SCSI_PROT_READ_INSERT:
 	case SCSI_PROT_WRITE_STRIP:
-	    total_bytes = data_bytes;
-	    data_bytes += dif_bytes;
-	    break;
+		total_bytes = data_bytes;
+		data_bytes += dif_bytes;
+		break;
 
 	case SCSI_PROT_READ_STRIP:
 	case SCSI_PROT_WRITE_INSERT:
 	case SCSI_PROT_READ_PASS:
 	case SCSI_PROT_WRITE_PASS:
-	    total_bytes = data_bytes + dif_bytes;
-	    break;
+		total_bytes = data_bytes + dif_bytes;
+		break;
 	default:
-	    BUG();
+		BUG();
 	}
 
 	if (!qla2x00_hba_err_chk_enabled(sp))
@@ -1341,7 +1525,7 @@
 	}
 
 	if (!bundling) {
-		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
+		cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
 	} else {
 		/*
 		 * Configure Bundling if we need to fetch interleaving
@@ -1351,7 +1535,7 @@
 		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
 		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
 							tot_prot_dsds);
-		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
+		cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
 	}
 
 	/* Finish the common fields of CRC pkt */
@@ -1384,7 +1568,7 @@
 	if (bundling && tot_prot_dsds) {
 		/* Walks dif segments */
 		cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
-		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
+		cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
 		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
 				tot_prot_dsds, NULL))
 			goto crc_queuing_error;
@@ -1409,28 +1593,25 @@
 	int		nseg;
 	unsigned long   flags;
 	uint32_t	*clr_ptr;
-	uint32_t        index;
 	uint32_t	handle;
 	struct cmd_type_7 *cmd_pkt;
 	uint16_t	cnt;
 	uint16_t	req_cnt;
 	uint16_t	tot_dsds;
 	struct req_que *req = NULL;
-	struct rsp_que *rsp = NULL;
 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
 	struct scsi_qla_host *vha = sp->vha;
 	struct qla_hw_data *ha = vha->hw;
 
 	/* Setup device pointers. */
 	req = vha->req;
-	rsp = req->rsp;
 
 	/* So we know we haven't pci_map'ed anything yet */
 	tot_dsds = 0;
 
 	/* Send marker if required */
 	if (vha->marker_needed != 0) {
-		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+		if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
 		    QLA_SUCCESS)
 			return QLA_FUNCTION_FAILED;
 		vha->marker_needed = 0;
@@ -1439,16 +1620,8 @@
 	/* Acquire ring specific lock */
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 
-	/* Check for room in outstanding command list. */
-	handle = req->current_outstanding_cmd;
-	for (index = 1; index < req->num_outstanding_cmds; index++) {
-		handle++;
-		if (handle == req->num_outstanding_cmds)
-			handle = 1;
-		if (!req->outstanding_cmds[handle])
-			break;
-	}
-	if (index == req->num_outstanding_cmds)
+	handle = qla2xxx_get_next_handle(req);
+	if (handle == 0)
 		goto queuing_error;
 
 	/* Map the sg table so we have an accurate count of sg entries needed */
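/*
 * qla2xxx_get_next_handle() is presumed to factor out the open-coded search
 * removed in these hunks, returning 0 when no handle is free:
 *
 *	uint32_t qla2xxx_get_next_handle(struct req_que *req)
 *	{
 *		uint32_t index, handle = req->current_outstanding_cmd;
 *
 *		for (index = 1; index < req->num_outstanding_cmds; index++) {
 *			handle++;
 *			if (handle == req->num_outstanding_cmds)
 *				handle = 1;
 *			if (!req->outstanding_cmds[handle])
 *				return handle;
 *		}
 *		return 0;
 *	}
 */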
@@ -1551,7 +1724,6 @@
 	int			nseg;
 	unsigned long		flags;
 	uint32_t		*clr_ptr;
-	uint32_t		index;
 	uint32_t		handle;
 	uint16_t		cnt;
 	uint16_t		req_cnt = 0;
@@ -1583,7 +1755,7 @@
 
 	/* Send marker if required */
 	if (vha->marker_needed != 0) {
-		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+		if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
 		    QLA_SUCCESS)
 			return QLA_FUNCTION_FAILED;
 		vha->marker_needed = 0;
@@ -1592,17 +1764,8 @@
 	/* Acquire ring specific lock */
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 
-	/* Check for room in outstanding command list. */
-	handle = req->current_outstanding_cmd;
-	for (index = 1; index < req->num_outstanding_cmds; index++) {
-		handle++;
-		if (handle == req->num_outstanding_cmds)
-			handle = 1;
-		if (!req->outstanding_cmds[handle])
-			break;
-	}
-
-	if (index == req->num_outstanding_cmds)
+	handle = qla2xxx_get_next_handle(req);
+	if (handle == 0)
 		goto queuing_error;
 
 	/* Compute number of required data segments */
@@ -1747,14 +1910,12 @@
 	int		nseg;
 	unsigned long   flags;
 	uint32_t	*clr_ptr;
-	uint32_t        index;
 	uint32_t	handle;
 	struct cmd_type_7 *cmd_pkt;
 	uint16_t	cnt;
 	uint16_t	req_cnt;
 	uint16_t	tot_dsds;
 	struct req_que *req = NULL;
-	struct rsp_que *rsp = NULL;
 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
 	struct scsi_qla_host *vha = sp->fcport->vha;
 	struct qla_hw_data *ha = vha->hw;
@@ -1764,7 +1925,6 @@
 	spin_lock_irqsave(&qpair->qp_lock, flags);
 
 	/* Setup qpair pointers */
-	rsp = qpair->rsp;
 	req = qpair->req;
 
 	/* So we know we haven't pci_map'ed anything yet */
@@ -1772,7 +1932,7 @@
 
 	/* Send marker if required */
 	if (vha->marker_needed != 0) {
-		if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+		if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
 		    QLA_SUCCESS) {
 			spin_unlock_irqrestore(&qpair->qp_lock, flags);
 			return QLA_FUNCTION_FAILED;
@@ -1780,16 +1940,8 @@
 		vha->marker_needed = 0;
 	}
 
-	/* Check for room in outstanding command list. */
-	handle = req->current_outstanding_cmd;
-	for (index = 1; index < req->num_outstanding_cmds; index++) {
-		handle++;
-		if (handle == req->num_outstanding_cmds)
-			handle = 1;
-		if (!req->outstanding_cmds[handle])
-			break;
-	}
-	if (index == req->num_outstanding_cmds)
+	handle = qla2xxx_get_next_handle(req);
+	if (handle == 0)
 		goto queuing_error;
 
 	/* Map the sg table so we have an accurate count of sg entries needed */
@@ -1893,7 +2045,6 @@
 	int			nseg;
 	unsigned long		flags;
 	uint32_t		*clr_ptr;
-	uint32_t		index;
 	uint32_t		handle;
 	uint16_t		cnt;
 	uint16_t		req_cnt = 0;
@@ -1940,7 +2091,7 @@
 
 	/* Send marker if required */
 	if (vha->marker_needed != 0) {
-		if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+		if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
 		    QLA_SUCCESS) {
 			spin_unlock_irqrestore(&qpair->qp_lock, flags);
 			return QLA_FUNCTION_FAILED;
@@ -1948,17 +2099,8 @@
 		vha->marker_needed = 0;
 	}
 
-	/* Check for room in outstanding command list. */
-	handle = req->current_outstanding_cmd;
-	for (index = 1; index < req->num_outstanding_cmds; index++) {
-		handle++;
-		if (handle == req->num_outstanding_cmds)
-			handle = 1;
-		if (!req->outstanding_cmds[handle])
-			break;
-	}
-
-	if (index == req->num_outstanding_cmds)
+	handle = qla2xxx_get_next_handle(req);
+	if (handle == 0)
 		goto queuing_error;
 
 	/* Compute number of required data segments */
@@ -2105,7 +2247,7 @@
 	struct qla_hw_data *ha = vha->hw;
 	struct req_que *req = qpair->req;
 	device_reg_t *reg = ISP_QUE_REG(ha, req->id);
-	uint32_t index, handle;
+	uint32_t handle;
 	request_t *pkt;
 	uint16_t cnt, req_cnt;
 
@@ -2122,7 +2264,8 @@
 	if (req->cnt < req_cnt + 2) {
 		if (qpair->use_shadow_reg)
 			cnt = *req->out_ptr;
-		else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
+		else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
+		    IS_QLA28XX(ha))
 			cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
 		else if (IS_P3P_TYPE(ha))
 			cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
@@ -2144,16 +2287,8 @@
 		goto queuing_error;
 
 	if (sp) {
-		/* Check for room in outstanding command list. */
-		handle = req->current_outstanding_cmd;
-		for (index = 1; index < req->num_outstanding_cmds; index++) {
-			handle++;
-			if (handle == req->num_outstanding_cmds)
-				handle = 1;
-			if (!req->outstanding_cmds[handle])
-				break;
-		}
-		if (index == req->num_outstanding_cmds) {
+		handle = qla2xxx_get_next_handle(req);
+		if (handle == 0) {
 			ql_log(ql_log_warn, vha, 0x700b,
 			    "No room on outstanding cmd array.\n");
 			goto queuing_error;
@@ -2208,8 +2343,11 @@
 
 	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
 	logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
-	if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI)
+	if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) {
 		logio->control_flags |= LCF_NVME_PRLI;
+		if (sp->vha->flags.nvme_first_burst)
+			logio->io_parameter[0] = NVME_PRLI_SP_FIRST_BURST;
+	}
 
 	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
 	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
@@ -2270,8 +2408,7 @@
 	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
 	logio->control_flags =
 	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
-	if (!sp->fcport->se_sess ||
-	    !sp->fcport->keep_nport_handle)
+	if (!sp->fcport->keep_nport_handle)
 		logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
 	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
 	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
@@ -2289,7 +2426,7 @@
 	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
 	mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
 	mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
-	    cpu_to_le16(sp->fcport->loop_id):
+	    cpu_to_le16(sp->fcport->loop_id) :
 	    cpu_to_le16(sp->fcport->loop_id << 8);
 	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
 	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
@@ -2360,10 +2497,18 @@
 	}
 }
 
-static void
-qla2x00_els_dcmd_sp_free(void *data)
+void qla2x00_init_timer(srb_t *sp, unsigned long tmo)
 {
-	srb_t *sp = data;
+	timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0);
+	sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
+	sp->free = qla2x00_sp_free;
+	if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD)
+		init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
+	sp->start_timer = 1;
+}
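/*
 * Arming is deferred so the timer only runs for IOCBs that were actually
 * queued; qla2x00_start_sp() below pairs this with:
 *
 *	if (sp->start_timer)
 *		add_timer(&sp->u.iocb_cmd.timer);
 */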
+
+static void qla2x00_els_dcmd_sp_free(srb_t *sp)
+{
 	struct srb_iocb *elsio = &sp->u.iocb_cmd;
 
 	kfree(sp->fcport);
@@ -2393,10 +2538,8 @@
 	complete(&lio->u.els_logo.comp);
 }
 
-static void
-qla2x00_els_dcmd_sp_done(void *ptr, int res)
+static void qla2x00_els_dcmd_sp_done(srb_t *sp, int res)
 {
-	srb_t *sp = ptr;
 	fc_port_t *fcport = sp->fcport;
 	struct srb_iocb *lio = &sp->u.iocb_cmd;
 	struct scsi_qla_host *vha = sp->vha;
@@ -2513,42 +2656,37 @@
 	els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
 	els_iocb->port_id[1] = sp->fcport->d_id.b.area;
 	els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
-	els_iocb->s_id[0] = vha->d_id.b.al_pa;
-	els_iocb->s_id[1] = vha->d_id.b.area;
-	els_iocb->s_id[2] = vha->d_id.b.domain;
-	els_iocb->control_flags = 0;
+	/* For the S_ID the byte order differs from that of the D_ID */
+	els_iocb->s_id[1] = vha->d_id.b.al_pa;
+	els_iocb->s_id[2] = vha->d_id.b.area;
+	els_iocb->s_id[0] = vha->d_id.b.domain;
 
 	if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
+		els_iocb->control_flags = 0;
 		els_iocb->tx_byte_count = els_iocb->tx_len =
-			sizeof(struct els_plogi_payload);
-		els_iocb->tx_address[0] =
-			cpu_to_le32(LSD(elsio->u.els_plogi.els_plogi_pyld_dma));
-		els_iocb->tx_address[1] =
-			cpu_to_le32(MSD(elsio->u.els_plogi.els_plogi_pyld_dma));
-
+			cpu_to_le32(sizeof(struct els_plogi_payload));
+		put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma,
+				   &els_iocb->tx_address);
 		els_iocb->rx_dsd_count = 1;
 		els_iocb->rx_byte_count = els_iocb->rx_len =
-			sizeof(struct els_plogi_payload);
-		els_iocb->rx_address[0] =
-			cpu_to_le32(LSD(elsio->u.els_plogi.els_resp_pyld_dma));
-		els_iocb->rx_address[1] =
-			cpu_to_le32(MSD(elsio->u.els_plogi.els_resp_pyld_dma));
+			cpu_to_le32(sizeof(struct els_plogi_payload));
+		put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma,
+				   &els_iocb->rx_address);
 
 		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
 		    "PLOGI ELS IOCB:\n");
 		ql_dump_buffer(ql_log_info, vha, 0x0109,
 		    (uint8_t *)els_iocb, 0x70);
 	} else {
-		els_iocb->tx_byte_count = sizeof(struct els_logo_payload);
-		els_iocb->tx_address[0] =
-		    cpu_to_le32(LSD(elsio->u.els_logo.els_logo_pyld_dma));
-		els_iocb->tx_address[1] =
-		    cpu_to_le32(MSD(elsio->u.els_logo.els_logo_pyld_dma));
+		els_iocb->control_flags = 1 << 13;
+		els_iocb->tx_byte_count =
+			cpu_to_le32(sizeof(struct els_logo_payload));
+		put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma,
+				   &els_iocb->tx_address);
 		els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
 
 		els_iocb->rx_byte_count = 0;
-		els_iocb->rx_address[0] = 0;
-		els_iocb->rx_address[1] = 0;
+		els_iocb->rx_address = 0;
 		els_iocb->rx_len = 0;
 	}
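/*
 * Assumed from the assignments above: d_id.b stores al_pa/area/domain in
 * that order, while the firmware expects the source ID as s_id[0] = domain,
 * s_id[1] = al_pa and s_id[2] = area, hence the remap noted in the comment.
 */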
 
@@ -2580,10 +2718,23 @@
 	sp->done(sp, QLA_FUNCTION_TIMEOUT);
 }
 
-static void
-qla2x00_els_dcmd2_sp_done(void *ptr, int res)
+void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha, struct els_plogi *els_plogi)
 {
-	srb_t *sp = ptr;
+	if (els_plogi->els_plogi_pyld)
+		dma_free_coherent(&vha->hw->pdev->dev,
+				  els_plogi->tx_size,
+				  els_plogi->els_plogi_pyld,
+				  els_plogi->els_plogi_pyld_dma);
+
+	if (els_plogi->els_resp_pyld)
+		dma_free_coherent(&vha->hw->pdev->dev,
+				  els_plogi->rx_size,
+				  els_plogi->els_resp_pyld,
+				  els_plogi->els_resp_pyld_dma);
+}
+
+static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
+{
 	fc_port_t *fcport = sp->fcport;
 	struct srb_iocb *lio = &sp->u.iocb_cmd;
 	struct scsi_qla_host *vha = sp->vha;
@@ -2605,26 +2756,16 @@
 		} else {
 			memset(&ea, 0, sizeof(ea));
 			ea.fcport = fcport;
-			ea.rc = res;
-			ea.event = FCME_ELS_PLOGI_DONE;
-			qla2x00_fcport_event_handler(vha, &ea);
+			ea.data[0] = MBS_COMMAND_COMPLETE;
+			ea.sp = sp;
+			qla24xx_handle_plogi_done_event(vha, &ea);
 		}
 
 		e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
 		if (!e) {
 			struct srb_iocb *elsio = &sp->u.iocb_cmd;
 
-			if (elsio->u.els_plogi.els_plogi_pyld)
-				dma_free_coherent(&sp->vha->hw->pdev->dev,
-				    elsio->u.els_plogi.tx_size,
-				    elsio->u.els_plogi.els_plogi_pyld,
-				    elsio->u.els_plogi.els_plogi_pyld_dma);
-
-			if (elsio->u.els_plogi.els_resp_pyld)
-				dma_free_coherent(&sp->vha->hw->pdev->dev,
-				    elsio->u.els_plogi.rx_size,
-				    elsio->u.els_plogi.els_resp_pyld,
-				    elsio->u.els_plogi.els_resp_pyld_dma);
+			qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
 			sp->free(sp);
 			return;
 		}
@@ -2642,7 +2783,6 @@
 	struct qla_hw_data *ha = vha->hw;
 	int rval = QLA_SUCCESS;
 	void	*ptr, *resp_ptr;
-	dma_addr_t ptr_dma;
 
 	/* Alloc SRB structure */
 	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
@@ -2674,7 +2814,6 @@
 	ptr = elsio->u.els_plogi.els_plogi_pyld =
 	    dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
 		&elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
-	ptr_dma = elsio->u.els_plogi.els_plogi_pyld_dma;
 
 	if (!elsio->u.els_plogi.els_plogi_pyld) {
 		rval = QLA_FUNCTION_FAILED;
@@ -2726,18 +2865,7 @@
 
 out:
 	fcport->flags &= ~(FCF_ASYNC_SENT);
-	if (elsio->u.els_plogi.els_plogi_pyld)
-		dma_free_coherent(&sp->vha->hw->pdev->dev,
-		    elsio->u.els_plogi.tx_size,
-		    elsio->u.els_plogi.els_plogi_pyld,
-		    elsio->u.els_plogi.els_plogi_pyld_dma);
-
-	if (elsio->u.els_plogi.els_resp_pyld)
-		dma_free_coherent(&sp->vha->hw->pdev->dev,
-		    elsio->u.els_plogi.rx_size,
-		    elsio->u.els_plogi.els_resp_pyld,
-		    elsio->u.els_plogi.els_resp_pyld_dma);
-
+	qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
 	sp->free(sp);
 done:
 	return rval;
@@ -2773,17 +2901,13 @@
         els_iocb->tx_byte_count =
             cpu_to_le32(bsg_job->request_payload.payload_len);
 
-        els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
-            (bsg_job->request_payload.sg_list)));
-        els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
-            (bsg_job->request_payload.sg_list)));
+	put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
+			   &els_iocb->tx_address);
         els_iocb->tx_len = cpu_to_le32(sg_dma_len
             (bsg_job->request_payload.sg_list));
 
-        els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
-            (bsg_job->reply_payload.sg_list)));
-        els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
-            (bsg_job->reply_payload.sg_list)));
+	put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
+			   &els_iocb->rx_address);
         els_iocb->rx_len = cpu_to_le32(sg_dma_len
             (bsg_job->reply_payload.sg_list));
 
@@ -2794,14 +2918,13 @@
 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
 {
 	uint16_t        avail_dsds;
-	uint32_t        *cur_dsd;
+	struct dsd64	*cur_dsd;
 	struct scatterlist *sg;
 	int index;
 	uint16_t tot_dsds;
 	scsi_qla_host_t *vha = sp->vha;
 	struct qla_hw_data *ha = vha->hw;
 	struct bsg_job *bsg_job = sp->u.bsg_job;
-	int loop_iterartion = 0;
 	int entry_count = 1;
 
 	memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
@@ -2821,25 +2944,20 @@
 	ct_iocb->rsp_bytecount =
 	    cpu_to_le32(bsg_job->reply_payload.payload_len);
 
-	ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
-	    (bsg_job->request_payload.sg_list)));
-	ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
-	    (bsg_job->request_payload.sg_list)));
-	ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
+	put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
+			   &ct_iocb->req_dsd.address);
+	ct_iocb->req_dsd.length = ct_iocb->req_bytecount;
 
-	ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
-	    (bsg_job->reply_payload.sg_list)));
-	ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
-	    (bsg_job->reply_payload.sg_list)));
-	ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
+	put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
+			   &ct_iocb->rsp_dsd.address);
+	ct_iocb->rsp_dsd.length = ct_iocb->rsp_bytecount;
 
 	avail_dsds = 1;
-	cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
+	cur_dsd = &ct_iocb->rsp_dsd;
 	index = 0;
 	tot_dsds = bsg_job->reply_payload.sg_cnt;
 
 	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
-		dma_addr_t       sle_dma;
 		cont_a64_entry_t *cont_pkt;
 
 		/* Allocate additional continuation packets? */
@@ -2850,16 +2968,12 @@
 			       */
 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
 			    vha->hw->req_q_map[0]);
-			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
+			cur_dsd = cont_pkt->dsd;
 			avail_dsds = 5;
 			entry_count++;
 		}
 
-		sle_dma = sg_dma_address(sg);
-		*cur_dsd++   = cpu_to_le32(LSD(sle_dma));
-		*cur_dsd++   = cpu_to_le32(MSD(sle_dma));
-		*cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
-		loop_iterartion++;
+		append_dsd64(&cur_dsd, sg);
 		avail_dsds--;
 	}
 	ct_iocb->entry_count = entry_count;
@@ -2871,7 +2985,7 @@
 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
 {
 	uint16_t        avail_dsds;
-	uint32_t        *cur_dsd;
+	struct dsd64	*cur_dsd;
 	struct scatterlist *sg;
 	int index;
 	uint16_t cmd_dsds, rsp_dsds;
@@ -2900,12 +3014,10 @@
             cpu_to_le32(bsg_job->request_payload.payload_len);
 
 	avail_dsds = 2;
-	cur_dsd = (uint32_t *)ct_iocb->dseg_0_address;
+	cur_dsd = ct_iocb->dsd;
 	index = 0;
 
 	for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
-		dma_addr_t       sle_dma;
-
 		/* Allocate additional continuation packets? */
 		if (avail_dsds == 0) {
 			/*
@@ -2914,23 +3026,18 @@
 			 */
 			cont_pkt = qla2x00_prep_cont_type1_iocb(
 			    vha, ha->req_q_map[0]);
-			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
+			cur_dsd = cont_pkt->dsd;
 			avail_dsds = 5;
 			entry_count++;
 		}
 
-		sle_dma = sg_dma_address(sg);
-		*cur_dsd++   = cpu_to_le32(LSD(sle_dma));
-		*cur_dsd++   = cpu_to_le32(MSD(sle_dma));
-		*cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
+		append_dsd64(&cur_dsd, sg);
 		avail_dsds--;
 	}
 
 	index = 0;
 
 	for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
-		dma_addr_t       sle_dma;
-
 		/* Allocate additional continuation packets? */
 		if (avail_dsds == 0) {
 			/*
@@ -2939,15 +3046,12 @@
 			       */
 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
 			    ha->req_q_map[0]);
-			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
+			cur_dsd = cont_pkt->dsd;
 			avail_dsds = 5;
 			entry_count++;
 		}
 
-		sle_dma = sg_dma_address(sg);
-		*cur_dsd++   = cpu_to_le32(LSD(sle_dma));
-		*cur_dsd++   = cpu_to_le32(MSD(sle_dma));
-		*cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
+		append_dsd64(&cur_dsd, sg);
 		avail_dsds--;
 	}
         ct_iocb->entry_count = entry_count;
@@ -2966,7 +3070,6 @@
 	unsigned long   flags;
 	struct scsi_cmnd *cmd;
 	uint32_t	*clr_ptr;
-	uint32_t        index;
 	uint32_t	handle;
 	uint16_t	cnt;
 	uint16_t	req_cnt;
@@ -2994,8 +3097,8 @@
 
 	/* Send marker if required */
 	if (vha->marker_needed != 0) {
-		if (qla2x00_marker(vha, req,
-			rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
+		if (qla2x00_marker(vha, ha->base_qpair,
+			0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
 			ql_log(ql_log_warn, vha, 0x300c,
 			    "qla2x00_marker failed for cmd=%p.\n", cmd);
 			return QLA_FUNCTION_FAILED;
@@ -3006,16 +3109,8 @@
 	/* Acquire ring specific lock */
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 
-	/* Check for room in outstanding command list. */
-	handle = req->current_outstanding_cmd;
-	for (index = 1; index < req->num_outstanding_cmds; index++) {
-		handle++;
-		if (handle == req->num_outstanding_cmds)
-			handle = 1;
-		if (!req->outstanding_cmds[handle])
-			break;
-	}
-	if (index == req->num_outstanding_cmds)
+	handle = qla2xxx_get_next_handle(req);
+	if (handle == 0)
 		goto queuing_error;
 
 	/* Map the sg table so we have an accurate count of sg entries needed */
@@ -3086,7 +3181,7 @@
 				goto queuing_error;
 		}
 
-		ctx = sp->u.scmd.ctx =
+		ctx = sp->u.scmd.ct6_ctx =
 		    mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
 		if (!ctx) {
 			ql_log(ql_log_fatal, vha, 0x3010,
@@ -3168,10 +3263,8 @@
 		*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
 
 		cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
-		cmd_pkt->fcp_cmnd_dseg_address[0] =
-		    cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
-		cmd_pkt->fcp_cmnd_dseg_address[1] =
-		    cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
+		put_unaligned_le64(ctx->fcp_cmnd_dma,
+				   &cmd_pkt->fcp_cmnd_dseg_address);
 
 		sp->flags |= SRB_FCP_CMND_DMA_VALID;
 		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
@@ -3183,6 +3276,7 @@
 		cmd_pkt->entry_status = (uint8_t) rsp->id;
 	} else {
 		struct cmd_type_7 *cmd_pkt;
+
 		req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
 		if (req->cnt < (req_cnt + 2)) {
 			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
@@ -3283,9 +3377,9 @@
 	if (tot_dsds)
 		scsi_dma_unmap(cmd);
 
-	if (sp->u.scmd.ctx) {
-		mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
-		sp->u.scmd.ctx = NULL;
+	if (sp->u.scmd.crc_ctx) {
+		mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool);
+		sp->u.scmd.crc_ctx = NULL;
 	}
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
@@ -3297,19 +3391,21 @@
 {
 	struct srb_iocb *aio = &sp->u.iocb_cmd;
 	scsi_qla_host_t *vha = sp->vha;
-	struct req_que *req = vha->req;
+	struct req_que *req = sp->qpair->req;
 
 	memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
 	abt_iocb->entry_type = ABORT_IOCB_TYPE;
 	abt_iocb->entry_count = 1;
 	abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
-	abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+	if (sp->fcport) {
+		abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+		abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
+		abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
+		abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
+	}
 	abt_iocb->handle_to_abort =
 	    cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
 				    aio->u.abt.cmd_hndl));
-	abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
-	abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
-	abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
 	abt_iocb->vp_index = vha->vp_idx;
 	abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no);
 	/* Send the command to the firmware */
@@ -3385,15 +3481,13 @@
 
 	cmd_pkt->tx_dseg_count = 1;
 	cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
-	cmd_pkt->dseg0_len = nvme->u.nvme.cmd_len;
-	cmd_pkt->dseg0_address[0] = cpu_to_le32(LSD(nvme->u.nvme.cmd_dma));
-	cmd_pkt->dseg0_address[1] = cpu_to_le32(MSD(nvme->u.nvme.cmd_dma));
+	cmd_pkt->dsd[0].length = nvme->u.nvme.cmd_len;
+	put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address);
 
 	cmd_pkt->rx_dseg_count = 1;
 	cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
-	cmd_pkt->dseg1_len  = nvme->u.nvme.rsp_len;
-	cmd_pkt->dseg1_address[0] =  cpu_to_le32(LSD(nvme->u.nvme.rsp_dma));
-	cmd_pkt->dseg1_address[1] =  cpu_to_le32(MSD(nvme->u.nvme.rsp_dma));
+	cmd_pkt->dsd[1].length = nvme->u.nvme.rsp_len;
+	put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address);
 
 	return rval;
 }
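/*
 * Assumed structure change behind this hunk: the separate dseg0/dseg1
 * address and length fields are replaced by a two-element descriptor array
 * in the IOCB, with dsd[0] carrying the LS command buffer and dsd[1] the
 * LS response buffer.
 */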
@@ -3435,22 +3529,22 @@
 int
 qla2x00_start_sp(srb_t *sp)
 {
-	int rval;
+	int rval = QLA_SUCCESS;
 	scsi_qla_host_t *vha = sp->vha;
 	struct qla_hw_data *ha = vha->hw;
+	struct qla_qpair *qp = sp->qpair;
 	void *pkt;
 	unsigned long flags;
 
-	rval = QLA_FUNCTION_FAILED;
-	spin_lock_irqsave(&ha->hardware_lock, flags);
-	pkt = qla2x00_alloc_iocbs(vha, sp);
+	spin_lock_irqsave(qp->qp_lock_ptr, flags);
+	pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
 	if (!pkt) {
+		rval = EAGAIN;
 		ql_log(ql_log_warn, vha, 0x700c,
 		    "qla2x00_alloc_iocbs failed.\n");
 		goto done;
 	}
 
-	rval = QLA_SUCCESS;
 	switch (sp->type) {
 	case SRB_LOGIN_CMD:
 		IS_FWI2_CAPABLE(ha) ?
@@ -3520,10 +3614,13 @@
 		break;
 	}
 
+	if (sp->start_timer)
+		add_timer(&sp->u.iocb_cmd.timer);
+
 	wmb();
-	qla2x00_start_iocbs(vha, ha->req_q_map[0]);
+	qla2x00_start_iocbs(vha, qp->req);
 done:
-	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
 	return rval;
 }
 
@@ -3532,7 +3629,7 @@
 				struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
 {
 	uint16_t avail_dsds;
-	uint32_t *cur_dsd;
+	struct dsd64 *cur_dsd;
 	uint32_t req_data_len = 0;
 	uint32_t rsp_data_len = 0;
 	struct scatterlist *sg;
@@ -3541,8 +3638,7 @@
 	struct bsg_job *bsg_job = sp->u.bsg_job;
 
 	/* Update entry type to indicate bidir command */
-	*((uint32_t *)(&cmd_pkt->entry_type)) =
-		cpu_to_le32(COMMAND_BIDIRECTIONAL);
+	put_unaligned_le32(COMMAND_BIDIRECTIONAL, &cmd_pkt->entry_type);
 
 	/* Set the transfer direction; in this case set both flags.
 	 * Also set the BD_WRAP_BACK flag; firmware will take care
@@ -3568,13 +3664,12 @@
 	 * are bundled in continuation iocb
 	 */
 	avail_dsds = 1;
-	cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
+	cur_dsd = &cmd_pkt->fcp_dsd;
 
 	index = 0;
 
 	for_each_sg(bsg_job->request_payload.sg_list, sg,
 				bsg_job->request_payload.sg_cnt, index) {
-		dma_addr_t sle_dma;
 		cont_a64_entry_t *cont_pkt;
 
 		/* Allocate additional continuation packets */
@@ -3583,14 +3678,11 @@
 			 * 5 DSDS
 			 */
 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
-			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
+			cur_dsd = cont_pkt->dsd;
 			avail_dsds = 5;
 			entry_count++;
 		}
-		sle_dma = sg_dma_address(sg);
-		*cur_dsd++   = cpu_to_le32(LSD(sle_dma));
-		*cur_dsd++   = cpu_to_le32(MSD(sle_dma));
-		*cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
+		append_dsd64(&cur_dsd, sg);
 		avail_dsds--;
 	}
 	/* For a read request the DSD always goes to a continuation IOCB
@@ -3600,7 +3692,6 @@
 	 */
 	for_each_sg(bsg_job->reply_payload.sg_list, sg,
 				bsg_job->reply_payload.sg_cnt, index) {
-		dma_addr_t sle_dma;
 		cont_a64_entry_t *cont_pkt;
 
 		/* Allocate additional continuation packets */
@@ -3609,14 +3700,11 @@
 			 * 5 DSDS
 			 */
 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
-			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
+			cur_dsd = cont_pkt->dsd;
 			avail_dsds = 5;
 			entry_count++;
 		}
-		sle_dma = sg_dma_address(sg);
-		*cur_dsd++   = cpu_to_le32(LSD(sle_dma));
-		*cur_dsd++   = cpu_to_le32(MSD(sle_dma));
-		*cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
+		append_dsd64(&cur_dsd, sg);
 		avail_dsds--;
 	}
 	/* This value should be same as number of IOCB required for this cmd */
@@ -3630,7 +3718,6 @@
 	struct qla_hw_data *ha = vha->hw;
 	unsigned long flags;
 	uint32_t handle;
-	uint32_t index;
 	uint16_t req_cnt;
 	uint16_t cnt;
 	uint32_t *clr_ptr;
@@ -3646,8 +3733,8 @@
 
 	/* Send marker if required */
 	if (vha->marker_needed != 0) {
-		if (qla2x00_marker(vha, req,
-			rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
+		if (qla2x00_marker(vha, ha->base_qpair,
+			0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
 			return EXT_STATUS_MAILBOX;
 		vha->marker_needed = 0;
 	}
@@ -3655,17 +3742,8 @@
 	/* Acquire ring specific lock */
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 
-	/* Check for room in outstanding command list. */
-	handle = req->current_outstanding_cmd;
-	for (index = 1; index < req->num_outstanding_cmds; index++) {
-		handle++;
-		if (handle == req->num_outstanding_cmds)
-			handle = 1;
-		if (!req->outstanding_cmds[handle])
-			break;
-	}
-
-	if (index == req->num_outstanding_cmds) {
+	handle = qla2xxx_get_next_handle(req);
+	if (handle == 0) {
 		rval = EXT_STATUS_BUSY;
 		goto queuing_error;
 	}
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 36cbb29..009fd5a 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -23,9 +23,17 @@
 static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
 	sts_entry_t *);
 
+const char *const port_state_str[] = {
+	"Unknown",
+	"UNCONFIGURED",
+	"DEAD",
+	"LOST",
+	"ONLINE"
+};
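/*
 * port_state_str[] is assumed to be indexed by the FCS_* port-state values,
 * with index 0 as a catch-all; e.g. port_state_str[FCS_ONLINE] in the
 * status-entry hunk further down prints "ONLINE".
 */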
+
 /**
  * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
- * @irq:
+ * @irq: interrupt number
  * @dev_id: SCSI driver HA context
  *
  * Called by system whenever the host adapter generates an interrupt.
@@ -41,7 +49,7 @@
 	int		status;
 	unsigned long	iter;
 	uint16_t	hccr;
-	uint16_t	mb[4];
+	uint16_t	mb[8];
 	struct rsp_que *rsp;
 	unsigned long	flags;
 
@@ -144,7 +152,7 @@
 
 /**
  * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
- * @irq:
+ * @irq: interrupt number
  * @dev_id: SCSI driver HA context
  *
  * Called by system whenever the host adapter generates an interrupt.
@@ -160,7 +168,7 @@
 	unsigned long	iter;
 	uint32_t	stat;
 	uint16_t	hccr;
-	uint16_t	mb[4];
+	uint16_t	mb[8];
 	struct rsp_que *rsp;
 	struct qla_hw_data *ha;
 	unsigned long	flags;
@@ -366,7 +374,7 @@
 	static const char *const link_speeds[] = {
 		"1", "2", "?", "4", "8", "16", "32", "10"
 	};
-#define	QLA_LAST_SPEED	7
+#define	QLA_LAST_SPEED (ARRAY_SIZE(link_speeds) - 1)
 
 	if (IS_QLA2100(ha) || IS_QLA2200(ha))
 		return link_speeds[0];
@@ -708,12 +716,15 @@
 		break;
 
 	case MBA_SYSTEM_ERR:		/* System Error */
-		mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ?
+		mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
+		    IS_QLA28XX(ha)) ?
 			RD_REG_WORD(&reg24->mailbox7) : 0;
 		ql_log(ql_log_warn, vha, 0x5003,
 		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
 		    "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
-
+		ha->fw_dump_mpi =
+		    (IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
+		    RD_REG_WORD(&reg24->mailbox7) & BIT_8;
 		ha->isp_ops->fw_dump(vha, 1);
 		ha->flags.fw_init_done = 0;
 		QLA_FW_STOPPED(ha);
@@ -765,7 +776,6 @@
 	case MBA_LOOP_INIT_ERR:
 		ql_log(ql_log_warn, vha, 0x5090,
 		    "LOOP INIT ERROR (%x).\n", mb[1]);
-		ha->isp_ops->fw_dump(vha, 1);
 		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
 		break;
 
@@ -834,8 +844,10 @@
 			 * Restore for Physical Port only
 			 */
 			if (!vha->vp_idx) {
-				if (ha->flags.fawwpn_enabled) {
+				if (ha->flags.fawwpn_enabled &&
+				    (ha->current_topology == ISP_CFG_F)) {
 					void *wwpn = ha->init_cb->port_name;
+
 					memcpy(vha->port_name, wwpn, WWN_SIZE);
 					fc_host_port_name(vha->host) =
 					    wwn_to_u64(vha->port_name);
@@ -1106,10 +1118,9 @@
 			struct event_arg ea;
 
 			memset(&ea, 0, sizeof(ea));
-			ea.event = FCME_RSCN;
 			ea.id.b24 = rscn_entry;
 			ea.id.b.rsvd_1 = rscn_entry >> 24;
-			qla2x00_fcport_event_handler(vha, &ea);
+			qla2x00_handle_rscn(vha, &ea);
 			qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
 		}
 		break;
@@ -1371,7 +1382,7 @@
 		    le16_to_cpu(mbx->status_flags));
 
 		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
-		    (uint8_t *)mbx, sizeof(*mbx));
+		    mbx, sizeof(*mbx));
 
 		goto logio_done;
 	}
@@ -1501,7 +1512,7 @@
 		    if (comp_status == CS_DATA_UNDERRUN) {
 			    res = DID_OK << 16;
 			    bsg_reply->reply_payload_rcv_len =
-				le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
+				le16_to_cpu(pkt->rsp_info_len);
 
 			    ql_log(ql_log_warn, vha, 0x5048,
 				"CT pass-through-%s error comp_status=0x%x total_byte=0x%x.\n",
@@ -1515,7 +1526,7 @@
 			    bsg_reply->reply_payload_rcv_len = 0;
 		    }
 		    ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
-			(uint8_t *)pkt, sizeof(*pkt));
+			pkt, sizeof(*pkt));
 	    } else {
 		    res = DID_OK << 16;
 		    bsg_reply->reply_payload_rcv_len =
@@ -1590,8 +1601,8 @@
 	}
 
 	comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
-	fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1);
-	fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2);
+	fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
+	fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);
 
 	if (iocb_type == ELS_IOCB_TYPE) {
 		els = &sp->u.iocb_cmd;
@@ -1612,7 +1623,7 @@
 				res = DID_ERROR << 16;
 			}
 		}
-		ql_log(ql_log_info, vha, 0x503f,
+		ql_dbg(ql_dbg_user, vha, 0x503f,
 		    "ELS IOCB Done -%s error hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n",
 		    type, sp->handle, comp_status, fw_status[1], fw_status[2],
 		    le16_to_cpu(((struct els_sts_entry_24xx *)
@@ -1655,7 +1666,7 @@
 		memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply),
 		       fw_status, sizeof(fw_status));
 		ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
-				(uint8_t *)pkt, sizeof(*pkt));
+		    pkt, sizeof(*pkt));
 	}
 	else {
 		res =  DID_OK << 16;
@@ -1699,7 +1710,7 @@
 		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
 		    logio->entry_status);
 		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
-		    (uint8_t *)logio, sizeof(*logio));
+		    logio, sizeof(*logio));
 
 		goto logio_done;
 	}
@@ -1714,6 +1725,15 @@
 
 		vha->hw->exch_starvation = 0;
 		data[0] = MBS_COMMAND_COMPLETE;
+
+		if (sp->type == SRB_PRLI_CMD) {
+			lio->u.logio.iop[0] =
+			    le32_to_cpu(logio->io_parameter[0]);
+			lio->u.logio.iop[1] =
+			    le32_to_cpu(logio->io_parameter[1]);
+			goto logio_done;
+		}
+
 		if (sp->type != SRB_LOGIN_CMD)
 			goto logio_done;
 
@@ -1836,8 +1856,8 @@
 	}
 
 	if (iocb->u.tmf.data != QLA_SUCCESS)
-		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
-		    (uint8_t *)sts, sizeof(*sts));
+		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, sp->vha, 0x5055,
+		    sts, sizeof(*sts));
 
 	sp->done(sp, 0);
 }
@@ -1850,11 +1870,12 @@
 	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
 	uint16_t        state_flags;
 	struct nvmefc_fcp_req *fd;
-	uint16_t        ret = 0;
+	uint16_t        ret = QLA_SUCCESS;
+	uint16_t	comp_status = le16_to_cpu(sts->comp_status);
 
 	iocb = &sp->u.iocb_cmd;
 	fcport = sp->fcport;
-	iocb->u.nvme.comp_status = le16_to_cpu(sts->comp_status);
+	iocb->u.nvme.comp_status = comp_status;
 	state_flags  = le16_to_cpu(sts->state_flags);
 	fd = iocb->u.nvme.desc;
 
@@ -1892,28 +1913,35 @@
 	fd->transferred_length = fd->payload_length -
 	    le32_to_cpu(sts->residual_len);
 
-	switch (le16_to_cpu(sts->comp_status)) {
+	if (unlikely(comp_status != CS_COMPLETE))
+		ql_log(ql_log_warn, fcport->vha, 0x5060,
+		    "NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x ox_id=%x\n",
+		    sp->name, sp->handle, comp_status,
+		    fd->transferred_length, le32_to_cpu(sts->residual_len),
+		    sts->ox_id);
+
+	/*
+	 * On a transport error, fail the request (the HBA rejected it);
+	 * otherwise the transport layer will handle the status.
+	 */
+	switch (comp_status) {
 	case CS_COMPLETE:
-		ret = QLA_SUCCESS;
 		break;
-	case CS_ABORTED:
+
 	case CS_RESET:
 	case CS_PORT_UNAVAILABLE:
 	case CS_PORT_LOGGED_OUT:
+		fcport->nvme_flag |= NVME_FLAG_RESETTING;
+		/* fall through */
+	case CS_ABORTED:
 	case CS_PORT_BUSY:
-		ql_log(ql_log_warn, fcport->vha, 0x5060,
-		    "NVME-%s ERR Handling - hdl=%x completion status(%x) resid=%x  ox_id=%x\n",
-		    sp->name, sp->handle, sts->comp_status,
-		    le32_to_cpu(sts->residual_len), sts->ox_id);
 		fd->transferred_length = 0;
 		iocb->u.nvme.rsp_pyld_len = 0;
 		ret = QLA_ABORTED;
 		break;
+	case CS_DATA_UNDERRUN:
+		break;
 	default:
-		ql_log(ql_log_warn, fcport->vha, 0x5060,
-		    "NVME-%s error - hdl=%x completion status(%x) resid=%x  ox_id=%x\n",
-		    sp->name, sp->handle, sts->comp_status,
-		    le32_to_cpu(sts->residual_len), sts->ox_id);
 		ret = QLA_FUNCTION_FAILED;
 		break;
 	}
@@ -1951,6 +1979,52 @@
 	sp->done(sp, rval);
 }
 
+/* Process a single response queue entry. */
+static void qla2x00_process_response_entry(struct scsi_qla_host *vha,
+					   struct rsp_que *rsp,
+					   sts_entry_t *pkt)
+{
+	sts21_entry_t *sts21_entry;
+	sts22_entry_t *sts22_entry;
+	uint16_t handle_cnt;
+	uint16_t cnt;
+
+	switch (pkt->entry_type) {
+	case STATUS_TYPE:
+		qla2x00_status_entry(vha, rsp, pkt);
+		break;
+	case STATUS_TYPE_21:
+		sts21_entry = (sts21_entry_t *)pkt;
+		handle_cnt = sts21_entry->handle_count;
+		for (cnt = 0; cnt < handle_cnt; cnt++)
+			qla2x00_process_completed_request(vha, rsp->req,
+						sts21_entry->handle[cnt]);
+		break;
+	case STATUS_TYPE_22:
+		sts22_entry = (sts22_entry_t *)pkt;
+		handle_cnt = sts22_entry->handle_count;
+		for (cnt = 0; cnt < handle_cnt; cnt++)
+			qla2x00_process_completed_request(vha, rsp->req,
+						sts22_entry->handle[cnt]);
+		break;
+	case STATUS_CONT_TYPE:
+		qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
+		break;
+	case MBX_IOCB_TYPE:
+		qla2x00_mbx_iocb_entry(vha, rsp->req, (struct mbx_entry *)pkt);
+		break;
+	case CT_IOCB_TYPE:
+		qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
+		break;
+	default:
+		/* Type Not Supported. */
+		ql_log(ql_log_warn, vha, 0x504a,
+		       "Received unknown response pkt type %x entry status=%x.\n",
+		       pkt->entry_type, pkt->entry_status);
+		break;
+	}
+}
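/*
 * Illustrative caller, matching the polling loop further down: each valid
 * entry is dispatched through the new helper:
 *
 *	qla2x00_process_response_entry(vha, rsp, pkt);
 *	((response_t *)pkt)->signature = RESPONSE_PROCESSED;
 *	wmb();
 */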
+
 /**
  * qla2x00_process_response_queue() - Process response queue entries.
  * @rsp: response queue
@@ -1962,8 +2036,6 @@
 	struct qla_hw_data *ha = rsp->hw;
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
 	sts_entry_t	*pkt;
-	uint16_t        handle_cnt;
-	uint16_t        cnt;
 
 	vha = pci_get_drvdata(ha->pdev);
 
@@ -1988,42 +2060,7 @@
 			continue;
 		}
 
-		switch (pkt->entry_type) {
-		case STATUS_TYPE:
-			qla2x00_status_entry(vha, rsp, pkt);
-			break;
-		case STATUS_TYPE_21:
-			handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
-			for (cnt = 0; cnt < handle_cnt; cnt++) {
-				qla2x00_process_completed_request(vha, rsp->req,
-				    ((sts21_entry_t *)pkt)->handle[cnt]);
-			}
-			break;
-		case STATUS_TYPE_22:
-			handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
-			for (cnt = 0; cnt < handle_cnt; cnt++) {
-				qla2x00_process_completed_request(vha, rsp->req,
-				    ((sts22_entry_t *)pkt)->handle[cnt]);
-			}
-			break;
-		case STATUS_CONT_TYPE:
-			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
-			break;
-		case MBX_IOCB_TYPE:
-			qla2x00_mbx_iocb_entry(vha, rsp->req,
-			    (struct mbx_entry *)pkt);
-			break;
-		case CT_IOCB_TYPE:
-			qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
-			break;
-		default:
-			/* Type Not Supported. */
-			ql_log(ql_log_warn, vha, 0x504a,
-			    "Received unknown response pkt type %x "
-			    "entry status=%x.\n",
-			    pkt->entry_type, pkt->entry_status);
-			break;
-		}
+		qla2x00_process_response_entry(vha, rsp, pkt);
 		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
 		wmb();
 	}
@@ -2218,10 +2255,8 @@
 	struct bsg_job *bsg_job = NULL;
 	struct fc_bsg_request *bsg_request;
 	struct fc_bsg_reply *bsg_reply;
-	sts_entry_t *sts;
-	struct sts_entry_24xx *sts24;
-	sts = (sts_entry_t *) pkt;
-	sts24 = (struct sts_entry_24xx *) pkt;
+	sts_entry_t *sts = pkt;
+	struct sts_entry_24xx *sts24 = pkt;
 
 	/* Validate handle. */
 	if (index >= req->num_outstanding_cmds) {
@@ -2367,8 +2402,8 @@
 	srb_t		*sp;
 	fc_port_t	*fcport;
 	struct scsi_cmnd *cp;
-	sts_entry_t *sts;
-	struct sts_entry_24xx *sts24;
+	sts_entry_t *sts = pkt;
+	struct sts_entry_24xx *sts24 = pkt;
 	uint16_t	comp_status;
 	uint16_t	scsi_status;
 	uint16_t	ox_id;
@@ -2386,8 +2421,6 @@
 	uint16_t state_flags = 0;
 	uint16_t retry_delay = 0;
 
-	sts = (sts_entry_t *) pkt;
-	sts24 = (struct sts_entry_24xx *) pkt;
 	if (IS_FWI2_CAPABLE(ha)) {
 		comp_status = le16_to_cpu(sts24->comp_status);
 		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
@@ -2687,7 +2720,7 @@
 				"Port to be marked lost on fcport=%02x%02x%02x, current "
 				"port state= %s comp_status %x.\n", fcport->d_id.b.domain,
 				fcport->d_id.b.area, fcport->d_id.b.al_pa,
-				port_state_str[atomic_read(&fcport->state)],
+				port_state_str[FCS_ONLINE],
 				comp_status);
 
 			qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
@@ -2717,6 +2750,17 @@
 			    cp->device->vendor);
 		break;
 
+	case CS_DMA:
+		ql_log(ql_log_info, fcport->vha, 0x3022,
+		    "CS_DMA error: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%06x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
+		    comp_status, scsi_status, res, vha->host_no,
+		    cp->device->id, cp->device->lun, fcport->d_id.b24,
+		    ox_id, cp->cmnd, scsi_bufflen(cp), rsp_info_len,
+		    resid_len, fw_resid_len, sp, cp);
+		ql_dump_buffer(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe0ee,
+		    pkt, sizeof(*sts24));
+		res = DID_ERROR << 16;
+		break;
 	default:
 		res = DID_ERROR << 16;
 		break;
@@ -2837,6 +2881,7 @@
 	case ELS_IOCB_TYPE:
 	case ABORT_IOCB_TYPE:
 	case MBX_IOCB_TYPE:
+	default:
 		sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
 		if (sp) {
 			sp->done(sp, res);
@@ -2847,7 +2892,6 @@
 	case ABTS_RESP_24XX:
 	case CTIO_TYPE7:
 	case CTIO_CRC2:
-	default:
 		return 1;
 	}
 fatal:
@@ -2985,7 +3029,8 @@
 			qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
 			break;
 		case ABTS_RECV_24XX:
-			if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+			if (IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
+			    IS_QLA28XX(ha)) {
 				/* ensure that the ATIO queue is empty */
 				qlt_handle_abts_recv(vha, rsp,
 				    (response_t *)pkt);
@@ -3043,6 +3088,7 @@
 	/* Adjust ring index */
 	if (IS_P3P_TYPE(ha)) {
 		struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
+
 		WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
 	} else {
 		WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
@@ -3058,7 +3104,7 @@
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 
 	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
-	    !IS_QLA27XX(ha))
+	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
 		return;
 
 	rval = QLA_SUCCESS;
@@ -3101,7 +3147,7 @@
 
 /**
  * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx.
- * @irq:
+ * @irq: interrupt number
  * @dev_id: SCSI driver HA context
  *
  * Called by system whenever the host adapter generates an interrupt.
@@ -3121,6 +3167,7 @@
 	uint16_t	mb[8];
 	struct rsp_que *rsp;
 	unsigned long	flags;
+	bool process_atio = false;
 
 	rsp = (struct rsp_que *) dev_id;
 	if (!rsp) {
@@ -3181,22 +3228,13 @@
 			qla24xx_process_response_queue(vha, rsp);
 			break;
 		case INTR_ATIO_QUE_UPDATE_27XX:
-		case INTR_ATIO_QUE_UPDATE:{
-			unsigned long flags2;
-			spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
-			qlt_24xx_process_atio_queue(vha, 1);
-			spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
+		case INTR_ATIO_QUE_UPDATE:
+			process_atio = true;
 			break;
-		}
-		case INTR_ATIO_RSP_QUE_UPDATE: {
-			unsigned long flags2;
-			spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
-			qlt_24xx_process_atio_queue(vha, 1);
-			spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
-
+		case INTR_ATIO_RSP_QUE_UPDATE:
+			process_atio = true;
 			qla24xx_process_response_queue(vha, rsp);
 			break;
-		}
 		default:
 			ql_dbg(ql_dbg_async, vha, 0x504f,
 			    "Unrecognized interrupt type (%d).\n", stat * 0xff);
@@ -3210,6 +3248,12 @@
 	qla2x00_handle_mbx_completion(ha, status);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
+	if (process_atio) {
+		spin_lock_irqsave(&ha->tgt.atio_lock, flags);
+		qlt_24xx_process_atio_queue(vha, 0);
+		spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
+	}
+
 	return IRQ_HANDLED;
 }
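
Both interrupt handlers now make the same move: inside the hardware_lock
loop they only record that ATIO work is pending, and the ATIO queue is
drained once after hardware_lock is released, so the two spinlocks are
never nested. A sketch of that defer-past-the-lock shape, with generic
names and pthread mutexes standing in for IRQ-safe spinlocks:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t atio_lock = PTHREAD_MUTEX_INITIALIZER;

    static void drain_atio_queue(void)
    {
    	printf("draining ATIO queue\n");
    }

    /* Handle one interrupt: note what was seen, defer the heavy work. */
    static void irq_handler(int stat)
    {
    	bool process_atio = false;

    	pthread_mutex_lock(&hw_lock);
    	if (stat == 1)          /* stand-in for INTR_ATIO_QUE_UPDATE */
    		process_atio = true;
    	pthread_mutex_unlock(&hw_lock);

    	/* Take the second lock only after the first is dropped. */
    	if (process_atio) {
    		pthread_mutex_lock(&atio_lock);
    		drain_atio_queue();
    		pthread_mutex_unlock(&atio_lock);
    	}
    }

    int main(void)
    {
    	irq_handler(1);
    	irq_handler(0);
    	return 0;
    }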
 
@@ -3256,6 +3300,7 @@
 	uint32_t	hccr;
 	uint16_t	mb[8];
 	unsigned long flags;
+	bool process_atio = false;
 
 	rsp = (struct rsp_que *) dev_id;
 	if (!rsp) {
@@ -3312,22 +3357,13 @@
 			qla24xx_process_response_queue(vha, rsp);
 			break;
 		case INTR_ATIO_QUE_UPDATE_27XX:
-		case INTR_ATIO_QUE_UPDATE:{
-			unsigned long flags2;
-			spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
-			qlt_24xx_process_atio_queue(vha, 1);
-			spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
+		case INTR_ATIO_QUE_UPDATE:
+			process_atio = true;
 			break;
-		}
-		case INTR_ATIO_RSP_QUE_UPDATE: {
-			unsigned long flags2;
-			spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
-			qlt_24xx_process_atio_queue(vha, 1);
-			spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
-
+		case INTR_ATIO_RSP_QUE_UPDATE:
+			process_atio = true;
 			qla24xx_process_response_queue(vha, rsp);
 			break;
-		}
 		default:
 			ql_dbg(ql_dbg_async, vha, 0x5051,
 			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
@@ -3338,6 +3374,12 @@
 	qla2x00_handle_mbx_completion(ha, status);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
+	if (process_atio) {
+		spin_lock_irqsave(&ha->tgt.atio_lock, flags);
+		qlt_24xx_process_atio_queue(vha, 0);
+		spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
+	}
+
 	return IRQ_HANDLED;
 }
 
@@ -3406,7 +3448,7 @@
 		min_vecs++;
 	}
 
-	if (USER_CTRL_IRQ(ha)) {
+	if (USER_CTRL_IRQ(ha) || !ha->mqiobase) {
 		/* user wants to control IRQ setting for target mode */
 		ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
 		    ha->msix_count, PCI_IRQ_MSIX);
@@ -3422,10 +3464,8 @@
 		    ha->msix_count, ret);
 		goto msix_out;
 	} else if (ret < ha->msix_count) {
-		ql_log(ql_log_warn, vha, 0x00c6,
-		    "MSI-X: Failed to enable support "
-		     "with %d vectors, using %d vectors.\n",
-		    ha->msix_count, ret);
+		ql_log(ql_log_info, vha, 0x00c6,
+		    "MSI-X: Using %d vectors\n", ret);
 		ha->msix_count = ret;
 		/* Recalculate queue values */
 		if (ha->mqiobase && (ql2xmqsupport || ql2xnvmeenable)) {
@@ -3442,6 +3482,7 @@
 			    "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs);
 		}
 	}
+	vha->irq_offset = desc.pre_vectors;
 	ha->msix_entries = kcalloc(ha->msix_count,
 				   sizeof(struct qla_msix_entry),
 				   GFP_KERNEL);
@@ -3449,7 +3490,7 @@
 		ql_log(ql_log_fatal, vha, 0x00c8,
 		    "Failed to allocate memory for ha->msix_entries.\n");
 		ret = -ENOMEM;
-		goto msix_out;
+		goto free_irqs;
 	}
 	ha->flags.msix_enabled = 1;
 
@@ -3513,7 +3554,7 @@
 	}
 
 	/* Enable MSI-X vector for response queue update for queue 0 */
-	if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
 		if (ha->msixbase && ha->mqiobase &&
 		    (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
 		     ql2xmqsupport))
@@ -3532,6 +3573,10 @@
 
 msix_out:
 	return ret;
+
+free_irqs:
+	pci_free_irq_vectors(ha->pdev);
+	goto msix_out;
 }
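
The new free_irqs label gives the allocation-failure path a place to
release the vectors obtained earlier before rejoining the common msix_out
exit. In miniature, the usual layered-goto unwind looks like this (generic
resources, not the driver's):

    #include <stdio.h>
    #include <stdlib.h>

    static void *vectors, *entries;

    /* Unwind in reverse order; each label frees what was already held. */
    static int setup(void)
    {
    	vectors = malloc(64);
    	if (!vectors)
    		goto fail;

    	entries = calloc(16, sizeof(int));
    	if (!entries)
    		goto free_vectors;

    	return 0;

    free_vectors:
    	free(vectors);
    	vectors = NULL;
    fail:
    	return -1;
    }

    int main(void)
    {
    	if (setup())
    		return 1;
    	printf("setup complete\n");
    	free(entries);
    	free(vectors);
    	return 0;
    }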
 
 int
@@ -3544,7 +3589,7 @@
 	/* If possible, enable MSI-X. */
 	if (ql2xenablemsix == 0 || (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
 	    !IS_QLA8432(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) &&
-	    !IS_QLAFX00(ha) && !IS_QLA27XX(ha)))
+	    !IS_QLAFX00(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)))
 		goto skip_msi;
 
 	if (ql2xenablemsix == 2)
@@ -3583,7 +3628,7 @@
 
 	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
 	    !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
-	    !IS_QLA27XX(ha))
+	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
 		goto skip_msi;
 
 	ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 84f57f0..4a1f21c 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -60,6 +60,7 @@
 	{ MBC_GET_ADAPTER_LOOP_ID },
 	{ MBC_READ_SFP },
 	{ MBC_GET_RNID_PARAMS },
+	{ MBC_GET_SET_ZIO_THRESHOLD },
 };
 
 static int is_rom_cmd(uint16_t cmd)
@@ -189,7 +190,7 @@
 		goto premature_exit;
 	}
 
-	ha->flags.mbox_busy = 1;
+
 	/* Save mailbox command for debug */
 	ha->mcp = mcp;
 
@@ -198,12 +199,13 @@
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 
-	if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) {
+	if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
+	    ha->flags.mbox_busy) {
 		rval = QLA_ABORTED;
-		ha->flags.mbox_busy = 0;
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 		goto premature_exit;
 	}
+	ha->flags.mbox_busy = 1;
 
 	/* Load mailbox registers. */
 	if (IS_P3P_TYPE(ha))
@@ -251,20 +253,9 @@
 	if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
 		set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
 
-		if (IS_P3P_TYPE(ha)) {
-			if (RD_REG_DWORD(&reg->isp82.hint) &
-				HINT_MBX_INT_PENDING) {
-				spin_unlock_irqrestore(&ha->hardware_lock,
-					flags);
-				ha->flags.mbox_busy = 0;
-				atomic_dec(&ha->num_pend_mbx_stage2);
-				ql_dbg(ql_dbg_mbx, vha, 0x1010,
-				    "Pending mailbox timeout, exiting.\n");
-				rval = QLA_FUNCTION_TIMEOUT;
-				goto premature_exit;
-			}
+		if (IS_P3P_TYPE(ha))
 			WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
-		} else if (IS_FWI2_CAPABLE(ha))
+		else if (IS_FWI2_CAPABLE(ha))
 			WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
 		else
 			WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
@@ -274,6 +265,16 @@
 		atomic_inc(&ha->num_pend_mbx_stage3);
 		if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
 		    mcp->tov * HZ)) {
+			if (chip_reset != ha->chip_reset) {
+				spin_lock_irqsave(&ha->hardware_lock, flags);
+				ha->flags.mbox_busy = 0;
+				spin_unlock_irqrestore(&ha->hardware_lock,
+				    flags);
+				atomic_dec(&ha->num_pend_mbx_stage2);
+				atomic_dec(&ha->num_pend_mbx_stage3);
+				rval = QLA_ABORTED;
+				goto premature_exit;
+			}
 			ql_dbg(ql_dbg_mbx, vha, 0x117a,
 			    "cmd=%x Timeout.\n", command);
 			spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -282,7 +283,9 @@
 
 		} else if (ha->flags.purge_mbox ||
 		    chip_reset != ha->chip_reset) {
+			spin_lock_irqsave(&ha->hardware_lock, flags);
 			ha->flags.mbox_busy = 0;
+			spin_unlock_irqrestore(&ha->hardware_lock, flags);
 			atomic_dec(&ha->num_pend_mbx_stage2);
 			atomic_dec(&ha->num_pend_mbx_stage3);
 			rval = QLA_ABORTED;
@@ -300,9 +303,9 @@
 		if (IS_P3P_TYPE(ha)) {
 			if (RD_REG_DWORD(&reg->isp82.hint) &
 				HINT_MBX_INT_PENDING) {
+				ha->flags.mbox_busy = 0;
 				spin_unlock_irqrestore(&ha->hardware_lock,
 					flags);
-				ha->flags.mbox_busy = 0;
 				atomic_dec(&ha->num_pend_mbx_stage2);
 				ql_dbg(ql_dbg_mbx, vha, 0x1012,
 				    "Pending mailbox timeout, exiting.\n");
@@ -320,7 +323,10 @@
 		while (!ha->flags.mbox_int) {
 			if (ha->flags.purge_mbox ||
 			    chip_reset != ha->chip_reset) {
+				spin_lock_irqsave(&ha->hardware_lock, flags);
 				ha->flags.mbox_busy = 0;
+				spin_unlock_irqrestore(&ha->hardware_lock,
+				    flags);
 				atomic_dec(&ha->num_pend_mbx_stage2);
 				rval = QLA_ABORTED;
 				goto premature_exit;
@@ -363,7 +369,10 @@
 		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
 
 		if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
+			spin_lock_irqsave(&ha->hardware_lock, flags);
 			ha->flags.mbox_busy = 0;
+			spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
 			/* Setting Link-Down error */
 			mcp->mb[0] = MBS_LINK_DOWN_ERROR;
 			ha->mcp = NULL;
@@ -373,8 +382,12 @@
 			goto premature_exit;
 		}
 
-		if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE)
+		if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE) {
+			ql_dbg(ql_dbg_mbx, vha, 0x11ff,
+			       "mb_out[0] = %#x <> %#x\n", ha->mailbox_out[0],
+			       MBS_COMMAND_COMPLETE);
 			rval = QLA_FUNCTION_FAILED;
+		}
 
 		/* Load return mailbox registers. */
 		iptr2 = mcp->mb;
@@ -436,7 +449,10 @@
 				 * then only PCI ERR flag would be set.
 				 * we will do premature exit for above case.
 				 */
+				spin_lock_irqsave(&ha->hardware_lock, flags);
 				ha->flags.mbox_busy = 0;
+				spin_unlock_irqrestore(&ha->hardware_lock,
+				    flags);
 				rval = QLA_FUNCTION_TIMEOUT;
 				goto premature_exit;
 			}
@@ -451,8 +467,9 @@
 			rval = QLA_FUNCTION_TIMEOUT;
 		 }
 	}
-
+	spin_lock_irqsave(&ha->hardware_lock, flags);
 	ha->flags.mbox_busy = 0;
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 	/* Clean up */
 	ha->mcp = NULL;
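
The common thread in these qla_mbx.c hunks: every transition of
ha->flags.mbox_busy now happens under hardware_lock, and the busy check is
folded into the same critical section that sets the flag, so two
submitters can no longer both observe it clear. A sketch of the
test-and-set-under-one-lock idiom, assuming a plain mutex instead of an
IRQ-safe spinlock:

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool mbox_busy;

    /* Test-and-set under one lock: only one submitter can win. */
    static int mailbox_begin(void)
    {
    	pthread_mutex_lock(&hw_lock);
    	if (mbox_busy) {
    		pthread_mutex_unlock(&hw_lock);
    		return -EBUSY;
    	}
    	mbox_busy = true;
    	pthread_mutex_unlock(&hw_lock);
    	return 0;
    }

    static void mailbox_end(void)
    {
    	pthread_mutex_lock(&hw_lock);
    	mbox_busy = false;
    	pthread_mutex_unlock(&hw_lock);
    }

    int main(void)
    {
    	if (mailbox_begin() == 0) {
    		printf("mailbox command issued\n");
    		mailbox_end();
    	}
    	return 0;
    }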
@@ -542,9 +559,9 @@
 		    mcp->mb[0]);
 	} else if (rval) {
 		if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
-			pr_warn("%s [%s]-%04x:%ld: **** Failed", QL_MSGHDR,
+			pr_warn("%s [%s]-%04x:%ld: **** Failed=%x", QL_MSGHDR,
 			    dev_name(&ha->pdev->dev), 0x1020+0x800,
-			    vha->host_no);
+			    vha->host_no, rval);
 			mboxes = mcp->in_mb;
 			cnt = 4;
 			for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1)
@@ -609,14 +626,15 @@
 		mcp->out_mb |= MBX_4;
 	}
 
-	mcp->in_mb = MBX_0;
+	mcp->in_mb = MBX_1|MBX_0;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
 	rval = qla2x00_mailbox_command(vha, mcp);
 
 	if (rval != QLA_SUCCESS) {
 		ql_dbg(ql_dbg_mbx, vha, 0x1023,
-		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
+		    rval, mcp->mb[0], mcp->mb[1]);
 	} else {
 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
 		    "Done %s.\n", __func__);
@@ -631,7 +649,7 @@
 {
 	uint16_t mb4 = BIT_0;
 
-	if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
 		mb4 |= ha->long_range_distance << LR_DIST_FW_POS;
 
 	return mb4;
@@ -641,7 +659,7 @@
 {
 	uint16_t mb4 = BIT_0;
 
-	if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
 		struct nvram_81xx *nv = ha->nvram;
 
 		mb4 |= LR_DIST_FW_FIELD(nv->enhanced_features);
@@ -684,9 +702,10 @@
 		mcp->mb[2] = LSW(risc_addr);
 		mcp->mb[3] = 0;
 		mcp->mb[4] = 0;
+		mcp->mb[11] = 0;
 		ha->flags.using_lr_setting = 0;
 		if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
-		    IS_QLA27XX(ha)) {
+		    IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
 			if (ql2xautodetectsfp) {
 				if (ha->flags.detected_lr_sfp) {
 					mcp->mb[4] |=
@@ -705,19 +724,20 @@
 			}
 		}
 
-		if (ql2xnvmeenable && IS_QLA27XX(ha))
+		if (ql2xnvmeenable && (IS_QLA27XX(ha) || IS_QLA28XX(ha)))
 			mcp->mb[4] |= NVME_ENABLE_FLAG;
 
-		if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
 			struct nvram_81xx *nv = ha->nvram;
 			/* set minimum speed if specified in nvram */
-			if (nv->min_link_speed >= 2 &&
-			    nv->min_link_speed <= 5) {
+			if (nv->min_supported_speed >= 2 &&
+			    nv->min_supported_speed <= 5) {
 				mcp->mb[4] |= BIT_4;
-				mcp->mb[11] = nv->min_link_speed;
+				mcp->mb[11] |= nv->min_supported_speed & 0xF;
 				mcp->out_mb |= MBX_11;
 				mcp->in_mb |= BIT_5;
-				vha->min_link_speed_feat = nv->min_link_speed;
+				vha->min_supported_speed =
+				    nv->min_supported_speed;
 			}
 		}
 
@@ -727,7 +747,7 @@
 		if (ha->flags.exchoffld_enabled)
 			mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;
 
-		mcp->out_mb |= MBX_4|MBX_3|MBX_2|MBX_1;
+		mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11;
 		mcp->in_mb |= MBX_3 | MBX_2 | MBX_1;
 	} else {
 		mcp->mb[1] = LSW(risc_addr);
@@ -745,34 +765,39 @@
 	if (rval != QLA_SUCCESS) {
 		ql_dbg(ql_dbg_mbx, vha, 0x1026,
 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
-	} else {
-		if (IS_FWI2_CAPABLE(ha)) {
-			ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2];
-			ql_dbg(ql_dbg_mbx, vha, 0x119a,
-			    "fw_ability_mask=%x.\n", ha->fw_ability_mask);
-			ql_dbg(ql_dbg_mbx, vha, 0x1027,
-			    "exchanges=%x.\n", mcp->mb[1]);
-			if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
-				ha->max_speed_sup = mcp->mb[2] & BIT_0;
-				ql_dbg(ql_dbg_mbx, vha, 0x119b,
-				    "Maximum speed supported=%s.\n",
-				    ha->max_speed_sup ? "32Gps" : "16Gps");
-				if (vha->min_link_speed_feat) {
-					ha->min_link_speed = mcp->mb[5];
-					ql_dbg(ql_dbg_mbx, vha, 0x119c,
-					    "Minimum speed set=%s.\n",
-					    mcp->mb[5] == 5 ? "32Gps" :
-					    mcp->mb[5] == 4 ? "16Gps" :
-					    mcp->mb[5] == 3 ? "8Gps" :
-					    mcp->mb[5] == 2 ? "4Gps" :
-						"unknown");
-				}
-			}
-		}
-		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
-		    "Done.\n");
+		return rval;
 	}
 
+	if (!IS_FWI2_CAPABLE(ha))
+		goto done;
+
+	ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2];
+	ql_dbg(ql_dbg_mbx, vha, 0x119a,
+	    "fw_ability_mask=%x.\n", ha->fw_ability_mask);
+	ql_dbg(ql_dbg_mbx, vha, 0x1027, "exchanges=%x.\n", mcp->mb[1]);
+	if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+		ha->max_supported_speed = mcp->mb[2] & (BIT_0|BIT_1);
+		ql_dbg(ql_dbg_mbx, vha, 0x119b, "max_supported_speed=%s.\n",
+		    ha->max_supported_speed == 0 ? "16Gps" :
+		    ha->max_supported_speed == 1 ? "32Gps" :
+		    ha->max_supported_speed == 2 ? "64Gps" : "unknown");
+		if (vha->min_supported_speed) {
+			ha->min_supported_speed = mcp->mb[5] &
+			    (BIT_0 | BIT_1 | BIT_2);
+			ql_dbg(ql_dbg_mbx, vha, 0x119c,
+			    "min_supported_speed=%s.\n",
+			    ha->min_supported_speed == 6 ? "64Gps" :
+			    ha->min_supported_speed == 5 ? "32Gps" :
+			    ha->min_supported_speed == 4 ? "16Gps" :
+			    ha->min_supported_speed == 3 ? "8Gps" :
+			    ha->min_supported_speed == 2 ? "4Gps" : "unknown");
+		}
+	}
+
+done:
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
+	    "Done %s.\n", __func__);
+
 	return rval;
 }
 
@@ -1028,10 +1053,10 @@
 		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
 	if (IS_FWI2_CAPABLE(ha))
 		mcp->in_mb |= MBX_17|MBX_16|MBX_15;
-	if (IS_QLA27XX(ha))
+	if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
 		mcp->in_mb |=
 		    MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18|
-		    MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8;
+		    MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7;
 
 	mcp->flags = 0;
 	mcp->tov = MBX_TOV_SECONDS;
@@ -1084,7 +1109,12 @@
 		 * FW supports nvme and driver load parameter requested nvme.
 		 * BIT 26 of fw_attributes indicates NVMe support.
 		 */
-		if ((ha->fw_attributes_h & 0x400) && ql2xnvmeenable) {
+		if ((ha->fw_attributes_h &
+		    (FW_ATTR_H_NVME | FW_ATTR_H_NVME_UPDATED)) &&
+			ql2xnvmeenable) {
+			if (ha->fw_attributes_h & FW_ATTR_H_NVME_FBURST)
+				vha->flags.nvme_first_burst = 1;
+
 			vha->flags.nvme_enabled = 1;
 			ql_log(ql_log_info, vha, 0xd302,
 			    "%s: FC-NVMe is Enabled (0x%x)\n",
@@ -1092,7 +1122,10 @@
 		}
 	}
 
-	if (IS_QLA27XX(ha)) {
+	if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+		ha->serdes_version[0] = mcp->mb[7] & 0xff;
+		ha->serdes_version[1] = mcp->mb[8] >> 8;
+		ha->serdes_version[2] = mcp->mb[8] & 0xff;
 		ha->mpi_version[0] = mcp->mb[10] & 0xff;
 		ha->mpi_version[1] = mcp->mb[11] >> 8;
 		ha->mpi_version[2] = mcp->mb[11] & 0xff;
@@ -1103,6 +1136,13 @@
 		ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
 		ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
 		ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
+		if (IS_QLA28XX(ha)) {
+			if (mcp->mb[16] & BIT_10) {
+				ql_log(ql_log_info, vha, 0xffff,
+				    "FW supports secure flash updates\n");
+				ha->flags.secure_fw = 1;
+			}
+		}
 	}
 
 failed:
@@ -1483,17 +1523,12 @@
 	mbx_cmd_t  mc;
 	mbx_cmd_t  *mcp = &mc;
 	scsi_qla_host_t *vha;
-	struct req_que *req;
-	struct rsp_que *rsp;
 
-	l = l;
 	vha = fcport->vha;
 
 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
 	    "Entered %s.\n", __func__);
 
-	req = vha->hw->req_q_map[0];
-	rsp = req->rsp;
 	mcp->mb[0] = MBC_ABORT_TARGET;
 	mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
 	if (HAS_EXTENDED_IDS(vha->hw)) {
@@ -1516,7 +1551,7 @@
 	}
 
 	/* Issue marker IOCB. */
-	rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, 0,
+	rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, 0,
 							MK_SYNC_ID);
 	if (rval2 != QLA_SUCCESS) {
 		ql_dbg(ql_dbg_mbx, vha, 0x1040,
@@ -1536,16 +1571,12 @@
 	mbx_cmd_t  mc;
 	mbx_cmd_t  *mcp = &mc;
 	scsi_qla_host_t *vha;
-	struct req_que *req;
-	struct rsp_que *rsp;
 
 	vha = fcport->vha;
 
 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
 	    "Entered %s.\n", __func__);
 
-	req = vha->hw->req_q_map[0];
-	rsp = req->rsp;
 	mcp->mb[0] = MBC_LUN_RESET;
 	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
 	if (HAS_EXTENDED_IDS(vha->hw))
@@ -1565,7 +1596,7 @@
 	}
 
 	/* Issue marker IOCB. */
-	rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
+	rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, l,
 								MK_SYNC_ID_LUN);
 	if (rval2 != QLA_SUCCESS) {
 		ql_dbg(ql_dbg_mbx, vha, 0x1044,
@@ -1617,7 +1648,7 @@
 		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
 	if (IS_FWI2_CAPABLE(vha->hw))
 		mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
-	if (IS_QLA27XX(vha->hw))
+	if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw))
 		mcp->in_mb |= MBX_15;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
@@ -1671,7 +1702,7 @@
 			}
 		}
 
-		if (IS_QLA27XX(vha->hw))
+		if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw))
 			vha->bbcr = mcp->mb[15];
 	}
 
@@ -1787,7 +1818,7 @@
 	}
 	/* 1 and 2 should normally be captured. */
 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
-	if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
 		/* mb3 is additional info about the installed SFP. */
 		mcp->in_mb  |= MBX_3;
 	mcp->buf_size = size;
@@ -1798,10 +1829,20 @@
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
 		ql_dbg(ql_dbg_mbx, vha, 0x104d,
-		    "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x,.\n",
+		    "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x.\n",
 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
+		if (ha->init_cb) {
+			ql_dbg(ql_dbg_mbx, vha, 0x104d, "init_cb:\n");
+			ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
+			    0x0104d, ha->init_cb, sizeof(*ha->init_cb));
+		}
+		if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
+			ql_dbg(ql_dbg_mbx, vha, 0x104d, "ex_init_cb:\n");
+			ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
+			    0x0104d, ha->ex_init_cb, sizeof(*ha->ex_init_cb));
+		}
 	} else {
-		if (IS_QLA27XX(ha)) {
+		if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
 			if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
 				ql_dbg(ql_dbg_mbx, vha, 0x119d,
 				    "Invalid SFP/Validation Failed\n");
@@ -1985,7 +2026,7 @@
 
 		/* Passback COS information. */
 		fcport->supported_classes = (pd->options & BIT_4) ?
-		    FC_COS_CLASS2: FC_COS_CLASS3;
+		    FC_COS_CLASS2 : FC_COS_CLASS3;
 	}
 
 gpd_error_out:
@@ -2055,7 +2096,7 @@
 		/*EMPTY*/
 		ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
 	} else {
-		if (IS_QLA27XX(ha)) {
+		if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
 			if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
 				ql_dbg(ql_dbg_mbx, vha, 0x119e,
 				    "Invalid SFP/Validation Failed\n");
@@ -2209,7 +2250,7 @@
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105a,
+	ql_dbg(ql_dbg_disc, vha, 0x105a,
 	    "Entered %s.\n", __func__);
 
 	if (IS_CNA_CAPABLE(vha->hw)) {
@@ -2220,10 +2261,7 @@
 		mcp->out_mb = MBX_2|MBX_1|MBX_0;
 	} else if (IS_FWI2_CAPABLE(vha->hw)) {
 		mcp->mb[0] = MBC_LIP_FULL_LOGIN;
-		if (N2N_TOPO(vha->hw))
-			mcp->mb[1] = BIT_4; /* re-init */
-		else
-			mcp->mb[1] = BIT_6; /* LIP */
+		mcp->mb[1] = BIT_4;
 		mcp->mb[2] = 0;
 		mcp->mb[3] = vha->hw->loop_reset_delay;
 		mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
@@ -2733,7 +2771,7 @@
 	    "Entered %s.\n", __func__);
 
 	mcp->mb[0] = MBC_LIP_FULL_LOGIN;
-	mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0;
+	mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_4 : 0;
 	mcp->mb[2] = 0;
 	mcp->mb[3] = 0;
 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
@@ -2841,7 +2879,8 @@
 	mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
 	mcp->out_mb = MBX_0;
 	mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
-	if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) || IS_QLA27XX(vha->hw))
+	if (IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
+	    IS_QLA27XX(ha) || IS_QLA28XX(ha))
 		mcp->in_mb |= MBX_12;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
@@ -2866,7 +2905,8 @@
 		ha->orig_fw_iocb_count = mcp->mb[10];
 		if (ha->flags.npiv_supported)
 			ha->max_npiv_vports = mcp->mb[11];
-		if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
+		if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
+		    IS_QLA28XX(ha))
 			ha->fw_max_fcf_count = mcp->mb[12];
 	}
 
@@ -3072,22 +3112,25 @@
 	struct scsi_qla_host *vha = fcport->vha;
 	struct qla_hw_data *ha = vha->hw;
 	struct req_que *req = vha->req;
+	struct qla_qpair *qpair = sp->qpair;
 
 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
 	    "Entered %s.\n", __func__);
 
 	if (vha->flags.qpairs_available && sp->qpair)
 		req = sp->qpair->req;
+	else
+		return QLA_FUNCTION_FAILED;
 
 	if (ql2xasynctmfenable)
 		return qla24xx_async_abort_command(sp);
 
-	spin_lock_irqsave(&ha->hardware_lock, flags);
+	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
 	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
 		if (req->outstanding_cmds[handle] == sp)
 			break;
 	}
-	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
 	if (handle == req->num_outstanding_cmds) {
 		/* Command not found. */
 		return QLA_FUNCTION_FAILED;
@@ -3157,7 +3200,6 @@
 	scsi_qla_host_t *vha;
 	struct qla_hw_data *ha;
 	struct req_que *req;
-	struct rsp_que *rsp;
 	struct qla_qpair *qpair;
 
 	vha = fcport->vha;
@@ -3170,10 +3212,7 @@
 	if (vha->vp_idx && vha->qpair) {
 		/* NPIV port */
 		qpair = vha->qpair;
-		rsp = qpair->rsp;
 		req = qpair->req;
-	} else {
-		rsp = req->rsp;
 	}
 
 	tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
@@ -3230,8 +3269,8 @@
 	}
 
 	/* Issue marker IOCB. */
-	rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
-	    type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID);
+	rval2 = qla2x00_marker(vha, ha->base_qpair, fcport->loop_id, l,
+	    type == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
 	if (rval2 != QLA_SUCCESS) {
 		ql_dbg(ql_dbg_mbx, vha, 0x1099,
 		    "Failed to issue marker IOCB (%x).\n", rval2);
@@ -3306,7 +3345,7 @@
 	mbx_cmd_t *mcp = &mc;
 
 	if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
-	    !IS_QLA27XX(vha->hw))
+	    !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182,
@@ -3345,7 +3384,7 @@
 	mbx_cmd_t *mcp = &mc;
 
 	if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
-	    !IS_QLA27XX(vha->hw))
+	    !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185,
@@ -3451,9 +3490,9 @@
 /**
  * qla2x00_set_serdes_params() -
  * @vha: HA context
- * @sw_em_1g:
- * @sw_em_2g:
- * @sw_em_4g:
+ * @sw_em_1g: serial link options
+ * @sw_em_2g: serial link options
+ * @sw_em_4g: serial link options
  *
  * Returns
  */
@@ -3614,7 +3653,8 @@
 	    "Entered %s.\n", __func__);
 
 	if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
-	    !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
+	    !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
+	    !IS_QLA28XX(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
 	if (unlikely(pci_channel_offline(vha->hw->pdev)))
@@ -3727,7 +3767,7 @@
 	rval = qla2x00_mailbox_command(vha, mcp);
 
 	/* Return mailbox statuses. */
-	if (mb != NULL) {
+	if (mb) {
 		mb[0] = mcp->mb[0];
 		mb[1] = mcp->mb[1];
 		mb[3] = mcp->mb[3];
@@ -3762,7 +3802,7 @@
 	mcp->mb[0] = MBC_PORT_PARAMS;
 	mcp->mb[1] = loop_id;
 	mcp->mb[2] = BIT_0;
-	mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
+	mcp->mb[3] = port_speed & 0x3F;
 	mcp->mb[9] = vha->vp_idx;
 	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
 	mcp->in_mb = MBX_3|MBX_1|MBX_0;
@@ -3771,7 +3811,7 @@
 	rval = qla2x00_mailbox_command(vha, mcp);
 
 	/* Return mailbox statuses. */
-	if (mb != NULL) {
+	if (mb) {
 		mb[0] = mcp->mb[0];
 		mb[1] = mcp->mb[1];
 		mb[3] = mcp->mb[3];
@@ -3844,14 +3884,24 @@
 		case TOPO_N2N:
 			ha->current_topology = ISP_CFG_N;
 			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+			list_for_each_entry(fcport, &vha->vp_fcports, list) {
+				fcport->scan_state = QLA_FCPORT_SCAN;
+				fcport->n2n_flag = 0;
+			}
+
 			fcport = qla2x00_find_fcport_by_wwpn(vha,
 			    rptid_entry->u.f1.port_name, 1);
 			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
 
 			if (fcport) {
 				fcport->plogi_nack_done_deadline = jiffies + HZ;
-				fcport->dm_login_expire = jiffies + 3*HZ;
+				fcport->dm_login_expire = jiffies + 2*HZ;
 				fcport->scan_state = QLA_FCPORT_FOUND;
+				fcport->n2n_flag = 1;
+				fcport->keep_nport_handle = 1;
+				if (vha->flags.nvme_enabled)
+					fcport->fc4f_nvme = 1;
+
 				switch (fcport->disc_state) {
 				case DSC_DELETED:
 					set_bit(RELOGIN_NEEDED,
@@ -3885,7 +3935,7 @@
 				    rptid_entry->u.f1.port_name,
 				    rptid_entry->u.f1.node_name,
 				    NULL,
-				    FC4_TYPE_UNKNOWN);
+				    FS_FCP_IS_N2N);
 			}
 
 			/* if our portname is higher then initiate N2N login */
@@ -3984,6 +4034,7 @@
 
 		list_for_each_entry(fcport, &vha->vp_fcports, list) {
 			fcport->scan_state = QLA_FCPORT_SCAN;
+			fcport->n2n_flag = 0;
 		}
 
 		fcport = qla2x00_find_fcport_by_wwpn(vha,
@@ -3993,6 +4044,14 @@
 			fcport->login_retry = vha->hw->login_retry_count;
 			fcport->plogi_nack_done_deadline = jiffies + HZ;
 			fcport->scan_state = QLA_FCPORT_FOUND;
+			fcport->keep_nport_handle = 1;
+			fcport->n2n_flag = 1;
+			fcport->d_id.b.domain =
+				rptid_entry->u.f2.remote_nport_id[2];
+			fcport->d_id.b.area =
+				rptid_entry->u.f2.remote_nport_id[1];
+			fcport->d_id.b.al_pa =
+				rptid_entry->u.f2.remote_nport_id[0];
 		}
 	}
 }
@@ -4213,7 +4272,7 @@
 		ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c,
 		    "Dump of Verify Request.\n");
 		ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e,
-		    (uint8_t *)mn, sizeof(*mn));
+		    mn, sizeof(*mn));
 
 		rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
 		if (rval != QLA_SUCCESS) {
@@ -4225,7 +4284,7 @@
 		ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110,
 		    "Dump of Verify Response.\n");
 		ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118,
-		    (uint8_t *)mn, sizeof(*mn));
+		    mn, sizeof(*mn));
 
 		status[0] = le16_to_cpu(mn->p.rsp.comp_status);
 		status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
@@ -4301,7 +4360,7 @@
 	mcp->mb[12] = req->qos;
 	mcp->mb[11] = req->vp_idx;
 	mcp->mb[13] = req->rid;
-	if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
 		mcp->mb[15] = 0;
 
 	mcp->mb[4] = req->id;
@@ -4315,9 +4374,10 @@
 	mcp->flags = MBX_DMA_OUT;
 	mcp->tov = MBX_TOV_SECONDS * 2;
 
-	if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
+	if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
+	    IS_QLA28XX(ha))
 		mcp->in_mb |= MBX_1;
-	if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
 		mcp->out_mb |= MBX_15;
 		/* debug q create issue in SR-IOV */
 		mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
@@ -4326,7 +4386,7 @@
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	if (!(req->options & BIT_0)) {
 		WRT_REG_DWORD(req->req_q_in, 0);
-		if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
+		if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
 			WRT_REG_DWORD(req->req_q_out, 0);
 	}
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -4370,7 +4430,7 @@
 	mcp->mb[5] = rsp->length;
 	mcp->mb[14] = rsp->msix->entry;
 	mcp->mb[13] = rsp->rid;
-	if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
 		mcp->mb[15] = 0;
 
 	mcp->mb[4] = rsp->id;
@@ -4387,7 +4447,7 @@
 	if (IS_QLA81XX(ha)) {
 		mcp->out_mb |= MBX_12|MBX_11|MBX_10;
 		mcp->in_mb |= MBX_1;
-	} else if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+	} else if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
 		mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
 		mcp->in_mb |= MBX_1;
 		/* debug q create issue in SR-IOV */
@@ -4397,7 +4457,7 @@
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	if (!(rsp->options & BIT_0)) {
 		WRT_REG_DWORD(rsp->rsp_q_out, 0);
-		if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
+		if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
 			WRT_REG_DWORD(rsp->rsp_q_in, 0);
 	}
 
@@ -4455,7 +4515,7 @@
 	    "Entered %s.\n", __func__);
 
 	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
-	    !IS_QLA27XX(vha->hw))
+	    !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
 	mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
@@ -4487,7 +4547,7 @@
 	mbx_cmd_t *mcp = &mc;
 
 	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
-	    !IS_QLA27XX(vha->hw))
+	    !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
@@ -4522,7 +4582,7 @@
 	mbx_cmd_t *mcp = &mc;
 
 	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
-	    !IS_QLA27XX(vha->hw))
+	    !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
@@ -4553,6 +4613,42 @@
 }
 
 int
+qla81xx_fac_semaphore_access(scsi_qla_host_t *vha, int lock)
+{
+	int rval = QLA_SUCCESS;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+		return rval;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
+	    "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
+	mcp->mb[1] = (lock ? FAC_OPT_CMD_LOCK_SEMAPHORE :
+	    FAC_OPT_CMD_UNLOCK_SEMAPHORE);
+	mcp->out_mb = MBX_1|MBX_0;
+	mcp->in_mb = MBX_1|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x10e3,
+		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
+		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+int
 qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
 {
 	int rval = 0;
@@ -4801,10 +4897,10 @@
 	if (rval != QLA_SUCCESS) {
 		ql_dbg(ql_dbg_mbx, vha, 0x10e9,
 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
-		if (mcp->mb[0] == MBS_COMMAND_ERROR &&
-		    mcp->mb[1] == 0x22)
+		if (mcp->mb[0] == MBS_COMMAND_ERROR && mcp->mb[1] == 0x22) {
 			/* sfp is not there */
 			rval = QLA_INTERFACE_ERROR;
+		}
 	} else {
 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
 		    "Done %s.\n", __func__);
@@ -5144,13 +5240,14 @@
 	mcp->mb[3] = MSW(data);
 	mcp->mb[8] = MSW(risc_addr);
 	mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
-	mcp->in_mb = MBX_0;
+	mcp->in_mb = MBX_1|MBX_0;
 	mcp->tov = 30;
 	mcp->flags = 0;
 	rval = qla2x00_mailbox_command(vha, mcp);
 	if (rval != QLA_SUCCESS) {
 		ql_dbg(ql_dbg_mbx, vha, 0x1101,
-		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
+		    rval, mcp->mb[0], mcp->mb[1]);
 	} else {
 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
 		    "Done %s.\n", __func__);
@@ -5221,6 +5318,66 @@
 	return rval;
 }
 
+/* Set the specified data rate */
+int
+qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+	struct qla_hw_data *ha = vha->hw;
+	uint16_t val;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
+	    "Entered %s speed:0x%x mode:0x%x.\n", __func__, ha->set_data_rate,
+	    mode);
+
+	if (!IS_FWI2_CAPABLE(ha))
+		return QLA_FUNCTION_FAILED;
+
+	memset(mcp, 0, sizeof(*mcp));
+	switch (ha->set_data_rate) {
+	case PORT_SPEED_AUTO:
+	case PORT_SPEED_4GB:
+	case PORT_SPEED_8GB:
+	case PORT_SPEED_16GB:
+	case PORT_SPEED_32GB:
+		val = ha->set_data_rate;
+		break;
+	default:
+		ql_log(ql_log_warn, vha, 0x1199,
+		    "Unrecognized speed setting:%d. Setting Autoneg\n",
+		    ha->set_data_rate);
+		val = ha->set_data_rate = PORT_SPEED_AUTO;
+		break;
+	}
+
+	mcp->mb[0] = MBC_DATA_RATE;
+	mcp->mb[1] = mode;
+	mcp->mb[2] = val;
+
+	mcp->out_mb = MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_2|MBX_1|MBX_0;
+	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
+		mcp->in_mb |= MBX_4|MBX_3;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1107,
+		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+	} else {
+		if (mcp->mb[1] != 0x7)
+			ql_dbg(ql_dbg_mbx, vha, 0x1179,
+				"Speed set:0x%x\n", mcp->mb[1]);
+
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
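
For orientation, out_mb and in_mb in these routines are bitmaps selecting
which mailbox registers get written to and read back from the firmware, so
MBX_2|MBX_1|MBX_0 above covers mb[0..2] in each direction. A small sketch
of the idiom, assuming the obvious bit-n-selects-register-n encoding:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed encoding: bit n selects mailbox register n. */
    #define MBX_0 (1u << 0)
    #define MBX_1 (1u << 1)
    #define MBX_2 (1u << 2)

    /* Copy only the selected registers out of a register file. */
    static void collect_regs(const uint16_t *regs, uint32_t mask,
    			 uint16_t *out, int nregs)
    {
    	for (int i = 0; i < nregs; i++)
    		if (mask & (1u << i))
    			out[i] = regs[i];
    }

    int main(void)
    {
    	uint16_t hw[3] = { 0x4000, 0x0007, 0x0002 };   /* fake readback */
    	uint16_t mb[3] = { 0 };

    	collect_regs(hw, MBX_2 | MBX_1 | MBX_0, mb, 3);
    	printf("mb[0]=%#x mb[1]=%#x mb[2]=%#x\n", mb[0], mb[1], mb[2]);
    	return 0;
    }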
+
 int
 qla2x00_get_data_rate(scsi_qla_host_t *vha)
 {
@@ -5236,10 +5393,10 @@
 		return QLA_FUNCTION_FAILED;
 
 	mcp->mb[0] = MBC_DATA_RATE;
-	mcp->mb[1] = 0;
+	mcp->mb[1] = QLA_GET_DATA_RATE;
 	mcp->out_mb = MBX_1|MBX_0;
 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
-	if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
 		mcp->in_mb |= MBX_3;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
@@ -5269,7 +5426,7 @@
 	    "Entered %s.\n", __func__);
 
 	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) &&
-	    !IS_QLA27XX(ha))
+	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
 		return QLA_FUNCTION_FAILED;
 	mcp->mb[0] = MBC_GET_PORT_CONFIG;
 	mcp->out_mb = MBX_0;
@@ -5585,6 +5742,7 @@
 	mbx_cmd_t *mcp = &mc;
 	int rval = QLA_FUNCTION_FAILED;
 	int offset = 0, size = MINIDUMP_SIZE_36K;
+
 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f,
 	    "Entered %s.\n", __func__);
 
@@ -5765,7 +5923,7 @@
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
+	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
 		return QLA_FUNCTION_FAILED;
 
 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
@@ -5840,7 +5998,7 @@
 	struct qla_hw_data *ha = vha->hw;
 	unsigned long retry_max_time = jiffies + (2 * HZ);
 
-	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
+	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
 		return QLA_FUNCTION_FAILED;
 
 	ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);
@@ -5890,7 +6048,7 @@
 	mbx_cmd_t *mcp = &mc;
 	struct qla_hw_data *ha = vha->hw;
 
-	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
+	if (!IS_QLA83XX(ha))
 		return QLA_FUNCTION_FAILED;
 
 	ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
@@ -6024,7 +6182,8 @@
 	mbx_cmd_t *mcp = &mc;
 	dma_addr_t dd_dma;
 
-	if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
+	if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
+	    !IS_QLA28XX(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
@@ -6066,10 +6225,8 @@
 	return rval;
 }
 
-static void qla2x00_async_mb_sp_done(void *s, int res)
+static void qla2x00_async_mb_sp_done(srb_t *sp, int res)
 {
-	struct srb *sp = s;
-
 	sp->u.iocb_cmd.u.mbx.rc = res;
 
 	complete(&sp->u.iocb_cmd.u.mbx.comp);
@@ -6241,9 +6398,13 @@
 	fcport->d_id.b.rsvd_1 = 0;
 
 	if (fcport->fc4f_nvme) {
-		fcport->nvme_prli_service_param =
-		    pd->prli_nvme_svc_param_word_3;
-		fcport->port_type = FCT_NVME;
+		fcport->port_type = 0;
+		if ((pd->prli_svc_param_word_3[0] & BIT_5) == 0)
+			fcport->port_type |= FCT_NVME_INITIATOR;
+		if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
+			fcport->port_type |= FCT_NVME_TARGET;
+		if ((pd->prli_svc_param_word_3[0] & BIT_3) == 0)
+			fcport->port_type |= FCT_NVME_DISCOVERY;
 	} else {
 		/* If not target must be initiator or unknown type. */
 		if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
@@ -6432,3 +6593,101 @@
 done:
 	return rval;
 }
+
+int qla28xx_secure_flash_update(scsi_qla_host_t *vha, uint16_t opts,
+    uint16_t region, uint32_t len, dma_addr_t sfub_dma_addr,
+    uint32_t sfub_len)
+{
+	int		rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	mcp->mb[0] = MBC_SECURE_FLASH_UPDATE;
+	mcp->mb[1] = opts;
+	mcp->mb[2] = region;
+	mcp->mb[3] = MSW(len);
+	mcp->mb[4] = LSW(len);
+	mcp->mb[5] = MSW(sfub_dma_addr);
+	mcp->mb[6] = LSW(sfub_dma_addr);
+	mcp->mb[7] = MSW(MSD(sfub_dma_addr));
+	mcp->mb[8] = LSW(MSD(sfub_dma_addr));
+	mcp->mb[9] = sfub_len;
+	mcp->out_mb =
+	    MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_2|MBX_1|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s(%ld): failed rval 0x%x, %x %x %x",
+			__func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1],
+			mcp->mb[2]);
+	}
+
+	return rval;
+}
+
+int qla2xxx_write_remote_register(scsi_qla_host_t *vha, uint32_t addr,
+    uint32_t data)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
+	    "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_WRITE_REMOTE_REG;
+	mcp->mb[1] = LSW(addr);
+	mcp->mb[2] = MSW(addr);
+	mcp->mb[3] = LSW(data);
+	mcp->mb[4] = MSW(data);
+	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_1|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x10e9,
+		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+int qla2xxx_read_remote_register(scsi_qla_host_t *vha, uint32_t addr,
+    uint32_t *data)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
+	    "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_READ_REMOTE_REG;
+	mcp->mb[1] = LSW(addr);
+	mcp->mb[2] = MSW(addr);
+	mcp->out_mb = MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	*data = (uint32_t)((((uint32_t)mcp->mb[4]) << 16) | mcp->mb[3]);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x10e9,
+		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index d620f4b..2382409 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -66,6 +66,7 @@
 	uint16_t vp_id;
 	struct qla_hw_data *ha = vha->hw;
 	unsigned long flags = 0;
+	u8 i;
 
 	mutex_lock(&ha->vport_lock);
 	/*
@@ -75,8 +76,11 @@
 	 * ensures no active vp_list traversal while the vport is removed
 	 * from the queue)
 	 */
-	wait_event_timeout(vha->vref_waitq, !atomic_read(&vha->vref_count),
-	    10*HZ);
+	for (i = 0; i < 10; i++) {
+		if (wait_event_timeout(vha->vref_waitq,
+		    !atomic_read(&vha->vref_count), HZ) > 0)
+			break;
+	}
 
 	spin_lock_irqsave(&ha->vport_slock, flags);
 	if (atomic_read(&vha->vref_count)) {
@@ -262,6 +266,9 @@
 	spin_lock_irqsave(&ha->vport_slock, flags);
 	list_for_each_entry(vha, &ha->vp_list, list) {
 		if (vha->vp_idx) {
+			if (test_bit(VPORT_DELETE, &vha->dpc_flags))
+				continue;
+
 			atomic_inc(&vha->vref_count);
 			spin_unlock_irqrestore(&ha->vport_slock, flags);
 
@@ -300,6 +307,20 @@
 int
 qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
 {
+	fc_port_t *fcport;
+
+	/*
+	 * To exclusively reset the vport, we need to log it out first.
+	 * Note: this control_vp call can fail if an ISP reset has already
+	 * been issued; that is expected, since the vport will already have
+	 * been logged out by the ISP reset.
+	 */
+	if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
+		qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
+		list_for_each_entry(fcport, &vha->vp_fcports, list)
+			fcport->logout_on_delete = 0;
+	}
+
 	/*
 	 * Physical port will do most of the abort and recovery work. We can
 	 * just treat it as a loop down
@@ -312,16 +333,9 @@
 			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
 	}
 
-	/*
-	 * To exclusively reset vport, we need to log it out first.  Note: this
-	 * control_vp can fail if ISP reset is already issued, this is
-	 * expected, as the vp would be already logged out due to ISP reset.
-	 */
-	if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
-		qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
-
 	ql_dbg(ql_dbg_taskm, vha, 0x801d,
 	    "Scheduling enable of Vport %d.\n", vha->vp_idx);
+
 	return qla24xx_enable_vp(vha);
 }
 
@@ -507,6 +521,7 @@
 	qla2x00_start_timer(vha, WATCH_INTERVAL);
 
 	vha->req = base_vha->req;
+	vha->flags.nvme_enabled = base_vha->flags.nvme_enabled;
 	host->can_queue = base_vha->req->length + 128;
 	host->cmd_per_lun = 3;
 	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
@@ -900,11 +915,10 @@
 	return 0;
 }
 
-static void qla_ctrlvp_sp_done(void *s, int res)
+static void qla_ctrlvp_sp_done(srb_t *sp, int res)
 {
-	struct srb *sp = s;
-
-	complete(&sp->comp);
+	if (sp->comp)
+		complete(sp->comp);
 	/* don't free sp here. Let the caller do the free */
 }
 
@@ -921,6 +935,7 @@
 	struct qla_hw_data *ha = vha->hw;
 	int	vp_index = vha->vp_idx;
 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+	DECLARE_COMPLETION_ONSTACK(comp);
 	srb_t *sp;
 
 	ql_dbg(ql_dbg_vport, vha, 0x10c1,
@@ -935,6 +950,7 @@
 
 	sp->type = SRB_CTRL_VP;
 	sp->name = "ctrl_vp";
+	sp->comp = &comp;
 	sp->done = qla_ctrlvp_sp_done;
 	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
 	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
@@ -952,7 +968,9 @@
 	ql_dbg(ql_dbg_vport, vha, 0x113f, "%s hndl %x submitted\n",
 	    sp->name, sp->handle);
 
-	wait_for_completion(&sp->comp);
+	wait_for_completion(&comp);
+	sp->comp = NULL;
+
 	rval = sp->rc;
 	switch (rval) {
 	case QLA_FUNCTION_TIMEOUT:
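
The qla24xx_control_vp change is subtle: the completion now lives on the
caller's stack and sp->comp is cleared once the wait returns, so a late
sp->done() only signals while a waiter is still attached. A simplified
userspace sketch of that detach pattern, with a condition variable
standing in for the kernel completion (timeout path omitted):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct completion {
    	pthread_mutex_t lock;
    	pthread_cond_t cond;
    	bool done;
    };

    struct srb {
    	struct completion *comp;   /* NULL once the waiter detaches */
    };

    static void complete(struct completion *c)
    {
    	pthread_mutex_lock(&c->lock);
    	c->done = true;
    	pthread_cond_signal(&c->cond);
    	pthread_mutex_unlock(&c->lock);
    }

    /* Done-callback: only signal while a waiter is still attached. */
    static void srb_done(struct srb *sp)
    {
    	if (sp->comp)
    		complete(sp->comp);
    }

    int main(void)
    {
    	struct completion comp = {
    		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false
    	};
    	struct srb sp = { .comp = &comp };

    	srb_done(&sp);            /* completion fires */
    	sp.comp = NULL;           /* waiter detaches */
    	srb_done(&sp);            /* a late call is now harmless */
    	printf("done=%d\n", comp.done);
    	return 0;
    }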
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 521a513..605b59c 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -10,7 +10,6 @@
 #include <linux/pci.h>
 #include <linux/ratelimit.h>
 #include <linux/vmalloc.h>
-#include <linux/bsg-lib.h>
 #include <scsi/scsi_tcq.h>
 #include <linux/utsname.h>
 
@@ -149,7 +148,8 @@
 		QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code);
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
-		wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ);
+		WARN_ON_ONCE(wait_for_completion_timeout(&ha->mbx_intr_comp,
+							 mcp->tov * HZ) != 0);
 	} else {
 		ql_dbg(ql_dbg_mbx, vha, 0x112c,
 		    "Cmd=%x Polling Mode.\n", command);
@@ -273,9 +273,9 @@
 
 	if (rval) {
 		ql_log(ql_log_warn, base_vha, 0x1163,
-		    "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, "
-		    "mb[3]=%x, cmd=%x ****.\n",
-		    mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command);
+		       "**** Failed=%x mbx[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x, cmd=%x ****.\n",
+		       rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
+		       command);
 	} else {
 		ql_dbg(ql_dbg_mbx, base_vha, 0x1164, "Done %s.\n", __func__);
 	}
@@ -629,17 +629,20 @@
  *
  * Returns 0 on success.
  */
-void
+int
 qlafx00_soft_reset(scsi_qla_host_t *vha)
 {
 	struct qla_hw_data *ha = vha->hw;
+	int rval = QLA_FUNCTION_FAILED;
 
 	if (unlikely(pci_channel_offline(ha->pdev) &&
 	    ha->flags.pci_channel_io_perm_failure))
-		return;
+		return rval;
 
 	ha->isp_ops->disable_intrs(ha);
 	qlafx00_soc_cpu_reset(vha);
+
+	return QLA_SUCCESS;
 }
 
 /**
@@ -685,14 +688,12 @@
 }
 
 char *
-qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str)
+qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
 {
 	struct qla_hw_data *ha = vha->hw;
 
-	if (pci_is_pcie(ha->pdev)) {
-		strcpy(str, "PCIe iSA");
-		return str;
-	}
+	if (pci_is_pcie(ha->pdev))
+		strlcpy(str, "PCIe iSA", str_len);
 	return str;
 }
 
@@ -1138,8 +1139,8 @@
 
 	ql_dbg(ql_dbg_disc + ql_dbg_init, vha, 0x2088,
 	    "Listing Target bit map...\n");
-	ql_dump_buffer(ql_dbg_disc + ql_dbg_init, vha,
-	    0x2089, (uint8_t *)ha->gid_list, 32);
+	ql_dump_buffer(ql_dbg_disc + ql_dbg_init, vha, 0x2089,
+	    ha->gid_list, 32);
 
 	/* Allocate temporary rmtport for any new rmtports discovered. */
 	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
@@ -1320,6 +1321,7 @@
 {
 	int  rval;
 	unsigned long flags;
+
 	rval = QLA_SUCCESS;
 
 	flags = vha->dpc_flags;
@@ -1795,10 +1797,8 @@
 	complete(&lio->u.fxiocb.fxiocb_comp);
 }
 
-static void
-qla2x00_fxdisc_sp_done(void *ptr, int res)
+static void qla2x00_fxdisc_sp_done(srb_t *sp, int res)
 {
-	srb_t *sp = ptr;
 	struct srb_iocb *lio = &sp->u.iocb_cmd;
 
 	complete(&lio->u.fxiocb.fxiocb_comp);
@@ -1877,22 +1877,22 @@
 			phost_info = &preg_hsi->hsi;
 			memset(preg_hsi, 0, sizeof(struct register_host_info));
 			phost_info->os_type = OS_TYPE_LINUX;
-			strncpy(phost_info->sysname,
-			    p_sysid->sysname, SYSNAME_LENGTH);
-			strncpy(phost_info->nodename,
-			    p_sysid->nodename, NODENAME_LENGTH);
+			strlcpy(phost_info->sysname, p_sysid->sysname,
+				sizeof(phost_info->sysname));
+			strlcpy(phost_info->nodename, p_sysid->nodename,
+				sizeof(phost_info->nodename));
 			if (!strcmp(phost_info->nodename, "(none)"))
 				ha->mr.host_info_resend = true;
-			strncpy(phost_info->release,
-			    p_sysid->release, RELEASE_LENGTH);
-			strncpy(phost_info->version,
-			    p_sysid->version, VERSION_LENGTH);
-			strncpy(phost_info->machine,
-			    p_sysid->machine, MACHINE_LENGTH);
-			strncpy(phost_info->domainname,
-			    p_sysid->domainname, DOMNAME_LENGTH);
-			strncpy(phost_info->hostdriver,
-			    QLA2XXX_VERSION, VERSION_LENGTH);
+			strlcpy(phost_info->release, p_sysid->release,
+				sizeof(phost_info->release));
+			strlcpy(phost_info->version, p_sysid->version,
+				sizeof(phost_info->version));
+			strlcpy(phost_info->machine, p_sysid->machine,
+				sizeof(phost_info->machine));
+			strlcpy(phost_info->domainname, p_sysid->domainname,
+				sizeof(phost_info->domainname));
+			strlcpy(phost_info->hostdriver, QLA2XXX_VERSION,
+				sizeof(phost_info->hostdriver));
 			preg_hsi->utc = (uint64_t)ktime_get_real_seconds();
 			ql_dbg(ql_dbg_init, vha, 0x0149,
 			    "ISP%04X: Host registration with firmware\n",
@@ -1913,8 +1913,7 @@
 			    phost_info->domainname,
 			    phost_info->hostdriver);
 			ql_dump_buffer(ql_dbg_init + ql_dbg_disc, vha, 0x014d,
-			    (uint8_t *)phost_info,
-			    sizeof(struct host_system_info));
+			    phost_info, sizeof(*phost_info));
 		}
 	}
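
The strncpy-to-strlcpy conversions above matter because strncpy leaves the
destination unterminated when the source fills it, while strlcpy always
NUL-terminates and truncates to the destination's own size. A standalone
sketch, using a local bounded copy since userspace libc may lack strlcpy:

    #include <stdio.h>
    #include <string.h>

    /* Minimal strlcpy-alike: always NUL-terminates, returns strlen(src). */
    static size_t bounded_copy(char *dst, const char *src, size_t size)
    {
    	size_t len = strlen(src);

    	if (size) {
    		size_t n = len < size - 1 ? len : size - 1;

    		memcpy(dst, src, n);
    		dst[n] = '\0';
    	}
    	return len;
    }

    int main(void)
    {
    	char buf[8];

    	strncpy(buf, "12345678-long", sizeof(buf));
    	/* buf is NOT a valid C string here; printing it is unsafe. */

    	bounded_copy(buf, "12345678-long", sizeof(buf));
    	printf("truncated safely: \"%s\"\n", buf);    /* "1234567" */
    	return 0;
    }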
 
@@ -1938,8 +1937,10 @@
 	if (fx_type == FXDISC_GET_CONFIG_INFO) {
 		struct config_info_data *pinfo =
 		    (struct config_info_data *) fdisc->u.fxiocb.rsp_addr;
-		strcpy(vha->hw->model_number, pinfo->model_num);
-		strcpy(vha->hw->model_desc, pinfo->model_description);
+		strlcpy(vha->hw->model_number, pinfo->model_num,
+			ARRAY_SIZE(vha->hw->model_number));
+		strlcpy(vha->hw->model_desc, pinfo->model_description,
+			ARRAY_SIZE(vha->hw->model_desc));
 		memcpy(&vha->hw->mr.symbolic_name, pinfo->symbolic_name,
 		    sizeof(vha->hw->mr.symbolic_name));
 		memcpy(&vha->hw->mr.serial_num, pinfo->serial_num,
@@ -1968,7 +1969,7 @@
 		vha->d_id.b.al_pa = pinfo->port_id[2];
 		qlafx00_update_host_attr(vha, pinfo);
 		ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0141,
-		    (uint8_t *)pinfo, 16);
+		    pinfo, 16);
 	} else if (fx_type == FXDISC_GET_TGT_NODE_INFO) {
 		struct qlafx00_tgt_node_info *pinfo =
 		    (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr;
@@ -1976,12 +1977,12 @@
 		memcpy(fcport->port_name, pinfo->tgt_node_wwpn, WWN_SIZE);
 		fcport->port_type = FCT_TARGET;
 		ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0144,
-		    (uint8_t *)pinfo, 16);
+		    pinfo, 16);
 	} else if (fx_type == FXDISC_GET_TGT_NODE_LIST) {
 		struct qlafx00_tgt_node_info *pinfo =
 		    (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr;
 		ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0146,
-		    (uint8_t *)pinfo, 16);
+		    pinfo, 16);
 		memcpy(vha->hw->gid_list, pinfo, QLAFX00_TGT_NODE_LIST_SIZE);
 	} else if (fx_type == FXDISC_ABORT_IOCTL)
 		fdisc->u.fxiocb.result =
@@ -2212,7 +2213,7 @@
 	struct bsg_job *bsg_job;
 	struct fc_bsg_reply *bsg_reply;
 	struct srb_iocb *iocb_job;
-	int res;
+	int res = 0;
 	struct qla_mt_iocb_rsp_fx00 fstatus;
 	uint8_t	*fw_sts_ptr;
 
@@ -2248,18 +2249,16 @@
 
 		fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply);
 
-		memcpy(fw_sts_ptr, (uint8_t *)&fstatus,
-		    sizeof(struct qla_mt_iocb_rsp_fx00));
+		memcpy(fw_sts_ptr, &fstatus, sizeof(fstatus));
 		bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
 			sizeof(struct qla_mt_iocb_rsp_fx00) + sizeof(uint8_t);
 
 		ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
-		    sp->fcport->vha, 0x5080,
-		    (uint8_t *)pkt, sizeof(struct ioctl_iocb_entry_fx00));
+		    sp->vha, 0x5080, pkt, sizeof(*pkt));
 
 		ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
-		    sp->fcport->vha, 0x5074,
-		    (uint8_t *)fw_sts_ptr, sizeof(struct qla_mt_iocb_rsp_fx00));
+		    sp->vha, 0x5074,
+		    fw_sts_ptr, sizeof(fstatus));
 
 		res = bsg_reply->result = DID_OK << 16;
 		bsg_reply->reply_payload_rcv_len =
@@ -2540,6 +2539,8 @@
 
 	if (rsp->status_srb == NULL)
 		sp->done(sp, res);
+	else
+		WARN_ON_ONCE(true);
 }
 
 /**
@@ -2597,7 +2598,7 @@
 
 		/* Move sense data. */
 		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304e,
-		    (uint8_t *)pkt, sizeof(sts_cont_entry_t));
+		    pkt, sizeof(*pkt));
 		memcpy(sense_ptr, pkt->data, sense_sz);
 		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304a,
 		    sense_ptr, sense_sz);
@@ -2617,6 +2618,8 @@
 	if (sense_len == 0) {
 		rsp->status_srb = NULL;
 		sp->done(sp, cp->result);
+	} else {
+		WARN_ON_ONCE(true);
 	}
 }
 
@@ -2624,7 +2627,7 @@
  * qlafx00_multistatus_entry() - Process Multi response queue entries.
  * @vha: SCSI driver HA context
  * @rsp: response queue
- * @pkt:
+ * @pkt: received packet
  */
 static void
 qlafx00_multistatus_entry(struct scsi_qla_host *vha,
@@ -2681,12 +2684,10 @@
  * @vha: SCSI driver HA context
  * @rsp: response queue
  * @pkt: Entry pointer
- * @estatus:
- * @etype:
  */
 static void
 qlafx00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp,
-		    struct sts_entry_fx00 *pkt, uint8_t estatus, uint8_t etype)
+		    struct sts_entry_fx00 *pkt)
 {
 	srb_t *sp;
 	struct qla_hw_data *ha = vha->hw;
@@ -2695,9 +2696,6 @@
 	struct req_que *req = NULL;
 	int res = DID_ERROR << 16;
 
-	ql_dbg(ql_dbg_async, vha, 0x507f,
-	    "type of error status in response: 0x%x\n", estatus);
-
 	req = ha->req_q_map[que];
 
 	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
@@ -2745,9 +2743,11 @@
 
 		if (pkt->entry_status != 0 &&
 		    pkt->entry_type != IOCTL_IOSB_TYPE_FX00) {
+			ql_dbg(ql_dbg_async, vha, 0x507f,
+			       "type of error status in response: 0x%x\n",
+			       pkt->entry_status);
 			qlafx00_error_entry(vha, rsp,
-			    (struct sts_entry_fx00 *)pkt, pkt->entry_status,
-			    pkt->entry_type);
+					    (struct sts_entry_fx00 *)pkt);
 			continue;
 		}
 
@@ -2867,7 +2867,7 @@
 /**
  * qlafx00x_mbx_completion() - Process mailbox command completions.
  * @vha: SCSI driver HA context
- * @mb0:
+ * @mb0: value to be written into mailbox register 0
  */
 static void
 qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
@@ -2893,7 +2893,7 @@
 
 /**
  * qlafx00_intr_handler() - Process interrupts for the ISPFX00.
- * @irq:
+ * @irq: interrupt number
  * @dev_id: SCSI driver HA context
  *
  * Called by system whenever the host adapter generates an interrupt.
@@ -2995,7 +2995,7 @@
 			 uint16_t tot_dsds, struct cmd_type_7_fx00 *lcmd_pkt)
 {
 	uint16_t	avail_dsds;
-	__le32 *cur_dsd;
+	struct dsd64	*cur_dsd;
 	scsi_qla_host_t	*vha;
 	struct scsi_cmnd *cmd;
 	struct scatterlist *sg;
@@ -3031,12 +3031,10 @@
 
 	/* One DSD is available in the Command Type 3 IOCB */
 	avail_dsds = 1;
-	cur_dsd = (__le32 *)&lcmd_pkt->dseg_0_address;
+	cur_dsd = &lcmd_pkt->dsd;
 
 	/* Load data segments */
 	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
-		dma_addr_t	sle_dma;
-
 		/* Allocate additional continuation packets? */
 		if (avail_dsds == 0) {
 			/*
@@ -3046,26 +3044,23 @@
 			memset(&lcont_pkt, 0, REQUEST_ENTRY_SIZE);
 			cont_pkt =
 			    qlafx00_prep_cont_type1_iocb(req, &lcont_pkt);
-			cur_dsd = (__le32 *)lcont_pkt.dseg_0_address;
+			cur_dsd = lcont_pkt.dsd;
 			avail_dsds = 5;
 			cont = 1;
 		}
 
-		sle_dma = sg_dma_address(sg);
-		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
-		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
-		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+		append_dsd64(&cur_dsd, sg);
 		avail_dsds--;
 		if (avail_dsds == 0 && cont == 1) {
 			cont = 0;
 			memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt,
-			    REQUEST_ENTRY_SIZE);
+			    sizeof(lcont_pkt));
 		}
 
 	}
 	if (avail_dsds != 0 && cont == 1) {
 		memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt,
-		    REQUEST_ENTRY_SIZE);
+		    sizeof(lcont_pkt));
 	}
 }
 
@@ -3080,7 +3075,6 @@
 {
 	int		nseg;
 	unsigned long   flags;
-	uint32_t        index;
 	uint32_t	handle;
 	uint16_t	cnt;
 	uint16_t	req_cnt;
@@ -3104,16 +3098,8 @@
 	/* Acquire ring specific lock */
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 
-	/* Check for room in outstanding command list. */
-	handle = req->current_outstanding_cmd;
-	for (index = 1; index < req->num_outstanding_cmds; index++) {
-		handle++;
-		if (handle == req->num_outstanding_cmds)
-			handle = 1;
-		if (!req->outstanding_cmds[handle])
-			break;
-	}
-	if (index == req->num_outstanding_cmds)
+	handle = qla2xxx_get_next_handle(req);
+	if (handle == 0)
 		goto queuing_error;
 
 	/* Map the sg table so we have an accurate count of sg entries needed */
@@ -3175,9 +3161,9 @@
 	lcmd_pkt.entry_status = (uint8_t) rsp->id;
 
 	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e,
-	    (uint8_t *)cmd->cmnd, cmd->cmd_len);
+	    cmd->cmnd, cmd->cmd_len);
 	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3032,
-	    (uint8_t *)&lcmd_pkt, REQUEST_ENTRY_SIZE);
+	    &lcmd_pkt, sizeof(lcmd_pkt));
 
 	memcpy_toio((void __iomem *)cmd_pkt, &lcmd_pkt, REQUEST_ENTRY_SIZE);
 	wmb();
@@ -3285,11 +3271,9 @@
 			fx_iocb.req_dsdcnt = cpu_to_le16(1);
 			fx_iocb.req_xfrcnt =
 			    cpu_to_le16(fxio->u.fxiocb.req_len);
-			fx_iocb.dseg_rq_address[0] =
-			    cpu_to_le32(LSD(fxio->u.fxiocb.req_dma_handle));
-			fx_iocb.dseg_rq_address[1] =
-			    cpu_to_le32(MSD(fxio->u.fxiocb.req_dma_handle));
-			fx_iocb.dseg_rq_len =
+			put_unaligned_le64(fxio->u.fxiocb.req_dma_handle,
+					   &fx_iocb.dseg_rq.address);
+			fx_iocb.dseg_rq.length =
 			    cpu_to_le32(fxio->u.fxiocb.req_len);
 		}
 
@@ -3297,11 +3281,9 @@
 			fx_iocb.rsp_dsdcnt = cpu_to_le16(1);
 			fx_iocb.rsp_xfrcnt =
 			    cpu_to_le16(fxio->u.fxiocb.rsp_len);
-			fx_iocb.dseg_rsp_address[0] =
-			    cpu_to_le32(LSD(fxio->u.fxiocb.rsp_dma_handle));
-			fx_iocb.dseg_rsp_address[1] =
-			    cpu_to_le32(MSD(fxio->u.fxiocb.rsp_dma_handle));
-			fx_iocb.dseg_rsp_len =
+			put_unaligned_le64(fxio->u.fxiocb.rsp_dma_handle,
+					   &fx_iocb.dseg_rsp.address);
+			fx_iocb.dseg_rsp.length =
 			    cpu_to_le32(fxio->u.fxiocb.rsp_len);
 		}
 
@@ -3311,6 +3293,7 @@
 		fx_iocb.flags = fxio->u.fxiocb.flags;
 	} else {
 		struct scatterlist *sg;
+
 		bsg_job = sp->u.bsg_job;
 		bsg_request = bsg_job->request;
 		piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
@@ -3330,19 +3313,17 @@
 			int avail_dsds, tot_dsds;
 			cont_a64_entry_t lcont_pkt;
 			cont_a64_entry_t *cont_pkt = NULL;
-			__le32 *cur_dsd;
+			struct dsd64 *cur_dsd;
 			int index = 0, cont = 0;
 
 			fx_iocb.req_dsdcnt =
 			    cpu_to_le16(bsg_job->request_payload.sg_cnt);
 			tot_dsds =
 			    bsg_job->request_payload.sg_cnt;
-			cur_dsd = (__le32 *)&fx_iocb.dseg_rq_address[0];
+			cur_dsd = &fx_iocb.dseg_rq;
 			avail_dsds = 1;
 			for_each_sg(bsg_job->request_payload.sg_list, sg,
 			    tot_dsds, index) {
-				dma_addr_t sle_dma;
-
 				/* Allocate additional continuation packets? */
 				if (avail_dsds == 0) {
 					/*
@@ -3354,17 +3335,13 @@
 					cont_pkt =
 					    qlafx00_prep_cont_type1_iocb(
 						sp->vha->req, &lcont_pkt);
-					cur_dsd = (__le32 *)
-					    lcont_pkt.dseg_0_address;
+					cur_dsd = lcont_pkt.dsd;
 					avail_dsds = 5;
 					cont = 1;
 					entry_cnt++;
 				}
 
-				sle_dma = sg_dma_address(sg);
-				*cur_dsd++   = cpu_to_le32(LSD(sle_dma));
-				*cur_dsd++   = cpu_to_le32(MSD(sle_dma));
-				*cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
+				append_dsd64(&cur_dsd, sg);
 				avail_dsds--;
 
 				if (avail_dsds == 0 && cont == 1) {
@@ -3392,19 +3369,17 @@
 			int avail_dsds, tot_dsds;
 			cont_a64_entry_t lcont_pkt;
 			cont_a64_entry_t *cont_pkt = NULL;
-			__le32 *cur_dsd;
+			struct dsd64 *cur_dsd;
 			int index = 0, cont = 0;
 
 			fx_iocb.rsp_dsdcnt =
 			   cpu_to_le16(bsg_job->reply_payload.sg_cnt);
 			tot_dsds = bsg_job->reply_payload.sg_cnt;
-			cur_dsd = (__le32 *)&fx_iocb.dseg_rsp_address[0];
+			cur_dsd = &fx_iocb.dseg_rsp;
 			avail_dsds = 1;
 
 			for_each_sg(bsg_job->reply_payload.sg_list, sg,
 			    tot_dsds, index) {
-				dma_addr_t sle_dma;
-
 				/* Allocate additional continuation packets? */
 				if (avail_dsds == 0) {
 					/*
@@ -3416,17 +3391,13 @@
 					cont_pkt =
 					    qlafx00_prep_cont_type1_iocb(
 						sp->vha->req, &lcont_pkt);
-					cur_dsd = (__le32 *)
-					    lcont_pkt.dseg_0_address;
+					cur_dsd = lcont_pkt.dsd;
 					avail_dsds = 5;
 					cont = 1;
 					entry_cnt++;
 				}
 
-				sle_dma = sg_dma_address(sg);
-				*cur_dsd++   = cpu_to_le32(LSD(sle_dma));
-				*cur_dsd++   = cpu_to_le32(MSD(sle_dma));
-				*cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
+				append_dsd64(&cur_dsd, sg);
 				avail_dsds--;
 
 				if (avail_dsds == 0 && cont == 1) {
@@ -3457,10 +3428,8 @@
 	}
 
 	ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
-	    sp->vha, 0x3047,
-	    (uint8_t *)&fx_iocb, sizeof(struct fxdisc_entry_fx00));
+	    sp->vha, 0x3047, &fx_iocb, sizeof(fx_iocb));
 
-	memcpy_toio((void __iomem *)pfxiocb, &fx_iocb,
-	    sizeof(struct fxdisc_entry_fx00));
+	memcpy_toio((void __iomem *)pfxiocb, &fx_iocb, sizeof(fx_iocb));
 	wmb();
 }
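
Note on the dsd64 conversion running through this file: the old (address-low,
address-high, length) triplets of cpu_to_le32() stores become one struct dsd64
written through the append_dsd64() helper that the newly included qla_dsd.h
provides. A standalone userspace model of that descriptor write; the field
layout is inferred from how the hunks above use it, and memcpy() stands in for
the endian helpers, assuming a little-endian host:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Modeled after struct dsd64: a 64-bit address plus a 32-bit length,
 * both little-endian on the wire. */
struct dsd64_model {
	uint64_t address;
	uint32_t length;
} __attribute__((packed));

struct sg_model { uint64_t dma_addr; uint32_t dma_len; };

/* Mirrors append_dsd64(): write one descriptor and advance the cursor,
 * replacing the three explicit cpu_to_le32() stores removed above. */
static void append_dsd64_model(struct dsd64_model **dsd,
			       const struct sg_model *sg)
{
	/* memcpy stands in for put_unaligned_le64()/cpu_to_le32(),
	 * assuming a little-endian host. */
	memcpy(&(*dsd)->address, &sg->dma_addr, sizeof(sg->dma_addr));
	memcpy(&(*dsd)->length, &sg->dma_len, sizeof(sg->dma_len));
	(*dsd)++;
}

int main(void)
{
	struct dsd64_model dsds[2];
	struct dsd64_model *cur_dsd = dsds;
	struct sg_model sgl[] = { { 0x1000, 512 }, { 0x2000, 1024 } };

	for (size_t i = 0; i < sizeof(sgl) / sizeof(sgl[0]); i++)
		append_dsd64_model(&cur_dsd, &sgl[i]);

	printf("wrote %td descriptors of %zu bytes each\n",
	       cur_dsd - dsds, sizeof(struct dsd64_model));
	return 0;
}
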
diff --git a/drivers/scsi/qla2xxx/qla_mr.h b/drivers/scsi/qla2xxx/qla_mr.h
index aeaa1b4..4567f0c 100644
--- a/drivers/scsi/qla2xxx/qla_mr.h
+++ b/drivers/scsi/qla2xxx/qla_mr.h
@@ -7,6 +7,8 @@
 #ifndef __QLA_MR_H
 #define __QLA_MR_H
 
+#include "qla_dsd.h"
+
 /*
  * The PCI VendorID and DeviceID for our board.
  */
@@ -46,8 +48,7 @@
 	uint8_t fcp_cdb[MAX_CMDSZ];	/* SCSI command words. */
 	__le32 byte_count;		/* Total byte count. */
 
-	uint32_t dseg_0_address[2];	/* Data segment 0 address. */
-	uint32_t dseg_0_len;		/* Data segment 0 length. */
+	struct dsd64 dsd;
 };
 
 #define	STATUS_TYPE_FX00	0x01		/* Status entry. */
@@ -176,10 +177,8 @@
 	uint8_t flags;
 	uint8_t reserved_1;
 
-	__le32 dseg_rq_address[2];	/* Data segment 0 address. */
-	__le32 dseg_rq_len;		/* Data segment 0 length. */
-	__le32 dseg_rsp_address[2];	/* Data segment 1 address. */
-	__le32 dseg_rsp_len;		/* Data segment 1 length. */
+	struct dsd64 dseg_rq;
+	struct dsd64 dseg_rsp;
 
 	__le32 dataword;
 	__le32 adapid;
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index e6545cb..6cc19e0 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -12,8 +12,6 @@
 
 static struct nvme_fc_port_template qla_nvme_fc_transport;
 
-static void qla_nvme_unregister_remote_port(struct work_struct *);
-
 int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
 {
 	struct qla_nvme_rport *rport;
@@ -38,7 +36,6 @@
 		(fcport->nvme_flag & NVME_FLAG_REGISTERED))
 		return 0;
 
-	INIT_WORK(&fcport->nvme_del_work, qla_nvme_unregister_remote_port);
 	fcport->nvme_flag &= ~NVME_FLAG_RESETTING;
 
 	memset(&req, 0, sizeof(struct nvme_fc_port_info));
@@ -74,7 +71,6 @@
 
 	rport = fcport->nvme_remote_port->private;
 	rport->fcport = fcport;
-	list_add_tail(&rport->list, &vha->nvme_rport_list);
 
 	fcport->nvme_flag |= NVME_FLAG_REGISTERED;
 	return 0;
@@ -124,54 +120,88 @@
 	return 0;
 }
 
-static void qla_nvme_sp_ls_done(void *ptr, int res)
+static void qla_nvme_release_fcp_cmd_kref(struct kref *kref)
 {
-	srb_t *sp = ptr;
+	struct srb *sp = container_of(kref, struct srb, cmd_kref);
+	struct nvme_private *priv = (struct nvme_private *)sp->priv;
+	struct nvmefc_fcp_req *fd;
 	struct srb_iocb *nvme;
-	struct nvmefc_ls_req   *fd;
-	struct nvme_private *priv;
+	unsigned long flags;
 
-	if (atomic_read(&sp->ref_count) == 0) {
-		ql_log(ql_log_warn, sp->fcport->vha, 0x2123,
-		    "SP reference-count to ZERO on LS_done -- sp=%p.\n", sp);
-		return;
+	if (!priv)
+		goto out;
+
+	nvme = &sp->u.iocb_cmd;
+	fd = nvme->u.nvme.desc;
+
+	spin_lock_irqsave(&priv->cmd_lock, flags);
+	priv->sp = NULL;
+	sp->priv = NULL;
+	if (priv->comp_status == QLA_SUCCESS) {
+		fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len;
+	} else {
+		fd->rcv_rsplen = 0;
+		fd->transferred_length = 0;
 	}
+	fd->status = 0;
+	spin_unlock_irqrestore(&priv->cmd_lock, flags);
 
-	if (!atomic_dec_and_test(&sp->ref_count))
+	fd->done(fd);
+out:
+	qla2xxx_rel_qpair_sp(sp->qpair, sp);
+}
+
+static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
+{
+	struct srb *sp = container_of(kref, struct srb, cmd_kref);
+	struct nvme_private *priv = (struct nvme_private *)sp->priv;
+	struct nvmefc_ls_req *fd;
+	unsigned long flags;
+
+	if (!priv)
+		goto out;
+
+	spin_lock_irqsave(&priv->cmd_lock, flags);
+	priv->sp = NULL;
+	sp->priv = NULL;
+	spin_unlock_irqrestore(&priv->cmd_lock, flags);
+
+	fd = priv->fd;
+	fd->done(fd, priv->comp_status);
+out:
+	qla2x00_rel_sp(sp);
+}
+
+static void qla_nvme_ls_complete(struct work_struct *work)
+{
+	struct nvme_private *priv =
+		container_of(work, struct nvme_private, ls_work);
+
+	kref_put(&priv->sp->cmd_kref, qla_nvme_release_ls_cmd_kref);
+}
+
+static void qla_nvme_sp_ls_done(srb_t *sp, int res)
+{
+	struct nvme_private *priv = sp->priv;
+
+	if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
 		return;
 
 	if (res)
 		res = -EINVAL;
 
-	nvme = &sp->u.iocb_cmd;
-	fd = nvme->u.nvme.desc;
-	priv = fd->private;
 	priv->comp_status = res;
+	INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
 	schedule_work(&priv->ls_work);
-	/* work schedule doesn't need the sp */
-	qla2x00_rel_sp(sp);
 }
 
-static void qla_nvme_sp_done(void *ptr, int res)
+/* It is assumed that the qpair lock is held. */
+static void qla_nvme_sp_done(srb_t *sp, int res)
 {
-	srb_t *sp = ptr;
-	struct srb_iocb *nvme;
-	struct nvmefc_fcp_req *fd;
+	struct nvme_private *priv = sp->priv;
 
-	nvme = &sp->u.iocb_cmd;
-	fd = nvme->u.nvme.desc;
-
-	if (!atomic_dec_and_test(&sp->ref_count))
-		return;
-
-	if (res == QLA_SUCCESS)
-		fd->status = 0;
-	else
-		fd->status = NVME_SC_INTERNAL;
-
-	fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len;
-	fd->done(fd);
-	qla2xxx_rel_qpair_sp(sp->qpair, sp);
+	priv->comp_status = res;
+	kref_put(&sp->cmd_kref, qla_nvme_release_fcp_cmd_kref);
 
 	return;
 }
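
The completion paths above replace the open-coded atomic ref_count with a kref
whose release callback (qla_nvme_release_fcp_cmd_kref or
qla_nvme_release_ls_cmd_kref) tears the srb down exactly once, on the last put.
A single-threaded userspace model of that pattern; in the kernel the decrement
is atomic, and the names below are illustrative:

#include <stdio.h>
#include <stdlib.h>

struct kref_model { int refcount; };

static void kref_init_model(struct kref_model *k)
{
	k->refcount = 1;	/* kref_init(): the object starts owned */
}

/* Mirrors kref_put(): drop one reference, run release on the last one. */
static void kref_put_model(struct kref_model *k,
			   void (*release)(struct kref_model *))
{
	if (--k->refcount == 0)
		release(k);
}

struct srb_model {
	struct kref_model cmd_kref;	/* first member, so the cast below works */
	int comp_status;
};

static void release_cmd(struct kref_model *k)
{
	/* The kernel uses container_of(); here the kref is the first member. */
	struct srb_model *sp = (struct srb_model *)k;

	printf("last put: freeing srb, comp_status=%d\n", sp->comp_status);
	free(sp);
}

int main(void)
{
	struct srb_model *sp = calloc(1, sizeof(*sp));

	kref_init_model(&sp->cmd_kref);
	kref_put_model(&sp->cmd_kref, release_cmd);	/* completion path */
	return 0;
}
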
@@ -185,32 +215,55 @@
 	struct qla_hw_data *ha = fcport->vha->hw;
 	int rval;
 
+	ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
+	       "%s called for sp=%p, hndl=%x on fcport=%p deleted=%d\n",
+	       __func__, sp, sp->handle, fcport, fcport->deleted);
+
+	if (!ha->flags.fw_started && fcport->deleted)
+		goto out;
+
+	if (ha->flags.host_shutting_down) {
+		ql_log(ql_log_info, sp->fcport->vha, 0xffff,
+		    "%s Calling done on sp: %p, type: 0x%x, sp->ref_count: 0x%x\n",
+		    __func__, sp, sp->type, atomic_read(&sp->ref_count));
+		sp->done(sp, 0);
+		goto out;
+	}
+
 	rval = ha->isp_ops->abort_command(sp);
 
 	ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
 	    "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
 	    __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
 	    sp, sp->handle, fcport, rval);
+
+out:
+	/* kref_get was done before the work was scheduled. */
+	kref_put(&sp->cmd_kref, sp->put_fn);
 }
 
 static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
     struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
 {
 	struct nvme_private *priv = fd->private;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->cmd_lock, flags);
+	if (!priv->sp) {
+		spin_unlock_irqrestore(&priv->cmd_lock, flags);
+		return;
+	}
+
+	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
+		spin_unlock_irqrestore(&priv->cmd_lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(&priv->cmd_lock, flags);
 
 	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
 	schedule_work(&priv->abort_work);
 }
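
qla_nvme_ls_abort() above only schedules the abort work after pinning the srb
with kref_get_unless_zero() under priv->cmd_lock, so an abort racing the final
completion either sees priv->sp already cleared or takes a reference that keeps
the srb alive. The guard in isolation (single-threaded sketch; the kernel
version is an atomic compare-and-increment):

#include <stdbool.h>
#include <stdio.h>

/* Mirrors kref_get_unless_zero(): never resurrect an object whose
 * last reference is already gone. */
static bool get_unless_zero(int *refcount)
{
	if (*refcount == 0)
		return false;
	(*refcount)++;
	return true;
}

int main(void)
{
	int live = 1, dying = 0;

	printf("live object:  %s\n",
	       get_unless_zero(&live) ? "pinned, safe to abort" : "skipped");
	printf("dying object: %s\n",
	       get_unless_zero(&dying) ? "pinned, safe to abort" : "skipped");
	return 0;
}
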
 
-static void qla_nvme_ls_complete(struct work_struct *work)
-{
-	struct nvme_private *priv =
-	    container_of(work, struct nvme_private, ls_work);
-	struct nvmefc_ls_req *fd = priv->fd;
-
-	fd->done(fd, priv->comp_status);
-}
-
 static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
     struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
 {
@@ -223,8 +276,16 @@
 	struct qla_hw_data *ha;
 	srb_t           *sp;
 
+
+	if (!fcport || (fcport && fcport->deleted))
+		return rval;
+
 	vha = fcport->vha;
 	ha = vha->hw;
+
+	if (!ha->flags.fw_started)
+		return rval;
+
 	/* Alloc SRB structure */
 	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
 	if (!sp)
@@ -233,11 +294,13 @@
 	sp->type = SRB_NVME_LS;
 	sp->name = "nvme_ls";
 	sp->done = qla_nvme_sp_ls_done;
-	atomic_set(&sp->ref_count, 1);
-	nvme = &sp->u.iocb_cmd;
+	sp->put_fn = qla_nvme_release_ls_cmd_kref;
+	sp->priv = (void *)priv;
 	priv->sp = sp;
+	kref_init(&sp->cmd_kref);
+	spin_lock_init(&priv->cmd_lock);
+	nvme = &sp->u.iocb_cmd;
 	priv->fd = fd;
-	INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
 	nvme->u.nvme.desc = fd;
 	nvme->u.nvme.dir = 0;
 	nvme->u.nvme.dl = 0;
@@ -254,8 +317,10 @@
 	if (rval != QLA_SUCCESS) {
 		ql_log(ql_log_warn, vha, 0x700e,
 		    "qla2x00_start_sp failed = %d\n", rval);
-		atomic_dec(&sp->ref_count);
 		wake_up(&sp->nvme_ls_waitq);
+		sp->priv = NULL;
+		priv->sp = NULL;
+		qla2x00_rel_sp(sp);
 		return rval;
 	}
 
@@ -267,34 +332,34 @@
     struct nvmefc_fcp_req *fd)
 {
 	struct nvme_private *priv = fd->private;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->cmd_lock, flags);
+	if (!priv->sp) {
+		spin_unlock_irqrestore(&priv->cmd_lock, flags);
+		return;
+	}
+	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
+		spin_unlock_irqrestore(&priv->cmd_lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(&priv->cmd_lock, flags);
 
 	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
 	schedule_work(&priv->abort_work);
 }
 
-static void qla_nvme_poll(struct nvme_fc_local_port *lport, void *hw_queue_handle)
-{
-	struct qla_qpair *qpair = hw_queue_handle;
-	unsigned long flags;
-	struct scsi_qla_host *vha = lport->private;
-
-	spin_lock_irqsave(&qpair->qp_lock, flags);
-	qla24xx_process_response_queue(vha, qpair->rsp);
-	spin_unlock_irqrestore(&qpair->qp_lock, flags);
-}
-
 static inline int qla2x00_start_nvme_mq(srb_t *sp)
 {
 	unsigned long   flags;
 	uint32_t        *clr_ptr;
-	uint32_t        index;
 	uint32_t        handle;
 	struct cmd_nvme *cmd_pkt;
 	uint16_t        cnt, i;
 	uint16_t        req_cnt;
 	uint16_t        tot_dsds;
 	uint16_t	avail_dsds;
-	uint32_t	*cur_dsd;
+	struct dsd64	*cur_dsd;
 	struct req_que *req = NULL;
 	struct scsi_qla_host *vha = sp->fcport->vha;
 	struct qla_hw_data *ha = vha->hw;
@@ -311,17 +376,8 @@
 	/* Acquire qpair specific lock */
 	spin_lock_irqsave(&qpair->qp_lock, flags);
 
-	/* Check for room in outstanding command list. */
-	handle = req->current_outstanding_cmd;
-	for (index = 1; index < req->num_outstanding_cmds; index++) {
-		handle++;
-		if (handle == req->num_outstanding_cmds)
-			handle = 1;
-		if (!req->outstanding_cmds[handle])
-			break;
-	}
-
-	if (index == req->num_outstanding_cmds) {
+	handle = qla2xxx_get_next_handle(req);
+	if (handle == 0) {
 		rval = -EBUSY;
 		goto queuing_error;
 	}
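
The open-coded scan deleted above (and the identical one removed from
qlafx00_start_scsi() earlier in this diff) is presumably what
qla2xxx_get_next_handle() now centralizes. A standalone model of that circular
search over the outstanding-command table, transcribed from the removed lines:

#include <stdint.h>
#include <stdio.h>

#define NUM_HANDLES 8	/* req->num_outstanding_cmds, shrunk for the demo */

struct req_model {
	uint32_t current_outstanding_cmd;
	void *outstanding_cmds[NUM_HANDLES];
};

/* Scan forward from the last handle, wrapping past the end and skipping
 * slot 0; return 0 when every slot is busy (the queuing_error path). */
static uint32_t get_next_handle(struct req_model *req)
{
	uint32_t handle = req->current_outstanding_cmd;

	for (uint32_t index = 1; index < NUM_HANDLES; index++) {
		handle++;
		if (handle == NUM_HANDLES)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			return handle;
	}
	return 0;
}

int main(void)
{
	struct req_model req = { .current_outstanding_cmd = 3 };
	int busy;

	req.outstanding_cmds[4] = &busy;	/* slot 4 is in flight */
	printf("next free handle: %u\n", get_next_handle(&req));	/* 5 */
	return 0;
}
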
@@ -343,6 +399,7 @@
 
 	if (unlikely(!fd->sqid)) {
 		struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
+
 		if (cmd->sqe.common.opcode == nvme_admin_async_event) {
 			nvme->u.nvme.aen_op = 1;
 			atomic_inc(&ha->nvme_active_aen_cnt);
@@ -369,17 +426,24 @@
 
 	/* No data transfer. How do we check buffer len == 0? */
 	if (fd->io_dir == NVMEFC_FCP_READ) {
-		cmd_pkt->control_flags =
-		    cpu_to_le16(CF_READ_DATA | CF_NVME_ENABLE);
+		cmd_pkt->control_flags = CF_READ_DATA;
 		vha->qla_stats.input_bytes += fd->payload_length;
 		vha->qla_stats.input_requests++;
 	} else if (fd->io_dir == NVMEFC_FCP_WRITE) {
-		cmd_pkt->control_flags =
-		    cpu_to_le16(CF_WRITE_DATA | CF_NVME_ENABLE);
+		cmd_pkt->control_flags = CF_WRITE_DATA;
+		if ((vha->flags.nvme_first_burst) &&
+		    (sp->fcport->nvme_prli_service_param &
+			NVME_PRLI_SP_FIRST_BURST)) {
+			if ((fd->payload_length <=
+			    sp->fcport->nvme_first_burst_size) ||
+				(sp->fcport->nvme_first_burst_size == 0))
+				cmd_pkt->control_flags |=
+				    CF_NVME_FIRST_BURST_ENABLE;
+		}
 		vha->qla_stats.output_bytes += fd->payload_length;
 		vha->qla_stats.output_requests++;
 	} else if (fd->io_dir == 0) {
-		cmd_pkt->control_flags = cpu_to_le16(CF_NVME_ENABLE);
+		cmd_pkt->control_flags = 0;
 	}
 
 	/* Set NPORT-ID */
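
The write path above sets CF_NVME_FIRST_BURST_ENABLE only when the port
enables first burst, the PRLI advertised NVME_PRLI_SP_FIRST_BURST, and the
payload fits the negotiated first-burst size (0 meaning no limit). The
decision isolated into a helper; this is an illustrative function, not one the
driver defines:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* A first_burst_size of 0 means "no limit", as in the code above. */
static bool want_first_burst(bool port_enabled, bool prli_first_burst,
			     uint32_t payload_len, uint32_t first_burst_size)
{
	if (!port_enabled || !prli_first_burst)
		return false;
	return first_burst_size == 0 || payload_len <= first_burst_size;
}

int main(void)
{
	printf("%d\n", want_first_burst(true, true, 4096, 8192));	/* 1: fits */
	printf("%d\n", want_first_burst(true, true, 16384, 8192));	/* 0: too big */
	printf("%d\n", want_first_burst(true, true, 16384, 0));	/* 1: no limit */
	printf("%d\n", want_first_burst(true, false, 4096, 8192));	/* 0: no PRLI bit */
	return 0;
}
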
@@ -391,25 +455,22 @@
 
 	/* NVME RSP IU */
 	cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
-	cmd_pkt->nvme_rsp_dseg_address[0] = cpu_to_le32(LSD(fd->rspdma));
-	cmd_pkt->nvme_rsp_dseg_address[1] = cpu_to_le32(MSD(fd->rspdma));
+	put_unaligned_le64(fd->rspdma, &cmd_pkt->nvme_rsp_dseg_address);
 
 	/* NVME CMND IU */
 	cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
-	cmd_pkt->nvme_cmnd_dseg_address[0] = cpu_to_le32(LSD(fd->cmddma));
-	cmd_pkt->nvme_cmnd_dseg_address[1] = cpu_to_le32(MSD(fd->cmddma));
+	cmd_pkt->nvme_cmnd_dseg_address = cpu_to_le64(fd->cmddma);
 
 	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
 	cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);
 
 	/* One DSD is available in the Command Type NVME IOCB */
 	avail_dsds = 1;
-	cur_dsd = (uint32_t *)&cmd_pkt->nvme_data_dseg_address[0];
+	cur_dsd = &cmd_pkt->nvme_dsd;
 	sgl = fd->first_sgl;
 
 	/* Load data segments */
 	for_each_sg(sgl, sg, tot_dsds, i) {
-		dma_addr_t      sle_dma;
 		cont_a64_entry_t *cont_pkt;
 
 		/* Allocate additional continuation packets? */
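
The hunk above, like several others in this patch, folds the LSD()/MSD() pair
of cpu_to_le32() stores into one put_unaligned_le64(). The two forms produce
the same bytes because two little-endian 32-bit words, low word first, are
exactly a little-endian 64-bit word. A byte-level check; the memcpy shortcuts
assume a little-endian host:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Old style: split the DMA address into LSD()/MSD() halves and store
 * each as a little-endian 32-bit word. */
static void store_lsd_msd(uint8_t *dst, uint64_t dma)
{
	uint32_t lsd = (uint32_t)dma;
	uint32_t msd = (uint32_t)(dma >> 32);

	memcpy(dst, &lsd, 4);		/* cpu_to_le32(LSD(dma)) */
	memcpy(dst + 4, &msd, 4);	/* cpu_to_le32(MSD(dma)) */
}

/* New style: one little-endian 64-bit store. */
static void store_le64(uint8_t *dst, uint64_t dma)
{
	memcpy(dst, &dma, 8);		/* put_unaligned_le64(dma, dst) */
}

int main(void)
{
	uint8_t a[8], b[8];
	uint64_t dma = 0x1122334455667788ULL;

	store_lsd_msd(a, dma);
	store_le64(b, dma);
	printf("wire layouts %s\n", memcmp(a, b, 8) ? "differ" : "match");
	return 0;
}
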
@@ -428,17 +489,14 @@
 				req->ring_ptr++;
 			}
 			cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
-			*((uint32_t *)(&cont_pkt->entry_type)) =
-			    cpu_to_le32(CONTINUE_A64_TYPE);
+			put_unaligned_le32(CONTINUE_A64_TYPE,
+					   &cont_pkt->entry_type);
 
-			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
-			avail_dsds = 5;
+			cur_dsd = cont_pkt->dsd;
+			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
 		}
 
-		sle_dma = sg_dma_address(sg);
-		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
-		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
-		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+		append_dsd64(&cur_dsd, sg);
 		avail_dsds--;
 	}
 
@@ -474,27 +532,16 @@
 	int rval = -ENODEV;
 	srb_t *sp;
 	struct qla_qpair *qpair = hw_queue_handle;
-	struct nvme_private *priv;
+	struct nvme_private *priv = fd->private;
 	struct qla_nvme_rport *qla_rport = rport->private;
 
-	if (!fd || !qpair) {
-		ql_log(ql_log_warn, NULL, 0x2134,
-		    "NO NVMe request or Queue Handle\n");
-		return rval;
-	}
-
-	priv = fd->private;
 	fcport = qla_rport->fcport;
-	if (!fcport) {
-		ql_log(ql_log_warn, NULL, 0x210e, "No fcport ptr\n");
+
+	if (!qpair || !fcport || (qpair && !qpair->fw_started) ||
+	    (fcport && fcport->deleted))
 		return rval;
-	}
 
 	vha = fcport->vha;
-
-	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
-		return rval;
-
 	/*
 	 * If we know the dev is going away while the transport is still sending
 	 * IO's return busy back to stall the IO Q.  This happens when the
@@ -506,17 +553,21 @@
 		return -EBUSY;
 
 	/* Alloc SRB structure */
-	sp = qla2xxx_get_qpair_sp(qpair, fcport, GFP_ATOMIC);
+	sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
 	if (!sp)
 		return -EBUSY;
 
-	atomic_set(&sp->ref_count, 1);
 	init_waitqueue_head(&sp->nvme_ls_waitq);
+	kref_init(&sp->cmd_kref);
+	spin_lock_init(&priv->cmd_lock);
+	sp->priv = (void *)priv;
 	priv->sp = sp;
 	sp->type = SRB_NVME_CMD;
 	sp->name = "nvme_cmd";
 	sp->done = qla_nvme_sp_done;
+	sp->put_fn = qla_nvme_release_fcp_cmd_kref;
 	sp->qpair = qpair;
+	sp->vha = vha;
 	nvme = &sp->u.iocb_cmd;
 	nvme->u.nvme.desc = fd;
 
@@ -524,8 +575,10 @@
 	if (rval != QLA_SUCCESS) {
 		ql_log(ql_log_warn, vha, 0x212d,
 		    "qla2x00_start_nvme_mq failed = %d\n", rval);
-		atomic_dec(&sp->ref_count);
 		wake_up(&sp->nvme_ls_waitq);
+		sp->priv = NULL;
+		priv->sp = NULL;
+		qla2xxx_rel_qpair_sp(sp->qpair, sp);
 	}
 
 	return rval;
@@ -544,29 +597,16 @@
 static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
 {
 	fc_port_t *fcport;
-	struct qla_nvme_rport *qla_rport = rport->private, *trport;
+	struct qla_nvme_rport *qla_rport = rport->private;
 
 	fcport = qla_rport->fcport;
 	fcport->nvme_remote_port = NULL;
 	fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
-
-	list_for_each_entry_safe(qla_rport, trport,
-	    &fcport->vha->nvme_rport_list, list) {
-		if (qla_rport->fcport == fcport) {
-			list_del(&qla_rport->list);
-			break;
-		}
-	}
-	complete(&fcport->nvme_del_done);
-
-	if (!test_bit(UNLOADING, &fcport->vha->dpc_flags)) {
-		INIT_WORK(&fcport->free_work, qlt_free_session_done);
-		schedule_work(&fcport->free_work);
-	}
-
-	fcport->nvme_flag &= ~(NVME_FLAG_REGISTERED | NVME_FLAG_DELETING);
+	fcport->nvme_flag &= ~NVME_FLAG_DELETING;
 	ql_log(ql_log_info, fcport->vha, 0x2110,
-	    "remoteport_delete of %p completed.\n", fcport);
+	    "remoteport_delete of %p %8phN completed.\n",
+	    fcport, fcport->port_name);
+	complete(&fcport->nvme_del_done);
 }
 
 static struct nvme_fc_port_template qla_nvme_fc_transport = {
@@ -578,9 +618,8 @@
 	.ls_abort	= qla_nvme_ls_abort,
 	.fcp_io		= qla_nvme_post_cmd,
 	.fcp_abort	= qla_nvme_fcp_abort,
-	.poll_queue	= qla_nvme_poll,
 	.max_hw_queues  = 8,
-	.max_sgl_segments = 128,
+	.max_sgl_segments = 1024,
 	.max_dif_sgl_segments = 64,
 	.dma_boundary = 0xFFFFFFFF,
 	.local_priv_sz  = 8,
@@ -589,79 +628,36 @@
 	.fcprqst_priv_sz = sizeof(struct nvme_private),
 };
 
-#define NVME_ABORT_POLLING_PERIOD    2
-static int qla_nvme_wait_on_command(srb_t *sp)
+void qla_nvme_unregister_remote_port(struct fc_port *fcport)
 {
-	int ret = QLA_SUCCESS;
-
-	wait_event_timeout(sp->nvme_ls_waitq, (atomic_read(&sp->ref_count) > 1),
-	    NVME_ABORT_POLLING_PERIOD*HZ);
-
-	if (atomic_read(&sp->ref_count) > 1)
-		ret = QLA_FUNCTION_FAILED;
-
-	return ret;
-}
-
-void qla_nvme_abort(struct qla_hw_data *ha, struct srb *sp, int res)
-{
-	int rval;
-
-	if (ha->flags.fw_started) {
-		rval = ha->isp_ops->abort_command(sp);
-		if (!rval && !qla_nvme_wait_on_command(sp))
-			ql_log(ql_log_warn, NULL, 0x2112,
-			    "timed out waiting on sp=%p\n", sp);
-	} else {
-		sp->done(sp, res);
-	}
-}
-
-static void qla_nvme_unregister_remote_port(struct work_struct *work)
-{
-	struct fc_port *fcport = container_of(work, struct fc_port,
-	    nvme_del_work);
-	struct qla_nvme_rport *qla_rport, *trport;
+	int ret;
 
 	if (!IS_ENABLED(CONFIG_NVME_FC))
 		return;
 
 	ql_log(ql_log_warn, NULL, 0x2112,
-	    "%s: unregister remoteport on %p\n",__func__, fcport);
+	    "%s: unregister remoteport on %p %8phN\n",
+	    __func__, fcport, fcport->port_name);
 
-	list_for_each_entry_safe(qla_rport, trport,
-	    &fcport->vha->nvme_rport_list, list) {
-		if (qla_rport->fcport == fcport) {
-			ql_log(ql_log_info, fcport->vha, 0x2113,
-			    "%s: fcport=%p\n", __func__, fcport);
-			init_completion(&fcport->nvme_del_done);
-			nvme_fc_unregister_remoteport(
-			    fcport->nvme_remote_port);
-			wait_for_completion(&fcport->nvme_del_done);
-			break;
-		}
-	}
+	if (test_bit(PFLG_DRIVER_REMOVING, &fcport->vha->pci_flags))
+		nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);
+
+	init_completion(&fcport->nvme_del_done);
+	ret = nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
+	if (ret)
+		ql_log(ql_log_info, fcport->vha, 0x2114,
+			"%s: Failed to unregister nvme_remote_port (%d)\n",
+			    __func__, ret);
+	wait_for_completion(&fcport->nvme_del_done);
 }
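
qla_nvme_unregister_remote_port() is now a synchronous handshake: it arms
fcport->nvme_del_done, asks the transport to unregister, and blocks until
qla_nvme_remoteport_delete() calls complete(). The same init/complete/wait
shape modeled with pthreads (compile with -lpthread; names are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int done;	/* the completion's "done" state */

/* Plays qla_nvme_remoteport_delete(): runs later, signals the waiter. */
static void *remoteport_delete(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	done = 1;			/* complete(&fcport->nvme_del_done) */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	done = 0;			/* init_completion() */
	pthread_create(&t, NULL, remoteport_delete, NULL);

	pthread_mutex_lock(&lock);	/* wait_for_completion() */
	while (!done)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	puts("remoteport delete completed");
	return 0;
}
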
 
 void qla_nvme_delete(struct scsi_qla_host *vha)
 {
-	struct qla_nvme_rport *qla_rport, *trport;
-	fc_port_t *fcport;
 	int nv_ret;
 
 	if (!IS_ENABLED(CONFIG_NVME_FC))
 		return;
 
-	list_for_each_entry_safe(qla_rport, trport,
-	    &vha->nvme_rport_list, list) {
-		fcport = qla_rport->fcport;
-
-		ql_log(ql_log_info, fcport->vha, 0x2114, "%s: fcport=%p\n",
-		    __func__, fcport);
-
-		nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);
-	}
-
 	if (vha->nvme_local_port) {
 		init_completion(&vha->nvme_del_done);
 		ql_log(ql_log_info, vha, 0x2116,
diff --git a/drivers/scsi/qla2xxx/qla_nvme.h b/drivers/scsi/qla2xxx/qla_nvme.h
index 4941d10..ef91290 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.h
+++ b/drivers/scsi/qla2xxx/qla_nvme.h
@@ -7,12 +7,12 @@
 #ifndef __QLA_NVME_H
 #define __QLA_NVME_H
 
-#include <linux/blk-mq.h>
 #include <uapi/scsi/fc/fc_fs.h>
 #include <uapi/scsi/fc/fc_els.h>
 #include <linux/nvme-fc-driver.h>
 
 #include "qla_def.h"
+#include "qla_dsd.h"
 
 /* default dev loss time (seconds) before transport tears down ctrl */
 #define NVME_FC_DEV_LOSS_TMO  30
@@ -33,10 +33,10 @@
 	struct work_struct ls_work;
 	struct work_struct abort_work;
 	int comp_status;
+	spinlock_t cmd_lock;
 };
 
 struct qla_nvme_rport {
-	struct list_head list;
 	struct fc_port *fcport;
 };
 
@@ -57,23 +57,22 @@
 	uint64_t rsvd;
 
 	uint16_t control_flags;         /* Control Flags */
-#define CF_NVME_ENABLE                  BIT_9
+#define CF_NVME_FIRST_BURST_ENABLE	BIT_11
 #define CF_DIF_SEG_DESCR_ENABLE         BIT_3
 #define CF_DATA_SEG_DESCR_ENABLE        BIT_2
 #define CF_READ_DATA                    BIT_1
 #define CF_WRITE_DATA                   BIT_0
 
 	uint16_t nvme_cmnd_dseg_len;             /* Data segment length. */
-	uint32_t nvme_cmnd_dseg_address[2];      /* Data segment address. */
-	uint32_t nvme_rsp_dseg_address[2];       /* Data segment address. */
+	__le64	 nvme_cmnd_dseg_address __packed;/* Data segment address. */
+	__le64	 nvme_rsp_dseg_address __packed; /* Data segment address. */
 
 	uint32_t byte_count;            /* Total byte count. */
 
 	uint8_t port_id[3];             /* PortID of destination port. */
 	uint8_t vp_index;
 
-	uint32_t nvme_data_dseg_address[2];      /* Data segment address. */
-	uint32_t nvme_data_dseg_len;             /* Data segment length. */
+	struct dsd64 nvme_dsd;
 };
 
 #define PT_LS4_REQUEST 0x89	/* Link Service pass-through IOCB (request) */
@@ -101,10 +100,7 @@
 	uint32_t rsvd3;
 	uint32_t rx_byte_count;
 	uint32_t tx_byte_count;
-	uint32_t dseg0_address[2];
-	uint32_t dseg0_len;
-	uint32_t dseg1_address[2];
-	uint32_t dseg1_len;
+	struct dsd64 dsd[2];
 };
 
 #define PT_LS4_UNSOL 0x56	/* pass-up unsolicited rec FC-NVMe request */
@@ -122,7 +118,7 @@
 	uint32_t exchange_address;
 	uint8_t d_id[3];
 	uint8_t r_ctl;
-	uint8_t s_id[3];
+	be_id_t s_id;
 	uint8_t cs_ctl;
 	uint8_t f_ctl[3];
 	uint8_t type;
@@ -145,8 +141,7 @@
 int qla_nvme_register_hba(struct scsi_qla_host *);
 int  qla_nvme_register_remote(struct scsi_qla_host *, struct fc_port *);
 void qla_nvme_delete(struct scsi_qla_host *);
-void qla_nvme_abort(struct qla_hw_data *, struct srb *sp, int res);
 void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *, struct pt_ls4_request *,
     struct req_que *);
-void qla24xx_async_gffid_sp_done(void *, int);
+void qla24xx_async_gffid_sp_done(struct srb *sp, int);
 #endif
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index de2bc78..2b2028f 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -6,6 +6,7 @@
  */
 #include "qla_def.h"
 #include <linux/delay.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
 #include <linux/pci.h>
 #include <linux/ratelimit.h>
 #include <linux/vmalloc.h>
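
The include added above supplies readq()/writeq() on platforms without native
64-bit MMIO by issuing two 32-bit accesses, low word first (its sibling header
io-64-nonatomic-hi-lo.h does the opposite order). A model of the lo-hi read
composition, with plain memory standing in for device registers:

#include <stdint.h>
#include <stdio.h>

/* Model of the lo-hi fallback: two 32-bit reads, low dword first,
 * recombined into one 64-bit value. */
static uint64_t lo_hi_readq_model(const volatile uint32_t *reg)
{
	uint32_t low = reg[0];	/* readl(addr) */
	uint32_t high = reg[1];	/* readl(addr + 4) */

	return ((uint64_t)high << 32) | low;
}

int main(void)
{
	uint32_t reg[2] = { 0x55667788u, 0x11223344u };

	printf("readq -> 0x%llx\n",
	       (unsigned long long)lo_hi_readq_model(reg));
	return 0;
}
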
@@ -608,6 +609,7 @@
 	} else if (addr_in_range(addr, QLA82XX_ADDR_OCM0,
 		QLA82XX_ADDR_OCM0_MAX)) {
 		unsigned int temp1;
+
 		if ((addr & 0x00ff800) == 0xff800) {
 			ql_log(ql_log_warn, vha, 0xb004,
 			    "%s: QM access not handled.\n", __func__);
@@ -990,6 +992,7 @@
 qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val)
 {
 	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
 	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_RDSR);
 	qla82xx_wait_rom_busy(ha);
 	if (qla82xx_wait_rom_done(ha)) {
@@ -1030,6 +1033,7 @@
 qla82xx_flash_set_write_enable(struct qla_hw_data *ha)
 {
 	uint32_t val;
+
 	qla82xx_wait_rom_busy(ha);
 	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
 	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WREN);
@@ -1047,6 +1051,7 @@
 qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val)
 {
 	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
 	if (qla82xx_flash_set_write_enable(ha))
 		return -1;
 	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, val);
@@ -1063,6 +1068,7 @@
 qla82xx_write_disable_flash(struct qla_hw_data *ha)
 {
 	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
 	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WRDI);
 	if (qla82xx_wait_rom_done(ha)) {
 		ql_log(ql_log_warn, vha, 0xb00f,
@@ -1435,6 +1441,7 @@
 	long memaddr = BOOTLD_START;
 	u64 data;
 	u32 high, low;
+
 	size = (IMAGE_START - BOOTLD_START) / 8;
 
 	for (i = 0; i < size; i++) {
@@ -1757,11 +1764,14 @@
  *
  * Returns 0 on success.
  */
-void
+int
 qla82xx_reset_chip(scsi_qla_host_t *vha)
 {
 	struct qla_hw_data *ha = vha->hw;
+
 	ha->isp_ops->disable_intrs(ha);
+
+	return QLA_SUCCESS;
 }
 
 void qla82xx_config_rings(struct scsi_qla_host *vha)
@@ -1778,10 +1788,8 @@
 	icb->response_q_inpointer = cpu_to_le16(0);
 	icb->request_q_length = cpu_to_le16(req->length);
 	icb->response_q_length = cpu_to_le16(rsp->length);
-	icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
-	icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
-	icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
-	icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
+	put_unaligned_le64(req->dma, &icb->request_q_address);
+	put_unaligned_le64(rsp->dma, &icb->response_q_address);
 
 	WRT_REG_DWORD(&reg->req_q_out[0], 0);
 	WRT_REG_DWORD(&reg->rsp_q_in[0], 0);
@@ -1969,7 +1977,7 @@
 	} while (--retries);
 
 	ql_log(ql_log_fatal, vha, 0x00ac,
-	    "Rcv Peg initializatin failed: 0x%x.\n", val);
+	    "Rcv Peg initialization failed: 0x%x.\n", val);
 	read_lock(&ha->hw_lock);
 	qla82xx_wr_32(ha, CRB_RCVPEG_STATE, PHAN_INITIALIZE_FAILED);
 	read_unlock(&ha->hw_lock);
@@ -1977,7 +1985,7 @@
 }
 
 /* ISR related functions */
-static struct qla82xx_legacy_intr_set legacy_intr[] = \
+static struct qla82xx_legacy_intr_set legacy_intr[] =
 	QLA82XX_LEGACY_INTR_CONFIG;
 
 /*
@@ -1992,6 +2000,7 @@
 	uint16_t __iomem *wptr;
 	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
+
 	wptr = (uint16_t __iomem *)&reg->mailbox_out[1];
 
 	/* Load return mailbox registers. */
@@ -2010,7 +2019,7 @@
 
 /**
 * qla82xx_intr_handler() - Process interrupts for the ISP82xx.
- * @irq:
+ * @irq: interrupt number
  * @dev_id: SCSI driver HA context
  *
  * Called by system whenever the host adapter generates an interrupt.
@@ -2028,7 +2037,7 @@
 	unsigned long	flags;
 	unsigned long	iter;
 	uint32_t	stat = 0;
-	uint16_t	mb[4];
+	uint16_t	mb[8];
 
 	rsp = (struct rsp_que *) dev_id;
 	if (!rsp) {
@@ -2112,7 +2121,7 @@
 	unsigned long flags;
 	uint32_t stat = 0;
 	uint32_t host_int = 0;
-	uint16_t mb[4];
+	uint16_t mb[8];
 
 	rsp = (struct rsp_que *) dev_id;
 	if (!rsp) {
@@ -2208,7 +2217,7 @@
 	int status = 0;
 	uint32_t stat;
 	uint32_t host_int = 0;
-	uint16_t mb[4];
+	uint16_t mb[8];
 	unsigned long flags;
 
 	rsp = (struct rsp_que *) dev_id;
@@ -2262,6 +2271,7 @@
 qla82xx_enable_intrs(struct qla_hw_data *ha)
 {
 	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
 	qla82xx_mbx_intr_enable(vha);
 	spin_lock_irq(&ha->hardware_lock);
 	if (IS_QLA8044(ha))
@@ -2276,7 +2286,10 @@
 qla82xx_disable_intrs(struct qla_hw_data *ha)
 {
 	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
-	qla82xx_mbx_intr_disable(vha);
+
+	if (ha->interrupts_on)
+		qla82xx_mbx_intr_disable(vha);
+
 	spin_lock_irq(&ha->hardware_lock);
 	if (IS_QLA8044(ha))
 		qla8044_wr_reg(ha, LEG_INTR_MASK_OFFSET, 1);
@@ -2658,8 +2671,8 @@
 /*
  * Address and length are byte address
  */
-uint8_t *
-qla82xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+void *
+qla82xx_read_optrom_data(struct scsi_qla_host *vha, void *buf,
 	uint32_t offset, uint32_t length)
 {
 	scsi_block_requests(vha->host);
@@ -2767,15 +2780,14 @@
 }
 
 int
-qla82xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+qla82xx_write_optrom_data(struct scsi_qla_host *vha, void *buf,
 	uint32_t offset, uint32_t length)
 {
 	int rval;
 
 	/* Suspend HBA. */
 	scsi_block_requests(vha->host);
-	rval = qla82xx_write_flash_data(vha, (uint32_t *)buf, offset,
-		length >> 2);
+	rval = qla82xx_write_flash_data(vha, buf, offset, length >> 2);
 	scsi_unblock_requests(vha->host);
 
 	/* Convert return ISP82xx to generic */
@@ -3276,7 +3288,7 @@
 		case QLA8XXX_DEV_NEED_QUIESCENT:
 			qla82xx_need_qsnt_handler(vha);
 			/* Reset timeout value after quiescence handler */
-			dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout\
+			dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout
 							 * HZ);
 			break;
 		case QLA8XXX_DEV_QUIESCENT:
@@ -3291,7 +3303,7 @@
 			qla82xx_idc_lock(ha);
 
 			/* Reset timeout value after quiescence handler */
-			dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout\
+			dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout
 							 * HZ);
 			break;
 		case QLA8XXX_DEV_FAILED:
@@ -3676,7 +3688,7 @@
 			for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
 				sp = req->outstanding_cmds[cnt];
 				if (sp) {
-					if ((!sp->u.scmd.ctx ||
+					if ((!sp->u.scmd.crc_ctx ||
 					    (sp->flags &
 						SRB_FCP_CMND_DMA_VALID)) &&
 						!ha->flags.isp82xx_fw_hung) {
@@ -3699,11 +3711,13 @@
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 		/* Wait for pending cmds (physical and virtual) to complete */
-		if (!qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
+		if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
 		    WAIT_HOST) == QLA_SUCCESS) {
 			ql_dbg(ql_dbg_init, vha, 0x00b3,
 			    "Done wait for "
 			    "pending commands.\n");
+		} else {
+			WARN_ON_ONCE(true);
 		}
 	}
 }
@@ -4222,7 +4236,7 @@
 		goto md_failed;
 	}
 
-	entry_hdr = (qla82xx_md_entry_hdr_t *) \
+	entry_hdr = (qla82xx_md_entry_hdr_t *)
 	    (((uint8_t *)ha->md_tmplt_hdr) + tmplt_hdr->first_entry_offset);
 
 	/* Walk through the entry headers */
@@ -4329,7 +4343,7 @@
 		data_collected = (uint8_t *)data_ptr -
 		    (uint8_t *)ha->md_dump;
 skip_nxt_entry:
-		entry_hdr = (qla82xx_md_entry_hdr_t *) \
+		entry_hdr = (qla82xx_md_entry_hdr_t *)
 		    (((uint8_t *)entry_hdr) + entry_hdr->entry_size);
 	}
 
@@ -4464,6 +4478,7 @@
 
 	int rval;
 	struct qla_hw_data *ha = vha->hw;
+
 	qla82xx_idc_lock(ha);
 	rval = qla82xx_mbx_beacon_ctl(vha, 1);
 
@@ -4484,6 +4499,7 @@
 
 	int rval;
 	struct qla_hw_data *ha = vha->hw;
+
 	qla82xx_idc_lock(ha);
 	rval = qla82xx_mbx_beacon_ctl(vha, 0);
 
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 71a4109..230abee 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -7,7 +7,7 @@
 #ifndef __QLA_NX_H
 #define __QLA_NX_H
 
-#include <linux/io-64-nonatomic-lo-hi.h>
+#include <scsi/scsi.h>
 
 /*
  * Following are the states of the Phantom. Phantom will set them and
@@ -486,13 +486,13 @@
 #define QLA82XX_ADDR_QDR_NET		(0x0000000300000000ULL)
 #define QLA82XX_P3_ADDR_QDR_NET_MAX	(0x0000000303ffffffULL)
 
-#define QLA82XX_PCI_CRBSPACE		(unsigned long)0x06000000
-#define QLA82XX_PCI_DIRECT_CRB		(unsigned long)0x04400000
-#define QLA82XX_PCI_CAMQM		(unsigned long)0x04800000
-#define QLA82XX_PCI_CAMQM_MAX		(unsigned long)0x04ffffff
-#define QLA82XX_PCI_DDR_NET		(unsigned long)0x00000000
-#define QLA82XX_PCI_QDR_NET		(unsigned long)0x04000000
-#define QLA82XX_PCI_QDR_NET_MAX		(unsigned long)0x043fffff
+#define QLA82XX_PCI_CRBSPACE		0x06000000UL
+#define QLA82XX_PCI_DIRECT_CRB		0x04400000UL
+#define QLA82XX_PCI_CAMQM		0x04800000UL
+#define QLA82XX_PCI_CAMQM_MAX		0x04ffffffUL
+#define QLA82XX_PCI_DDR_NET		0x00000000UL
+#define QLA82XX_PCI_QDR_NET		0x04000000UL
+#define QLA82XX_PCI_QDR_NET_MAX		0x043fffffUL
 
 /*
  *   Register offsets for MN
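
The constant rewrite above trades (unsigned long) casts for UL suffixes. Both
have type unsigned long at compile time, but only the suffixed form is a
genuine integer constant the preprocessor can evaluate, for example inside an
#if. A small demonstration (hypothetical macro names):

#include <stdio.h>

#define OLD_STYLE ((unsigned long)0x06000000)	/* cast: rejected inside #if */
#define NEW_STYLE 0x06000000UL			/* suffix: a plain constant */

#if NEW_STYLE > 0x04000000UL	/* legal: the preprocessor evaluates UL */
#define ABOVE_DIRECT_CRB 1
#else
#define ABOVE_DIRECT_CRB 0
#endif

int main(void)
{
	printf("equal at runtime: %d, usable in #if: %d\n",
	       OLD_STYLE == NEW_STYLE, ABOVE_DIRECT_CRB);
	return 0;
}
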
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index 3a2b028..c056f46 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -559,12 +559,12 @@
 /*
  * Address and length are byte address
  */
-uint8_t *
-qla8044_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+void *
+qla8044_read_optrom_data(struct scsi_qla_host *vha, void *buf,
 	uint32_t offset, uint32_t length)
 {
 	scsi_block_requests(vha->host);
-	if (qla8044_read_flash_data(vha, (uint8_t *)buf, offset, length / 4)
+	if (qla8044_read_flash_data(vha, buf, offset, length / 4)
 	    != QLA_SUCCESS) {
 		ql_log(ql_log_warn, vha,  0xb08d,
 		    "%s: Failed to read from flash\n",
@@ -2810,7 +2810,7 @@
 
 #define ISP8044_PEX_DMA_ENGINE_INDEX		8
 #define ISP8044_PEX_DMA_BASE_ADDRESS		0x77320000
-#define ISP8044_PEX_DMA_NUM_OFFSET		0x10000
+#define ISP8044_PEX_DMA_NUM_OFFSET		0x10000UL
 #define ISP8044_PEX_DMA_CMD_ADDR_LOW		0x0
 #define ISP8044_PEX_DMA_CMD_ADDR_HIGH		0x04
 #define ISP8044_PEX_DMA_CMD_STS_AND_CNTRL	0x08
@@ -3007,10 +3007,9 @@
 	uint16_t count;
 	uint32_t poll, mask, modify_mask;
 	uint32_t wait_count = 0;
-
 	uint32_t *data_ptr = *d_ptr;
-
 	struct qla8044_minidump_entry_rddfe *rddfe;
+
 	rddfe = (struct qla8044_minidump_entry_rddfe *) entry_hdr;
 
 	addr1 = rddfe->addr_1;
@@ -3797,7 +3796,7 @@
 }
 
 int
-qla8044_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+qla8044_write_optrom_data(struct scsi_qla_host *vha, void *buf,
 			  uint32_t offset, uint32_t length)
 {
 	int rval = QLA_FUNCTION_FAILED, i, burst_iter_count;
@@ -3878,7 +3877,7 @@
 #define PF_BITS_MASK		(0xF << 16)
 /**
  * qla8044_intr_handler() - Process interrupts for the ISP8044
- * @irq:
+ * @irq: interrupt number
  * @dev_id: SCSI driver HA context
  *
  * Called by system whenever the host adapter generates an interrupt.
@@ -3896,7 +3895,7 @@
 	unsigned long	flags;
 	unsigned long	iter;
 	uint32_t	stat;
-	uint16_t	mb[4];
+	uint16_t	mb[8];
 	uint32_t leg_int_ptr = 0, pf_bit;
 
 	rsp = (struct rsp_que *) dev_id;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index ae9fd2d..726ad4c 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -14,6 +14,8 @@
 #include <linux/kobject.h>
 #include <linux/slab.h>
 #include <linux/blk-mq-pci.h>
+#include <linux/refcount.h>
+
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsicam.h>
 #include <scsi/scsi_transport.h>
@@ -40,7 +42,7 @@
 /*
  * error level for logging
  */
-int ql_errlev = ql_log_all;
+uint ql_errlev = 0x8001;
 
 static int ql2xenableclass2;
 module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR);
@@ -65,9 +67,9 @@
 MODULE_PARM_DESC(ql2xplogiabsentdevice,
 		"Option to enable PLOGI to devices that are not present after "
 		"a Fabric scan.  This is needed for several broken switches. "
-		"Default is 0 - no PLOGI. 1 - perfom PLOGI.");
+		"Default is 0 - no PLOGI. 1 - perform PLOGI.");
 
-int ql2xloginretrycount = 0;
+int ql2xloginretrycount;
 module_param(ql2xloginretrycount, int, S_IRUGO);
 MODULE_PARM_DESC(ql2xloginretrycount,
 		"Specify an alternate value for the NVRAM login retry count.");
@@ -106,7 +108,7 @@
 		"Set to control shifting of command type processing "
 		"based on total number of SG elements.");
 
-int ql2xfdmienable=1;
+int ql2xfdmienable = 1;
 module_param(ql2xfdmienable, int, S_IRUGO|S_IWUSR);
 module_param_named(fdmi, ql2xfdmienable, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(ql2xfdmienable,
@@ -152,7 +154,7 @@
 		"  1 -- Error isolation enabled only for DIX Type 0\n"
 		"  2 -- Error isolation enabled for all Types\n");
 
-int ql2xiidmaenable=1;
+int ql2xiidmaenable = 1;
 module_param(ql2xiidmaenable, int, S_IRUGO);
 MODULE_PARM_DESC(ql2xiidmaenable,
 		"Enables iIDMA settings "
@@ -204,7 +206,7 @@
 module_param(ql2xasynctmfenable, int, S_IRUGO);
 MODULE_PARM_DESC(ql2xasynctmfenable,
 		"Enables issue of TM IOCBs asynchronously via IOCB mechanism"
-		"Default is 0 - Issue TM IOCBs via mailbox mechanism.");
+		"Default is 1 - Issue TM IOCBs via mailbox mechanism.");
 
 int ql2xdontresethba;
 module_param(ql2xdontresethba, int, S_IRUGO|S_IWUSR);
@@ -232,7 +234,7 @@
 		"0 - MiniDump disabled. "
 		"1 (Default) - MiniDump enabled.");
 
-int ql2xexlogins = 0;
+int ql2xexlogins;
 module_param(ql2xexlogins, uint, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(ql2xexlogins,
 		 "Number of extended Logins. "
@@ -248,7 +250,7 @@
 MODULE_PARM_DESC(ql2xiniexchg,
 	"Number of initiator exchanges.");
 
-int ql2xfwholdabts = 0;
+int ql2xfwholdabts;
 module_param(ql2xfwholdabts, int, S_IRUGO);
 MODULE_PARM_DESC(ql2xfwholdabts,
 		"Allow FW to hold status IOCB until ABTS rsp received. "
@@ -283,20 +285,26 @@
 		 "Reserve 1/2 of emergency exchanges for ELS.\n"
 		 " 0 (default): disabled");
 
-/*
- * SCSI host template entry points
- */
-static int qla2xxx_slave_configure(struct scsi_device * device);
-static int qla2xxx_slave_alloc(struct scsi_device *);
-static int qla2xxx_scan_finished(struct Scsi_Host *, unsigned long time);
-static void qla2xxx_scan_start(struct Scsi_Host *);
-static void qla2xxx_slave_destroy(struct scsi_device *);
-static int qla2xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
-static int qla2xxx_eh_abort(struct scsi_cmnd *);
-static int qla2xxx_eh_device_reset(struct scsi_cmnd *);
-static int qla2xxx_eh_target_reset(struct scsi_cmnd *);
-static int qla2xxx_eh_bus_reset(struct scsi_cmnd *);
-static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
+static int ql2xprotmask;
+module_param(ql2xprotmask, int, 0644);
+MODULE_PARM_DESC(ql2xprotmask,
+		 "Override DIF/DIX protection capabilities mask\n"
+		 "Default is 0 which sets protection mask based on "
+		 "capabilities reported by HBA firmware.\n");
+
+static int ql2xprotguard;
+module_param(ql2xprotguard, int, 0644);
+MODULE_PARM_DESC(ql2xprotguard, "Override choice of DIX checksum\n"
+		 "  0 -- Let HBA firmware decide\n"
+		 "  1 -- Force T10 CRC\n"
+		 "  2 -- Force IP checksum\n");
+
+int ql2xdifbundlinginternalbuffers;
+module_param(ql2xdifbundlinginternalbuffers, int, 0644);
+MODULE_PARM_DESC(ql2xdifbundlinginternalbuffers,
+    "Force using internal buffers for DIF information\n"
+    "0 (Default). Based on check.\n"
+    "1 Force using internal buffers\n");
 
 static void qla2x00_clear_drv_active(struct qla_hw_data *);
 static void qla2x00_free_device(scsi_qla_host_t *);
@@ -304,38 +312,6 @@
 static void qla2x00_destroy_deferred_work(struct qla_hw_data *);
 
 
-struct scsi_host_template qla2xxx_driver_template = {
-	.module			= THIS_MODULE,
-	.name			= QLA2XXX_DRIVER_NAME,
-	.queuecommand		= qla2xxx_queuecommand,
-
-	.eh_timed_out		= fc_eh_timed_out,
-	.eh_abort_handler	= qla2xxx_eh_abort,
-	.eh_device_reset_handler = qla2xxx_eh_device_reset,
-	.eh_target_reset_handler = qla2xxx_eh_target_reset,
-	.eh_bus_reset_handler	= qla2xxx_eh_bus_reset,
-	.eh_host_reset_handler	= qla2xxx_eh_host_reset,
-
-	.slave_configure	= qla2xxx_slave_configure,
-
-	.slave_alloc		= qla2xxx_slave_alloc,
-	.slave_destroy		= qla2xxx_slave_destroy,
-	.scan_finished		= qla2xxx_scan_finished,
-	.scan_start		= qla2xxx_scan_start,
-	.change_queue_depth	= scsi_change_queue_depth,
-	.map_queues             = qla2xxx_map_queues,
-	.this_id		= -1,
-	.cmd_per_lun		= 3,
-	.use_clustering		= ENABLE_CLUSTERING,
-	.sg_tablesize		= SG_ALL,
-
-	.max_sectors		= 0xFFFF,
-	.shost_attrs		= qla2x00_host_attrs,
-
-	.supported_mode		= MODE_INITIATOR,
-	.track_queue_depth	= 1,
-};
-
 static struct scsi_transport_template *qla2xxx_transport_template = NULL;
 struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;
 
@@ -389,21 +365,24 @@
     struct rsp_que *rsp)
 {
 	struct qla_hw_data *ha = vha->hw;
+
 	rsp->qpair = ha->base_qpair;
 	rsp->req = req;
+	ha->base_qpair->hw = ha;
 	ha->base_qpair->req = req;
 	ha->base_qpair->rsp = rsp;
 	ha->base_qpair->vha = vha;
 	ha->base_qpair->qp_lock_ptr = &ha->hardware_lock;
 	ha->base_qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
 	ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q];
+	ha->base_qpair->srb_mempool = ha->srb_mempool;
 	INIT_LIST_HEAD(&ha->base_qpair->hints_list);
 	ha->base_qpair->enable_class_2 = ql2xenableclass2;
 	/* init qpair to this cpu. Will adjust at run time. */
 	qla_cpu_update(rsp->qpair, raw_smp_processor_id());
 	ha->base_qpair->pdev = ha->pdev;
 
-	if (IS_QLA27XX(ha) || IS_QLA83XX(ha))
+	if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))
 		ha->base_qpair->reqq_start_iocbs = qla_83xx_start_iocbs;
 }
 
@@ -411,6 +390,7 @@
 				struct rsp_que *rsp)
 {
 	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
 	ha->req_q_map = kcalloc(ha->max_req_queues, sizeof(struct req_que *),
 				GFP_KERNEL);
 	if (!ha->req_q_map) {
@@ -556,80 +536,70 @@
 }
 
 static char *
-qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str)
+qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
 {
 	struct qla_hw_data *ha = vha->hw;
-	static char *pci_bus_modes[] = {
+	static const char *const pci_bus_modes[] = {
 		"33", "66", "100", "133",
 	};
 	uint16_t pci_bus;
 
-	strcpy(str, "PCI");
 	pci_bus = (ha->pci_attr & (BIT_9 | BIT_10)) >> 9;
 	if (pci_bus) {
-		strcat(str, "-X (");
-		strcat(str, pci_bus_modes[pci_bus]);
+		snprintf(str, str_len, "PCI-X (%s MHz)",
+			 pci_bus_modes[pci_bus]);
 	} else {
 		pci_bus = (ha->pci_attr & BIT_8) >> 8;
-		strcat(str, " (");
-		strcat(str, pci_bus_modes[pci_bus]);
+		snprintf(str, str_len, "PCI (%s MHz)", pci_bus_modes[pci_bus]);
 	}
-	strcat(str, " MHz)");
 
-	return (str);
+	return str;
 }
 
 static char *
-qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str)
+qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
 {
-	static char *pci_bus_modes[] = { "33", "66", "100", "133", };
+	static const char *const pci_bus_modes[] = {
+		"33", "66", "100", "133",
+	};
 	struct qla_hw_data *ha = vha->hw;
 	uint32_t pci_bus;
 
 	if (pci_is_pcie(ha->pdev)) {
-		char lwstr[6];
 		uint32_t lstat, lspeed, lwidth;
+		const char *speed_str;
 
 		pcie_capability_read_dword(ha->pdev, PCI_EXP_LNKCAP, &lstat);
 		lspeed = lstat & PCI_EXP_LNKCAP_SLS;
 		lwidth = (lstat & PCI_EXP_LNKCAP_MLW) >> 4;
 
-		strcpy(str, "PCIe (");
 		switch (lspeed) {
 		case 1:
-			strcat(str, "2.5GT/s ");
+			speed_str = "2.5GT/s";
 			break;
 		case 2:
-			strcat(str, "5.0GT/s ");
+			speed_str = "5.0GT/s";
 			break;
 		case 3:
-			strcat(str, "8.0GT/s ");
+			speed_str = "8.0GT/s";
 			break;
 		default:
-			strcat(str, "<unknown> ");
+			speed_str = "<unknown>";
 			break;
 		}
-		snprintf(lwstr, sizeof(lwstr), "x%d)", lwidth);
-		strcat(str, lwstr);
+		snprintf(str, str_len, "PCIe (%s x%d)", speed_str, lwidth);
 
 		return str;
 	}
 
-	strcpy(str, "PCI");
 	pci_bus = (ha->pci_attr & CSRX_PCIX_BUS_MODE_MASK) >> 8;
-	if (pci_bus == 0 || pci_bus == 8) {
-		strcat(str, " (");
-		strcat(str, pci_bus_modes[pci_bus >> 3]);
-	} else {
-		strcat(str, "-X ");
-		if (pci_bus & BIT_2)
-			strcat(str, "Mode 2");
-		else
-			strcat(str, "Mode 1");
-		strcat(str, " (");
-		strcat(str, pci_bus_modes[pci_bus & ~BIT_2]);
-	}
-	strcat(str, " MHz)");
+	if (pci_bus == 0 || pci_bus == 8)
+		snprintf(str, str_len, "PCI (%s MHz)",
+			 pci_bus_modes[pci_bus >> 3]);
+	else
+		snprintf(str, str_len, "PCI-X Mode %d (%s MHz)",
+			 pci_bus & 4 ? 2 : 1,
+			 pci_bus_modes[pci_bus & 3]);
 
 	return str;
 }
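
Both pci_info_str() helpers above now take the destination size and build the
string with one bounded snprintf() instead of a strcpy()/strcat() chain that
silently trusted the caller's buffer. The difference in isolation:

#include <stdio.h>

int main(void)
{
	char str[12];	/* deliberately small */

	/* New style: one bounded write; output is truncated, never overflowed. */
	snprintf(str, sizeof(str), "PCIe (%s x%d)", "8.0GT/s", 16);
	printf("'%s' (%zu-byte buffer intact)\n", str, sizeof(str));

	/* The removed strcat() chain had no such bound: each call assumed
	 * the caller sized the buffer for the worst case. */
	return 0;
}
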
@@ -682,13 +652,10 @@
 	return str;
 }
 
-void
-qla2x00_sp_free_dma(void *ptr)
+void qla2x00_sp_free_dma(srb_t *sp)
 {
-	srb_t *sp = ptr;
 	struct qla_hw_data *ha = sp->vha->hw;
 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
-	void *ctx = GET_CMD_CTX_SP(sp);
 
 	if (sp->flags & SRB_DMA_VALID) {
 		scsi_dma_unmap(cmd);
@@ -701,24 +668,21 @@
 		sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
 	}
 
-	if (!ctx)
-		goto end;
-
 	if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
 		/* List assured to be having elements */
-		qla2x00_clean_dsd_pool(ha, ctx);
+		qla2x00_clean_dsd_pool(ha, sp->u.scmd.crc_ctx);
 		sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
 	}
 
 	if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
-		struct crc_context *ctx0 = ctx;
+		struct crc_context *ctx0 = sp->u.scmd.crc_ctx;
 
 		dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma);
 		sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
 	}
 
 	if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
-		struct ct6_dsd *ctx1 = ctx;
+		struct ct6_dsd *ctx1 = sp->u.scmd.ct6_ctx;
 
 		dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
 		    ctx1->fcp_cmnd_dma);
@@ -727,44 +691,30 @@
 		ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
 		mempool_free(ctx1, ha->ctx_mempool);
 	}
-
-end:
-	if (sp->type != SRB_NVME_CMD && sp->type != SRB_NVME_LS) {
-		CMD_SP(cmd) = NULL;
-		qla2x00_rel_sp(sp);
-	}
 }
 
-void
-qla2x00_sp_compl(void *ptr, int res)
+void qla2x00_sp_compl(srb_t *sp, int res)
 {
-	srb_t *sp = ptr;
 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+	struct completion *comp = sp->comp;
 
-	cmd->result = res;
+	if (WARN_ON_ONCE(atomic_read(&sp->ref_count) == 0))
+		return;
 
-	if (atomic_read(&sp->ref_count) == 0) {
-		ql_dbg(ql_dbg_io, sp->vha, 0x3015,
-		    "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
-		    sp, GET_CMD_SP(sp));
-		if (ql2xextended_error_logging & ql_dbg_io)
-			WARN_ON(atomic_read(&sp->ref_count) == 0);
-		return;
-	}
-	if (!atomic_dec_and_test(&sp->ref_count))
-		return;
+	atomic_dec(&sp->ref_count);
 
 	sp->free(sp);
+	cmd->result = res;
+	CMD_SP(cmd) = NULL;
 	cmd->scsi_done(cmd);
+	if (comp)
+		complete(comp);
 }
 
-void
-qla2xxx_qpair_sp_free_dma(void *ptr)
+void qla2xxx_qpair_sp_free_dma(srb_t *sp)
 {
-	srb_t *sp = (srb_t *)ptr;
 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
 	struct qla_hw_data *ha = sp->fcport->vha->hw;
-	void *ctx = GET_CMD_CTX_SP(sp);
 
 	if (sp->flags & SRB_DMA_VALID) {
 		scsi_dma_unmap(cmd);
@@ -777,62 +727,86 @@
 		sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
 	}
 
-	if (!ctx)
-		goto end;
-
 	if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
 		/* List assured to be having elements */
-		qla2x00_clean_dsd_pool(ha, ctx);
+		qla2x00_clean_dsd_pool(ha, sp->u.scmd.crc_ctx);
 		sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
 	}
 
-	if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
-		struct crc_context *ctx0 = ctx;
+	if (sp->flags & SRB_DIF_BUNDL_DMA_VALID) {
+		struct crc_context *difctx = sp->u.scmd.crc_ctx;
+		struct dsd_dma *dif_dsd, *nxt_dsd;
 
-		dma_pool_free(ha->dl_dma_pool, ctx, ctx0->crc_ctx_dma);
-		sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
+		list_for_each_entry_safe(dif_dsd, nxt_dsd,
+		    &difctx->ldif_dma_hndl_list, list) {
+			list_del(&dif_dsd->list);
+			dma_pool_free(ha->dif_bundl_pool, dif_dsd->dsd_addr,
+			    dif_dsd->dsd_list_dma);
+			kfree(dif_dsd);
+			difctx->no_dif_bundl--;
+		}
+
+		list_for_each_entry_safe(dif_dsd, nxt_dsd,
+		    &difctx->ldif_dsd_list, list) {
+			list_del(&dif_dsd->list);
+			dma_pool_free(ha->dl_dma_pool, dif_dsd->dsd_addr,
+			    dif_dsd->dsd_list_dma);
+			kfree(dif_dsd);
+			difctx->no_ldif_dsd--;
+		}
+
+		if (difctx->no_ldif_dsd) {
+			ql_dbg(ql_dbg_tgt+ql_dbg_verbose, sp->vha, 0xe022,
+			    "%s: difctx->no_ldif_dsd=%x\n",
+			    __func__, difctx->no_ldif_dsd);
+		}
+
+		if (difctx->no_dif_bundl) {
+			ql_dbg(ql_dbg_tgt+ql_dbg_verbose, sp->vha, 0xe022,
+			    "%s: difctx->no_dif_bundl=%x\n",
+			    __func__, difctx->no_dif_bundl);
+		}
+		sp->flags &= ~SRB_DIF_BUNDL_DMA_VALID;
 	}
 
 	if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
-		struct ct6_dsd *ctx1 = ctx;
+		struct ct6_dsd *ctx1 = sp->u.scmd.ct6_ctx;
+
 		dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
 		    ctx1->fcp_cmnd_dma);
 		list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
 		ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
 		ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
 		mempool_free(ctx1, ha->ctx_mempool);
+		sp->flags &= ~SRB_FCP_CMND_DMA_VALID;
 	}
-end:
-	CMD_SP(cmd) = NULL;
-	qla2xxx_rel_qpair_sp(sp->qpair, sp);
+
+	if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
+		struct crc_context *ctx0 = sp->u.scmd.crc_ctx;
+
+		dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma);
+		sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
+	}
 }
 
-void
-qla2xxx_qpair_sp_compl(void *ptr, int res)
+void qla2xxx_qpair_sp_compl(srb_t *sp, int res)
 {
-	srb_t *sp = ptr;
 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+	struct completion *comp = sp->comp;
 
-	cmd->result = res;
+	if (WARN_ON_ONCE(atomic_read(&sp->ref_count) == 0))
+		return;
 
-	if (atomic_read(&sp->ref_count) == 0) {
-		ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3079,
-		    "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
-		    sp, GET_CMD_SP(sp));
-		if (ql2xextended_error_logging & ql_dbg_io)
-			WARN_ON(atomic_read(&sp->ref_count) == 0);
-		return;
-	}
-	if (!atomic_dec_and_test(&sp->ref_count))
-		return;
+	atomic_dec(&sp->ref_count);
 
 	sp->free(sp);
+	cmd->result = res;
+	CMD_SP(cmd) = NULL;
 	cmd->scsi_done(cmd);
+	if (comp)
+		complete(comp);
 }
 
-/* If we are SP1 here, we need to still take and release the host_lock as SP1
- * does not have the changes necessary to avoid taking host->host_lock.
- */
 static int
 qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 {
@@ -843,23 +817,21 @@
 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 	srb_t *sp;
 	int rval;
-	struct qla_qpair *qpair = NULL;
-	uint32_t tag;
-	uint16_t hwq;
 
-	if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags))) {
+	if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)) ||
+	    WARN_ON_ONCE(!rport)) {
 		cmd->result = DID_NO_CONNECT << 16;
 		goto qc24_fail_command;
 	}
 
 	if (ha->mqenable) {
-		if (shost_use_blk_mq(vha->host)) {
-			tag = blk_mq_unique_tag(cmd->request);
-			hwq = blk_mq_unique_tag_to_hwq(tag);
-			qpair = ha->queue_pair_map[hwq];
-		} else if (vha->vp_idx && vha->qpair) {
-			qpair = vha->qpair;
-		}
+		uint32_t tag;
+		uint16_t hwq;
+		struct qla_qpair *qpair = NULL;
+
+		tag = blk_mq_unique_tag(cmd->request);
+		hwq = blk_mq_unique_tag_to_hwq(tag);
+		qpair = ha->queue_pair_map[hwq];
 
 		if (qpair)
 			return qla2xxx_mqueuecommand(host, cmd, qpair);
@@ -926,9 +898,8 @@
 	else
 		goto qc24_target_busy;
 
-	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
-	if (!sp)
-		goto qc24_host_busy;
+	sp = scsi_cmd_priv(cmd);
+	qla2xxx_init_sp(sp, vha, vha->hw->base_qpair, fcport);
 
 	sp->u.scmd.cmd = cmd;
 	sp->type = SRB_SCSI_CMD;
@@ -949,9 +920,6 @@
 qc24_host_busy_free_sp:
 	sp->free(sp);
 
-qc24_host_busy:
-	return SCSI_MLQUEUE_HOST_BUSY;
-
 qc24_target_busy:
 	return SCSI_MLQUEUE_TARGET_BUSY;
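
Both queuecommand paths above stop drawing srbs from a mempool and take them
from scsi_cmd_priv(cmd): the SCSI midlayer co-allocates a driver-private
region behind every struct scsi_cmnd, sized by the host template's cmd_size
(set elsewhere in this patch), which is also why the SCSI_MLQUEUE_HOST_BUSY
allocation-failure path disappears. A layout model with illustrative struct
names:

#include <stdio.h>
#include <stdlib.h>

struct scsi_cmnd_model { int result; /* ...midlayer fields... */ };
struct srb_model { int type; int handle; };

/* Mirrors scsi_cmd_priv(): the driver area sits directly after the command. */
static void *cmd_priv(struct scsi_cmnd_model *cmd)
{
	return cmd + 1;
}

int main(void)
{
	/* The midlayer allocates sizeof(*cmd) + cmd_size as one block. */
	struct scsi_cmnd_model *cmd =
		calloc(1, sizeof(*cmd) + sizeof(struct srb_model));
	struct srb_model *sp = cmd_priv(cmd);

	sp->type = 1;	/* stands in for sp->type = SRB_SCSI_CMD */
	printf("srb lives %td bytes into the command allocation\n",
	       (char *)sp - (char *)cmd);
	free(cmd);
	return 0;
}
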
 
@@ -974,7 +942,7 @@
 	srb_t *sp;
 	int rval;
 
-	rval = fc_remote_port_chkready(rport);
+	rval = rport ? fc_remote_port_chkready(rport) : FC_PORTSTATE_OFFLINE;
 	if (rval) {
 		cmd->result = rval;
 		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3076,
@@ -1012,9 +980,8 @@
 	else
 		goto qc24_target_busy;
 
-	sp = qla2xxx_get_qpair_sp(qpair, fcport, GFP_ATOMIC);
-	if (!sp)
-		goto qc24_host_busy;
+	sp = scsi_cmd_priv(cmd);
+	qla2xxx_init_sp(sp, vha, qpair, fcport);
 
 	sp->u.scmd.cmd = cmd;
 	sp->type = SRB_SCSI_CMD;
@@ -1038,9 +1005,6 @@
 qc24_host_busy_free_sp:
 	sp->free(sp);
 
-qc24_host_busy:
-	return SCSI_MLQUEUE_HOST_BUSY;
-
 qc24_target_busy:
 	return SCSI_MLQUEUE_TARGET_BUSY;
 
@@ -1059,8 +1023,8 @@
  *    cmd = Scsi Command to wait on.
  *
  * Return:
- *    Not Found : 0
- *    Found : 1
+ *    Completed in time : QLA_SUCCESS
+ *    Did not complete in time : QLA_FUNCTION_FAILED
  */
 static int
 qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
@@ -1151,9 +1115,17 @@
 void
 qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha)
 {
+	u8 i;
+
 	qla2x00_mark_all_devices_lost(vha, 0);
 
-	wait_event_timeout(vha->fcport_waitQ, test_fcport_count(vha), 10*HZ);
+	for (i = 0; i < 10; i++) {
+		if (wait_event_timeout(vha->fcport_waitQ,
+		    test_fcport_count(vha), HZ) > 0)
+			break;
+	}
+
+	flush_workqueue(vha->hw->wq);
 }
 
 /*
@@ -1212,10 +1184,14 @@
 	return return_status;
 }
 
-static void
+static int
 sp_get(struct srb *sp)
 {
-	atomic_inc(&sp->ref_count);
+	if (!refcount_inc_not_zero((refcount_t *)&sp->ref_count))
+		/* kref get fail */
+		return ENXIO;
+	else
+		return 0;
 }
 
 #define ISP_REG_DISCONNECT 0xffffffffU
@@ -1266,12 +1242,12 @@
 qla2xxx_eh_abort(struct scsi_cmnd *cmd)
 {
 	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
+	DECLARE_COMPLETION_ONSTACK(comp);
 	srb_t *sp;
 	int ret;
 	unsigned int id;
 	uint64_t lun;
-	unsigned long flags;
-	int rval, wait = 0;
+	int rval;
 	struct qla_hw_data *ha = vha->hw;
 
 	if (qla2x00_isp_reg_stat(ha)) {
@@ -1279,71 +1255,78 @@
 		    "PCI/Register disconnect, exiting.\n");
 		return FAILED;
 	}
-	if (!CMD_SP(cmd))
-		return SUCCESS;
 
 	ret = fc_block_scsi_eh(cmd);
 	if (ret != 0)
 		return ret;
-	ret = SUCCESS;
+
+	sp = scsi_cmd_priv(cmd);
+
+	if (sp->fcport && sp->fcport->deleted)
+		return SUCCESS;
+
+	/* Return if the command has already finished. */
+	if (sp_get(sp))
+		return SUCCESS;
 
 	id = cmd->device->id;
 	lun = cmd->device->lun;
 
-	spin_lock_irqsave(&ha->hardware_lock, flags);
-	sp = (srb_t *) CMD_SP(cmd);
-	if (!sp) {
-		spin_unlock_irqrestore(&ha->hardware_lock, flags);
-		return SUCCESS;
-	}
-
 	ql_dbg(ql_dbg_taskm, vha, 0x8002,
 	    "Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p handle=%x\n",
 	    vha->host_no, id, lun, sp, cmd, sp->handle);
 
-	/* Get a reference to the sp and drop the lock.*/
-	sp_get(sp);
-
-	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 	rval = ha->isp_ops->abort_command(sp);
-	if (rval) {
-		if (rval == QLA_FUNCTION_PARAMETER_ERROR)
-			ret = SUCCESS;
-		else
-			ret = FAILED;
+	ql_dbg(ql_dbg_taskm, vha, 0x8003,
+	       "Abort command mbx cmd=%p, rval=%x.\n", cmd, rval);
 
-		ql_dbg(ql_dbg_taskm, vha, 0x8003,
-		    "Abort command mbx failed cmd=%p, rval=%x.\n", cmd, rval);
-	} else {
-		ql_dbg(ql_dbg_taskm, vha, 0x8004,
-		    "Abort command mbx success cmd=%p.\n", cmd);
-		wait = 1;
-	}
-
-	spin_lock_irqsave(&ha->hardware_lock, flags);
-	sp->done(sp, 0);
-	spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
-	/* Did the command return during mailbox execution? */
-	if (ret == FAILED && !CMD_SP(cmd))
+	switch (rval) {
+	case QLA_SUCCESS:
+		/*
+		 * The command has been aborted. That means that the firmware
+		 * won't report a completion.
+		 */
+		sp->done(sp, DID_ABORT << 16);
 		ret = SUCCESS;
+		break;
+	case QLA_FUNCTION_PARAMETER_ERROR: {
+		/* Wait for the command completion. */
+		uint32_t ratov = ha->r_a_tov/10;
+		uint32_t ratov_j = msecs_to_jiffies(4 * ratov * 1000);
 
-	/* Wait for the command to be returned. */
-	if (wait) {
-		if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
-			ql_log(ql_log_warn, vha, 0x8006,
-			    "Abort handler timed out cmd=%p.\n", cmd);
+		WARN_ON_ONCE(sp->comp);
+		sp->comp = &comp;
+		if (!wait_for_completion_timeout(&comp, ratov_j)) {
+			ql_dbg(ql_dbg_taskm, vha, 0xffff,
+			    "%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n",
+			    __func__, ha->r_a_tov);
 			ret = FAILED;
+		} else {
+			ret = SUCCESS;
 		}
+		break;
+	}
+	default:
+		/*
+		 * Either abort failed or abort and completion raced. Let
+		 * the SCSI core retry the abort in the former case.
+		 */
+		ret = FAILED;
+		break;
 	}
 
+	sp->comp = NULL;
+	atomic_dec(&sp->ref_count);
 	ql_log(ql_log_info, vha, 0x801c,
-	    "Abort command issued nexus=%ld:%d:%llu --  %d %x.\n",
-	    vha->host_no, id, lun, wait, ret);
+	    "Abort command issued nexus=%ld:%d:%llu -- %x.\n",
+	    vha->host_no, id, lun, ret);
 
 	return ret;
 }
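[Editor's note] The new abort path parks an on-stack completion in sp->comp and waits up to 4 * R_A_TOV for the firmware to signal it. A minimal sketch of that handshake; struct ctx and its comp hook are illustrative, and the real driver serializes the detach against the completion path:

	#include <linux/completion.h>
	#include <linux/jiffies.h>

	struct ctx {
		struct completion *comp; /* completer calls complete() on it */
	};

	static bool wait_for_done(struct ctx *c, unsigned int tmo_ms)
	{
		DECLARE_COMPLETION_ONSTACK(comp);
		bool done;

		c->comp = &comp;
		done = wait_for_completion_timeout(&comp,
						   msecs_to_jiffies(tmo_ms)) > 0;
		c->comp = NULL;		/* always detach the stack object */
		return done;
	}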
 
+/*
+ * Returns: QLA_SUCCESS or QLA_FUNCTION_FAILED.
+ */
 int
 qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
 	uint64_t l, enum nexus_wait_type type)
@@ -1417,6 +1400,9 @@
 	if (err != 0)
 		return err;
 
+	if (fcport->deleted)
+		return SUCCESS;
+
 	ql_log(ql_log_info, vha, 0x8009,
 	    "%s RESET ISSUED nexus=%ld:%d:%llu cmd=%p.\n", name, vha->host_no,
 	    cmd->device->id, cmd->device->lun, cmd);
@@ -1428,7 +1414,7 @@
 		goto eh_reset_failed;
 	}
 	err = 2;
-	if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
+	if (do_reset(fcport, cmd->device->lun, 1)
 		!= QLA_SUCCESS) {
 		ql_log(ql_log_warn, vha, 0x800c,
 		    "do_reset failed for cmd=%p.\n", cmd);
@@ -1531,6 +1517,9 @@
 		return ret;
 	ret = FAILED;
 
+	if (qla2x00_chip_is_down(vha))
+		return ret;
+
 	ql_log(ql_log_info, vha, 0x8012,
 	    "BUS RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun);
 
@@ -1710,10 +1699,47 @@
 	return QLA_SUCCESS;
 }
 
+static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
+			      unsigned long *flags)
+	__releases(qp->qp_lock_ptr)
+	__acquires(qp->qp_lock_ptr)
+{
+	DECLARE_COMPLETION_ONSTACK(comp);
+	scsi_qla_host_t *vha = qp->vha;
+	struct qla_hw_data *ha = vha->hw;
+	int rval;
+
+	if (sp_get(sp))
+		return;
+
+	if (sp->type == SRB_NVME_CMD || sp->type == SRB_NVME_LS ||
+	    (sp->type == SRB_SCSI_CMD && !ha->flags.eeh_busy &&
+	     !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
+	     !qla2x00_isp_reg_stat(ha))) {
+		sp->comp = &comp;
+		spin_unlock_irqrestore(qp->qp_lock_ptr, *flags);
+		rval = ha->isp_ops->abort_command(sp);
+
+		switch (rval) {
+		case QLA_SUCCESS:
+			sp->done(sp, res);
+			break;
+		case QLA_FUNCTION_PARAMETER_ERROR:
+			wait_for_completion(&comp);
+			break;
+		}
+
+		spin_lock_irqsave(qp->qp_lock_ptr, *flags);
+		sp->comp = NULL;
+	}
+
+	atomic_dec(&sp->ref_count);
+}
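[Editor's note] qla2x00_abort_srb() drops the caller's queue-pair lock around the slow mailbox abort; the __releases/__acquires annotations let sparse verify that the lock is balanced across the call. A stripped-down sketch of the same shape:

	#include <linux/spinlock.h>

	/* Caller holds 'lock'; it is dropped around the slow part. */
	static void do_slow_unlocked(spinlock_t *lock, unsigned long *flags)
		__releases(lock)
		__acquires(lock)
	{
		spin_unlock_irqrestore(lock, *flags);
		/* ...slow, possibly sleeping work runs here unlocked... */
		spin_lock_irqsave(lock, *flags);
	}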
+
 static void
 __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
 {
-	int cnt, status;
+	int cnt;
 	unsigned long flags;
 	srb_t *sp;
 	scsi_qla_host_t *vha = qp->vha;
@@ -1721,7 +1747,6 @@
 	struct req_que *req;
 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 	struct qla_tgt_cmd *cmd;
-	uint8_t trace = 0;
 
 	if (!ha->req_q_map)
 		return;
@@ -1731,64 +1756,26 @@
 		sp = req->outstanding_cmds[cnt];
 		if (sp) {
 			req->outstanding_cmds[cnt] = NULL;
-			if (sp->cmd_type == TYPE_SRB) {
-				if (sp->type == SRB_NVME_CMD ||
-				    sp->type == SRB_NVME_LS) {
-					sp_get(sp);
-					spin_unlock_irqrestore(qp->qp_lock_ptr,
-					    flags);
-					qla_nvme_abort(ha, sp, res);
-					spin_lock_irqsave(qp->qp_lock_ptr,
-					    flags);
-				} else if (GET_CMD_SP(sp) &&
-				    !ha->flags.eeh_busy &&
-				    (!test_bit(ABORT_ISP_ACTIVE,
-					&vha->dpc_flags)) &&
-				    (sp->type == SRB_SCSI_CMD)) {
-					/*
-					 * Don't abort commands in
-					 * adapter during EEH
-					 * recovery as it's not
-					 * accessible/responding.
-					 *
-					 * Get a reference to the sp
-					 * and drop the lock. The
-					 * reference ensures this
-					 * sp->done() call and not the
-					 * call in qla2xxx_eh_abort()
-					 * ends the SCSI command (with
-					 * result 'res').
-					 */
-					sp_get(sp);
-					spin_unlock_irqrestore(qp->qp_lock_ptr,
-					    flags);
-					status = qla2xxx_eh_abort(
-					    GET_CMD_SP(sp));
-					spin_lock_irqsave(qp->qp_lock_ptr,
-					    flags);
-					/*
-					 * Get rid of extra reference
-					 * if immediate exit from
-					 * ql2xxx_eh_abort
-					 */
-					if (status == FAILED &&
-					    (qla2x00_isp_reg_stat(ha)))
-						atomic_dec(
-						    &sp->ref_count);
-				}
-				sp->done(sp, res);
-			} else {
+			switch (sp->cmd_type) {
+			case TYPE_SRB:
+				qla2x00_abort_srb(qp, sp, res, &flags);
+				break;
+			case TYPE_TGT_CMD:
 				if (!vha->hw->tgt.tgt_ops || !tgt ||
 				    qla_ini_mode_enabled(vha)) {
-					if (!trace)
-						ql_dbg(ql_dbg_tgt_mgt,
-						    vha, 0xf003,
-						    "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n",
-						    vha->dpc_flags);
+					ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
+					    "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n",
+					    vha->dpc_flags);
 					continue;
 				}
 				cmd = (struct qla_tgt_cmd *)sp;
-				qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
+				cmd->aborted = 1;
+				break;
+			case TYPE_TGT_TMCMD:
+				/* Skip task management functions. */
+				break;
+			default:
+				break;
 			}
 		}
 	}
@@ -1801,8 +1788,13 @@
 	int que;
 	struct qla_hw_data *ha = vha->hw;
 
+	/* Continue only if initialization complete. */
+	if (!ha->base_qpair)
+		return;
 	__qla2x00_abort_all_cmds(ha->base_qpair, res);
 
+	if (!ha->queue_pair_map)
+		return;
 	for (que = 0; que < ha->max_qpairs; que++) {
 		if (!ha->queue_pair_map[que])
 			continue;
@@ -2478,7 +2470,7 @@
 	.config_rings		= qla24xx_config_rings,
 	.reset_adapter		= qla24xx_reset_adapter,
 	.nvram_config		= qla81xx_nvram_config,
-	.update_fw_options	= qla81xx_update_fw_options,
+	.update_fw_options	= qla24xx_update_fw_options,
 	.load_risc		= qla81xx_load_risc,
 	.pci_info_str		= qla24xx_pci_info_str,
 	.fw_version_str		= qla24xx_fw_version_str,
@@ -2655,6 +2647,24 @@
 		ha->device_type |= DT_T10_PI;
 		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
 		break;
+	case PCI_DEVICE_ID_QLOGIC_ISP2081:
+	case PCI_DEVICE_ID_QLOGIC_ISP2089:
+		ha->isp_type |= DT_ISP2081;
+		ha->device_type |= DT_ZIO_SUPPORTED;
+		ha->device_type |= DT_FWI2;
+		ha->device_type |= DT_IIDMA;
+		ha->device_type |= DT_T10_PI;
+		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+		break;
+	case PCI_DEVICE_ID_QLOGIC_ISP2281:
+	case PCI_DEVICE_ID_QLOGIC_ISP2289:
+		ha->isp_type |= DT_ISP2281;
+		ha->device_type |= DT_ZIO_SUPPORTED;
+		ha->device_type |= DT_FWI2;
+		ha->device_type |= DT_IIDMA;
+		ha->device_type |= DT_T10_PI;
+		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+		break;
 	}
 
 	if (IS_QLA82XX(ha))
@@ -2662,7 +2672,8 @@
 	else {
 		/* Get adapter physical port no from interrupt pin register. */
 		pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
-		if (IS_QLA27XX(ha))
+		if (IS_QLA25XX(ha) || IS_QLA2031(ha) ||
+		    IS_QLA27XX(ha) || IS_QLA28XX(ha))
 			ha->port_no--;
 		else
 			ha->port_no = !(ha->port_no & 1);
@@ -2708,7 +2719,7 @@
 		struct scsi_qla_host, iocb_work);
 	struct qla_hw_data *ha = vha->hw;
 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
-	int i = 20;
+	int i = 2;
 	unsigned long flags;
 
 	if (test_bit(UNLOADING, &base_vha->dpc_flags))
@@ -2759,7 +2770,11 @@
 	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 ||
 	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071 ||
 	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2271 ||
-	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2261) {
+	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2261 ||
+	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2081 ||
+	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2281 ||
+	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2089 ||
+	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2289) {
 		bars = pci_select_bars(pdev, IORESOURCE_MEM);
 		mem_only = 1;
 		ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
@@ -2779,6 +2794,10 @@
 	/* This may fail but that's ok */
 	pci_enable_pcie_error_reporting(pdev);
 
+	/* Turn off T10-DIF when FC-NVMe is enabled */
+	if (ql2xnvmeenable)
+		ql2xenabledif = 0;
+
 	ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
 	if (!ha) {
 		ql_log_pci(ql_log_fatal, pdev, 0x0009,
@@ -2808,7 +2827,7 @@
 
 	/* Set EEH reset type to fundamental if required by hba */
 	if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha) ||
-	    IS_QLA83XX(ha) || IS_QLA27XX(ha))
+	    IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
 		pdev->needs_freset = 1;
 
 	ha->prev_topology = 0;
@@ -2819,6 +2838,8 @@
 	atomic_set(&ha->num_pend_mbx_stage1, 0);
 	atomic_set(&ha->num_pend_mbx_stage2, 0);
 	atomic_set(&ha->num_pend_mbx_stage3, 0);
+	atomic_set(&ha->zio_threshold, DEFAULT_ZIO_THRESHOLD);
+	ha->last_zio_threshold = DEFAULT_ZIO_THRESHOLD;
 
 	/* Assign ISP specific operations. */
 	if (IS_QLA2100(ha)) {
@@ -2985,6 +3006,23 @@
 		ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
 		ha->nvram_conf_off = ~0;
 		ha->nvram_data_off = ~0;
+	} else if (IS_QLA28XX(ha)) {
+		ha->portnum = PCI_FUNC(ha->pdev->devfn);
+		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
+		ha->mbx_count = MAILBOX_REGISTER_COUNT;
+		req_length = REQUEST_ENTRY_CNT_24XX;
+		rsp_length = RESPONSE_ENTRY_CNT_2300;
+		ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
+		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
+		ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
+		ha->gid_list_info_size = 8;
+		ha->optrom_size = OPTROM_SIZE_28XX;
+		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
+		ha->isp_ops = &qla27xx_isp_ops;
+		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_28XX;
+		ha->flash_data_off = FARX_ACCESS_FLASH_DATA_28XX;
+		ha->nvram_conf_off = ~0;
+		ha->nvram_data_off = ~0;
 	}
 
 	ql_dbg_pci(ql_dbg_init, pdev, 0x001e,
@@ -3109,10 +3147,11 @@
 		ql_log(ql_log_fatal, base_vha, 0x003d,
 		    "Failed to allocate memory for queue pointers..."
 		    "aborting.\n");
+		ret = -ENODEV;
 		goto probe_failed;
 	}
 
-	if (ha->mqenable && shost_use_blk_mq(host)) {
+	if (ha->mqenable) {
 		/* number of hardware queues supported by blk/scsi-mq*/
 		host->nr_hw_queues = ha->max_qpairs;
 
@@ -3150,7 +3189,8 @@
 	req->req_q_out = &ha->iobase->isp24.req_q_out;
 	rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in;
 	rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out;
-	if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+	if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
+	    IS_QLA28XX(ha)) {
 		req->req_q_in = &ha->mqiobase->isp25mq.req_q_in;
 		req->req_q_out = &ha->mqiobase->isp25mq.req_q_out;
 		rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in;
@@ -3186,6 +3226,10 @@
 	    req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);
 
 	ha->wq = alloc_workqueue("qla2xxx_wq", 0, 0);
+	if (unlikely(!ha->wq)) {
+		ret = -ENOMEM;
+		goto probe_failed;
+	}
 
 	if (ha->isp_ops->initialize_adapter(base_vha)) {
 		ql_log(ql_log_fatal, base_vha, 0x00d6,
@@ -3224,25 +3268,17 @@
 	    base_vha->mgmt_svr_loop_id, host->sg_tablesize);
 
 	if (ha->mqenable) {
-		bool mq = false;
 		bool startit = false;
 
-		if (QLA_TGT_MODE_ENABLED()) {
-			mq = true;
+		if (QLA_TGT_MODE_ENABLED())
 			startit = false;
-		}
 
-		if ((ql2x_ini_mode == QLA2XXX_INI_MODE_ENABLED) &&
-		    shost_use_blk_mq(host)) {
-			mq = true;
+		if (ql2x_ini_mode == QLA2XXX_INI_MODE_ENABLED)
 			startit = true;
-		}
 
-		if (mq) {
-			/* Create start of day qpairs for Block MQ */
-			for (i = 0; i < ha->max_qpairs; i++)
-				qla2xxx_create_qpair(base_vha, 5, 0, startit);
-		}
+		/* Create start of day qpairs for Block MQ */
+		for (i = 0; i < ha->max_qpairs; i++)
+			qla2xxx_create_qpair(base_vha, 5, 0, startit);
 	}
 
 	if (ha->flags.running_gold_fw)
@@ -3303,18 +3339,22 @@
 	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
 		if (ha->fw_attributes & BIT_4) {
 			int prot = 0, guard;
+
 			base_vha->flags.difdix_supported = 1;
 			ql_dbg(ql_dbg_init, base_vha, 0x00f1,
 			    "Registering for DIF/DIX type 1 and 3 protection.\n");
 			if (ql2xenabledif == 1)
 				prot = SHOST_DIX_TYPE0_PROTECTION;
-			scsi_host_set_prot(host,
-			    prot | SHOST_DIF_TYPE1_PROTECTION
-			    | SHOST_DIF_TYPE2_PROTECTION
-			    | SHOST_DIF_TYPE3_PROTECTION
-			    | SHOST_DIX_TYPE1_PROTECTION
-			    | SHOST_DIX_TYPE2_PROTECTION
-			    | SHOST_DIX_TYPE3_PROTECTION);
+			if (ql2xprotmask)
+				scsi_host_set_prot(host, ql2xprotmask);
+			else
+				scsi_host_set_prot(host,
+				    prot | SHOST_DIF_TYPE1_PROTECTION
+				    | SHOST_DIF_TYPE2_PROTECTION
+				    | SHOST_DIF_TYPE3_PROTECTION
+				    | SHOST_DIX_TYPE1_PROTECTION
+				    | SHOST_DIX_TYPE2_PROTECTION
+				    | SHOST_DIX_TYPE3_PROTECTION);
 
 			guard = SHOST_DIX_GUARD_CRC;
 
@@ -3322,7 +3362,10 @@
 			    (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
 				guard |= SHOST_DIX_GUARD_IP;
 
-			scsi_host_set_guard(host, guard);
+			if (ql2xprotguard)
+				scsi_host_set_guard(host, ql2xprotguard);
+			else
+				scsi_host_set_guard(host, guard);
 		} else
 			base_vha->flags.difdix_supported = 0;
 	}
@@ -3373,7 +3416,8 @@
 	    "QLogic %s - %s.\n", ha->model_number, ha->model_desc);
 	ql_log(ql_log_info, base_vha, 0x00fc,
 	    "ISP%04X: %s @ %s hdma%c host#=%ld fw=%s.\n",
-	    pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info),
+	    pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info,
+						       sizeof(pci_info)),
 	    pci_name(pdev), ha->flags.enable_64bit_addressing ? '+' : '-',
 	    base_vha->host_no,
 	    ha->isp_ops->fw_version_str(base_vha, fw_str, sizeof(fw_str)));
@@ -3395,6 +3439,12 @@
 	return 0;
 
 probe_failed:
+	if (base_vha->gnl.l) {
+		dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
+				base_vha->gnl.l, base_vha->gnl.ldma);
+		base_vha->gnl.l = NULL;
+	}
+
 	if (base_vha->timer_active)
 		qla2x00_stop_timer(base_vha);
 	base_vha->flags.online = 0;
@@ -3478,7 +3528,8 @@
 	if (ha->eft)
 		qla2x00_disable_eft_trace(vha);
 
-	if (IS_QLA25XX(ha) ||  IS_QLA2031(ha) || IS_QLA27XX(ha)) {
+	if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
+	    IS_QLA28XX(ha)) {
 		if (ha->flags.fw_started)
 			qla2x00_abort_isp_cleanup(vha);
 	} else {
@@ -3486,6 +3537,10 @@
 		qla2x00_try_to_stop_firmware(vha);
 	}
 
+	/* Disable timer */
+	if (vha->timer_active)
+		qla2x00_stop_timer(vha);
+
 	/* Turn adapter off line */
 	vha->flags.online = 0;
 
@@ -3523,6 +3578,8 @@
 		spin_unlock_irqrestore(&ha->vport_slock, flags);
 		mutex_unlock(&ha->vport_lock);
 
+		qla_nvme_delete(vha);
+
 		fc_vport_terminate(vha->fc_vport);
 		scsi_host_put(vha->host);
 
@@ -3581,7 +3638,8 @@
 		if (ha->mqiobase)
 			iounmap(ha->mqiobase);
 
-		if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) && ha->msixbase)
+		if ((IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
+		    ha->msixbase)
 			iounmap(ha->msixbase);
 	}
 }
@@ -3624,7 +3682,7 @@
 	if (!atomic_read(&pdev->enable_cnt)) {
 		dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
 		    base_vha->gnl.l, base_vha->gnl.ldma);
-
+		base_vha->gnl.l = NULL;
 		scsi_host_put(base_vha->host);
 		kfree(ha);
 		pci_set_drvdata(pdev, NULL);
@@ -3632,7 +3690,8 @@
 	}
 	qla2x00_wait_for_hba_ready(base_vha);
 
-	if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
+	if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
+	    IS_QLA28XX(ha)) {
 		if (ha->flags.fw_started)
 			qla2x00_abort_isp_cleanup(base_vha);
 	} else if (!IS_QLAFX00(ha)) {
@@ -3663,6 +3722,8 @@
 	dma_free_coherent(&ha->pdev->dev,
 		base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma);
 
+	base_vha->gnl.l = NULL;
+
 	vfree(base_vha->scan.l);
 
 	if (IS_QLAFX00(ha))
@@ -3670,8 +3731,6 @@
 
 	qla2x00_delete_all_vps(ha, base_vha);
 
-	qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
-
 	qla2x00_dfs_remove(base_vha);
 
 	qla84xx_put_chip(base_vha);
@@ -3760,11 +3819,8 @@
 {
 	fc_port_t *fcport, *tfcport;
 
-	list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) {
-		list_del(&fcport->list);
-		qla2x00_clear_loop_id(fcport);
-		kfree(fcport);
-	}
+	list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list)
+		qla2x00_free_fcport(fcport);
 }
 
 static inline void
@@ -3789,6 +3845,7 @@
 		qla2xxx_wake_dpc(base_vha);
 	} else {
 		int now;
+
 		if (rport) {
 			ql_dbg(ql_dbg_disc, fcport->vha, 0x2109,
 			    "%s %8phN. rport %p roles %x\n",
@@ -3880,6 +3937,19 @@
 	}
 }
 
+static void qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha)
+{
+	int i;
+
+	if (IS_FWI2_CAPABLE(ha))
+		return;
+
+	for (i = 0; i < SNS_FIRST_LOOP_ID; i++)
+		set_bit(i, ha->loop_id_map);
+	set_bit(MANAGEMENT_SERVER, ha->loop_id_map);
+	set_bit(BROADCAST, ha->loop_id_map);
+}
+
 /*
 * qla2x00_mem_alloc
 *      Allocates adapter memory.
@@ -3961,9 +4031,86 @@
 			    "Failed to allocate memory for fcp_cmnd_dma_pool.\n");
 			goto fail_dl_dma_pool;
 		}
+
+		if (ql2xenabledif) {
+			u64 bufsize = DIF_BUNDLING_DMA_POOL_SIZE;
+			struct dsd_dma *dsd, *nxt;
+			uint i;
+			/* Create a DMA pool of buffers for DIF bundling */
+			ha->dif_bundl_pool = dma_pool_create(name,
+			    &ha->pdev->dev, DIF_BUNDLING_DMA_POOL_SIZE, 8, 0);
+			if (!ha->dif_bundl_pool) {
+				ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0024,
+				    "%s: failed create dif_bundl_pool\n",
+				    __func__);
+				goto fail_dif_bundl_dma_pool;
+			}
+
+			INIT_LIST_HEAD(&ha->pool.good.head);
+			INIT_LIST_HEAD(&ha->pool.unusable.head);
+			ha->pool.good.count = 0;
+			ha->pool.unusable.count = 0;
+			for (i = 0; i < 128; i++) {
+				dsd = kzalloc(sizeof(*dsd), GFP_ATOMIC);
+				if (!dsd) {
+					ql_dbg_pci(ql_dbg_init, ha->pdev,
+					    0xe0ee, "%s: failed alloc dsd\n",
+					    __func__);
+					return 1;
+				}
+				ha->dif_bundle_kallocs++;
+
+				dsd->dsd_addr = dma_pool_alloc(
+				    ha->dif_bundl_pool, GFP_ATOMIC,
+				    &dsd->dsd_list_dma);
+				if (!dsd->dsd_addr) {
+					ql_dbg_pci(ql_dbg_init, ha->pdev,
+					    0xe0ee,
+					    "%s: failed alloc ->dsd_addr\n",
+					    __func__);
+					kfree(dsd);
+					ha->dif_bundle_kallocs--;
+					continue;
+				}
+				ha->dif_bundle_dma_allocs++;
+
+				/*
+				 * If the DMA buffer crosses a 4GB
+				 * boundary, put it on the unusable list.
+				 */
+				if (MSD(dsd->dsd_list_dma) ^
+				    MSD(dsd->dsd_list_dma + bufsize)) {
+					list_add_tail(&dsd->list,
+					    &ha->pool.unusable.head);
+					ha->pool.unusable.count++;
+				} else {
+					list_add_tail(&dsd->list,
+					    &ha->pool.good.head);
+					ha->pool.good.count++;
+				}
+			}
+
+			/* return the good ones back to the pool */
+			list_for_each_entry_safe(dsd, nxt,
+			    &ha->pool.good.head, list) {
+				list_del(&dsd->list);
+				dma_pool_free(ha->dif_bundl_pool,
+				    dsd->dsd_addr, dsd->dsd_list_dma);
+				ha->dif_bundle_dma_allocs--;
+				kfree(dsd);
+				ha->dif_bundle_kallocs--;
+			}
+
+			ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0024,
+			    "%s: dif dma pool (good=%u unusable=%u)\n",
+			    __func__, ha->pool.good.count,
+			    ha->pool.unusable.count);
+		}
+
 		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0025,
-		    "dl_dma_pool=%p fcp_cmnd_dma_pool=%p.\n",
-		    ha->dl_dma_pool, ha->fcp_cmnd_dma_pool);
+		    "dl_dma_pool=%p fcp_cmnd_dma_pool=%p dif_bundl_pool=%p.\n",
+		    ha->dl_dma_pool, ha->fcp_cmnd_dma_pool,
+		    ha->dif_bundl_pool);
 	}
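[Editor's note] The loop above screens freshly allocated pool buffers and quarantines any whose DMA range would straddle a 4GB boundary, since the hardware cannot cross it in one bundle. A spelled-out version of the boundary test; note the driver's MSD() comparison uses dma + size (one past the end), which is marginally more conservative:

	#include <linux/kernel.h>
	#include <linux/types.h>

	static bool crosses_4g_boundary(dma_addr_t dma, size_t len)
	{
		/* Compare the upper 32 bits of the first and last byte. */
		return upper_32_bits(dma) != upper_32_bits(dma + len - 1);
	}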
 
 	/* Allocate memory for SNS commands */
@@ -4045,7 +4192,8 @@
 		ha->npiv_info = NULL;
 
 	/* Get consistent memory allocated for EX-INIT-CB. */
-	if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
+	if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
+	    IS_QLA28XX(ha)) {
 		ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
 		    &ha->ex_init_cb_dma);
 		if (!ha->ex_init_cb)
@@ -4088,8 +4236,20 @@
 		goto fail_sfp_data;
 	}
 
+	ha->flt = dma_alloc_coherent(&ha->pdev->dev,
+	    sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE, &ha->flt_dma,
+	    GFP_KERNEL);
+	if (!ha->flt) {
+		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
+		    "Unable to allocate memory for FLT.\n");
+		goto fail_flt_buffer;
+	}
+
 	return 0;
 
+fail_flt_buffer:
+	dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE,
+	    ha->sfp_data, ha->sfp_data_dma);
 fail_sfp_data:
 	kfree(ha->loop_id_map);
 fail_loop_id_map:
@@ -4128,6 +4288,24 @@
 		dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
 		    ha->sns_cmd, ha->sns_cmd_dma);
 fail_dma_pool:
+	if (ql2xenabledif) {
+		struct dsd_dma *dsd, *nxt;
+
+		list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head,
+		    list) {
+			list_del(&dsd->list);
+			dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr,
+			    dsd->dsd_list_dma);
+			ha->dif_bundle_dma_allocs--;
+			kfree(dsd);
+			ha->dif_bundle_kallocs--;
+			ha->pool.unusable.count--;
+		}
+		dma_pool_destroy(ha->dif_bundl_pool);
+		ha->dif_bundl_pool = NULL;
+	}
+
+fail_dif_bundl_dma_pool:
 	if (IS_QLA82XX(ha) || ql2xenabledif) {
 		dma_pool_destroy(ha->fcp_cmnd_dma_pool);
 		ha->fcp_cmnd_dma_pool = NULL;
@@ -4144,12 +4322,10 @@
 	kfree(ha->nvram);
 	ha->nvram = NULL;
 fail_free_ctx_mempool:
-	if (ha->ctx_mempool)
-		mempool_destroy(ha->ctx_mempool);
+	mempool_destroy(ha->ctx_mempool);
 	ha->ctx_mempool = NULL;
 fail_free_srb_mempool:
-	if (ha->srb_mempool)
-		mempool_destroy(ha->srb_mempool);
+	mempool_destroy(ha->srb_mempool);
 	ha->srb_mempool = NULL;
 fail_free_gid_list:
 	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
@@ -4249,29 +4425,34 @@
 qla2x00_number_of_exch(scsi_qla_host_t *vha, u32 *ret_cnt, u16 max_cnt)
 {
 	u32 temp;
+	struct init_cb_81xx *icb = (struct init_cb_81xx *)&vha->hw->init_cb;
 	*ret_cnt = FW_DEF_EXCHANGES_CNT;
 
 	if (max_cnt > vha->hw->max_exchg)
 		max_cnt = vha->hw->max_exchg;
 
 	if (qla_ini_mode_enabled(vha)) {
-		if (ql2xiniexchg > max_cnt)
-			ql2xiniexchg = max_cnt;
+		if (vha->ql2xiniexchg > max_cnt)
+			vha->ql2xiniexchg = max_cnt;
 
-		if (ql2xiniexchg > FW_DEF_EXCHANGES_CNT)
-			*ret_cnt = ql2xiniexchg;
+		if (vha->ql2xiniexchg > FW_DEF_EXCHANGES_CNT)
+			*ret_cnt = vha->ql2xiniexchg;
+
 	} else if (qla_tgt_mode_enabled(vha)) {
-		if (ql2xexchoffld > max_cnt)
-			ql2xexchoffld = max_cnt;
+		if (vha->ql2xexchoffld > max_cnt) {
+			vha->ql2xexchoffld = max_cnt;
+			icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
+		}
 
-		if (ql2xexchoffld > FW_DEF_EXCHANGES_CNT)
-			*ret_cnt = ql2xexchoffld;
+		if (vha->ql2xexchoffld > FW_DEF_EXCHANGES_CNT)
+			*ret_cnt = vha->ql2xexchoffld;
 	} else if (qla_dual_mode_enabled(vha)) {
-		temp = ql2xiniexchg + ql2xexchoffld;
+		temp = vha->ql2xiniexchg + vha->ql2xexchoffld;
 		if (temp > max_cnt) {
-			ql2xiniexchg -= (temp - max_cnt)/2;
-			ql2xexchoffld -= (((temp - max_cnt)/2) + 1);
+			vha->ql2xiniexchg -= (temp - max_cnt)/2;
+			vha->ql2xexchoffld -= (((temp - max_cnt)/2) + 1);
 			temp = max_cnt;
+			icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
 		}
 
 		if (temp > FW_DEF_EXCHANGES_CNT)
@@ -4309,6 +4490,12 @@
 
 	if (totsz != ha->exchoffld_size) {
 		qla2x00_free_exchoffld_buffer(ha);
+		if (actual_cnt <= FW_DEF_EXCHANGES_CNT) {
+			ha->exchoffld_size = 0;
+			ha->flags.exchoffld_enabled = 0;
+			return QLA_SUCCESS;
+		}
+
 		ha->exchoffld_size = totsz;
 
 		ql_log(ql_log_info, vha, 0xd016,
@@ -4341,6 +4528,15 @@
 
 			return -ENOMEM;
 		}
+	} else if (!ha->exchoffld_buf || (actual_cnt <= FW_DEF_EXCHANGES_CNT)) {
+		/* pathological case */
+		qla2x00_free_exchoffld_buffer(ha);
+		ha->exchoffld_size = 0;
+		ha->flags.exchoffld_enabled = 0;
+		ql_log(ql_log_info, vha, 0xd016,
+		    "Exchange offload not enable: offld size=%d, actual count=%d entry sz=0x%x, total sz=0x%x.\n",
+		    ha->exchoffld_size, actual_cnt, size, totsz);
+		return 0;
 	}
 
 	/* Now configure the dma buffer */
@@ -4356,7 +4552,7 @@
 		if (qla_ini_mode_enabled(vha))
 			icb->exchange_count = 0;
 		else
-			icb->exchange_count = cpu_to_le16(ql2xexchoffld);
+			icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
 	}
 
 	return rval;
@@ -4389,6 +4585,9 @@
 static void
 qla2x00_free_fw_dump(struct qla_hw_data *ha)
 {
+	struct fwdt *fwdt = ha->fwdt;
+	uint j;
+
 	if (ha->fce)
 		dma_free_coherent(&ha->pdev->dev,
 		    FCE_SIZE, ha->fce, ha->fce_dma);
@@ -4399,11 +4598,10 @@
 
 	if (ha->fw_dump)
 		vfree(ha->fw_dump);
-	if (ha->fw_dump_template)
-		vfree(ha->fw_dump_template);
 
 	ha->fce = NULL;
 	ha->fce_dma = 0;
+	ha->flags.fce_enabled = 0;
 	ha->eft = NULL;
 	ha->eft_dma = 0;
 	ha->fw_dumped = 0;
@@ -4411,8 +4609,13 @@
 	ha->fw_dump_reading = 0;
 	ha->fw_dump = NULL;
 	ha->fw_dump_len = 0;
-	ha->fw_dump_template = NULL;
-	ha->fw_dump_template_len = 0;
+
+	for (j = 0; j < 2; j++, fwdt++) {
+		if (fwdt->template)
+			vfree(fwdt->template);
+		fwdt->template = NULL;
+		fwdt->length = 0;
+	}
 }
 
 /*
@@ -4430,46 +4633,68 @@
 	if (ha->mctp_dump)
 		dma_free_coherent(&ha->pdev->dev, MCTP_DUMP_SIZE, ha->mctp_dump,
 		    ha->mctp_dump_dma);
+	ha->mctp_dump = NULL;
 
-	if (ha->srb_mempool)
-		mempool_destroy(ha->srb_mempool);
+	mempool_destroy(ha->srb_mempool);
+	ha->srb_mempool = NULL;
 
 	if (ha->dcbx_tlv)
 		dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
 		    ha->dcbx_tlv, ha->dcbx_tlv_dma);
+	ha->dcbx_tlv = NULL;
 
 	if (ha->xgmac_data)
 		dma_free_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
 		    ha->xgmac_data, ha->xgmac_data_dma);
+	ha->xgmac_data = NULL;
 
 	if (ha->sns_cmd)
 		dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
 		ha->sns_cmd, ha->sns_cmd_dma);
+	ha->sns_cmd = NULL;
+	ha->sns_cmd_dma = 0;
 
 	if (ha->ct_sns)
 		dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
 		ha->ct_sns, ha->ct_sns_dma);
+	ha->ct_sns = NULL;
+	ha->ct_sns_dma = 0;
 
 	if (ha->sfp_data)
 		dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE, ha->sfp_data,
 		    ha->sfp_data_dma);
+	ha->sfp_data = NULL;
+
+	if (ha->flt)
+		dma_free_coherent(&ha->pdev->dev,
+		    sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE,
+		    ha->flt, ha->flt_dma);
+	ha->flt = NULL;
+	ha->flt_dma = 0;
 
 	if (ha->ms_iocb)
 		dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
+	ha->ms_iocb = NULL;
+	ha->ms_iocb_dma = 0;
 
 	if (ha->ex_init_cb)
 		dma_pool_free(ha->s_dma_pool,
 			ha->ex_init_cb, ha->ex_init_cb_dma);
+	ha->ex_init_cb = NULL;
+	ha->ex_init_cb_dma = 0;
 
 	if (ha->async_pd)
 		dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
+	ha->async_pd = NULL;
+	ha->async_pd_dma = 0;
 
-	if (ha->s_dma_pool)
-		dma_pool_destroy(ha->s_dma_pool);
+	dma_pool_destroy(ha->s_dma_pool);
+	ha->s_dma_pool = NULL;
 
 	if (ha->gid_list)
 		dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
 		ha->gid_list, ha->gid_list_dma);
+	ha->gid_list = NULL;
+	ha->gid_list_dma = 0;
 
 	if (IS_QLA82XX(ha)) {
 		if (!list_empty(&ha->gbl_dsd_list)) {
@@ -4486,61 +4711,59 @@
 		}
 	}
 
-	if (ha->dl_dma_pool)
-		dma_pool_destroy(ha->dl_dma_pool);
+	dma_pool_destroy(ha->dl_dma_pool);
+	ha->dl_dma_pool = NULL;
 
-	if (ha->fcp_cmnd_dma_pool)
-		dma_pool_destroy(ha->fcp_cmnd_dma_pool);
+	dma_pool_destroy(ha->fcp_cmnd_dma_pool);
+	ha->fcp_cmnd_dma_pool = NULL;
 
-	if (ha->ctx_mempool)
-		mempool_destroy(ha->ctx_mempool);
+	mempool_destroy(ha->ctx_mempool);
+	ha->ctx_mempool = NULL;
+
+	if (ql2xenabledif && ha->dif_bundl_pool) {
+		struct dsd_dma *dsd, *nxt;
+
+		list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head,
+					 list) {
+			list_del(&dsd->list);
+			dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr,
+				      dsd->dsd_list_dma);
+			ha->dif_bundle_dma_allocs--;
+			kfree(dsd);
+			ha->dif_bundle_kallocs--;
+			ha->pool.unusable.count--;
+		}
+		list_for_each_entry_safe(dsd, nxt, &ha->pool.good.head, list) {
+			list_del(&dsd->list);
+			dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr,
+				      dsd->dsd_list_dma);
+			ha->dif_bundle_dma_allocs--;
+			kfree(dsd);
+			ha->dif_bundle_kallocs--;
+		}
+	}
+
+	dma_pool_destroy(ha->dif_bundl_pool);
+	ha->dif_bundl_pool = NULL;
 
 	qlt_mem_free(ha);
 
 	if (ha->init_cb)
 		dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
 			ha->init_cb, ha->init_cb_dma);
-
-	vfree(ha->optrom_buffer);
-	kfree(ha->nvram);
-	kfree(ha->npiv_info);
-	kfree(ha->swl);
-	kfree(ha->loop_id_map);
-
-	ha->srb_mempool = NULL;
-	ha->ctx_mempool = NULL;
-	ha->sns_cmd = NULL;
-	ha->sns_cmd_dma = 0;
-	ha->ct_sns = NULL;
-	ha->ct_sns_dma = 0;
-	ha->ms_iocb = NULL;
-	ha->ms_iocb_dma = 0;
 	ha->init_cb = NULL;
 	ha->init_cb_dma = 0;
-	ha->ex_init_cb = NULL;
-	ha->ex_init_cb_dma = 0;
-	ha->async_pd = NULL;
-	ha->async_pd_dma = 0;
-	ha->loop_id_map = NULL;
-	ha->npiv_info = NULL;
+
+	vfree(ha->optrom_buffer);
 	ha->optrom_buffer = NULL;
-	ha->swl = NULL;
+	kfree(ha->nvram);
 	ha->nvram = NULL;
-	ha->mctp_dump = NULL;
-	ha->dcbx_tlv = NULL;
-	ha->xgmac_data = NULL;
-	ha->sfp_data = NULL;
-
-	ha->s_dma_pool = NULL;
-	ha->dl_dma_pool = NULL;
-	ha->fcp_cmnd_dma_pool = NULL;
-
-	ha->gid_list = NULL;
-	ha->gid_list_dma = 0;
-
-	ha->tgt.atio_ring = NULL;
-	ha->tgt.atio_dma = 0;
-	ha->tgt.tgt_vp_map = NULL;
+	kfree(ha->npiv_info);
+	ha->npiv_info = NULL;
+	kfree(ha->swl);
+	ha->swl = NULL;
+	kfree(ha->loop_id_map);
+	ha->loop_id_map = NULL;
 }
 
 struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
@@ -4564,6 +4787,10 @@
 	vha->host_no = host->host_no;
 	vha->hw = ha;
 
+	vha->qlini_mode = ql2x_ini_mode;
+	vha->ql2xexchoffld = ql2xexchoffld;
+	vha->ql2xiniexchg = ql2xiniexchg;
+
 	INIT_LIST_HEAD(&vha->vp_fcports);
 	INIT_LIST_HEAD(&vha->work_list);
 	INIT_LIST_HEAD(&vha->list);
@@ -4573,13 +4800,11 @@
 	INIT_LIST_HEAD(&vha->plogi_ack_list);
 	INIT_LIST_HEAD(&vha->qp_list);
 	INIT_LIST_HEAD(&vha->gnl.fcports);
-	INIT_LIST_HEAD(&vha->nvme_rport_list);
 	INIT_LIST_HEAD(&vha->gpnid_list);
 	INIT_WORK(&vha->iocb_work, qla2x00_iocb_work_fn);
 
 	spin_lock_init(&vha->work_lock);
 	spin_lock_init(&vha->cmd_list_lock);
-	spin_lock_init(&vha->gnl.fcports_lock);
 	init_waitqueue_head(&vha->fcport_waitQ);
 	init_waitqueue_head(&vha->vref_waitq);
 
@@ -4590,7 +4815,7 @@
 	if (!vha->gnl.l) {
 		ql_log(ql_log_fatal, vha, 0xd04a,
 		    "Alloc failed for name list.\n");
-		scsi_remove_host(vha->host);
+		scsi_host_put(vha->host);
 		return NULL;
 	}
 
@@ -4602,7 +4827,8 @@
 		    "Alloc failed for scan database.\n");
 		dma_free_coherent(&ha->pdev->dev, vha->gnl.size,
 		    vha->gnl.l, vha->gnl.ldma);
-		scsi_remove_host(vha->host);
+		vha->gnl.l = NULL;
+		scsi_host_put(vha->host);
 		return NULL;
 	}
 	INIT_DELAYED_WORK(&vha->scan.scan_work, qla_scan_work_fn);
@@ -4710,7 +4936,6 @@
 qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
 qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE);
 qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC);
-qla2x00_post_async_work(adisc_done, QLA_EVT_ASYNC_ADISC_DONE);
 qla2x00_post_async_work(prlo, QLA_EVT_ASYNC_PRLO);
 qla2x00_post_async_work(prlo_done, QLA_EVT_ASYNC_PRLO_DONE);
 
@@ -4761,16 +4986,25 @@
 	return qla2x00_post_work(vha, e);
 }
 
-int qla24xx_post_upd_fcport_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+void qla24xx_sched_upd_fcport(fc_port_t *fcport)
 {
-	struct qla_work_evt *e;
+	unsigned long flags;
 
-	e = qla2x00_alloc_work(vha, QLA_EVT_UPD_FCPORT);
-	if (!e)
-		return QLA_FUNCTION_FAILED;
+	if (IS_SW_RESV_ADDR(fcport->d_id))
+		return;
 
-	e->u.fcport.fcport = fcport;
-	return qla2x00_post_work(vha, e);
+	spin_lock_irqsave(&fcport->vha->work_lock, flags);
+	if (fcport->disc_state == DSC_UPD_FCPORT) {
+		spin_unlock_irqrestore(&fcport->vha->work_lock, flags);
+		return;
+	}
+	fcport->jiffies_at_registration = jiffies;
+	fcport->sec_since_registration = 0;
+	fcport->next_disc_state = DSC_DELETED;
+	fcport->disc_state = DSC_UPD_FCPORT;
+	spin_unlock_irqrestore(&fcport->vha->work_lock, flags);
+
+	queue_work(system_unbound_wq, &fcport->reg_work);
 }
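[Editor's note] qla24xx_sched_upd_fcport() tests and sets the discovery state under work_lock before queueing, so one port update can never be scheduled twice. A generic sketch of the schedule-once idiom, with hypothetical fields:

	#include <linux/workqueue.h>
	#include <linux/spinlock.h>

	struct port {
		spinlock_t lock;
		bool update_queued;
		struct work_struct work;
	};

	static void sched_update(struct port *p)
	{
		unsigned long flags;

		spin_lock_irqsave(&p->lock, flags);
		if (p->update_queued) {		/* already scheduled */
			spin_unlock_irqrestore(&p->lock, flags);
			return;
		}
		p->update_queued = true;
		spin_unlock_irqrestore(&p->lock, flags);

		queue_work(system_unbound_wq, &p->work);
	}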
 
 static
@@ -4808,23 +5042,29 @@
 			fcport->d_id = e->u.new_sess.id;
 			fcport->flags |= FCF_FABRIC_DEVICE;
 			fcport->fw_login_state = DSC_LS_PLOGI_PEND;
-			if (e->u.new_sess.fc4_type & FS_FC4TYPE_FCP)
+			if (e->u.new_sess.fc4_type == FS_FC4TYPE_FCP)
 				fcport->fc4_type = FC4_TYPE_FCP_SCSI;
 
-			if (e->u.new_sess.fc4_type & FS_FC4TYPE_NVME) {
+			if (e->u.new_sess.fc4_type == FS_FC4TYPE_NVME) {
 				fcport->fc4_type = FC4_TYPE_OTHER;
 				fcport->fc4f_nvme = FC4_TYPE_NVME;
 			}
 
 			memcpy(fcport->port_name, e->u.new_sess.port_name,
 			    WWN_SIZE);
+
+			if (e->u.new_sess.fc4_type & FS_FCP_IS_N2N)
+				fcport->n2n_flag = 1;
+
 		} else {
 			ql_dbg(ql_dbg_disc, vha, 0xffff,
 				   "%s %8phC mem alloc fail.\n",
 				   __func__, e->u.new_sess.port_name);
 
-			if (pla)
+			if (pla) {
+				list_del(&pla->list);
 				kmem_cache_free(qla_tgt_plogi_cachep, pla);
+			}
 			return;
 		}
 
@@ -4855,6 +5095,7 @@
 	if (fcport) {
 		fcport->id_changed = 1;
 		fcport->scan_state = QLA_FCPORT_FOUND;
+		fcport->chip_reset = vha->hw->base_qpair->chip_reset;
 		memcpy(fcport->node_name, e->u.new_sess.node_name, WWN_SIZE);
 
 		if (pla) {
@@ -4913,11 +5154,9 @@
 			if (dfcp)
 				qlt_schedule_sess_for_deletion(tfcp);
 
-
-			if (N2N_TOPO(vha->hw))
-				fcport->flags &= ~FCF_FABRIC_DEVICE;
-
 			if (N2N_TOPO(vha->hw)) {
+				fcport->flags &= ~FCF_FABRIC_DEVICE;
+				fcport->keep_nport_handle = 1;
 				if (vha->flags.nvme_enabled) {
 					fcport->fc4f_nvme = 1;
 					fcport->n2n_flag = 1;
@@ -4934,8 +5173,10 @@
 
 	if (free_fcport) {
 		qla2x00_free_fcport(fcport);
-		if (pla)
+		if (pla) {
+			list_del(&pla->list);
 			kmem_cache_free(qla_tgt_plogi_cachep, pla);
+		}
 	}
 }
 
@@ -4959,14 +5200,14 @@
 	struct qla_work_evt *e, *tmp;
 	unsigned long flags;
 	LIST_HEAD(work);
+	int rc;
 
 	spin_lock_irqsave(&vha->work_lock, flags);
 	list_splice_init(&vha->work_list, &work);
 	spin_unlock_irqrestore(&vha->work_lock, flags);
 
 	list_for_each_entry_safe(e, tmp, &work, list) {
-		list_del_init(&e->list);
-
+		rc = QLA_SUCCESS;
 		switch (e->type) {
 		case QLA_EVT_AEN:
 			fc_host_post_event(vha->host, fc_get_event_number(),
@@ -4980,7 +5221,7 @@
 			    e->u.logio.data);
 			break;
 		case QLA_EVT_ASYNC_LOGOUT:
-			qla2x00_async_logout(vha, e->u.logio.fcport);
+			rc = qla2x00_async_logout(vha, e->u.logio.fcport);
 			break;
 		case QLA_EVT_ASYNC_LOGOUT_DONE:
 			qla2x00_async_logout_done(vha, e->u.logio.fcport,
@@ -4990,19 +5231,12 @@
 			qla2x00_async_adisc(vha, e->u.logio.fcport,
 			    e->u.logio.data);
 			break;
-		case QLA_EVT_ASYNC_ADISC_DONE:
-			qla2x00_async_adisc_done(vha, e->u.logio.fcport,
-			    e->u.logio.data);
-			break;
 		case QLA_EVT_UEVENT:
 			qla2x00_uevent_emit(vha, e->u.uevent.code);
 			break;
 		case QLA_EVT_AENFX:
 			qlafx00_process_aen(vha, e);
 			break;
-		case QLA_EVT_GIDPN:
-			qla24xx_async_gidpn(vha, e->u.fcport.fcport);
-			break;
 		case QLA_EVT_GPNID:
 			qla24xx_async_gpnid(vha, &e->u.gpnid.id);
 			break;
@@ -5025,9 +5259,6 @@
 		case QLA_EVT_GPSC:
 			qla24xx_async_gpsc(vha, e->u.fcport.fcport);
 			break;
-		case QLA_EVT_UPD_FCPORT:
-			qla2x00_update_fcport(vha, e->u.fcport.fcport);
-			break;
 		case QLA_EVT_GNL:
 			qla24xx_async_gnl(vha, e->u.fcport.fcport);
 			break;
@@ -5035,7 +5266,7 @@
 			qla24xx_do_nack_work(vha, e);
 			break;
 		case QLA_EVT_ASYNC_PRLO:
-			qla2x00_async_prlo(vha, e->u.logio.fcport);
+			rc = qla2x00_async_prlo(vha, e->u.logio.fcport);
 			break;
 		case QLA_EVT_ASYNC_PRLO_DONE:
 			qla2x00_async_prlo_done(vha, e->u.logio.fcport,
@@ -5068,6 +5299,15 @@
 			    e->u.fcport.fcport, false);
 			break;
 		}
+
+		if (rc == EAGAIN) {
+			/* put 'work' at head of 'vha->work_list' */
+			spin_lock_irqsave(&vha->work_lock, flags);
+			list_splice(&work, &vha->work_list);
+			spin_unlock_irqrestore(&vha->work_lock, flags);
+			break;
+		}
+		list_del_init(&e->list);
 		if (e->flags & QLA_EVT_FLAG_FREE)
 			kfree(e);
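[Editor's note] Events are now unlinked only after they are handled, so on EAGAIN the unprocessed remainder (including the failed event) can be spliced back onto the head of the work list in order, as the hunk below shows. A self-contained sketch of the drain/requeue pattern; struct evt and process_one() are hypothetical:

	#include <linux/list.h>
	#include <linux/spinlock.h>
	#include <linux/slab.h>

	struct evt { struct list_head list; };
	static int process_one(struct evt *e);

	static void drain_events(struct list_head *src, spinlock_t *lock)
	{
		struct evt *e, *tmp;
		unsigned long flags;
		LIST_HEAD(work);

		spin_lock_irqsave(lock, flags);
		list_splice_init(src, &work);	/* steal the whole list */
		spin_unlock_irqrestore(lock, flags);

		list_for_each_entry_safe(e, tmp, &work, list) {
			if (process_one(e) == -EAGAIN) {
				/* Requeue the unprocessed tail at the head. */
				spin_lock_irqsave(lock, flags);
				list_splice(&work, src);
				spin_unlock_irqrestore(lock, flags);
				return;
			}
			list_del_init(&e->list);
			kfree(e);
		}
	}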
 
@@ -5116,9 +5356,8 @@
 			} else {
 				if (vha->hw->current_topology != ISP_CFG_NL) {
 					memset(&ea, 0, sizeof(ea));
-					ea.event = FCME_RELOGIN;
 					ea.fcport = fcport;
-					qla2x00_fcport_event_handler(vha, &ea);
+					qla24xx_handle_relogin_event(vha, &ea);
 				} else if (vha->hw->current_topology ==
 				    ISP_CFG_NL) {
 					fcport->login_retry--;
@@ -5364,6 +5603,7 @@
 	uint32_t idc_lck_rcvry_stage_mask = 0x3;
 	uint32_t idc_lck_rcvry_owner_mask = 0x3c;
 	struct qla_hw_data *ha = base_vha->hw;
+
 	ql_dbg(ql_dbg_p3p, base_vha, 0xb086,
 	    "Trying force recovery of the IDC lock.\n");
 
@@ -5455,7 +5695,6 @@
 void
 qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id)
 {
-	uint16_t options = (requester_id << 15) | BIT_6;
 	uint32_t data;
 	uint32_t lock_owner;
 	struct qla_hw_data *ha = base_vha->hw;
@@ -5488,22 +5727,6 @@
 	}
 
 	return;
-
-	/* XXX: IDC-lock implementation using access-control mbx */
-retry_lock2:
-	if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) {
-		ql_dbg(ql_dbg_p3p, base_vha, 0xb072,
-		    "Failed to acquire IDC lock. retrying...\n");
-		/* Retry/Perform IDC-Lock recovery */
-		if (qla83xx_idc_lock_recovery(base_vha) == QLA_SUCCESS) {
-			qla83xx_wait_logic();
-			goto retry_lock2;
-		} else
-			ql_log(ql_log_warn, base_vha, 0xb076,
-			    "IDC Lock recovery FAILED.\n");
-	}
-
-	return;
 }
 
 void
@@ -6041,12 +6264,29 @@
 		if (test_and_clear_bit
 		    (ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
 		    !test_bit(UNLOADING, &base_vha->dpc_flags)) {
+			bool do_reset = true;
 
-			ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
-			    "ISP abort scheduled.\n");
-			if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
+			switch (base_vha->qlini_mode) {
+			case QLA2XXX_INI_MODE_ENABLED:
+				break;
+			case QLA2XXX_INI_MODE_DISABLED:
+				if (!qla_tgt_mode_enabled(base_vha) &&
+				    !ha->flags.fw_started)
+					do_reset = false;
+				break;
+			case QLA2XXX_INI_MODE_DUAL:
+				if (!qla_dual_mode_enabled(base_vha) &&
+				    !ha->flags.fw_started)
+					do_reset = false;
+				break;
+			default:
+				break;
+			}
+
+			if (do_reset && !(test_and_set_bit(ABORT_ISP_ACTIVE,
 			    &base_vha->dpc_flags))) {
-
+				ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
+				    "ISP abort scheduled.\n");
 				if (ha->isp_ops->abort_isp(base_vha)) {
 					/* failed. retry later */
 					set_bit(ISP_ABORT_NEEDED,
@@ -6054,10 +6294,9 @@
 				}
 				clear_bit(ABORT_ISP_ACTIVE,
 						&base_vha->dpc_flags);
+				ql_dbg(ql_dbg_dpc, base_vha, 0x4008,
+				    "ISP abort end.\n");
 			}
-
-			ql_dbg(ql_dbg_dpc, base_vha, 0x4008,
-			    "ISP abort end.\n");
 		}
 
 		if (test_and_clear_bit(FCPORT_UPDATE_NEEDED,
@@ -6183,17 +6422,28 @@
 			mutex_unlock(&ha->mq_lock);
 		}
 
-		if (test_and_clear_bit(SET_ZIO_THRESHOLD_NEEDED, &base_vha->dpc_flags)) {
+		if (test_and_clear_bit(SET_NVME_ZIO_THRESHOLD_NEEDED,
+		    &base_vha->dpc_flags)) {
 			ql_log(ql_log_info, base_vha, 0xffffff,
 				"nvme: SET ZIO Activity exchange threshold to %d.\n",
 						ha->nvme_last_rptd_aen);
-			if (qla27xx_set_zio_threshold(base_vha, ha->nvme_last_rptd_aen)) {
+			if (qla27xx_set_zio_threshold(base_vha,
+			    ha->nvme_last_rptd_aen)) {
 				ql_log(ql_log_info, base_vha, 0xffffff,
-					"nvme: Unable to SET ZIO Activity exchange threshold to %d.\n",
-						ha->nvme_last_rptd_aen);
+				    "nvme: Unable to SET ZIO Activity exchange threshold to %d.\n",
+				    ha->nvme_last_rptd_aen);
 			}
 		}
 
+		if (test_and_clear_bit(SET_ZIO_THRESHOLD_NEEDED,
+		    &base_vha->dpc_flags)) {
+			ql_log(ql_log_info, base_vha, 0xffffff,
+			    "SET ZIO Activity exchange threshold to %d.\n",
+			    ha->last_zio_threshold);
+			qla27xx_set_zio_threshold(base_vha,
+			    ha->last_zio_threshold);
+		}
+
 		if (!IS_QLAFX00(ha))
 			qla2x00_do_dpc_all_vps(base_vha);
 
@@ -6407,12 +6657,25 @@
 	 * see if the active AEN count has changed from what was last reported.
 	 */
 	if (!vha->vp_idx &&
-		atomic_read(&ha->nvme_active_aen_cnt) != ha->nvme_last_rptd_aen &&
-		ha->zio_mode == QLA_ZIO_MODE_6) {
+	    (atomic_read(&ha->nvme_active_aen_cnt) != ha->nvme_last_rptd_aen) &&
+	    ha->zio_mode == QLA_ZIO_MODE_6 &&
+	    !ha->flags.host_shutting_down) {
 		ql_log(ql_log_info, vha, 0x3002,
-			"nvme: Sched: Set ZIO exchange threshold to %d.\n",
-			ha->nvme_last_rptd_aen);
+		    "nvme: Sched: Set ZIO exchange threshold to %d.\n",
+		    ha->nvme_last_rptd_aen);
 		ha->nvme_last_rptd_aen = atomic_read(&ha->nvme_active_aen_cnt);
+		set_bit(SET_NVME_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags);
+		start_dpc++;
+	}
+
+	if (!vha->vp_idx &&
+	    (atomic_read(&ha->zio_threshold) != ha->last_zio_threshold) &&
+	    (ha->zio_mode == QLA_ZIO_MODE_6) &&
+	    (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) {
+		ql_log(ql_log_info, vha, 0x3002,
+		    "Sched: Set ZIO exchange threshold to %d.\n",
+		    ha->last_zio_threshold);
+		ha->last_zio_threshold = atomic_read(&ha->zio_threshold);
 		set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags);
 		start_dpc++;
 	}
@@ -6454,7 +6717,6 @@
 
 /* Firmware interface routines. */
 
-#define FW_BLOBS	11
 #define FW_ISP21XX	0
 #define FW_ISP22XX	1
 #define FW_ISP2300	2
@@ -6466,6 +6728,7 @@
 #define FW_ISP2031	8
 #define FW_ISP8031	9
 #define FW_ISP27XX	10
+#define FW_ISP28XX	11
 
 #define FW_FILE_ISP21XX	"ql2100_fw.bin"
 #define FW_FILE_ISP22XX	"ql2200_fw.bin"
@@ -6478,11 +6741,12 @@
 #define FW_FILE_ISP2031	"ql2600_fw.bin"
 #define FW_FILE_ISP8031	"ql8300_fw.bin"
 #define FW_FILE_ISP27XX	"ql2700_fw.bin"
+#define FW_FILE_ISP28XX	"ql2800_fw.bin"
 
 
 static DEFINE_MUTEX(qla_fw_lock);
 
-static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
+static struct fw_blob qla_fw_blobs[] = {
 	{ .name = FW_FILE_ISP21XX, .segs = { 0x1000, 0 }, },
 	{ .name = FW_FILE_ISP22XX, .segs = { 0x1000, 0 }, },
 	{ .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, },
@@ -6494,6 +6758,8 @@
 	{ .name = FW_FILE_ISP2031, },
 	{ .name = FW_FILE_ISP8031, },
 	{ .name = FW_FILE_ISP27XX, },
+	{ .name = FW_FILE_ISP28XX, },
+	{ .name = NULL, },
 };
 
 struct fw_blob *
@@ -6524,10 +6790,15 @@
 		blob = &qla_fw_blobs[FW_ISP8031];
 	} else if (IS_QLA27XX(ha)) {
 		blob = &qla_fw_blobs[FW_ISP27XX];
+	} else if (IS_QLA28XX(ha)) {
+		blob = &qla_fw_blobs[FW_ISP28XX];
 	} else {
 		return NULL;
 	}
 
+	if (!blob->name)
+		return NULL;
+
 	mutex_lock(&qla_fw_lock);
 	if (blob->fw)
 		goto out;
@@ -6537,7 +6808,6 @@
 		    "Failed to load firmware image (%s).\n", blob->name);
 		blob->fw = NULL;
 		blob = NULL;
-		goto out;
 	}
 
 out:
@@ -6548,14 +6818,86 @@
 static void
 qla2x00_release_firmware(void)
 {
-	int idx;
+	struct fw_blob *blob;
 
 	mutex_lock(&qla_fw_lock);
-	for (idx = 0; idx < FW_BLOBS; idx++)
-		release_firmware(qla_fw_blobs[idx].fw);
+	for (blob = qla_fw_blobs; blob->name; blob++)
+		release_firmware(blob->fw);
 	mutex_unlock(&qla_fw_lock);
 }
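[Editor's note] Dropping the FW_BLOBS count in favor of a NULL-name sentinel means the release loop can never run past, or fall short of, the table. The idiom in miniature, with made-up firmware names:

	#include <linux/firmware.h>

	struct blob {
		const char *name;
		const struct firmware *fw;
	};

	static struct blob blobs[] = {
		{ .name = "example_a.bin" },	/* illustrative names */
		{ .name = "example_b.bin" },
		{ .name = NULL },		/* sentinel terminates the walk */
	};

	static void release_all(void)
	{
		struct blob *b;

		for (b = blobs; b->name; b++)
			release_firmware(b->fw);	/* NULL fw is a no-op */
	}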
 
+static void qla_pci_error_cleanup(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+	struct qla_qpair *qpair = NULL;
+	struct scsi_qla_host *vp;
+	fc_port_t *fcport;
+	int i;
+	unsigned long flags;
+
+	ha->chip_reset++;
+
+	ha->base_qpair->chip_reset = ha->chip_reset;
+	for (i = 0; i < ha->max_qpairs; i++) {
+		if (ha->queue_pair_map[i])
+			ha->queue_pair_map[i]->chip_reset =
+			    ha->base_qpair->chip_reset;
+	}
+
+	/* purge MBox commands */
+	if (atomic_read(&ha->num_pend_mbx_stage3)) {
+		clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
+		complete(&ha->mbx_intr_comp);
+	}
+
+	i = 0;
+
+	while (atomic_read(&ha->num_pend_mbx_stage3) ||
+	    atomic_read(&ha->num_pend_mbx_stage2) ||
+	    atomic_read(&ha->num_pend_mbx_stage1)) {
+		msleep(20);
+		i++;
+		if (i > 50)
+			break;
+	}
+
+	ha->flags.purge_mbox = 0;
+
+	mutex_lock(&ha->mq_lock);
+	list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
+		qpair->online = 0;
+	mutex_unlock(&ha->mq_lock);
+
+	qla2x00_mark_all_devices_lost(vha, 0);
+
+	spin_lock_irqsave(&ha->vport_slock, flags);
+	list_for_each_entry(vp, &ha->vp_list, list) {
+		atomic_inc(&vp->vref_count);
+		spin_unlock_irqrestore(&ha->vport_slock, flags);
+		qla2x00_mark_all_devices_lost(vp, 0);
+		spin_lock_irqsave(&ha->vport_slock, flags);
+		atomic_dec(&vp->vref_count);
+	}
+	spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+	/* Clear all async request states across all VPs. */
+	list_for_each_entry(fcport, &vha->vp_fcports, list)
+		fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
+
+	spin_lock_irqsave(&ha->vport_slock, flags);
+	list_for_each_entry(vp, &ha->vp_list, list) {
+		atomic_inc(&vp->vref_count);
+		spin_unlock_irqrestore(&ha->vport_slock, flags);
+		list_for_each_entry(fcport, &vp->vp_fcports, list)
+			fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
+		spin_lock_irqsave(&ha->vport_slock, flags);
+		atomic_dec(&vp->vref_count);
+	}
+	spin_unlock_irqrestore(&ha->vport_slock, flags);
+}
+
+
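[Editor's note] qla_pci_error_cleanup() above pins each vport via vref_count before dropping vport_slock, so the entry cannot be freed while it is worked on unlocked, and the pinned node keeps the list cursor valid when the lock is retaken. A generic sketch, assuming deletion waits for the count to drain; struct vp and visit() are stand-ins:

	#include <linux/spinlock.h>
	#include <linux/list.h>
	#include <linux/atomic.h>

	struct vp {
		struct list_head list;
		atomic_t vref_count;	/* deletion waits for this to drain */
	};

	static void for_each_vp_unlocked(struct list_head *head, spinlock_t *lock,
					 void (*visit)(struct vp *))
	{
		struct vp *vp;
		unsigned long flags;

		spin_lock_irqsave(lock, flags);
		list_for_each_entry(vp, head, list) {
			atomic_inc(&vp->vref_count);	/* pin before unlocking */
			spin_unlock_irqrestore(lock, flags);

			visit(vp);			/* runs without the lock */

			spin_lock_irqsave(lock, flags);
			atomic_dec(&vp->vref_count);	/* unpin under the lock */
		}
		spin_unlock_irqrestore(lock, flags);
	}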
 static pci_ers_result_t
 qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
 {
@@ -6581,20 +6923,7 @@
 		return PCI_ERS_RESULT_CAN_RECOVER;
 	case pci_channel_io_frozen:
 		ha->flags.eeh_busy = 1;
-		/* For ISP82XX complete any pending mailbox cmd */
-		if (IS_QLA82XX(ha)) {
-			ha->flags.isp82xx_fw_hung = 1;
-			ql_dbg(ql_dbg_aer, vha, 0x9001, "Pci channel io frozen\n");
-			qla82xx_clear_pending_mbx(vha);
-		}
-		qla2x00_free_irqs(vha);
-		pci_disable_device(pdev);
-		/* Return back all IOs */
-		qla2x00_abort_all_cmds(vha, DID_RESET << 16);
-		if (ql2xmqsupport || ql2xnvmeenable) {
-			set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
-			qla2xxx_wake_dpc(vha);
-		}
+		qla_pci_error_cleanup(vha);
 		return PCI_ERS_RESULT_NEED_RESET;
 	case pci_channel_io_perm_failure:
 		ha->flags.pci_channel_io_perm_failure = 1;
@@ -6648,122 +6977,14 @@
 		return PCI_ERS_RESULT_RECOVERED;
 }
 
-static uint32_t
-qla82xx_error_recovery(scsi_qla_host_t *base_vha)
-{
-	uint32_t rval = QLA_FUNCTION_FAILED;
-	uint32_t drv_active = 0;
-	struct qla_hw_data *ha = base_vha->hw;
-	int fn;
-	struct pci_dev *other_pdev = NULL;
-
-	ql_dbg(ql_dbg_aer, base_vha, 0x9006,
-	    "Entered %s.\n", __func__);
-
-	set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
-
-	if (base_vha->flags.online) {
-		/* Abort all outstanding commands,
-		 * so as to be requeued later */
-		qla2x00_abort_isp_cleanup(base_vha);
-	}
-
-
-	fn = PCI_FUNC(ha->pdev->devfn);
-	while (fn > 0) {
-		fn--;
-		ql_dbg(ql_dbg_aer, base_vha, 0x9007,
-		    "Finding pci device at function = 0x%x.\n", fn);
-		other_pdev =
-		    pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
-		    ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
-		    fn));
-
-		if (!other_pdev)
-			continue;
-		if (atomic_read(&other_pdev->enable_cnt)) {
-			ql_dbg(ql_dbg_aer, base_vha, 0x9008,
-			    "Found PCI func available and enable at 0x%x.\n",
-			    fn);
-			pci_dev_put(other_pdev);
-			break;
-		}
-		pci_dev_put(other_pdev);
-	}
-
-	if (!fn) {
-		/* Reset owner */
-		ql_dbg(ql_dbg_aer, base_vha, 0x9009,
-		    "This devfn is reset owner = 0x%x.\n",
-		    ha->pdev->devfn);
-		qla82xx_idc_lock(ha);
-
-		qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
-		    QLA8XXX_DEV_INITIALIZING);
-
-		qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION,
-		    QLA82XX_IDC_VERSION);
-
-		drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
-		ql_dbg(ql_dbg_aer, base_vha, 0x900a,
-		    "drv_active = 0x%x.\n", drv_active);
-
-		qla82xx_idc_unlock(ha);
-		/* Reset if device is not already reset
-		 * drv_active would be 0 if a reset has already been done
-		 */
-		if (drv_active)
-			rval = qla82xx_start_firmware(base_vha);
-		else
-			rval = QLA_SUCCESS;
-		qla82xx_idc_lock(ha);
-
-		if (rval != QLA_SUCCESS) {
-			ql_log(ql_log_info, base_vha, 0x900b,
-			    "HW State: FAILED.\n");
-			qla82xx_clear_drv_active(ha);
-			qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
-			    QLA8XXX_DEV_FAILED);
-		} else {
-			ql_log(ql_log_info, base_vha, 0x900c,
-			    "HW State: READY.\n");
-			qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
-			    QLA8XXX_DEV_READY);
-			qla82xx_idc_unlock(ha);
-			ha->flags.isp82xx_fw_hung = 0;
-			rval = qla82xx_restart_isp(base_vha);
-			qla82xx_idc_lock(ha);
-			/* Clear driver state register */
-			qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0);
-			qla82xx_set_drv_active(base_vha);
-		}
-		qla82xx_idc_unlock(ha);
-	} else {
-		ql_dbg(ql_dbg_aer, base_vha, 0x900d,
-		    "This devfn is not reset owner = 0x%x.\n",
-		    ha->pdev->devfn);
-		if ((qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
-		    QLA8XXX_DEV_READY)) {
-			ha->flags.isp82xx_fw_hung = 0;
-			rval = qla82xx_restart_isp(base_vha);
-			qla82xx_idc_lock(ha);
-			qla82xx_set_drv_active(base_vha);
-			qla82xx_idc_unlock(ha);
-		}
-	}
-	clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
-
-	return rval;
-}
-
 static pci_ers_result_t
 qla2xxx_pci_slot_reset(struct pci_dev *pdev)
 {
 	pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
 	scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
 	struct qla_hw_data *ha = base_vha->hw;
-	struct rsp_que *rsp;
-	int rc, retries = 10;
+	int rc;
+	struct qla_qpair *qpair = NULL;
 
 	ql_dbg(ql_dbg_aer, base_vha, 0x9004,
 	    "Slot Reset.\n");
@@ -6792,24 +7013,16 @@
 		goto exit_slot_reset;
 	}
 
-	rsp = ha->rsp_q_map[0];
-	if (qla2x00_request_irqs(ha, rsp))
-		goto exit_slot_reset;
 
 	if (ha->isp_ops->pci_config(base_vha))
 		goto exit_slot_reset;
 
-	if (IS_QLA82XX(ha)) {
-		if (qla82xx_error_recovery(base_vha) == QLA_SUCCESS) {
-			ret = PCI_ERS_RESULT_RECOVERED;
-			goto exit_slot_reset;
-		} else
-			goto exit_slot_reset;
-	}
+	mutex_lock(&ha->mq_lock);
+	list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
+		qpair->online = 1;
+	mutex_unlock(&ha->mq_lock);
 
-	while (ha->flags.mbox_busy && retries--)
-		msleep(1000);
-
+	base_vha->flags.online = 1;
 	set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
 	if (ha->isp_ops->abort_isp(base_vha) == QLA_SUCCESS)
 		ret =  PCI_ERS_RESULT_RECOVERED;
@@ -6833,34 +7046,118 @@
 	ql_dbg(ql_dbg_aer, base_vha, 0x900f,
 	    "pci_resume.\n");
 
+	ha->flags.eeh_busy = 0;
+
 	ret = qla2x00_wait_for_hba_online(base_vha);
 	if (ret != QLA_SUCCESS) {
 		ql_log(ql_log_fatal, base_vha, 0x9002,
 		    "The device failed to resume I/O from slot/link_reset.\n");
 	}
+}
 
-	pci_cleanup_aer_uncorrect_error_status(pdev);
+static void
+qla_pci_reset_prepare(struct pci_dev *pdev)
+{
+	scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
+	struct qla_hw_data *ha = base_vha->hw;
+	struct qla_qpair *qpair;
 
+	ql_log(ql_log_warn, base_vha, 0xffff,
+	    "%s.\n", __func__);
+
+	/*
+	 * A PCI FLR/function reset is about to reset the slot. Stop
+	 * the chip to halt all DMA access; pci_reset_done() is
+	 * expected to be called after the FLR to resume chip
+	 * operation.
+	 */
+	ha->flags.eeh_busy = 1;
+	mutex_lock(&ha->mq_lock);
+	list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
+		qpair->online = 0;
+	mutex_unlock(&ha->mq_lock);
+
+	set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
+	qla2x00_abort_isp_cleanup(base_vha);
+	qla2x00_abort_all_cmds(base_vha, DID_RESET << 16);
+}
+
+static void
+qla_pci_reset_done(struct pci_dev *pdev)
+{
+	scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
+	struct qla_hw_data *ha = base_vha->hw;
+	struct qla_qpair *qpair;
+
+	ql_log(ql_log_warn, base_vha, 0xffff,
+	    "%s.\n", __func__);
+
+	/* The FLR has completed at the PCI layer; resume the adapter. */
 	ha->flags.eeh_busy = 0;
+	mutex_lock(&ha->mq_lock);
+	list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
+		qpair->online = 1;
+	mutex_unlock(&ha->mq_lock);
+
+	base_vha->flags.online = 1;
+	ha->isp_ops->abort_isp(base_vha);
+	clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
 }
 
 static int qla2xxx_map_queues(struct Scsi_Host *shost)
 {
 	int rc;
 	scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
+	struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
 
-	if (USER_CTRL_IRQ(vha->hw))
-		rc = blk_mq_map_queues(&shost->tag_set);
+	if (USER_CTRL_IRQ(vha->hw) || !vha->hw->mqiobase)
+		rc = blk_mq_map_queues(qmap);
 	else
-		rc = blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev, 0);
+		rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset);
 	return rc;
 }
 
+struct scsi_host_template qla2xxx_driver_template = {
+	.module			= THIS_MODULE,
+	.name			= QLA2XXX_DRIVER_NAME,
+	.queuecommand		= qla2xxx_queuecommand,
+
+	.eh_timed_out		= fc_eh_timed_out,
+	.eh_abort_handler	= qla2xxx_eh_abort,
+	.eh_device_reset_handler = qla2xxx_eh_device_reset,
+	.eh_target_reset_handler = qla2xxx_eh_target_reset,
+	.eh_bus_reset_handler	= qla2xxx_eh_bus_reset,
+	.eh_host_reset_handler	= qla2xxx_eh_host_reset,
+
+	.slave_configure	= qla2xxx_slave_configure,
+
+	.slave_alloc		= qla2xxx_slave_alloc,
+	.slave_destroy		= qla2xxx_slave_destroy,
+	.scan_finished		= qla2xxx_scan_finished,
+	.scan_start		= qla2xxx_scan_start,
+	.change_queue_depth	= scsi_change_queue_depth,
+	.map_queues             = qla2xxx_map_queues,
+	.this_id		= -1,
+	.cmd_per_lun		= 3,
+	.sg_tablesize		= SG_ALL,
+
+	.max_sectors		= 0xFFFF,
+	.shost_attrs		= qla2x00_host_attrs,
+
+	.supported_mode		= MODE_INITIATOR,
+	.track_queue_depth	= 1,
+	.cmd_size		= sizeof(srb_t),
+};
+
 static const struct pci_error_handlers qla2xxx_err_handler = {
 	.error_detected = qla2xxx_pci_error_detected,
 	.mmio_enabled = qla2xxx_pci_mmio_enabled,
 	.slot_reset = qla2xxx_pci_slot_reset,
 	.resume = qla2xxx_pci_resume,
+	.reset_prepare = qla_pci_reset_prepare,
+	.reset_done = qla_pci_reset_done,
 };
 
 static struct pci_device_id qla2xxx_pci_tbl[] = {
@@ -6886,6 +7183,11 @@
 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2271) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2261) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2061) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2081) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2281) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2089) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2289) },
 	{ 0 },
 };
 MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
@@ -6915,6 +7217,30 @@
 {
 	int ret = 0;
 
+	BUILD_BUG_ON(sizeof(cmd_entry_t) != 64);
+	BUILD_BUG_ON(sizeof(cont_a64_entry_t) != 64);
+	BUILD_BUG_ON(sizeof(cont_entry_t) != 64);
+	BUILD_BUG_ON(sizeof(init_cb_t) != 96);
+	BUILD_BUG_ON(sizeof(ms_iocb_entry_t) != 64);
+	BUILD_BUG_ON(sizeof(request_t) != 64);
+	BUILD_BUG_ON(sizeof(struct access_chip_84xx) != 64);
+	BUILD_BUG_ON(sizeof(struct cmd_bidir) != 64);
+	BUILD_BUG_ON(sizeof(struct cmd_nvme) != 64);
+	BUILD_BUG_ON(sizeof(struct cmd_type_6) != 64);
+	BUILD_BUG_ON(sizeof(struct cmd_type_7) != 64);
+	BUILD_BUG_ON(sizeof(struct cmd_type_7_fx00) != 64);
+	BUILD_BUG_ON(sizeof(struct cmd_type_crc_2) != 64);
+	BUILD_BUG_ON(sizeof(struct ct_entry_24xx) != 64);
+	BUILD_BUG_ON(sizeof(struct ctio_crc2_to_fw) != 64);
+	BUILD_BUG_ON(sizeof(struct els_entry_24xx) != 64);
+	BUILD_BUG_ON(sizeof(struct fxdisc_entry_fx00) != 64);
+	BUILD_BUG_ON(sizeof(struct init_cb_24xx) != 128);
+	BUILD_BUG_ON(sizeof(struct init_cb_81xx) != 128);
+	BUILD_BUG_ON(sizeof(struct pt_ls4_request) != 64);
+	BUILD_BUG_ON(sizeof(struct sns_cmd_pkt) != 2064);
+	BUILD_BUG_ON(sizeof(struct verify_chip_entry_84xx) != 64);
+	BUILD_BUG_ON(sizeof(struct vf_evfp_entry_24xx) != 56);
+
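Note: the BUILD_BUG_ON() ladder pins the byte size of every IOCB layout the firmware consumes, so stray padding or a careless field edit breaks the build instead of corrupting the request ring at run time. A stand-alone sketch of the same guard using C11 _Static_assert; struct wire_iocb and its fields are hypothetical:

#include <stdint.h>

/* Hypothetical 64-byte on-wire entry; fields are illustrative only. */
struct wire_iocb {
	uint8_t  entry_type;
	uint8_t  entry_count;
	uint8_t  sys_define;
	uint8_t  entry_status;
	uint32_t handle;
	uint8_t  payload[56];
};

/* Fails at compile time if padding or a field edit changes the layout. */
_Static_assert(sizeof(struct wire_iocb) == 64, "wire_iocb must be 64 bytes");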
 	/* Allocate cache for SRBs. */
 	srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
 	    SLAB_HWCACHE_ALIGN, NULL);
@@ -6927,8 +7253,7 @@
 	/* Initialize target kmem_cache and mem_pools */
 	ret = qlt_init();
 	if (ret < 0) {
-		kmem_cache_destroy(srb_cachep);
-		return ret;
+		goto destroy_cache;
 	} else if (ret > 0) {
 		/*
 		 * If initiator mode is explicitly disabled by qlt_init(),
@@ -6946,14 +7271,16 @@
 	if (ql2xextended_error_logging == 1)
 		ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
 
+	if (ql2x_ini_mode == QLA2XXX_INI_MODE_DUAL)
+		qla_insert_tgt_attrs();
+
 	qla2xxx_transport_template =
 	    fc_attach_transport(&qla2xxx_transport_functions);
 	if (!qla2xxx_transport_template) {
-		kmem_cache_destroy(srb_cachep);
 		ql_log(ql_log_fatal, NULL, 0x0002,
 		    "fc_attach_transport failed...Failing load!.\n");
-		qlt_exit();
-		return -ENODEV;
+		ret = -ENODEV;
+		goto qlt_exit;
 	}
 
 	apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops);
@@ -6965,27 +7292,37 @@
 	qla2xxx_transport_vport_template =
 	    fc_attach_transport(&qla2xxx_transport_vport_functions);
 	if (!qla2xxx_transport_vport_template) {
-		kmem_cache_destroy(srb_cachep);
-		qlt_exit();
-		fc_release_transport(qla2xxx_transport_template);
 		ql_log(ql_log_fatal, NULL, 0x0004,
 		    "fc_attach_transport vport failed...Failing load!.\n");
-		return -ENODEV;
+		ret = -ENODEV;
+		goto unreg_chrdev;
 	}
 	ql_log(ql_log_info, NULL, 0x0005,
 	    "QLogic Fibre Channel HBA Driver: %s.\n",
 	    qla2x00_version_str);
 	ret = pci_register_driver(&qla2xxx_pci_driver);
 	if (ret) {
-		kmem_cache_destroy(srb_cachep);
-		qlt_exit();
-		fc_release_transport(qla2xxx_transport_template);
-		fc_release_transport(qla2xxx_transport_vport_template);
 		ql_log(ql_log_fatal, NULL, 0x0006,
 		    "pci_register_driver failed...ret=%d Failing load!.\n",
 		    ret);
+		goto release_vport_transport;
 	}
 	return ret;
+
+release_vport_transport:
+	fc_release_transport(qla2xxx_transport_vport_template);
+
+unreg_chrdev:
+	if (apidev_major >= 0)
+		unregister_chrdev(apidev_major, QLA2XXX_APIDEV);
+	fc_release_transport(qla2xxx_transport_template);
+
+qlt_exit:
+	qlt_exit();
+
+destroy_cache:
+	kmem_cache_destroy(srb_cachep);
+	return ret;
 }
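Note: the init path now unwinds through one ladder of goto labels instead of repeating the teardown calls at every failure site, which is what lets the error paths above collapse. A minimal user-space sketch of the pattern; init_everything() and its malloc() stand-ins are hypothetical:

#include <stdlib.h>

static void *cache, *transport;		/* module-lifetime resources */

int init_everything(void)
{
	int ret = -1;

	cache = malloc(64);		/* stands in for kmem_cache_create() */
	if (!cache)
		goto out;

	transport = malloc(64);		/* stands in for fc_attach_transport() */
	if (!transport)
		goto free_cache;

	return 0;			/* success: resources stay allocated */

free_cache:				/* unwind in reverse order of setup */
	free(cache);
	cache = NULL;
out:
	return ret;
}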
 
 /**
@@ -6994,15 +7331,15 @@
 static void __exit
 qla2x00_module_exit(void)
 {
-	unregister_chrdev(apidev_major, QLA2XXX_APIDEV);
 	pci_unregister_driver(&qla2xxx_pci_driver);
 	qla2x00_release_firmware();
-	kmem_cache_destroy(srb_cachep);
-	qlt_exit();
-	if (ctx_cachep)
-		kmem_cache_destroy(ctx_cachep);
-	fc_release_transport(qla2xxx_transport_template);
+	kmem_cache_destroy(ctx_cachep);
 	fc_release_transport(qla2xxx_transport_vport_template);
+	if (apidev_major >= 0)
+		unregister_chrdev(apidev_major, QLA2XXX_APIDEV);
+	fc_release_transport(qla2xxx_transport_template);
+	qlt_exit();
+	kmem_cache_destroy(srb_cachep);
 }
 
 module_init(qla2x00_module_init);
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 4499c78..f2d5115 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -429,102 +429,104 @@
 static inline uint32_t
 flash_conf_addr(struct qla_hw_data *ha, uint32_t faddr)
 {
-	return ha->flash_conf_off | faddr;
+	return ha->flash_conf_off + faddr;
 }
 
 static inline uint32_t
 flash_data_addr(struct qla_hw_data *ha, uint32_t faddr)
 {
-	return ha->flash_data_off | faddr;
+	return ha->flash_data_off + faddr;
 }
 
 static inline uint32_t
 nvram_conf_addr(struct qla_hw_data *ha, uint32_t naddr)
 {
-	return ha->nvram_conf_off | naddr;
+	return ha->nvram_conf_off + naddr;
 }
 
 static inline uint32_t
 nvram_data_addr(struct qla_hw_data *ha, uint32_t naddr)
 {
-	return ha->nvram_data_off | naddr;
+	return ha->nvram_data_off + naddr;
 }
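Note on the '|' to '+' change in these address helpers: bitwise OR is only equivalent while the offset never overlaps the base's set bits; once it does, OR silently drops the carry. A small demonstration with made-up values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t base = 0x7fd0, offset = 0x0101;

	printf("base | offset = %#x\n", base | offset);	/* 0x7fd1, wrong */
	printf("base + offset = %#x\n", base + offset);	/* 0x80d1, intended */
	return 0;
}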
 
-static uint32_t
-qla24xx_read_flash_dword(struct qla_hw_data *ha, uint32_t addr)
+static int
+qla24xx_read_flash_dword(struct qla_hw_data *ha, uint32_t addr, uint32_t *data)
 {
-	int rval;
-	uint32_t cnt, data;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+	ulong cnt = 30000;
 
 	WRT_REG_DWORD(&reg->flash_addr, addr & ~FARX_DATA_FLAG);
-	/* Wait for READ cycle to complete. */
-	rval = QLA_SUCCESS;
-	for (cnt = 3000;
-	    (RD_REG_DWORD(&reg->flash_addr) & FARX_DATA_FLAG) == 0 &&
-	    rval == QLA_SUCCESS; cnt--) {
-		if (cnt)
-			udelay(10);
-		else
-			rval = QLA_FUNCTION_TIMEOUT;
+
+	while (cnt--) {
+		if (RD_REG_DWORD(&reg->flash_addr) & FARX_DATA_FLAG) {
+			*data = RD_REG_DWORD(&reg->flash_data);
+			return QLA_SUCCESS;
+		}
+		udelay(10);
 		cond_resched();
 	}
 
-	/* TODO: What happens if we time out? */
-	data = 0xDEADDEAD;
-	if (rval == QLA_SUCCESS)
-		data = RD_REG_DWORD(&reg->flash_data);
-
-	return data;
+	ql_log(ql_log_warn, pci_get_drvdata(ha->pdev), 0x7090,
+	    "Flash read dword at %x timeout.\n", addr);
+	*data = 0xDEADDEAD;
+	return QLA_FUNCTION_TIMEOUT;
 }
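Note: the reworked read polls the ready flag against an explicit retry budget and reports failure through the return code instead of only the 0xDEADDEAD sentinel. A user-space sketch of the bounded-polling idiom; fake_reg, READY_BIT, and wait_until_ready() are stand-ins, not driver symbols:

#include <stdbool.h>
#include <stdint.h>

#define READY_BIT 0x80000000u		/* stand-in for FARX_DATA_FLAG */

static volatile uint32_t fake_reg;	/* stand-in for a device register */

static bool wait_until_ready(unsigned long tries)
{
	while (tries--) {
		if (fake_reg & READY_BIT)
			return true;	/* data is valid, caller may read */
		/* a real driver would udelay(10) and cond_resched() here */
	}
	return false;			/* budget exhausted: report timeout */
}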
 
-uint32_t *
+int
 qla24xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
     uint32_t dwords)
 {
-	uint32_t i;
+	ulong i;
+	int ret = QLA_SUCCESS;
 	struct qla_hw_data *ha = vha->hw;
 
 	/* Dword reads to flash. */
-	for (i = 0; i < dwords; i++, faddr++)
-		dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
-		    flash_data_addr(ha, faddr)));
+	faddr = flash_data_addr(ha, faddr);
+	for (i = 0; i < dwords; i++, faddr++, dwptr++) {
+		ret = qla24xx_read_flash_dword(ha, faddr, dwptr);
+		if (ret != QLA_SUCCESS)
+			break;
+		cpu_to_le32s(dwptr);
+	}
 
-	return dwptr;
+	return ret;
 }
 
 static int
 qla24xx_write_flash_dword(struct qla_hw_data *ha, uint32_t addr, uint32_t data)
 {
-	int rval;
-	uint32_t cnt;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+	ulong cnt = 500000;
 
 	WRT_REG_DWORD(&reg->flash_data, data);
-	RD_REG_DWORD(&reg->flash_data);		/* PCI Posting. */
 	WRT_REG_DWORD(&reg->flash_addr, addr | FARX_DATA_FLAG);
-	/* Wait for Write cycle to complete. */
-	rval = QLA_SUCCESS;
-	for (cnt = 500000; (RD_REG_DWORD(&reg->flash_addr) & FARX_DATA_FLAG) &&
-	    rval == QLA_SUCCESS; cnt--) {
-		if (cnt)
-			udelay(10);
-		else
-			rval = QLA_FUNCTION_TIMEOUT;
+
+	while (cnt--) {
+		if (!(RD_REG_DWORD(&reg->flash_addr) & FARX_DATA_FLAG))
+			return QLA_SUCCESS;
+		udelay(10);
 		cond_resched();
 	}
-	return rval;
+
+	ql_log(ql_log_warn, pci_get_drvdata(ha->pdev), 0x7090,
+	    "Flash write dword at %x timeout.\n", addr);
+	return QLA_FUNCTION_TIMEOUT;
 }
 
 static void
 qla24xx_get_flash_manufacturer(struct qla_hw_data *ha, uint8_t *man_id,
     uint8_t *flash_id)
 {
-	uint32_t ids;
+	uint32_t faddr, ids = 0;
 
-	ids = qla24xx_read_flash_dword(ha, flash_conf_addr(ha, 0x03ab));
-	*man_id = LSB(ids);
-	*flash_id = MSB(ids);
+	*man_id = *flash_id = 0;
+
+	faddr = flash_conf_addr(ha, 0x03ab);
+	if (!qla24xx_read_flash_dword(ha, faddr, &ids)) {
+		*man_id = LSB(ids);
+		*flash_id = MSB(ids);
+	}
 
 	/* Check if man_id and flash_id are valid. */
 	if (ids != 0xDEADDEAD && (*man_id == 0 || *flash_id == 0)) {
@@ -534,9 +536,11 @@
 		 * Example: ATMEL 0x00 01 45 1F
 		 * Extract MFG and Dev ID from last two bytes.
 		 */
-		ids = qla24xx_read_flash_dword(ha, flash_conf_addr(ha, 0x009f));
-		*man_id = LSB(ids);
-		*flash_id = MSB(ids);
+		faddr = flash_conf_addr(ha, 0x009f);
+		if (!qla24xx_read_flash_dword(ha, faddr, &ids)) {
+			*man_id = LSB(ids);
+			*flash_id = MSB(ids);
+		}
 	}
 }
 
@@ -545,12 +549,12 @@
 {
 	const char *loc, *locations[] = { "DEF", "PCI" };
 	uint32_t pcihdr, pcids;
-	uint32_t *dcode;
-	uint8_t *buf, *bcode, last_image;
 	uint16_t cnt, chksum, *wptr;
-	struct qla_flt_location *fltl;
 	struct qla_hw_data *ha = vha->hw;
 	struct req_que *req = ha->req_q_map[0];
+	struct qla_flt_location *fltl = (void *)req->ring;
+	uint32_t *dcode = (void *)req->ring;
+	uint8_t *buf = (void *)req->ring, *bcode, last_image;
 
 	/*
 	 * FLT-location structure resides after the last PCI region.
@@ -571,12 +575,13 @@
 	} else if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
 		*start = FA_FLASH_LAYOUT_ADDR_83;
 		goto end;
+	} else if (IS_QLA28XX(ha)) {
+		*start = FA_FLASH_LAYOUT_ADDR_28;
+		goto end;
 	}
+
 	/* Begin with first PCI expansion ROM header. */
-	buf = (uint8_t *)req->ring;
-	dcode = (uint32_t *)req->ring;
 	pcihdr = 0;
-	last_image = 1;
 	do {
 		/* Verify PCI expansion ROM header. */
 		qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20);
@@ -601,22 +606,19 @@
 	} while (!last_image);
 
 	/* Now verify FLT-location structure. */
-	fltl = (struct qla_flt_location *)req->ring;
-	qla24xx_read_flash_data(vha, dcode, pcihdr >> 2,
-	    sizeof(struct qla_flt_location) >> 2);
-	if (fltl->sig[0] != 'Q' || fltl->sig[1] != 'F' ||
-	    fltl->sig[2] != 'L' || fltl->sig[3] != 'T')
+	qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, sizeof(*fltl) >> 2);
+	if (memcmp(fltl->sig, "QFLT", 4))
 		goto end;
 
-	wptr = (uint16_t *)req->ring;
-	cnt = sizeof(struct qla_flt_location) >> 1;
+	wptr = (void *)req->ring;
+	cnt = sizeof(*fltl) / sizeof(*wptr);
 	for (chksum = 0; cnt--; wptr++)
 		chksum += le16_to_cpu(*wptr);
 	if (chksum) {
 		ql_log(ql_log_fatal, vha, 0x0045,
 		    "Inconsistent FLTL detected: checksum=0x%x.\n", chksum);
 		ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010e,
-		    buf, sizeof(struct qla_flt_location));
+		    fltl, sizeof(*fltl));
 		return QLA_FUNCTION_FAILED;
 	}
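Note: the FLT-location check here (and the FLT header check later) rely on the same scheme: the 16-bit little-endian words of the structure, stored checksum included, must sum to zero. A hedged sketch; csum16_ok() is illustrative:

#include <stdint.h>
#include <stddef.h>

/* Returns 1 when the additive checksum over the block is consistent.
 * Words are assumed to be already converted to CPU endianness. */
static int csum16_ok(const uint16_t *words, size_t nwords)
{
	uint16_t sum = 0;

	while (nwords--)
		sum += *words++;
	return sum == 0;	/* stored checksum wraps the total to zero */
}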
 
@@ -634,7 +636,7 @@
 static void
 qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
 {
-	const char *loc, *locations[] = { "DEF", "FLT" };
+	const char *locations[] = { "DEF", "FLT" }, *loc = locations[1];
 	const uint32_t def_fw[] =
 		{ FA_RISC_CODE_ADDR, FA_RISC_CODE_ADDR, FA_RISC_CODE_ADDR_81 };
 	const uint32_t def_boot[] =
@@ -664,20 +666,13 @@
 	const uint32_t fcp_prio_cfg1[] =
 		{ FA_FCP_PRIO1_ADDR, FA_FCP_PRIO1_ADDR_25,
 			0 };
-	uint32_t def;
-	uint16_t *wptr;
-	uint16_t cnt, chksum;
-	uint32_t start;
-	struct qla_flt_header *flt;
-	struct qla_flt_region *region;
-	struct qla_hw_data *ha = vha->hw;
-	struct req_que *req = ha->req_q_map[0];
 
-	def = 0;
-	if (IS_QLA25XX(ha))
-		def = 1;
-	else if (IS_QLA81XX(ha))
-		def = 2;
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t def = IS_QLA81XX(ha) ? 2 : IS_QLA25XX(ha) ? 1 : 0;
+	struct qla_flt_header *flt = (void *)ha->flt;
+	struct qla_flt_region *region = (void *)&flt[1];
+	uint16_t *wptr, cnt, chksum;
+	uint32_t start;
 
 	/* Assign FCP prio region since older adapters may not have FLT, or
 	   FCP prio region in its FLT.
@@ -686,12 +681,11 @@
 	    fcp_prio_cfg0[def] : fcp_prio_cfg1[def];
 
 	ha->flt_region_flt = flt_addr;
-	wptr = (uint16_t *)req->ring;
-	flt = (struct qla_flt_header *)req->ring;
-	region = (struct qla_flt_region *)&flt[1];
-	ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring,
-	    flt_addr << 2, OPTROM_BURST_SIZE);
-	if (*wptr == cpu_to_le16(0xffff))
+	wptr = (uint16_t *)ha->flt;
+	ha->isp_ops->read_optrom(vha, (void *)flt, flt_addr << 2,
+	    (sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE));
+
+	if (le16_to_cpu(*wptr) == 0xffff)
 		goto no_flash_data;
 	if (flt->version != cpu_to_le16(1)) {
 		ql_log(ql_log_warn, vha, 0x0047,
@@ -701,7 +695,7 @@
 		goto no_flash_data;
 	}
 
-	cnt = (sizeof(struct qla_flt_header) + le16_to_cpu(flt->length)) >> 1;
+	cnt = (sizeof(*flt) + le16_to_cpu(flt->length)) / sizeof(*wptr);
 	for (chksum = 0; cnt--; wptr++)
 		chksum += le16_to_cpu(*wptr);
 	if (chksum) {
@@ -712,18 +706,20 @@
 		goto no_flash_data;
 	}
 
-	loc = locations[1];
-	cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region);
+	cnt = le16_to_cpu(flt->length) / sizeof(*region);
 	for ( ; cnt; cnt--, region++) {
 		/* Store addresses as DWORD offsets. */
 		start = le32_to_cpu(region->start) >> 2;
 		ql_dbg(ql_dbg_init, vha, 0x0049,
-		    "FLT[%02x]: start=0x%x "
-		    "end=0x%x size=0x%x.\n", le32_to_cpu(region->code) & 0xff,
-		    start, le32_to_cpu(region->end) >> 2,
-		    le32_to_cpu(region->size));
+		    "FLT[%#x]: start=%#x end=%#x size=%#x.\n",
+		    le16_to_cpu(region->code), start,
+		    le32_to_cpu(region->end) >> 2,
+		    le32_to_cpu(region->size) >> 2);
+		if (region->attribute)
+			ql_log(ql_dbg_init, vha, 0xffff,
+			    "Region %x is secure\n", region->code);
 
-		switch (le32_to_cpu(region->code) & 0xff) {
+		switch (le16_to_cpu(region->code)) {
 		case FLT_REG_FCOE_FW:
 			if (!IS_QLA8031(ha))
 				break;
@@ -753,13 +749,13 @@
 				ha->flt_region_vpd = start;
 			break;
 		case FLT_REG_VPD_2:
-			if (!IS_QLA27XX(ha))
+			if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
 				break;
 			if (ha->port_no == 2)
 				ha->flt_region_vpd = start;
 			break;
 		case FLT_REG_VPD_3:
-			if (!IS_QLA27XX(ha))
+			if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
 				break;
 			if (ha->port_no == 3)
 				ha->flt_region_vpd = start;
@@ -777,13 +773,13 @@
 				ha->flt_region_nvram = start;
 			break;
 		case FLT_REG_NVRAM_2:
-			if (!IS_QLA27XX(ha))
+			if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
 				break;
 			if (ha->port_no == 2)
 				ha->flt_region_nvram = start;
 			break;
 		case FLT_REG_NVRAM_3:
-			if (!IS_QLA27XX(ha))
+			if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
 				break;
 			if (ha->port_no == 3)
 				ha->flt_region_nvram = start;
@@ -847,36 +843,74 @@
 				ha->flt_region_nvram = start;
 			break;
 		case FLT_REG_IMG_PRI_27XX:
-			if (IS_QLA27XX(ha))
+			if (IS_QLA27XX(ha) && !IS_QLA28XX(ha))
 				ha->flt_region_img_status_pri = start;
 			break;
 		case FLT_REG_IMG_SEC_27XX:
-			if (IS_QLA27XX(ha))
+			if (IS_QLA27XX(ha) && !IS_QLA28XX(ha))
 				ha->flt_region_img_status_sec = start;
 			break;
 		case FLT_REG_FW_SEC_27XX:
-			if (IS_QLA27XX(ha))
+			if (IS_QLA27XX(ha) && !IS_QLA28XX(ha))
 				ha->flt_region_fw_sec = start;
 			break;
 		case FLT_REG_BOOTLOAD_SEC_27XX:
-			if (IS_QLA27XX(ha))
+			if (IS_QLA27XX(ha) && !IS_QLA28XX(ha))
 				ha->flt_region_boot_sec = start;
 			break;
+		case FLT_REG_AUX_IMG_PRI_28XX:
+			if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+				ha->flt_region_aux_img_status_pri = start;
+			break;
+		case FLT_REG_AUX_IMG_SEC_28XX:
+			if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+				ha->flt_region_aux_img_status_sec = start;
+			break;
+		case FLT_REG_NVRAM_SEC_28XX_0:
+			if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+				if (ha->port_no == 0)
+					ha->flt_region_nvram_sec = start;
+			break;
+		case FLT_REG_NVRAM_SEC_28XX_1:
+			if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+				if (ha->port_no == 1)
+					ha->flt_region_nvram_sec = start;
+			break;
+		case FLT_REG_NVRAM_SEC_28XX_2:
+			if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+				if (ha->port_no == 2)
+					ha->flt_region_nvram_sec = start;
+			break;
+		case FLT_REG_NVRAM_SEC_28XX_3:
+			if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+				if (ha->port_no == 3)
+					ha->flt_region_nvram_sec = start;
+			break;
 		case FLT_REG_VPD_SEC_27XX_0:
-			if (IS_QLA27XX(ha))
-				ha->flt_region_vpd_sec = start;
+		case FLT_REG_VPD_SEC_28XX_0:
+			if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+				ha->flt_region_vpd_nvram_sec = start;
+				if (ha->port_no == 0)
+					ha->flt_region_vpd_sec = start;
+			}
 			break;
 		case FLT_REG_VPD_SEC_27XX_1:
-			if (IS_QLA27XX(ha))
-				ha->flt_region_vpd_sec = start;
+		case FLT_REG_VPD_SEC_28XX_1:
+			if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+				if (ha->port_no == 1)
+					ha->flt_region_vpd_sec = start;
 			break;
 		case FLT_REG_VPD_SEC_27XX_2:
-			if (IS_QLA27XX(ha))
-				ha->flt_region_vpd_sec = start;
+		case FLT_REG_VPD_SEC_28XX_2:
+			if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+				if (ha->port_no == 2)
+					ha->flt_region_vpd_sec = start;
 			break;
 		case FLT_REG_VPD_SEC_27XX_3:
-			if (IS_QLA27XX(ha))
-				ha->flt_region_vpd_sec = start;
+		case FLT_REG_VPD_SEC_28XX_3:
+			if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+				if (ha->port_no == 3)
+					ha->flt_region_vpd_sec = start;
 			break;
 		}
 	}
@@ -912,22 +946,19 @@
 #define FLASH_BLK_SIZE_32K	0x8000
 #define FLASH_BLK_SIZE_64K	0x10000
 	const char *loc, *locations[] = { "MID", "FDT" };
-	uint16_t cnt, chksum;
-	uint16_t *wptr;
-	struct qla_fdt_layout *fdt;
-	uint8_t	man_id, flash_id;
-	uint16_t mid = 0, fid = 0;
 	struct qla_hw_data *ha = vha->hw;
 	struct req_que *req = ha->req_q_map[0];
+	uint16_t cnt, chksum;
+	uint16_t *wptr = (void *)req->ring;
+	struct qla_fdt_layout *fdt = (struct qla_fdt_layout *)req->ring;
+	uint8_t	man_id, flash_id;
+	uint16_t mid = 0, fid = 0;
 
-	wptr = (uint16_t *)req->ring;
-	fdt = (struct qla_fdt_layout *)req->ring;
-	ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring,
-	    ha->flt_region_fdt << 2, OPTROM_BURST_SIZE);
-	if (*wptr == cpu_to_le16(0xffff))
+	ha->isp_ops->read_optrom(vha, fdt, ha->flt_region_fdt << 2,
+	    OPTROM_BURST_DWORDS);
+	if (le16_to_cpu(*wptr) == 0xffff)
 		goto no_flash_data;
-	if (fdt->sig[0] != 'Q' || fdt->sig[1] != 'L' || fdt->sig[2] != 'I' ||
-	    fdt->sig[3] != 'D')
+	if (memcmp(fdt->sig, "QLID", 4))
 		goto no_flash_data;
 
 	for (cnt = 0, chksum = 0; cnt < sizeof(*fdt) >> 1; cnt++, wptr++)
@@ -938,7 +969,7 @@
 		    " checksum=0x%x id=%c version0x%x.\n", chksum,
 		    fdt->sig[0], le16_to_cpu(fdt->version));
 		ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0113,
-		    (uint8_t *)fdt, sizeof(*fdt));
+		    fdt, sizeof(*fdt));
 		goto no_flash_data;
 	}
 
@@ -958,7 +989,7 @@
 		ha->fdt_unprotect_sec_cmd = flash_conf_addr(ha, 0x0300 |
 		    fdt->unprotect_sec_cmd);
 		ha->fdt_protect_sec_cmd = fdt->protect_sec_cmd ?
-		    flash_conf_addr(ha, 0x0300 | fdt->protect_sec_cmd):
+		    flash_conf_addr(ha, 0x0300 | fdt->protect_sec_cmd) :
 		    flash_conf_addr(ha, 0x0336);
 	}
 	goto done;
@@ -1019,8 +1050,7 @@
 		return;
 
 	wptr = (uint32_t *)req->ring;
-	ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring,
-		QLA82XX_IDC_PARAM_ADDR , 8);
+	ha->isp_ops->read_optrom(vha, req->ring, QLA82XX_IDC_PARAM_ADDR, 8);
 
 	if (*wptr == cpu_to_le32(0xffffffff)) {
 		ha->fcoe_dev_init_timeout = QLA82XX_ROM_DEV_INIT_TIMEOUT;
@@ -1045,7 +1075,8 @@
 	struct qla_hw_data *ha = vha->hw;
 
 	if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
-	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLA27XX(ha))
+	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) &&
+	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
 		return QLA_SUCCESS;
 
 	ret = qla2xxx_find_flt_start(vha, &flt_addr);
@@ -1081,8 +1112,8 @@
 	if (IS_QLA8044(ha))
 		return;
 
-	ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr,
-	    ha->flt_region_npiv_conf << 2, sizeof(struct qla_npiv_header));
+	ha->isp_ops->read_optrom(vha, &hdr, ha->flt_region_npiv_conf << 2,
+	    sizeof(struct qla_npiv_header));
 	if (hdr.version == cpu_to_le16(0xffff))
 		return;
 	if (hdr.version != cpu_to_le16(1)) {
@@ -1101,8 +1132,8 @@
 		return;
 	}
 
-	ha->isp_ops->read_optrom(vha, (uint8_t *)data,
-	    ha->flt_region_npiv_conf << 2, NPIV_CONFIG_SIZE);
+	ha->isp_ops->read_optrom(vha, data, ha->flt_region_npiv_conf << 2,
+	    NPIV_CONFIG_SIZE);
 
 	cnt = (sizeof(hdr) + le16_to_cpu(hdr.entries) * sizeof(*entry)) >> 1;
 	for (wptr = data, chksum = 0; cnt--; wptr++)
@@ -1139,10 +1170,8 @@
 		vid.node_name = wwn_to_u64(entry->node_name);
 
 		ql_dbg(ql_dbg_user, vha, 0x7093,
-		    "NPIV[%02x]: wwpn=%llx "
-		    "wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt,
-		    (unsigned long long)vid.port_name,
-		    (unsigned long long)vid.node_name,
+		    "NPIV[%02x]: wwpn=%llx wwnn=%llx vf_id=%#x Q_qos=%#x F_qos=%#x.\n",
+		    cnt, vid.port_name, vid.node_name,
 		    le16_to_cpu(entry->vf_id),
 		    entry->q_qos, entry->f_qos);
 
@@ -1150,10 +1179,8 @@
 			vport = fc_vport_create(vha->host, 0, &vid);
 			if (!vport)
 				ql_log(ql_log_warn, vha, 0x7094,
-				    "NPIV-Config Failed to create vport [%02x]: "
-				    "wwpn=%llx wwnn=%llx.\n", cnt,
-				    (unsigned long long)vid.port_name,
-				    (unsigned long long)vid.node_name);
+				    "NPIV-Config Failed to create vport [%02x]: wwpn=%llx wwnn=%llx.\n",
+				    cnt, vid.port_name, vid.node_name);
 		}
 	}
 done:
@@ -1188,9 +1215,10 @@
 static int
 qla24xx_protect_flash(scsi_qla_host_t *vha)
 {
-	uint32_t cnt;
 	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+	ulong cnt = 300;
+	uint32_t faddr, dword;
 
 	if (ha->flags.fac_supported)
 		return qla81xx_fac_do_write_enable(vha, 0);
@@ -1199,11 +1227,14 @@
 		goto skip_wrt_protect;
 
 	/* Enable flash write-protection and wait for completion. */
-	qla24xx_write_flash_dword(ha, flash_conf_addr(ha, 0x101),
-	    ha->fdt_wrt_disable);
-	for (cnt = 300; cnt &&
-	    qla24xx_read_flash_dword(ha, flash_conf_addr(ha, 0x005)) & BIT_0;
-	    cnt--) {
+	faddr = flash_conf_addr(ha, 0x101);
+	qla24xx_write_flash_dword(ha, faddr, ha->fdt_wrt_disable);
+	faddr = flash_conf_addr(ha, 0x5);
+	while (cnt--) {
+		if (!qla24xx_read_flash_dword(ha, faddr, &dword)) {
+			if (!(dword & BIT_0))
+				break;
+		}
 		udelay(10);
 	}
 
@@ -1211,7 +1242,6 @@
 	/* Disable flash write. */
 	WRT_REG_DWORD(&reg->ctrl_status,
 	    RD_REG_DWORD(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE);
-	RD_REG_DWORD(&reg->ctrl_status);	/* PCI Posting. */
 
 	return QLA_SUCCESS;
 }
@@ -1239,107 +1269,103 @@
     uint32_t dwords)
 {
 	int ret;
-	uint32_t liter;
-	uint32_t sec_mask, rest_addr;
-	uint32_t fdata;
+	ulong liter;
+	ulong dburst = OPTROM_BURST_DWORDS; /* burst size in dwords */
+	uint32_t sec_mask, rest_addr, fdata;
 	dma_addr_t optrom_dma;
 	void *optrom = NULL;
 	struct qla_hw_data *ha = vha->hw;
 
-	/* Prepare burst-capable write on supported ISPs. */
-	if ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
-	    IS_QLA27XX(ha)) &&
-	    !(faddr & 0xfff) && dwords > OPTROM_BURST_DWORDS) {
-		optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
-		    &optrom_dma, GFP_KERNEL);
-		if (!optrom) {
-			ql_log(ql_log_warn, vha, 0x7095,
-			    "Unable to allocate "
-			    "memory for optrom burst write (%x KB).\n",
-			    OPTROM_BURST_SIZE / 1024);
-		}
+	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+		goto next;
+
+	/* Allocate dma buffer for burst write */
+	optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
+	    &optrom_dma, GFP_KERNEL);
+	if (!optrom) {
+		ql_log(ql_log_warn, vha, 0x7095,
+		    "Failed allocate burst (%x bytes)\n", OPTROM_BURST_SIZE);
+	}
+
+next:
+	ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095,
+	    "Unprotect flash...\n");
+	ret = qla24xx_unprotect_flash(vha);
+	if (ret) {
+		ql_log(ql_log_warn, vha, 0x7096,
+		    "Failed to unprotect flash.\n");
+		goto done;
 	}
 
 	rest_addr = (ha->fdt_block_size >> 2) - 1;
 	sec_mask = ~rest_addr;
-
-	ret = qla24xx_unprotect_flash(vha);
-	if (ret != QLA_SUCCESS) {
-		ql_log(ql_log_warn, vha, 0x7096,
-		    "Unable to unprotect flash for update.\n");
-		goto done;
-	}
-
 	for (liter = 0; liter < dwords; liter++, faddr++, dwptr++) {
 		fdata = (faddr & sec_mask) << 2;
 
 		/* Are we at the beginning of a sector? */
-		if ((faddr & rest_addr) == 0) {
-			/* Do sector unprotect. */
-			if (ha->fdt_unprotect_sec_cmd)
-				qla24xx_write_flash_dword(ha,
-				    ha->fdt_unprotect_sec_cmd,
-				    (fdata & 0xff00) | ((fdata << 16) &
-				    0xff0000) | ((fdata >> 16) & 0xff));
+		if (!(faddr & rest_addr)) {
+			ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095,
+			    "Erase sector %#x...\n", faddr);
+
 			ret = qla24xx_erase_sector(vha, fdata);
-			if (ret != QLA_SUCCESS) {
+			if (ret) {
 				ql_dbg(ql_dbg_user, vha, 0x7007,
-				    "Unable to erase erase sector: address=%x.\n",
-				    faddr);
+				    "Failed to erase sector %x.\n", faddr);
 				break;
 			}
 		}
 
-		/* Go with burst-write. */
-		if (optrom && (liter + OPTROM_BURST_DWORDS) <= dwords) {
-			/* Copy data to DMA'ble buffer. */
-			memcpy(optrom, dwptr, OPTROM_BURST_SIZE);
+		if (optrom) {
+			/* If smaller than a burst remaining */
+			if (dwords - liter < dburst)
+				dburst = dwords - liter;
 
+			/* Copy to dma buffer */
+			memcpy(optrom, dwptr, dburst << 2);
+
+			/* Burst write */
+			ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095,
+			    "Write burst (%#lx dwords)...\n", dburst);
 			ret = qla2x00_load_ram(vha, optrom_dma,
-			    flash_data_addr(ha, faddr),
-			    OPTROM_BURST_DWORDS);
-			if (ret != QLA_SUCCESS) {
-				ql_log(ql_log_warn, vha, 0x7097,
-				    "Unable to burst-write optrom segment "
-				    "(%x/%x/%llx).\n", ret,
-				    flash_data_addr(ha, faddr),
-				    (unsigned long long)optrom_dma);
-				ql_log(ql_log_warn, vha, 0x7098,
-				    "Reverting to slow-write.\n");
-
-				dma_free_coherent(&ha->pdev->dev,
-				    OPTROM_BURST_SIZE, optrom, optrom_dma);
-				optrom = NULL;
-			} else {
-				liter += OPTROM_BURST_DWORDS - 1;
-				faddr += OPTROM_BURST_DWORDS - 1;
-				dwptr += OPTROM_BURST_DWORDS - 1;
+			    flash_data_addr(ha, faddr), dburst);
+			if (!ret) {
+				liter += dburst - 1;
+				faddr += dburst - 1;
+				dwptr += dburst - 1;
 				continue;
 			}
+
+			ql_log(ql_log_warn, vha, 0x7097,
+			    "Failed burst-write at %x (%p/%#llx)....\n",
+			    flash_data_addr(ha, faddr), optrom,
+			    (u64)optrom_dma);
+
+			dma_free_coherent(&ha->pdev->dev,
+			    OPTROM_BURST_SIZE, optrom, optrom_dma);
+			optrom = NULL;
+			if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+				break;
+			ql_log(ql_log_warn, vha, 0x7098,
+			    "Reverting to slow write...\n");
 		}
 
+		/* Slow write */
 		ret = qla24xx_write_flash_dword(ha,
 		    flash_data_addr(ha, faddr), cpu_to_le32(*dwptr));
-		if (ret != QLA_SUCCESS) {
+		if (ret) {
 			ql_dbg(ql_dbg_user, vha, 0x7006,
-			    "Unable to program flash address=%x data=%x.\n",
-			    faddr, *dwptr);
+			    "Failed slopw write %x (%x)\n", faddr, *dwptr);
 			break;
 		}
-
-		/* Do sector protect. */
-		if (ha->fdt_unprotect_sec_cmd &&
-		    ((faddr & rest_addr) == rest_addr))
-			qla24xx_write_flash_dword(ha,
-			    ha->fdt_protect_sec_cmd,
-			    (fdata & 0xff00) | ((fdata << 16) &
-			    0xff0000) | ((fdata >> 16) & 0xff));
 	}
 
+	ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095,
+	    "Protect flash...\n");
 	ret = qla24xx_protect_flash(vha);
-	if (ret != QLA_SUCCESS)
+	if (ret)
 		ql_log(ql_log_warn, vha, 0x7099,
-		    "Unable to protect flash after update.\n");
+		    "Failed to protect flash\n");
 done:
 	if (optrom)
 		dma_free_coherent(&ha->pdev->dev,
@@ -1349,7 +1375,7 @@
 }
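Note: the rewritten write loop drops the old requirement that a full burst remain; the final transfer shrinks to whatever is left (dburst = dwords - liter). A tiny sketch of that loop shape, with arbitrary sizes:

#include <stdio.h>

int main(void)
{
	unsigned long dwords = 10000, burst = 4096, done = 0;

	while (done < dwords) {
		unsigned long n = dwords - done;

		if (n > burst)
			n = burst;	/* full burst, or the short tail */
		printf("write %lu dwords at offset %lu\n", n, done);
		done += n;
	}
	return 0;
}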
 
 uint8_t *
-qla2x00_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
+qla2x00_read_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr,
     uint32_t bytes)
 {
 	uint32_t i;
@@ -1368,27 +1394,30 @@
 }
 
 uint8_t *
-qla24xx_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
+qla24xx_read_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr,
     uint32_t bytes)
 {
-	uint32_t i;
-	uint32_t *dwptr;
 	struct qla_hw_data *ha = vha->hw;
+	uint32_t *dwptr = buf;
+	uint32_t i;
 
 	if (IS_P3P_TYPE(ha))
 		return  buf;
 
 	/* Dword reads to flash. */
-	dwptr = (uint32_t *)buf;
-	for (i = 0; i < bytes >> 2; i++, naddr++)
-		dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
-		    nvram_data_addr(ha, naddr)));
+	naddr = nvram_data_addr(ha, naddr);
+	bytes >>= 2;
+	for (i = 0; i < bytes; i++, naddr++, dwptr++) {
+		if (qla24xx_read_flash_dword(ha, naddr, dwptr))
+			break;
+		cpu_to_le32s(dwptr);
+	}
 
 	return buf;
 }
 
 int
-qla2x00_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
+qla2x00_write_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr,
     uint32_t bytes)
 {
 	int ret, stat;
@@ -1422,14 +1451,14 @@
 }
 
 int
-qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
+qla24xx_write_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr,
     uint32_t bytes)
 {
-	int ret;
-	uint32_t i;
-	uint32_t *dwptr;
 	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+	uint32_t *dwptr = buf;
+	uint32_t i;
+	int ret;
 
 	ret = QLA_SUCCESS;
 
@@ -1446,11 +1475,10 @@
 	qla24xx_write_flash_dword(ha, nvram_conf_addr(ha, 0x101), 0);
 
 	/* Dword writes to flash. */
-	dwptr = (uint32_t *)buf;
-	for (i = 0; i < bytes >> 2; i++, naddr++, dwptr++) {
-		ret = qla24xx_write_flash_dword(ha,
-		    nvram_data_addr(ha, naddr), cpu_to_le32(*dwptr));
-		if (ret != QLA_SUCCESS) {
+	naddr = nvram_data_addr(ha, naddr);
+	bytes >>= 2;
+	for (i = 0; i < bytes; i++, naddr++, dwptr++) {
+		if (qla24xx_write_flash_dword(ha, naddr, cpu_to_le32(*dwptr))) {
 			ql_dbg(ql_dbg_user, vha, 0x709a,
 			    "Unable to program nvram address=%x data=%x.\n",
 			    naddr, *dwptr);
@@ -1470,31 +1498,34 @@
 }
 
 uint8_t *
-qla25xx_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
+qla25xx_read_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr,
     uint32_t bytes)
 {
-	uint32_t i;
-	uint32_t *dwptr;
 	struct qla_hw_data *ha = vha->hw;
+	uint32_t *dwptr = buf;
+	uint32_t i;
 
 	/* Dword reads to flash. */
-	dwptr = (uint32_t *)buf;
-	for (i = 0; i < bytes >> 2; i++, naddr++)
-		dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
-		    flash_data_addr(ha, ha->flt_region_vpd_nvram | naddr)));
+	naddr = flash_data_addr(ha, ha->flt_region_vpd_nvram | naddr);
+	bytes >>= 2;
+	for (i = 0; i < bytes; i++, naddr++, dwptr++) {
+		if (qla24xx_read_flash_dword(ha, naddr, dwptr))
+			break;
+
+		cpu_to_le32s(dwptr);
+	}
 
 	return buf;
 }
 
+#define RMW_BUFFER_SIZE	(64 * 1024)
 int
-qla25xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
+qla25xx_write_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr,
     uint32_t bytes)
 {
 	struct qla_hw_data *ha = vha->hw;
-#define RMW_BUFFER_SIZE	(64 * 1024)
-	uint8_t *dbuf;
+	uint8_t *dbuf = vmalloc(RMW_BUFFER_SIZE);
 
-	dbuf = vmalloc(RMW_BUFFER_SIZE);
 	if (!dbuf)
 		return QLA_MEMORY_ALLOC_FAILED;
 	ha->isp_ops->read_optrom(vha, dbuf, ha->flt_region_vpd_nvram << 2,
@@ -1728,7 +1759,7 @@
 {
 	uint32_t led_select_value = 0;
 
-	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
+	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
 		goto out;
 
 	if (ha->port_no == 0)
@@ -1749,13 +1780,14 @@
 	uint16_t orig_led_cfg[6];
 	uint32_t led_10_value, led_43_value;
 
-	if (!IS_QLA83XX(ha) && !IS_QLA81XX(ha) && !IS_QLA27XX(ha))
+	if (!IS_QLA83XX(ha) && !IS_QLA81XX(ha) && !IS_QLA27XX(ha) &&
+	    !IS_QLA28XX(ha))
 		return;
 
 	if (!ha->beacon_blink_led)
 		return;
 
-	if (IS_QLA27XX(ha)) {
+	if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
 		qla2x00_write_ram_word(vha, 0x1003, 0x40000230);
 		qla2x00_write_ram_word(vha, 0x1004, 0x40000230);
 	} else if (IS_QLA2031(ha)) {
@@ -1845,7 +1877,7 @@
 			return QLA_FUNCTION_FAILED;
 		}
 
-		if (IS_QLA2031(ha) || IS_QLA27XX(ha))
+		if (IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
 			goto skip_gpio;
 
 		spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1885,7 +1917,7 @@
 
 	ha->beacon_blink_led = 0;
 
-	if (IS_QLA2031(ha) || IS_QLA27XX(ha))
+	if (IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
 		goto set_fw_options;
 
 	if (IS_QLA8031(ha) || IS_QLA81XX(ha))
@@ -2229,7 +2261,7 @@
 
 /**
  * qla2x00_get_flash_manufacturer() - Read manufacturer ID from flash chip.
- * @ha:
+ * @ha: host adapter
  * @man_id: Flash manufacturer ID
  * @flash_id: Flash ID
  */
@@ -2314,8 +2346,8 @@
 	scsi_unblock_requests(vha->host);
 }
 
-uint8_t *
-qla2x00_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+void *
+qla2x00_read_optrom_data(struct scsi_qla_host *vha, void *buf,
     uint32_t offset, uint32_t length)
 {
 	uint32_t addr, midpoint;
@@ -2349,12 +2381,12 @@
 }
 
 int
-qla2x00_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+qla2x00_write_optrom_data(struct scsi_qla_host *vha, void *buf,
     uint32_t offset, uint32_t length)
 {
 
 	int rval;
-	uint8_t man_id, flash_id, sec_number, data;
+	uint8_t man_id, flash_id, sec_number, *data;
 	uint16_t wd;
 	uint32_t addr, liter, sec_mask, rest_addr;
 	struct qla_hw_data *ha = vha->hw;
@@ -2483,7 +2515,7 @@
 
 		for (addr = offset, liter = 0; liter < length; liter++,
 		    addr++) {
-			data = buf[liter];
+			data = buf + liter;
 			/* Are we at the beginning of a sector? */
 			if ((addr & rest_addr) == 0) {
 				if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
@@ -2551,7 +2583,7 @@
 				}
 			}
 
-			if (qla2x00_program_flash_address(ha, addr, data,
+			if (qla2x00_program_flash_address(ha, addr, *data,
 			    man_id, flash_id)) {
 				rval = QLA_FUNCTION_FAILED;
 				break;
@@ -2567,8 +2599,8 @@
 	return rval;
 }
 
-uint8_t *
-qla24xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+void *
+qla24xx_read_optrom_data(struct scsi_qla_host *vha, void *buf,
     uint32_t offset, uint32_t length)
 {
 	struct qla_hw_data *ha = vha->hw;
@@ -2578,7 +2610,7 @@
 	set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
 
 	/* Go with read. */
-	qla24xx_read_flash_data(vha, (uint32_t *)buf, offset >> 2, length >> 2);
+	qla24xx_read_flash_data(vha, (void *)buf, offset >> 2, length >> 2);
 
 	/* Resume HBA. */
 	clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
@@ -2587,8 +2619,340 @@
 	return buf;
 }
 
+static int
+qla28xx_extract_sfub_and_verify(struct scsi_qla_host *vha, uint32_t *buf,
+    uint32_t len, uint32_t buf_size_without_sfub, uint8_t *sfub_buf)
+{
+	uint32_t *p, check_sum = 0;
+	int i;
+
+	p = buf + buf_size_without_sfub;
+
+	/* Extract SFUB from end of file */
+	memcpy(sfub_buf, (uint8_t *)p,
+	    sizeof(struct secure_flash_update_block));
+
+	for (i = 0; i < (sizeof(struct secure_flash_update_block) >> 2); i++)
+		check_sum += p[i];
+
+	check_sum = (~check_sum) + 1;
+
+	if (check_sum != p[i]) {
+		ql_log(ql_log_warn, vha, 0x7097,
+		    "SFUB checksum failed, 0x%x, 0x%x\n",
+		    check_sum, p[i]);
+		return QLA_COMMAND_ERROR;
+	}
+
+	return QLA_SUCCESS;
+}
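Note: the SFUB verification above is a two's-complement checksum: the dword stored immediately after the block must equal the negation of the sum of the block's dwords, i.e. (~sum) + 1. A stand-alone sketch; tc_csum_ok() is illustrative:

#include <stdint.h>
#include <stddef.h>

/* Returns 1 when blk[ndwords] holds the two's-complement of the sum
 * of blk[0..ndwords-1]; mirrors the (~check_sum) + 1 comparison above.
 * The caller must supply a buffer of at least ndwords + 1 dwords. */
static int tc_csum_ok(const uint32_t *blk, size_t ndwords)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i < ndwords; i++)
		sum += blk[i];
	return (uint32_t)(~sum + 1) == blk[ndwords];
}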
+
+static int
+qla28xx_get_flash_region(struct scsi_qla_host *vha, uint32_t start,
+    struct qla_flt_region *region)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_flt_header *flt;
+	struct qla_flt_region *flt_reg;
+	uint16_t cnt;
+	int rval = QLA_FUNCTION_FAILED;
+
+	if (!ha->flt)
+		return QLA_FUNCTION_FAILED;
+
+	flt = (struct qla_flt_header *)ha->flt;
+	flt_reg = (struct qla_flt_region *)&flt[1];
+	cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region);
+
+	for (; cnt; cnt--, flt_reg++) {
+		if (flt_reg->start == start) {
+			memcpy((uint8_t *)region, flt_reg,
+			    sizeof(struct qla_flt_region));
+			rval = QLA_SUCCESS;
+			break;
+		}
+	}
+
+	return rval;
+}
+
+static int
+qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
+    uint32_t dwords)
+{
+	struct qla_hw_data *ha = vha->hw;
+	ulong liter;
+	ulong dburst = OPTROM_BURST_DWORDS; /* burst size in dwords */
+	uint32_t sec_mask, rest_addr, fdata;
+	void *optrom = NULL;
+	dma_addr_t optrom_dma;
+	int rval;
+	struct secure_flash_update_block *sfub;
+	dma_addr_t sfub_dma;
+	uint32_t offset = faddr << 2;
+	uint32_t buf_size_without_sfub = 0;
+	struct qla_flt_region region;
+	bool reset_to_rom = false;
+	uint32_t risc_size, risc_attr = 0;
+	uint32_t *fw_array = NULL;
+
+	/* Retrieve region info - must be a start address passed in */
+	rval = qla28xx_get_flash_region(vha, offset, &region);
+
+	if (rval != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0xffff,
+		    "Invalid address %x - not a region start address\n",
+		    offset);
+		goto done;
+	}
+
+	/* Allocate dma buffer for burst write */
+	optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
+	    &optrom_dma, GFP_KERNEL);
+	if (!optrom) {
+		ql_log(ql_log_warn, vha, 0x7095,
+		    "Failed allocate burst (%x bytes)\n", OPTROM_BURST_SIZE);
+		rval = QLA_COMMAND_ERROR;
+		goto done;
+	}
+
+	/*
+	 * If the adapter supports secure flash and the region is secure,
+	 * extract the secure flash update block (SFUB) and verify it.
+	 */
+	if (ha->flags.secure_adapter && region.attribute) {
+
+		ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff,
+		    "Region %x is secure\n", region.code);
+
+		if (region.code == FLT_REG_FW ||
+		    region.code == FLT_REG_FW_SEC_27XX) {
+			fw_array = dwptr;
+
+			/* 1st fw array */
+			risc_size = be32_to_cpu(fw_array[3]);
+			risc_attr = be32_to_cpu(fw_array[9]);
+
+			buf_size_without_sfub = risc_size;
+			fw_array += risc_size;
+
+			/* 2nd fw array */
+			risc_size = be32_to_cpu(fw_array[3]);
+
+			buf_size_without_sfub += risc_size;
+			fw_array += risc_size;
+
+			/* 1st dump template */
+			risc_size = be32_to_cpu(fw_array[2]);
+
+			/* skip header and ignore checksum */
+			buf_size_without_sfub += risc_size;
+			fw_array += risc_size;
+
+			if (risc_attr & BIT_9) {
+				/* 2nd dump template */
+				risc_size = be32_to_cpu(fw_array[2]);
+
+				/* skip header and ignore checksum */
+				buf_size_without_sfub += risc_size;
+				fw_array += risc_size;
+			}
+		} else {
+			ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff,
+			    "Secure region %x not supported\n",
+			    region.code);
+			rval = QLA_COMMAND_ERROR;
+			goto done;
+		}
+
+		sfub = dma_alloc_coherent(&ha->pdev->dev,
+			sizeof(struct secure_flash_update_block), &sfub_dma,
+			GFP_KERNEL);
+		if (!sfub) {
+			ql_log(ql_log_warn, vha, 0xffff,
+			    "Unable to allocate memory for SFUB\n");
+			rval = QLA_COMMAND_ERROR;
+			goto done;
+		}
+
+		rval = qla28xx_extract_sfub_and_verify(vha, dwptr, dwords,
+			buf_size_without_sfub, (uint8_t *)sfub);
+
+		if (rval != QLA_SUCCESS)
+			goto done;
+
+		ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff,
+		    "SFUB extract and verify successful\n");
+	}
+
+	rest_addr = (ha->fdt_block_size >> 2) - 1;
+	sec_mask = ~rest_addr;
+
+	/* Lock semaphore */
+	rval = qla81xx_fac_semaphore_access(vha, FAC_SEMAPHORE_LOCK);
+	if (rval != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0xffff,
+		    "Unable to lock flash semaphore.");
+		goto done;
+	}
+
+	ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095,
+	    "Unprotect flash...\n");
+	rval = qla24xx_unprotect_flash(vha);
+	if (rval) {
+		qla81xx_fac_semaphore_access(vha, FAC_SEMAPHORE_UNLOCK);
+		ql_log(ql_log_warn, vha, 0x7096, "Failed to unprotect flash\n");
+		goto done;
+	}
+
+	for (liter = 0; liter < dwords; liter++, faddr++) {
+		fdata = (faddr & sec_mask) << 2;
+
+		/* If start of sector */
+		if (!(faddr & rest_addr)) {
+			ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095,
+			    "Erase sector %#x...\n", faddr);
+			rval = qla24xx_erase_sector(vha, fdata);
+			if (rval) {
+				ql_dbg(ql_dbg_user, vha, 0x7007,
+				    "Failed erase sector %#x\n", faddr);
+				goto write_protect;
+			}
+		}
+	}
+
+	if (ha->flags.secure_adapter) {
+		/*
+		 * If adapter supports secure flash but FW doesn't,
+		 * disable write protect, release semaphore and reset
+		 * chip to execute ROM code in order to update region securely
+		 */
+		if (!ha->flags.secure_fw) {
+			ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff,
+			    "Disable Write and Release Semaphore.");
+			rval = qla24xx_protect_flash(vha);
+			if (rval != QLA_SUCCESS) {
+				qla81xx_fac_semaphore_access(vha,
+					FAC_SEMAPHORE_UNLOCK);
+				ql_log(ql_log_warn, vha, 0xffff,
+				    "Unable to protect flash.");
+				goto done;
+			}
+
+			ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff,
+			    "Reset chip to ROM.");
+			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+			set_bit(ISP_ABORT_TO_ROM, &vha->dpc_flags);
+			qla2xxx_wake_dpc(vha);
+			rval = qla2x00_wait_for_chip_reset(vha);
+			if (rval != QLA_SUCCESS) {
+				ql_log(ql_log_warn, vha, 0xffff,
+				    "Unable to reset to ROM code.");
+				goto done;
+			}
+			reset_to_rom = true;
+			ha->flags.fac_supported = 0;
+
+			ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff,
+			    "Lock Semaphore");
+			rval = qla2xxx_write_remote_register(vha,
+			    FLASH_SEMAPHORE_REGISTER_ADDR, 0x00020002);
+			if (rval != QLA_SUCCESS) {
+				ql_log(ql_log_warn, vha, 0xffff,
+				    "Unable to lock flash semaphore.");
+				goto done;
+			}
+
+			/* Unprotect flash */
+			ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff,
+			    "Enable Write.");
+			rval = qla2x00_write_ram_word(vha, 0x7ffd0101, 0);
+			if (rval) {
+				ql_log(ql_log_warn, vha, 0x7096,
+				    "Failed unprotect flash\n");
+				goto done;
+			}
+		}
+
+		/* If region is secure, send Secure Flash MB Cmd */
+		if (region.attribute && buf_size_without_sfub) {
+			ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff,
+			    "Sending Secure Flash MB Cmd\n");
+			rval = qla28xx_secure_flash_update(vha, 0, region.code,
+				buf_size_without_sfub, sfub_dma,
+				sizeof(struct secure_flash_update_block));
+			if (rval != QLA_SUCCESS) {
+				ql_log(ql_log_warn, vha, 0xffff,
+				    "Secure Flash MB Cmd failed %x.", rval);
+				goto write_protect;
+			}
+		}
+
+	}
+
+	/* re-init flash offset */
+	faddr = offset >> 2;
+
+	for (liter = 0; liter < dwords; liter++, faddr++, dwptr++) {
+		fdata = (faddr & sec_mask) << 2;
+
+		/* If smaller than a burst remaining */
+		if (dwords - liter < dburst)
+			dburst = dwords - liter;
+
+		/* Copy to dma buffer */
+		memcpy(optrom, dwptr, dburst << 2);
+
+		/* Burst write */
+		ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095,
+		    "Write burst (%#lx dwords)...\n", dburst);
+		rval = qla2x00_load_ram(vha, optrom_dma,
+		    flash_data_addr(ha, faddr), dburst);
+		if (rval != QLA_SUCCESS) {
+			ql_log(ql_log_warn, vha, 0x7097,
+			    "Failed burst write at %x (%p/%#llx)...\n",
+			    flash_data_addr(ha, faddr), optrom,
+			    (u64)optrom_dma);
+			break;
+		}
+
+		liter += dburst - 1;
+		faddr += dburst - 1;
+		dwptr += dburst - 1;
+		continue;
+	}
+
+write_protect:
+	ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095,
+	    "Protect flash...\n");
+	rval = qla24xx_protect_flash(vha);
+	if (rval) {
+		qla81xx_fac_semaphore_access(vha, FAC_SEMAPHORE_UNLOCK);
+		ql_log(ql_log_warn, vha, 0x7099,
+		    "Failed protect flash\n");
+	}
+
+	if (reset_to_rom) {
+		/* Schedule DPC to restart the RISC */
+		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+		qla2xxx_wake_dpc(vha);
+
+		rval = qla2x00_wait_for_hba_online(vha);
+		if (rval != QLA_SUCCESS)
+			ql_log(ql_log_warn, vha, 0xffff,
+			    "Adapter did not come out of reset\n");
+	}
+
+done:
+	if (optrom)
+		dma_free_coherent(&ha->pdev->dev,
+		    OPTROM_BURST_SIZE, optrom, optrom_dma);
+
+	return rval;
+}
+
 int
-qla24xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+qla24xx_write_optrom_data(struct scsi_qla_host *vha, void *buf,
     uint32_t offset, uint32_t length)
 {
 	int rval;
@@ -2599,8 +2963,12 @@
 	set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
 
 	/* Go with write. */
-	rval = qla24xx_write_flash_data(vha, (uint32_t *)buf, offset >> 2,
-	    length >> 2);
+	if (IS_QLA28XX(ha))
+		rval = qla28xx_write_flash_data(vha, (uint32_t *)buf,
+		    offset >> 2, length >> 2);
+	else
+		rval = qla24xx_write_flash_data(vha, (uint32_t *)buf,
+		    offset >> 2, length >> 2);
 
 	clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
 	scsi_unblock_requests(vha->host);
@@ -2608,8 +2976,8 @@
 	return rval;
 }
 
-uint8_t *
-qla25xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+void *
+qla25xx_read_optrom_data(struct scsi_qla_host *vha, void *buf,
     uint32_t offset, uint32_t length)
 {
 	int rval;
@@ -2620,7 +2988,7 @@
 	struct qla_hw_data *ha = vha->hw;
 
 	if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
-	    IS_QLA27XX(ha))
+	    IS_QLA27XX(ha) || IS_QLA28XX(ha))
 		goto try_fast;
 	if (offset & 0xfff)
 		goto slow_read;
@@ -2628,6 +2996,8 @@
 		goto slow_read;
 
 try_fast:
+	if (offset & 0xff)
+		goto slow_read;
 	optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
 	    &optrom_dma, GFP_KERNEL);
 	if (!optrom) {
@@ -2874,7 +3244,7 @@
 		    "Dumping fw "
 		    "ver from flash:.\n");
 		ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010b,
-		    (uint8_t *)dbyte, 8);
+		    dbyte, 32);
 
 		if ((dcode[0] == 0xffff && dcode[1] == 0xffff &&
 		    dcode[2] == 0xffff && dcode[3] == 0xffff) ||
@@ -2905,8 +3275,8 @@
 {
 	int ret = QLA_SUCCESS;
 	uint32_t pcihdr, pcids;
-	uint32_t *dcode;
-	uint8_t *bcode;
+	uint32_t *dcode = mbuf;
+	uint8_t *bcode = mbuf;
 	uint8_t code_type, last_image;
 	struct qla_hw_data *ha = vha->hw;
 
@@ -2918,17 +3288,14 @@
 	memset(ha->fcode_revision, 0, sizeof(ha->fcode_revision));
 	memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
 
-	dcode = mbuf;
-
 	/* Begin with first PCI expansion ROM header. */
 	pcihdr = ha->flt_region_boot << 2;
 	last_image = 1;
 	do {
 		/* Verify PCI expansion ROM header. */
-		ha->isp_ops->read_optrom(vha, (uint8_t *)dcode, pcihdr,
-		    0x20 * 4);
+		ha->isp_ops->read_optrom(vha, dcode, pcihdr, 0x20 * 4);
 		bcode = mbuf + (pcihdr % 4);
-		if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) {
+		if (memcmp(bcode, "\x55\xaa", 2)) {
 			/* No signature */
 			ql_log(ql_log_fatal, vha, 0x0154,
 			    "No matching ROM signature.\n");
@@ -2939,13 +3306,11 @@
 		/* Locate PCI data structure. */
 		pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]);
 
-		ha->isp_ops->read_optrom(vha, (uint8_t *)dcode, pcids,
-		    0x20 * 4);
+		ha->isp_ops->read_optrom(vha, dcode, pcids, 0x20 * 4);
 		bcode = mbuf + (pcihdr % 4);
 
 		/* Validate signature of PCI data structure. */
-		if (bcode[0x0] != 'P' || bcode[0x1] != 'C' ||
-		    bcode[0x2] != 'I' || bcode[0x3] != 'R') {
+		if (memcmp(bcode, "PCIR", 4)) {
 			/* Incorrect header. */
 			ql_log(ql_log_fatal, vha, 0x0155,
 			    "PCI data struct not found pcir_adr=%x.\n", pcids);
@@ -2996,8 +3361,7 @@
 	/* Read firmware image information. */
 	memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
 	dcode = mbuf;
-	ha->isp_ops->read_optrom(vha, (uint8_t *)dcode, ha->flt_region_fw << 2,
-	    0x20);
+	ha->isp_ops->read_optrom(vha, dcode, ha->flt_region_fw << 2, 0x20);
 	bcode = mbuf + (pcihdr % 4);
 
 	/* Validate signature of PCI data structure. */
@@ -3019,15 +3383,14 @@
 qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
 {
 	int ret = QLA_SUCCESS;
-	uint32_t pcihdr, pcids;
-	uint32_t *dcode;
-	uint8_t *bcode;
+	uint32_t pcihdr = 0, pcids = 0;
+	uint32_t *dcode = mbuf;
+	uint8_t *bcode = mbuf;
 	uint8_t code_type, last_image;
 	int i;
 	struct qla_hw_data *ha = vha->hw;
 	uint32_t faddr = 0;
-
-	pcihdr = pcids = 0;
+	struct active_regions active_regions = { };
 
 	if (IS_P3P_TYPE(ha))
 		return ret;
@@ -3040,18 +3403,19 @@
 	memset(ha->fcode_revision, 0, sizeof(ha->fcode_revision));
 	memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
 
-	dcode = mbuf;
 	pcihdr = ha->flt_region_boot << 2;
-	if (IS_QLA27XX(ha) &&
-	    qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
-		pcihdr = ha->flt_region_boot_sec << 2;
+	if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+		qla27xx_get_active_image(vha, &active_regions);
+		if (active_regions.global == QLA27XX_SECONDARY_IMAGE) {
+			pcihdr = ha->flt_region_boot_sec << 2;
+		}
+	}
 
-	last_image = 1;
 	do {
 		/* Verify PCI expansion ROM header. */
 		qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20);
 		bcode = mbuf + (pcihdr % 4);
-		if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) {
+		if (memcmp(bcode, "\x55\xaa", 2)) {
 			/* No signature */
 			ql_log(ql_log_fatal, vha, 0x0059,
 			    "No matching ROM signature.\n");
@@ -3066,11 +3430,11 @@
 		bcode = mbuf + (pcihdr % 4);
 
 		/* Validate signature of PCI data structure. */
-		if (bcode[0x0] != 'P' || bcode[0x1] != 'C' ||
-		    bcode[0x2] != 'I' || bcode[0x3] != 'R') {
+		if (memcmp(bcode, "PCIR", 4)) {
 			/* Incorrect header. */
 			ql_log(ql_log_fatal, vha, 0x005a,
 			    "PCI data struct not found pcir_adr=%x.\n", pcids);
+			ql_dump_buffer(ql_dbg_init, vha, 0x0059, dcode, 32);
 			ret = QLA_FUNCTION_FAILED;
 			break;
 		}
@@ -3117,30 +3481,24 @@
 
 	/* Read firmware image information. */
 	memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
-	dcode = mbuf;
 	faddr = ha->flt_region_fw;
-	if (IS_QLA27XX(ha) &&
-	    qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
-		faddr = ha->flt_region_fw_sec;
+	if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+		qla27xx_get_active_image(vha, &active_regions);
+		if (active_regions.global == QLA27XX_SECONDARY_IMAGE)
+			faddr = ha->flt_region_fw_sec;
+	}
 
-	qla24xx_read_flash_data(vha, dcode, faddr + 4, 4);
-	for (i = 0; i < 4; i++)
-		dcode[i] = be32_to_cpu(dcode[i]);
-
-	if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
-	    dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
-	    (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
-	    dcode[3] == 0)) {
+	qla24xx_read_flash_data(vha, dcode, faddr, 8);
+	if (qla24xx_risc_firmware_invalid(dcode)) {
 		ql_log(ql_log_warn, vha, 0x005f,
 		    "Unrecognized fw revision at %x.\n",
 		    ha->flt_region_fw * 4);
+		ql_dump_buffer(ql_dbg_init, vha, 0x005f, dcode, 32);
 	} else {
-		ha->fw_revision[0] = dcode[0];
-		ha->fw_revision[1] = dcode[1];
-		ha->fw_revision[2] = dcode[2];
-		ha->fw_revision[3] = dcode[3];
+		for (i = 0; i < 4; i++)
+			ha->fw_revision[i] = be32_to_cpu(dcode[4+i]);
 		ql_dbg(ql_dbg_init, vha, 0x0060,
-		    "Firmware revision %d.%d.%d (%x).\n",
+		    "Firmware revision (flash) %u.%u.%u (%x).\n",
 		    ha->fw_revision[0], ha->fw_revision[1],
 		    ha->fw_revision[2], ha->fw_revision[3]);
 	}
@@ -3152,20 +3510,17 @@
 	}
 
 	memset(ha->gold_fw_version, 0, sizeof(ha->gold_fw_version));
-	dcode = mbuf;
-	ha->isp_ops->read_optrom(vha, (uint8_t *)dcode,
-	    ha->flt_region_gold_fw << 2, 32);
-
-	if (dcode[4] == 0xFFFFFFFF && dcode[5] == 0xFFFFFFFF &&
-	    dcode[6] == 0xFFFFFFFF && dcode[7] == 0xFFFFFFFF) {
+	faddr = ha->flt_region_gold_fw;
+	qla24xx_read_flash_data(vha, (void *)dcode, ha->flt_region_gold_fw, 8);
+	if (qla24xx_risc_firmware_invalid(dcode)) {
 		ql_log(ql_log_warn, vha, 0x0056,
-		    "Unrecognized golden fw at 0x%x.\n",
-		    ha->flt_region_gold_fw * 4);
+		    "Unrecognized golden fw at %#x.\n", faddr);
+		ql_dump_buffer(ql_dbg_init, vha, 0x0056, dcode, 32);
 		return ret;
 	}
 
-	for (i = 4; i < 8; i++)
-		ha->gold_fw_version[i-4] = be32_to_cpu(dcode[i]);
+	for (i = 0; i < 4; i++)
+		ha->gold_fw_version[i] = be32_to_cpu(dcode[4+i]);
 
 	return ret;
 }
@@ -3237,7 +3592,7 @@
 	fcp_prio_addr = ha->flt_region_fcp_prio;
 
 	/* first read the fcp priority data header from flash */
-	ha->isp_ops->read_optrom(vha, (uint8_t *)ha->fcp_prio_cfg,
+	ha->isp_ops->read_optrom(vha, ha->fcp_prio_cfg,
 			fcp_prio_addr << 2, FCP_PRIO_CFG_HDR_SIZE);
 
 	if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 0))
@@ -3248,7 +3603,7 @@
 	len = ha->fcp_prio_cfg->num_entries * FCP_PRIO_CFG_ENTRY_SIZE;
 	max_len = FCP_PRIO_CFG_SIZE - FCP_PRIO_CFG_HDR_SIZE;
 
-	ha->isp_ops->read_optrom(vha, (uint8_t *)&ha->fcp_prio_cfg->entry[0],
+	ha->isp_ops->read_optrom(vha, &ha->fcp_prio_cfg->entry[0],
 			fcp_prio_addr << 2, (len < max_len ? len : max_len));
 
 	/* revalidate the entire FCP priority config data, including entries */
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index d2888b3..a06e562 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  *  qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
  *
@@ -11,16 +12,6 @@
  *  Forward port and refactoring to modern qla2xxx and target/configfs
  *
  *  Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
- *
- *  This program is free software; you can redistribute it and/or
- *  modify it under the terms of the GNU General Public License
- *  as published by the Free Software Foundation, version 2
- *  of the License.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- *  GNU General Public License for more details.
  */
 
 #include <linux/module.h>
@@ -141,6 +132,8 @@
 	struct abts_recv_from_24xx *);
 static void qlt_send_busy(struct qla_qpair *, struct atio_from_isp *,
     uint16_t);
+static int qlt_check_reserve_free_req(struct qla_qpair *qpair, uint32_t);
+static inline uint32_t qlt_make_handle(struct qla_qpair *);
 
 /*
  * Global Variables
@@ -182,6 +175,7 @@
 	/* Send marker if required */
 	if (unlikely(vha->marker_needed != 0)) {
 		int rc = qla2x00_issue_marker(vha, vha_locked);
+
 		if (rc != QLA_SUCCESS) {
 			ql_dbg(ql_dbg_tgt, vha, 0xe03d,
 			    "qla_target(%d): issue_marker() failed\n",
@@ -194,18 +188,17 @@
 
 static inline
 struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
-	uint8_t *d_id)
+					    be_id_t d_id)
 {
 	struct scsi_qla_host *host;
-	uint32_t key = 0;
+	uint32_t key;
 
-	if ((vha->d_id.b.area == d_id[1]) && (vha->d_id.b.domain == d_id[0]) &&
-	    (vha->d_id.b.al_pa == d_id[2]))
+	if (vha->d_id.b.area == d_id.area &&
+	    vha->d_id.b.domain == d_id.domain &&
+	    vha->d_id.b.al_pa == d_id.al_pa)
 		return vha;
 
-	key  = (uint32_t)d_id[0] << 16;
-	key |= (uint32_t)d_id[1] <<  8;
-	key |= (uint32_t)d_id[2];
+	key = be_to_port_id(d_id).b24;
 
 	host = btree_lookup32(&vha->hw->tgt.host_map, key);
 	if (!host)
@@ -363,9 +356,9 @@
 			ql_dbg(ql_dbg_tgt, vha, 0xe03e,
 			    "qla_target(%d): Received ATIO_TYPE7 "
 			    "with unknown d_id %x:%x:%x\n", vha->vp_idx,
-			    atio->u.isp24.fcp_hdr.d_id[0],
-			    atio->u.isp24.fcp_hdr.d_id[1],
-			    atio->u.isp24.fcp_hdr.d_id[2]);
+			    atio->u.isp24.fcp_hdr.d_id.domain,
+			    atio->u.isp24.fcp_hdr.d_id.area,
+			    atio->u.isp24.fcp_hdr.d_id.al_pa);
 
 
 			qlt_queue_unknown_atio(vha, atio, ha_locked);
@@ -541,7 +534,6 @@
 		qlt_response_pkt(host, rsp, pkt);
 		break;
 	}
-
 	default:
 		qlt_response_pkt(vha, rsp, pkt);
 		break;
@@ -556,6 +548,7 @@
 	struct imm_ntfy_from_isp *ntfy, int type)
 {
 	struct qla_work_evt *e;
+
 	e = qla2x00_alloc_work(vha, QLA_EVT_NACK);
 	if (!e)
 		return QLA_FUNCTION_FAILED;
@@ -566,10 +559,8 @@
 	return qla2x00_post_work(vha, e);
 }
 
-static
-void qla2x00_async_nack_sp_done(void *s, int res)
+static void qla2x00_async_nack_sp_done(srb_t *sp, int res)
 {
-	struct srb *sp = (struct srb *)s;
 	struct scsi_qla_host *vha = sp->vha;
 	unsigned long flags;
 
@@ -600,14 +591,9 @@
 			sp->fcport->login_succ = 1;
 
 			vha->fcport_count++;
-
-			ql_dbg(ql_dbg_disc, vha, 0x20f3,
-			    "%s %d %8phC post upd_fcport fcp_cnt %d\n",
-			    __func__, __LINE__,
-			    sp->fcport->port_name,
-			    vha->fcport_count);
-			sp->fcport->disc_state = DSC_UPD_FCPORT;
-			qla24xx_post_upd_fcport_work(vha, sp->fcport);
+			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+			qla24xx_sched_upd_fcport(sp->fcport);
+			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
 		} else {
 			sp->fcport->login_retry = 0;
 			sp->fcport->disc_state = DSC_LOGIN_COMPLETE;
@@ -664,14 +650,14 @@
 	sp->u.iocb_cmd.u.nack.ntfy = ntfy;
 	sp->done = qla2x00_async_nack_sp_done;
 
-	rval = qla2x00_start_sp(sp);
-	if (rval != QLA_SUCCESS)
-		goto done_free_sp;
-
 	ql_dbg(ql_dbg_disc, vha, 0x20f4,
 	    "Async-%s %8phC hndl %x %s\n",
 	    sp->name, fcport->port_name, sp->handle, c);
 
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		goto done_free_sp;
+
 	return rval;
 
 done_free_sp:
@@ -684,34 +670,31 @@
 void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
 {
 	fc_port_t *t;
-	unsigned long flags;
 
 	switch (e->u.nack.type) {
 	case SRB_NACK_PRLI:
+		t = e->u.nack.fcport;
+		flush_work(&t->del_work);
+		flush_work(&t->free_work);
 		mutex_lock(&vha->vha_tgt.tgt_mutex);
 		t = qlt_create_sess(vha, e->u.nack.fcport, 0);
 		mutex_unlock(&vha->vha_tgt.tgt_mutex);
 		if (t) {
 			ql_log(ql_log_info, vha, 0xd034,
 			    "%s create sess success %p", __func__, t);
-			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
 			/* create sess has an extra kref */
 			vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport);
-			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
 		}
 		break;
 	}
 	qla24xx_async_notify_ack(vha, e->u.nack.fcport,
-	    (struct imm_ntfy_from_isp*)e->u.nack.iocb, e->u.nack.type);
+	    (struct imm_ntfy_from_isp *)e->u.nack.iocb, e->u.nack.type);
 }
 
 void qla24xx_delete_sess_fn(struct work_struct *work)
 {
 	fc_port_t *fcport = container_of(work, struct fc_port, del_work);
 	struct qla_hw_data *ha = fcport->vha->hw;
-	unsigned long flags;
-
-	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
 
 	if (fcport->se_sess) {
 		ha->tgt.tgt_ops->shutdown_sess(fcport);
@@ -719,7 +702,6 @@
 	} else {
 		qlt_unreg_sess(fcport);
 	}
-	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
 }
 
 /*
@@ -788,8 +770,9 @@
 		    fcport->port_name, sess->loop_id);
 		sess->local = 0;
 	}
-	ha->tgt.tgt_ops->put_sess(sess);
 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
+	ha->tgt.tgt_ops->put_sess(sess);
 }
 
 /*
@@ -803,6 +786,8 @@
 {
 	struct qlt_plogi_ack_t *pla;
 
+	lockdep_assert_held(&vha->hw->hardware_lock);
+
 	list_for_each_entry(pla, &vha->plogi_ack_list, list) {
 		if (pla->id.b24 == id->b24) {
 			ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x210d,
@@ -968,7 +953,7 @@
 	struct qla_hw_data *ha = vha->hw;
 	unsigned long flags;
 	bool logout_started = false;
-	scsi_qla_host_t *base_vha;
+	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
 	struct qlt_plogi_ack_t *own =
 		sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
 
@@ -981,6 +966,8 @@
 		sess->send_els_logo);
 
 	if (!IS_SW_RESV_ADDR(sess->d_id)) {
+		qla2x00_mark_device_lost(vha, sess, 0, 0);
+
 		if (sess->send_els_logo) {
 			qlt_port_logo_t logo;
 
@@ -1016,6 +1003,12 @@
 				else
 					logout_started = true;
 			}
+		} /* if sess->logout_on_delete */
+
+		if (sess->nvme_flag & NVME_FLAG_REGISTERED &&
+		    !(sess->nvme_flag & NVME_FLAG_DELETING)) {
+			sess->nvme_flag |= NVME_FLAG_DELETING;
+			qla_nvme_unregister_remote_port(sess);
 		}
 	}
 
@@ -1027,6 +1020,7 @@
 
 	if (logout_started) {
 		bool traced = false;
+		u16 cnt = 0;
 
 		while (!READ_ONCE(sess->logout_completed)) {
 			if (!traced) {
@@ -1036,6 +1030,9 @@
 				traced = true;
 			}
 			msleep(100);
+			cnt++;
+			if (cnt > 200)
+				break;
 		}
 
 		ql_dbg(ql_dbg_disc, vha, 0xf087,
@@ -1077,6 +1074,7 @@
 		struct qlt_plogi_ack_t *con =
 		    sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];
 		struct imm_ntfy_from_isp *iocb;
+
 		own = sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
 
 		if (con) {
@@ -1107,6 +1105,7 @@
 	}
 
 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+	sess->free_pending = 0;
 
 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
 	    "Unregistration of sess %p %8phC finished fcp_cnt %d\n",
@@ -1115,17 +1114,9 @@
 	if (tgt && (tgt->sess_count == 0))
 		wake_up_all(&tgt->waitQ);
 
-	if (vha->fcport_count == 0)
-		wake_up_all(&vha->fcport_waitQ);
-
-	base_vha = pci_get_drvdata(ha->pdev);
-
-	sess->free_pending = 0;
-
-	if (test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags))
-		return;
-
-	if ((!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) {
+	if (!test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags) &&
+	    !(vha->vp_idx && test_bit(VPORT_DELETE, &vha->dpc_flags)) &&
+	    (!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) {
 		switch (vha->host->active_mode) {
 		case MODE_INITIATOR:
 		case MODE_DUAL:
@@ -1138,6 +1129,9 @@
 			break;
 		}
 	}
+
+	if (vha->fcport_count == 0)
+		wake_up_all(&vha->fcport_waitQ);
 }
 
 /* ha->tgt.sess_lock supposed to be held on entry */
@@ -1161,21 +1155,13 @@
 	if (sess->se_sess)
 		vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
 
-	qla2x00_mark_device_lost(vha, sess, 0, 0);
-
 	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
 	sess->disc_state = DSC_DELETE_PEND;
 	sess->last_rscn_gen = sess->rscn_gen;
 	sess->last_login_gen = sess->login_gen;
 
-	if (sess->nvme_flag & NVME_FLAG_REGISTERED &&
-	    !(sess->nvme_flag & NVME_FLAG_DELETING)) {
-		sess->nvme_flag |= NVME_FLAG_DELETING;
-		schedule_work(&sess->nvme_del_work);
-	} else {
-		INIT_WORK(&sess->free_work, qlt_free_session_done);
-		schedule_work(&sess->free_work);
-	}
+	INIT_WORK(&sess->free_work, qlt_free_session_done);
+	queue_work(sess->vha->hw->wq, &sess->free_work);
 }
 EXPORT_SYMBOL(qlt_unreg_sess);
 
@@ -1222,7 +1208,6 @@
 		sess->logout_on_delete = 0;
 		sess->logo_ack_needed = 0;
 		sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
-		sess->scan_state = 0;
 	}
 }
 
@@ -1230,11 +1215,12 @@
 {
 	struct qla_tgt *tgt = sess->tgt;
 	unsigned long flags;
+	u16 sec;
 
-	if (sess->disc_state == DSC_DELETE_PEND)
+	switch (sess->disc_state) {
+	case DSC_DELETE_PEND:
 		return;
-
-	if (sess->disc_state == DSC_DELETED) {
+	case DSC_DELETED:
 		if (tgt && tgt->tgt_stop && (tgt->sess_count == 0))
 			wake_up_all(&tgt->waitQ);
 		if (sess->vha->fcport_count == 0)
@@ -1243,11 +1229,26 @@
 		if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] &&
 			!sess->plogi_link[QLT_PLOGI_LINK_CONFLICT])
 			return;
+		break;
+	case DSC_UPD_FCPORT:
+		/*
+	 * This port is not done reporting to the upper layer;
+	 * let it finish before the deletion proceeds.
+		 */
+		sess->next_disc_state = DSC_DELETE_PEND;
+		sec = jiffies_to_msecs(jiffies -
+		    sess->jiffies_at_registration)/1000;
+		if (sess->sec_since_registration < sec && sec && !(sec % 5)) {
+			sess->sec_since_registration = sec;
+			ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
+			    "%s %8phC : Slow Rport registration (%d Sec)\n",
+			    __func__, sess->port_name, sec);
+		}
+		return;
+	default:
+		break;
 	}
 
-	if (sess->deleted == QLA_SESS_DELETED)
-		sess->logout_on_delete = 0;
-
 	spin_lock_irqsave(&sess->vha->work_lock, flags);
 	if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
 		spin_unlock_irqrestore(&sess->vha->work_lock, flags);
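
The DSC_UPD_FCPORT branch above throttles its diagnostic: it logs only when the session's age in seconds has advanced past the last logged value and lands on a five-second boundary, so a slow registration produces one line every five seconds instead of one per call. The same throttle in isolation (names are illustrative, not driver symbols):

```c
#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/types.h>

/* Illustrative: log at most once per 5 s boundary since 'registered'. */
static void log_if_slow(unsigned long registered, u16 *last_logged_sec)
{
	u16 sec = jiffies_to_msecs(jiffies - registered) / 1000;

	if (*last_logged_sec < sec && sec && !(sec % 5)) {
		*last_logged_sec = sec;	/* suppress repeats within this boundary */
		pr_warn("slow rport registration (%u sec)\n", sec);
	}
}
```
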
@@ -1281,13 +1282,12 @@
 	/* At this point tgt could be already dead */
 }
 
-static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
+static int qla24xx_get_loop_id(struct scsi_qla_host *vha, be_id_t s_id,
 	uint16_t *loop_id)
 {
 	struct qla_hw_data *ha = vha->hw;
 	dma_addr_t gid_list_dma;
-	struct gid_list_info *gid_list;
-	char *id_iter;
+	struct gid_list_info *gid_list, *gid;
 	int res, rc, i;
 	uint16_t entries;
 
@@ -1310,18 +1310,17 @@
 		goto out_free_id_list;
 	}
 
-	id_iter = (char *)gid_list;
+	gid = gid_list;
 	res = -ENOENT;
 	for (i = 0; i < entries; i++) {
-		struct gid_list_info *gid = (struct gid_list_info *)id_iter;
-		if ((gid->al_pa == s_id[2]) &&
-		    (gid->area == s_id[1]) &&
-		    (gid->domain == s_id[0])) {
+		if (gid->al_pa == s_id.al_pa &&
+		    gid->area == s_id.area &&
+		    gid->domain == s_id.domain) {
 			*loop_id = le16_to_cpu(gid->loop_id);
 			res = 0;
 			break;
 		}
-		id_iter += ha->gid_list_info_size;
+		gid = (void *)gid + ha->gid_list_info_size;
 	}
 
 out_free_id_list:
@@ -1480,27 +1479,14 @@
 	struct qla_hw_data *ha = tgt->ha;
 	unsigned long flags;
 
+	mutex_lock(&ha->optrom_mutex);
 	mutex_lock(&qla_tgt_mutex);
-	if (!vha->fc_vport) {
-		struct Scsi_Host *sh = vha->host;
-		struct fc_host_attrs *fc_host = shost_to_fc_host(sh);
-		bool npiv_vports;
 
-		spin_lock_irqsave(sh->host_lock, flags);
-		npiv_vports = (fc_host->npiv_vports_inuse);
-		spin_unlock_irqrestore(sh->host_lock, flags);
-
-		if (npiv_vports) {
-			mutex_unlock(&qla_tgt_mutex);
-			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
-			    "NPIV is in use. Can not stop target\n");
-			return -EPERM;
-		}
-	}
 	if (tgt->tgt_stop || tgt->tgt_stopped) {
 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
 		    "Already in tgt->tgt_stop or tgt_stopped state\n");
 		mutex_unlock(&qla_tgt_mutex);
+		mutex_unlock(&ha->optrom_mutex);
 		return -EPERM;
 	}
 
@@ -1538,6 +1524,8 @@
 
 	/* Wait for sessions to clear out (just in case) */
 	wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);
+	mutex_unlock(&ha->optrom_mutex);
+
 	return 0;
 }
 EXPORT_SYMBOL(qlt_stop_phase1);
@@ -1567,6 +1555,15 @@
 
 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n",
 	    tgt);
+
+	switch (vha->qlini_mode) {
+	case QLA2XXX_INI_MODE_EXCLUSIVE:
+		vha->flags.online = 1;
+		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+		break;
+	default:
+		break;
+	}
 }
 EXPORT_SYMBOL(qlt_stop_phase2);
 
@@ -1580,11 +1577,10 @@
 	struct qla_qpair_hint *h;
 	struct qla_hw_data *ha = vha->hw;
 
-	if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stop &&
-	    !tgt->tgt_stopped)
+	if (!tgt->tgt_stop && !tgt->tgt_stopped)
 		qlt_stop_phase1(tgt);
 
-	if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
+	if (!tgt->tgt_stopped)
 		qlt_stop_phase2(tgt);
 
 	for (i = 0; i < vha->hw->max_qpairs + 1; i++) {
@@ -1716,6 +1712,90 @@
 	qla2x00_start_iocbs(vha, qpair->req);
 }
 
+static int qlt_build_abts_resp_iocb(struct qla_tgt_mgmt_cmd *mcmd)
+{
+	struct scsi_qla_host *vha = mcmd->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct abts_resp_to_24xx *resp;
+	uint32_t f_ctl, h;
+	uint8_t *p;
+	int rc;
+	struct abts_recv_from_24xx *abts = &mcmd->orig_iocb.abts;
+	struct qla_qpair *qpair = mcmd->qpair;
+
+	ql_dbg(ql_dbg_tgt, vha, 0xe006,
+	    "Sending task mgmt ABTS response (ha=%p, status=%x)\n",
+	    ha, mcmd->fc_tm_rsp);
+
+	rc = qlt_check_reserve_free_req(qpair, 1);
+	if (rc) {
+		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
+		    "qla_target(%d): %s failed: unable to allocate request packet\n",
+		    vha->vp_idx, __func__);
+		return -EAGAIN;
+	}
+
+	resp = (struct abts_resp_to_24xx *)qpair->req->ring_ptr;
+	memset(resp, 0, sizeof(*resp));
+
+	h = qlt_make_handle(qpair);
+	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
+		/*
+		 * CTIO type 7 from the firmware doesn't provide a way to
+		 * know the initiator's LOOP ID, so we can't find
+		 * the session and, in turn, the command.
+		 */
+		return -EAGAIN;
+	} else {
+		qpair->req->outstanding_cmds[h] = (srb_t *)mcmd;
+	}
+
+	resp->handle = MAKE_HANDLE(qpair->req->id, h);
+	resp->entry_type = ABTS_RESP_24XX;
+	resp->entry_count = 1;
+	resp->nport_handle = abts->nport_handle;
+	resp->vp_index = vha->vp_idx;
+	resp->sof_type = abts->sof_type;
+	resp->exchange_address = abts->exchange_address;
+	resp->fcp_hdr_le = abts->fcp_hdr_le;
+	f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
+	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
+	    F_CTL_SEQ_INITIATIVE);
+	p = (uint8_t *)&f_ctl;
+	resp->fcp_hdr_le.f_ctl[0] = *p++;
+	resp->fcp_hdr_le.f_ctl[1] = *p++;
+	resp->fcp_hdr_le.f_ctl[2] = *p;
+
+	resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.s_id;
+	resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.d_id;
+
+	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
+	if (mcmd->fc_tm_rsp == FCP_TMF_CMPL) {
+		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
+		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
+		resp->payload.ba_acct.low_seq_cnt = 0x0000;
+		resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
+		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
+		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
+	} else {
+		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
+		resp->payload.ba_rjt.reason_code =
+			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
+		/* Other bytes are zero */
+	}
+
+	vha->vha_tgt.qla_tgt->abts_resp_expected++;
+
+	/* Memory Barrier */
+	wmb();
+	if (qpair->reqq_start_iocbs)
+		qpair->reqq_start_iocbs(qpair);
+	else
+		qla2x00_start_iocbs(vha, qpair->req);
+
+	return rc;
+}
+
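
The f_ctl assembly above builds the 24-bit F_CTL field by forming a little-endian 32-bit word and walking a byte pointer over its first three bytes. An equivalent self-contained form of that packing (`pack_f_ctl` is illustrative, not a driver function):

```c
#include <linux/string.h>
#include <linux/types.h>
#include <asm/byteorder.h>

/* Copy the low three bytes of a host-order F_CTL value, LSB first. */
static void pack_f_ctl(u8 out[3], u32 f_ctl)
{
	__le32 v = cpu_to_le32(f_ctl);

	memcpy(out, &v, 3);	/* bytes 0..2 of the LE representation */
}
```
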
 /*
  * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
  */
@@ -1743,6 +1823,7 @@
 	}
 
 	resp->entry_type = ABTS_RESP_24XX;
+	resp->handle = QLA_TGT_SKIP_HANDLE;
 	resp->entry_count = 1;
 	resp->nport_handle = abts->nport_handle;
 	resp->vp_index = vha->vp_idx;
@@ -1757,19 +1838,11 @@
 	resp->fcp_hdr_le.f_ctl[1] = *p++;
 	resp->fcp_hdr_le.f_ctl[2] = *p;
 	if (ids_reversed) {
-		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
-		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
-		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
-		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
-		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
-		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
+		resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.d_id;
+		resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.s_id;
 	} else {
-		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
-		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
-		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
-		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
-		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
-		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
+		resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.s_id;
+		resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.d_id;
 	}
 	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
 	if (status == FCP_TMF_CMPL) {
@@ -1800,15 +1873,13 @@
  * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
  */
 static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
-	struct abts_resp_from_24xx_fw *entry)
+    struct qla_qpair *qpair, response_t *pkt, struct qla_tgt_mgmt_cmd *mcmd)
 {
 	struct ctio7_to_24xx *ctio;
+	u16 tmp;
+	struct abts_recv_from_24xx *entry;
 
-	ql_dbg(ql_dbg_tgt, vha, 0xe007,
-	    "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);
-
-	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(
-	    vha->hw->base_qpair, NULL);
+	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(qpair, NULL);
 	if (ctio == NULL) {
 		ql_dbg(ql_dbg_tgt, vha, 0xe04b,
 		    "qla_target(%d): %s failed: unable to allocate "
@@ -1816,6 +1887,13 @@
 		return;
 	}
 
+	if (mcmd)
+		/* ABTS from the remote port */
+		entry = &mcmd->orig_iocb.abts;
+	else
+		/* ABTS from this driver. */
+		entry = (struct abts_recv_from_24xx *)pkt;
+
 	/*
 	 * On entry this is the firmware's response to an ABTS response
 	 * we generated, so its ID fields are reversed.
@@ -1827,56 +1905,44 @@
 	ctio->handle = QLA_TGT_SKIP_HANDLE |	CTIO_COMPLETION_HANDLE_MARK;
 	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
 	ctio->vp_index = vha->vp_idx;
-	ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
-	ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
-	ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
 	ctio->exchange_addr = entry->exchange_addr_to_abort;
-	ctio->u.status1.flags = cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
-					    CTIO7_FLAGS_TERMINATE);
-	ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id);
+	tmp = (CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE);
+
+	if (mcmd) {
+		ctio->initiator_id = entry->fcp_hdr_le.s_id;
+
+		if (mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID)
+			tmp |= (mcmd->abort_io_attr << 9);
+		else if (qpair->retry_term_cnt & 1)
+			tmp |= (0x4 << 9);
+	} else {
+		ctio->initiator_id = entry->fcp_hdr_le.d_id;
+
+		if (qpair->retry_term_cnt & 1)
+			tmp |= (0x4 << 9);
+	}
+	ctio->u.status1.flags = cpu_to_le16(tmp);
+	ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id;
+
+	ql_dbg(ql_dbg_tgt, vha, 0xe007,
+	    "Sending retry TERM EXCH CTIO7 flags %04xh oxid %04xh attr valid %x\n",
+	    le16_to_cpu(ctio->u.status1.flags),
+	    le16_to_cpu(ctio->u.status1.ox_id),
+	    (mcmd && mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID) ? 1 : 0);
 
 	/* Memory Barrier */
 	wmb();
-	qla2x00_start_iocbs(vha, vha->req);
+	if (qpair->reqq_start_iocbs)
+		qpair->reqq_start_iocbs(qpair);
+	else
+		qla2x00_start_iocbs(vha, qpair->req);
 
-	qlt_24xx_send_abts_resp(vha->hw->base_qpair,
-	    (struct abts_recv_from_24xx *)entry,
-	    FCP_TMF_CMPL, true);
-}
+	if (mcmd)
+		qlt_build_abts_resp_iocb(mcmd);
+	else
+		qlt_24xx_send_abts_resp(qpair,
+		    (struct abts_recv_from_24xx *)entry, FCP_TMF_CMPL, true);
 
-static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag)
-{
-	struct qla_tgt_sess_op *op;
-	struct qla_tgt_cmd *cmd;
-	unsigned long flags;
-
-	spin_lock_irqsave(&vha->cmd_list_lock, flags);
-	list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
-		if (tag == op->atio.u.isp24.exchange_addr) {
-			op->aborted = true;
-			spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
-			return 1;
-		}
-	}
-
-	list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
-		if (tag == op->atio.u.isp24.exchange_addr) {
-			op->aborted = true;
-			spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
-			return 1;
-		}
-	}
-
-	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
-		if (tag == cmd->atio.u.isp24.exchange_addr) {
-			cmd->aborted = 1;
-			spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
-			return 1;
-		}
-	}
-	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
-
-	return 0;
 }
 
 /* drop cmds for the given lun
@@ -1884,8 +1950,7 @@
  * XXX does not go through the list of other port (which may have cmds
  *     for the same lun)
  */
-static void abort_cmds_for_lun(struct scsi_qla_host *vha,
-			        u64 lun, uint8_t *s_id)
+static void abort_cmds_for_lun(struct scsi_qla_host *vha, u64 lun, be_id_t s_id)
 {
 	struct qla_tgt_sess_op *op;
 	struct qla_tgt_cmd *cmd;
@@ -1971,9 +2036,8 @@
 		spin_lock_irqsave(mcmd->qpair->qp_lock_ptr, flags);
 		switch (mcmd->tmr_func) {
 		case QLA_TGT_ABTS:
-			qlt_24xx_send_abts_resp(mcmd->qpair,
-			    &mcmd->orig_iocb.abts,
-			    FCP_TMF_REJECTED, false);
+			mcmd->fc_tm_rsp = FCP_TMF_REJECTED;
+			qlt_build_abts_resp_iocb(mcmd);
 			break;
 		case QLA_TGT_LUN_RESET:
 		case QLA_TGT_CLEAR_TS:
@@ -2008,12 +2072,6 @@
 	struct qla_tgt_mgmt_cmd *mcmd;
 	struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];
 
-	if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) {
-		/* send TASK_ABORT response immediately */
-		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_CMPL, false);
-		return 0;
-	}
-
 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
 	    "qla_target(%d): task abort (tag=%d)\n",
 	    vha->vp_idx, abts->exchange_addr_to_abort);
@@ -2026,7 +2084,7 @@
 		return -ENOMEM;
 	}
 	memset(mcmd, 0, sizeof(*mcmd));
-
+	mcmd->cmd_type = TYPE_TGT_TMCMD;
 	mcmd->sess = sess;
 	memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
 	mcmd->reset_count = ha->base_qpair->chip_reset;
@@ -2048,6 +2106,8 @@
 		if (abort_cmd && abort_cmd->qpair) {
 			mcmd->qpair = abort_cmd->qpair;
 			mcmd->se_cmd.cpuid = abort_cmd->se_cmd.cpuid;
+			mcmd->abort_io_attr = abort_cmd->atio.u.isp24.attr;
+			mcmd->flags = QLA24XX_MGMT_ABORT_IO_ATTR_VALID;
 		}
 	}
 
@@ -2066,7 +2126,7 @@
 	struct qla_hw_data *ha = vha->hw;
 	struct fc_port *sess;
 	uint32_t tag = abts->exchange_addr_to_abort;
-	uint8_t s_id[3];
+	be_id_t s_id;
 	int rc;
 	unsigned long flags;
 
@@ -2090,13 +2150,11 @@
 
 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
 	    "qla_target(%d): task abort (s_id=%x:%x:%x, "
-	    "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2],
-	    abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag,
+	    "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id.domain,
+	    abts->fcp_hdr_le.s_id.area, abts->fcp_hdr_le.s_id.al_pa, tag,
 	    le32_to_cpu(abts->fcp_hdr_le.parameter));
 
-	s_id[0] = abts->fcp_hdr_le.s_id[2];
-	s_id[1] = abts->fcp_hdr_le.s_id[1];
-	s_id[2] = abts->fcp_hdr_le.s_id[0];
+	s_id = le_id_to_be(abts->fcp_hdr_le.s_id);
 
 	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
 	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
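
This hunk is representative of the series-wide change from `uint8_t s_id[3]` to dedicated `be_id_t`/`le_id_t` types: the old index-reversing copies become a single named conversion. A sketch of what those types and `le_id_to_be()` plausibly look like — the real definitions live in qla_def.h, so the layout below is inferred from how this diff uses them, not quoted:

```c
#include <linux/types.h>

/* Big-endian (wire-order) and little-endian (IOCB-order) FC port IDs. */
typedef struct { uint8_t domain; uint8_t area; uint8_t al_pa; } be_id_t;
typedef struct { uint8_t al_pa;  uint8_t area; uint8_t domain; } le_id_t;

static inline be_id_t le_id_to_be(le_id_t id)
{
	be_id_t res = {
		.domain = id.domain,
		.area   = id.area,
		.al_pa  = id.al_pa,
	};
	return res;
}
```
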
@@ -2160,9 +2218,7 @@
 	ctio->nport_handle = mcmd->sess->loop_id;
 	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
 	ctio->vp_index = ha->vp_idx;
-	ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
-	ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
-	ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+	ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
 	ctio->exchange_addr = atio->u.isp24.exchange_addr;
 	temp = (atio->u.isp24.attr << 9)|
 		CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
@@ -2219,9 +2275,7 @@
 	ctio->nport_handle = cmd->sess->loop_id;
 	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
 	ctio->vp_index = vha->vp_idx;
-	ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
-	ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
-	ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+	ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
 	ctio->exchange_addr = atio->u.isp24.exchange_addr;
 	temp = (atio->u.isp24.attr << 9) |
 	    CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
@@ -2237,14 +2291,14 @@
 		ctio->u.status1.scsi_status |=
 		    cpu_to_le16(SS_RESIDUAL_UNDER);
 
-	/* Response code and sense key */
-	put_unaligned_le32(((0x70 << 24) | (sense_key << 8)),
-	    (&ctio->u.status1.sense_data)[0]);
+	/* Fixed format sense data. */
+	ctio->u.status1.sense_data[0] = 0x70;
+	ctio->u.status1.sense_data[2] = sense_key;
 	/* Additional sense length */
-	put_unaligned_le32(0x0a, (&ctio->u.status1.sense_data)[1]);
+	ctio->u.status1.sense_data[7] = 0xa;
 	/* ASC and ASCQ */
-	put_unaligned_le32(((asc << 24) | (ascq << 16)),
-	    (&ctio->u.status1.sense_data)[3]);
+	ctio->u.status1.sense_data[12] = asc;
+	ctio->u.status1.sense_data[13] = ascq;
 
 	/* Memory Barrier */
 	wmb();
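
The rewritten stores above follow the SPC fixed-format sense layout byte for byte: byte 0 carries the response code (0x70, current errors), byte 2 the sense key, byte 7 the additional sense length, and bytes 12/13 the ASC/ASCQ pair. A minimal sketch of filling such a buffer:

```c
#include <linux/string.h>
#include <linux/types.h>

/* Fill an 18-byte SPC fixed-format sense buffer (response code 0x70). */
static void build_fixed_sense(u8 buf[18], u8 sense_key, u8 asc, u8 ascq)
{
	memset(buf, 0, 18);
	buf[0]  = 0x70;		/* current errors, fixed format */
	buf[2]  = sense_key;
	buf[7]  = 0x0a;		/* additional sense length: bytes 8..17 follow */
	buf[12] = asc;
	buf[13] = ascq;
}
```
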
@@ -2265,6 +2319,7 @@
 	struct qla_hw_data *ha = vha->hw;
 	unsigned long flags;
 	struct qla_qpair *qpair = mcmd->qpair;
+	bool free_mcmd = true;
 
 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
 	    "TM response mcmd (%p) status %#x state %#x",
@@ -2287,26 +2342,26 @@
 	}
 
 	if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) {
-		if (mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode ==
-		    ELS_LOGO ||
-		    mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode ==
-		    ELS_PRLO ||
-		    mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode ==
-		    ELS_TPRLO) {
+		switch (mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode) {
+		case ELS_LOGO:
+		case ELS_PRLO:
+		case ELS_TPRLO:
 			ql_dbg(ql_dbg_disc, vha, 0x2106,
-			    "TM response logo %phC status %#x state %#x",
+			    "TM response logo %8phC status %#x state %#x",
 			    mcmd->sess->port_name, mcmd->fc_tm_rsp,
 			    mcmd->flags);
 			qlt_schedule_sess_for_deletion(mcmd->sess);
-		} else {
+			break;
+		default:
 			qlt_send_notify_ack(vha->hw->base_qpair,
 			    &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
+			break;
 		}
 	} else {
-		if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX)
-			qlt_24xx_send_abts_resp(qpair, &mcmd->orig_iocb.abts,
-			    mcmd->fc_tm_rsp, false);
-		else
+		if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX) {
+			qlt_build_abts_resp_iocb(mcmd);
+			free_mcmd = false;
+		} else
 			qlt_24xx_send_task_mgmt_ctio(qpair, mcmd,
 			    mcmd->fc_tm_rsp);
 	}
@@ -2318,7 +2373,9 @@
 	 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
 	 * qlt_xmit_tm_rsp() returns here..
 	 */
-	ha->tgt.tgt_ops->free_mcmd(mcmd);
+	if (free_mcmd)
+		ha->tgt.tgt_ops->free_mcmd(mcmd);
+
 	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
 }
 EXPORT_SYMBOL(qlt_xmit_tm_rsp);
@@ -2331,7 +2388,7 @@
 	BUG_ON(cmd->sg_cnt == 0);
 
 	prm->sg = (struct scatterlist *)cmd->sg;
-	prm->seg_cnt = pci_map_sg(cmd->qpair->pdev, cmd->sg,
+	prm->seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev, cmd->sg,
 	    cmd->sg_cnt, cmd->dma_data_direction);
 	if (unlikely(prm->seg_cnt == 0))
 		goto out_err;
@@ -2358,7 +2415,7 @@
 
 		if (cmd->prot_sg_cnt) {
 			prm->prot_sg      = cmd->prot_sg;
-			prm->prot_seg_cnt = pci_map_sg(cmd->qpair->pdev,
+			prm->prot_seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev,
 				cmd->prot_sg, cmd->prot_sg_cnt,
 				cmd->dma_data_direction);
 			if (unlikely(prm->prot_seg_cnt == 0))
@@ -2388,17 +2445,18 @@
 {
 	struct qla_hw_data *ha;
 	struct qla_qpair *qpair;
+
 	if (!cmd->sg_mapped)
 		return;
 
 	qpair = cmd->qpair;
 
-	pci_unmap_sg(qpair->pdev, cmd->sg, cmd->sg_cnt,
+	dma_unmap_sg(&qpair->pdev->dev, cmd->sg, cmd->sg_cnt,
 	    cmd->dma_data_direction);
 	cmd->sg_mapped = 0;
 
 	if (cmd->prot_sg_cnt)
-		pci_unmap_sg(qpair->pdev, cmd->prot_sg, cmd->prot_sg_cnt,
+		dma_unmap_sg(&qpair->pdev->dev, cmd->prot_sg, cmd->prot_sg_cnt,
 			cmd->dma_data_direction);
 
 	if (!cmd->ctx)
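
`pci_map_sg()`/`pci_unmap_sg()` are thin wrappers slated for removal; the generic DMA API does the same work but takes the underlying `struct device` rather than the `struct pci_dev`. The shape of the converted pairing, as a standalone sketch (the wrapper function is illustrative):

```c
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>

/* Illustrative: map, use, and unmap a scatterlist via the generic DMA API. */
static int map_use_unmap(struct pci_dev *pdev, struct scatterlist *sgl,
			 int nents, enum dma_data_direction dir)
{
	int mapped = dma_map_sg(&pdev->dev, sgl, nents, dir);

	if (!mapped)			/* failure is 0, never negative */
		return -ENOMEM;
	/* ... program 'mapped' segments (may be fewer than nents) ... */
	dma_unmap_sg(&pdev->dev, sgl, nents, dir); /* unmap with original nents */
	return 0;
}
```
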
@@ -2518,9 +2576,7 @@
 	pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
 	pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
 	pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
-	pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
-	pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
-	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+	pkt->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
 	pkt->exchange_addr = atio->u.isp24.exchange_addr;
 	temp = atio->u.isp24.attr << 9;
 	pkt->u.status0.flags |= cpu_to_le16(temp);
@@ -2538,7 +2594,7 @@
 static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm)
 {
 	int cnt;
-	uint32_t *dword_ptr;
+	struct dsd64 *cur_dsd;
 
 	/* Build continuation packets */
 	while (prm->seg_cnt > 0) {
@@ -2559,19 +2615,13 @@
 		cont_pkt64->sys_define = 0;
 
 		cont_pkt64->entry_type = CONTINUE_A64_TYPE;
-		dword_ptr = (uint32_t *)&cont_pkt64->dseg_0_address;
+		cur_dsd = cont_pkt64->dsd;
 
 		/* Load continuation entry data segments */
 		for (cnt = 0;
 		    cnt < QLA_TGT_DATASEGS_PER_CONT_24XX && prm->seg_cnt;
 		    cnt++, prm->seg_cnt--) {
-			*dword_ptr++ =
-			    cpu_to_le32(pci_dma_lo32
-				(sg_dma_address(prm->sg)));
-			*dword_ptr++ = cpu_to_le32(pci_dma_hi32
-			    (sg_dma_address(prm->sg)));
-			*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
-
+			append_dsd64(&cur_dsd, prm->sg);
 			prm->sg = sg_next(prm->sg);
 		}
 	}
@@ -2584,13 +2634,13 @@
 static void qlt_load_data_segments(struct qla_tgt_prm *prm)
 {
 	int cnt;
-	uint32_t *dword_ptr;
+	struct dsd64 *cur_dsd;
 	struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;
 
 	pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);
 
 	/* Setup packet address segment pointer */
-	dword_ptr = pkt24->u.status0.dseg_0_address;
+	cur_dsd = &pkt24->u.status0.dsd;
 
 	/* Set total data segment count */
 	if (prm->seg_cnt)
@@ -2598,8 +2648,8 @@
 
 	if (prm->seg_cnt == 0) {
 		/* No data transfer */
-		*dword_ptr++ = 0;
-		*dword_ptr = 0;
+		cur_dsd->address = 0;
+		cur_dsd->length = 0;
 		return;
 	}
 
@@ -2609,14 +2659,7 @@
 	for (cnt = 0;
 	    (cnt < QLA_TGT_DATASEGS_PER_CMD_24XX) && prm->seg_cnt;
 	    cnt++, prm->seg_cnt--) {
-		*dword_ptr++ =
-		    cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
-
-		*dword_ptr++ = cpu_to_le32(pci_dma_hi32(
-			sg_dma_address(prm->sg)));
-
-		*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
-
+		append_dsd64(&cur_dsd, prm->sg);
 		prm->sg = sg_next(prm->sg);
 	}
 
@@ -2940,7 +2983,7 @@
 static inline int
 qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
 {
-	uint32_t		*cur_dsd;
+	struct dsd64		*cur_dsd;
 	uint32_t		transfer_length = 0;
 	uint32_t		data_bytes;
 	uint32_t		dif_bytes;
@@ -3046,9 +3089,7 @@
 	pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
 	pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
 	pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
-	pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
-	pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
-	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+	pkt->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
 	pkt->exchange_addr   = atio->u.isp24.exchange_addr;
 
 	/* silence compile warning */
@@ -3086,12 +3127,11 @@
 
 	qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts);
 
-	pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
-	pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
+	put_unaligned_le64(crc_ctx_dma, &pkt->crc_context_address);
 	pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
 
 	if (!bundling) {
-		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
+		cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
 	} else {
 		/*
 		 * Configure Bundling if we need to fetch interlaving
@@ -3101,7 +3141,7 @@
 		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
 		crc_ctx_pkt->u.bundling.dseg_count =
 			cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
-		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
+		cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
 	}
 
 	/* Finish the common fields of CRC pkt */
@@ -3134,9 +3174,9 @@
 		/* Walks dif segments */
 		pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA;
 
-		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
+		cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
 		if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
-			prm->prot_seg_cnt, &tc))
+			prm->prot_seg_cnt, cmd))
 			goto crc_queuing_error;
 	}
 	return QLA_SUCCESS;
@@ -3163,14 +3203,11 @@
 	unsigned long flags = 0;
 	int res;
 
-	if (cmd->sess && cmd->sess->deleted) {
+	if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
+	    (cmd->sess && cmd->sess->deleted)) {
 		cmd->state = QLA_TGT_STATE_PROCESSED;
-		if (cmd->sess->logout_completed)
-			/* no need to terminate. FW already freed exchange. */
-			qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
-		else
-			qlt_send_term_exchange(qpair, cmd, &cmd->atio, 0, 0);
-		return 0;
+		res = 0;
+		goto free;
 	}
 
 	ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018,
@@ -3181,9 +3218,8 @@
 
 	res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
 	    &full_req_cnt);
-	if (unlikely(res != 0)) {
-		return res;
-	}
+	if (unlikely(res != 0))
+		goto free;
 
 	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
 
@@ -3198,13 +3234,13 @@
 		 * previous life, just abort the processing.
 		 */
 		cmd->state = QLA_TGT_STATE_PROCESSED;
-		qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
 		ql_dbg_qp(ql_dbg_async, qpair, 0xe101,
 			"RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
 			vha->flags.online, qla2x00_reset_active(vha),
 			cmd->reset_count, qpair->chip_reset);
 		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
-		return 0;
+		res = 0;
+		goto free;
 	}
 
 	/* Does F/W have an IOCBs for this request */
@@ -3291,6 +3327,7 @@
 
 	cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
 	cmd->cmd_sent_to_fw = 1;
+	cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);
 
 	/* Memory Barrier */
 	wmb();
@@ -3306,6 +3343,8 @@
 	qlt_unmap_sg(vha, cmd);
 	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
 
+free:
+	vha->hw->tgt.tgt_ops->free_cmd(cmd);
 	return res;
 }
 EXPORT_SYMBOL(qlt_xmit_response);
@@ -3336,8 +3375,10 @@
 		 * Either the port is not online or this request was from
 		 * previous life, just abort the processing.
 		 */
-		cmd->state = QLA_TGT_STATE_NEED_DATA;
-		qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
+		cmd->aborted = 1;
+		cmd->write_data_transferred = 0;
+		cmd->state = QLA_TGT_STATE_DATA_IN;
+		vha->hw->tgt.tgt_ops->handle_data(cmd);
 		ql_dbg_qp(ql_dbg_async, qpair, 0xe102,
 			"RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n",
 			vha->flags.online, qla2x00_reset_active(vha),
@@ -3369,6 +3410,7 @@
 
 	cmd->state = QLA_TGT_STATE_NEED_DATA;
 	cmd->cmd_sent_to_fw = 1;
+	cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);
 
 	/* Memory Barrier */
 	wmb();
@@ -3546,33 +3588,11 @@
 static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
 	struct imm_ntfy_from_isp *imm, int ha_locked)
 {
-	unsigned long flags = 0;
 	int rc;
 
-	if (ha_locked) {
-		rc = __qlt_send_term_imm_notif(vha, imm);
-
-#if 0	/* Todo  */
-		if (rc == -ENOMEM)
-			qlt_alloc_qfull_cmd(vha, imm, 0, 0);
-#else
-		if (rc) {
-		}
-#endif
-		goto done;
-	}
-
-	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+	WARN_ON_ONCE(!ha_locked);
 	rc = __qlt_send_term_imm_notif(vha, imm);
-
-#if 0	/* Todo */
-	if (rc == -ENOMEM)
-		qlt_alloc_qfull_cmd(vha, imm, 0, 0);
-#endif
-
-done:
-	if (!ha_locked)
-		spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+	pr_debug("rc = %d\n", rc);
 }
 
 /*
@@ -3622,9 +3642,7 @@
 	ctio24->nport_handle = CTIO7_NHANDLE_UNRECOGNIZED;
 	ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
 	ctio24->vp_index = vha->vp_idx;
-	ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
-	ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
-	ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+	ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
 	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
 	temp = (atio->u.isp24.attr << 9) | CTIO7_FLAGS_STATUS_MODE_1 |
 		CTIO7_FLAGS_TERMINATE;
@@ -3813,6 +3831,7 @@
 
 	if (ctio != NULL) {
 		struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
+
 		term = !(c->flags &
 		    cpu_to_le16(OF_TERM_EXCH));
 	} else
@@ -3826,10 +3845,10 @@
 
 
 /* ha->hardware_lock supposed to be held on entry */
-static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
+static void *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
 	struct rsp_que *rsp, uint32_t handle, void *ctio)
 {
-	struct qla_tgt_cmd *cmd = NULL;
+	void *cmd = NULL;
 	struct req_que *req;
 	int qid = GET_QID(handle);
 	uint32_t h = handle & ~QLA_TGT_HANDLE_MASK;
@@ -3858,7 +3877,7 @@
 			return NULL;
 		}
 
-		cmd = (struct qla_tgt_cmd *)req->outstanding_cmds[h];
+		cmd = (void *) req->outstanding_cmds[h];
 		if (unlikely(cmd == NULL)) {
 			ql_dbg(ql_dbg_async, vha, 0xe053,
 			    "qla_target(%d): Suspicious: unable to find the command with handle %x req->id %d rsp->id %d\n",
@@ -3877,39 +3896,6 @@
 	return cmd;
 }
 
-/* hardware_lock should be held by caller. */
-void
-qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
-{
-	struct qla_hw_data *ha = vha->hw;
-
-	if (cmd->sg_mapped)
-		qlt_unmap_sg(vha, cmd);
-
-	/* TODO: fix debug message type and ids. */
-	if (cmd->state == QLA_TGT_STATE_PROCESSED) {
-		ql_dbg(ql_dbg_io, vha, 0xff00,
-		    "HOST-ABORT: state=PROCESSED.\n");
-	} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
-		cmd->write_data_transferred = 0;
-		cmd->state = QLA_TGT_STATE_DATA_IN;
-
-		ql_dbg(ql_dbg_io, vha, 0xff01,
-		    "HOST-ABORT: state=DATA_IN.\n");
-
-		ha->tgt.tgt_ops->handle_data(cmd);
-		return;
-	} else {
-		ql_dbg(ql_dbg_io, vha, 0xff03,
-		    "HOST-ABORT: state=BAD(%d).\n",
-		    cmd->state);
-		dump_stack();
-	}
-
-	cmd->trc_flags |= TRC_FLUSH;
-	ha->tgt.tgt_ops->free_cmd(cmd);
-}
-
 /*
  * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
  */
@@ -3942,12 +3928,20 @@
 
 	if (unlikely(status != CTIO_SUCCESS)) {
 		switch (status & 0xFFFF) {
+		case CTIO_INVALID_RX_ID:
+			if (printk_ratelimit())
+				dev_info(&vha->hw->pdev->dev,
+				    "qla_target(%d): CTIO with INVALID_RX_ID ATIO attr %x CTIO Flags %x|%x\n",
+				    vha->vp_idx, cmd->atio.u.isp24.attr,
+				    ((cmd->ctio_flags >> 9) & 0xf),
+				    cmd->ctio_flags);
+
+			break;
 		case CTIO_LIP_RESET:
 		case CTIO_TARGET_RESET:
 		case CTIO_ABORTED:
 			/* driver request abort via Terminate exchange */
 		case CTIO_TIMEOUT:
-		case CTIO_INVALID_RX_ID:
 			/* They are OK */
 			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
 			    "qla_target(%d): CTIO with "
@@ -3974,7 +3968,6 @@
 				 * Session is already logged out, but we need
 				 * to notify initiator, who's not aware of this
 				 */
-				cmd->sess->logout_on_delete = 0;
 				cmd->sess->send_els_logo = 1;
 				ql_dbg(ql_dbg_disc, vha, 0x20f8,
 				    "%s %d %8phC post del sess\n",
@@ -4082,8 +4075,6 @@
 	return fcp_task_attr;
 }
 
-static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *,
-					uint8_t *);
 /*
  * Process context for I/O path into tcm_qla2xxx code
  */
@@ -4133,11 +4124,9 @@
 	if (ret != 0)
 		goto out_term;
 	/*
-	 * Drop extra session reference from qla_tgt_handle_cmd_for_atio*(
+	 * Drop extra session reference from qlt_handle_cmd_for_atio().
 	 */
-	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
 	ha->tgt.tgt_ops->put_sess(sess);
-	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
 	return;
 
 out_term:
@@ -4154,9 +4143,7 @@
 	target_free_tag(sess->se_sess, &cmd->se_cmd);
 	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
 
-	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
 	ha->tgt.tgt_ops->put_sess(sess);
-	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
 }
 
 static void qlt_do_work(struct work_struct *work)
@@ -4331,9 +4318,7 @@
 		return -ENODEV;
 	}
 
-	id.b.al_pa = atio->u.isp24.fcp_hdr.s_id[2];
-	id.b.area = atio->u.isp24.fcp_hdr.s_id[1];
-	id.b.domain = atio->u.isp24.fcp_hdr.s_id[0];
+	id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id);
 	if (IS_SW_RESV_ADDR(id))
 		return -EBUSY;
 
@@ -4365,9 +4350,7 @@
 	if (!cmd) {
 		ql_dbg(ql_dbg_io, vha, 0x3062,
 		    "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
-		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
 		ha->tgt.tgt_ops->put_sess(sess);
-		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
 		return -EBUSY;
 	}
 
@@ -4433,7 +4416,7 @@
 	case QLA_TGT_CLEAR_TS:
 	case QLA_TGT_ABORT_TS:
 		abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
-		/* drop through */
+		/* fall through */
 	case QLA_TGT_CLEAR_ACA:
 		h = qlt_find_qphint(vha, mcmd->unpacked_lun);
 		mcmd->qpair = h->qpair;
@@ -4666,6 +4649,7 @@
 
 	list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
 		uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
+
 		if (op_key == key) {
 			op->aborted = true;
 			count++;
@@ -4674,6 +4658,7 @@
 
 	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
 		uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
+
 		if (cmd_key == key) {
 			cmd->aborted = 1;
 			count++;
@@ -4695,6 +4680,8 @@
 	struct qlt_plogi_ack_t *pla;
 	unsigned long flags;
 
+	lockdep_assert_held(&vha->hw->hardware_lock);
+
 	wwn = wwn_to_u64(iocb->u.isp24.port_name);
 
 	port_id.b.domain = iocb->u.isp24.port_id[2];
@@ -4712,6 +4699,12 @@
 		sess = qlt_find_sess_invalidate_other(vha, wwn,
 		    port_id, loop_id, &conflict_sess);
 		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+	} else {
+		ql_dbg(ql_dbg_disc, vha, 0xffff,
+		    "%s %d Term INOT due to WWN=0 lid=%d, NportID %06X ",
+		    __func__, __LINE__, loop_id, port_id.b24);
+		qlt_send_term_imm_notif(vha, iocb, 1);
+		goto out;
 	}
 
 	if (IS_SW_RESV_ADDR(port_id)) {
@@ -4753,6 +4746,34 @@
 		goto out;
 	}
 
+	if (sess->disc_state == DSC_UPD_FCPORT) {
+		u16 sec;
+
+		/*
+		 * Remote port registration is still going on from a
+		 * previous login. Allow it to finish before we
+		 * accept the new login.
+		 */
+		sess->next_disc_state = DSC_DELETE_PEND;
+		sec = jiffies_to_msecs(jiffies -
+		    sess->jiffies_at_registration) / 1000;
+		if (sess->sec_since_registration < sec && sec &&
+		    !(sec % 5)) {
+			sess->sec_since_registration = sec;
+			ql_dbg(ql_dbg_disc, vha, 0xffff,
+			    "%s %8phC - Slow Rport registration (%d Sec)\n",
+			    __func__, sess->port_name, sec);
+		}
+
+		if (!conflict_sess) {
+			list_del(&pla->list);
+			kmem_cache_free(qla_tgt_plogi_cachep, pla);
+		}
+
+		qlt_send_term_imm_notif(vha, iocb, 1);
+		goto out;
+	}
+
 	qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
 	sess->d_id = port_id;
 	sess->login_gen++;
@@ -4836,6 +4857,8 @@
 	int res = 0;
 	unsigned long flags;
 
+	lockdep_assert_held(&ha->hardware_lock);
+
 	wwn = wwn_to_u64(iocb->u.isp24.port_name);
 
 	port_id.b.domain = iocb->u.isp24.port_id[2];
@@ -4911,6 +4934,8 @@
 
 		if (sess != NULL) {
 			bool delete = false;
+			int sec;
+
 			spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
 			switch (sess->fw_login_state) {
 			case DSC_LS_PLOGI_PEND:
@@ -4923,9 +4948,24 @@
 			}
 
 			switch (sess->disc_state) {
+			case DSC_UPD_FCPORT:
+				spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
+				    flags);
+
+				sec = jiffies_to_msecs(jiffies -
+				    sess->jiffies_at_registration)/1000;
+				if (sess->sec_since_registration < sec && sec &&
+				    !(sec % 5)) {
+					sess->sec_since_registration = sec;
+					ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
+					    "%s %8phC : Slow Rport registration (%d Sec)\n",
+					    __func__, sess->port_name, sec);
+				}
+				qlt_send_term_imm_notif(vha, iocb, 1);
+				return 0;
+
 			case DSC_LOGIN_PEND:
 			case DSC_GPDB:
-			case DSC_UPD_FCPORT:
 			case DSC_LOGIN_COMPLETE:
 			case DSC_ADISC:
 				delete = false;
@@ -5048,6 +5088,7 @@
 	case ELS_ADISC:
 	{
 		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+
 		if (tgt->link_reinit_iocb_pending) {
 			qlt_send_notify_ack(ha->base_qpair,
 			    &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
@@ -5094,6 +5135,8 @@
 	int send_notify_ack = 1;
 	uint16_t status;
 
+	lockdep_assert_held(&ha->hardware_lock);
+
 	status = le16_to_cpu(iocb->u.isp2x.status);
 	switch (status) {
 	case IMM_NTFY_LIP_RESET:
@@ -5111,6 +5154,7 @@
 	case IMM_NTFY_LIP_LINK_REINIT:
 	{
 		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+
 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
 		    "qla_target(%d): LINK REINIT (loop %#x, "
 		    "subcode %x)\n", vha->vp_idx,
@@ -5230,10 +5274,7 @@
 	u16 temp;
 	port_id_t id;
 
-	id.b.al_pa = atio->u.isp24.fcp_hdr.s_id[2];
-	id.b.area = atio->u.isp24.fcp_hdr.s_id[1];
-	id.b.domain = atio->u.isp24.fcp_hdr.s_id[0];
-	id.b.rsvd_1 = 0;
+	id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id);
 
 	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
 	sess = qla2x00_find_fcport_by_nportid(vha, &id, 1);
@@ -5261,9 +5302,7 @@
 	ctio24->nport_handle = sess->loop_id;
 	ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
 	ctio24->vp_index = vha->vp_idx;
-	ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
-	ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
-	ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+	ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
 	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
 	temp = (atio->u.isp24.attr << 9) |
 		CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
@@ -5337,11 +5376,7 @@
 	se_sess = sess->se_sess;
 
 	tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
-	if (tag < 0)
-		return;
-
-	cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
-	if (!cmd) {
+	if (tag < 0) {
 		ql_dbg(ql_dbg_io, vha, 0x3009,
 			"qla_target(%d): %s: Allocation of cmd failed\n",
 			vha->vp_idx, __func__);
@@ -5356,6 +5391,7 @@
 		return;
 	}
 
+	cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
 	memset(cmd, 0, sizeof(struct qla_tgt_cmd));
 
 	qlt_incr_num_pend_cmds(vha);
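
The reorder above fixes an address-before-check bug: `sbitmap_queue_get()` returns a negative tag when the pool is exhausted, and the old code computed `&sess_cmd_map[tag]` before testing it. The corrected shape, reduced to a standalone helper (names are illustrative; `struct se_session` and `struct qla_tgt_cmd` come from the target-core and driver headers):

```c
#include <linux/sbitmap.h>
#include <target/target_core_base.h>	/* struct se_session */

/* Illustrative: index the command map only once the tag is known valid. */
static struct qla_tgt_cmd *get_cmd_slot(struct se_session *se_sess)
{
	unsigned int cpu;
	int tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);

	if (tag < 0)
		return NULL;	/* exhausted: bail before forming the pointer */

	return &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
}
```
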
@@ -5609,6 +5645,100 @@
 	tgt->atio_irq_cmd_count--;
 }
 
+/*
+ * qpair lock is assumed to be held
+ * rc = 0 : send terminate & ABTS response
+ * rc != 0: do not send terminate & ABTS response
+ */
+static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha,
+    struct qla_qpair *qpair, struct abts_resp_from_24xx_fw *entry)
+{
+	struct qla_hw_data *ha = vha->hw;
+	int rc = 0;
+
+	/*
+	 * Detect an unresolved exchange: if the same ABTS is unable
+	 * to terminate an existing command and keeps looping between
+	 * the firmware and the driver, force a firmware dump. Within
+	 * one jiffy we should see multiple loops.
+	 */
+	if (qpair->retry_term_exchg_addr == entry->exchange_addr_to_abort &&
+	    qpair->retry_term_jiff == jiffies) {
+		/* found existing exchange */
+		qpair->retry_term_cnt++;
+		if (qpair->retry_term_cnt >= 5) {
+			rc = EIO;
+			qpair->retry_term_cnt = 0;
+			ql_log(ql_log_warn, vha, 0xffff,
+			    "Unable to send ABTS response. Dumping firmware.\n");
+			ql_dump_buffer(ql_dbg_tgt_mgt + ql_dbg_buffer,
+			    vha, 0xffff, (uint8_t *)entry, sizeof(*entry));
+
+			if (qpair == ha->base_qpair)
+				ha->isp_ops->fw_dump(vha, 1);
+			else
+				ha->isp_ops->fw_dump(vha, 0);
+
+			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+			qla2xxx_wake_dpc(vha);
+		}
+	} else if (qpair->retry_term_jiff != jiffies) {
+		qpair->retry_term_exchg_addr = entry->exchange_addr_to_abort;
+		qpair->retry_term_cnt = 0;
+		qpair->retry_term_jiff = jiffies;
+	}
+
+	return rc;
+}
+
+
+static void qlt_handle_abts_completion(struct scsi_qla_host *vha,
+	struct rsp_que *rsp, response_t *pkt)
+{
+	struct abts_resp_from_24xx_fw *entry =
+		(struct abts_resp_from_24xx_fw *)pkt;
+	u32 h = pkt->handle & ~QLA_TGT_HANDLE_MASK;
+	struct qla_tgt_mgmt_cmd *mcmd;
+	struct qla_hw_data *ha = vha->hw;
+
+	mcmd = qlt_ctio_to_cmd(vha, rsp, pkt->handle, pkt);
+	if (mcmd == NULL && h != QLA_TGT_SKIP_HANDLE) {
+		ql_dbg(ql_dbg_async, vha, 0xe064,
+		    "qla_target(%d): ABTS Comp without mcmd\n",
+		    vha->vp_idx);
+		return;
+	}
+
+	if (mcmd)
+		vha  = mcmd->vha;
+	vha->vha_tgt.qla_tgt->abts_resp_expected--;
+
+	ql_dbg(ql_dbg_tgt, vha, 0xe038,
+	    "ABTS_RESP_24XX: compl_status %x\n",
+	    entry->compl_status);
+
+	if (le16_to_cpu(entry->compl_status) != ABTS_RESP_COMPL_SUCCESS) {
+		if ((entry->error_subcode1 == 0x1E) &&
+		    (entry->error_subcode2 == 0)) {
+			if (qlt_chk_unresolv_exchg(vha, rsp->qpair, entry)) {
+				ha->tgt.tgt_ops->free_mcmd(mcmd);
+				return;
+			}
+			qlt_24xx_retry_term_exchange(vha, rsp->qpair,
+			    pkt, mcmd);
+		} else {
+			ql_dbg(ql_dbg_tgt, vha, 0xe063,
+			    "qla_target(%d): ABTS_RESP_24XX failed %x (subcode %x:%x)",
+			    vha->vp_idx, entry->compl_status,
+			    entry->error_subcode1,
+			    entry->error_subcode2);
+			ha->tgt.tgt_ops->free_mcmd(mcmd);
+		}
+	} else if (mcmd) {
+		ha->tgt.tgt_ops->free_mcmd(mcmd);
+	}
+}
+
 /* ha->hardware_lock supposed to be held on entry */
 /* called via callback from qla2xxx */
 static void qlt_response_pkt(struct scsi_qla_host *vha,
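
`qlt_chk_unresolv_exchg()` above exists to break an ABTS livelock: when the firmware keeps handing back the same exchange address within a single jiffy, the driver counts the bounces and forces a firmware dump plus ISP abort at five. The detection core, generalized into a sketch (the struct and names are illustrative):

```c
#include <linux/jiffies.h>
#include <linux/types.h>

struct term_retry_state {
	u32 addr;		/* last exchange address seen */
	unsigned long stamp;	/* jiffy in which it was seen */
	u32 cnt;		/* bounces observed within that jiffy */
};

/* Return true once the same address bounces five times in one jiffy. */
static bool term_livelocked(struct term_retry_state *s, u32 addr)
{
	if (s->addr == addr && s->stamp == jiffies) {
		if (++s->cnt >= 5) {
			s->cnt = 0;
			return true;	/* caller dumps FW, schedules ISP abort */
		}
	} else if (s->stamp != jiffies) {
		s->addr = addr;
		s->cnt = 0;
		s->stamp = jiffies;
	}
	return false;
}
```
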
@@ -5633,6 +5763,7 @@
 	case CTIO_TYPE7:
 	{
 		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
+
 		qlt_do_ctio_completion(vha, rsp, entry->handle,
 		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
 		    entry);
@@ -5643,6 +5774,7 @@
 	{
 		struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
 		int rc;
+
 		if (atio->u.isp2x.status !=
 		    cpu_to_le16(ATIO_CDB_VALID)) {
 			ql_dbg(ql_dbg_tgt, vha, 0xe05e,
@@ -5691,6 +5823,7 @@
 	case CONTINUE_TGT_IO_TYPE:
 	{
 		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
+
 		qlt_do_ctio_completion(vha, rsp, entry->handle,
 		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
 		    entry);
@@ -5700,6 +5833,7 @@
 	case CTIO_A64_TYPE:
 	{
 		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
+
 		qlt_do_ctio_completion(vha, rsp, entry->handle,
 		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
 		    entry);
@@ -5714,6 +5848,7 @@
 	case NOTIFY_ACK_TYPE:
 		if (tgt->notify_ack_expected > 0) {
 			struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
+
 			ql_dbg(ql_dbg_tgt, vha, 0xe036,
 			    "NOTIFY_ACK seq %08x status %x\n",
 			    le16_to_cpu(entry->u.isp2x.seq_id),
@@ -5741,41 +5876,7 @@
 
 	case ABTS_RESP_24XX:
 		if (tgt->abts_resp_expected > 0) {
-			struct abts_resp_from_24xx_fw *entry =
-				(struct abts_resp_from_24xx_fw *)pkt;
-			ql_dbg(ql_dbg_tgt, vha, 0xe038,
-			    "ABTS_RESP_24XX: compl_status %x\n",
-			    entry->compl_status);
-			tgt->abts_resp_expected--;
-			if (le16_to_cpu(entry->compl_status) !=
-			    ABTS_RESP_COMPL_SUCCESS) {
-				if ((entry->error_subcode1 == 0x1E) &&
-				    (entry->error_subcode2 == 0)) {
-					/*
-					 * We've got a race here: aborted
-					 * exchange not terminated, i.e.
-					 * response for the aborted command was
-					 * sent between the abort request was
-					 * received and processed.
-					 * Unfortunately, the firmware has a
-					 * silly requirement that all aborted
-					 * exchanges must be explicitely
-					 * terminated, otherwise it refuses to
-					 * send responses for the abort
-					 * requests. So, we have to
-					 * (re)terminate the exchange and retry
-					 * the abort response.
-					 */
-					qlt_24xx_retry_term_exchange(vha,
-					    entry);
-				} else
-					ql_dbg(ql_dbg_tgt, vha, 0xe063,
-					    "qla_target(%d): ABTS_RESP_24XX "
-					    "failed %x (subcode %x:%x)",
-					    vha->vp_idx, entry->compl_status,
-					    entry->error_subcode1,
-					    entry->error_subcode2);
-			}
+			qlt_handle_abts_completion(vha, rsp, pkt);
 		} else {
 			ql_dbg(ql_dbg_tgt, vha, 0xe064,
 			    "qla_target(%d): Unexpected ABTS_RESP_24XX "
@@ -5965,10 +6066,7 @@
 	case MODE_DUAL:
 		if (newfcport) {
 			if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) {
-				ql_dbg(ql_dbg_disc, vha, 0x20fe,
-				   "%s %d %8phC post upd_fcport fcp_cnt %d\n",
-				   __func__, __LINE__, fcport->port_name, vha->fcport_count);
-				qla24xx_post_upd_fcport_work(vha, fcport);
+				qla24xx_sched_upd_fcport(fcport);
 			} else {
 				ql_dbg(ql_dbg_disc, vha, 0x20ff,
 				   "%s %d %8phC post gpsc fcp_cnt %d\n",
@@ -5990,21 +6088,21 @@
 
 /* Must be called under tgt_mutex */
 static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *vha,
-	uint8_t *s_id)
+					   be_id_t s_id)
 {
 	struct fc_port *sess = NULL;
 	fc_port_t *fcport = NULL;
 	int rc, global_resets;
 	uint16_t loop_id = 0;
 
-	if ((s_id[0] == 0xFF) && (s_id[1] == 0xFC)) {
+	if (s_id.domain == 0xFF && s_id.area == 0xFC) {
 		/*
 		 * This is Domain Controller, so it should be
 		 * OK to drop SCSI commands from it.
 		 */
 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
 		    "Unable to find initiator with S_ID %x:%x:%x",
-		    s_id[0], s_id[1], s_id[2]);
+		    s_id.domain, s_id.area, s_id.al_pa);
 		return NULL;
 	}
 
@@ -6021,12 +6119,12 @@
 		ql_log(ql_log_info, vha, 0xf071,
 		    "qla_target(%d): Unable to find "
 		    "initiator with S_ID %x:%x:%x",
-		    vha->vp_idx, s_id[0], s_id[1],
-		    s_id[2]);
+		    vha->vp_idx, s_id.domain, s_id.area, s_id.al_pa);
 
 		if (rc == -ENOENT) {
 			qlt_port_logo_t logo;
-			sid_to_portid(s_id, &logo.id);
+
+			logo.id = be_to_port_id(s_id);
 			logo.cmd_count = 1;
 			qlt_send_first_logo(vha, &logo);
 		}
@@ -6065,8 +6163,7 @@
 	struct qla_hw_data *ha = vha->hw;
 	struct fc_port *sess = NULL;
 	unsigned long flags = 0, flags2 = 0;
-	uint32_t be_s_id;
-	uint8_t s_id[3];
+	be_id_t s_id;
 	int rc;
 
 	spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
@@ -6074,12 +6171,9 @@
 	if (tgt->tgt_stop)
 		goto out_term2;
 
-	s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
-	s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
-	s_id[2] = prm->abts.fcp_hdr_le.s_id[0];
+	s_id = le_id_to_be(prm->abts.fcp_hdr_le.s_id);
 
-	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
-	    (unsigned char *)&be_s_id);
+	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
 	if (!sess) {
 		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
 
@@ -6105,16 +6199,15 @@
 	}
 
 	rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
-	ha->tgt.tgt_ops->put_sess(sess);
 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
 
+	ha->tgt.tgt_ops->put_sess(sess);
+
 	if (rc != 0)
 		goto out_term;
 	return;
 
 out_term2:
-	if (sess)
-		ha->tgt.tgt_ops->put_sess(sess);
 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
 
 out_term:
@@ -6130,9 +6223,9 @@
 	struct atio_from_isp *a = &prm->tm_iocb2;
 	struct scsi_qla_host *vha = tgt->vha;
 	struct qla_hw_data *ha = vha->hw;
-	struct fc_port *sess = NULL;
+	struct fc_port *sess;
 	unsigned long flags;
-	uint8_t *s_id = NULL; /* to hide compiler warnings */
+	be_id_t s_id;
 	int rc;
 	u64 unpacked_lun;
 	int fn;
@@ -6156,7 +6249,6 @@
 			goto out_term2;
 	} else {
 		if (sess->deleted) {
-			sess = NULL;
 			goto out_term2;
 		}
 
@@ -6164,7 +6256,6 @@
 			ql_dbg(ql_dbg_tgt_tmr, vha, 0xf020,
 			    "%s: kref_get fail %8phC\n",
 			     __func__, sess->port_name);
-			sess = NULL;
 			goto out_term2;
 		}
 	}
@@ -6175,16 +6266,15 @@
 	    scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
 
 	rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
-	ha->tgt.tgt_ops->put_sess(sess);
 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
 
+	ha->tgt.tgt_ops->put_sess(sess);
+
 	if (rc != 0)
 		goto out_term;
 	return;
 
 out_term2:
-	if (sess)
-		ha->tgt.tgt_ops->put_sess(sess);
 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
 out_term:
 	qlt_send_term_exchange(ha->base_qpair, NULL, &prm->tm_iocb2, 1, 0);
@@ -6290,6 +6380,7 @@
 		unsigned long flags;
 
 		struct qla_qpair *qpair = ha->queue_pair_map[i];
+
 		h = &tgt->qphints[i + 1];
 		INIT_LIST_HEAD(&h->hint_elem);
 		if (qpair) {
@@ -6363,31 +6454,19 @@
 static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
 	unsigned char *b)
 {
-	int i;
-
-	pr_debug("qla2xxx HW vha->node_name: ");
-	for (i = 0; i < WWN_SIZE; i++)
-		pr_debug("%02x ", vha->node_name[i]);
-	pr_debug("\n");
-	pr_debug("qla2xxx HW vha->port_name: ");
-	for (i = 0; i < WWN_SIZE; i++)
-		pr_debug("%02x ", vha->port_name[i]);
-	pr_debug("\n");
-
-	pr_debug("qla2xxx passed configfs WWPN: ");
+	pr_debug("qla2xxx HW vha->node_name: %8phC\n", vha->node_name);
+	pr_debug("qla2xxx HW vha->port_name: %8phC\n", vha->port_name);
 	put_unaligned_be64(wwpn, b);
-	for (i = 0; i < WWN_SIZE; i++)
-		pr_debug("%02x ", b[i]);
-	pr_debug("\n");
+	pr_debug("qla2xxx passed configfs WWPN: %8phC\n", b);
 }
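
The byte-by-byte dump loops collapse into printk's `%*ph` extension: `%8phC` prints eight bytes colon-separated, matching the WWN style used in the driver's other messages. For example (the WWPN value is made up):

```c
#include <linux/printk.h>
#include <linux/types.h>

static void demo_wwn_print(void)
{
	u8 wwpn[8] = { 0x21, 0x00, 0x00, 0x24, 0xff, 0x7f, 0x16, 0xa1 };

	pr_debug("WWPN: %8phC\n", wwpn);	/* 21:00:00:24:ff:7f:16:a1 */
}
```
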
 
 /**
  * qla_tgt_lport_register - register lport with external module
  *
  * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
- * @phys_wwpn:
- * @npiv_wwpn:
- * @npiv_wwnn:
+ * @phys_wwpn: physical port WWPN
+ * @npiv_wwpn: NPIV WWPN
+ * @npiv_wwnn: NPIV WWNN
  * @callback:  lport initialization callback for tcm_qla2xxx code
  */
 int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
@@ -6414,6 +6493,9 @@
 		if (!(host->hostt->supported_mode & MODE_TARGET))
 			continue;
 
+		if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
+			continue;
+
 		spin_lock_irqsave(&ha->hardware_lock, flags);
 		if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
 			pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
@@ -6476,15 +6558,15 @@
 EXPORT_SYMBOL(qlt_lport_deregister);
 
 /* Must be called under HW lock */
-static void qlt_set_mode(struct scsi_qla_host *vha)
+void qlt_set_mode(struct scsi_qla_host *vha)
 {
-	switch (ql2x_ini_mode) {
+	switch (vha->qlini_mode) {
 	case QLA2XXX_INI_MODE_DISABLED:
 	case QLA2XXX_INI_MODE_EXCLUSIVE:
 		vha->host->active_mode = MODE_TARGET;
 		break;
 	case QLA2XXX_INI_MODE_ENABLED:
-		vha->host->active_mode = MODE_UNKNOWN;
+		vha->host->active_mode = MODE_INITIATOR;
 		break;
 	case QLA2XXX_INI_MODE_DUAL:
 		vha->host->active_mode = MODE_DUAL;
@@ -6497,7 +6579,7 @@
 /* Must be called under HW lock */
 static void qlt_clear_mode(struct scsi_qla_host *vha)
 {
-	switch (ql2x_ini_mode) {
+	switch (vha->qlini_mode) {
 	case QLA2XXX_INI_MODE_DISABLED:
 		vha->host->active_mode = MODE_UNKNOWN;
 		break;
@@ -6533,20 +6615,29 @@
 		dump_stack();
 		return;
 	}
+	if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
+		return;
 
+	if (ha->tgt.num_act_qpairs > ha->max_qpairs)
+		ha->tgt.num_act_qpairs = ha->max_qpairs;
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	tgt->tgt_stopped = 0;
 	qlt_set_mode(vha);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
+	mutex_lock(&ha->optrom_mutex);
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
+	    "%s.\n", __func__);
 	if (vha->vp_idx) {
 		qla24xx_disable_vp(vha);
 		qla24xx_enable_vp(vha);
 	} else {
 		set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
 		qla2xxx_wake_dpc(base_vha);
-		qla2x00_wait_for_hba_online(base_vha);
+		WARN_ON_ONCE(qla2x00_wait_for_hba_online(base_vha) !=
+			     QLA_SUCCESS);
 	}
+	mutex_unlock(&ha->optrom_mutex);
 }
 EXPORT_SYMBOL(qlt_enable_vha);
 
@@ -6575,7 +6666,9 @@
 
 	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
 	qla2xxx_wake_dpc(vha);
-	qla2x00_wait_for_hba_online(vha);
+	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
+		ql_dbg(ql_dbg_tgt, vha, 0xe081,
+		       "qla2x00_wait_for_hba_online() failed\n");
 }
 
 /*
@@ -6674,7 +6767,7 @@
 			 */
 			ql_log(ql_log_warn, vha, 0xd03c,
 			    "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n",
-			    pkt->u.isp24.fcp_hdr.s_id,
+			    &pkt->u.isp24.fcp_hdr.s_id,
 			    be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id),
 			    le32_to_cpu(pkt->u.isp24.exchange_addr), pkt);
 
@@ -6719,7 +6812,7 @@
 	RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));
 
 	if (ha->flags.msix_enabled) {
-		if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
 			if (IS_QLA2071(ha)) {
 				/* 4 ports Baker: Enable Interrupt Handshake */
 				icb->msix_atio = 0;
@@ -6734,7 +6827,7 @@
 		}
 	} else {
 		/* INTx|MSI */
-		if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
 			icb->msix_atio = 0;
 			icb->firmware_options_2 |= BIT_26;
 			ql_dbg(ql_dbg_init, vha, 0xf072,
@@ -6768,7 +6861,7 @@
 		if (qla_tgt_mode_enabled(vha))
 			nv->exchange_count = cpu_to_le16(0xFFFF);
 		else			/* dual */
-			nv->exchange_count = cpu_to_le16(ql2xexchoffld);
+			nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
 
 		/* Enable target mode */
 		nv->firmware_options_1 |= cpu_to_le32(BIT_4);
@@ -6847,14 +6940,6 @@
 		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
 		icb->firmware_options_1 |= cpu_to_le32(BIT_14);
 	}
-
-	/* disable ZIO at start time. */
-	if (!vha->flags.init_done) {
-		uint32_t tmp;
-		tmp = le32_to_cpu(icb->firmware_options_2);
-		tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
-		icb->firmware_options_2 = cpu_to_le32(tmp);
-	}
 }
 
 void
@@ -6882,7 +6967,7 @@
 		if (qla_tgt_mode_enabled(vha))
 			nv->exchange_count = cpu_to_le16(0xFFFF);
 		else			/* dual */
-			nv->exchange_count = cpu_to_le16(ql2xexchoffld);
+			nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
 
 		/* Enable target mode */
 		nv->firmware_options_1 |= cpu_to_le32(BIT_4);
@@ -6958,15 +7043,6 @@
 		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
 		icb->firmware_options_1 |= cpu_to_le32(BIT_14);
 	}
-
-	/* disable ZIO at start time. */
-	if (!vha->flags.init_done) {
-		uint32_t tmp;
-		tmp = le32_to_cpu(icb->firmware_options_2);
-		tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
-		icb->firmware_options_2 = cpu_to_le32(tmp);
-	}
-
 }
 
 void
@@ -7000,7 +7076,8 @@
 	if (!QLA_TGT_MODE_ENABLED())
 		return;
 
-	if  ((ql2xenablemsix == 0) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+	if  ((ql2xenablemsix == 0) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
+	    IS_QLA28XX(ha)) {
 		ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
 		ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
 	} else {
@@ -7128,7 +7205,10 @@
 		    sizeof(struct atio_from_isp), ha->tgt.atio_ring,
 		    ha->tgt.atio_dma);
 	}
+	ha->tgt.atio_ring = NULL;
+	ha->tgt.atio_dma = 0;
 	kfree(ha->tgt.tgt_vp_map);
+	ha->tgt.tgt_vp_map = NULL;
 }
 
 /* vport_slock to be held by the caller */
@@ -7212,6 +7292,9 @@
 {
 	int ret;
 
+	BUILD_BUG_ON(sizeof(struct ctio7_to_24xx) != 64);
+	BUILD_BUG_ON(sizeof(struct ctio_to_2xxx) != 64);
+
 	if (!qlt_parse_ini_mode()) {
 		ql_log(ql_log_fatal, NULL, 0xe06b,
 		    "qlt_parse_ini_mode() failed\n");
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 199d3ba..d006f0a 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
  *  Copyright (C) 2004 - 2005 Leonid Stoljar
@@ -9,16 +10,6 @@
  *  Copyright (C) 2010-2011 Nicholas A. Bellinger <nab@kernel.org>
  *
  *  Additional file for the target driver support.
- *
- *  This program is free software; you can redistribute it and/or
- *  modify it under the terms of the GNU General Public License
- *  as published by the Free Software Foundation; either version 2
- *  of the License, or (at your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- *  GNU General Public License for more details.
  */
 /*
  * This is the global def file that is useful for including from the
@@ -29,6 +20,7 @@
 #define __QLA_TARGET_H
 
 #include "qla_def.h"
+#include "qla_dsd.h"
 
 /*
  * Must be changed on any change in any initiator visible interfaces or
@@ -224,12 +216,7 @@
 	uint16_t reserved_1[3];
 	uint16_t scsi_status;
 	uint32_t transfer_length;
-	uint32_t dseg_0_address;	/* Data segment 0 address. */
-	uint32_t dseg_0_length;		/* Data segment 0 length. */
-	uint32_t dseg_1_address;	/* Data segment 1 address. */
-	uint32_t dseg_1_length;		/* Data segment 1 length. */
-	uint32_t dseg_2_address;	/* Data segment 2 address. */
-	uint32_t dseg_2_length;		/* Data segment 2 length. */
+	struct dsd32 dsd[3];
 } __packed;
 #define ATIO_PATH_INVALID       0x07
 #define ATIO_CANT_PROV_CAP      0x16
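For context, the dsd[3] member above comes from the newly included qla_dsd.h;
the descriptor carriers are presumably along these lines (the exact
definitions are an assumption here, not quoted from this patch):

	/* 32-bit data-segment descriptor: address + length, little endian */
	struct dsd32 {
		__le32 address;
		__le32 length;
	};

	/* 64-bit variant, as used by status0.dsd further down */
	struct dsd64 {
		__le64 address;
		__le32 length;
	} __packed;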
@@ -260,9 +247,9 @@
 
 struct fcp_hdr {
 	uint8_t  r_ctl;
-	uint8_t  d_id[3];
+	be_id_t  d_id;
 	uint8_t  cs_ctl;
-	uint8_t  s_id[3];
+	be_id_t  s_id;
 	uint8_t  type;
 	uint8_t  f_ctl[3];
 	uint8_t  seq_id;
@@ -274,9 +261,9 @@
 } __packed;
 
 struct fcp_hdr_le {
-	uint8_t  d_id[3];
+	le_id_t  d_id;
 	uint8_t  r_ctl;
-	uint8_t  s_id[3];
+	le_id_t  s_id;
 	uint8_t  cs_ctl;
 	uint8_t  f_ctl[3];
 	uint8_t  type;
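The be_id_t/le_id_t switch above replaces raw uint8_t[3] port IDs with types
that encode their byte order, so a mismatched assignment becomes a compile
error rather than silently swapped bytes. A sketch of the assumed layouts:

	typedef struct {
		uint8_t domain;
		uint8_t area;
		uint8_t al_pa;
	} be_id_t;	/* big-endian (wire) order, as in fcp_hdr */

	typedef struct {
		uint8_t al_pa;
		uint8_t area;
		uint8_t domain;
	} le_id_t;	/* byte-swapped order used by fcp_hdr_le */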
@@ -415,7 +402,7 @@
 	uint16_t dseg_count;		    /* Data segment count. */
 	uint8_t  vp_index;
 	uint8_t  add_flags;
-	uint8_t  initiator_id[3];
+	le_id_t  initiator_id;
 	uint8_t  reserved;
 	uint32_t exchange_addr;
 	union {
@@ -429,10 +416,7 @@
 			uint32_t reserved2;
 			uint32_t transfer_length;
 			uint32_t reserved3;
-			/* Data segment 0 address. */
-			uint32_t dseg_0_address[2];
-			/* Data segment 0 length. */
-			uint32_t dseg_0_length;
+			struct dsd64 dsd;
 		} status0;
 		struct {
 			uint16_t sense_length;
@@ -514,7 +498,7 @@
 	uint8_t  add_flags;		/* additional flags */
 #define CTIO_CRC2_AF_DIF_DSD_ENA BIT_3
 
-	uint8_t  initiator_id[3];	/* initiator ID */
+	le_id_t  initiator_id;		/* initiator ID */
 	uint8_t  reserved1;
 	uint32_t exchange_addr;		/* rcv exchange address */
 	uint16_t reserved2;
@@ -526,10 +510,10 @@
 	uint32_t reserved5;
 	__le32 transfer_length;		/* total fc transfer length */
 	uint32_t reserved6;
-	__le32 crc_context_address[2];/* Data segment address. */
+	__le64	 crc_context_address __packed; /* Data segment address. */
 	uint16_t crc_context_len;	/* Data segment length. */
 	uint16_t reserved_1;		/* MUST be set to 0. */
-} __packed;
+};
 
 /* CTIO Type CRC_x Status IOCB */
 struct ctio_crc_from_fw {
@@ -698,7 +682,7 @@
 	struct fc_port *(*find_sess_by_loop_id)(struct scsi_qla_host *,
 						const uint16_t);
 	struct fc_port *(*find_sess_by_s_id)(struct scsi_qla_host *,
-						const uint8_t *);
+					     const be_id_t);
 	void (*clear_nacl_from_fcport_map)(struct fc_port *);
 	void (*put_sess)(struct fc_port *);
 	void (*shutdown_sess)(struct fc_port *);
@@ -771,14 +755,6 @@
 #define	FC_TM_REJECT                4
 #define FC_TM_FAILED                5
 
-#if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G)
-#define pci_dma_lo32(a) (a & 0xffffffff)
-#define pci_dma_hi32(a) ((((a) >> 16)>>16) & 0xffffffff)
-#else
-#define pci_dma_lo32(a) (a & 0xffffffff)
-#define pci_dma_hi32(a) 0
-#endif
-
 #define QLA_TGT_SENSE_VALID(sense)  ((sense != NULL) && \
 				(((const uint8_t *)(sense))[0] & 0x70) == 0x70)
 
@@ -863,7 +839,7 @@
 	TRC_CTIO_ERR = BIT_11,
 	TRC_CTIO_DONE = BIT_12,
 	TRC_CTIO_ABORTED =  BIT_13,
-	TRC_CTIO_STRANGE= BIT_14,
+	TRC_CTIO_STRANGE = BIT_14,
 	TRC_CMD_DONE = BIT_15,
 	TRC_CMD_CHK_STOP = BIT_16,
 	TRC_CMD_FREE = BIT_17,
@@ -897,9 +873,14 @@
 	unsigned int term_exchg:1;
 	unsigned int cmd_sent_to_fw:1;
 	unsigned int cmd_in_wq:1;
-	unsigned int aborted:1;
-	unsigned int data_work:1;
-	unsigned int data_work_free:1;
+
+	/*
+	 * This variable may be set from outside the LIO and I/O completion
+	 * callback functions. Do not declare this member variable as a
+	 * bitfield to avoid a read-modify-write operation when this variable
+	 * is set.
+	 */
+	unsigned int aborted;
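The comment above is worth unpacking: neighboring bitfields share one storage
word, so setting one is a non-atomic read-modify-write of the whole word. A
sketch of the race the plain `unsigned int` avoids:

	struct demo_flags {
		unsigned int aborted:1;
		unsigned int data_work:1;	/* same storage word as 'aborted' */
	};

	/*
	 * CPU0: f->aborted = 1;    compiles to load word, set bit, store word
	 * CPU1: f->data_work = 1;  ditto, on the same word
	 * One store can overwrite the other's update. With a full-width
	 * 'unsigned int aborted', stores to the two fields no longer alias
	 * (READ_ONCE()/WRITE_ONCE() would still be needed for strictness).
	 */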
 
 	struct scatterlist *sg;	/* cmd data buffer SG vector */
 	int sg_cnt;		/* SG segments count */
@@ -908,6 +889,7 @@
 	u64 unpacked_lun;
 	enum dma_data_direction dma_data_direction;
 
+	uint16_t ctio_flags;
 	uint16_t vp_idx;
 	uint16_t loop_id;	/* to save extra sess dereferences */
 	struct qla_tgt *tgt;	/* to save extra sess dereferences */
@@ -930,10 +912,12 @@
 	uint8_t scsi_status, sense_key, asc, ascq;
 
 	struct crc_context *ctx;
-	uint8_t		*cdb;
+	const uint8_t	*cdb;
 	uint64_t	lba;
 	uint16_t	a_guard, e_guard, a_app_tag, e_app_tag;
 	uint32_t	a_ref_tag, e_ref_tag;
+#define DIF_BUNDL_DMA_VALID 1
+	uint16_t prot_flags;
 
 	uint64_t jiffies_at_alloc;
 	uint64_t jiffies_at_free;
@@ -956,16 +940,20 @@
 };
 
 struct qla_tgt_mgmt_cmd {
+	uint8_t cmd_type;
+	uint8_t pad[3];
 	uint16_t tmr_func;
 	uint8_t fc_tm_rsp;
+	uint8_t abort_io_attr;
 	struct fc_port *sess;
 	struct qla_qpair *qpair;
 	struct scsi_qla_host *vha;
 	struct se_cmd se_cmd;
 	struct work_struct free_work;
 	unsigned int flags;
+#define QLA24XX_MGMT_SEND_NACK	BIT_0
+#define QLA24XX_MGMT_ABORT_IO_ATTR_VALID BIT_1
 	uint32_t reset_count;
-#define QLA24XX_MGMT_SEND_NACK	1
 	struct work_struct work;
 	uint64_t unpacked_lun;
 	union {
@@ -1042,22 +1030,11 @@
 	return (ha->host->active_mode == MODE_DUAL);
 }
 
-static inline uint32_t sid_to_key(const uint8_t *s_id)
+static inline uint32_t sid_to_key(const be_id_t s_id)
 {
-	uint32_t key;
-
-	key = (((unsigned long)s_id[0] << 16) |
-	       ((unsigned long)s_id[1] << 8) |
-	       (unsigned long)s_id[2]);
-	return key;
-}
-
-static inline void sid_to_portid(const uint8_t *s_id, port_id_t *p)
-{
-	memset(p, 0, sizeof(*p));
-	p->b.domain = s_id[0];
-	p->b.area = s_id[1];
-	p->b.al_pa = s_id[2];
+	return s_id.domain << 16 |
+		s_id.area << 8 |
+		s_id.al_pa;
 }
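Worked example of the packing above, with hypothetical ID bytes and the field
names from the be_id_t sketch earlier:

	static inline uint32_t demo_key(void)
	{
		be_id_t s_id = { .domain = 0x01, .area = 0xaa, .al_pa = 0xef };

		return sid_to_key(s_id);	/* == 0x01aaef */
	}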
 
 /*
@@ -1103,7 +1080,5 @@
 
 void qlt_send_resp_ctio(struct qla_qpair *, struct qla_tgt_cmd *, uint8_t,
     uint8_t, uint8_t, uint8_t);
-extern void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *,
-    struct qla_tgt_cmd *);
 
 #endif /* __QLA_TARGET_H */
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 0ccd06f..294d77c 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -7,103 +7,9 @@
 #include "qla_def.h"
 #include "qla_tmpl.h"
 
-/* note default template is in big endian */
-static const uint32_t ql27xx_fwdt_default_template[] = {
-	0x63000000, 0xa4000000, 0x7c050000, 0x00000000,
-	0x30000000, 0x01000000, 0x00000000, 0xc0406eb4,
-	0x00000000, 0x00000000, 0x00000000, 0x00000000,
-	0x00000000, 0x00000000, 0x00000000, 0x00000000,
-	0x00000000, 0x00000000, 0x00000000, 0x00000000,
-	0x00000000, 0x00000000, 0x00000000, 0x00000000,
-	0x00000000, 0x00000000, 0x00000000, 0x00000000,
-	0x00000000, 0x00000000, 0x00000000, 0x00000000,
-	0x00000000, 0x00000000, 0x00000000, 0x00000000,
-	0x00000000, 0x00000000, 0x00000000, 0x00000000,
-	0x00000000, 0x04010000, 0x14000000, 0x00000000,
-	0x02000000, 0x44000000, 0x09010000, 0x10000000,
-	0x00000000, 0x02000000, 0x01010000, 0x1c000000,
-	0x00000000, 0x02000000, 0x00600000, 0x00000000,
-	0xc0000000, 0x01010000, 0x1c000000, 0x00000000,
-	0x02000000, 0x00600000, 0x00000000, 0xcc000000,
-	0x01010000, 0x1c000000, 0x00000000, 0x02000000,
-	0x10600000, 0x00000000, 0xd4000000, 0x01010000,
-	0x1c000000, 0x00000000, 0x02000000, 0x700f0000,
-	0x00000060, 0xf0000000, 0x00010000, 0x18000000,
-	0x00000000, 0x02000000, 0x00700000, 0x041000c0,
-	0x00010000, 0x18000000, 0x00000000, 0x02000000,
-	0x10700000, 0x041000c0, 0x00010000, 0x18000000,
-	0x00000000, 0x02000000, 0x40700000, 0x041000c0,
-	0x01010000, 0x1c000000, 0x00000000, 0x02000000,
-	0x007c0000, 0x01000000, 0xc0000000, 0x00010000,
-	0x18000000, 0x00000000, 0x02000000, 0x007c0000,
-	0x040300c4, 0x00010000, 0x18000000, 0x00000000,
-	0x02000000, 0x007c0000, 0x040100c0, 0x01010000,
-	0x1c000000, 0x00000000, 0x02000000, 0x007c0000,
-	0x00000000, 0xc0000000, 0x00010000, 0x18000000,
-	0x00000000, 0x02000000, 0x007c0000, 0x04200000,
-	0x0b010000, 0x18000000, 0x00000000, 0x02000000,
-	0x0c000000, 0x00000000, 0x02010000, 0x20000000,
-	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
-	0xf0000000, 0x000000b0, 0x02010000, 0x20000000,
-	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
-	0xf0000000, 0x000010b0, 0x02010000, 0x20000000,
-	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
-	0xf0000000, 0x000020b0, 0x02010000, 0x20000000,
-	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
-	0xf0000000, 0x000030b0, 0x02010000, 0x20000000,
-	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
-	0xf0000000, 0x000040b0, 0x02010000, 0x20000000,
-	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
-	0xf0000000, 0x000050b0, 0x02010000, 0x20000000,
-	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
-	0xf0000000, 0x000060b0, 0x02010000, 0x20000000,
-	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
-	0xf0000000, 0x000070b0, 0x02010000, 0x20000000,
-	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
-	0xf0000000, 0x000080b0, 0x02010000, 0x20000000,
-	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
-	0xf0000000, 0x000090b0, 0x02010000, 0x20000000,
-	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
-	0xf0000000, 0x0000a0b0, 0x00010000, 0x18000000,
-	0x00000000, 0x02000000, 0x0a000000, 0x040100c0,
-	0x00010000, 0x18000000, 0x00000000, 0x02000000,
-	0x0a000000, 0x04200080, 0x00010000, 0x18000000,
-	0x00000000, 0x02000000, 0x00be0000, 0x041000c0,
-	0x00010000, 0x18000000, 0x00000000, 0x02000000,
-	0x10be0000, 0x041000c0, 0x00010000, 0x18000000,
-	0x00000000, 0x02000000, 0x20be0000, 0x041000c0,
-	0x00010000, 0x18000000, 0x00000000, 0x02000000,
-	0x30be0000, 0x041000c0, 0x00010000, 0x18000000,
-	0x00000000, 0x02000000, 0x00b00000, 0x041000c0,
-	0x00010000, 0x18000000, 0x00000000, 0x02000000,
-	0x10b00000, 0x041000c0, 0x00010000, 0x18000000,
-	0x00000000, 0x02000000, 0x20b00000, 0x041000c0,
-	0x00010000, 0x18000000, 0x00000000, 0x02000000,
-	0x30b00000, 0x041000c0, 0x00010000, 0x18000000,
-	0x00000000, 0x02000000, 0x00300000, 0x041000c0,
-	0x00010000, 0x18000000, 0x00000000, 0x02000000,
-	0x10300000, 0x041000c0, 0x00010000, 0x18000000,
-	0x00000000, 0x02000000, 0x20300000, 0x041000c0,
-	0x00010000, 0x18000000, 0x00000000, 0x02000000,
-	0x30300000, 0x041000c0, 0x0a010000, 0x10000000,
-	0x00000000, 0x02000000, 0x06010000, 0x1c000000,
-	0x00000000, 0x02000000, 0x01000000, 0x00000200,
-	0xff230200, 0x06010000, 0x1c000000, 0x00000000,
-	0x02000000, 0x02000000, 0x00001000, 0x00000000,
-	0x07010000, 0x18000000, 0x00000000, 0x02000000,
-	0x00000000, 0x01000000, 0x07010000, 0x18000000,
-	0x00000000, 0x02000000, 0x00000000, 0x02000000,
-	0x07010000, 0x18000000, 0x00000000, 0x02000000,
-	0x00000000, 0x03000000, 0x0d010000, 0x14000000,
-	0x00000000, 0x02000000, 0x00000000, 0xff000000,
-	0x10000000, 0x00000000, 0x00000080,
-};
-
-static inline void __iomem *
-qla27xx_isp_reg(struct scsi_qla_host *vha)
-{
-	return &vha->hw->iobase->isp24;
-}
+#define ISPREG(vha)	(&(vha)->hw->iobase->isp24)
+#define IOBAR(reg)	offsetof(typeof(*(reg)), iobase_addr)
+#define IOBASE(vha)	IOBAR(ISPREG(vha))
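These macros replace the old helper with expression-level equivalents; IOBAR()
in particular derives the register-window offset from the register block's own
layout via offsetof(), so it tracks the struct if the layout ever changes. An
illustrative sketch with a stand-in register block:

	#include <linux/stddef.h>
	#include <linux/types.h>

	struct demo_regs {
		u32 flash_addr;
		u32 flash_data;
		u32 iobase_addr;
		/* ... */
	};

	#define DEMO_IOBAR(reg)	offsetof(typeof(*(reg)), iobase_addr)

	/* for struct demo_regs *r: DEMO_IOBAR(r) == 8 on this layout */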
 
 static inline void
 qla27xx_insert16(uint16_t value, void *buf, ulong *len)
@@ -128,7 +34,6 @@
 static inline void
 qla27xx_insertbuf(void *mem, ulong size, void *buf, ulong *len)
 {
-
 	if (buf && mem && size) {
 		buf += *len;
 		memcpy(buf, mem, size);
@@ -190,9 +95,9 @@
 qla27xx_write_reg(__iomem struct device_reg_24xx *reg,
 	uint offset, uint32_t data, void *buf)
 {
-	__iomem void *window = (void __iomem *)reg + offset;
-
 	if (buf) {
+		void __iomem *window = (void __iomem *)reg + offset;
+
 		WRT_REG_DWORD(window, data);
 	}
 }
@@ -205,7 +110,7 @@
 	void __iomem *window = (void __iomem *)reg + offset;
 	void (*readn)(void __iomem*, void *, ulong *) = qla27xx_read_vector(width);
 
-	qla27xx_write_reg(reg, IOBASE_ADDR, addr, buf);
+	qla27xx_write_reg(reg, IOBAR(reg), addr, buf);
 	while (count--) {
 		qla27xx_insert32(addr, buf, len);
 		readn(window, buf, len);
@@ -221,7 +126,13 @@
 		ent->hdr.driver_flags |= DRIVER_FLAG_SKIP_ENTRY;
 }
 
-static int
+static inline struct qla27xx_fwdt_entry *
+qla27xx_next_entry(struct qla27xx_fwdt_entry *ent)
+{
+	return (void *)ent + le32_to_cpu(ent->hdr.size);
+}
+
+static struct qla27xx_fwdt_entry *
 qla27xx_fwdt_entry_t0(struct scsi_qla_host *vha,
 	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
@@ -229,10 +140,10 @@
 	    "%s: nop [%lx]\n", __func__, *len);
 	qla27xx_skip_entry(ent, buf);
 
-	return false;
+	return qla27xx_next_entry(ent);
 }
 
-static int
+static struct qla27xx_fwdt_entry *
 qla27xx_fwdt_entry_t255(struct scsi_qla_host *vha,
 	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
@@ -241,138 +152,155 @@
 	qla27xx_skip_entry(ent, buf);
 
 	/* terminate */
-	return true;
+	return NULL;
 }
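These signature changes rework the whole template walker: instead of returning
a bool "stop" flag, every handler now returns a pointer to the next entry,
with NULL as the terminator, which also lets handlers such as the conditional
t276 below skip entries. A simplified sketch of the resulting loop shape
(find_handler() is a hypothetical stand-in for the lookup):

	static void demo_walk(struct scsi_qla_host *vha,
			      struct qla27xx_fwdt_entry *ent, ulong count,
			      void *buf, ulong *len)
	{
		while (ent && count--) {
			ulong type = le32_to_cpu(ent->hdr.type);

			/* handler returns the next entry, or NULL to stop */
			ent = find_handler(type)(vha, ent, buf, len);
		}
	}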
 
-static int
+static struct qla27xx_fwdt_entry *
 qla27xx_fwdt_entry_t256(struct scsi_qla_host *vha,
 	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
-	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+	ulong addr = le32_to_cpu(ent->t256.base_addr);
+	uint offset = ent->t256.pci_offset;
+	ulong count = le16_to_cpu(ent->t256.reg_count);
+	uint width = ent->t256.reg_width;
 
 	ql_dbg(ql_dbg_misc, vha, 0xd200,
 	    "%s: rdio t1 [%lx]\n", __func__, *len);
-	qla27xx_read_window(reg, ent->t256.base_addr, ent->t256.pci_offset,
-	    ent->t256.reg_count, ent->t256.reg_width, buf, len);
+	qla27xx_read_window(ISPREG(vha), addr, offset, count, width, buf, len);
 
-	return false;
+	return qla27xx_next_entry(ent);
 }
 
-static int
+static struct qla27xx_fwdt_entry *
 qla27xx_fwdt_entry_t257(struct scsi_qla_host *vha,
 	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
-	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+	ulong addr = le32_to_cpu(ent->t257.base_addr);
+	uint offset = ent->t257.pci_offset;
+	ulong data = le32_to_cpu(ent->t257.write_data);
 
 	ql_dbg(ql_dbg_misc, vha, 0xd201,
 	    "%s: wrio t1 [%lx]\n", __func__, *len);
-	qla27xx_write_reg(reg, IOBASE_ADDR, ent->t257.base_addr, buf);
-	qla27xx_write_reg(reg, ent->t257.pci_offset, ent->t257.write_data, buf);
+	qla27xx_write_reg(ISPREG(vha), IOBASE(vha), addr, buf);
+	qla27xx_write_reg(ISPREG(vha), offset, data, buf);
 
-	return false;
+	return qla27xx_next_entry(ent);
 }
 
-static int
+static struct qla27xx_fwdt_entry *
 qla27xx_fwdt_entry_t258(struct scsi_qla_host *vha,
 	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
-	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+	uint banksel = ent->t258.banksel_offset;
+	ulong bank = le32_to_cpu(ent->t258.bank);
+	ulong addr = le32_to_cpu(ent->t258.base_addr);
+	uint offset = ent->t258.pci_offset;
+	uint count = le16_to_cpu(ent->t258.reg_count);
+	uint width = ent->t258.reg_width;
 
 	ql_dbg(ql_dbg_misc, vha, 0xd202,
 	    "%s: rdio t2 [%lx]\n", __func__, *len);
-	qla27xx_write_reg(reg, ent->t258.banksel_offset, ent->t258.bank, buf);
-	qla27xx_read_window(reg, ent->t258.base_addr, ent->t258.pci_offset,
-	    ent->t258.reg_count, ent->t258.reg_width, buf, len);
+	qla27xx_write_reg(ISPREG(vha), banksel, bank, buf);
+	qla27xx_read_window(ISPREG(vha), addr, offset, count, width, buf, len);
 
-	return false;
+	return qla27xx_next_entry(ent);
 }
 
-static int
+static struct qla27xx_fwdt_entry *
 qla27xx_fwdt_entry_t259(struct scsi_qla_host *vha,
 	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
-	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+	ulong addr = le32_to_cpu(ent->t259.base_addr);
+	uint banksel = ent->t259.banksel_offset;
+	ulong bank = le32_to_cpu(ent->t259.bank);
+	uint offset = ent->t259.pci_offset;
+	ulong data = le32_to_cpu(ent->t259.write_data);
 
 	ql_dbg(ql_dbg_misc, vha, 0xd203,
 	    "%s: wrio t2 [%lx]\n", __func__, *len);
-	qla27xx_write_reg(reg, IOBASE_ADDR, ent->t259.base_addr, buf);
-	qla27xx_write_reg(reg, ent->t259.banksel_offset, ent->t259.bank, buf);
-	qla27xx_write_reg(reg, ent->t259.pci_offset, ent->t259.write_data, buf);
+	qla27xx_write_reg(ISPREG(vha), IOBASE(vha), addr, buf);
+	qla27xx_write_reg(ISPREG(vha), banksel, bank, buf);
+	qla27xx_write_reg(ISPREG(vha), offset, data, buf);
 
-	return false;
+	return qla27xx_next_entry(ent);
 }
 
-static int
+static struct qla27xx_fwdt_entry *
 qla27xx_fwdt_entry_t260(struct scsi_qla_host *vha,
 	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
-	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+	uint offset = ent->t260.pci_offset;
 
 	ql_dbg(ql_dbg_misc, vha, 0xd204,
 	    "%s: rdpci [%lx]\n", __func__, *len);
-	qla27xx_insert32(ent->t260.pci_offset, buf, len);
-	qla27xx_read_reg(reg, ent->t260.pci_offset, buf, len);
+	qla27xx_insert32(offset, buf, len);
+	qla27xx_read_reg(ISPREG(vha), offset, buf, len);
 
-	return false;
+	return qla27xx_next_entry(ent);
 }
 
-static int
+static struct qla27xx_fwdt_entry *
 qla27xx_fwdt_entry_t261(struct scsi_qla_host *vha,
 	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
-	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+	uint offset = ent->t261.pci_offset;
+	ulong data = le32_to_cpu(ent->t261.write_data);
 
 	ql_dbg(ql_dbg_misc, vha, 0xd205,
 	    "%s: wrpci [%lx]\n", __func__, *len);
-	qla27xx_write_reg(reg, ent->t261.pci_offset, ent->t261.write_data, buf);
+	qla27xx_write_reg(ISPREG(vha), offset, data, buf);
 
-	return false;
+	return qla27xx_next_entry(ent);
 }
 
-static int
+static struct qla27xx_fwdt_entry *
 qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
 	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
+	uint area = ent->t262.ram_area;
+	ulong start = le32_to_cpu(ent->t262.start_addr);
+	ulong end = le32_to_cpu(ent->t262.end_addr);
 	ulong dwords;
-	ulong start;
-	ulong end;
 
 	ql_dbg(ql_dbg_misc, vha, 0xd206,
 	    "%s: rdram(%x) [%lx]\n", __func__, ent->t262.ram_area, *len);
-	start = ent->t262.start_addr;
-	end = ent->t262.end_addr;
 
-	if (ent->t262.ram_area == T262_RAM_AREA_CRITICAL_RAM) {
+	if (area == T262_RAM_AREA_CRITICAL_RAM) {
 		;
-	} else if (ent->t262.ram_area == T262_RAM_AREA_EXTERNAL_RAM) {
+	} else if (area == T262_RAM_AREA_EXTERNAL_RAM) {
 		end = vha->hw->fw_memory_size;
 		if (buf)
-			ent->t262.end_addr = end;
-	} else if (ent->t262.ram_area == T262_RAM_AREA_SHARED_RAM) {
+			ent->t262.end_addr = cpu_to_le32(end);
+	} else if (area == T262_RAM_AREA_SHARED_RAM) {
 		start = vha->hw->fw_shared_ram_start;
 		end = vha->hw->fw_shared_ram_end;
 		if (buf) {
-			ent->t262.start_addr = start;
-			ent->t262.end_addr = end;
+			ent->t262.start_addr = cpu_to_le32(start);
+			ent->t262.end_addr = cpu_to_le32(end);
 		}
-	} else if (ent->t262.ram_area == T262_RAM_AREA_DDR_RAM) {
+	} else if (area == T262_RAM_AREA_DDR_RAM) {
 		start = vha->hw->fw_ddr_ram_start;
 		end = vha->hw->fw_ddr_ram_end;
 		if (buf) {
-			ent->t262.start_addr = start;
-			ent->t262.end_addr = end;
+			ent->t262.start_addr = cpu_to_le32(start);
+			ent->t262.end_addr = cpu_to_le32(end);
+		}
+	} else if (area == T262_RAM_AREA_MISC) {
+		if (buf) {
+			ent->t262.start_addr = cpu_to_le32(start);
+			ent->t262.end_addr = cpu_to_le32(end);
 		}
 	} else {
 		ql_dbg(ql_dbg_misc, vha, 0xd022,
-		    "%s: unknown area %x\n", __func__, ent->t262.ram_area);
+		    "%s: unknown area %x\n", __func__, area);
 		qla27xx_skip_entry(ent, buf);
 		goto done;
 	}
 
 	if (end < start || start == 0 || end == 0) {
 		ql_dbg(ql_dbg_misc, vha, 0xd023,
-		    "%s: unusable range (start=%x end=%x)\n", __func__,
-		    ent->t262.end_addr, ent->t262.start_addr);
+		    "%s: unusable range (start=%lx end=%lx)\n",
+		    __func__, start, end);
 		qla27xx_skip_entry(ent, buf);
 		goto done;
 	}
@@ -384,20 +312,21 @@
 	}
 	*len += dwords * sizeof(uint32_t);
 done:
-	return false;
+	return qla27xx_next_entry(ent);
 }
 
-static int
+static struct qla27xx_fwdt_entry *
 qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
 	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
+	uint type = ent->t263.queue_type;
 	uint count = 0;
 	uint i;
 	uint length;
 
-	ql_dbg(ql_dbg_misc, vha, 0xd207,
-	    "%s: getq(%x) [%lx]\n", __func__, ent->t263.queue_type, *len);
-	if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) {
+	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd207,
+	    "%s: getq(%x) [%lx]\n", __func__, type, *len);
+	if (type == T263_QUEUE_TYPE_REQ) {
 		for (i = 0; i < vha->hw->max_req_queues; i++) {
 			struct req_que *req = vha->hw->req_q_map[i];
 
@@ -411,7 +340,7 @@
 				count++;
 			}
 		}
-	} else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) {
+	} else if (type == T263_QUEUE_TYPE_RSP) {
 		for (i = 0; i < vha->hw->max_rsp_queues; i++) {
 			struct rsp_que *rsp = vha->hw->rsp_q_map[i];
 
@@ -439,7 +368,7 @@
 		}
 	} else {
 		ql_dbg(ql_dbg_misc, vha, 0xd026,
-		    "%s: unknown queue %x\n", __func__, ent->t263.queue_type);
+		    "%s: unknown queue %x\n", __func__, type);
 		qla27xx_skip_entry(ent, buf);
 	}
 
@@ -450,10 +379,10 @@
 			qla27xx_skip_entry(ent, buf);
 	}
 
-	return false;
+	return qla27xx_next_entry(ent);
 }
 
-static int
+static struct qla27xx_fwdt_entry *
 qla27xx_fwdt_entry_t264(struct scsi_qla_host *vha,
 	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
@@ -478,49 +407,48 @@
 		qla27xx_skip_entry(ent, buf);
 	}
 
-	return false;
+	return qla27xx_next_entry(ent);
 }
 
-static int
+static struct qla27xx_fwdt_entry *
 qla27xx_fwdt_entry_t265(struct scsi_qla_host *vha,
 	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
-	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
-
-	ql_dbg(ql_dbg_misc, vha, 0xd209,
+	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd209,
 	    "%s: pause risc [%lx]\n", __func__, *len);
 	if (buf)
-		qla24xx_pause_risc(reg, vha->hw);
+		qla24xx_pause_risc(ISPREG(vha), vha->hw);
 
-	return false;
+	return qla27xx_next_entry(ent);
 }
 
-static int
+static struct qla27xx_fwdt_entry *
 qla27xx_fwdt_entry_t266(struct scsi_qla_host *vha,
 	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
 	ql_dbg(ql_dbg_misc, vha, 0xd20a,
 	    "%s: reset risc [%lx]\n", __func__, *len);
 	if (buf)
-		qla24xx_soft_reset(vha->hw);
+		WARN_ON_ONCE(qla24xx_soft_reset(vha->hw) != QLA_SUCCESS);
 
-	return false;
+	return qla27xx_next_entry(ent);
 }
 
-static int
+static struct qla27xx_fwdt_entry *
 qla27xx_fwdt_entry_t267(struct scsi_qla_host *vha,
 	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
-	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+	uint offset = ent->t267.pci_offset;
+	ulong data = le32_to_cpu(ent->t267.data);
 
 	ql_dbg(ql_dbg_misc, vha, 0xd20b,
 	    "%s: dis intr [%lx]\n", __func__, *len);
-	qla27xx_write_reg(reg, ent->t267.pci_offset, ent->t267.data, buf);
+	qla27xx_write_reg(ISPREG(vha), offset, data, buf);
 
-	return false;
+	return qla27xx_next_entry(ent);
 }
 
-static int
+static struct qla27xx_fwdt_entry *
 qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
 	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
@@ -587,10 +515,10 @@
 		break;
 	}
 
-	return false;
+	return qla27xx_next_entry(ent);
 }
 
-static int
+static struct qla27xx_fwdt_entry *
 qla27xx_fwdt_entry_t269(struct scsi_qla_host *vha,
 	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
@@ -604,53 +532,51 @@
 	if (buf)
 		ent->t269.scratch_size = 5 * sizeof(uint32_t);
 
-	return false;
+	return qla27xx_next_entry(ent);
 }
 
-static int
+static struct qla27xx_fwdt_entry *
 qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha,
 	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
-	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
-	ulong dwords = ent->t270.count;
-	ulong addr = ent->t270.addr;
+	ulong addr = le32_to_cpu(ent->t270.addr);
+	ulong dwords = le32_to_cpu(ent->t270.count);
 
 	ql_dbg(ql_dbg_misc, vha, 0xd20e,
 	    "%s: rdremreg [%lx]\n", __func__, *len);
-	qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf);
+	qla27xx_write_reg(ISPREG(vha), IOBASE_ADDR, 0x40, buf);
 	while (dwords--) {
-		qla27xx_write_reg(reg, 0xc0, addr|0x80000000, buf);
+		qla27xx_write_reg(ISPREG(vha), 0xc0, addr|0x80000000, buf);
 		qla27xx_insert32(addr, buf, len);
-		qla27xx_read_reg(reg, 0xc4, buf, len);
+		qla27xx_read_reg(ISPREG(vha), 0xc4, buf, len);
 		addr += sizeof(uint32_t);
 	}
 
-	return false;
+	return qla27xx_next_entry(ent);
 }
 
-static int
+static struct qla27xx_fwdt_entry *
 qla27xx_fwdt_entry_t271(struct scsi_qla_host *vha,
 	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
-	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
-	ulong addr = ent->t271.addr;
-	ulong data = ent->t271.data;
+	ulong addr = le32_to_cpu(ent->t271.addr);
+	ulong data = le32_to_cpu(ent->t271.data);
 
 	ql_dbg(ql_dbg_misc, vha, 0xd20f,
 	    "%s: wrremreg [%lx]\n", __func__, *len);
-	qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf);
-	qla27xx_write_reg(reg, 0xc4, data, buf);
-	qla27xx_write_reg(reg, 0xc0, addr, buf);
+	qla27xx_write_reg(ISPREG(vha), IOBASE(vha), 0x40, buf);
+	qla27xx_write_reg(ISPREG(vha), 0xc4, data, buf);
+	qla27xx_write_reg(ISPREG(vha), 0xc0, addr, buf);
 
-	return false;
+	return qla27xx_next_entry(ent);
 }
 
-static int
+static struct qla27xx_fwdt_entry *
 qla27xx_fwdt_entry_t272(struct scsi_qla_host *vha,
 	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
-	ulong dwords = ent->t272.count;
-	ulong start = ent->t272.addr;
+	ulong dwords = le32_to_cpu(ent->t272.count);
+	ulong start = le32_to_cpu(ent->t272.addr);
 
 	ql_dbg(ql_dbg_misc, vha, 0xd210,
 	    "%s: rdremram [%lx]\n", __func__, *len);
@@ -662,15 +588,15 @@
 	}
 	*len += dwords * sizeof(uint32_t);
 
-	return false;
+	return qla27xx_next_entry(ent);
 }
 
-static int
+static struct qla27xx_fwdt_entry *
 qla27xx_fwdt_entry_t273(struct scsi_qla_host *vha,
 	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
-	ulong dwords = ent->t273.count;
-	ulong addr = ent->t273.addr;
+	ulong dwords = le32_to_cpu(ent->t273.count);
+	ulong addr = le32_to_cpu(ent->t273.addr);
 	uint32_t value;
 
 	ql_dbg(ql_dbg_misc, vha, 0xd211,
@@ -685,19 +611,20 @@
 		addr += sizeof(uint32_t);
 	}
 
-	return false;
+	return qla27xx_next_entry(ent);
 }
 
-static int
+static struct qla27xx_fwdt_entry *
 qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
 	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
+	ulong type = ent->t274.queue_type;
 	uint count = 0;
 	uint i;
 
-	ql_dbg(ql_dbg_misc, vha, 0xd212,
-	    "%s: getqsh(%x) [%lx]\n", __func__, ent->t274.queue_type, *len);
-	if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) {
+	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd212,
+	    "%s: getqsh(%lx) [%lx]\n", __func__, type, *len);
+	if (type == T274_QUEUE_TYPE_REQ_SHAD) {
 		for (i = 0; i < vha->hw->max_req_queues; i++) {
 			struct req_que *req = vha->hw->req_q_map[i];
 
@@ -709,7 +636,7 @@
 				count++;
 			}
 		}
-	} else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) {
+	} else if (type == T274_QUEUE_TYPE_RSP_SHAD) {
 		for (i = 0; i < vha->hw->max_rsp_queues; i++) {
 			struct rsp_que *rsp = vha->hw->rsp_q_map[i];
 
@@ -735,7 +662,7 @@
 		}
 	} else {
 		ql_dbg(ql_dbg_misc, vha, 0xd02f,
-		    "%s: unknown queue %x\n", __func__, ent->t274.queue_type);
+		    "%s: unknown queue %lx\n", __func__, type);
 		qla27xx_skip_entry(ent, buf);
 	}
 
@@ -746,85 +673,146 @@
 			qla27xx_skip_entry(ent, buf);
 	}
 
-	return false;
+	return qla27xx_next_entry(ent);
 }
 
-static int
+static struct qla27xx_fwdt_entry *
 qla27xx_fwdt_entry_t275(struct scsi_qla_host *vha,
 	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
 	ulong offset = offsetof(typeof(*ent), t275.buffer);
+	ulong length = le32_to_cpu(ent->t275.length);
+	ulong size = le32_to_cpu(ent->hdr.size);
+	void *buffer = ent->t275.buffer;
 
-	ql_dbg(ql_dbg_misc, vha, 0xd213,
-	    "%s: buffer(%x) [%lx]\n", __func__, ent->t275.length, *len);
-	if (!ent->t275.length) {
+	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd213,
+	    "%s: buffer(%lx) [%lx]\n", __func__, length, *len);
+	if (!length) {
 		ql_dbg(ql_dbg_misc, vha, 0xd020,
 		    "%s: buffer zero length\n", __func__);
 		qla27xx_skip_entry(ent, buf);
 		goto done;
 	}
-	if (offset + ent->t275.length > ent->hdr.entry_size) {
+	if (offset + length > size) {
+		length = size - offset;
 		ql_dbg(ql_dbg_misc, vha, 0xd030,
-		    "%s: buffer overflow\n", __func__);
-		qla27xx_skip_entry(ent, buf);
-		goto done;
+		    "%s: buffer overflow, truncate [%lx]\n", __func__, length);
+		ent->t275.length = cpu_to_le32(length);
 	}
 
-	qla27xx_insertbuf(ent->t275.buffer, ent->t275.length, buf, len);
+	qla27xx_insertbuf(buffer, length, buf, len);
 done:
-	return false;
+	return qla27xx_next_entry(ent);
 }
 
-static int
+static struct qla27xx_fwdt_entry *
+qla27xx_fwdt_entry_t276(struct scsi_qla_host *vha,
+    struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd214,
+	    "%s: cond [%lx]\n", __func__, *len);
+
+	if (buf) {
+		ulong cond1 = le32_to_cpu(ent->t276.cond1);
+		ulong cond2 = le32_to_cpu(ent->t276.cond2);
+		uint type = vha->hw->pdev->device >> 4 & 0xf;
+		uint func = vha->hw->port_no & 0x3;
+
+		if (type != cond1 || func != cond2) {
+			struct qla27xx_fwdt_template *tmp = buf;
+
+			tmp->count--;
+			ent = qla27xx_next_entry(ent);
+			qla27xx_skip_entry(ent, buf);
+		}
+	}
+
+	return qla27xx_next_entry(ent);
+}
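The cond1/cond2 decoding above is compact; a hedged worked example (the
device-ID encoding is an assumption for illustration only):

	/* assume PCI device ID 0x2281 on port 1 of a two-port adapter */
	uint type = 0x2281 >> 4 & 0xf;	/* == 0x8 */
	uint func = 1 & 0x3;		/* == 1   */
	/* the following entry is kept only if cond1 == 0x8 and cond2 == 1 */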
+
+static struct qla27xx_fwdt_entry *
+qla27xx_fwdt_entry_t277(struct scsi_qla_host *vha,
+    struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	ulong cmd_addr = le32_to_cpu(ent->t277.cmd_addr);
+	ulong wr_cmd_data = le32_to_cpu(ent->t277.wr_cmd_data);
+	ulong data_addr = le32_to_cpu(ent->t277.data_addr);
+
+	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd215,
+	    "%s: rdpep [%lx]\n", __func__, *len);
+	qla27xx_insert32(wr_cmd_data, buf, len);
+	qla27xx_write_reg(ISPREG(vha), cmd_addr, wr_cmd_data, buf);
+	qla27xx_read_reg(ISPREG(vha), data_addr, buf, len);
+
+	return qla27xx_next_entry(ent);
+}
+
+static struct qla27xx_fwdt_entry *
+qla27xx_fwdt_entry_t278(struct scsi_qla_host *vha,
+    struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	ulong cmd_addr = le32_to_cpu(ent->t278.cmd_addr);
+	ulong wr_cmd_data = le32_to_cpu(ent->t278.wr_cmd_data);
+	ulong data_addr = le32_to_cpu(ent->t278.data_addr);
+	ulong wr_data = le32_to_cpu(ent->t278.wr_data);
+
+	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd216,
+	    "%s: wrpep [%lx]\n", __func__, *len);
+	qla27xx_write_reg(ISPREG(vha), data_addr, wr_data, buf);
+	qla27xx_write_reg(ISPREG(vha), cmd_addr, wr_cmd_data, buf);
+
+	return qla27xx_next_entry(ent);
+}
+
+static struct qla27xx_fwdt_entry *
 qla27xx_fwdt_entry_other(struct scsi_qla_host *vha,
 	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
+	ulong type = le32_to_cpu(ent->hdr.type);
+
 	ql_dbg(ql_dbg_misc, vha, 0xd2ff,
-	    "%s: type %x [%lx]\n", __func__, ent->hdr.entry_type, *len);
+	    "%s: other %lx [%lx]\n", __func__, type, *len);
 	qla27xx_skip_entry(ent, buf);
 
-	return false;
+	return qla27xx_next_entry(ent);
 }
 
-struct qla27xx_fwdt_entry_call {
+static struct {
 	uint type;
-	int (*call)(
-	    struct scsi_qla_host *,
-	    struct qla27xx_fwdt_entry *,
-	    void *,
-	    ulong *);
+	typeof(qla27xx_fwdt_entry_other)(*call);
+} qla27xx_fwdt_entry_call[] = {
+	{ ENTRY_TYPE_NOP,		qla27xx_fwdt_entry_t0    },
+	{ ENTRY_TYPE_TMP_END,		qla27xx_fwdt_entry_t255  },
+	{ ENTRY_TYPE_RD_IOB_T1,		qla27xx_fwdt_entry_t256  },
+	{ ENTRY_TYPE_WR_IOB_T1,		qla27xx_fwdt_entry_t257  },
+	{ ENTRY_TYPE_RD_IOB_T2,		qla27xx_fwdt_entry_t258  },
+	{ ENTRY_TYPE_WR_IOB_T2,		qla27xx_fwdt_entry_t259  },
+	{ ENTRY_TYPE_RD_PCI,		qla27xx_fwdt_entry_t260  },
+	{ ENTRY_TYPE_WR_PCI,		qla27xx_fwdt_entry_t261  },
+	{ ENTRY_TYPE_RD_RAM,		qla27xx_fwdt_entry_t262  },
+	{ ENTRY_TYPE_GET_QUEUE,		qla27xx_fwdt_entry_t263  },
+	{ ENTRY_TYPE_GET_FCE,		qla27xx_fwdt_entry_t264  },
+	{ ENTRY_TYPE_PSE_RISC,		qla27xx_fwdt_entry_t265  },
+	{ ENTRY_TYPE_RST_RISC,		qla27xx_fwdt_entry_t266  },
+	{ ENTRY_TYPE_DIS_INTR,		qla27xx_fwdt_entry_t267  },
+	{ ENTRY_TYPE_GET_HBUF,		qla27xx_fwdt_entry_t268  },
+	{ ENTRY_TYPE_SCRATCH,		qla27xx_fwdt_entry_t269  },
+	{ ENTRY_TYPE_RDREMREG,		qla27xx_fwdt_entry_t270  },
+	{ ENTRY_TYPE_WRREMREG,		qla27xx_fwdt_entry_t271  },
+	{ ENTRY_TYPE_RDREMRAM,		qla27xx_fwdt_entry_t272  },
+	{ ENTRY_TYPE_PCICFG,		qla27xx_fwdt_entry_t273  },
+	{ ENTRY_TYPE_GET_SHADOW,	qla27xx_fwdt_entry_t274  },
+	{ ENTRY_TYPE_WRITE_BUF,		qla27xx_fwdt_entry_t275  },
+	{ ENTRY_TYPE_CONDITIONAL,	qla27xx_fwdt_entry_t276  },
+	{ ENTRY_TYPE_RDPEPREG,		qla27xx_fwdt_entry_t277  },
+	{ ENTRY_TYPE_WRPEPREG,		qla27xx_fwdt_entry_t278  },
+	{ -1,				qla27xx_fwdt_entry_other }
 };
 
-static struct qla27xx_fwdt_entry_call ql27xx_fwdt_entry_call_list[] = {
-	{ ENTRY_TYPE_NOP		, qla27xx_fwdt_entry_t0    } ,
-	{ ENTRY_TYPE_TMP_END		, qla27xx_fwdt_entry_t255  } ,
-	{ ENTRY_TYPE_RD_IOB_T1		, qla27xx_fwdt_entry_t256  } ,
-	{ ENTRY_TYPE_WR_IOB_T1		, qla27xx_fwdt_entry_t257  } ,
-	{ ENTRY_TYPE_RD_IOB_T2		, qla27xx_fwdt_entry_t258  } ,
-	{ ENTRY_TYPE_WR_IOB_T2		, qla27xx_fwdt_entry_t259  } ,
-	{ ENTRY_TYPE_RD_PCI		, qla27xx_fwdt_entry_t260  } ,
-	{ ENTRY_TYPE_WR_PCI		, qla27xx_fwdt_entry_t261  } ,
-	{ ENTRY_TYPE_RD_RAM		, qla27xx_fwdt_entry_t262  } ,
-	{ ENTRY_TYPE_GET_QUEUE		, qla27xx_fwdt_entry_t263  } ,
-	{ ENTRY_TYPE_GET_FCE		, qla27xx_fwdt_entry_t264  } ,
-	{ ENTRY_TYPE_PSE_RISC		, qla27xx_fwdt_entry_t265  } ,
-	{ ENTRY_TYPE_RST_RISC		, qla27xx_fwdt_entry_t266  } ,
-	{ ENTRY_TYPE_DIS_INTR		, qla27xx_fwdt_entry_t267  } ,
-	{ ENTRY_TYPE_GET_HBUF		, qla27xx_fwdt_entry_t268  } ,
-	{ ENTRY_TYPE_SCRATCH		, qla27xx_fwdt_entry_t269  } ,
-	{ ENTRY_TYPE_RDREMREG		, qla27xx_fwdt_entry_t270  } ,
-	{ ENTRY_TYPE_WRREMREG		, qla27xx_fwdt_entry_t271  } ,
-	{ ENTRY_TYPE_RDREMRAM		, qla27xx_fwdt_entry_t272  } ,
-	{ ENTRY_TYPE_PCICFG		, qla27xx_fwdt_entry_t273  } ,
-	{ ENTRY_TYPE_GET_SHADOW		, qla27xx_fwdt_entry_t274  } ,
-	{ ENTRY_TYPE_WRITE_BUF		, qla27xx_fwdt_entry_t275  } ,
-	{ -1				, qla27xx_fwdt_entry_other }
-};
-
-static inline int (*qla27xx_find_entry(uint type))
-	(struct scsi_qla_host *, struct qla27xx_fwdt_entry *, void *, ulong *)
+static inline
+typeof(qla27xx_fwdt_entry_call->call)(qla27xx_find_entry(uint type))
 {
-	struct qla27xx_fwdt_entry_call *list = ql27xx_fwdt_entry_call_list;
+	typeof(*qla27xx_fwdt_entry_call) *list = qla27xx_fwdt_entry_call;
 
 	while (list->type < type)
 		list++;
@@ -834,50 +822,31 @@
 	return qla27xx_fwdt_entry_other;
 }
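The table and lookup above lean on GNU C typeof() to avoid spelling out the
four-argument function-pointer type twice: the member declaration
typeof(qla27xx_fwdt_entry_other)(*call) reads as "pointer to a function of the
same type as that handler". The { -1, ... } sentinel works because type is
unsigned, so -1 compares as UINT_MAX and the scan always stops there. A
minimal sketch of the idiom:

	static int demo_model(int x)	/* the "shape" being reused */
	{
		return x;
	}

	static struct {
		int key;
		typeof(demo_model) *call;	/* i.e. int (*call)(int) */
	} demo_table[] = {
		{ 1, demo_model },
	};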
 
-static inline void *
-qla27xx_next_entry(void *p)
-{
-	struct qla27xx_fwdt_entry *ent = p;
-
-	return p + ent->hdr.entry_size;
-}
-
 static void
 qla27xx_walk_template(struct scsi_qla_host *vha,
 	struct qla27xx_fwdt_template *tmp, void *buf, ulong *len)
 {
-	struct qla27xx_fwdt_entry *ent = (void *)tmp + tmp->entry_offset;
-	ulong count = tmp->entry_count;
+	struct qla27xx_fwdt_entry *ent = (void *)tmp +
+	    le32_to_cpu(tmp->entry_offset);
+	ulong type;
 
+	tmp->count = le32_to_cpu(tmp->entry_count);
 	ql_dbg(ql_dbg_misc, vha, 0xd01a,
-	    "%s: entry count %lx\n", __func__, count);
-	while (count--) {
-		if (buf && *len >= vha->hw->fw_dump_len)
+	    "%s: entry count %u\n", __func__, tmp->count);
+	while (ent && tmp->count--) {
+		type = le32_to_cpu(ent->hdr.type);
+		ent = qla27xx_find_entry(type)(vha, ent, buf, len);
+		if (!ent)
 			break;
-		if (qla27xx_find_entry(ent->hdr.entry_type)(vha, ent, buf, len))
-			break;
-		ent = qla27xx_next_entry(ent);
 	}
 
-	if (count)
+	if (tmp->count)
 		ql_dbg(ql_dbg_misc, vha, 0xd018,
-		    "%s: entry residual count (%lx)\n", __func__, count);
+		    "%s: entry count residual=+%u\n", __func__, tmp->count);
 
-	if (ent->hdr.entry_type != ENTRY_TYPE_TMP_END)
+	if (ent)
 		ql_dbg(ql_dbg_misc, vha, 0xd019,
-		    "%s: missing end entry (%lx)\n", __func__, count);
-
-	if (buf && *len != vha->hw->fw_dump_len)
-		ql_dbg(ql_dbg_misc, vha, 0xd01b,
-		    "%s: length=%#lx residual=%+ld\n",
-		    __func__, *len, vha->hw->fw_dump_len - *len);
-
-	if (buf) {
-		ql_log(ql_log_warn, vha, 0xd015,
-		    "Firmware dump saved to temp buffer (%lu/%p)\n",
-		    vha->host_no, vha->hw->fw_dump);
-		qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
-	}
+		    "%s: missing end entry\n", __func__);
 }
 
 static void
@@ -891,8 +860,9 @@
 {
 	uint8_t v[] = { 0, 0, 0, 0, 0, 0 };
 
-	sscanf(qla2x00_version_str, "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu",
-	    v+0, v+1, v+2, v+3, v+4, v+5);
+	WARN_ON_ONCE(sscanf(qla2x00_version_str,
+			    "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu",
+			    v+0, v+1, v+2, v+3, v+4, v+5) != 6);
 
 	tmp->driver_info[0] = v[3] << 24 | v[2] << 16 | v[1] << 8 | v[0];
 	tmp->driver_info[1] = v[5] << 8 | v[4];
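Wrapping the sscanf() above in WARN_ON_ONCE() puts its return value, the
number of fields successfully converted, to use: a malformed version string
now leaves a one-time warning in the log instead of passing silently. Sketch:

	uint8_t v[6] = { 0 };

	if (sscanf("10.01.00.19", "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu",
		   v, v + 1, v + 2, v + 3, v + 4, v + 5) != 6)
		pr_warn("short version string\n");	/* only 4 fields convert here */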
@@ -900,8 +870,8 @@
 }
 
 static void
-qla27xx_firmware_info(struct qla27xx_fwdt_template *tmp,
-	struct scsi_qla_host *vha)
+qla27xx_firmware_info(struct scsi_qla_host *vha,
+    struct qla27xx_fwdt_template *tmp)
 {
 	tmp->firmware_version[0] = vha->hw->fw_major_version;
 	tmp->firmware_version[1] = vha->hw->fw_minor_version;
@@ -918,19 +888,19 @@
 {
 	qla27xx_time_stamp(tmp);
 	qla27xx_driver_info(tmp);
-	qla27xx_firmware_info(tmp, vha);
+	qla27xx_firmware_info(vha, tmp);
 }
 
 static inline uint32_t
 qla27xx_template_checksum(void *p, ulong size)
 {
-	uint32_t *buf = p;
+	__le32 *buf = p;
 	uint64_t sum = 0;
 
 	size /= sizeof(*buf);
 
-	while (size--)
-		sum += *buf++;
+	for ( ; size--; buf++)
+		sum += le32_to_cpu(*buf);
 
 	sum = (sum & 0xffffffff) + (sum >> 32);
 
@@ -946,29 +916,29 @@
 static inline int
 qla27xx_verify_template_header(struct qla27xx_fwdt_template *tmp)
 {
-	return tmp->template_type == TEMPLATE_TYPE_FWDUMP;
+	return le32_to_cpu(tmp->template_type) == TEMPLATE_TYPE_FWDUMP;
 }
 
-static void
-qla27xx_execute_fwdt_template(struct scsi_qla_host *vha)
+static ulong
+qla27xx_execute_fwdt_template(struct scsi_qla_host *vha,
+    struct qla27xx_fwdt_template *tmp, void *buf)
 {
-	struct qla27xx_fwdt_template *tmp = vha->hw->fw_dump_template;
-	ulong len;
+	ulong len = 0;
 
 	if (qla27xx_fwdt_template_valid(tmp)) {
 		len = tmp->template_size;
-		tmp = memcpy(vha->hw->fw_dump, tmp, len);
+		tmp = memcpy(buf, tmp, len);
 		ql27xx_edit_template(vha, tmp);
-		qla27xx_walk_template(vha, tmp, tmp, &len);
-		vha->hw->fw_dump_len = len;
-		vha->hw->fw_dumped = 1;
+		qla27xx_walk_template(vha, tmp, buf, &len);
 	}
+
+	return len;
 }
 
 ulong
-qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha)
+qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha, void *p)
 {
-	struct qla27xx_fwdt_template *tmp = vha->hw->fw_dump_template;
+	struct qla27xx_fwdt_template *tmp = p;
 	ulong len = 0;
 
 	if (qla27xx_fwdt_template_valid(tmp)) {
@@ -987,18 +957,6 @@
 	return tmp->template_size;
 }
 
-ulong
-qla27xx_fwdt_template_default_size(void)
-{
-	return sizeof(ql27xx_fwdt_default_template);
-}
-
-const void *
-qla27xx_fwdt_template_default(void)
-{
-	return ql27xx_fwdt_default_template;
-}
-
 int
 qla27xx_fwdt_template_valid(void *p)
 {
@@ -1006,7 +964,8 @@
 
 	if (!qla27xx_verify_template_header(tmp)) {
 		ql_log(ql_log_warn, NULL, 0xd01c,
-		    "%s: template type %x\n", __func__, tmp->template_type);
+		    "%s: template type %x\n", __func__,
+		    le32_to_cpu(tmp->template_type));
 		return false;
 	}
 
@@ -1029,17 +988,41 @@
 		spin_lock_irqsave(&vha->hw->hardware_lock, flags);
 #endif
 
-	if (!vha->hw->fw_dump)
-		ql_log(ql_log_warn, vha, 0xd01e, "fwdump buffer missing.\n");
-	else if (!vha->hw->fw_dump_template)
-		ql_log(ql_log_warn, vha, 0xd01f, "fwdump template missing.\n");
-	else if (vha->hw->fw_dumped)
-		ql_log(ql_log_warn, vha, 0xd300,
-		    "Firmware has been previously dumped (%p),"
-		    " -- ignoring request\n", vha->hw->fw_dump);
-	else {
-		QLA_FW_STOPPED(vha->hw);
-		qla27xx_execute_fwdt_template(vha);
+	if (!vha->hw->fw_dump) {
+		ql_log(ql_log_warn, vha, 0xd01e, "-> fwdump no buffer\n");
+	} else if (vha->hw->fw_dumped) {
+		ql_log(ql_log_warn, vha, 0xd01f,
+		    "-> Firmware already dumped (%p) -- ignoring request\n",
+		    vha->hw->fw_dump);
+	} else {
+		struct fwdt *fwdt = vha->hw->fwdt;
+		uint j;
+		ulong len;
+		void *buf = vha->hw->fw_dump;
+
+		for (j = 0; j < 2; j++, fwdt++, buf += len) {
+			ql_log(ql_log_warn, vha, 0xd011,
+			    "-> fwdt%u running...\n", j);
+			if (!fwdt->template) {
+				ql_log(ql_log_warn, vha, 0xd012,
+				    "-> fwdt%u no template\n", j);
+				break;
+			}
+			len = qla27xx_execute_fwdt_template(vha,
+			    fwdt->template, buf);
+			if (len != fwdt->dump_size) {
+				ql_log(ql_log_warn, vha, 0xd013,
+				    "-> fwdt%u fwdump residual=%+ld\n",
+				    j, fwdt->dump_size - len);
+			}
+		}
+		vha->hw->fw_dump_len = buf - (void *)vha->hw->fw_dump;
+		vha->hw->fw_dumped = 1;
+
+		ql_log(ql_log_warn, vha, 0xd015,
+		    "-> Firmware dump saved to buffer (%lu/%p) <%lx>\n",
+		    vha->host_no, vha->hw->fw_dump, vha->hw->fw_dump_cap_flags);
+		qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
 	}
 
 #ifndef __CHECKER__
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.h b/drivers/scsi/qla2xxx/qla_tmpl.h
index 141c1c5..d2a0014 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.h
+++ b/drivers/scsi/qla2xxx/qla_tmpl.h
@@ -11,12 +11,12 @@
 #define IOBASE_ADDR	offsetof(struct device_reg_24xx, iobase_addr)
 
 struct __packed qla27xx_fwdt_template {
-	uint32_t template_type;
-	uint32_t entry_offset;
+	__le32 template_type;
+	__le32 entry_offset;
 	uint32_t template_size;
-	uint32_t reserved_1;
+	uint32_t count;		/* borrow field for running/residual count */
 
-	uint32_t entry_count;
+	__le32 entry_count;
 	uint32_t template_version;
 	uint32_t capture_timestamp;
 	uint32_t template_checksum;
@@ -54,6 +54,9 @@
 #define ENTRY_TYPE_PCICFG		273
 #define ENTRY_TYPE_GET_SHADOW		274
 #define ENTRY_TYPE_WRITE_BUF		275
+#define ENTRY_TYPE_CONDITIONAL		276
+#define ENTRY_TYPE_RDPEPREG		277
+#define ENTRY_TYPE_WRPEPREG		278
 
 #define CAPTURE_FLAG_PHYS_ONLY		BIT_0
 #define CAPTURE_FLAG_PHYS_VIRT		BIT_1
@@ -62,8 +65,8 @@
 
 struct __packed qla27xx_fwdt_entry {
 	struct __packed {
-		uint32_t entry_type;
-		uint32_t entry_size;
+		__le32 type;
+		__le32 size;
 		uint32_t reserved_1;
 
 		uint8_t  capture_flags;
@@ -78,36 +81,36 @@
 		} t255;
 
 		struct __packed {
-			uint32_t base_addr;
+			__le32 base_addr;
 			uint8_t  reg_width;
-			uint16_t reg_count;
+			__le16 reg_count;
 			uint8_t  pci_offset;
 		} t256;
 
 		struct __packed {
-			uint32_t base_addr;
-			uint32_t write_data;
+			__le32 base_addr;
+			__le32 write_data;
 			uint8_t  pci_offset;
 			uint8_t  reserved[3];
 		} t257;
 
 		struct __packed {
-			uint32_t base_addr;
+			__le32 base_addr;
 			uint8_t  reg_width;
-			uint16_t reg_count;
+			__le16 reg_count;
 			uint8_t  pci_offset;
 			uint8_t  banksel_offset;
 			uint8_t  reserved[3];
-			uint32_t bank;
+			__le32 bank;
 		} t258;
 
 		struct __packed {
-			uint32_t base_addr;
-			uint32_t write_data;
+			__le32 base_addr;
+			__le32 write_data;
 			uint8_t  reserved[2];
 			uint8_t  pci_offset;
 			uint8_t  banksel_offset;
-			uint32_t bank;
+			__le32 bank;
 		} t259;
 
 		struct __packed {
@@ -118,14 +121,14 @@
 		struct __packed {
 			uint8_t pci_offset;
 			uint8_t reserved[3];
-			uint32_t write_data;
+			__le32 write_data;
 		} t261;
 
 		struct __packed {
 			uint8_t  ram_area;
 			uint8_t  reserved[3];
-			uint32_t start_addr;
-			uint32_t end_addr;
+			__le32 start_addr;
+			__le32 end_addr;
 		} t262;
 
 		struct __packed {
@@ -155,7 +158,7 @@
 		struct __packed {
 			uint8_t  pci_offset;
 			uint8_t  reserved[3];
-			uint32_t data;
+			__le32 data;
 		} t267;
 
 		struct __packed {
@@ -170,23 +173,23 @@
 		} t269;
 
 		struct __packed {
-			uint32_t addr;
-			uint32_t count;
+			__le32 addr;
+			__le32 count;
 		} t270;
 
 		struct __packed {
-			uint32_t addr;
-			uint32_t data;
+			__le32 addr;
+			__le32 data;
 		} t271;
 
 		struct __packed {
-			uint32_t addr;
-			uint32_t count;
+			__le32 addr;
+			__le32 count;
 		} t272;
 
 		struct __packed {
-			uint32_t addr;
-			uint32_t count;
+			__le32 addr;
+			__le32 count;
 		} t273;
 
 		struct __packed {
@@ -196,9 +199,27 @@
 		} t274;
 
 		struct __packed {
-			uint32_t length;
+			__le32 length;
 			uint8_t  buffer[];
 		} t275;
+
+		struct __packed {
+			__le32 cond1;
+			__le32 cond2;
+		} t276;
+
+		struct __packed {
+			__le32 cmd_addr;
+			__le32 wr_cmd_data;
+			__le32 data_addr;
+		} t277;
+
+		struct __packed {
+			__le32 cmd_addr;
+			__le32 wr_cmd_data;
+			__le32 data_addr;
+			__le32 wr_data;
+		} t278;
 	};
 };
 
@@ -206,6 +227,7 @@
 #define T262_RAM_AREA_EXTERNAL_RAM	2
 #define T262_RAM_AREA_SHARED_RAM	3
 #define T262_RAM_AREA_DDR_RAM		4
+#define T262_RAM_AREA_MISC		5
 
 #define T263_QUEUE_TYPE_REQ		1
 #define T263_QUEUE_TYPE_RSP		2
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 3850b28..a8f2a95 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "10.00.00.08-k"
+#define QLA2XXX_VERSION      "10.01.00.19-k"
 
 #define QLA_DRIVER_MAJOR_VER	10
-#define QLA_DRIVER_MINOR_VER	0
+#define QLA_DRIVER_MINOR_VER	1
 #define QLA_DRIVER_PATCH_VER	0
 #define QLA_DRIVER_BETA_VER	0
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 64e2d85..042a2431 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*******************************************************************************
  * This file contains tcm implementation using v4 configfs fabric infrastructure
  * for QLogic target mode HBAs
@@ -11,35 +12,20 @@
  *
  * Copyright (c) 2010 Cisco Systems, Inc
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
  ****************************************************************************/
 
 
 #include <linux/module.h>
-#include <linux/moduleparam.h>
 #include <linux/utsname.h>
 #include <linux/vmalloc.h>
-#include <linux/init.h>
 #include <linux/list.h>
 #include <linux/slab.h>
-#include <linux/kthread.h>
 #include <linux/types.h>
 #include <linux/string.h>
 #include <linux/configfs.h>
 #include <linux/ctype.h>
 #include <asm/unaligned.h>
-#include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_cmnd.h>
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 
@@ -108,11 +94,6 @@
 		b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
 }
 
-static char *tcm_qla2xxx_get_fabric_name(void)
-{
-	return "qla2xxx";
-}
-
 /*
  * From drivers/scsi/scsi_transport_fc.c:fc_parse_wwn
  */
@@ -178,11 +159,6 @@
 	return 0;
 }
 
-static char *tcm_qla2xxx_npiv_get_fabric_name(void)
-{
-	return "qla2xxx_npiv";
-}
-
 static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg)
 {
 	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
@@ -282,8 +258,11 @@
 
 	WARN_ON(cmd->trc_flags & TRC_CMD_FREE);
 
+	/* To do: protect all tgt_counters manipulations with proper locking. */
 	cmd->qpair->tgt_counters.qla_core_ret_sta_ctio++;
 	cmd->trc_flags |= TRC_CMD_FREE;
+	cmd->cmd_sent_to_fw = 0;
+
 	transport_generic_free_cmd(&cmd->se_cmd, 0);
 }
 
@@ -332,8 +311,11 @@
 		qlt_free_mcmd(mcmd);
 		return;
 	}
-
 	cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
+
+	if (WARN_ON(cmd->cmd_sent_to_fw))
+		return;
+
 	qlt_free_cmd(cmd);
 }
 
@@ -350,7 +332,6 @@
 	if (!sess)
 		return;
 
-	assert_spin_locked(&sess->vha->hw->tgt.sess_lock);
 	kref_put(&sess->sess_kref, tcm_qla2xxx_release_session);
 }
 
@@ -365,8 +346,9 @@
 
 	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
 	target_sess_cmd_list_set_waiting(se_sess);
-	tcm_qla2xxx_put_sess(sess);
 	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+	tcm_qla2xxx_put_sess(sess);
 }
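Moving tcm_qla2xxx_put_sess() outside the lock above matters because the final
kref_put() can invoke a release callback that is not safe under a spinlock.
The general shape, with hypothetical names:

	/* do the lock-protected bookkeeping, unlock, then drop the reference;
	 * never drop the last reference while holding the lock if the release
	 * callback may sleep or retake the same lock */
	spin_lock_irqsave(&lock, flags);
	/* ... state changes that need the lock ... */
	spin_unlock_irqrestore(&lock, flags);

	kref_put(&obj->ref, demo_release);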
 
 static u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess)
@@ -390,6 +372,8 @@
 			cmd->se_cmd.transport_state,
 			cmd->se_cmd.t_state,
 			cmd->se_cmd.se_cmd_flags);
+		transport_generic_request_failure(&cmd->se_cmd,
+			TCM_CHECK_CONDITION_ABORT_CMD);
 		return 0;
 	}
 	cmd->trc_flags |= TRC_XFR_RDY;
@@ -405,32 +389,12 @@
 	se_cmd->pi_err = 0;
 
 	/*
-	 * qla_target.c:qlt_rdy_to_xfer() will call pci_map_sg() to setup
+	 * qla_target.c:qlt_rdy_to_xfer() will call dma_map_sg() to setup
 	 * the SGL mappings into PCIe memory for incoming FCP WRITE data.
 	 */
 	return qlt_rdy_to_xfer(cmd);
 }
 
-static int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd)
-{
-	unsigned long flags;
-	/*
-	 * Check for WRITE_PENDING status to determine if we need to wait for
-	 * CTIO aborts to be posted via hardware in tcm_qla2xxx_handle_data().
-	 */
-	spin_lock_irqsave(&se_cmd->t_state_lock, flags);
-	if (se_cmd->t_state == TRANSPORT_WRITE_PENDING ||
-	    se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) {
-		spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
-		wait_for_completion_timeout(&se_cmd->t_transport_stop_comp,
-						50);
-		return 0;
-	}
-	spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
-
-	return 0;
-}
-
 static void tcm_qla2xxx_set_default_node_attrs(struct se_node_acl *nacl)
 {
 	return;
@@ -505,18 +469,15 @@
 	 * Otherwise return an exception via CHECK_CONDITION status.
 	 */
 	cmd->cmd_in_wq = 0;
+	cmd->cmd_sent_to_fw = 0;
+	if (cmd->aborted) {
+		transport_generic_request_failure(&cmd->se_cmd,
+			TCM_CHECK_CONDITION_ABORT_CMD);
+		return;
+	}
 
 	cmd->qpair->tgt_counters.qla_core_ret_ctio++;
 	if (!cmd->write_data_transferred) {
-		/*
-		 * Check if se_cmd has already been aborted via LUN_RESET, and
-		 * waiting upon completion in tcm_qla2xxx_write_pending_status()
-		 */
-		if (cmd->se_cmd.transport_state & CMD_T_ABORTED) {
-			complete(&cmd->se_cmd.t_transport_stop_comp);
-			return;
-		}
-
 		switch (cmd->dif_err_code) {
 		case DIF_ERR_GRD:
 			cmd->se_cmd.pi_err =
@@ -659,6 +620,7 @@
 {
 	struct qla_tgt_cmd *cmd = container_of(se_cmd,
 				struct qla_tgt_cmd, se_cmd);
+	struct scsi_qla_host *vha = cmd->vha;
 
 	if (cmd->aborted) {
 		/* Cmd can loop during Q-full.  tcm_qla2xxx_aborted_task
@@ -671,6 +633,7 @@
 			cmd->se_cmd.transport_state,
 			cmd->se_cmd.t_state,
 			cmd->se_cmd.se_cmd_flags);
+		vha->hw->tgt.tgt_ops->free_cmd(cmd);
 		return 0;
 	}
 
@@ -698,6 +661,7 @@
 {
 	struct qla_tgt_cmd *cmd = container_of(se_cmd,
 				struct qla_tgt_cmd, se_cmd);
+	struct scsi_qla_host *vha = cmd->vha;
 	int xmit_type = QLA_TGT_XMIT_STATUS;
 
 	if (cmd->aborted) {
@@ -711,6 +675,7 @@
 		    cmd, kref_read(&cmd->se_cmd.cmd_kref),
 		    cmd->se_cmd.transport_state, cmd->se_cmd.t_state,
 		    cmd->se_cmd.se_cmd_flags);
+		vha->hw->tgt.tgt_ops->free_cmd(cmd);
 		return 0;
 	}
 	cmd->bufflen = se_cmd->data_length;
@@ -829,7 +794,6 @@
 
 static void tcm_qla2xxx_shutdown_sess(struct fc_port *sess)
 {
-	assert_spin_locked(&sess->vha->hw->tgt.sess_lock);
 	target_sess_cmd_list_set_waiting(sess->se_sess);
 }
 
@@ -925,38 +889,14 @@
 			atomic_read(&tpg->lport_tpg_enabled));
 }
 
-static void tcm_qla2xxx_depend_tpg(struct work_struct *work)
-{
-	struct tcm_qla2xxx_tpg *base_tpg = container_of(work,
-				struct tcm_qla2xxx_tpg, tpg_base_work);
-	struct se_portal_group *se_tpg = &base_tpg->se_tpg;
-	struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha;
-
-	if (!target_depend_item(&se_tpg->tpg_group.cg_item)) {
-		atomic_set(&base_tpg->lport_tpg_enabled, 1);
-		qlt_enable_vha(base_vha);
-	}
-	complete(&base_tpg->tpg_base_comp);
-}
-
-static void tcm_qla2xxx_undepend_tpg(struct work_struct *work)
-{
-	struct tcm_qla2xxx_tpg *base_tpg = container_of(work,
-				struct tcm_qla2xxx_tpg, tpg_base_work);
-	struct se_portal_group *se_tpg = &base_tpg->se_tpg;
-	struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha;
-
-	if (!qlt_stop_phase1(base_vha->vha_tgt.qla_tgt)) {
-		atomic_set(&base_tpg->lport_tpg_enabled, 0);
-		target_undepend_item(&se_tpg->tpg_group.cg_item);
-	}
-	complete(&base_tpg->tpg_base_comp);
-}
-
 static ssize_t tcm_qla2xxx_tpg_enable_store(struct config_item *item,
 		const char *page, size_t count)
 {
 	struct se_portal_group *se_tpg = to_tpg(item);
+	struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
+	struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
+			struct tcm_qla2xxx_lport, lport_wwn);
+	struct scsi_qla_host *vha = lport->qla_vha;
 	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
 			struct tcm_qla2xxx_tpg, se_tpg);
 	unsigned long op;
@@ -975,24 +915,16 @@
 		if (atomic_read(&tpg->lport_tpg_enabled))
 			return -EEXIST;
 
-		INIT_WORK(&tpg->tpg_base_work, tcm_qla2xxx_depend_tpg);
+		atomic_set(&tpg->lport_tpg_enabled, 1);
+		qlt_enable_vha(vha);
 	} else {
 		if (!atomic_read(&tpg->lport_tpg_enabled))
 			return count;
 
-		INIT_WORK(&tpg->tpg_base_work, tcm_qla2xxx_undepend_tpg);
+		atomic_set(&tpg->lport_tpg_enabled, 0);
+		qlt_stop_phase1(vha->vha_tgt.qla_tgt);
 	}
-	init_completion(&tpg->tpg_base_comp);
-	schedule_work(&tpg->tpg_base_work);
-	wait_for_completion(&tpg->tpg_base_comp);
 
-	if (op) {
-		if (!atomic_read(&tpg->lport_tpg_enabled))
-			return -ENODEV;
-	} else {
-		if (atomic_read(&tpg->lport_tpg_enabled))
-			return -EPERM;
-	}
 	return count;
 }
 
@@ -1208,9 +1140,8 @@
 /*
  * Expected to be called with struct qla_hw_data->tgt.sess_lock held
  */
-static struct fc_port *tcm_qla2xxx_find_sess_by_s_id(
-	scsi_qla_host_t *vha,
-	const uint8_t *s_id)
+static struct fc_port *tcm_qla2xxx_find_sess_by_s_id(scsi_qla_host_t *vha,
+						     const be_id_t s_id)
 {
 	struct tcm_qla2xxx_lport *lport;
 	struct se_node_acl *se_nacl;
@@ -1253,7 +1184,7 @@
 	struct tcm_qla2xxx_nacl *nacl,
 	struct se_session *se_sess,
 	struct fc_port *fc_port,
-	uint8_t *s_id)
+	be_id_t s_id)
 {
 	u32 key;
 	void *slot;
@@ -1420,14 +1351,9 @@
 		struct tcm_qla2xxx_nacl *nacl, struct fc_port *sess)
 {
 	struct se_session *se_sess = sess->se_sess;
-	unsigned char be_sid[3];
-
-	be_sid[0] = sess->d_id.b.domain;
-	be_sid[1] = sess->d_id.b.area;
-	be_sid[2] = sess->d_id.b.al_pa;
 
 	tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess,
-				sess, be_sid);
+				     sess, port_id_to_be_id(sess->d_id));
 	tcm_qla2xxx_set_sess_by_loop_id(lport, NULL, nacl, se_sess,
 				sess, sess->loop_id);
 }
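port_id_to_be_id() replaces the open-coded be_sid[3] packing that this hunk
deletes; its presumed shape (an assumption, not shown in this patch) is a
straight field-by-field copy:

	static inline be_id_t demo_port_id_to_be_id(port_id_t port_id)
	{
		be_id_t res;

		res.domain = port_id.b.domain;
		res.area = port_id.b.area;
		res.al_pa = port_id.b.al_pa;
		return res;
	}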
@@ -1473,19 +1399,14 @@
 	struct fc_port *qlat_sess = p;
 	uint16_t loop_id = qlat_sess->loop_id;
 	unsigned long flags;
-	unsigned char be_sid[3];
-
-	be_sid[0] = qlat_sess->d_id.b.domain;
-	be_sid[1] = qlat_sess->d_id.b.area;
-	be_sid[2] = qlat_sess->d_id.b.al_pa;
 
 	/*
 	 * And now setup se_nacl and session pointers into HW lport internal
 	 * mappings for fabric S_ID and LOOP_ID.
 	 */
 	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
-	tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl,
-				     se_sess, qlat_sess, be_sid);
+	tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl, se_sess, qlat_sess,
+				     port_id_to_be_id(qlat_sess->d_id));
 	tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl,
 					se_sess, qlat_sess, loop_id);
 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
@@ -1521,7 +1442,7 @@
 	 */
 	tpg = lport->tpg_1;
 	if (!tpg) {
-		pr_err("Unable to lcoate struct tcm_qla2xxx_lport->tpg_1\n");
+		pr_err("Unable to locate struct tcm_qla2xxx_lport->tpg_1\n");
 		return -EINVAL;
 	}
 	/*
@@ -1881,14 +1802,13 @@
 
 static const struct target_core_fabric_ops tcm_qla2xxx_ops = {
 	.module				= THIS_MODULE,
-	.name				= "qla2xxx",
+	.fabric_name			= "qla2xxx",
 	.node_acl_size			= sizeof(struct tcm_qla2xxx_nacl),
 	/*
 	 * XXX: Limit assumes single page per scatter-gather-list entry.
 	 * Current maximum is ~4.9 MB per se_cmd->t_data_sg with PAGE_SIZE=4096
 	 */
 	.max_data_sg_nents		= 1200,
-	.get_fabric_name		= tcm_qla2xxx_get_fabric_name,
 	.tpg_get_wwn			= tcm_qla2xxx_get_fabric_wwn,
 	.tpg_get_tag			= tcm_qla2xxx_get_tag,
 	.tpg_check_demo_mode		= tcm_qla2xxx_check_demo_mode,
@@ -1906,7 +1826,6 @@
 	.sess_get_index			= tcm_qla2xxx_sess_get_index,
 	.sess_get_initiator_sid		= NULL,
 	.write_pending			= tcm_qla2xxx_write_pending,
-	.write_pending_status		= tcm_qla2xxx_write_pending_status,
 	.set_default_node_attributes	= tcm_qla2xxx_set_default_node_attrs,
 	.get_cmd_state			= tcm_qla2xxx_get_cmd_state,
 	.queue_data_in			= tcm_qla2xxx_queue_data_in,
@@ -1930,9 +1849,8 @@
 
 static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
 	.module				= THIS_MODULE,
-	.name				= "qla2xxx_npiv",
+	.fabric_name			= "qla2xxx_npiv",
 	.node_acl_size			= sizeof(struct tcm_qla2xxx_nacl),
-	.get_fabric_name		= tcm_qla2xxx_npiv_get_fabric_name,
 	.tpg_get_wwn			= tcm_qla2xxx_get_fabric_wwn,
 	.tpg_get_tag			= tcm_qla2xxx_get_tag,
 	.tpg_check_demo_mode		= tcm_qla2xxx_check_demo_mode,
@@ -1948,7 +1866,6 @@
 	.sess_get_index			= tcm_qla2xxx_sess_get_index,
 	.sess_get_initiator_sid		= NULL,
 	.write_pending			= tcm_qla2xxx_write_pending,
-	.write_pending_status		= tcm_qla2xxx_write_pending_status,
 	.set_default_node_attributes	= tcm_qla2xxx_set_default_node_attrs,
 	.get_cmd_state			= tcm_qla2xxx_get_cmd_state,
 	.queue_data_in			= tcm_qla2xxx_queue_data_in,
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
index 7550ba2..147cf6c 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -48,9 +48,6 @@
 	struct tcm_qla2xxx_tpg_attrib tpg_attrib;
 	/* Returned by tcm_qla2xxx_make_tpg() */
 	struct se_portal_group se_tpg;
-	/* Items for dealing with configfs_depend_item */
-	struct completion tpg_base_comp;
-	struct work_struct tpg_base_work;
 };
 
 struct tcm_qla2xxx_fc_loopid {
diff --git a/drivers/scsi/qla4xxx/Kconfig b/drivers/scsi/qla4xxx/Kconfig
index e4dc7c7..4bdf31b 100644
--- a/drivers/scsi/qla4xxx/Kconfig
+++ b/drivers/scsi/qla4xxx/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 config SCSI_QLA_ISCSI
 	tristate "QLogic ISP4XXX and ISP82XX host adapter family support"
 	depends on PCI && SCSI && NET
diff --git a/drivers/scsi/qla4xxx/Makefile b/drivers/scsi/qla4xxx/Makefile
index 4230977..1f8a909 100644
--- a/drivers/scsi/qla4xxx/Makefile
+++ b/drivers/scsi/qla4xxx/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 qla4xxx-y := ql4_os.o ql4_init.o ql4_mbx.o ql4_iocb.o ql4_isr.o \
 		ql4_nx.o ql4_nvram.o ql4_dbg.o ql4_attr.o ql4_bsg.o ql4_83xx.o
 
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 52b1a0b..2bf5e3e 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -153,8 +153,8 @@
 	dma_addr_t sys_info_dma;
 	int status = QLA_ERROR;
 
-	sys_info = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
-				       &sys_info_dma, GFP_KERNEL);
+	sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
+				      &sys_info_dma, GFP_KERNEL);
 	if (sys_info == NULL) {
 		DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
 			      ha->host_no, __func__));
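
The dma_zalloc_coherent() → dma_alloc_coherent() conversions in this file and
in ql4_mbx.c, ql4_nx.c and ql4_os.c below are mechanical: dma_alloc_coherent()
now always returns zeroed memory, so the _zalloc wrapper was removed from the
kernel. A minimal sketch of the resulting pattern (dev, size and handle are
generic placeholders, not names from this driver):

	void *buf = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	/* buf is already zero-filled; no memset() needed */
	dma_free_coherent(dev, size, buf, handle);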
@@ -766,12 +766,10 @@
 	while (drvr_wait) {
 		if (ql4xxx_lock_drvr(a) == 0) {
 			ssleep(QL4_LOCK_DRVR_SLEEP);
-			if (drvr_wait) {
-				DEBUG2(printk("scsi%ld: %s: Waiting for "
-					      "Global Init Semaphore(%d)...\n",
-					      a->host_no,
-					      __func__, drvr_wait));
-			}
+			DEBUG2(printk("scsi%ld: %s: Waiting for "
+				      "Global Init Semaphore(%d)...\n",
+				      a->host_no,
+				      __func__, drvr_wait));
 			drvr_wait -= QL4_LOCK_DRVR_SLEEP;
 		} else {
 			DEBUG2(printk("scsi%ld: %s: Global Init Semaphore "
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 5d56904..dac9a70 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -625,9 +625,9 @@
 	uint32_t mbox_sts[MBOX_REG_COUNT];
 	int status = QLA_ERROR;
 
-	init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev,
-					 sizeof(struct addr_ctrl_blk),
-					 &init_fw_cb_dma, GFP_KERNEL);
+	init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
+					sizeof(struct addr_ctrl_blk),
+					&init_fw_cb_dma, GFP_KERNEL);
 	if (init_fw_cb == NULL) {
 		DEBUG2(printk("scsi%ld: %s: Unable to alloc init_cb\n",
 			      ha->host_no, __func__));
@@ -709,9 +709,9 @@
 	uint32_t mbox_cmd[MBOX_REG_COUNT];
 	uint32_t mbox_sts[MBOX_REG_COUNT];
 
-	init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev,
-					 sizeof(struct addr_ctrl_blk),
-					 &init_fw_cb_dma, GFP_KERNEL);
+	init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
+					sizeof(struct addr_ctrl_blk),
+					&init_fw_cb_dma, GFP_KERNEL);
 	if (init_fw_cb == NULL) {
 		printk("scsi%ld: %s: Unable to alloc init_cb\n", ha->host_no,
 		       __func__);
@@ -1340,9 +1340,9 @@
 	uint32_t mbox_sts[MBOX_REG_COUNT];
 	int status = QLA_ERROR;
 
-	about_fw = dma_zalloc_coherent(&ha->pdev->dev,
-				       sizeof(struct about_fw_info),
-				       &about_fw_dma, GFP_KERNEL);
+	about_fw = dma_alloc_coherent(&ha->pdev->dev,
+				      sizeof(struct about_fw_info),
+				      &about_fw_dma, GFP_KERNEL);
 	if (!about_fw) {
 		DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to alloc memory "
 				  "for about_fw\n", __func__));
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index d2b333d..5a31877 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -4052,8 +4052,8 @@
 	dma_addr_t sys_info_dma;
 	int status = QLA_ERROR;
 
-	sys_info = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
-				       &sys_info_dma, GFP_KERNEL);
+	sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
+				      &sys_info_dma, GFP_KERNEL);
 	if (sys_info == NULL) {
 		DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
 		    ha->host_no, __func__));
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 0e13349..8c674ec 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -205,7 +205,6 @@
 
 	.this_id		= -1,
 	.cmd_per_lun		= 3,
-	.use_clustering		= ENABLE_CLUSTERING,
 	.sg_tablesize		= SG_ALL,
 
 	.max_sectors		= 0xFFFF,
@@ -2705,9 +2704,9 @@
 	uint32_t rem = len;
 	struct nlattr *attr;
 
-	init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev,
-					 sizeof(struct addr_ctrl_blk),
-					 &init_fw_cb_dma, GFP_KERNEL);
+	init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
+					sizeof(struct addr_ctrl_blk),
+					&init_fw_cb_dma, GFP_KERNEL);
 	if (!init_fw_cb) {
 		ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n",
 			   __func__);
@@ -2876,7 +2875,7 @@
 						chap_tbl.secret_len);
 			}
 		}
-		/* allow fall-through */
+		/* fall through */
 	default:
 		return iscsi_session_get_param(cls_sess, param, buf);
 	}
@@ -3204,6 +3203,8 @@
 	if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
 		return -EINVAL;
 	ep = iscsi_lookup_endpoint(transport_fd);
+	if (!ep)
+		return -EINVAL;
 	conn = cls_conn->dd_data;
 	qla_conn = conn->dd_data;
 	qla_conn->qla_ep = ep->dd_data;
@@ -3382,7 +3383,7 @@
 	if (task->data_count) {
 		task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data,
 						     task->data_count,
-						     PCI_DMA_TODEVICE);
+						     DMA_TO_DEVICE);
 	}
 
 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n",
@@ -3437,7 +3438,7 @@
 
 	if (task->data_count) {
 		dma_unmap_single(&ha->pdev->dev, task_data->data_dma,
-				 task->data_count, PCI_DMA_TODEVICE);
+				 task->data_count, DMA_TO_DEVICE);
 	}
 
 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n",
@@ -4160,20 +4161,16 @@
 	ha->fw_dump_size = 0;
 
 	/* Free srb pool. */
-	if (ha->srb_mempool)
-		mempool_destroy(ha->srb_mempool);
-
+	mempool_destroy(ha->srb_mempool);
 	ha->srb_mempool = NULL;
 
-	if (ha->chap_dma_pool)
-		dma_pool_destroy(ha->chap_dma_pool);
+	dma_pool_destroy(ha->chap_dma_pool);
 
 	if (ha->chap_list)
 		vfree(ha->chap_list);
 	ha->chap_list = NULL;
 
-	if (ha->fw_ddb_dma_pool)
-		dma_pool_destroy(ha->fw_ddb_dma_pool);
+	dma_pool_destroy(ha->fw_ddb_dma_pool);
 
 	/* release io space registers  */
 	if (is_qla8022(ha)) {
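
The if-guards deleted above were redundant: mempool_destroy() and
dma_pool_destroy(), like kfree(), are no-ops on a NULL pointer, so the
unconditional teardown is safe:

	mempool_destroy(ha->srb_mempool);	/* no-op when the pool is NULL */
	ha->srb_mempool = NULL;			/* guard against double destroy */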
@@ -4211,8 +4208,8 @@
 			  sizeof(struct shadow_regs) +
 			  MEM_ALIGN_VALUE +
 			  (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
-	ha->queues = dma_zalloc_coherent(&ha->pdev->dev, ha->queues_len,
-					 &ha->queues_dma, GFP_KERNEL);
+	ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len,
+					&ha->queues_dma, GFP_KERNEL);
 	if (ha->queues == NULL) {
 		ql4_printk(KERN_WARNING, ha,
 		    "Memory Allocation failed - queues.\n");
@@ -5933,7 +5930,7 @@
 		val = rd_nvram_byte(ha, sec_addr);
 		if (val & BIT_7)
 			ddb_index[1] = (val & 0x7f);
-
+		goto exit_boot_info;
 	} else if (is_qla80XX(ha)) {
 		buf = dma_alloc_coherent(&ha->pdev->dev, size,
 					 &buf_dma, GFP_KERNEL);
@@ -7237,6 +7234,8 @@
 
 	rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn,
 					   fw_ddb_entry);
+	if (rc)
+		goto free_sess;
 
 	ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
 		   __func__, fnode_sess->dev.kobj.name);
@@ -9020,25 +9019,16 @@
 /**
  * qla4xxx_config_dma_addressing() - Configure OS DMA addressing method.
  * @ha: HA context
- *
- * At exit, the @ha's flags.enable_64bit_addressing set to indicated
- * supported addressing method.
  */
 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
 {
-	int retval;
-
 	/* Update our PCI device dma_mask for full 64 bit mask */
-	if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64)) == 0) {
-		if (pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
-			dev_dbg(&ha->pdev->dev,
-				  "Failed to set 64 bit PCI consistent mask; "
-				   "using 32 bit.\n");
-			retval = pci_set_consistent_dma_mask(ha->pdev,
-							     DMA_BIT_MASK(32));
-		}
-	} else
-		retval = pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32));
+	if (dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(64))) {
+		dev_dbg(&ha->pdev->dev,
+			  "Failed to set 64 bit PCI consistent mask; "
+			   "using 32 bit.\n");
+		dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(32));
+	}
 }
 
 static int qla4xxx_slave_alloc(struct scsi_device *sdev)
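
dma_set_mask_and_coherent() sets the streaming and coherent DMA masks in a
single call, replacing the old pci_set_dma_mask()/pci_set_consistent_dma_mask()
pair. The usual fallback idiom, sketched for a generic PCI driver — the hunk
above tolerates the 32-bit fallback failing, while stricter drivers bail out:

	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
		return -ENODEV;	/* no usable DMA addressing at all */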
@@ -9824,7 +9814,6 @@
 		     __func__);
 	}
 
-	pci_cleanup_aer_uncorrect_error_status(pdev);
 	clear_bit(AF_EEH_BUSY, &ha->flags);
 }
 
diff --git a/drivers/scsi/qlogicfas.c b/drivers/scsi/qlogicfas.c
index 95431d6..8f70900 100644
--- a/drivers/scsi/qlogicfas.c
+++ b/drivers/scsi/qlogicfas.c
@@ -193,7 +193,7 @@
 	.can_queue		= 1,
 	.this_id		= -1,
 	.sg_tablesize		= SG_ALL,
-	.use_clustering		= DISABLE_CLUSTERING,
+	.dma_boundary		= PAGE_SIZE - 1,
 };
 
 static __init int qlogicfas_init(void)
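
The use_clustering flag is gone from scsi_host_template: DISABLE_CLUSTERING
semantics are now spelled as a dma_boundary of PAGE_SIZE - 1 (no segment may
cross a page boundary), while ENABLE_CLUSTERING is simply the default and its
line is deleted, as in qlogicpti.c and scsi_debug.c below. A hedged sketch of
a template using the new field (example_template is hypothetical):

	static struct scsi_host_template example_template = {
		.module		= THIS_MODULE,
		.name		= "example",
		.sg_tablesize	= SG_ALL,
		/* was: .use_clustering = DISABLE_CLUSTERING */
		.dma_boundary	= PAGE_SIZE - 1,
	};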
diff --git a/drivers/scsi/qlogicfas408.c b/drivers/scsi/qlogicfas408.c
index 8b471a9..136681a 100644
--- a/drivers/scsi/qlogicfas408.c
+++ b/drivers/scsi/qlogicfas408.c
@@ -139,7 +139,7 @@
 	} else {		/* out */
 #if QL_TURBO_PDMA
 		rtrc(4)
-		    if (reqlen >= 128 && inb(qbase + 8) & 0x10) {	/* empty */
+		if (reqlen >= 128 && inb(qbase + 8) & 0x10) {	/* empty */
 			outsl(qbase + 4, request, 32);
 			reqlen -= 128;
 			request += 128;
@@ -240,7 +240,7 @@
 	outb(0x40 | qlcfg8 | priv->qinitid, qbase + 8);
 	outb(qlcfg7, qbase + 7);
 	outb(qlcfg6, qbase + 6);
-	 /**/ outb(qlcfg5, qbase + 5);	/* select timer */
+	outb(qlcfg5, qbase + 5);	/* select timer */
 	outb(qlcfg9 & 7, qbase + 9);	/* prescaler */
 /*	outb(0x99, qbase + 5);	*/
 	outb(scmd_id(cmd), qbase + 4);
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 9d09228..d539bee 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /* qlogicpti.c: Performance Technologies QlogicISP sbus card driver.
  *
  * Copyright (C) 1996, 2006, 2008 David S. Miller (davem@davemloft.net)
@@ -199,10 +200,15 @@
 	/* Write mailbox command registers. */
 	switch (mbox_param[param[0]] >> 4) {
 	case 6: sbus_writew(param[5], qpti->qregs + MBOX5);
+		/* Fall through */
 	case 5: sbus_writew(param[4], qpti->qregs + MBOX4);
+		/* Fall through */
 	case 4: sbus_writew(param[3], qpti->qregs + MBOX3);
+		/* Fall through */
 	case 3: sbus_writew(param[2], qpti->qregs + MBOX2);
+		/* Fall through */
 	case 2: sbus_writew(param[1], qpti->qregs + MBOX1);
+		/* Fall through */
 	case 1: sbus_writew(param[0], qpti->qregs + MBOX0);
 	}
 
@@ -253,10 +259,15 @@
 	/* Read back output parameters. */
 	switch (mbox_param[param[0]] & 0xf) {
 	case 6: param[5] = sbus_readw(qpti->qregs + MBOX5);
+		/* Fall through */
 	case 5: param[4] = sbus_readw(qpti->qregs + MBOX4);
+		/* Fall through */
 	case 4: param[3] = sbus_readw(qpti->qregs + MBOX3);
+		/* Fall through */
 	case 3: param[2] = sbus_readw(qpti->qregs + MBOX2);
+		/* Fall through */
 	case 2: param[1] = sbus_readw(qpti->qregs + MBOX1);
+		/* Fall through */
 	case 1: param[0] = sbus_readw(qpti->qregs + MBOX0);
 	}
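
The added /* Fall through */ comments mark every intentional case fall-through
for GCC's -Wimplicit-fallthrough, which kernel builds now enable; the
descending cascade deliberately writes only as many mailbox registers as the
command needs. Minimal illustration (write_reg() is a hypothetical helper):

	switch (nregs) {
	case 2: write_reg(1, param[1]);
		/* Fall through */
	case 1: write_reg(0, param[0]);
		break;
	}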
 
@@ -1287,7 +1298,6 @@
 	.can_queue		= QLOGICPTI_REQ_QUEUE_LEN,
 	.this_id		= 7,
 	.sg_tablesize		= QLOGICPTI_MAX_SG(QLOGICPTI_REQ_QUEUE_LEN),
-	.use_clustering		= ENABLE_CLUSTERING,
 };
 
 static const struct of_device_id qpti_match[];
@@ -1315,8 +1325,7 @@
 	qpti->qhost = host;
 	qpti->op = op;
 	qpti->qpti_id = nqptis;
-	strcpy(qpti->prom_name, op->dev.of_node->name);
-	qpti->is_pti = strcmp(qpti->prom_name, "QLGC,isp");
+	qpti->is_pti = !of_node_name_eq(op->dev.of_node, "QLGC,isp");
 
 	if (qpti_map_regs(qpti) < 0)
 		goto fail_unlink;
diff --git a/drivers/scsi/qlogicpti.h b/drivers/scsi/qlogicpti.h
index 884ad72..2b6374e 100644
--- a/drivers/scsi/qlogicpti.h
+++ b/drivers/scsi/qlogicpti.h
@@ -364,7 +364,6 @@
 	int                       qpti_id;
 	int                       scsi_id;
 	int                       prom_node;
-	char                      prom_name[64];
 	int                       irq;
 	char                      differential, ultra, clock;
 	unsigned char             bursts;
@@ -379,7 +378,7 @@
 #define SREG_IMASK                0x0c   /* Interrupt level            */
 #define SREG_SPMASK               0x03   /* Mask for switch pack       */
 	unsigned char             swsreg;
-	unsigned int	
+	unsigned int
 		gotirq	:	1,	/* this instance got an irq */
 		is_pti	: 	1;	/* Non-zero if this is a PTI board. */
 };
diff --git a/drivers/scsi/raid_class.c b/drivers/scsi/raid_class.c
index 5c3d6e1..898a0bd 100644
--- a/drivers/scsi/raid_class.c
+++ b/drivers/scsi/raid_class.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * raid_class.c - implementation of a simple raid visualisation class
  *
  * Copyright (c) 2005 - James Bottomley <James.Bottomley@steeleye.com>
  *
- * This file is licensed under GPLv2
- *
  * This class is designed to allow raid attributes to be visualised and
  * manipulated in a form independent of the underlying raid.  Ultimately this
  * should work for both hardware and software raids.
diff --git a/drivers/scsi/script_asm.pl b/drivers/scsi/script_asm.pl
index 7d651d9..0300f4c 100644
--- a/drivers/scsi/script_asm.pl
+++ b/drivers/scsi/script_asm.pl
@@ -1,4 +1,5 @@
 #!/usr/bin/perl -s
+# SPDX-License-Identifier: GPL-2.0-or-later
 
 # NCR 53c810 script assembler
 # Sponsored by 
@@ -13,20 +14,6 @@
 #   Support for 53c710 (via -ncr7x0_family switch) added by Richard
 #   Hirst <richard@sleepie.demon.co.uk> - 15th March 1997
 #
-#   This program is free software; you can redistribute it and/or modify
-#   it under the terms of the GNU General Public License as published by
-#   the Free Software Foundation; either version 2 of the License, or
-#   (at your option) any later version.
-#
-#   This program is distributed in the hope that it will be useful,
-#   but WITHOUT ANY WARRANTY; without even the implied warranty of
-#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-#   GNU General Public License for more details.
-#
-#   You should have received a copy of the GNU General Public License
-#   along with this program; if not, write to the Free Software
-#   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-#
 # TolerANT and SCSI SCRIPTS are registered trademarks of NCR Corporation.
 #
 
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index fc1356d..1f5b5c8 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  *  scsi.c Copyright (C) 1992 Drew Eckhardt
  *         Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
@@ -85,15 +86,10 @@
 EXPORT_SYMBOL(scsi_logging_level);
 #endif
 
-/* sd, scsi core and power management need to coordinate flushing async actions */
-ASYNC_DOMAIN(scsi_sd_probe_domain);
-EXPORT_SYMBOL(scsi_sd_probe_domain);
-
 /*
- * Separate domain (from scsi_sd_probe_domain) to maximize the benefit of
- * asynchronous system resume operations.  It is marked 'exclusive' to avoid
- * being included in the async_synchronize_full() that is invoked by
- * dpm_resume()
+ * Domain for asynchronous system resume operations.  It is marked 'exclusive'
+ * to avoid being included in the async_synchronize_full() that is invoked by
+ * dpm_resume().
  */
 ASYNC_DOMAIN_EXCLUSIVE(scsi_sd_pm_domain);
 EXPORT_SYMBOL(scsi_sd_pm_domain);
@@ -175,22 +171,6 @@
 #endif
 
 /**
- * scsi_cmd_get_serial - Assign a serial number to a command
- * @host: the scsi host
- * @cmd: command to assign serial number to
- *
- * Description: a serial number identifies a request for error recovery
- * and debugging purposes.  Protected by the Host_Lock of host.
- */
-void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd)
-{
-	cmd->serial_number = host->cmd_serial_number++;
-	if (cmd->serial_number == 0) 
-		cmd->serial_number = host->cmd_serial_number++;
-}
-EXPORT_SYMBOL(scsi_cmd_get_serial);
-
-/**
  * scsi_finish_command - cleanup and pass command back to upper layer
  * @cmd: the command
  *
@@ -780,11 +760,8 @@
 module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");
 
-#ifdef CONFIG_SCSI_MQ_DEFAULT
+/* This should go away in the future; it no longer does anything */
 bool scsi_use_blk_mq = true;
-#else
-bool scsi_use_blk_mq = false;
-#endif
 module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO);
 
 static int __init init_scsi(void)
@@ -839,7 +816,6 @@
 	scsi_exit_devinfo();
 	scsi_exit_procfs();
 	scsi_exit_queue();
-	async_unregister_domain(&scsi_sd_probe_domain);
 }
 
 subsys_initcall(init_scsi);
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 60bcc6d..d323523 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
  *  Copyright (C) 1992  Eric Youngdale
@@ -8,13 +9,7 @@
  *
  * Copyright (C) 2001 - 2018 Douglas Gilbert
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
  *  For documentation see http://sg.danny.cz/sg/sdebug26.html
- *
  */
 
 
@@ -62,7 +57,7 @@
 
 /* make sure inq_product_rev string corresponds to this version */
 #define SDEBUG_VERSION "0188"	/* format to fit INQUIRY revision field */
-static const char *sdebug_version_date = "20180128";
+static const char *sdebug_version_date = "20190125";
 
 #define MY_NAME "scsi_debug"
 
@@ -76,6 +71,7 @@
 #define LBA_OUT_OF_RANGE 0x21
 #define INVALID_FIELD_IN_CDB 0x24
 #define INVALID_FIELD_IN_PARAM_LIST 0x26
+#define WRITE_PROTECTED 0x27
 #define UA_RESET_ASC 0x29
 #define UA_CHANGED_ASC 0x2a
 #define TARGET_CHANGED_ASC 0x3f
@@ -351,12 +347,11 @@
 	SDEB_I_ATA_PT = 22,		/* 12, 16 */
 	SDEB_I_SEND_DIAG = 23,
 	SDEB_I_UNMAP = 24,
-	SDEB_I_XDWRITEREAD = 25,	/* 10 only */
-	SDEB_I_WRITE_BUFFER = 26,
-	SDEB_I_WRITE_SAME = 27,		/* 10, 16 */
-	SDEB_I_SYNC_CACHE = 28,		/* 10, 16 */
-	SDEB_I_COMP_WRITE = 29,
-	SDEB_I_LAST_ELEMENT = 30,	/* keep this last (previous + 1) */
+	SDEB_I_WRITE_BUFFER = 25,
+	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
+	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
+	SDEB_I_COMP_WRITE = 28,
+	SDEB_I_LAST_ELEMENT = 29,	/* keep this last (previous + 1) */
 };
 
 
@@ -377,7 +372,7 @@
 /* 0x40; 0x40->0x5f: 10 byte cdbs */
 	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
 	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
-	0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
+	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
 	    SDEB_I_RELEASE,
 	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
@@ -430,7 +425,6 @@
 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
-static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
@@ -600,9 +594,6 @@
 	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
 	    {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
 /* 25 */
-	{0, 0x53, 0, F_D_IN | F_D_OUT | FF_MEDIA_IO, resp_xdwriteread_10,
-	    NULL, {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
-		   0, 0, 0, 0, 0, 0} },		/* XDWRITEREAD(10) */
 	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
 	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
 	     0, 0, 0, 0} },			/* WRITE_BUFFER */
@@ -618,7 +609,7 @@
 	    {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
 	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
 
-/* 30 */
+/* 29 */
 	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 };
@@ -673,6 +664,7 @@
 static bool have_dif_prot;
 static bool write_since_sync;
 static bool sdebug_statistics = DEF_STATISTICS;
+static bool sdebug_wp;
 
 static unsigned int sdebug_store_sectors;
 static sector_t sdebug_capacity;	/* in sectors */
@@ -735,7 +727,7 @@
 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
 }
 
-static void *fake_store(unsigned long long lba)
+static void *lba2fake_store(unsigned long long lba)
 {
 	lba = do_div(lba, sdebug_store_sectors);
 
@@ -836,7 +828,8 @@
 	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
 }
 
-static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
+static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
+			    void __user *arg)
 {
 	if (sdebug_verbose) {
 		if (0x1261 == cmd)
@@ -1010,16 +1003,16 @@
 				int arr_len)
 {
 	int act_len;
-	struct scsi_data_buffer *sdb = scsi_in(scp);
+	struct scsi_data_buffer *sdb = &scp->sdb;
 
 	if (!sdb->length)
 		return 0;
-	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
+	if (scp->sc_data_direction != DMA_FROM_DEVICE)
 		return DID_ERROR << 16;
 
 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
 				      arr, arr_len);
-	sdb->resid = scsi_bufflen(scp) - act_len;
+	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
 
 	return 0;
 }
@@ -1033,20 +1026,21 @@
 				  int arr_len, unsigned int off_dst)
 {
 	int act_len, n;
-	struct scsi_data_buffer *sdb = scsi_in(scp);
+	struct scsi_data_buffer *sdb = &scp->sdb;
 	off_t skip = off_dst;
 
 	if (sdb->length <= off_dst)
 		return 0;
-	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
+	if (scp->sc_data_direction != DMA_FROM_DEVICE)
 		return DID_ERROR << 16;
 
 	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
 				       arr, arr_len, skip);
 	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
-		 __func__, off_dst, scsi_bufflen(scp), act_len, sdb->resid);
+		 __func__, off_dst, scsi_bufflen(scp), act_len,
+		 scsi_get_resid(scp));
 	n = (int)scsi_bufflen(scp) - ((int)off_dst + act_len);
-	sdb->resid = min(sdb->resid, n);
+	scsi_set_resid(scp, min(scsi_get_resid(scp), n));
 	return 0;
 }
 
@@ -1058,7 +1052,7 @@
 {
 	if (!scsi_bufflen(scp))
 		return 0;
-	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
+	if (scp->sc_data_direction != DMA_TO_DEVICE)
 		return -1;
 
 	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
@@ -2146,9 +2140,11 @@
 	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
 			(devip->target * 1000) - 3;
 	/* for disks set DPOFUA bit and clear write protect (WP) bit */
-	if (is_disk)
+	if (is_disk) {
 		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
-	else
+		if (sdebug_wp)
+			dev_spec |= 0x80;
+	} else
 		dev_spec = 0x0;
 	if (msense_6) {
 		arr[2] = dev_spec;
@@ -2331,6 +2327,10 @@
 		if (ctrl_m_pg[1] == arr[off + 1]) {
 			memcpy(ctrl_m_pg + 2, arr + off + 2,
 			       sizeof(ctrl_m_pg) - 2);
+			if (ctrl_m_pg[4] & 0x8)
+				sdebug_wp = true;
+			else
+				sdebug_wp = false;
 			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
 			goto set_mode_changed_ua;
 		}
@@ -2455,8 +2455,8 @@
 		    min(len, SDEBUG_MAX_INQ_ARR_SZ));
 }
 
-static int check_device_access_params(struct scsi_cmnd *scp,
-				      unsigned long long lba, unsigned int num)
+static inline int check_device_access_params(struct scsi_cmnd *scp,
+	unsigned long long lba, unsigned int num, bool write)
 {
 	if (lba + num > sdebug_capacity) {
 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
@@ -2468,6 +2468,10 @@
 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
 		return check_condition_result;
 	}
+	if (write && unlikely(sdebug_wp)) {
+		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
+		return check_condition_result;
+	}
 	return 0;
 }
 
@@ -2477,21 +2481,19 @@
 {
 	int ret;
 	u64 block, rest = 0;
-	struct scsi_data_buffer *sdb;
+	struct scsi_data_buffer *sdb = &scmd->sdb;
 	enum dma_data_direction dir;
 
 	if (do_write) {
-		sdb = scsi_out(scmd);
 		dir = DMA_TO_DEVICE;
 		write_since_sync = true;
 	} else {
-		sdb = scsi_in(scmd);
 		dir = DMA_FROM_DEVICE;
 	}
 
 	if (!sdb->length)
 		return 0;
-	if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
+	if (scmd->sc_data_direction != dir)
 		return -1;
 
 	block = do_div(lba, sdebug_store_sectors);
@@ -2514,8 +2516,8 @@
 	return ret;
 }
 
-/* If fake_store(lba,num) compares equal to arr(num), then copy top half of
- * arr into fake_store(lba,num) and return true. If comparison fails then
+/* If lba2fake_store(lba,num) compares equal to arr(num), then copy top half of
+ * arr into lba2fake_store(lba,num) and return true. If comparison fails then
  * return false. */
 static bool comp_write_worker(u64 lba, u32 num, const u8 *arr)
 {
@@ -2643,7 +2645,7 @@
 		if (sdt->app_tag == cpu_to_be16(0xffff))
 			continue;
 
-		ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
+		ret = dif_verify(sdt, lba2fake_store(sector), sector, ei_lba);
 		if (ret) {
 			dif_errors++;
 			return ret;
@@ -2728,18 +2730,9 @@
 	} else
 		sqcp = NULL;
 
-	/* inline check_device_access_params() */
-	if (unlikely(lba + num > sdebug_capacity)) {
-		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
-		return check_condition_result;
-	}
-	/* transfer length excessive (tie in to block limits VPD page) */
-	if (unlikely(num > sdebug_store_sectors)) {
-		/* needs work to find which cdb byte 'num' comes from */
-		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
-		return check_condition_result;
-	}
-
+	ret = check_device_access_params(scp, lba, num, false);
+	if (ret)
+		return ret;
 	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
 		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
 		     ((lba + num) > sdebug_medium_error_start))) {
@@ -2774,7 +2767,7 @@
 	if (unlikely(ret == -1))
 		return DID_ERROR << 16;
 
-	scsi_in(scp)->resid = scsi_bufflen(scp) - ret;
+	scsi_set_resid(scp, scsi_bufflen(scp) - ret);
 
 	if (unlikely(sqcp)) {
 		if (sqcp->inj_recovered) {
@@ -3031,19 +3024,9 @@
 			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
 				    "to DIF device\n");
 	}
-
-	/* inline check_device_access_params() */
-	if (unlikely(lba + num > sdebug_capacity)) {
-		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
-		return check_condition_result;
-	}
-	/* transfer length excessive (tie in to block limits VPD page) */
-	if (unlikely(num > sdebug_store_sectors)) {
-		/* needs work to find which cdb byte 'num' comes from */
-		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
-		return check_condition_result;
-	}
-
+	ret = check_device_access_params(scp, lba, num, true);
+	if (ret)
+		return ret;
 	write_lock_irqsave(&atomic_rw, iflags);
 
 	/* DIX + T10 DIF */
@@ -3182,7 +3165,7 @@
 				my_name, __func__, k, lba, num, sg_off);
 		if (num == 0)
 			continue;
-		ret = check_device_access_params(scp, lba, num);
+		ret = check_device_access_params(scp, lba, num, true);
 		if (ret)
 			goto err_out_unlock;
 		num_by = num * lb_size;
@@ -3261,12 +3244,14 @@
 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
 			   u32 ei_lba, bool unmap, bool ndob)
 {
+	int ret;
 	unsigned long iflags;
 	unsigned long long i;
-	int ret;
-	u64 lba_off;
+	u32 lb_size = sdebug_sector_size;
+	u64 block, lbaa;
+	u8 *fs1p;
 
-	ret = check_device_access_params(scp, lba, num);
+	ret = check_device_access_params(scp, lba, num, true);
 	if (ret)
 		return ret;
 
@@ -3276,31 +3261,30 @@
 		unmap_region(lba, num);
 		goto out;
 	}
-
-	lba_off = lba * sdebug_sector_size;
+	lbaa = lba;
+	block = do_div(lbaa, sdebug_store_sectors);
 	/* if ndob then zero 1 logical block, else fetch 1 logical block */
+	fs1p = fake_storep + (block * lb_size);
 	if (ndob) {
-		memset(fake_storep + lba_off, 0, sdebug_sector_size);
+		memset(fs1p, 0, lb_size);
 		ret = 0;
 	} else
-		ret = fetch_to_dev_buffer(scp, fake_storep + lba_off,
-					  sdebug_sector_size);
+		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
 
 	if (-1 == ret) {
 		write_unlock_irqrestore(&atomic_rw, iflags);
 		return DID_ERROR << 16;
-	} else if (sdebug_verbose && !ndob && (ret < sdebug_sector_size))
+	} else if (sdebug_verbose && !ndob && (ret < lb_size))
 		sdev_printk(KERN_INFO, scp->device,
 			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
-			    my_name, "write same",
-			    sdebug_sector_size, ret);
+			    my_name, "write same", lb_size, ret);
 
 	/* Copy first sector to remaining blocks */
-	for (i = 1 ; i < num ; i++)
-		memcpy(fake_storep + ((lba + i) * sdebug_sector_size),
-		       fake_storep + lba_off,
-		       sdebug_sector_size);
-
+	for (i = 1 ; i < num ; i++) {
+		lbaa = lba + i;
+		block = do_div(lbaa, sdebug_store_sectors);
+		memmove(fake_storep + (block * lb_size), fs1p, lb_size);
+	}
 	if (scsi_debug_lbp())
 		map_region(lba, num);
 out:
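
The lbaa/block pair above is easy to misread because do_div() has unusual
semantics: it divides its 64-bit first argument in place (leaving the quotient
there) and returns the remainder. Here only the remainder matters — it is the
LBA wrapped into the finite fake store:

	u64 lbaa = lba;		/* copy first: do_div() clobbers its argument */
	u64 block = do_div(lbaa, sdebug_store_sectors);
	/* now: lbaa  == lba / sdebug_store_sectors  (quotient, unused)
	 *      block == lba % sdebug_store_sectors  (wrapped store index) */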
@@ -3439,18 +3423,9 @@
 	    (cmd[1] & 0xe0) == 0)
 		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
 			    "to DIF device\n");
-
-	/* inline check_device_access_params() */
-	if (lba + num > sdebug_capacity) {
-		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
-		return check_condition_result;
-	}
-	/* transfer length excessive (tie in to block limits VPD page) */
-	if (num > sdebug_store_sectors) {
-		/* needs work to find which cdb byte 'num' comes from */
-		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
-		return check_condition_result;
-	}
+	ret = check_device_access_params(scp, lba, num, false);
+	if (ret)
+		return ret;
 	dnum = 2 * num;
 	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
 	if (NULL == arr) {
@@ -3533,7 +3508,7 @@
 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
 
-		ret = check_device_access_params(scp, lba, num);
+		ret = check_device_access_params(scp, lba, num, true);
 		if (ret)
 			goto out;
 
@@ -3566,7 +3541,7 @@
 	if (alloc_len < 24)
 		return 0;
 
-	ret = check_device_access_params(scp, lba, 1);
+	ret = check_device_access_params(scp, lba, 1, false);
 	if (ret)
 		return ret;
 
@@ -3718,68 +3693,6 @@
 	return res;
 }
 
-static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
-			    unsigned int num, struct sdebug_dev_info *devip)
-{
-	int j;
-	unsigned char *kaddr, *buf;
-	unsigned int offset;
-	struct scsi_data_buffer *sdb = scsi_in(scp);
-	struct sg_mapping_iter miter;
-
-	/* better not to use temporary buffer. */
-	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
-	if (!buf) {
-		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
-				INSUFF_RES_ASCQ);
-		return check_condition_result;
-	}
-
-	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
-
-	offset = 0;
-	sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
-			SG_MITER_ATOMIC | SG_MITER_TO_SG);
-
-	while (sg_miter_next(&miter)) {
-		kaddr = miter.addr;
-		for (j = 0; j < miter.length; j++)
-			*(kaddr + j) ^= *(buf + offset + j);
-
-		offset += miter.length;
-	}
-	sg_miter_stop(&miter);
-	kfree(buf);
-
-	return 0;
-}
-
-static int resp_xdwriteread_10(struct scsi_cmnd *scp,
-			       struct sdebug_dev_info *devip)
-{
-	u8 *cmd = scp->cmnd;
-	u64 lba;
-	u32 num;
-	int errsts;
-
-	if (!scsi_bidi_cmnd(scp)) {
-		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
-				INSUFF_RES_ASCQ);
-		return check_condition_result;
-	}
-	errsts = resp_read_dt0(scp, devip);
-	if (errsts)
-		return errsts;
-	if (!(cmd[1] & 0x4)) {		/* DISABLE_WRITE is not set */
-		errsts = resp_write_dt0(scp, devip);
-		if (errsts)
-			return errsts;
-	}
-	lba = get_unaligned_be32(cmd + 2);
-	num = get_unaligned_be16(cmd + 7);
-	return resp_xdwriteread(scp, lba, num, devip);
-}
-
 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
 {
 	u32 tag = blk_mq_unique_tag(cmnd->request);
@@ -3953,7 +3866,6 @@
 	if (sdebug_verbose)
 		pr_info("slave_alloc <%u %u %u %llu>\n",
 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
-	blk_queue_flag_set(QUEUE_FLAG_BIDI, sdp->request_queue);
 	return 0;
 }
 
@@ -3973,7 +3885,6 @@
 			return 1;  /* no resources, will be marked offline */
 	}
 	sdp->hostdata = devip;
-	blk_queue_max_segment_size(sdp->request_queue, -1U);
 	if (sdebug_no_uld)
 		sdp->no_uld_attach = 1;
 	config_cdb_len(sdp);
@@ -4554,6 +4465,7 @@
 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
 		   S_IRUGO | S_IWUSR);
+module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
 module_param_named(write_same_length, sdebug_write_same_length, int,
 		   S_IRUGO | S_IWUSR);
 
@@ -4613,6 +4525,7 @@
 		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
+MODULE_PARM_DESC(wp, "Write Protect (def=0)");
 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
 
 #define SDEBUG_INFO_LEN 256
@@ -5851,7 +5764,7 @@
 	.sg_tablesize =		SG_MAX_SEGMENTS,
 	.cmd_per_lun =		DEF_CMD_PER_LUN,
 	.max_sectors =		-1U,
-	.use_clustering = 	DISABLE_CLUSTERING,
+	.max_segment_size =	-1U,
 	.module =		THIS_MODULE,
 	.track_queue_depth =	1,
 };
@@ -5866,8 +5779,9 @@
 	sdbg_host = to_sdebug_host(dev);
 
 	sdebug_driver_template.can_queue = sdebug_max_queue;
-	if (sdebug_clustering)
-		sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
+	if (!sdebug_clustering)
+		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
+
 	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
 	if (NULL == hpnt) {
 		pr_err("scsi_host_alloc failed\n");
@@ -5881,8 +5795,7 @@
 	}
 	/* Decide whether to tell scsi subsystem that we want mq */
 	/* Following should give the same answer for each host */
-	if (shost_use_blk_mq(hpnt))
-		hpnt->nr_hw_queues = submit_queues;
+	hpnt->nr_hw_queues = submit_queues;
 
 	sdbg_host->shost = hpnt;
 	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
diff --git a/drivers/scsi/scsi_debugfs.c b/drivers/scsi/scsi_debugfs.c
index c5a8756..c19ea7a 100644
--- a/drivers/scsi/scsi_debugfs.c
+++ b/drivers/scsi/scsi_debugfs.c
@@ -1,4 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
+#include <linux/bitops.h>
 #include <linux/seq_file.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_dbg.h>
@@ -18,9 +19,7 @@
 	bool sep = false;
 	int i;
 
-	for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
-		if (!(flags & BIT(i)))
-			continue;
+	for_each_set_bit(i, &flags, BITS_PER_LONG) {
 		if (sep)
 			seq_puts(m, "|");
 		sep = true;
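
for_each_set_bit() — hence the new <linux/bitops.h> include — visits only the
bit positions that are actually set, replacing the manual test-and-continue
loop. For example:

	unsigned long flags = 0x29;	/* bits 0, 3 and 5 set */
	int i;

	for_each_set_bit(i, &flags, BITS_PER_LONG)
		seq_printf(m, "bit %d\n", i);	/* emits 0, 3, 5 */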
diff --git a/drivers/scsi/scsi_debugfs.h b/drivers/scsi/scsi_debugfs.h
index 951b043..d125d1b 100644
--- a/drivers/scsi/scsi_debugfs.h
+++ b/drivers/scsi/scsi_debugfs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 struct request;
 struct seq_file;
 
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index c4cbfd0..df14597 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -238,6 +238,9 @@
 	{"NETAPP", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
 	{"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
 	{"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+	{"LENOVO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+	{"SanDisk", "Cruzer Blade", NULL, BLIST_TRY_VPD_PAGES |
+		BLIST_INQUIRY_36},
 	{"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
 	{"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN},
 	{"SONY", "TSL", NULL, BLIST_FORCELUN},		/* DDS3 & DDS4 autoloaders */
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
index 5a58cbf..42f0550 100644
--- a/drivers/scsi/scsi_dh.c
+++ b/drivers/scsi/scsi_dh.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * SCSI device handler infrastructure.
  *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
  * Copyright IBM Corporation, 2007
  *      Authors:
  *               Chandra Seetharaman <sekharan@us.ibm.com>
@@ -75,6 +62,7 @@
 	{"NETAPP", "INF-01-00",		"rdac", },
 	{"LSI", "INF-01-00",		"rdac", },
 	{"ENGENIO", "INF-01-00",	"rdac", },
+	{"LENOVO", "DE_Series",		"rdac", },
 	{NULL, NULL,			NULL },
 };
 
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index b7a8fdf..ae2fa17 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  *  scsi_error.c Copyright (C) 1997 Eric Youngdale
  *
@@ -297,19 +298,19 @@
 
 	if (rtn == BLK_EH_DONE) {
 		/*
-		 * For blk-mq, we must set the request state to complete now
-		 * before sending the request to the scsi error handler. This
-		 * will prevent a use-after-free in the event the LLD manages
-		 * to complete the request before the error handler finishes
-		 * processing this timed out request.
+		 * Set the command to complete first in order to prevent a real
+		 * completion from releasing the command while error handling
+		 * is using it. If the command was already completed, then the
+		 * lower level driver beat the timeout handler, and it is safe
+		 * to return without escalating error recovery.
 		 *
-		 * If the request was already completed, then the LLD beat the
-		 * time out handler from transferring the request to the scsi
-		 * error handler. In that case we can return immediately as no
-		 * further action is required.
+		 * If timeout handling lost the race to a real completion, the
+		 * block layer may ignore that due to a fake timeout injection,
+		 * so return RESET_TIMER to allow error handling another shot
+		 * at this command.
 		 */
-		if (req->q->mq_ops && !blk_mq_mark_complete(req))
-			return rtn;
+		if (test_and_set_bit(SCMD_STATE_COMPLETE, &scmd->state))
+			return BLK_EH_RESET_TIMER;
 		if (scsi_abort_command(scmd) != SUCCESS) {
 			set_host_byte(scmd, DID_TIME_OUT);
 			scsi_eh_scmd_add(scmd);
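
The SCMD_STATE_COMPLETE bit replaces the old blk_mq_mark_complete() dance:
test_and_set_bit() is atomic and returns the bit's previous value, so
whichever path — normal completion or this timeout handler — sets it first
owns the command:

	if (test_and_set_bit(SCMD_STATE_COMPLETE, &scmd->state))
		return BLK_EH_RESET_TIMER;	/* lost: completion got there first */
	/* won the race: the command is ours to hand to the error handler */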
@@ -338,9 +339,6 @@
 
 	online = scsi_device_online(sdev);
 
-	SCSI_LOG_ERROR_RECOVERY(5, sdev_printk(KERN_INFO, sdev,
-		"%s: rtn: %d\n", __func__, online));
-
 	return online;
 }
 EXPORT_SYMBOL(scsi_block_when_processing_errors);
@@ -968,8 +966,8 @@
 	ses->cmnd = scmd->cmnd;
 	ses->data_direction = scmd->sc_data_direction;
 	ses->sdb = scmd->sdb;
-	ses->next_rq = scmd->request->next_rq;
 	ses->result = scmd->result;
+	ses->resid_len = scmd->req.resid_len;
 	ses->underflow = scmd->underflow;
 	ses->prot_op = scmd->prot_op;
 	ses->eh_eflags = scmd->eh_eflags;
@@ -979,8 +977,8 @@
 	scmd->cmnd = ses->eh_cmnd;
 	memset(scmd->cmnd, 0, BLK_MAX_CDB);
 	memset(&scmd->sdb, 0, sizeof(scmd->sdb));
-	scmd->request->next_rq = NULL;
 	scmd->result = 0;
+	scmd->req.resid_len = 0;
 
 	if (sense_bytes) {
 		scmd->sdb.length = min_t(unsigned, SCSI_SENSE_BUFFERSIZE,
@@ -1032,8 +1030,8 @@
 	scmd->cmnd = ses->cmnd;
 	scmd->sc_data_direction = ses->data_direction;
 	scmd->sdb = ses->sdb;
-	scmd->request->next_rq = ses->next_rq;
 	scmd->result = ses->result;
+	scmd->req.resid_len = ses->resid_len;
 	scmd->underflow = ses->underflow;
 	scmd->prot_op = ses->prot_op;
 	scmd->eh_eflags = ses->eh_eflags;
@@ -1060,7 +1058,7 @@
 	struct scsi_device *sdev = scmd->device;
 	struct Scsi_Host *shost = sdev->host;
 	DECLARE_COMPLETION_ONSTACK(done);
-	unsigned long timeleft = timeout;
+	unsigned long timeleft = timeout, delay;
 	struct scsi_eh_save ses;
 	const unsigned long stall_for = msecs_to_jiffies(100);
 	int rtn;
@@ -1071,7 +1069,29 @@
 
 	scsi_log_send(scmd);
 	scmd->scsi_done = scsi_eh_done;
-	rtn = shost->hostt->queuecommand(shost, scmd);
+
+	/*
+	 * Lock sdev->state_mutex so that scsi_device_quiesce() cannot change
+	 * the SCSI device state after we have examined it and before
+	 * .queuecommand() is called.
+	 */
+	mutex_lock(&sdev->state_mutex);
+	while (sdev->sdev_state == SDEV_BLOCK && timeleft > 0) {
+		mutex_unlock(&sdev->state_mutex);
+		SCSI_LOG_ERROR_RECOVERY(5, sdev_printk(KERN_DEBUG, sdev,
+			"%s: state %d <> %d\n", __func__, sdev->sdev_state,
+			SDEV_BLOCK));
+		delay = min(timeleft, stall_for);
+		timeleft -= delay;
+		msleep(jiffies_to_msecs(delay));
+		mutex_lock(&sdev->state_mutex);
+	}
+	if (sdev->sdev_state != SDEV_BLOCK)
+		rtn = shost->hostt->queuecommand(shost, scmd);
+	else
+		rtn = SCSI_MLQUEUE_DEVICE_BUSY;
+	mutex_unlock(&sdev->state_mutex);
+
 	if (rtn) {
 		if (timeleft > stall_for) {
 			scsi_eh_restore_cmnd(scmd, &ses);
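
The new wait loop polls a blocked device in 100 ms slices and debits each
sleep from the overall timeout budget, so a device stuck in SDEV_BLOCK cannot
stall error recovery indefinitely. The general shape, with a hypothetical
still_blocked() predicate standing in for the state check:

	unsigned long left = timeout;
	const unsigned long slice = msecs_to_jiffies(100);

	while (still_blocked() && left > 0) {
		unsigned long d = min(left, slice);

		left -= d;
		msleep(jiffies_to_msecs(d));
	}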
@@ -1935,7 +1955,7 @@
 
 static void eh_lock_door_done(struct request *req, blk_status_t status)
 {
-	__blk_put_request(req->q, req);
+	blk_put_request(req);
 }
 
 /**
@@ -2399,7 +2419,6 @@
 	scsi_autopm_put_host(shost);
 	return error;
 }
-EXPORT_SYMBOL(scsi_ioctl_reset);
 
 bool scsi_command_normalize_sense(const struct scsi_cmnd *cmd,
 				  struct scsi_sense_hdr *sshdr)
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index cc30fcc..57bcd05 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Changes:
  * Arnaldo Carvalho de Melo <acme@conectiva.com.br> 08/23/2000
@@ -221,7 +222,7 @@
 
 	switch (cmd) {
 	case SCSI_IOCTL_GET_IDLUN:
-		if (!access_ok(VERIFY_WRITE, arg, sizeof(struct scsi_idlun)))
+		if (!access_ok(arg, sizeof(struct scsi_idlun)))
 			return -EFAULT;
 
 		__put_user((sdev->id & 0xff)
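
access_ok() dropped its VERIFY_READ/VERIFY_WRITE type argument in v5.0 — no
architecture ever used it — so callers now pass just the pointer and size:

	if (!access_ok(uptr, sizeof(*uptr)))	/* was access_ok(VERIFY_WRITE, ...) */
		return -EFAULT;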
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index b5f6382..91c007d 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 1999 Eric Youngdale
  * Copyright (C) 2014 Christoph Hellwig
@@ -39,6 +40,18 @@
 #include "scsi_priv.h"
 #include "scsi_logging.h"
 
+/*
+ * The size of integrity metadata is usually small; one inline sg entry
+ * should cover normal cases.
+ */
+#ifdef CONFIG_ARCH_NO_SG_CHAIN
+#define  SCSI_INLINE_PROT_SG_CNT  0
+#define  SCSI_INLINE_SG_CNT  0
+#else
+#define  SCSI_INLINE_PROT_SG_CNT  1
+#define  SCSI_INLINE_SG_CNT  2
+#endif
+
 static struct kmem_cache *scsi_sdb_cache;
 static struct kmem_cache *scsi_sense_cache;
 static struct kmem_cache *scsi_sense_isadma_cache;
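
The SCSI_INLINE_*_SG_CNT constants reserve a few scatterlist entries inline in
the command itself, so small I/Os avoid a separate chained-sg allocation; the
same count must then be passed to both sg_alloc_table_chained() and
sg_free_table_chained(), as the hunks further down do. Condensed:

	if (sg_alloc_table_chained(&sdb->table, nents, sdb->table.sgl,
				   SCSI_INLINE_SG_CNT))
		return BLK_STS_RESOURCE;
	/* ... map the request and issue the command ... */
	sg_free_table_chained(&sdb->table, SCSI_INLINE_SG_CNT);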
@@ -71,11 +84,11 @@
 	struct kmem_cache *cache;
 	int ret = 0;
 
+	mutex_lock(&scsi_sense_cache_mutex);
 	cache = scsi_select_sense_cache(shost->unchecked_isa_dma);
 	if (cache)
-		return 0;
+		goto exit;
 
-	mutex_lock(&scsi_sense_cache_mutex);
 	if (shost->unchecked_isa_dma) {
 		scsi_sense_isadma_cache =
 			kmem_cache_create("scsi_sense_cache(DMA)",
@@ -91,7 +104,7 @@
 		if (!scsi_sense_cache)
 			ret = -ENOMEM;
 	}
-
+ exit:
 	mutex_unlock(&scsi_sense_cache_mutex);
 	return ret;
 }
@@ -141,8 +154,6 @@
 
 static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
 {
-	struct scsi_device *sdev = cmd->device;
-
 	if (cmd->request->rq_flags & RQF_DONTPREP) {
 		cmd->request->rq_flags &= ~RQF_DONTPREP;
 		scsi_mq_uninit_cmd(cmd);
@@ -150,7 +161,6 @@
 		WARN_ON_ONCE(true);
 	}
 	blk_mq_requeue_request(cmd->request, true);
-	put_device(&sdev->sdev_gendev);
 }
 
 /**
@@ -168,8 +178,6 @@
 static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
 {
 	struct scsi_device *device = cmd->device;
-	struct request_queue *q = device->request_queue;
-	unsigned long flags;
 
 	SCSI_LOG_MLQUEUE(1, scmd_printk(KERN_INFO, cmd,
 		"Inserting command %p into mlqueue\n", cmd));
@@ -190,26 +198,8 @@
 	 * before blk_cleanup_queue() finishes.
 	 */
 	cmd->result = 0;
-	if (q->mq_ops) {
-		/*
-		 * Before a SCSI command is dispatched,
-		 * get_device(&sdev->sdev_gendev) is called and the host,
-		 * target and device busy counters are increased. Since
-		 * requeuing a request causes these actions to be repeated and
-		 * since scsi_device_unbusy() has already been called,
-		 * put_device(&device->sdev_gendev) must still be called. Call
-		 * put_device() after blk_mq_requeue_request() to avoid that
-		 * removal of the SCSI device can start before requeueing has
-		 * happened.
-		 */
-		blk_mq_requeue_request(cmd->request, true);
-		put_device(&device->sdev_gendev);
-		return;
-	}
-	spin_lock_irqsave(q->queue_lock, flags);
-	blk_requeue_request(q, cmd->request);
-	kblockd_schedule_work(&device->requeue_work);
-	spin_unlock_irqrestore(q->queue_lock, flags);
+
+	blk_mq_requeue_request(cmd->request, true);
 }
 
 /*
@@ -324,7 +314,6 @@
  */
 static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
 {
-	cmd->serial_number = 0;
 	scsi_set_resid(cmd, 0);
 	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
 	if (cmd->cmd_len == 0)
@@ -370,10 +359,7 @@
 
 static void scsi_kick_queue(struct request_queue *q)
 {
-	if (q->mq_ops)
-		blk_mq_run_hw_queues(q, false);
-	else
-		blk_run_queue(q);
+	blk_mq_run_hw_queues(q, false);
 }
 
 /*
@@ -534,10 +520,7 @@
 	if (!list_empty(&sdev->host->starved_list))
 		scsi_starved_list_run(sdev->host);
 
-	if (q->mq_ops)
-		blk_mq_run_hw_queues(q, false);
-	else
-		blk_run_queue(q);
+	blk_mq_run_hw_queues(q, false);
 }
 
 void scsi_requeue_run_queue(struct work_struct *work)
@@ -550,42 +533,6 @@
 	scsi_run_queue(q);
 }
 
-/*
- * Function:	scsi_requeue_command()
- *
- * Purpose:	Handle post-processing of completed commands.
- *
- * Arguments:	q	- queue to operate on
- *		cmd	- command that may need to be requeued.
- *
- * Returns:	Nothing
- *
- * Notes:	After command completion, there may be blocks left
- *		over which weren't finished by the previous command
- *		this can be for a number of reasons - the main one is
- *		I/O errors in the middle of the request, in which case
- *		we need to request the blocks that come after the bad
- *		sector.
- * Notes:	Upon return, cmd is a stale pointer.
- */
-static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
-{
-	struct scsi_device *sdev = cmd->device;
-	struct request *req = cmd->request;
-	unsigned long flags;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	blk_unprep_request(req);
-	req->special = NULL;
-	scsi_put_command(cmd);
-	blk_requeue_request(q, req);
-	spin_unlock_irqrestore(q->queue_lock, flags);
-
-	scsi_run_queue(q);
-
-	put_device(&sdev->sdev_gendev);
-}
-
 void scsi_run_host_queues(struct Scsi_Host *shost)
 {
 	struct scsi_device *sdev;
@@ -606,17 +553,12 @@
 
 static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
 {
-	struct scsi_data_buffer *sdb;
-
 	if (cmd->sdb.table.nents)
-		sg_free_table_chained(&cmd->sdb.table, true);
-	if (cmd->request->next_rq) {
-		sdb = cmd->request->next_rq->special;
-		if (sdb)
-			sg_free_table_chained(&sdb->table, true);
-	}
+		sg_free_table_chained(&cmd->sdb.table,
+				SCSI_INLINE_SG_CNT);
 	if (scsi_prot_sg_count(cmd))
-		sg_free_table_chained(&cmd->prot_sdb->table, true);
+		sg_free_table_chained(&cmd->prot_sdb->table,
+				SCSI_INLINE_PROT_SG_CNT);
 }
 
 static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
@@ -626,45 +568,9 @@
 	scsi_del_cmd_from_list(cmd);
 }
 
-/*
- * Function:    scsi_release_buffers()
- *
- * Purpose:     Free resources allocate for a scsi_command.
- *
- * Arguments:   cmd	- command that we are bailing.
- *
- * Lock status: Assumed that no lock is held upon entry.
- *
- * Returns:     Nothing
- *
- * Notes:       In the event that an upper level driver rejects a
- *		command, we must release resources allocated during
- *		the __init_io() function.  Primarily this would involve
- *		the scatter-gather table.
- */
-static void scsi_release_buffers(struct scsi_cmnd *cmd)
-{
-	if (cmd->sdb.table.nents)
-		sg_free_table_chained(&cmd->sdb.table, false);
-
-	memset(&cmd->sdb, 0, sizeof(cmd->sdb));
-
-	if (scsi_prot_sg_count(cmd))
-		sg_free_table_chained(&cmd->prot_sdb->table, false);
-}
-
-static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd)
-{
-	struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special;
-
-	sg_free_table_chained(&bidi_sdb->table, false);
-	kmem_cache_free(scsi_sdb_cache, bidi_sdb);
-	cmd->request->next_rq->special = NULL;
-}
-
 /* Returns false when no more bytes to process, true if there are more */
 static bool scsi_end_request(struct request *req, blk_status_t error,
-		unsigned int bytes, unsigned int bidi_bytes)
+		unsigned int bytes)
 {
 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
 	struct scsi_device *sdev = cmd->device;
@@ -673,61 +579,46 @@
 	if (blk_update_request(req, error, bytes))
 		return true;
 
-	/* Bidi request must be completed as a whole */
-	if (unlikely(bidi_bytes) &&
-	    blk_update_request(req->next_rq, error, bidi_bytes))
-		return true;
-
 	if (blk_queue_add_random(q))
 		add_disk_randomness(req->rq_disk);
 
 	if (!blk_rq_is_scsi(req)) {
 		WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
 		cmd->flags &= ~SCMD_INITIALIZED;
-		destroy_rcu_head(&cmd->rcu);
 	}
 
-	if (req->mq_ctx) {
-		/*
-		 * In the MQ case the command gets freed by __blk_mq_end_request,
-		 * so we have to do all cleanup that depends on it earlier.
-		 *
-		 * We also can't kick the queues from irq context, so we
-		 * will have to defer it to a workqueue.
-		 */
-		scsi_mq_uninit_cmd(cmd);
+	/*
+	 * Calling rcu_barrier() is not necessary here because the
+	 * SCSI error handler guarantees that the function called by
+	 * call_rcu() has been called before scsi_end_request() is
+	 * called.
+	 */
+	destroy_rcu_head(&cmd->rcu);
 
-		/*
-		 * queue is still alive, so grab the ref for preventing it
-		 * from being cleaned up during running queue.
-		 */
-		percpu_ref_get(&q->q_usage_counter);
+	/*
+	 * In the MQ case the command gets freed by __blk_mq_end_request,
+	 * so we have to do all cleanup that depends on it earlier.
+	 *
+	 * We also can't kick the queues from irq context, so we
+	 * will have to defer it to a workqueue.
+	 */
+	scsi_mq_uninit_cmd(cmd);
 
-		__blk_mq_end_request(req, error);
+	/*
+	 * queue is still alive, so grab the ref for preventing it
+	 * from being cleaned up during running queue.
+	 */
+	percpu_ref_get(&q->q_usage_counter);
 
-		if (scsi_target(sdev)->single_lun ||
-		    !list_empty(&sdev->host->starved_list))
-			kblockd_schedule_work(&sdev->requeue_work);
-		else
-			blk_mq_run_hw_queues(q, true);
+	__blk_mq_end_request(req, error);
 
-		percpu_ref_put(&q->q_usage_counter);
-	} else {
-		unsigned long flags;
+	if (scsi_target(sdev)->single_lun ||
+	    !list_empty(&sdev->host->starved_list))
+		kblockd_schedule_work(&sdev->requeue_work);
+	else
+		blk_mq_run_hw_queues(q, true);
 
-		if (bidi_bytes)
-			scsi_release_bidi_buffers(cmd);
-		scsi_release_buffers(cmd);
-		scsi_put_command(cmd);
-
-		spin_lock_irqsave(q->queue_lock, flags);
-		blk_finish_request(req, error);
-		spin_unlock_irqrestore(q->queue_lock, flags);
-
-		scsi_run_queue(q);
-	}
-
-	put_device(&sdev->sdev_gendev);
+	percpu_ref_put(&q->q_usage_counter);
 	return false;
 }
 
@@ -757,6 +648,7 @@
 		set_host_byte(cmd, DID_OK);
 		return BLK_STS_TARGET;
 	case DID_NEXUS_FAILURE:
+		set_host_byte(cmd, DID_OK);
 		return BLK_STS_NEXUS;
 	case DID_ALLOC_FAILURE:
 		set_host_byte(cmd, DID_OK);
@@ -774,13 +666,7 @@
 				      struct request_queue *q)
 {
 	/* A new command will be prepared and issued. */
-	if (q->mq_ops) {
-		scsi_mq_requeue_cmd(cmd);
-	} else {
-		/* Unprep request and put it back at head of the queue. */
-		scsi_release_buffers(cmd);
-		scsi_requeue_command(q, cmd);
-	}
+	scsi_mq_requeue_cmd(cmd);
 }
 
 /* Helper for scsi_io_completion() when special action required. */
@@ -924,7 +810,7 @@
 				scsi_print_command(cmd);
 			}
 		}
-		if (!scsi_end_request(req, blk_stat, blk_rq_err_bytes(req), 0))
+		if (!scsi_end_request(req, blk_stat, blk_rq_err_bytes(req)))
 			return;
 		/*FALLTHRU*/
 	case ACTION_REPREP:
@@ -1058,30 +944,6 @@
 		 * scsi_result_to_blk_status may have reset the host_byte
 		 */
 		scsi_req(req)->result = cmd->result;
-		scsi_req(req)->resid_len = scsi_get_resid(cmd);
-
-		if (unlikely(scsi_bidi_cmnd(cmd))) {
-			/*
-			 * Bidi commands Must be complete as a whole,
-			 * both sides at once.
-			 */
-			scsi_req(req->next_rq)->resid_len = scsi_in(cmd)->resid;
-			if (scsi_end_request(req, BLK_STS_OK, blk_rq_bytes(req),
-					blk_rq_bytes(req->next_rq)))
-				WARN_ONCE(true,
-					  "Bidi command with remaining bytes");
-			return;
-		}
-	}
-
-	/* no bidi support yet, other than in pass-through */
-	if (unlikely(blk_bidi_rq(req))) {
-		WARN_ONCE(true, "Only support bidi command in passthrough");
-		scmd_printk(KERN_ERR, cmd, "Killing bidi command\n");
-		if (scsi_end_request(req, BLK_STS_IOERR, blk_rq_bytes(req),
-				     blk_rq_bytes(req->next_rq)))
-			WARN_ONCE(true, "Bidi command with remaining bytes");
-		return;
 	}
 
 	/*
@@ -1098,13 +960,13 @@
 	 * to retry code. Fast path should return in this block.
 	 */
 	if (likely(blk_rq_bytes(req) > 0 || blk_stat == BLK_STS_OK)) {
-		if (likely(!scsi_end_request(req, blk_stat, good_bytes, 0)))
+		if (likely(!scsi_end_request(req, blk_stat, good_bytes)))
 			return; /* no bytes remaining */
 	}
 
 	/* Kill remainder if no retries. */
 	if (unlikely(blk_stat && scsi_noretry_cmd(cmd))) {
-		if (scsi_end_request(req, blk_stat, blk_rq_bytes(req), 0))
+		if (scsi_end_request(req, blk_stat, blk_rq_bytes(req)))
 			WARN_ONCE(true,
 			    "Bytes remaining after failed, no-retry command");
 		return;
@@ -1120,7 +982,8 @@
 		scsi_io_completion_action(cmd, result);
 }
 
-static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb)
+static blk_status_t scsi_init_sgtable(struct request *req,
+		struct scsi_data_buffer *sdb)
 {
 	int count;
 
@@ -1128,8 +991,9 @@
 	 * If sg table allocation fails, requeue request later.
 	 */
 	if (unlikely(sg_alloc_table_chained(&sdb->table,
-			blk_rq_nr_phys_segments(req), sdb->table.sgl)))
-		return BLKPREP_DEFER;
+			blk_rq_nr_phys_segments(req), sdb->table.sgl,
+			SCSI_INLINE_SG_CNT)))
+		return BLK_STS_RESOURCE;
 
 	/* 
 	 * Next, walk the list, and fill in the addresses and sizes of
@@ -1139,7 +1003,7 @@
 	BUG_ON(count > sdb->table.nents);
 	sdb->table.nents = count;
 	sdb->length = blk_rq_payload_bytes(req);
-	return BLKPREP_OK;
+	return BLK_STS_OK;
 }
 
 /*
@@ -1149,84 +1013,58 @@
  *
  * Arguments:   cmd   - Command descriptor we wish to initialize
  *
- * Returns:     0 on success
- *		BLKPREP_DEFER if the failure is retryable
- *		BLKPREP_KILL if the failure is fatal
+ * Returns:     BLK_STS_OK on success
+ *		BLK_STS_RESOURCE if the failure is retryable
+ *		BLK_STS_IOERR if the failure is fatal
  */
-int scsi_init_io(struct scsi_cmnd *cmd)
+blk_status_t scsi_init_io(struct scsi_cmnd *cmd)
 {
-	struct scsi_device *sdev = cmd->device;
 	struct request *rq = cmd->request;
-	bool is_mq = (rq->mq_ctx != NULL);
-	int error = BLKPREP_KILL;
+	blk_status_t ret;
 
 	if (WARN_ON_ONCE(!blk_rq_nr_phys_segments(rq)))
-		goto err_exit;
+		return BLK_STS_IOERR;
 
-	error = scsi_init_sgtable(rq, &cmd->sdb);
-	if (error)
-		goto err_exit;
-
-	if (blk_bidi_rq(rq)) {
-		if (!rq->q->mq_ops) {
-			struct scsi_data_buffer *bidi_sdb =
-				kmem_cache_zalloc(scsi_sdb_cache, GFP_ATOMIC);
-			if (!bidi_sdb) {
-				error = BLKPREP_DEFER;
-				goto err_exit;
-			}
-
-			rq->next_rq->special = bidi_sdb;
-		}
-
-		error = scsi_init_sgtable(rq->next_rq, rq->next_rq->special);
-		if (error)
-			goto err_exit;
-	}
+	ret = scsi_init_sgtable(rq, &cmd->sdb);
+	if (ret)
+		return ret;
 
 	if (blk_integrity_rq(rq)) {
 		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
 		int ivecs, count;
 
-		if (prot_sdb == NULL) {
+		if (WARN_ON_ONCE(!prot_sdb)) {
 			/*
 			 * This can happen if someone (e.g. multipath)
 			 * queues a command to a device on an adapter
 			 * that does not support DIX.
 			 */
-			WARN_ON_ONCE(1);
-			error = BLKPREP_KILL;
-			goto err_exit;
+			ret = BLK_STS_IOERR;
+			goto out_free_sgtables;
 		}
 
 		ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);
 
 		if (sg_alloc_table_chained(&prot_sdb->table, ivecs,
-				prot_sdb->table.sgl)) {
-			error = BLKPREP_DEFER;
-			goto err_exit;
+				prot_sdb->table.sgl,
+				SCSI_INLINE_PROT_SG_CNT)) {
+			ret = BLK_STS_RESOURCE;
+			goto out_free_sgtables;
 		}
 
 		count = blk_rq_map_integrity_sg(rq->q, rq->bio,
 						prot_sdb->table.sgl);
-		BUG_ON(unlikely(count > ivecs));
-		BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q)));
+		BUG_ON(count > ivecs);
+		BUG_ON(count > queue_max_integrity_segments(rq->q));
 
 		cmd->prot_sdb = prot_sdb;
 		cmd->prot_sdb->table.nents = count;
 	}
 
-	return BLKPREP_OK;
-err_exit:
-	if (is_mq) {
-		scsi_mq_free_sgtables(cmd);
-	} else {
-		scsi_release_buffers(cmd);
-		cmd->request->special = NULL;
-		scsi_put_command(cmd);
-		put_device(&sdev->sdev_gendev);
-	}
-	return error;
+	return BLK_STS_OK;
+out_free_sgtables:
+	scsi_mq_free_sgtables(cmd);
+	return ret;
 }
 EXPORT_SYMBOL(scsi_init_io);
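
Callers now consume blk_status_t directly. A hedged sketch of a ULD
init_command path built on the new return type (example_init_command and
the CDB setup are placeholders, not from this patch):

	static blk_status_t example_init_command(struct scsi_cmnd *cmd)
	{
		blk_status_t ret = scsi_init_io(cmd);

		if (ret != BLK_STS_OK)
			return ret;	/* RESOURCE: requeue, IOERR: fail */

		/* ... build the CDB for this request ... */
		return BLK_STS_OK;
	}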
 
@@ -1251,6 +1089,18 @@
 	cmd->retries = 0;
 }
 
+/*
+ * Only called for requests that SCSI has neither completed nor freed.
+ */
+static void scsi_cleanup_rq(struct request *rq)
+{
+	if (rq->rq_flags & RQF_DONTPREP) {
+		scsi_mq_uninit_cmd(blk_mq_rq_to_pdu(rq));
+		rq->rq_flags &= ~RQF_DONTPREP;
+	}
+}
+
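+
The new hook closes a leak window: a request can be prepared (RQF_DONTPREP
set, sg tables held) and then be requeued and freed by blk-mq without ever
re-entering SCSI. A minimal sketch of the same pattern in a hypothetical
blk-mq driver (the example_* names are placeholders):

	static void example_cleanup_rq(struct request *rq)
	{
		if (rq->rq_flags & RQF_DONTPREP) {
			/* undo whatever prep allocated for this request */
			example_free_prep_resources(blk_mq_rq_to_pdu(rq));
			rq->rq_flags &= ~RQF_DONTPREP;
		}
	}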
 /* Add a command to the list used by the aacraid and dpt_i2o drivers */
 void scsi_add_cmd_to_list(struct scsi_cmnd *cmd)
 {
@@ -1312,7 +1162,8 @@
 	scsi_add_cmd_to_list(cmd);
 }
 
-static int scsi_setup_scsi_cmnd(struct scsi_device *sdev, struct request *req)
+static blk_status_t scsi_setup_scsi_cmnd(struct scsi_device *sdev,
+		struct request *req)
 {
 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
 
@@ -1323,8 +1174,8 @@
 	 * submit a request without an attached bio.
 	 */
 	if (req->bio) {
-		int ret = scsi_init_io(cmd);
-		if (unlikely(ret))
+		blk_status_t ret = scsi_init_io(cmd);
+		if (unlikely(ret != BLK_STS_OK))
 			return ret;
 	} else {
 		BUG_ON(blk_rq_bytes(req));
@@ -1336,20 +1187,21 @@
 	cmd->cmnd = scsi_req(req)->cmd;
 	cmd->transfersize = blk_rq_bytes(req);
 	cmd->allowed = scsi_req(req)->retries;
-	return BLKPREP_OK;
+	return BLK_STS_OK;
 }
 
 /*
  * Setup a normal block command.  These are simple requests from filesystems
  * that still need to be translated to SCSI CDBs from the ULD.
  */
-static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
+static blk_status_t scsi_setup_fs_cmnd(struct scsi_device *sdev,
+		struct request *req)
 {
 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
 
 	if (unlikely(sdev->handler && sdev->handler->prep_fn)) {
-		int ret = sdev->handler->prep_fn(sdev, req);
-		if (ret != BLKPREP_OK)
+		blk_status_t ret = sdev->handler->prep_fn(sdev, req);
+		if (ret != BLK_STS_OK)
 			return ret;
 	}
 
@@ -1358,7 +1210,8 @@
 	return scsi_cmd_to_driver(cmd)->init_command(cmd);
 }
 
-static int scsi_setup_cmnd(struct scsi_device *sdev, struct request *req)
+static blk_status_t scsi_setup_cmnd(struct scsi_device *sdev,
+		struct request *req)
 {
 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
 
@@ -1375,129 +1228,48 @@
 		return scsi_setup_fs_cmnd(sdev, req);
 }
 
-static int
+static blk_status_t
 scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
 {
-	int ret = BLKPREP_OK;
-
-	/*
-	 * If the device is not in running state we will reject some
-	 * or all commands.
-	 */
-	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
-		switch (sdev->sdev_state) {
-		case SDEV_OFFLINE:
-		case SDEV_TRANSPORT_OFFLINE:
-			/*
-			 * If the device is offline we refuse to process any
-			 * commands.  The device must be brought online
-			 * before trying any recovery commands.
-			 */
-			sdev_printk(KERN_ERR, sdev,
-				    "rejecting I/O to offline device\n");
-			ret = BLKPREP_KILL;
-			break;
-		case SDEV_DEL:
-			/*
-			 * If the device is fully deleted, we refuse to
-			 * process any commands as well.
-			 */
-			sdev_printk(KERN_ERR, sdev,
-				    "rejecting I/O to dead device\n");
-			ret = BLKPREP_KILL;
-			break;
-		case SDEV_BLOCK:
-		case SDEV_CREATED_BLOCK:
-			ret = BLKPREP_DEFER;
-			break;
-		case SDEV_QUIESCE:
-			/*
-			 * If the devices is blocked we defer normal commands.
-			 */
-			if (req && !(req->rq_flags & RQF_PREEMPT))
-				ret = BLKPREP_DEFER;
-			break;
-		default:
-			/*
-			 * For any other not fully online state we only allow
-			 * special commands.  In particular any user initiated
-			 * command is not allowed.
-			 */
-			if (req && !(req->rq_flags & RQF_PREEMPT))
-				ret = BLKPREP_KILL;
-			break;
-		}
-	}
-	return ret;
-}
-
-static int
-scsi_prep_return(struct request_queue *q, struct request *req, int ret)
-{
-	struct scsi_device *sdev = q->queuedata;
-
-	switch (ret) {
-	case BLKPREP_KILL:
-	case BLKPREP_INVALID:
-		scsi_req(req)->result = DID_NO_CONNECT << 16;
-		/* release the command and kill it */
-		if (req->special) {
-			struct scsi_cmnd *cmd = req->special;
-			scsi_release_buffers(cmd);
-			scsi_put_command(cmd);
-			put_device(&sdev->sdev_gendev);
-			req->special = NULL;
-		}
-		break;
-	case BLKPREP_DEFER:
+	switch (sdev->sdev_state) {
+	case SDEV_OFFLINE:
+	case SDEV_TRANSPORT_OFFLINE:
 		/*
-		 * If we defer, the blk_peek_request() returns NULL, but the
-		 * queue must be restarted, so we schedule a callback to happen
-		 * shortly.
+		 * If the device is offline we refuse to process any
+		 * commands.  The device must be brought online
+		 * before trying any recovery commands.
 		 */
-		if (atomic_read(&sdev->device_busy) == 0)
-			blk_delay_queue(q, SCSI_QUEUE_DELAY);
-		break;
+		sdev_printk(KERN_ERR, sdev,
+			    "rejecting I/O to offline device\n");
+		return BLK_STS_IOERR;
+	case SDEV_DEL:
+		/*
+		 * If the device is fully deleted, we refuse to
+		 * process any commands as well.
+		 */
+		sdev_printk(KERN_ERR, sdev,
+			    "rejecting I/O to dead device\n");
+		return BLK_STS_IOERR;
+	case SDEV_BLOCK:
+	case SDEV_CREATED_BLOCK:
+		return BLK_STS_RESOURCE;
+	case SDEV_QUIESCE:
+		/*
+		 * If the device is blocked we defer normal commands.
+		 */
+		if (req && !(req->rq_flags & RQF_PREEMPT))
+			return BLK_STS_RESOURCE;
+		return BLK_STS_OK;
 	default:
-		req->rq_flags |= RQF_DONTPREP;
+		/*
+		 * For any other not fully online state we only allow
+		 * special commands.  In particular any user initiated
+		 * command is not allowed.
+		 */
+		if (req && !(req->rq_flags & RQF_PREEMPT))
+			return BLK_STS_IOERR;
+		return BLK_STS_OK;
 	}
-
-	return ret;
-}
-
-static int scsi_prep_fn(struct request_queue *q, struct request *req)
-{
-	struct scsi_device *sdev = q->queuedata;
-	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
-	int ret;
-
-	ret = scsi_prep_state_check(sdev, req);
-	if (ret != BLKPREP_OK)
-		goto out;
-
-	if (!req->special) {
-		/* Bail if we can't get a reference to the device */
-		if (unlikely(!get_device(&sdev->sdev_gendev))) {
-			ret = BLKPREP_DEFER;
-			goto out;
-		}
-
-		scsi_init_command(sdev, cmd);
-		req->special = cmd;
-	}
-
-	cmd->tag = req->tag;
-	cmd->request = req;
-	cmd->prot_op = SCSI_PROT_NORMAL;
-
-	ret = scsi_setup_cmnd(sdev, req);
-out:
-	return scsi_prep_return(q, req, ret);
-}
-
-static void scsi_unprep_fn(struct request_queue *q, struct request *req)
-{
-	scsi_uninit_cmd(blk_mq_rq_to_pdu(req));
 }
 
 /*
@@ -1519,14 +1291,8 @@
 		/*
 		 * unblock after device_blocked iterates to zero
 		 */
-		if (atomic_dec_return(&sdev->device_blocked) > 0) {
-			/*
-			 * For the MQ case we take care of this in the caller.
-			 */
-			if (!q->mq_ops)
-				blk_delay_queue(q, SCSI_QUEUE_DELAY);
+		if (atomic_dec_return(&sdev->device_blocked) > 0)
 			goto out_dec;
-		}
 		SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev,
 				   "unblocking device at zero depth\n"));
 	}
@@ -1661,13 +1427,13 @@
  * needs to return 'not busy'. Otherwise, request stacking drivers
  * may hold requests forever.
  */
-static int scsi_lld_busy(struct request_queue *q)
+static bool scsi_mq_lld_busy(struct request_queue *q)
 {
 	struct scsi_device *sdev = q->queuedata;
 	struct Scsi_Host *shost;
 
 	if (blk_queue_dying(q))
-		return 0;
+		return false;
 
 	shost = sdev->host;
 
@@ -1678,43 +1444,9 @@
 	 * in SCSI layer.
 	 */
 	if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
-		return 1;
+		return true;
 
-	return 0;
-}
-
-/*
- * Kill a request for a dead device
- */
-static void scsi_kill_request(struct request *req, struct request_queue *q)
-{
-	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
-	struct scsi_device *sdev;
-	struct scsi_target *starget;
-	struct Scsi_Host *shost;
-
-	blk_start_request(req);
-
-	scmd_printk(KERN_INFO, cmd, "killing request\n");
-
-	sdev = cmd->device;
-	starget = scsi_target(sdev);
-	shost = sdev->host;
-	scsi_init_cmd_errh(cmd);
-	cmd->result = DID_NO_CONNECT << 16;
-	atomic_inc(&cmd->device->iorequest_cnt);
-
-	/*
-	 * SCSI request completion path will do scsi_device_unbusy(),
-	 * bump busy counts.  To bump the counters, we need to dance
-	 * with the locks as normal issue path does.
-	 */
-	atomic_inc(&sdev->device_busy);
-	atomic_inc(&shost->host_busy);
-	if (starget->can_queue > 0)
-		atomic_inc(&starget->target_busy);
-
-	blk_complete_request(req);
+	return false;
 }
 
 static void scsi_softirq_done(struct request *rq)
@@ -1732,7 +1464,7 @@
 	disposition = scsi_decide_disposition(cmd);
 	if (disposition != SUCCESS &&
 	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
-		sdev_printk(KERN_ERR, cmd->device,
+		scmd_printk(KERN_ERR, cmd,
 			    "timing out command, waited %lus\n",
 			    wait_for/HZ);
 		disposition = SUCCESS;
@@ -1837,178 +1569,14 @@
 	return 0;
 }
 
-/**
- * scsi_done - Invoke completion on finished SCSI command.
- * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
- * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
- *
- * Description: This function is the mid-level's (SCSI Core) interrupt routine,
- * which regains ownership of the SCSI command (de facto) from a LLDD, and
- * calls blk_complete_request() for further processing.
- *
- * This function is interrupt context safe.
- */
-static void scsi_done(struct scsi_cmnd *cmd)
-{
-	trace_scsi_dispatch_cmd_done(cmd);
-	blk_complete_request(cmd->request);
-}
-
-/*
- * Function:    scsi_request_fn()
- *
- * Purpose:     Main strategy routine for SCSI.
- *
- * Arguments:   q       - Pointer to actual queue.
- *
- * Returns:     Nothing
- *
- * Lock status: request queue lock assumed to be held when called.
- *
- * Note: See sd_zbc.c sd_zbc_write_lock_zone() for write order
- * protection for ZBC disks.
- */
-static void scsi_request_fn(struct request_queue *q)
-	__releases(q->queue_lock)
-	__acquires(q->queue_lock)
-{
-	struct scsi_device *sdev = q->queuedata;
-	struct Scsi_Host *shost;
-	struct scsi_cmnd *cmd;
-	struct request *req;
-
-	/*
-	 * To start with, we keep looping until the queue is empty, or until
-	 * the host is no longer able to accept any more requests.
-	 */
-	shost = sdev->host;
-	for (;;) {
-		int rtn;
-		/*
-		 * get next queueable request.  We do this early to make sure
-		 * that the request is fully prepared even if we cannot
-		 * accept it.
-		 */
-		req = blk_peek_request(q);
-		if (!req)
-			break;
-
-		if (unlikely(!scsi_device_online(sdev))) {
-			sdev_printk(KERN_ERR, sdev,
-				    "rejecting I/O to offline device\n");
-			scsi_kill_request(req, q);
-			continue;
-		}
-
-		if (!scsi_dev_queue_ready(q, sdev))
-			break;
-
-		/*
-		 * Remove the request from the request list.
-		 */
-		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
-			blk_start_request(req);
-
-		spin_unlock_irq(q->queue_lock);
-		cmd = blk_mq_rq_to_pdu(req);
-		if (cmd != req->special) {
-			printk(KERN_CRIT "impossible request in %s.\n"
-					 "please mail a stack trace to "
-					 "linux-scsi@vger.kernel.org\n",
-					 __func__);
-			blk_dump_rq_flags(req, "foo");
-			BUG();
-		}
-
-		/*
-		 * We hit this when the driver is using a host wide
-		 * tag map. For device level tag maps the queue_depth check
-		 * in the device ready fn would prevent us from trying
-		 * to allocate a tag. Since the map is a shared host resource
-		 * we add the dev to the starved list so it eventually gets
-		 * a run when a tag is freed.
-		 */
-		if (blk_queue_tagged(q) && !(req->rq_flags & RQF_QUEUED)) {
-			spin_lock_irq(shost->host_lock);
-			if (list_empty(&sdev->starved_entry))
-				list_add_tail(&sdev->starved_entry,
-					      &shost->starved_list);
-			spin_unlock_irq(shost->host_lock);
-			goto not_ready;
-		}
-
-		if (!scsi_target_queue_ready(shost, sdev))
-			goto not_ready;
-
-		if (!scsi_host_queue_ready(q, shost, sdev))
-			goto host_not_ready;
-	
-		if (sdev->simple_tags)
-			cmd->flags |= SCMD_TAGGED;
-		else
-			cmd->flags &= ~SCMD_TAGGED;
-
-		/*
-		 * Finally, initialize any error handling parameters, and set up
-		 * the timers for timeouts.
-		 */
-		scsi_init_cmd_errh(cmd);
-
-		/*
-		 * Dispatch the command to the low-level driver.
-		 */
-		cmd->scsi_done = scsi_done;
-		rtn = scsi_dispatch_cmd(cmd);
-		if (rtn) {
-			scsi_queue_insert(cmd, rtn);
-			spin_lock_irq(q->queue_lock);
-			goto out_delay;
-		}
-		spin_lock_irq(q->queue_lock);
-	}
-
-	return;
-
- host_not_ready:
-	if (scsi_target(sdev)->can_queue > 0)
-		atomic_dec(&scsi_target(sdev)->target_busy);
- not_ready:
-	/*
-	 * lock q, handle tag, requeue req, and decrement device_busy. We
-	 * must return with queue_lock held.
-	 *
-	 * Decrementing device_busy without checking it is OK, as all such
-	 * cases (host limits or settings) should run the queue at some
-	 * later time.
-	 */
-	spin_lock_irq(q->queue_lock);
-	blk_requeue_request(q, req);
-	atomic_dec(&sdev->device_busy);
-out_delay:
-	if (!atomic_read(&sdev->device_busy) && !scsi_device_blocked(sdev))
-		blk_delay_queue(q, SCSI_QUEUE_DELAY);
-}
-
-static inline blk_status_t prep_to_mq(int ret)
-{
-	switch (ret) {
-	case BLKPREP_OK:
-		return BLK_STS_OK;
-	case BLKPREP_DEFER:
-		return BLK_STS_RESOURCE;
-	default:
-		return BLK_STS_IOERR;
-	}
-}
-
 /* Size in bytes of the sg-list stored in the scsi-mq command-private data. */
-static unsigned int scsi_mq_sgl_size(struct Scsi_Host *shost)
+static unsigned int scsi_mq_inline_sgl_size(struct Scsi_Host *shost)
 {
-	return min_t(unsigned int, shost->sg_tablesize, SG_CHUNK_SIZE) *
+	return min_t(unsigned int, shost->sg_tablesize, SCSI_INLINE_SG_CNT) *
 		sizeof(struct scatterlist);
 }
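
As a worked example, assuming the default SCSI_INLINE_SG_CNT of 2 and a
32-byte struct scatterlist on a typical 64-bit build: a host advertising
sg_tablesize = 128 now reserves min(128, 2) * 32 = 64 bytes of inline sg
space per command, where the old min(128, SG_CHUNK_SIZE = 128) * 32 came
to 4096 bytes.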
 
-static int scsi_mq_prep_fn(struct request *req)
+static blk_status_t scsi_mq_prep_fn(struct request *req)
 {
 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
 	struct scsi_device *sdev = req->q->queuedata;
@@ -2017,10 +1585,7 @@
 
 	scsi_init_command(sdev, cmd);
 
-	req->special = cmd;
-
 	cmd->request = req;
-
 	cmd->tag = req->tag;
 	cmd->prot_op = SCSI_PROT_NORMAL;
 
@@ -2034,17 +1599,6 @@
 			(struct scatterlist *)(cmd->prot_sdb + 1);
 	}
 
-	if (blk_bidi_rq(req)) {
-		struct request *next_rq = req->next_rq;
-		struct scsi_data_buffer *bidi_sdb = blk_mq_rq_to_pdu(next_rq);
-
-		memset(bidi_sdb, 0, sizeof(struct scsi_data_buffer));
-		bidi_sdb->table.sgl =
-			(struct scatterlist *)(bidi_sdb + 1);
-
-		next_rq->special = bidi_sdb;
-	}
-
 	blk_mq_start_request(req);
 
 	return scsi_setup_cmnd(sdev, req);
@@ -2052,8 +1606,18 @@
 
 static void scsi_mq_done(struct scsi_cmnd *cmd)
 {
+	if (unlikely(test_and_set_bit(SCMD_STATE_COMPLETE, &cmd->state)))
+		return;
 	trace_scsi_dispatch_cmd_done(cmd);
-	blk_mq_complete_request(cmd->request);
+
+	/*
+	 * If the block layer didn't complete the request due to a timeout
+	 * injection, scsi must clear its internal completed state so that the
+	 * timeout handler will see it needs to escalate its own error
+	 * recovery.
+	 */
+	if (unlikely(!blk_mq_complete_request(cmd->request)))
+		clear_bit(SCMD_STATE_COMPLETE, &cmd->state);
 }
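
The SCMD_STATE_COMPLETE bit lets the completion and timeout paths race
safely: whichever side sets it first owns the command. A condensed sketch
of the protocol (illustrative only, not new code):

	/*
	 *   normal completion                timeout handling
	 *   -----------------                ----------------
	 *   test_and_set_bit(COMPLETE)       test_and_set_bit(COMPLETE)
	 *     was clear -> completion wins,    was clear -> timeout wins,
	 *       blk_mq_complete_request()        escalate error recovery
	 *     was set   -> timeout owns the    was set   -> already done,
	 *       command, return quietly          just end the timeout
	 */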
 
 static void scsi_mq_put_budget(struct blk_mq_hw_ctx *hctx)
@@ -2062,7 +1626,6 @@
 	struct scsi_device *sdev = q->queuedata;
 
 	atomic_dec(&sdev->device_busy);
-	put_device(&sdev->sdev_gendev);
 }
 
 static bool scsi_mq_get_budget(struct blk_mq_hw_ctx *hctx)
@@ -2070,16 +1633,9 @@
 	struct request_queue *q = hctx->queue;
 	struct scsi_device *sdev = q->queuedata;
 
-	if (!get_device(&sdev->sdev_gendev))
-		goto out;
-	if (!scsi_dev_queue_ready(q, sdev))
-		goto out_put_device;
+	if (scsi_dev_queue_ready(q, sdev))
+		return true;
 
-	return true;
-
-out_put_device:
-	put_device(&sdev->sdev_gendev);
-out:
 	if (atomic_read(&sdev->device_busy) == 0 && !scsi_device_blocked(sdev))
 		blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
 	return false;
@@ -2096,9 +1652,15 @@
 	blk_status_t ret;
 	int reason;
 
-	ret = prep_to_mq(scsi_prep_state_check(sdev, req));
-	if (ret != BLK_STS_OK)
-		goto out_put_budget;
+	/*
+	 * If the device is not in running state we will reject some or all
+	 * commands.
+	 */
+	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
+		ret = scsi_prep_state_check(sdev, req);
+		if (ret != BLK_STS_OK)
+			goto out_put_budget;
+	}
 
 	ret = BLK_STS_RESOURCE;
 	if (!scsi_target_queue_ready(shost, sdev))
@@ -2107,18 +1669,20 @@
 		goto out_dec_target_busy;
 
 	if (!(req->rq_flags & RQF_DONTPREP)) {
-		ret = prep_to_mq(scsi_mq_prep_fn(req));
+		ret = scsi_mq_prep_fn(req);
 		if (ret != BLK_STS_OK)
 			goto out_dec_host_busy;
 		req->rq_flags |= RQF_DONTPREP;
 	} else {
+		clear_bit(SCMD_STATE_COMPLETE, &cmd->state);
 		blk_mq_start_request(req);
 	}
 
+	cmd->flags &= SCMD_PRESERVED_FLAGS;
 	if (sdev->simple_tags)
 		cmd->flags |= SCMD_TAGGED;
-	else
-		cmd->flags &= ~SCMD_TAGGED;
+	if (bd->last)
+		cmd->flags |= SCMD_LAST;
 
 	scsi_init_cmd_errh(cmd);
 	cmd->scsi_done = scsi_mq_done;
@@ -2148,8 +1712,12 @@
 			ret = BLK_STS_DEV_RESOURCE;
 		break;
 	default:
+		if (unlikely(!scsi_device_online(sdev)))
+			scsi_req(req)->result = DID_NO_CONNECT << 16;
+		else
+			scsi_req(req)->result = DID_ERROR << 16;
 		/*
-		 * Make sure to release all allocated ressources when
+		 * Make sure to release all allocated resources when
 		 * we hit an error, as we will never see this command
 		 * again.
 		 */
@@ -2187,7 +1755,7 @@
 	if (scsi_host_get_prot(shost)) {
 		sg = (void *)cmd + sizeof(struct scsi_cmnd) +
 			shost->hostt->cmd_size;
-		cmd->prot_sdb = (void *)sg + scsi_mq_sgl_size(shost);
+		cmd->prot_sdb = (void *)sg + scsi_mq_inline_sgl_size(shost);
 	}
 
 	return 0;
@@ -2208,7 +1776,7 @@
 
 	if (shost->hostt->map_queues)
 		return shost->hostt->map_queues(shost);
-	return blk_mq_map_queues(set);
+	return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
 }
 
 void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
@@ -2229,16 +1797,19 @@
 		blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
 	}
 
+	if (dev->dma_mask) {
+		shost->max_sectors = min_t(unsigned int, shost->max_sectors,
+				dma_max_mapping_size(dev) >> SECTOR_SHIFT);
+	}
 	blk_queue_max_hw_sectors(q, shost->max_sectors);
 	if (shost->unchecked_isa_dma)
 		blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
 	blk_queue_segment_boundary(q, shost->dma_boundary);
 	dma_set_seg_boundary(dev, shost->dma_boundary);
 
-	blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
-
-	if (!shost->use_clustering)
-		q->limits.cluster = 0;
+	blk_queue_max_segment_size(q, shost->max_segment_size);
+	blk_queue_virt_boundary(q, shost->virt_boundary_mask);
+	dma_set_max_seg_size(dev, queue_max_segment_size(q));
 
 	/*
 	 * Set a reasonable default alignment:  The larger of 32-byte (dword),
@@ -2251,78 +1822,7 @@
 }
 EXPORT_SYMBOL_GPL(__scsi_init_queue);
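
As a worked example with assumed numbers: on a system where swiotlb bounce
buffering caps dma_max_mapping_size(dev) at 256 KiB, a host advertising
max_sectors = 2048 (1 MiB) is clamped to 256 KiB >> SECTOR_SHIFT = 512
sectors, so any single request remains DMA-mappable.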
 
-static int scsi_old_init_rq(struct request_queue *q, struct request *rq,
-			    gfp_t gfp)
-{
-	struct Scsi_Host *shost = q->rq_alloc_data;
-	const bool unchecked_isa_dma = shost->unchecked_isa_dma;
-	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
-
-	memset(cmd, 0, sizeof(*cmd));
-
-	if (unchecked_isa_dma)
-		cmd->flags |= SCMD_UNCHECKED_ISA_DMA;
-	cmd->sense_buffer = scsi_alloc_sense_buffer(unchecked_isa_dma, gfp,
-						    NUMA_NO_NODE);
-	if (!cmd->sense_buffer)
-		goto fail;
-	cmd->req.sense = cmd->sense_buffer;
-
-	if (scsi_host_get_prot(shost) >= SHOST_DIX_TYPE0_PROTECTION) {
-		cmd->prot_sdb = kmem_cache_zalloc(scsi_sdb_cache, gfp);
-		if (!cmd->prot_sdb)
-			goto fail_free_sense;
-	}
-
-	return 0;
-
-fail_free_sense:
-	scsi_free_sense_buffer(unchecked_isa_dma, cmd->sense_buffer);
-fail:
-	return -ENOMEM;
-}
-
-static void scsi_old_exit_rq(struct request_queue *q, struct request *rq)
-{
-	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
-
-	if (cmd->prot_sdb)
-		kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb);
-	scsi_free_sense_buffer(cmd->flags & SCMD_UNCHECKED_ISA_DMA,
-			       cmd->sense_buffer);
-}
-
-struct request_queue *scsi_old_alloc_queue(struct scsi_device *sdev)
-{
-	struct Scsi_Host *shost = sdev->host;
-	struct request_queue *q;
-
-	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, NULL);
-	if (!q)
-		return NULL;
-	q->cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
-	q->rq_alloc_data = shost;
-	q->request_fn = scsi_request_fn;
-	q->init_rq_fn = scsi_old_init_rq;
-	q->exit_rq_fn = scsi_old_exit_rq;
-	q->initialize_rq_fn = scsi_initialize_rq;
-
-	if (blk_init_allocated_queue(q) < 0) {
-		blk_cleanup_queue(q);
-		return NULL;
-	}
-
-	__scsi_init_queue(shost, q);
-	blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
-	blk_queue_prep_rq(q, scsi_prep_fn);
-	blk_queue_unprep_rq(q, scsi_unprep_fn);
-	blk_queue_softirq_done(q, scsi_softirq_done);
-	blk_queue_rq_timed_out(q, scsi_times_out);
-	blk_queue_lld_busy(q, scsi_lld_busy);
-	return q;
-}
-
-static const struct blk_mq_ops scsi_mq_ops = {
+static const struct blk_mq_ops scsi_mq_ops_no_commit = {
 	.get_budget	= scsi_mq_get_budget,
 	.put_budget	= scsi_mq_put_budget,
 	.queue_rq	= scsi_queue_rq,
@@ -2334,6 +1834,36 @@
 	.init_request	= scsi_mq_init_request,
 	.exit_request	= scsi_mq_exit_request,
 	.initialize_rq_fn = scsi_initialize_rq,
+	.cleanup_rq	= scsi_cleanup_rq,
+	.busy		= scsi_mq_lld_busy,
+	.map_queues	= scsi_map_queues,
+};
+
+
+static void scsi_commit_rqs(struct blk_mq_hw_ctx *hctx)
+{
+	struct request_queue *q = hctx->queue;
+	struct scsi_device *sdev = q->queuedata;
+	struct Scsi_Host *shost = sdev->host;
+
+	shost->hostt->commit_rqs(shost, hctx->queue_num);
+}
+
+static const struct blk_mq_ops scsi_mq_ops = {
+	.get_budget	= scsi_mq_get_budget,
+	.put_budget	= scsi_mq_put_budget,
+	.queue_rq	= scsi_queue_rq,
+	.commit_rqs	= scsi_commit_rqs,
+	.complete	= scsi_softirq_done,
+	.timeout	= scsi_timeout,
+#ifdef CONFIG_BLK_DEBUG_FS
+	.show_rq	= scsi_show_rq,
+#endif
+	.init_request	= scsi_mq_init_request,
+	.exit_request	= scsi_mq_exit_request,
+	.initialize_rq_fn = scsi_initialize_rq,
+	.cleanup_rq	= scsi_cleanup_rq,
+	.busy		= scsi_mq_lld_busy,
 	.map_queues	= scsi_map_queues,
 };
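
The commit_rqs hook lets drivers that batch submissions ring their doorbell
once per queue run instead of once per request (paired with the SCMD_LAST
flag set from bd->last in scsi_queue_rq() above). A hedged sketch of an LLD
wiring it up; the example_* names and the doorbell helper are hypothetical:

	static void example_commit_rqs(struct Scsi_Host *shost, u16 hwq)
	{
		struct example_hba *hba = shost_priv(shost);

		example_ring_doorbell(hba, hwq);	/* kick the batch */
	}

	static struct scsi_host_template example_template = {
		/* ... */
		.queuecommand	= example_queuecommand,
		.commit_rqs	= example_commit_rqs,
	};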
 
@@ -2353,18 +1883,23 @@
 {
 	unsigned int cmd_size, sgl_size;
 
-	sgl_size = scsi_mq_sgl_size(shost);
+	sgl_size = max_t(unsigned int, sizeof(struct scatterlist),
+				scsi_mq_inline_sgl_size(shost));
 	cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size;
 	if (scsi_host_get_prot(shost))
-		cmd_size += sizeof(struct scsi_data_buffer) + sgl_size;
+		cmd_size += sizeof(struct scsi_data_buffer) +
+			sizeof(struct scatterlist) * SCSI_INLINE_PROT_SG_CNT;
 
 	memset(&shost->tag_set, 0, sizeof(shost->tag_set));
-	shost->tag_set.ops = &scsi_mq_ops;
+	if (shost->hostt->commit_rqs)
+		shost->tag_set.ops = &scsi_mq_ops;
+	else
+		shost->tag_set.ops = &scsi_mq_ops_no_commit;
 	shost->tag_set.nr_hw_queues = shost->nr_hw_queues ? : 1;
 	shost->tag_set.queue_depth = shost->can_queue;
 	shost->tag_set.cmd_size = cmd_size;
 	shost->tag_set.numa_node = NUMA_NO_NODE;
-	shost->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
+	shost->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 	shost->tag_set.flags |=
 		BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
 	shost->tag_set.driver_data = shost;
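
As a worked layout: each command PDU is struct scsi_cmnd, then
hostt->cmd_size bytes of LLD private data, then the inline data sg entries
(at least one scatterlist, per the max_t above); with protection enabled it
grows by one struct scsi_data_buffer plus SCSI_INLINE_PROT_SG_CNT (default
1) scatterlist entries instead of a second full inline table.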
@@ -2388,10 +1923,8 @@
 {
 	struct scsi_device *sdev = NULL;
 
-	if (q->mq_ops) {
-		if (q->mq_ops == &scsi_mq_ops)
-			sdev = q->queuedata;
-	} else if (q->request_fn == scsi_request_fn)
+	if (q->mq_ops == &scsi_mq_ops_no_commit ||
+	    q->mq_ops == &scsi_mq_ops)
 		sdev = q->queuedata;
 	if (!sdev || !get_device(&sdev->sdev_gendev))
 		sdev = NULL;
@@ -2761,6 +2294,7 @@
 		switch (oldstate) {
 		case SDEV_RUNNING:
 		case SDEV_CREATED_BLOCK:
+		case SDEV_OFFLINE:
 			break;
 		default:
 			goto illegal;
@@ -2994,39 +2528,6 @@
 EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
 
 /**
- * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
- * @sdev: SCSI device to count the number of scsi_request_fn() callers for.
- */
-static int scsi_request_fn_active(struct scsi_device *sdev)
-{
-	struct request_queue *q = sdev->request_queue;
-	int request_fn_active;
-
-	WARN_ON_ONCE(sdev->host->use_blk_mq);
-
-	spin_lock_irq(q->queue_lock);
-	request_fn_active = q->request_fn_active;
-	spin_unlock_irq(q->queue_lock);
-
-	return request_fn_active;
-}
-
-/**
- * scsi_wait_for_queuecommand() - wait for ongoing queuecommand() calls
- * @sdev: SCSI device pointer.
- *
- * Wait until the ongoing shost->hostt->queuecommand() calls that are
- * invoked from scsi_request_fn() have finished.
- */
-static void scsi_wait_for_queuecommand(struct scsi_device *sdev)
-{
-	WARN_ON_ONCE(sdev->host->use_blk_mq);
-
-	while (scsi_request_fn_active(sdev))
-		msleep(20);
-}
-
-/**
  *	scsi_device_quiesce - Block user issued commands.
  *	@sdev:	scsi device to quiesce.
  *
@@ -3054,11 +2555,14 @@
 	 */
 	WARN_ON_ONCE(sdev->quiesced_by && sdev->quiesced_by != current);
 
-	blk_set_preempt_only(q);
+	if (sdev->quiesced_by == current)
+		return 0;
+
+	blk_set_pm_only(q);
 
 	blk_mq_freeze_queue(q);
 	/*
-	 * Ensure that the effect of blk_set_preempt_only() will be visible
+	 * Ensure that the effect of blk_set_pm_only() will be visible
 	 * for percpu_ref_tryget() callers that occur after the queue
 	 * unfreeze even if the queue was already frozen before this function
 	 * was called. See also https://lwn.net/Articles/573497/.
@@ -3071,7 +2575,7 @@
 	if (err == 0)
 		sdev->quiesced_by = current;
 	else
-		blk_clear_preempt_only(q);
+		blk_clear_pm_only(q);
 	mutex_unlock(&sdev->state_mutex);
 
 	return err;
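
A hedged usage sketch of the quiesce/resume pairing from a hypothetical
suspend path (only the pairing is the point; example_suspend is a
placeholder):

	static int example_suspend(struct scsi_device *sdev)
	{
		int err = scsi_device_quiesce(sdev);

		if (err)
			return err;
		/*
		 * The queue is pm-only now; only requests allocated with
		 * BLK_MQ_REQ_PREEMPT are processed until we resume.
		 */
		/* ... issue power management commands ... */
		scsi_device_resume(sdev);
		return 0;
	}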
@@ -3094,9 +2598,10 @@
 	 * device deleted during suspend)
 	 */
 	mutex_lock(&sdev->state_mutex);
-	WARN_ON_ONCE(!sdev->quiesced_by);
-	sdev->quiesced_by = NULL;
-	blk_clear_preempt_only(sdev->request_queue);
+	if (sdev->quiesced_by) {
+		sdev->quiesced_by = NULL;
+		blk_clear_pm_only(sdev->request_queue);
+	}
 	if (sdev->sdev_state == SDEV_QUIESCE)
 		scsi_device_set_state(sdev, SDEV_RUNNING);
 	mutex_unlock(&sdev->state_mutex);
@@ -3146,7 +2651,6 @@
 int scsi_internal_device_block_nowait(struct scsi_device *sdev)
 {
 	struct request_queue *q = sdev->request_queue;
-	unsigned long flags;
 	int err = 0;
 
 	err = scsi_device_set_state(sdev, SDEV_BLOCK);
@@ -3162,14 +2666,7 @@
 	 * block layer from calling the midlayer with this device's
 	 * request queue. 
 	 */
-	if (q->mq_ops) {
-		blk_mq_quiesce_queue_nowait(q);
-	} else {
-		spin_lock_irqsave(q->queue_lock, flags);
-		blk_stop_queue(q);
-		spin_unlock_irqrestore(q->queue_lock, flags);
-	}
-
+	blk_mq_quiesce_queue_nowait(q);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(scsi_internal_device_block_nowait);
@@ -3188,10 +2685,6 @@
  * a legal transition). When the device is in this state, command processing
  * is paused until the device leaves the SDEV_BLOCK state. See also
  * scsi_internal_device_unblock().
- *
- * To do: avoid that scsi_send_eh_cmnd() calls queuecommand() after
- * scsi_internal_device_block() has blocked a SCSI device and also
- * remove the rport mutex lock and unlock calls from srp_queuecommand().
  */
 static int scsi_internal_device_block(struct scsi_device *sdev)
 {
@@ -3200,12 +2693,8 @@
 
 	mutex_lock(&sdev->state_mutex);
 	err = scsi_internal_device_block_nowait(sdev);
-	if (err == 0) {
-		if (q->mq_ops)
-			blk_mq_quiesce_queue(q);
-		else
-			scsi_wait_for_queuecommand(sdev);
-	}
+	if (err == 0)
+		blk_mq_quiesce_queue(q);
 	mutex_unlock(&sdev->state_mutex);
 
 	return err;
@@ -3214,15 +2703,8 @@
 void scsi_start_queue(struct scsi_device *sdev)
 {
 	struct request_queue *q = sdev->request_queue;
-	unsigned long flags;
 
-	if (q->mq_ops) {
-		blk_mq_unquiesce_queue(q);
-	} else {
-		spin_lock_irqsave(q->queue_lock, flags);
-		blk_start_queue(q);
-		spin_unlock_irqrestore(q->queue_lock, flags);
-	}
+	blk_mq_unquiesce_queue(q);
 }
 
 /**
@@ -3243,6 +2725,14 @@
 int scsi_internal_device_unblock_nowait(struct scsi_device *sdev,
 					enum scsi_device_state new_state)
 {
+	switch (new_state) {
+	case SDEV_RUNNING:
+	case SDEV_TRANSPORT_OFFLINE:
+		break;
+	default:
+		return -EINVAL;
+	}
+
 	/*
 	 * Try to transition the scsi device to SDEV_RUNNING or one of the
 	 * offlined states and goose the device queue if successful.
@@ -3300,7 +2790,12 @@
 static void
 device_block(struct scsi_device *sdev, void *data)
 {
-	scsi_internal_device_block(sdev);
+	int ret;
+
+	ret = scsi_internal_device_block(sdev);
+
+	WARN_ONCE(ret, "scsi_internal_device_block(%s) failed: ret = %d\n",
+		  dev_name(&sdev->sdev_gendev), ret);
 }
 
 static int
diff --git a/drivers/scsi/scsi_logging.c b/drivers/scsi/scsi_logging.c
index bd70339..c6ed0b1 100644
--- a/drivers/scsi/scsi_logging.c
+++ b/drivers/scsi/scsi_logging.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * scsi_logging.c
  *
  * Copyright (C) 2014 SUSE Linux Products GmbH
  * Copyright (C) 2014 Hannes Reinecke <hare@suse.de>
- *
- * This file is released under the GPLv2
  */
 
 #include <linux/kernel.h>
@@ -16,57 +15,15 @@
 #include <scsi/scsi_eh.h>
 #include <scsi/scsi_dbg.h>
 
-#define SCSI_LOG_SPOOLSIZE 4096
-
-#if (SCSI_LOG_SPOOLSIZE / SCSI_LOG_BUFSIZE) > BITS_PER_LONG
-#warning SCSI logging bitmask too large
-#endif
-
-struct scsi_log_buf {
-	char buffer[SCSI_LOG_SPOOLSIZE];
-	unsigned long map;
-};
-
-static DEFINE_PER_CPU(struct scsi_log_buf, scsi_format_log);
-
 static char *scsi_log_reserve_buffer(size_t *len)
 {
-	struct scsi_log_buf *buf;
-	unsigned long map_bits = sizeof(buf->buffer) / SCSI_LOG_BUFSIZE;
-	unsigned long idx = 0;
-
-	preempt_disable();
-	buf = this_cpu_ptr(&scsi_format_log);
-	idx = find_first_zero_bit(&buf->map, map_bits);
-	if (likely(idx < map_bits)) {
-		while (test_and_set_bit(idx, &buf->map)) {
-			idx = find_next_zero_bit(&buf->map, map_bits, idx);
-			if (idx >= map_bits)
-				break;
-		}
-	}
-	if (WARN_ON(idx >= map_bits)) {
-		preempt_enable();
-		return NULL;
-	}
-	*len = SCSI_LOG_BUFSIZE;
-	return buf->buffer + idx * SCSI_LOG_BUFSIZE;
+	*len = 128;
+	return kmalloc(*len, GFP_ATOMIC);
 }
 
 static void scsi_log_release_buffer(char *bufptr)
 {
-	struct scsi_log_buf *buf;
-	unsigned long idx;
-	int ret;
-
-	buf = this_cpu_ptr(&scsi_format_log);
-	if (bufptr >= buf->buffer &&
-	    bufptr < buf->buffer + SCSI_LOG_SPOOLSIZE) {
-		idx = (bufptr - buf->buffer) / SCSI_LOG_BUFSIZE;
-		ret = test_and_clear_bit(idx, &buf->map);
-		WARN_ON(!ret);
-	}
-	preempt_enable();
+	kfree(bufptr);
 }
 
 static inline const char *scmd_name(const struct scsi_cmnd *scmd)
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
index 50e624f..d7f76fd 100644
--- a/drivers/scsi/scsi_netlink.c
+++ b/drivers/scsi/scsi_netlink.c
@@ -1,22 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  *  scsi_netlink.c  - SCSI Transport Netlink Interface
  *
  *  Copyright (C) 2006   James Smart, Emulex Corporation
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; either version 2 of the License, or
- *  (at your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
  */
 #include <linux/time.h>
 #include <linux/jiffies.h>
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index b44c1bb..3717eea 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  *	scsi_pm.c	Copyright (C) 2010 Alan Stern
  *
@@ -8,6 +9,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/export.h>
 #include <linux/async.h>
+#include <linux/blk-pm.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
@@ -79,8 +81,21 @@
 
 	if (err == 0) {
 		pm_runtime_disable(dev);
-		pm_runtime_set_active(dev);
+		err = pm_runtime_set_active(dev);
 		pm_runtime_enable(dev);
+
+		/*
+		 * Forcibly set runtime PM status of request queue to "active"
+		 * to make sure we can again get requests from the queue
+		 * (see also blk_pm_peek_request()).
+		 *
+		 * The resume hook will correct runtime PM status of the disk.
+		 */
+		if (!err && scsi_is_sdev_device(dev)) {
+			struct scsi_device *sdev = to_scsi_device(dev);
+
+			blk_set_runtime_active(sdev->request_queue);
+		}
 	}
 
 	return err;
@@ -139,16 +154,6 @@
 	else
 		fn = NULL;
 
-	/*
-	 * Forcibly set runtime PM status of request queue to "active" to
-	 * make sure we can again get requests from the queue (see also
-	 * blk_pm_peek_request()).
-	 *
-	 * The resume hook will correct runtime PM status of the disk.
-	 */
-	if (scsi_is_sdev_device(dev) && pm_runtime_suspended(dev))
-		blk_set_runtime_active(to_scsi_device(dev)->request_queue);
-
 	if (fn) {
 		async_schedule_domain(fn, dev, &scsi_sd_pm_domain);
 
@@ -170,11 +175,7 @@
 
 static int scsi_bus_prepare(struct device *dev)
 {
-	if (scsi_is_sdev_device(dev)) {
-		/* sd probing uses async_schedule.  Wait until it finishes. */
-		async_synchronize_full_domain(&scsi_sd_probe_domain);
-
-	} else if (scsi_is_host_device(dev)) {
+	if (scsi_is_host_device(dev)) {
 		/* Wait until async scanning is finished */
 		scsi_complete_async_scans();
 	}
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 99f1db5..cc2859d 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -92,7 +92,6 @@
 extern void scsi_io_completion(struct scsi_cmnd *, unsigned int);
 extern void scsi_run_host_queues(struct Scsi_Host *shost);
 extern void scsi_requeue_run_queue(struct work_struct *work);
-extern struct request_queue *scsi_old_alloc_queue(struct scsi_device *sdev);
 extern struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev);
 extern void scsi_start_queue(struct scsi_device *sdev);
 extern int scsi_mq_setup_tags(struct Scsi_Host *shost);
@@ -176,7 +175,6 @@
 #endif /* CONFIG_PM */
 
 extern struct async_domain scsi_sd_pm_domain;
-extern struct async_domain scsi_sd_probe_domain;
 
 /* scsi_dh.c */
 #ifdef CONFIG_SCSI_DH
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index 7f0ceb6..5b31322 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -372,15 +372,10 @@
 	return err;
 }
 
-static int always_match(struct device *dev, void *data)
-{
-	return 1;
-}
-
 static inline struct device *next_scsi_device(struct device *start)
 {
-	struct device *next = bus_find_device(&scsi_bus_type, start, NULL,
-					      always_match);
+	struct device *next = bus_find_next_device(&scsi_bus_type, start);
+
 	put_device(start);
 	return next;
 }
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 78ca63d..058079f 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -220,7 +220,7 @@
 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
 
 	sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size,
-		       GFP_ATOMIC);
+		       GFP_KERNEL);
 	if (!sdev)
 		goto out;
 
@@ -266,10 +266,7 @@
 	 */
 	sdev->borken = 1;
 
-	if (shost_use_blk_mq(shost))
-		sdev->request_queue = scsi_mq_alloc_queue(sdev);
-	else
-		sdev->request_queue = scsi_old_alloc_queue(sdev);
+	sdev->request_queue = scsi_mq_alloc_queue(sdev);
 	if (!sdev->request_queue) {
 		/* release fn is set up in scsi_sysfs_device_initialise, so
 		 * have to free and put manually here */
@@ -280,11 +277,6 @@
 	WARN_ON_ONCE(!blk_get_queue(sdev->request_queue));
 	sdev->request_queue->queuedata = sdev;
 
-	if (!shost_use_blk_mq(sdev->host)) {
-		blk_queue_init_tags(sdev->request_queue,
-				    sdev->host->cmd_per_lun, shost->bqt,
-				    shost->hostt->tag_alloc_policy);
-	}
 	scsi_change_queue_depth(sdev, sdev->host->cmd_per_lun ?
 					sdev->host->cmd_per_lun : 1);
 
@@ -796,7 +788,7 @@
 	 */
 	sdev->inquiry = kmemdup(inq_result,
 				max_t(size_t, sdev->inquiry_len, 36),
-				GFP_ATOMIC);
+				GFP_KERNEL);
 	if (sdev->inquiry == NULL)
 		return SCSI_SCAN_NO_RESPONSE;
 
@@ -1087,7 +1079,7 @@
 	if (!sdev)
 		goto out;
 
-	result = kmalloc(result_len, GFP_ATOMIC |
+	result = kmalloc(result_len, GFP_KERNEL |
 			((shost->unchecked_isa_dma) ? __GFP_DMA : 0));
 	if (!result)
 		goto out_free_sdev;
@@ -1137,7 +1129,8 @@
 	 * that no LUN is present, so don't add sdev in these cases.
 	 * Two specific examples are:
 	 * 1) NetApp targets: return PQ=1, PDT=0x1f
-	 * 2) USB UFI: returns PDT=0x1f, with the PQ bits being "reserved"
+	 * 2) IBM/2145 targets: return PQ=1, PDT=0
+	 * 3) USB UFI: returns PDT=0x1f, with the PQ bits being "reserved"
 	 *    in the UFI 1.0 spec (we cannot rely on reserved bits).
 	 *
 	 * References:
@@ -1151,8 +1144,8 @@
 	 * PDT=00h Direct-access device (floppy)
 	 * PDT=1Fh none (no FDD connected to the requested logical unit)
 	 */
-	if (((result[0] >> 5) == 1 || starget->pdt_1f_for_no_lun) &&
-	    (result[0] & 0x1f) == 0x1f &&
+	if (((result[0] >> 5) == 1 ||
+	    (starget->pdt_1f_for_no_lun && (result[0] & 0x1f) == 0x1f)) &&
 	    !scsi_is_wlun(lun)) {
 		SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
 					"scsi scan: peripheral device type"
diff --git a/drivers/scsi/scsi_sysctl.c b/drivers/scsi/scsi_sysctl.c
index 546f162..7259704 100644
--- a/drivers/scsi/scsi_sysctl.c
+++ b/drivers/scsi/scsi_sysctl.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2003 Christoph Hellwig.
- *	Released under GPL v2.
  */
 
 #include <linux/errno.h>
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 3aee946..6d7362e 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * scsi_sysfs.c
  *
@@ -367,7 +368,6 @@
 
 static DEVICE_ATTR(eh_deadline, S_IRUGO | S_IWUSR, show_shost_eh_deadline, store_shost_eh_deadline);
 
-shost_rd_attr(use_blk_mq, "%d\n");
 shost_rd_attr(unique_id, "%u\n");
 shost_rd_attr(cmd_per_lun, "%hd\n");
 shost_rd_attr(can_queue, "%hd\n");
@@ -386,6 +386,13 @@
 }
 static DEVICE_ATTR(host_busy, S_IRUGO, show_host_busy, NULL);
 
+static ssize_t
+show_use_blk_mq(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "1\n");
+}
+static DEVICE_ATTR(use_blk_mq, S_IRUGO, show_use_blk_mq, NULL);
+
 static struct attribute *scsi_sysfs_shost_attrs[] = {
 	&dev_attr_use_blk_mq.attr,
 	&dev_attr_unique_id.attr,
@@ -723,6 +730,14 @@
 		  const char *buf, size_t count)
 {
 	struct kernfs_node *kn;
+	struct scsi_device *sdev = to_scsi_device(dev);
+
+	/*
+	 * We need to take a reference on the module, to avoid the module
+	 * being removed while the delete is in progress.
+	 */
+	if (scsi_device_get(sdev))
+		return -ENODEV;
 
 	kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
 	WARN_ON_ONCE(!kn);
@@ -737,9 +752,10 @@
 	 * state into SDEV_DEL.
 	 */
 	device_remove_file(dev, attr);
-	scsi_remove_device(to_scsi_device(dev));
+	scsi_remove_device(sdev);
 	if (kn)
 		sysfs_unbreak_active_protection(kn);
+	scsi_device_put(sdev);
 	return count;
 };
 static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete);
@@ -760,11 +776,22 @@
 			break;
 		}
 	}
-	if (!state)
+	switch (state) {
+	case SDEV_RUNNING:
+	case SDEV_OFFLINE:
+		break;
+	default:
 		return -EINVAL;
+	}
 
 	mutex_lock(&sdev->state_mutex);
 	ret = scsi_device_set_state(sdev, state);
+	/*
+	 * If the device state changes to SDEV_RUNNING, we need to run
+	 * the queue to avoid an I/O hang.
+	 */
+	if (ret == 0 && state == SDEV_RUNNING)
+		blk_mq_run_hw_queues(sdev->request_queue, true);
 	mutex_unlock(&sdev->state_mutex);
 
 	return ret == 0 ? count : -EINVAL;
diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c
index 0ff083b..0f17e7d 100644
--- a/drivers/scsi/scsi_trace.c
+++ b/drivers/scsi/scsi_trace.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2010 FUJITSU LIMITED
  * Copyright (C) 2010 Tomohiro Kusumi <kusumi.tomohiro@jp.fujitsu.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include <linux/kernel.h>
 #include <linux/trace_seq.h>
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 381668f..2732fa6 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -1,29 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  *  FiberChannel transport specific attributes exported to sysfs.
  *
  *  Copyright (c) 2003 Silicon Graphics, Inc.  All rights reserved.
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; either version 2 of the License, or
- *  (at your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
- *  ========
- *
  *  Copyright (C) 2004-2007   James Smart, Emulex Corporation
  *    Rewrite for host, target, device, and remote port attributes,
  *    statistics, and service functions...
  *    Add vports, etc
- *
  */
 #include <linux/module.h>
 #include <linux/init.h>
@@ -147,6 +130,7 @@
 	{ FCH_EVT_PORT_OFFLINE,		"port_offline" },
 	{ FCH_EVT_PORT_FABRIC,		"port_fabric" },
 	{ FCH_EVT_LINK_UNKNOWN,		"link_unknown" },
+	{ FCH_EVT_LINK_FPIN,		"link_FPIN" },
 	{ FCH_EVT_VENDOR_UNIQUE,	"vendor_unique" },
 };
 fc_enum_name_search(host_event_code, fc_host_event_code,
@@ -295,6 +279,9 @@
 	{ FC_PORT_ROLE_FCP_INITIATOR,		"FCP Initiator" },
 	{ FC_PORT_ROLE_IP_PORT,			"IP Port" },
 	{ FC_PORT_ROLE_FCP_DUMMY_INITIATOR,	"FCP Dummy Initiator" },
+	{ FC_PORT_ROLE_NVME_INITIATOR,		"NVMe Initiator" },
+	{ FC_PORT_ROLE_NVME_TARGET,		"NVMe Target" },
+	{ FC_PORT_ROLE_NVME_DISCOVERY,		"NVMe Discovery" },
 };
 fc_bitfield_name_search(port_roles, fc_port_role_names)
 
@@ -523,20 +510,23 @@
 }
 EXPORT_SYMBOL(fc_get_event_number);
 
-
 /**
- * fc_host_post_event - called to post an even on an fc_host.
+ * fc_host_post_fc_event - routine to do the work of posting an event
+ *                      on an fc_host.
  * @shost:		host the event occurred on
  * @event_number:	fc event number obtained from get_fc_event_number()
  * @event_code:		fc_host event being posted
- * @event_data:		32bits of data for the event being posted
+ * @data_len:		amount, in bytes, of event data
+ * @data_buf:		pointer to event data
+ * @vendor_id:          value for Vendor id
  *
  * Notes:
  *	This routine assumes no locks are held on entry.
  */
 void
-fc_host_post_event(struct Scsi_Host *shost, u32 event_number,
-		enum fc_host_event_code event_code, u32 event_data)
+fc_host_post_fc_event(struct Scsi_Host *shost, u32 event_number,
+		enum fc_host_event_code event_code,
+		u32 data_len, char *data_buf, u64 vendor_id)
 {
 	struct sk_buff *skb;
 	struct nlmsghdr	*nlh;
@@ -545,12 +535,15 @@
 	u32 len;
 	int err;
 
+	if (!data_buf || data_len < 4)
+		data_len = 0;
+
 	if (!scsi_nl_sock) {
 		err = -ENOENT;
 		goto send_fail;
 	}
 
-	len = FC_NL_MSGALIGN(sizeof(*event));
+	len = FC_NL_MSGALIGN(sizeof(*event) + data_len);
 
 	skb = nlmsg_new(len, GFP_KERNEL);
 	if (!skb) {
@@ -568,12 +561,13 @@
 	INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC,
 				FC_NL_ASYNC_EVENT, len);
 	event->seconds = ktime_get_real_seconds();
-	event->vendor_id = 0;
+	event->vendor_id = vendor_id;
 	event->host_no = shost->host_no;
-	event->event_datalen = sizeof(u32);	/* bytes */
+	event->event_datalen = data_len;	/* bytes */
 	event->event_num = event_number;
 	event->event_code = event_code;
-	event->event_data = event_data;
+	if (data_len)
+		memcpy(&event->event_data, data_buf, data_len);
 
 	nlmsg_multicast(scsi_nl_sock, skb, 0, SCSI_NL_GRP_FC_EVENTS,
 			GFP_KERNEL);
@@ -586,14 +580,35 @@
 	printk(KERN_WARNING
 		"%s: Dropped Event : host %d %s data 0x%08x - err %d\n",
 		__func__, shost->host_no,
-		(name) ? name : "<unknown>", event_data, err);
+		(name) ? name : "<unknown>",
+		(data_len) ? *((u32 *)data_buf) : 0xFFFFFFFF, err);
 	return;
 }
+EXPORT_SYMBOL(fc_host_post_fc_event);
+
+/**
+ * fc_host_post_event - called to post an event on an fc_host.
+ * @shost:		host the event occurred on
+ * @event_number:	fc event number obtained from get_fc_event_number()
+ * @event_code:		fc_host event being posted
+ * @event_data:		32bits of data for the event being posted
+ *
+ * Notes:
+ *	This routine assumes no locks are held on entry.
+ */
+void
+fc_host_post_event(struct Scsi_Host *shost, u32 event_number,
+		enum fc_host_event_code event_code, u32 event_data)
+{
+	fc_host_post_fc_event(shost, event_number, event_code,
+		(u32)sizeof(u32), (char *)&event_data, 0);
+}
 EXPORT_SYMBOL(fc_host_post_event);
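
With the consolidation, both exported wrappers funnel into
fc_host_post_fc_event(). An illustrative LLD call using only the API shown
here (the surrounding context is assumed):

	fc_host_post_event(shost, fc_get_event_number(),
			   FCH_EVT_LINK_UNKNOWN, 0 /* event_data */);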
 
 
 /**
- * fc_host_post_vendor_event - called to post a vendor unique event on an fc_host
+ * fc_host_post_vendor_event - called to post a vendor unique event
+ *                      on an fc_host
  * @shost:		host the event occurred on
  * @event_number:	fc event number obtained from get_fc_event_number()
  * @data_len:		amount, in bytes, of vendor unique data
@@ -607,56 +622,27 @@
 fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number,
 		u32 data_len, char * data_buf, u64 vendor_id)
 {
-	struct sk_buff *skb;
-	struct nlmsghdr	*nlh;
-	struct fc_nl_event *event;
-	u32 len;
-	int err;
-
-	if (!scsi_nl_sock) {
-		err = -ENOENT;
-		goto send_vendor_fail;
-	}
-
-	len = FC_NL_MSGALIGN(sizeof(*event) + data_len);
-
-	skb = nlmsg_new(len, GFP_KERNEL);
-	if (!skb) {
-		err = -ENOBUFS;
-		goto send_vendor_fail;
-	}
-
-	nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG, len, 0);
-	if (!nlh) {
-		err = -ENOBUFS;
-		goto send_vendor_fail_skb;
-	}
-	event = nlmsg_data(nlh);
-
-	INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC,
-				FC_NL_ASYNC_EVENT, len);
-	event->seconds = ktime_get_real_seconds();
-	event->vendor_id = vendor_id;
-	event->host_no = shost->host_no;
-	event->event_datalen = data_len;	/* bytes */
-	event->event_num = event_number;
-	event->event_code = FCH_EVT_VENDOR_UNIQUE;
-	memcpy(&event->event_data, data_buf, data_len);
-
-	nlmsg_multicast(scsi_nl_sock, skb, 0, SCSI_NL_GRP_FC_EVENTS,
-			GFP_KERNEL);
-	return;
-
-send_vendor_fail_skb:
-	kfree_skb(skb);
-send_vendor_fail:
-	printk(KERN_WARNING
-		"%s: Dropped Event : host %d vendor_unique - err %d\n",
-		__func__, shost->host_no, err);
-	return;
+	fc_host_post_fc_event(shost, event_number, FCH_EVT_VENDOR_UNIQUE,
+		data_len, data_buf, vendor_id);
 }
 EXPORT_SYMBOL(fc_host_post_vendor_event);
 
+/**
+ * fc_host_fpin_rcv - routine to process a received FPIN.
+ * @shost:		host the FPIN was received on
+ * @fpin_len:		length of FPIN payload, in bytes
+ * @fpin_buf:		pointer to FPIN payload
+ *
+ * Notes:
+ *	This routine assumes no locks are held on entry.
+ */
+void
+fc_host_fpin_rcv(struct Scsi_Host *shost, u32 fpin_len, char *fpin_buf)
+{
+	fc_host_post_fc_event(shost, fc_get_event_number(),
+				FCH_EVT_LINK_FPIN, fpin_len, fpin_buf, 0);
+}
+EXPORT_SYMBOL(fc_host_fpin_rcv);
 
 
 static __init int fc_transport_init(void)
@@ -3592,7 +3578,7 @@
 
 	/* the blk_end_sync_io() doesn't check the error */
 	if (inflight)
-		__blk_complete_request(req);
+		blk_mq_end_request(req, BLK_STS_IOERR);
 	return BLK_EH_DONE;
 }
 
@@ -3684,14 +3670,9 @@
 fc_bsg_goose_queue(struct fc_rport *rport)
 {
 	struct request_queue *q = rport->rqst_q;
-	unsigned long flags;
 
-	if (!q)
-		return;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	blk_run_queue_async(q);
-	spin_unlock_irqrestore(q->queue_lock, flags);
+	if (q)
+		blk_mq_run_hw_queues(q, true);
 }
 
 /**
@@ -3759,6 +3740,37 @@
 		return fc_bsg_host_dispatch(shost, job);
 }
 
+static blk_status_t fc_bsg_rport_prep(struct fc_rport *rport)
+{
+	if (rport->port_state == FC_PORTSTATE_BLOCKED &&
+	    !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT))
+		return BLK_STS_RESOURCE;
+
+	if (rport->port_state != FC_PORTSTATE_ONLINE)
+		return BLK_STS_IOERR;
+
+	return BLK_STS_OK;
+}
+
+
+static int fc_bsg_dispatch_prep(struct bsg_job *job)
+{
+	struct fc_rport *rport = fc_bsg_to_rport(job);
+	blk_status_t ret;
+
+	ret = fc_bsg_rport_prep(rport);
+	switch (ret) {
+	case BLK_STS_OK:
+		break;
+	case BLK_STS_RESOURCE:
+		return -EAGAIN;
+	default:
+		return -EIO;
+	}
+
+	return fc_bsg_dispatch(job);
+}
+
 /**
  * fc_bsg_hostadd - Create and add the bsg hooks so we can receive requests
  * @shost:	shost for fc_host
@@ -3780,7 +3792,8 @@
 	snprintf(bsg_name, sizeof(bsg_name),
 		 "fc_host%d", shost->host_no);
 
-	q = bsg_setup_queue(dev, bsg_name, fc_bsg_dispatch, i->f->dd_bsg_size);
+	q = bsg_setup_queue(dev, bsg_name, fc_bsg_dispatch, fc_bsg_job_timeout,
+				i->f->dd_bsg_size);
 	if (IS_ERR(q)) {
 		dev_err(dev,
 			"fc_host%d: bsg interface failed to initialize - setup queue\n",
@@ -3788,26 +3801,11 @@
 		return PTR_ERR(q);
 	}
 	__scsi_init_queue(shost, q);
-	blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
 	blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT);
 	fc_host->rqst_q = q;
 	return 0;
 }
 
-static int fc_bsg_rport_prep(struct request_queue *q, struct request *req)
-{
-	struct fc_rport *rport = dev_to_rport(q->queuedata);
-
-	if (rport->port_state == FC_PORTSTATE_BLOCKED &&
-	    !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT))
-		return BLKPREP_DEFER;
-
-	if (rport->port_state != FC_PORTSTATE_ONLINE)
-		return BLKPREP_KILL;
-
-	return BLKPREP_OK;
-}
-
 /**
  * fc_bsg_rportadd - Create and add the bsg hooks so we can receive requests
  * @shost:	shost that rport is attached to
@@ -3825,15 +3823,13 @@
 	if (!i->f->bsg_request)
 		return -ENOTSUPP;
 
-	q = bsg_setup_queue(dev, dev_name(dev), fc_bsg_dispatch,
-			i->f->dd_bsg_size);
+	q = bsg_setup_queue(dev, dev_name(dev), fc_bsg_dispatch_prep,
+				fc_bsg_job_timeout, i->f->dd_bsg_size);
 	if (IS_ERR(q)) {
 		dev_err(dev, "failed to setup bsg queue\n");
 		return PTR_ERR(q);
 	}
 	__scsi_init_queue(shost, q);
-	blk_queue_prep_rq(q, fc_bsg_rport_prep);
-	blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
 	blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
 	rport->rqst_q = q;
 	return 0;
@@ -3852,10 +3848,7 @@
 static void
 fc_bsg_remove(struct request_queue *q)
 {
-	if (q) {
-		bsg_unregister_queue(q);
-		blk_cleanup_queue(q);
-	}
+	bsg_remove_queue(q);
 }
 
 
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 6fd2fe2..417b868 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * iSCSI transport class definitions
  *
@@ -5,20 +6,6 @@
  * Copyright (C) Mike Christie, 2004 - 2005
  * Copyright (C) Dmitry Yusupov, 2004 - 2005
  * Copyright (C) Alex Aizman, 2004 - 2005
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 #include <linux/module.h>
 #include <linux/mutex.h>
@@ -37,6 +24,18 @@
 
 #define ISCSI_TRANSPORT_VERSION "2.0-870"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/iscsi.h>
+
+/*
+ * Export tracepoint symbols to be used by other modules.
+ */
+EXPORT_TRACEPOINT_SYMBOL_GPL(iscsi_dbg_conn);
+EXPORT_TRACEPOINT_SYMBOL_GPL(iscsi_dbg_eh);
+EXPORT_TRACEPOINT_SYMBOL_GPL(iscsi_dbg_session);
+EXPORT_TRACEPOINT_SYMBOL_GPL(iscsi_dbg_tcp);
+EXPORT_TRACEPOINT_SYMBOL_GPL(iscsi_dbg_sw_tcp);
+
 static int dbg_session;
 module_param_named(debug_session, dbg_session, int,
 		   S_IRUGO | S_IWUSR);
@@ -59,6 +58,9 @@
 			iscsi_cls_session_printk(KERN_INFO, _session,	\
 						 "%s: " dbg_fmt,	\
 						 __func__, ##arg);	\
+		iscsi_dbg_trace(trace_iscsi_dbg_trans_session,		\
+				&(_session)->dev,			\
+				"%s " dbg_fmt, __func__, ##arg);	\
 	} while (0);
 
 #define ISCSI_DBG_TRANS_CONN(_conn, dbg_fmt, arg...)			\
@@ -66,7 +68,10 @@
 		if (dbg_conn)						\
 			iscsi_cls_conn_printk(KERN_INFO, _conn,		\
 					      "%s: " dbg_fmt,		\
-					      __func__, ##arg);	\
+					      __func__, ##arg);		\
+		iscsi_dbg_trace(trace_iscsi_dbg_trans_conn,		\
+				&(_conn)->dev,				\
+				"%s " dbg_fmt, __func__, ##arg);	\
 	} while (0);
 
 struct iscsi_internal {
@@ -1542,7 +1547,7 @@
 		return -ENOTSUPP;
 
 	snprintf(bsg_name, sizeof(bsg_name), "iscsi_host%d", shost->host_no);
-	q = bsg_setup_queue(dev, bsg_name, iscsi_bsg_host_dispatch, 0);
+	q = bsg_setup_queue(dev, bsg_name, iscsi_bsg_host_dispatch, NULL, 0);
 	if (IS_ERR(q)) {
 		shost_printk(KERN_ERR, shost, "bsg interface failed to "
 			     "initialize - no request queue\n");
@@ -1576,10 +1581,7 @@
 	struct Scsi_Host *shost = dev_to_shost(dev);
 	struct iscsi_cls_host *ihost = shost->shost_data;
 
-	if (ihost->bsg_q) {
-		bsg_unregister_queue(ihost->bsg_q);
-		blk_cleanup_queue(ihost->bsg_q);
-	}
+	bsg_remove_queue(ihost->bsg_q);
 	return 0;
 }
 
@@ -2185,6 +2187,8 @@
 	scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE);
 	/* flush running scans then delete devices */
 	flush_work(&session->scan_work);
+	/* flush running unbind operations */
+	flush_work(&session->unbind_work);
 	__iscsi_unbind_session(&session->unbind_work);
 
 	/* hw iscsi may not have removed all connections from session */
@@ -4497,6 +4501,20 @@
 }
 EXPORT_SYMBOL_GPL(iscsi_unregister_transport);
 
+void iscsi_dbg_trace(void (*trace)(struct device *dev, struct va_format *),
+		     struct device *dev, const char *fmt, ...)
+{
+	struct va_format vaf;
+	va_list args;
+
+	va_start(args, fmt);
+	vaf.fmt = fmt;
+	vaf.va = &args;
+	trace(dev, &vaf);
+	va_end(args);
+}
+EXPORT_SYMBOL_GPL(iscsi_dbg_trace);
+
 static __init int iscsi_transport_init(void)
 {
 	int err;
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 0cd16e8..ef138c5 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2005-2006 Dell Inc.
- *	Released under GPL v2.
  *
  * Serial Attached SCSI (SAS) transport class.
  *
@@ -198,7 +198,7 @@
 
 	if (rphy) {
 		q = bsg_setup_queue(&rphy->dev, dev_name(&rphy->dev),
-				sas_smp_dispatch, 0);
+				sas_smp_dispatch, NULL, 0);
 		if (IS_ERR(q))
 			return PTR_ERR(q);
 		rphy->q = q;
@@ -207,13 +207,12 @@
 
 		snprintf(name, sizeof(name), "sas_host%d", shost->host_no);
 		q = bsg_setup_queue(&shost->shost_gendev, name,
-				sas_smp_dispatch, 0);
+				sas_smp_dispatch, NULL, 0);
 		if (IS_ERR(q))
 			return PTR_ERR(q);
 		to_sas_host_attrs(shost)->q = q;
 	}
 
-	blk_queue_flag_set(QUEUE_FLAG_BIDI, q);
 	return 0;
 }
 
@@ -246,11 +245,7 @@
 	struct Scsi_Host *shost = dev_to_shost(dev);
 	struct request_queue *q = to_sas_host_attrs(shost)->q;
 
-	if (q) {
-		bsg_unregister_queue(q);
-		blk_cleanup_queue(q);
-	}
-
+	bsg_remove_queue(q);
 	return 0;
 }
 
@@ -612,7 +607,6 @@
 sas_phy_simple_attr(identify.sas_address, sas_address, "0x%016llx\n",
 		unsigned long long);
 sas_phy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8);
-//sas_phy_simple_attr(port_identifier, port_identifier, "%d\n", int);
 sas_phy_linkspeed_attr(negotiated_linkrate);
 sas_phy_linkspeed_attr(minimum_linkrate_hw);
 sas_phy_linkspeed_rw_attr(minimum_linkrate);
@@ -1802,7 +1796,6 @@
 	SETUP_PHY_ATTRIBUTE(device_type);
 	SETUP_PHY_ATTRIBUTE(sas_address);
 	SETUP_PHY_ATTRIBUTE(phy_identifier);
-	//SETUP_PHY_ATTRIBUTE(port_identifier);
 	SETUP_PHY_ATTRIBUTE(negotiated_linkrate);
 	SETUP_PHY_ATTRIBUTE(minimum_linkrate_hw);
 	SETUP_PHY_ATTRIBUTE_RW(minimum_linkrate);
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 40b85b7..f866106 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -1,22 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /* 
  *  Parallel SCSI (SPI) transport specific attributes exported to sysfs.
  *
  *  Copyright (c) 2003 Silicon Graphics, Inc.  All rights reserved.
  *  Copyright (c) 2004, 2005 James Bottomley <James.Bottomley@SteelEye.com>
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; either version 2 of the License, or
- *  (at your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 #include <linux/ctype.h>
 #include <linux/init.h>
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index 4e46fdb..d4d1104 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -1,22 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * SCSI RDMA (SRP) transport class
  *
  * Copyright (C) 2007 FUJITA Tomonori <tomof@acm.org>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation, version 2 of the
- * License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
  */
 #include <linux/init.h>
 #include <linux/module.h>
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 5c9acb6..ebb4016 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  *      sd.c Copyright (C) 1992 Drew Eckhardt
  *           Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
@@ -45,6 +46,7 @@
 #include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/blkpg.h>
+#include <linux/blk-pm.h>
 #include <linux/delay.h>
 #include <linux/mutex.h>
 #include <linux/string_helpers.h>
@@ -113,7 +115,7 @@
 static int sd_suspend_runtime(struct device *);
 static int sd_resume(struct device *);
 static void sd_rescan(struct device *);
-static int sd_init_command(struct scsi_cmnd *SCpnt);
+static blk_status_t sd_init_command(struct scsi_cmnd *SCpnt);
 static void sd_uninit_command(struct scsi_cmnd *SCpnt);
 static int sd_done(struct scsi_cmnd *);
 static void sd_eh_reset(struct scsi_cmnd *);
@@ -205,6 +207,12 @@
 	sp = buffer_data[0] & 0x80 ? 1 : 0;
 	buffer_data[0] &= ~0x80;
 
+	/*
+	 * Ensure that the WP, DPOFUA, and RESERVED fields are cleared in
+	 * the received mode parameter buffer before doing MODE SELECT.
+	 */
+	data.device_specific = 0;
+
 	if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT,
 			     SD_MAX_RETRIES, &data, &sshdr)) {
 		if (scsi_sense_valid(&sshdr))
@@ -560,6 +568,7 @@
 		.name		= "sd",
 		.owner		= THIS_MODULE,
 		.probe		= sd_probe,
+		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
 		.remove		= sd_remove,
 		.shutdown	= sd_shutdown,
 		.pm		= &sd_pm_ops,
@@ -658,6 +667,68 @@
 }
 #endif /* CONFIG_BLK_SED_OPAL */
 
+/*
+ * Look up the DIX operation based on whether the command is read or
+ * write and whether dix and dif are enabled.
+ */
+static unsigned int sd_prot_op(bool write, bool dix, bool dif)
+{
+	/* Lookup table: bit 2 (write), bit 1 (dix), bit 0 (dif) */
+	static const unsigned int ops[] = {	/* wrt dix dif */
+		SCSI_PROT_NORMAL,		/*  0	0   0  */
+		SCSI_PROT_READ_STRIP,		/*  0	0   1  */
+		SCSI_PROT_READ_INSERT,		/*  0	1   0  */
+		SCSI_PROT_READ_PASS,		/*  0	1   1  */
+		SCSI_PROT_NORMAL,		/*  1	0   0  */
+		SCSI_PROT_WRITE_INSERT,		/*  1	0   1  */
+		SCSI_PROT_WRITE_STRIP,		/*  1	1   0  */
+		SCSI_PROT_WRITE_PASS,		/*  1	1   1  */
+	};
+
+	return ops[write << 2 | dix << 1 | dif];
+}
+
+/*
+ * Returns a mask of the protection flags that are valid for a given DIX
+ * operation.
+ */
+static unsigned int sd_prot_flag_mask(unsigned int prot_op)
+{
+	static const unsigned int flag_mask[] = {
+		[SCSI_PROT_NORMAL]		= 0,
+
+		[SCSI_PROT_READ_STRIP]		= SCSI_PROT_TRANSFER_PI |
+						  SCSI_PROT_GUARD_CHECK |
+						  SCSI_PROT_REF_CHECK |
+						  SCSI_PROT_REF_INCREMENT,
+
+		[SCSI_PROT_READ_INSERT]		= SCSI_PROT_REF_INCREMENT |
+						  SCSI_PROT_IP_CHECKSUM,
+
+		[SCSI_PROT_READ_PASS]		= SCSI_PROT_TRANSFER_PI |
+						  SCSI_PROT_GUARD_CHECK |
+						  SCSI_PROT_REF_CHECK |
+						  SCSI_PROT_REF_INCREMENT |
+						  SCSI_PROT_IP_CHECKSUM,
+
+		[SCSI_PROT_WRITE_INSERT]	= SCSI_PROT_TRANSFER_PI |
+						  SCSI_PROT_REF_INCREMENT,
+
+		[SCSI_PROT_WRITE_STRIP]		= SCSI_PROT_GUARD_CHECK |
+						  SCSI_PROT_REF_CHECK |
+						  SCSI_PROT_REF_INCREMENT |
+						  SCSI_PROT_IP_CHECKSUM,
+
+		[SCSI_PROT_WRITE_PASS]		= SCSI_PROT_TRANSFER_PI |
+						  SCSI_PROT_GUARD_CHECK |
+						  SCSI_PROT_REF_CHECK |
+						  SCSI_PROT_REF_INCREMENT |
+						  SCSI_PROT_IP_CHECKSUM,
+	};
+
+	return flag_mask[prot_op];
+}
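The table index packs the three booleans into bits 2..0, so for example a DIX-enabled write to a non-DIF disk (write=1, dix=1, dif=0) lands on index 0b110, i.e. SCSI_PROT_WRITE_STRIP. A runnable userspace mirror of the lookup; the enum values here are illustrative stand-ins, only the indexing matters:

#include <stdio.h>

/* Illustrative stand-ins for the kernel's SCSI_PROT_* enum. */
enum prot { NORMAL, READ_STRIP, READ_INSERT, READ_PASS,
	    WRITE_INSERT = 5, WRITE_STRIP, WRITE_PASS };

static unsigned int prot_op(int write, int dix, int dif)
{
	static const unsigned int ops[] = {	/* wrt dix dif */
		NORMAL, READ_STRIP, READ_INSERT, READ_PASS,
		NORMAL, WRITE_INSERT, WRITE_STRIP, WRITE_PASS,
	};
	return ops[write << 2 | dix << 1 | dif];
}

int main(void)
{
	printf("%u\n", prot_op(1, 1, 0));	/* index 6 -> WRITE_STRIP */
	return 0;
}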
+
 static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd,
 					   unsigned int dix, unsigned int dif)
 {
@@ -750,18 +821,18 @@
 	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 }
 
-static int sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
+static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
 {
 	struct scsi_device *sdp = cmd->device;
 	struct request *rq = cmd->request;
-	u64 sector = blk_rq_pos(rq) >> (ilog2(sdp->sector_size) - 9);
-	u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9);
+	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
+	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
 	unsigned int data_len = 24;
 	char *buf;
 
 	rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
 	if (!rq->special_vec.bv_page)
-		return BLKPREP_DEFER;
+		return BLK_STS_RESOURCE;
 	clear_highpage(rq->special_vec.bv_page);
 	rq->special_vec.bv_offset = 0;
 	rq->special_vec.bv_len = data_len;
@@ -774,28 +845,28 @@
 	buf = page_address(rq->special_vec.bv_page);
 	put_unaligned_be16(6 + 16, &buf[0]);
 	put_unaligned_be16(16, &buf[2]);
-	put_unaligned_be64(sector, &buf[8]);
-	put_unaligned_be32(nr_sectors, &buf[16]);
+	put_unaligned_be64(lba, &buf[8]);
+	put_unaligned_be32(nr_blocks, &buf[16]);
 
 	cmd->allowed = SD_MAX_RETRIES;
 	cmd->transfersize = data_len;
 	rq->timeout = SD_TIMEOUT;
-	scsi_req(rq)->resid_len = data_len;
 
 	return scsi_init_io(cmd);
 }
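Two things happen above: sectors_to_logical() converts the block layer's 512-byte units into device logical blocks (on a 4096-byte-sector disk, 512B sector 8 becomes LBA 1), and the payload page is filled as a SCSI UNMAP parameter list, an 8-byte header followed by one 16-byte block descriptor. A runnable sketch of that 24-byte layout, with a hypothetical LBA and block count:

#include <stdint.h>
#include <stdio.h>

/* Minimal big-endian store helpers mirroring put_unaligned_be*(). */
static void be16(uint16_t v, uint8_t *p) { p[0] = v >> 8; p[1] = v; }
static void be32(uint32_t v, uint8_t *p)
{
	for (int i = 0; i < 4; i++)
		p[i] = v >> (24 - 8 * i);
}
static void be64(uint64_t v, uint8_t *p)
{
	for (int i = 0; i < 8; i++)
		p[i] = v >> (56 - 8 * i);
}

int main(void)
{
	uint8_t buf[24] = { 0 };	/* 8-byte header + one descriptor */

	be16(6 + 16, &buf[0]);	/* UNMAP data length (excludes bytes 0-1) */
	be16(16, &buf[2]);	/* block descriptor data length */
	be64(1234, &buf[8]);	/* hypothetical starting LBA */
	be32(8, &buf[16]);	/* hypothetical number of blocks */

	for (int i = 0; i < 24; i++)
		printf("%02x%c", buf[i], i % 8 == 7 ? '\n' : ' ');
	return 0;
}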
 
-static int sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd, bool unmap)
+static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
+		bool unmap)
 {
 	struct scsi_device *sdp = cmd->device;
 	struct request *rq = cmd->request;
-	u64 sector = blk_rq_pos(rq) >> (ilog2(sdp->sector_size) - 9);
-	u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9);
+	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
+	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
 	u32 data_len = sdp->sector_size;
 
 	rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
 	if (!rq->special_vec.bv_page)
-		return BLKPREP_DEFER;
+		return BLK_STS_RESOURCE;
 	clear_highpage(rq->special_vec.bv_page);
 	rq->special_vec.bv_offset = 0;
 	rq->special_vec.bv_len = data_len;
@@ -805,28 +876,28 @@
 	cmd->cmnd[0] = WRITE_SAME_16;
 	if (unmap)
 		cmd->cmnd[1] = 0x8; /* UNMAP */
-	put_unaligned_be64(sector, &cmd->cmnd[2]);
-	put_unaligned_be32(nr_sectors, &cmd->cmnd[10]);
+	put_unaligned_be64(lba, &cmd->cmnd[2]);
+	put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);
 
 	cmd->allowed = SD_MAX_RETRIES;
 	cmd->transfersize = data_len;
 	rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;
-	scsi_req(rq)->resid_len = data_len;
 
 	return scsi_init_io(cmd);
 }
 
-static int sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd, bool unmap)
+static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
+		bool unmap)
 {
 	struct scsi_device *sdp = cmd->device;
 	struct request *rq = cmd->request;
-	u64 sector = blk_rq_pos(rq) >> (ilog2(sdp->sector_size) - 9);
-	u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9);
+	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
+	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
 	u32 data_len = sdp->sector_size;
 
 	rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
 	if (!rq->special_vec.bv_page)
-		return BLKPREP_DEFER;
+		return BLK_STS_RESOURCE;
 	clear_highpage(rq->special_vec.bv_page);
 	rq->special_vec.bv_offset = 0;
 	rq->special_vec.bv_len = data_len;
@@ -836,24 +907,23 @@
 	cmd->cmnd[0] = WRITE_SAME;
 	if (unmap)
 		cmd->cmnd[1] = 0x8; /* UNMAP */
-	put_unaligned_be32(sector, &cmd->cmnd[2]);
-	put_unaligned_be16(nr_sectors, &cmd->cmnd[7]);
+	put_unaligned_be32(lba, &cmd->cmnd[2]);
+	put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);
 
 	cmd->allowed = SD_MAX_RETRIES;
 	cmd->transfersize = data_len;
 	rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;
-	scsi_req(rq)->resid_len = data_len;
 
 	return scsi_init_io(cmd);
 }
 
-static int sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
+static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
 {
 	struct request *rq = cmd->request;
 	struct scsi_device *sdp = cmd->device;
 	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
-	u64 sector = blk_rq_pos(rq) >> (ilog2(sdp->sector_size) - 9);
-	u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9);
+	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
+	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
 
 	if (!(rq->cmd_flags & REQ_NOUNMAP)) {
 		switch (sdkp->zeroing_mode) {
@@ -865,9 +935,9 @@
 	}
 
 	if (sdp->no_write_same)
-		return BLKPREP_INVALID;
+		return BLK_STS_TARGET;
 
-	if (sdkp->ws16 || sector > 0xffffffff || nr_sectors > 0xffff)
+	if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff)
 		return sd_setup_write_same16_cmnd(cmd, false);
 
 	return sd_setup_write_same10_cmnd(cmd, false);
@@ -942,37 +1012,33 @@
  * Will set up either WRITE SAME(10) or WRITE SAME(16) depending on
  * the preference indicated by the target device.
  **/
-static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
+static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
 {
 	struct request *rq = cmd->request;
 	struct scsi_device *sdp = cmd->device;
 	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
 	struct bio *bio = rq->bio;
-	sector_t sector = blk_rq_pos(rq);
-	unsigned int nr_sectors = blk_rq_sectors(rq);
-	unsigned int nr_bytes = blk_rq_bytes(rq);
-	int ret;
+	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
+	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
+	blk_status_t ret;
 
 	if (sdkp->device->no_write_same)
-		return BLKPREP_INVALID;
+		return BLK_STS_TARGET;
 
 	BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size);
 
-	sector >>= ilog2(sdp->sector_size) - 9;
-	nr_sectors >>= ilog2(sdp->sector_size) - 9;
-
 	rq->timeout = SD_WRITE_SAME_TIMEOUT;
 
-	if (sdkp->ws16 || sector > 0xffffffff || nr_sectors > 0xffff) {
+	if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff) {
 		cmd->cmd_len = 16;
 		cmd->cmnd[0] = WRITE_SAME_16;
-		put_unaligned_be64(sector, &cmd->cmnd[2]);
-		put_unaligned_be32(nr_sectors, &cmd->cmnd[10]);
+		put_unaligned_be64(lba, &cmd->cmnd[2]);
+		put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);
 	} else {
 		cmd->cmd_len = 10;
 		cmd->cmnd[0] = WRITE_SAME;
-		put_unaligned_be32(sector, &cmd->cmnd[2]);
-		put_unaligned_be16(nr_sectors, &cmd->cmnd[7]);
+		put_unaligned_be32(lba, &cmd->cmnd[2]);
+		put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);
 	}
 
 	cmd->transfersize = sdp->sector_size;
@@ -990,12 +1056,12 @@
 	 */
 	rq->__data_len = sdp->sector_size;
 	ret = scsi_init_io(cmd);
-	rq->__data_len = nr_bytes;
+	rq->__data_len = blk_rq_bytes(rq);
 
 	return ret;
 }
 
-static int sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
+static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
 {
 	struct request *rq = cmd->request;
 
@@ -1008,247 +1074,196 @@
 	cmd->allowed = SD_MAX_RETRIES;
 
 	rq->timeout = rq->q->rq_timeout * SD_FLUSH_TIMEOUT_MULTIPLIER;
-	return BLKPREP_OK;
+	return BLK_STS_OK;
 }
 
-static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
+static blk_status_t sd_setup_rw32_cmnd(struct scsi_cmnd *cmd, bool write,
+				       sector_t lba, unsigned int nr_blocks,
+				       unsigned char flags)
 {
-	struct request *rq = SCpnt->request;
-	struct scsi_device *sdp = SCpnt->device;
-	struct gendisk *disk = rq->rq_disk;
-	struct scsi_disk *sdkp = scsi_disk(disk);
-	sector_t block = blk_rq_pos(rq);
-	sector_t threshold;
-	unsigned int this_count = blk_rq_sectors(rq);
-	unsigned int dif, dix;
-	int ret;
-	unsigned char protect;
+	cmd->cmnd = mempool_alloc(sd_cdb_pool, GFP_ATOMIC);
+	if (unlikely(cmd->cmnd == NULL))
+		return BLK_STS_RESOURCE;
 
-	ret = scsi_init_io(SCpnt);
-	if (ret != BLKPREP_OK)
-		return ret;
-	WARN_ON_ONCE(SCpnt != rq->special);
+	cmd->cmd_len = SD_EXT_CDB_SIZE;
+	memset(cmd->cmnd, 0, cmd->cmd_len);
 
-	/* from here on until we're complete, any goto out
-	 * is used for a killable error condition */
-	ret = BLKPREP_KILL;
+	cmd->cmnd[0]  = VARIABLE_LENGTH_CMD;
+	cmd->cmnd[7]  = 0x18; /* Additional CDB len */
+	cmd->cmnd[9]  = write ? WRITE_32 : READ_32;
+	cmd->cmnd[10] = flags;
+	put_unaligned_be64(lba, &cmd->cmnd[12]);
+	put_unaligned_be32(lba, &cmd->cmnd[20]); /* Expected Indirect LBA */
+	put_unaligned_be32(nr_blocks, &cmd->cmnd[28]);
 
-	SCSI_LOG_HLQUEUE(1,
-		scmd_printk(KERN_INFO, SCpnt,
-			"%s: block=%llu, count=%d\n",
-			__func__, (unsigned long long)block, this_count));
+	return BLK_STS_OK;
+}
 
-	if (!sdp || !scsi_device_online(sdp) ||
-	    block + blk_rq_sectors(rq) > get_capacity(disk)) {
-		SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
-						"Finishing %u sectors\n",
-						blk_rq_sectors(rq)));
-		SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
-						"Retry with 0x%p\n", SCpnt));
-		goto out;
-	}
+static blk_status_t sd_setup_rw16_cmnd(struct scsi_cmnd *cmd, bool write,
+				       sector_t lba, unsigned int nr_blocks,
+				       unsigned char flags)
+{
+	cmd->cmd_len  = 16;
+	cmd->cmnd[0]  = write ? WRITE_16 : READ_16;
+	cmd->cmnd[1]  = flags;
+	cmd->cmnd[14] = 0;
+	cmd->cmnd[15] = 0;
+	put_unaligned_be64(lba, &cmd->cmnd[2]);
+	put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);
 
-	if (sdp->changed) {
+	return BLK_STS_OK;
+}
+
+static blk_status_t sd_setup_rw10_cmnd(struct scsi_cmnd *cmd, bool write,
+				       sector_t lba, unsigned int nr_blocks,
+				       unsigned char flags)
+{
+	cmd->cmd_len = 10;
+	cmd->cmnd[0] = write ? WRITE_10 : READ_10;
+	cmd->cmnd[1] = flags;
+	cmd->cmnd[6] = 0;
+	cmd->cmnd[9] = 0;
+	put_unaligned_be32(lba, &cmd->cmnd[2]);
+	put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);
+
+	return BLK_STS_OK;
+}
+
+static blk_status_t sd_setup_rw6_cmnd(struct scsi_cmnd *cmd, bool write,
+				      sector_t lba, unsigned int nr_blocks,
+				      unsigned char flags)
+{
+	/* Avoid having a transfer length of 0 translated into 256 blocks. */
+	if (WARN_ON_ONCE(nr_blocks == 0))
+		return BLK_STS_IOERR;
+
+	if (unlikely(flags & 0x8)) {
 		/*
-		 * quietly refuse to do anything to a changed disc until 
-		 * the changed bit has been reset
+		 * This happens only if this drive failed 10byte rw
+		 * command with ILLEGAL_REQUEST during operation and
+		 * thus turned off use_10_for_rw.
 		 */
-		/* printk("SCSI disk has been changed or is not present. Prohibiting further I/O.\n"); */
-		goto out;
+		scmd_printk(KERN_ERR, cmd, "FUA write on READ/WRITE(6) drive\n");
+		return BLK_STS_IOERR;
+	}
+
+	cmd->cmd_len = 6;
+	cmd->cmnd[0] = write ? WRITE_6 : READ_6;
+	cmd->cmnd[1] = (lba >> 16) & 0x1f;
+	cmd->cmnd[2] = (lba >> 8) & 0xff;
+	cmd->cmnd[3] = lba & 0xff;
+	cmd->cmnd[4] = nr_blocks;
+	cmd->cmnd[5] = 0;
+
+	return BLK_STS_OK;
+}
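The guard exists because READ(6)/WRITE(6) encode the transfer length in a single byte where 0 means 256 blocks, and the LBA in only 21 bits. A runnable sketch of the 6-byte CDB packing, with hypothetical values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t lba = 0x1abcde;	/* must fit in 21 bits */
	uint8_t cdb[6] = { 0x08 };	/* READ(6) opcode */

	cdb[1] = (lba >> 16) & 0x1f;	/* top 5 LBA bits */
	cdb[2] = (lba >> 8) & 0xff;
	cdb[3] = lba & 0xff;
	cdb[4] = 16;	/* transfer length; 0 would mean 256 blocks */
	cdb[5] = 0;	/* control byte */

	for (int i = 0; i < 6; i++)
		printf("%02x ", cdb[i]);
	printf("\n");
	return 0;
}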
+
+static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
+{
+	struct request *rq = cmd->request;
+	struct scsi_device *sdp = cmd->device;
+	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+	sector_t lba = sectors_to_logical(sdp, blk_rq_pos(rq));
+	sector_t threshold;
+	unsigned int nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
+	unsigned int mask = logical_to_sectors(sdp, 1) - 1;
+	bool write = rq_data_dir(rq) == WRITE;
+	unsigned char protect, fua;
+	blk_status_t ret;
+	unsigned int dif;
+	bool dix;
+
+	ret = scsi_init_io(cmd);
+	if (ret != BLK_STS_OK)
+		return ret;
+
+	if (!scsi_device_online(sdp) || sdp->changed) {
+		scmd_printk(KERN_ERR, cmd, "device offline or changed\n");
+		return BLK_STS_IOERR;
+	}
+
+	if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->rq_disk)) {
+		scmd_printk(KERN_ERR, cmd, "access beyond end of device\n");
+		return BLK_STS_IOERR;
+	}
+
+	if ((blk_rq_pos(rq) & mask) || (blk_rq_sectors(rq) & mask)) {
+		scmd_printk(KERN_ERR, cmd, "request not aligned to the logical block size\n");
+		return BLK_STS_IOERR;
 	}
 
 	/*
-	 * Some SD card readers can't handle multi-sector accesses which touch
-	 * the last one or two hardware sectors.  Split accesses as needed.
+	 * Some SD card readers can't handle accesses which touch the
+	 * last one or two logical blocks. Split accesses as needed.
 	 */
-	threshold = get_capacity(disk) - SD_LAST_BUGGY_SECTORS *
-		(sdp->sector_size / 512);
+	threshold = sdkp->capacity - SD_LAST_BUGGY_SECTORS;
 
-	if (unlikely(sdp->last_sector_bug && block + this_count > threshold)) {
-		if (block < threshold) {
+	if (unlikely(sdp->last_sector_bug && lba + nr_blocks > threshold)) {
+		if (lba < threshold) {
 			/* Access up to the threshold but not beyond */
-			this_count = threshold - block;
+			nr_blocks = threshold - lba;
 		} else {
-			/* Access only a single hardware sector */
-			this_count = sdp->sector_size / 512;
+			/* Access only a single logical block */
+			nr_blocks = 1;
 		}
 	}
 
-	SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, "block=%llu\n",
-					(unsigned long long)block));
-
-	/*
-	 * If we have a 1K hardware sectorsize, prevent access to single
-	 * 512 byte sectors.  In theory we could handle this - in fact
-	 * the scsi cdrom driver must be able to handle this because
-	 * we typically use 1K blocksizes, and cdroms typically have
-	 * 2K hardware sectorsizes.  Of course, things are simpler
-	 * with the cdrom, since it is read-only.  For performance
-	 * reasons, the filesystems should be able to handle this
-	 * and not force the scsi disk driver to use bounce buffers
-	 * for this.
-	 */
-	if (sdp->sector_size == 1024) {
-		if ((block & 1) || (blk_rq_sectors(rq) & 1)) {
-			scmd_printk(KERN_ERR, SCpnt,
-				    "Bad block number requested\n");
-			goto out;
-		} else {
-			block = block >> 1;
-			this_count = this_count >> 1;
-		}
-	}
-	if (sdp->sector_size == 2048) {
-		if ((block & 3) || (blk_rq_sectors(rq) & 3)) {
-			scmd_printk(KERN_ERR, SCpnt,
-				    "Bad block number requested\n");
-			goto out;
-		} else {
-			block = block >> 2;
-			this_count = this_count >> 2;
-		}
-	}
-	if (sdp->sector_size == 4096) {
-		if ((block & 7) || (blk_rq_sectors(rq) & 7)) {
-			scmd_printk(KERN_ERR, SCpnt,
-				    "Bad block number requested\n");
-			goto out;
-		} else {
-			block = block >> 3;
-			this_count = this_count >> 3;
-		}
-	}
-	if (rq_data_dir(rq) == WRITE) {
-		SCpnt->cmnd[0] = WRITE_6;
-
-		if (blk_integrity_rq(rq))
-			t10_pi_prepare(SCpnt->request, sdkp->protection_type);
-
-	} else if (rq_data_dir(rq) == READ) {
-		SCpnt->cmnd[0] = READ_6;
-	} else {
-		scmd_printk(KERN_ERR, SCpnt, "Unknown command %d\n", req_op(rq));
-		goto out;
-	}
-
-	SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
-					"%s %d/%u 512 byte blocks.\n",
-					(rq_data_dir(rq) == WRITE) ?
-					"writing" : "reading", this_count,
-					blk_rq_sectors(rq)));
-
-	dix = scsi_prot_sg_count(SCpnt);
-	dif = scsi_host_dif_capable(SCpnt->device->host, sdkp->protection_type);
+	fua = rq->cmd_flags & REQ_FUA ? 0x8 : 0;
+	dix = scsi_prot_sg_count(cmd);
+	dif = scsi_host_dif_capable(cmd->device->host, sdkp->protection_type);
 
 	if (dif || dix)
-		protect = sd_setup_protect_cmnd(SCpnt, dix, dif);
+		protect = sd_setup_protect_cmnd(cmd, dix, dif);
 	else
 		protect = 0;
 
 	if (protect && sdkp->protection_type == T10_PI_TYPE2_PROTECTION) {
-		SCpnt->cmnd = mempool_alloc(sd_cdb_pool, GFP_ATOMIC);
-
-		if (unlikely(SCpnt->cmnd == NULL)) {
-			ret = BLKPREP_DEFER;
-			goto out;
-		}
-
-		SCpnt->cmd_len = SD_EXT_CDB_SIZE;
-		memset(SCpnt->cmnd, 0, SCpnt->cmd_len);
-		SCpnt->cmnd[0] = VARIABLE_LENGTH_CMD;
-		SCpnt->cmnd[7] = 0x18;
-		SCpnt->cmnd[9] = (rq_data_dir(rq) == READ) ? READ_32 : WRITE_32;
-		SCpnt->cmnd[10] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0);
-
-		/* LBA */
-		SCpnt->cmnd[12] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0;
-		SCpnt->cmnd[13] = sizeof(block) > 4 ? (unsigned char) (block >> 48) & 0xff : 0;
-		SCpnt->cmnd[14] = sizeof(block) > 4 ? (unsigned char) (block >> 40) & 0xff : 0;
-		SCpnt->cmnd[15] = sizeof(block) > 4 ? (unsigned char) (block >> 32) & 0xff : 0;
-		SCpnt->cmnd[16] = (unsigned char) (block >> 24) & 0xff;
-		SCpnt->cmnd[17] = (unsigned char) (block >> 16) & 0xff;
-		SCpnt->cmnd[18] = (unsigned char) (block >> 8) & 0xff;
-		SCpnt->cmnd[19] = (unsigned char) block & 0xff;
-
-		/* Expected Indirect LBA */
-		SCpnt->cmnd[20] = (unsigned char) (block >> 24) & 0xff;
-		SCpnt->cmnd[21] = (unsigned char) (block >> 16) & 0xff;
-		SCpnt->cmnd[22] = (unsigned char) (block >> 8) & 0xff;
-		SCpnt->cmnd[23] = (unsigned char) block & 0xff;
-
-		/* Transfer length */
-		SCpnt->cmnd[28] = (unsigned char) (this_count >> 24) & 0xff;
-		SCpnt->cmnd[29] = (unsigned char) (this_count >> 16) & 0xff;
-		SCpnt->cmnd[30] = (unsigned char) (this_count >> 8) & 0xff;
-		SCpnt->cmnd[31] = (unsigned char) this_count & 0xff;
-	} else if (sdp->use_16_for_rw || (this_count > 0xffff)) {
-		SCpnt->cmnd[0] += READ_16 - READ_6;
-		SCpnt->cmnd[1] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0);
-		SCpnt->cmnd[2] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0;
-		SCpnt->cmnd[3] = sizeof(block) > 4 ? (unsigned char) (block >> 48) & 0xff : 0;
-		SCpnt->cmnd[4] = sizeof(block) > 4 ? (unsigned char) (block >> 40) & 0xff : 0;
-		SCpnt->cmnd[5] = sizeof(block) > 4 ? (unsigned char) (block >> 32) & 0xff : 0;
-		SCpnt->cmnd[6] = (unsigned char) (block >> 24) & 0xff;
-		SCpnt->cmnd[7] = (unsigned char) (block >> 16) & 0xff;
-		SCpnt->cmnd[8] = (unsigned char) (block >> 8) & 0xff;
-		SCpnt->cmnd[9] = (unsigned char) block & 0xff;
-		SCpnt->cmnd[10] = (unsigned char) (this_count >> 24) & 0xff;
-		SCpnt->cmnd[11] = (unsigned char) (this_count >> 16) & 0xff;
-		SCpnt->cmnd[12] = (unsigned char) (this_count >> 8) & 0xff;
-		SCpnt->cmnd[13] = (unsigned char) this_count & 0xff;
-		SCpnt->cmnd[14] = SCpnt->cmnd[15] = 0;
-	} else if ((this_count > 0xff) || (block > 0x1fffff) ||
-		   scsi_device_protection(SCpnt->device) ||
-		   SCpnt->device->use_10_for_rw) {
-		SCpnt->cmnd[0] += READ_10 - READ_6;
-		SCpnt->cmnd[1] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0);
-		SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff;
-		SCpnt->cmnd[3] = (unsigned char) (block >> 16) & 0xff;
-		SCpnt->cmnd[4] = (unsigned char) (block >> 8) & 0xff;
-		SCpnt->cmnd[5] = (unsigned char) block & 0xff;
-		SCpnt->cmnd[6] = SCpnt->cmnd[9] = 0;
-		SCpnt->cmnd[7] = (unsigned char) (this_count >> 8) & 0xff;
-		SCpnt->cmnd[8] = (unsigned char) this_count & 0xff;
+		ret = sd_setup_rw32_cmnd(cmd, write, lba, nr_blocks,
+					 protect | fua);
+	} else if (sdp->use_16_for_rw || (nr_blocks > 0xffff)) {
+		ret = sd_setup_rw16_cmnd(cmd, write, lba, nr_blocks,
+					 protect | fua);
+	} else if ((nr_blocks > 0xff) || (lba > 0x1fffff) ||
+		   sdp->use_10_for_rw || protect) {
+		ret = sd_setup_rw10_cmnd(cmd, write, lba, nr_blocks,
+					 protect | fua);
 	} else {
-		if (unlikely(rq->cmd_flags & REQ_FUA)) {
-			/*
-			 * This happens only if this drive failed
-			 * 10byte rw command with ILLEGAL_REQUEST
-			 * during operation and thus turned off
-			 * use_10_for_rw.
-			 */
-			scmd_printk(KERN_ERR, SCpnt,
-				    "FUA write on READ/WRITE(6) drive\n");
-			goto out;
-		}
-
-		SCpnt->cmnd[1] |= (unsigned char) ((block >> 16) & 0x1f);
-		SCpnt->cmnd[2] = (unsigned char) ((block >> 8) & 0xff);
-		SCpnt->cmnd[3] = (unsigned char) block & 0xff;
-		SCpnt->cmnd[4] = (unsigned char) this_count;
-		SCpnt->cmnd[5] = 0;
+		ret = sd_setup_rw6_cmnd(cmd, write, lba, nr_blocks,
+					protect | fua);
 	}
-	SCpnt->sdb.length = this_count * sdp->sector_size;
+
+	if (unlikely(ret != BLK_STS_OK))
+		return ret;
 
 	/*
 	 * We shouldn't disconnect in the middle of a sector, so with a dumb
 	 * host adapter, it's safe to assume that we can at least transfer
 	 * this many bytes between each connect / disconnect.
 	 */
-	SCpnt->transfersize = sdp->sector_size;
-	SCpnt->underflow = this_count << 9;
-	SCpnt->allowed = SD_MAX_RETRIES;
+	cmd->transfersize = sdp->sector_size;
+	cmd->underflow = nr_blocks << 9;
+	cmd->allowed = SD_MAX_RETRIES;
+	cmd->sdb.length = nr_blocks * sdp->sector_size;
+
+	SCSI_LOG_HLQUEUE(1,
+			 scmd_printk(KERN_INFO, cmd,
+				     "%s: block=%llu, count=%d\n", __func__,
+				     (unsigned long long)blk_rq_pos(rq),
+				     blk_rq_sectors(rq)));
+	SCSI_LOG_HLQUEUE(2,
+			 scmd_printk(KERN_INFO, cmd,
+				     "%s %d/%u 512 byte blocks.\n",
+				     write ? "writing" : "reading", nr_blocks,
+				     blk_rq_sectors(rq)));
 
 	/*
 	 * This indicates that the command is ready from our end to be
 	 * queued.
 	 */
-	ret = BLKPREP_OK;
- out:
-	return ret;
+	return BLK_STS_OK;
 }
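The refactored function now reads as a pure dispatch: RW32 when type 2 protection information is active, RW16 when the device asks for it or the block count exceeds 16 bits, RW10 when the count exceeds 8 bits, the LBA exceeds 21 bits, protection is on, or the device prefers 10-byte CDBs, and RW6 otherwise. A runnable userspace mirror of that decision, with the bool parameters standing in for the sdp/sdkp fields:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static int cdb_len(uint64_t lba, uint32_t nr_blocks, bool pi_type2,
		   bool use_16_for_rw, bool use_10_for_rw, bool protect)
{
	if (protect && pi_type2)
		return 32;
	if (use_16_for_rw || nr_blocks > 0xffff)
		return 16;
	if (nr_blocks > 0xff || lba > 0x1fffff || use_10_for_rw || protect)
		return 10;
	return 6;
}

int main(void)
{
	printf("%d\n", cdb_len(0x10, 32, 0, 0, 0, 0));	   /* 6 */
	printf("%d\n", cdb_len(0x3fffff, 32, 0, 0, 0, 0)); /* 10: big LBA */
	printf("%d\n", cdb_len(0x10, 0x10000, 0, 0, 0, 0));/* 16: big count */
	return 0;
}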
 
-static int sd_init_command(struct scsi_cmnd *cmd)
+static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
 {
 	struct request *rq = cmd->request;
 
@@ -1264,7 +1279,7 @@
 		case SD_LBP_ZERO:
 			return sd_setup_write_same10_cmnd(cmd, false);
 		default:
-			return BLKPREP_INVALID;
+			return BLK_STS_TARGET;
 		}
 	case REQ_OP_WRITE_ZEROES:
 		return sd_setup_write_zeroes_cmnd(cmd);
@@ -1275,13 +1290,13 @@
 	case REQ_OP_READ:
 	case REQ_OP_WRITE:
 		return sd_setup_read_write_cmnd(cmd);
-	case REQ_OP_ZONE_REPORT:
-		return sd_zbc_setup_report_cmnd(cmd);
 	case REQ_OP_ZONE_RESET:
-		return sd_zbc_setup_reset_cmnd(cmd);
+		return sd_zbc_setup_reset_cmnd(cmd, false);
+	case REQ_OP_ZONE_RESET_ALL:
+		return sd_zbc_setup_reset_cmnd(cmd, true);
 	default:
 		WARN_ON_ONCE(1);
-		return BLKPREP_KILL;
+		return BLK_STS_NOTSUPP;
 	}
 }
 
@@ -1402,11 +1417,6 @@
 			scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
 	}
 
-	/*
-	 * XXX and what if there are packets in flight and this close()
-	 * XXX is followed by a "rmmod sd_mod"?
-	 */
-
 	scsi_disk_put(sdkp);
 }
 
@@ -1645,7 +1655,8 @@
 		/* we need to evaluate the error return  */
 		if (scsi_sense_valid(sshdr) &&
 			(sshdr->asc == 0x3a ||	/* medium not present */
-			 sshdr->asc == 0x20))	/* invalid command */
+			 sshdr->asc == 0x20 ||	/* invalid command */
+			 (sshdr->asc == 0x74 && sshdr->ascq == 0x71)))	/* drive is password locked */
 				/* this is no error here */
 				return 0;
 
@@ -1805,6 +1816,7 @@
 	.check_events		= sd_check_events,
 	.revalidate_disk	= sd_revalidate_disk,
 	.unlock_native_capacity	= sd_unlock_native_capacity,
+	.report_zones		= sd_zbc_report_zones,
 	.pr_ops			= &sd_pr_ops,
 };
 
@@ -1948,6 +1960,7 @@
 	case REQ_OP_WRITE_ZEROES:
 	case REQ_OP_WRITE_SAME:
 	case REQ_OP_ZONE_RESET:
+	case REQ_OP_ZONE_RESET_ALL:
 		if (!result) {
 			good_bytes = blk_rq_bytes(req);
 			scsi_set_resid(SCpnt, 0);
@@ -1956,16 +1969,6 @@
 			scsi_set_resid(SCpnt, blk_rq_bytes(req));
 		}
 		break;
-	case REQ_OP_ZONE_REPORT:
-		if (!result) {
-			good_bytes = scsi_bufflen(SCpnt)
-				- scsi_get_resid(SCpnt);
-			scsi_set_resid(SCpnt, 0);
-		} else {
-			good_bytes = 0;
-			scsi_set_resid(SCpnt, blk_rq_bytes(req));
-		}
-		break;
 	default:
 		/*
 		 * In case of bogus fw or device, we could end up having
@@ -1977,6 +1980,7 @@
 			sd_printk(KERN_INFO, sdkp,
 				"Unaligned partial completion (resid=%u, sector_sz=%u)\n",
 				resid, sector_size);
+			scsi_print_command(SCpnt);
 			resid = min(scsi_bufflen(SCpnt),
 				    round_up(resid, sector_size));
 			scsi_set_resid(SCpnt, resid);
@@ -2050,11 +2054,6 @@
 					   "sd_done: completed %d of %d bytes\n",
 					   good_bytes, scsi_bufflen(SCpnt)));
 
-	if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt) &&
-	    good_bytes)
-		t10_pi_complete(SCpnt->request, sdkp->protection_type,
-				good_bytes / scsi_prot_interval(SCpnt));
-
 	return good_bytes;
 }
 
@@ -2257,22 +2256,6 @@
 
 #define READ_CAPACITY_RETRIES_ON_RESET	10
 
-/*
- * Ensure that we don't overflow sector_t when CONFIG_LBDAF is not set
- * and the reported logical block size is bigger than 512 bytes. Note
- * that last_sector is a u64 and therefore logical_to_sectors() is not
- * applicable.
- */
-static bool sd_addressable_capacity(u64 lba, unsigned int sector_size)
-{
-	u64 last_sector = (lba + 1ULL) << (ilog2(sector_size) - 9);
-
-	if (sizeof(sector_t) == 4 && last_sector > U32_MAX)
-		return false;
-
-	return true;
-}
-
 static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
 						unsigned char *buffer)
 {
@@ -2338,14 +2321,6 @@
 		return -ENODEV;
 	}
 
-	if (!sd_addressable_capacity(lba, sector_size)) {
-		sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
-			"kernel compiled with support for large block "
-			"devices.\n");
-		sdkp->capacity = 0;
-		return -EOVERFLOW;
-	}
-
 	/* Logical blocks per physical block exponent */
 	sdkp->physical_block_size = (1 << (buffer[13] & 0xf)) * sector_size;
 
@@ -2427,14 +2402,6 @@
 		return sector_size;
 	}
 
-	if (!sd_addressable_capacity(lba, sector_size)) {
-		sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
-			"kernel compiled with support for large block "
-			"devices.\n");
-		sdkp->capacity = 0;
-		return -EOVERFLOW;
-	}
-
 	sdkp->capacity = lba + 1;
 	sdkp->physical_block_size = sector_size;
 	return sector_size;
@@ -2562,25 +2529,25 @@
 	int sector_size = sdkp->device->sector_size;
 	char cap_str_2[10], cap_str_10[10];
 
+	if (!sdkp->first_scan && old_capacity == sdkp->capacity)
+		return;
+
 	string_get_size(sdkp->capacity, sector_size,
 			STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
 	string_get_size(sdkp->capacity, sector_size,
-			STRING_UNITS_10, cap_str_10,
-			sizeof(cap_str_10));
+			STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
 
-	if (sdkp->first_scan || old_capacity != sdkp->capacity) {
+	sd_printk(KERN_NOTICE, sdkp,
+		  "%llu %d-byte logical blocks: (%s/%s)\n",
+		  (unsigned long long)sdkp->capacity,
+		  sector_size, cap_str_10, cap_str_2);
+
+	if (sdkp->physical_block_size != sector_size)
 		sd_printk(KERN_NOTICE, sdkp,
-			  "%llu %d-byte logical blocks: (%s/%s)\n",
-			  (unsigned long long)sdkp->capacity,
-			  sector_size, cap_str_10, cap_str_2);
+			  "%u-byte physical blocks\n",
+			  sdkp->physical_block_size);
 
-		if (sdkp->physical_block_size != sector_size)
-			sd_printk(KERN_NOTICE, sdkp,
-				  "%u-byte physical blocks\n",
-				  sdkp->physical_block_size);
-
-		sd_zbc_print_zones(sdkp);
-	}
+	sd_zbc_print_zones(sdkp);
 }
 
 /* called with buffer of length 512 */
@@ -2604,7 +2571,6 @@
 	int res;
 	struct scsi_device *sdp = sdkp->device;
 	struct scsi_mode_data data;
-	int disk_ro = get_disk_ro(sdkp->disk);
 	int old_wp = sdkp->write_prot;
 
 	set_disk_ro(sdkp->disk, 0);
@@ -2645,7 +2611,7 @@
 			  "Test WP failed, assume Write Enabled\n");
 	} else {
 		sdkp->write_prot = ((data.device_specific & 0x80) != 0);
-		set_disk_ro(sdkp->disk, sdkp->write_prot || disk_ro);
+		set_disk_ro(sdkp->disk, sdkp->write_prot);
 		if (sdkp->first_scan || old_wp != sdkp->write_prot) {
 			sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n",
 				  sdkp->write_prot ? "on" : "off");
@@ -2964,9 +2930,6 @@
 	if (rot == 1) {
 		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
-	} else {
-		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
-		blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
 	}
 
 	if (sdkp->device->type == TYPE_ZBC) {
@@ -3063,6 +3026,58 @@
 		sdkp->security = 1;
 }
 
+/*
+ * Determine the device's preferred I/O size for reads and writes
+ * unless the reported value is unreasonably small, large, not a
+ * multiple of the physical block size, or simply garbage.
+ */
+static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
+				      unsigned int dev_max)
+{
+	struct scsi_device *sdp = sdkp->device;
+	unsigned int opt_xfer_bytes =
+		logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
+
+	if (sdkp->opt_xfer_blocks == 0)
+		return false;
+
+	if (sdkp->opt_xfer_blocks > dev_max) {
+		sd_first_printk(KERN_WARNING, sdkp,
+				"Optimal transfer size %u logical blocks " \
+				"> dev_max (%u logical blocks)\n",
+				sdkp->opt_xfer_blocks, dev_max);
+		return false;
+	}
+
+	if (sdkp->opt_xfer_blocks > SD_DEF_XFER_BLOCKS) {
+		sd_first_printk(KERN_WARNING, sdkp,
+				"Optimal transfer size %u logical blocks " \
+				"> sd driver limit (%u logical blocks)\n",
+				sdkp->opt_xfer_blocks, SD_DEF_XFER_BLOCKS);
+		return false;
+	}
+
+	if (opt_xfer_bytes < PAGE_SIZE) {
+		sd_first_printk(KERN_WARNING, sdkp,
+				"Optimal transfer size %u bytes < " \
+				"PAGE_SIZE (%u bytes)\n",
+				opt_xfer_bytes, (unsigned int)PAGE_SIZE);
+		return false;
+	}
+
+	if (opt_xfer_bytes & (sdkp->physical_block_size - 1)) {
+		sd_first_printk(KERN_WARNING, sdkp,
+				"Optimal transfer size %u bytes not a " \
+				"multiple of physical block size (%u bytes)\n",
+				opt_xfer_bytes, sdkp->physical_block_size);
+		return false;
+	}
+
+	sd_first_printk(KERN_INFO, sdkp, "Optimal transfer size %u bytes\n",
+			opt_xfer_bytes);
+	return true;
+}
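The new physical-block-size check is the interesting addition: a device can report an OPTIMAL TRANSFER LENGTH that passes the older size checks yet still straddles physical blocks. A runnable example with a hypothetical 512-byte-logical/4K-physical device reporting 65535 logical blocks: 65535 * 512 = 33553920 bytes, which is well above PAGE_SIZE but not a multiple of 4096, so it is now rejected:

#include <stdio.h>

int main(void)
{
	unsigned int logical = 512, physical = 4096;
	unsigned int opt_xfer_blocks = 65535;	/* hypothetical report */
	unsigned long long opt_xfer_bytes =
		(unsigned long long)opt_xfer_blocks * logical;

	printf("bytes=%llu, multiple of physical: %s\n", opt_xfer_bytes,
	       opt_xfer_bytes % physical ? "no (rejected)" : "yes");
	return 0;
}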
+
 /**
  *	sd_revalidate_disk - called the first time a new disk is seen,
  *	performs disk spin up, read_capacity, etc.
@@ -3103,6 +3118,15 @@
 	if (sdkp->media_present) {
 		sd_read_capacity(sdkp, buffer);
 
+		/*
+		 * Set the default to rotational.  All non-rotational devices
+		 * support the block characteristics VPD page, which updates
+		 * this correctly; any device that does not support the page
+		 * should be treated as rotational.
+		 */
+		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
+		blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
+
 		if (scsi_device_supports_vpd(sdp)) {
 			sd_read_block_provisioning(sdkp);
 			sd_read_block_limits(sdkp);
@@ -3132,15 +3156,7 @@
 	dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks);
 	q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
 
-	/*
-	 * Determine the device's preferred I/O size for reads and writes
-	 * unless the reported value is unreasonably small, large, or
-	 * garbage.
-	 */
-	if (sdkp->opt_xfer_blocks &&
-	    sdkp->opt_xfer_blocks <= dev_max &&
-	    sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
-	    logical_to_bytes(sdp, sdkp->opt_xfer_blocks) >= PAGE_SIZE) {
+	if (sd_validate_opt_xfer_size(sdkp, dev_max)) {
 		q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
 		rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
 	} else
@@ -3235,68 +3251,6 @@
 	return 0;
 }
 
-/*
- * The asynchronous part of sd_probe
- */
-static void sd_probe_async(void *data, async_cookie_t cookie)
-{
-	struct scsi_disk *sdkp = data;
-	struct scsi_device *sdp;
-	struct gendisk *gd;
-	u32 index;
-	struct device *dev;
-
-	sdp = sdkp->device;
-	gd = sdkp->disk;
-	index = sdkp->index;
-	dev = &sdp->sdev_gendev;
-
-	gd->major = sd_major((index & 0xf0) >> 4);
-	gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
-
-	gd->fops = &sd_fops;
-	gd->private_data = &sdkp->driver;
-	gd->queue = sdkp->device->request_queue;
-
-	/* defaults, until the device tells us otherwise */
-	sdp->sector_size = 512;
-	sdkp->capacity = 0;
-	sdkp->media_present = 1;
-	sdkp->write_prot = 0;
-	sdkp->cache_override = 0;
-	sdkp->WCE = 0;
-	sdkp->RCD = 0;
-	sdkp->ATO = 0;
-	sdkp->first_scan = 1;
-	sdkp->max_medium_access_timeouts = SD_MAX_MEDIUM_TIMEOUTS;
-
-	sd_revalidate_disk(gd);
-
-	gd->flags = GENHD_FL_EXT_DEVT;
-	if (sdp->removable) {
-		gd->flags |= GENHD_FL_REMOVABLE;
-		gd->events |= DISK_EVENT_MEDIA_CHANGE;
-	}
-
-	blk_pm_runtime_init(sdp->request_queue, dev);
-	device_add_disk(dev, gd);
-	if (sdkp->capacity)
-		sd_dif_config_host(sdkp);
-
-	sd_revalidate_disk(gd);
-
-	if (sdkp->security) {
-		sdkp->opal_dev = init_opal_dev(sdp, &sd_sec_submit);
-		if (sdkp->opal_dev)
-			sd_printk(KERN_NOTICE, sdkp, "supports TCG Opal\n");
-	}
-
-	sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
-		  sdp->removable ? "removable " : "");
-	scsi_autopm_put_device(sdp);
-	put_device(&sdkp->dev);
-}
-
 /**
  *	sd_probe - called during driver initialization and whenever a
  *	new scsi device is attached to the system. It is called once
@@ -3386,8 +3340,50 @@
 	get_device(dev);
 	dev_set_drvdata(dev, sdkp);
 
-	get_device(&sdkp->dev);	/* prevent release before async_schedule */
-	async_schedule_domain(sd_probe_async, sdkp, &scsi_sd_probe_domain);
+	gd->major = sd_major((index & 0xf0) >> 4);
+	gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
+
+	gd->fops = &sd_fops;
+	gd->private_data = &sdkp->driver;
+	gd->queue = sdkp->device->request_queue;
+
+	/* defaults, until the device tells us otherwise */
+	sdp->sector_size = 512;
+	sdkp->capacity = 0;
+	sdkp->media_present = 1;
+	sdkp->write_prot = 0;
+	sdkp->cache_override = 0;
+	sdkp->WCE = 0;
+	sdkp->RCD = 0;
+	sdkp->ATO = 0;
+	sdkp->first_scan = 1;
+	sdkp->max_medium_access_timeouts = SD_MAX_MEDIUM_TIMEOUTS;
+
+	sd_revalidate_disk(gd);
+
+	gd->flags = GENHD_FL_EXT_DEVT;
+	if (sdp->removable) {
+		gd->flags |= GENHD_FL_REMOVABLE;
+		gd->events |= DISK_EVENT_MEDIA_CHANGE;
+		gd->event_flags = DISK_EVENT_FLAG_POLL | DISK_EVENT_FLAG_UEVENT;
+	}
+
+	blk_pm_runtime_init(sdp->request_queue, dev);
+	device_add_disk(dev, gd, NULL);
+	if (sdkp->capacity)
+		sd_dif_config_host(sdkp);
+
+	sd_revalidate_disk(gd);
+
+	if (sdkp->security) {
+		sdkp->opal_dev = init_opal_dev(sdp, &sd_sec_submit);
+		if (sdkp->opal_dev)
+			sd_printk(KERN_NOTICE, sdkp, "supports TCG Opal\n");
+	}
+
+	sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
+		  sdp->removable ? "removable " : "");
+	scsi_autopm_put_device(sdp);
 
 	return 0;
 
@@ -3423,13 +3419,10 @@
 	scsi_autopm_get_device(sdkp->device);
 
 	async_synchronize_full_domain(&scsi_sd_pm_domain);
-	async_synchronize_full_domain(&scsi_sd_probe_domain);
 	device_del(&sdkp->dev);
 	del_gendisk(sdkp->disk);
 	sd_shutdown(dev);
 
-	sd_zbc_remove(sdkp);
-
 	free_opal_dev(sdkp->opal_dev);
 
 	blk_register_region(devt, SD_MINORS, NULL,
@@ -3456,9 +3449,21 @@
 {
 	struct scsi_disk *sdkp = to_scsi_disk(dev);
 	struct gendisk *disk = sdkp->disk;
-	
+	struct request_queue *q = disk->queue;
+
 	ida_free(&sd_index_ida, sdkp->index);
 
+	/*
+	 * Wait until all requests that are in progress have completed.
+	 * This is necessary to avoid that e.g. scsi_end_request() crashes
+	 * This is necessary to prevent e.g. scsi_end_request() from
+	 * crashing once the disk->private_data pointer has been cleared.
+	 * Waiting here in scsi_disk_release() instead of in sd_release()
+	 * avoids having the queue freeze/unfreeze disturb user space I/O
+	 * when multiple processes open a /dev/sd... node concurrently.
+	blk_mq_freeze_queue(q);
+	blk_mq_unfreeze_queue(q);
+
 	disk->private_data = NULL;
 	put_disk(disk);
 	put_device(&sdkp->device->sdev_gendev);
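The freeze/unfreeze pair above is used purely as a drain barrier: blk_mq_freeze_queue() returns only once the queue's usage counter has dropped to zero, that is, once every in-flight request has completed, and the immediate unfreeze re-admits new I/O. A minimal sketch of the pattern, assuming q is the disk's request queue:

#include <linux/blk-mq.h>

static void example_wait_for_inflight(struct request_queue *q)
{
	blk_mq_freeze_queue(q);		/* wait for in-flight requests */
	blk_mq_unfreeze_queue(q);	/* re-admit new I/O */
	/* requests started before the freeze can no longer touch state
	 * that is torn down after this point */
}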
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index a7d4f50..1eab779 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -76,7 +76,6 @@
 #ifdef CONFIG_BLK_DEV_ZONED
 	u32		nr_zones;
 	u32		zone_blocks;
-	u32		zone_shift;
 	u32		zones_optimal_open;
 	u32		zones_optimal_nonseq;
 	u32		zones_max_open;
@@ -133,7 +132,7 @@
 
 #define sd_first_printk(prefix, sdsk, fmt, a...)			\
 	do {								\
-		if ((sdkp)->first_scan)					\
+		if ((sdsk)->first_scan)					\
 			sd_printk(prefix, sdsk, fmt, ##a);		\
 	} while (0)
 
@@ -189,68 +188,6 @@
 	return sector >> (ilog2(sdev->sector_size) - 9);
 }
 
-/*
- * Look up the DIX operation based on whether the command is read or
- * write and whether dix and dif are enabled.
- */
-static inline unsigned int sd_prot_op(bool write, bool dix, bool dif)
-{
-	/* Lookup table: bit 2 (write), bit 1 (dix), bit 0 (dif) */
-	const unsigned int ops[] = {	/* wrt dix dif */
-		SCSI_PROT_NORMAL,	/*  0	0   0  */
-		SCSI_PROT_READ_STRIP,	/*  0	0   1  */
-		SCSI_PROT_READ_INSERT,	/*  0	1   0  */
-		SCSI_PROT_READ_PASS,	/*  0	1   1  */
-		SCSI_PROT_NORMAL,	/*  1	0   0  */
-		SCSI_PROT_WRITE_INSERT, /*  1	0   1  */
-		SCSI_PROT_WRITE_STRIP,	/*  1	1   0  */
-		SCSI_PROT_WRITE_PASS,	/*  1	1   1  */
-	};
-
-	return ops[write << 2 | dix << 1 | dif];
-}
-
-/*
- * Returns a mask of the protection flags that are valid for a given DIX
- * operation.
- */
-static inline unsigned int sd_prot_flag_mask(unsigned int prot_op)
-{
-	const unsigned int flag_mask[] = {
-		[SCSI_PROT_NORMAL]		= 0,
-
-		[SCSI_PROT_READ_STRIP]		= SCSI_PROT_TRANSFER_PI |
-						  SCSI_PROT_GUARD_CHECK |
-						  SCSI_PROT_REF_CHECK |
-						  SCSI_PROT_REF_INCREMENT,
-
-		[SCSI_PROT_READ_INSERT]		= SCSI_PROT_REF_INCREMENT |
-						  SCSI_PROT_IP_CHECKSUM,
-
-		[SCSI_PROT_READ_PASS]		= SCSI_PROT_TRANSFER_PI |
-						  SCSI_PROT_GUARD_CHECK |
-						  SCSI_PROT_REF_CHECK |
-						  SCSI_PROT_REF_INCREMENT |
-						  SCSI_PROT_IP_CHECKSUM,
-
-		[SCSI_PROT_WRITE_INSERT]	= SCSI_PROT_TRANSFER_PI |
-						  SCSI_PROT_REF_INCREMENT,
-
-		[SCSI_PROT_WRITE_STRIP]		= SCSI_PROT_GUARD_CHECK |
-						  SCSI_PROT_REF_CHECK |
-						  SCSI_PROT_REF_INCREMENT |
-						  SCSI_PROT_IP_CHECKSUM,
-
-		[SCSI_PROT_WRITE_PASS]		= SCSI_PROT_TRANSFER_PI |
-						  SCSI_PROT_GUARD_CHECK |
-						  SCSI_PROT_REF_CHECK |
-						  SCSI_PROT_REF_INCREMENT |
-						  SCSI_PROT_IP_CHECKSUM,
-	};
-
-	return flag_mask[prot_op];
-}
-
 #ifdef CONFIG_BLK_DEV_INTEGRITY
 
 extern void sd_dif_config_host(struct scsi_disk *);
@@ -271,12 +208,12 @@
 #ifdef CONFIG_BLK_DEV_ZONED
 
 extern int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buffer);
-extern void sd_zbc_remove(struct scsi_disk *sdkp);
 extern void sd_zbc_print_zones(struct scsi_disk *sdkp);
-extern int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd);
-extern int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd);
+extern blk_status_t sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd, bool all);
 extern void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
 			    struct scsi_sense_hdr *sshdr);
+extern int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
+			       struct blk_zone *zones, unsigned int *nr_zones);
 
 #else /* CONFIG_BLK_DEV_ZONED */
 
@@ -286,24 +223,20 @@
 	return 0;
 }
 
-static inline void sd_zbc_remove(struct scsi_disk *sdkp) {}
-
 static inline void sd_zbc_print_zones(struct scsi_disk *sdkp) {}
 
-static inline int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd)
+static inline blk_status_t sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd,
+						   bool all)
 {
-	return BLKPREP_INVALID;
-}
-
-static inline int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd)
-{
-	return BLKPREP_INVALID;
+	return BLK_STS_TARGET;
 }
 
 static inline void sd_zbc_complete(struct scsi_cmnd *cmd,
 				   unsigned int good_bytes,
 				   struct scsi_sense_hdr *sshdr) {}
 
+#define sd_zbc_report_zones NULL
+
 #endif /* CONFIG_BLK_DEV_ZONED */
 
 #endif /* _SCSI_DISK_H */
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index db72c82..4cadb26 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -1,23 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * sd_dif.c - SCSI Data Integrity Field
  *
  * Copyright (C) 2007, 2008 Oracle Corporation
  * Written by: Martin K. Petersen <martin.petersen@oracle.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING.  If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
- * USA.
- *
  */
 
 #include <linux/blkdev.h>
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 412c178..1efc69e 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * SCSI Zoned Block commands
  *
@@ -5,24 +6,11 @@
  * Written by: Hannes Reinecke <hare@suse.de>
  * Modified by: Damien Le Moal <damien.lemoal@hgst.com>
  * Modified by: Shaun Tancheff <shaun.tancheff@seagate.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING.  If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
- * USA.
- *
  */
 
 #include <linux/blkdev.h>
+#include <linux/vmalloc.h>
+#include <linux/sched/mm.h>
 
 #include <asm/unaligned.h>
 
@@ -62,16 +50,22 @@
 }
 
 /**
- * sd_zbc_report_zones - Issue a REPORT ZONES scsi command.
+ * sd_zbc_do_report_zones - Issue a REPORT ZONES scsi command.
  * @sdkp: The target disk
- * @buf: Buffer to use for the reply
+ * @buf: vmalloc-ed buffer to use for the reply
  * @buflen: the buffer size
  * @lba: Start LBA of the report
+ * @partial: Do partial report
  *
  * For internal use during device validation.
+ * Using partial=true can significantly speed up execution of a report zones
+ * command because the disk does not have to count all possible matching
+ * zones and will only report the number of zones that fit in the command
+ * reply buffer.
  */
-static int sd_zbc_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
-			       unsigned int buflen, sector_t lba)
+static int sd_zbc_do_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
+				  unsigned int buflen, sector_t lba,
+				  bool partial)
 {
 	struct scsi_device *sdp = sdkp->device;
 	const int timeout = sdp->request_queue->rq_timeout;
@@ -85,7 +79,8 @@
 	cmd[1] = ZI_REPORT_ZONES;
 	put_unaligned_be64(lba, &cmd[2]);
 	put_unaligned_be32(buflen, &cmd[10]);
-	memset(buf, 0, buflen);
+	if (partial)
+		cmd[14] = ZBC_REPORT_ZONE_PARTIAL;
 
 	result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
 				  buf, buflen, &sshdr,
@@ -109,109 +104,97 @@
 	return 0;
 }
 
-/**
- * sd_zbc_setup_report_cmnd - Prepare a REPORT ZONES scsi command
- * @cmd: The command to setup
- *
- * Call in sd_init_command() for a REQ_OP_ZONE_REPORT request.
+/*
+ * Maximum number of zones to get with one report zones command.
  */
-int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd)
+#define SD_ZBC_REPORT_MAX_ZONES		8192U
+
+/**
+ * sd_zbc_alloc_report_buffer() - Allocate a buffer for report zones reply.
+ * @sdkp: The target disk
+ * @nr_zones: Maximum number of zones to report
+ * @buflen: Size of the buffer allocated
+ *
+ * Try to allocate a reply buffer for the number of requested zones.
+ * The size of the buffer allocated may be smaller than requested to
+ * satisfy the device constraints (max_hw_sectors, max_segments, etc).
+ *
+ * Return the address of the allocated buffer and update @buflen with
+ * the size of the allocated buffer.
+ */
+static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
+					unsigned int nr_zones, size_t *buflen)
 {
-	struct request *rq = cmd->request;
-	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
-	sector_t lba, sector = blk_rq_pos(rq);
-	unsigned int nr_bytes = blk_rq_bytes(rq);
-	int ret;
+	struct request_queue *q = sdkp->disk->queue;
+	size_t bufsize;
+	void *buf;
 
-	WARN_ON(nr_bytes == 0);
+	/*
+	 * Report zone buffer size should be at most 64B times the number of
+	 * zones requested plus the 64B reply header, but should be at least
+	 * SECTOR_SIZE for ATA devices.
+	 * Make sure that this size does not exceed the hardware capabilities.
+	 * Furthermore, since the report zone command cannot be split, make
+	 * sure that the allocated buffer can always be mapped by limiting the
+	 * number of pages allocated to the HBA max segments limit.
+	 */
+	nr_zones = min(nr_zones, SD_ZBC_REPORT_MAX_ZONES);
+	bufsize = roundup((nr_zones + 1) * 64, 512);
+	bufsize = min_t(size_t, bufsize,
+			queue_max_hw_sectors(q) << SECTOR_SHIFT);
+	bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT);
 
-	if (!sd_is_zoned(sdkp))
-		/* Not a zoned device */
-		return BLKPREP_KILL;
+	buf = vzalloc(bufsize);
+	if (buf)
+		*buflen = bufsize;
 
-	ret = scsi_init_io(cmd);
-	if (ret != BLKPREP_OK)
-		return ret;
-
-	cmd->cmd_len = 16;
-	memset(cmd->cmnd, 0, cmd->cmd_len);
-	cmd->cmnd[0] = ZBC_IN;
-	cmd->cmnd[1] = ZI_REPORT_ZONES;
-	lba = sectors_to_logical(sdkp->device, sector);
-	put_unaligned_be64(lba, &cmd->cmnd[2]);
-	put_unaligned_be32(nr_bytes, &cmd->cmnd[10]);
-	/* Do partial report for speeding things up */
-	cmd->cmnd[14] = ZBC_REPORT_ZONE_PARTIAL;
-
-	cmd->sc_data_direction = DMA_FROM_DEVICE;
-	cmd->sdb.length = nr_bytes;
-	cmd->transfersize = sdkp->device->sector_size;
-	cmd->allowed = 0;
-
-	return BLKPREP_OK;
+	return buf;
 }
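Worked through with the constants above and hypothetical queue limits: 8192 zones request (8192 + 1) * 64 = 524352 bytes, rounded up to 524800; a max_hw_sectors of 1024 caps that at 524288 bytes, and 128 max_segments with 4 KiB pages allows the same 524288, so the vzalloc'ed buffer ends up at 512 KiB. A runnable version of that arithmetic:

#include <stddef.h>
#include <stdio.h>

#define SECTOR_SHIFT	9
#define PAGE_SHIFT	12	/* assuming 4 KiB pages */

static size_t roundup_sz(size_t x, size_t a) { return (x + a - 1) / a * a; }
static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

int main(void)
{
	unsigned int nr_zones = 8192;	/* SD_ZBC_REPORT_MAX_ZONES */
	size_t max_hw_sectors = 1024;	/* hypothetical queue limit */
	size_t max_segments = 128;	/* hypothetical queue limit */
	size_t bufsize;

	bufsize = roundup_sz((nr_zones + 1) * 64, 512);		   /* 524800 */
	bufsize = min_sz(bufsize, max_hw_sectors << SECTOR_SHIFT); /* 524288 */
	bufsize = min_sz(bufsize, max_segments << PAGE_SHIFT);	   /* 524288 */

	printf("report buffer: %zu bytes\n", bufsize);
	return 0;
}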
 
 /**
- * sd_zbc_report_zones_complete - Process a REPORT ZONES scsi command reply.
- * @scmd: The completed report zones command
- * @good_bytes: reply size in bytes
+ * sd_zbc_report_zones - Disk report zones operation.
+ * @disk: The target disk
+ * @sector: Start 512B sector of the report
+ * @zones: Array of zone descriptors
+ * @nr_zones: Number of descriptors in the array
  *
- * Convert all reported zone descriptors to struct blk_zone. The conversion
- * is done in-place, directly in the request specified sg buffer.
+ * Execute a report zones command on the target disk.
  */
-static void sd_zbc_report_zones_complete(struct scsi_cmnd *scmd,
-					 unsigned int good_bytes)
+int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
+			struct blk_zone *zones, unsigned int *nr_zones)
 {
-	struct request *rq = scmd->request;
-	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
-	struct sg_mapping_iter miter;
-	struct blk_zone_report_hdr hdr;
-	struct blk_zone zone;
-	unsigned int offset, bytes = 0;
-	unsigned long flags;
-	u8 *buf;
+	struct scsi_disk *sdkp = scsi_disk(disk);
+	unsigned int i, nrz = *nr_zones;
+	unsigned char *buf;
+	size_t buflen = 0, offset = 0;
+	int ret = 0;
 
-	if (good_bytes < 64)
-		return;
+	if (!sd_is_zoned(sdkp))
+		/* Not a zoned device */
+		return -EOPNOTSUPP;
 
-	memset(&hdr, 0, sizeof(struct blk_zone_report_hdr));
+	buf = sd_zbc_alloc_report_buffer(sdkp, nrz, &buflen);
+	if (!buf)
+		return -ENOMEM;
 
-	sg_miter_start(&miter, scsi_sglist(scmd), scsi_sg_count(scmd),
-		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
+	ret = sd_zbc_do_report_zones(sdkp, buf, buflen,
+			sectors_to_logical(sdkp->device, sector), true);
+	if (ret)
+		goto out;
 
-	local_irq_save(flags);
-	while (sg_miter_next(&miter) && bytes < good_bytes) {
-
-		buf = miter.addr;
-		offset = 0;
-
-		if (bytes == 0) {
-			/* Set the report header */
-			hdr.nr_zones = min_t(unsigned int,
-					 (good_bytes - 64) / 64,
-					 get_unaligned_be32(&buf[0]) / 64);
-			memcpy(buf, &hdr, sizeof(struct blk_zone_report_hdr));
-			offset += 64;
-			bytes += 64;
-		}
-
-		/* Parse zone descriptors */
-		while (offset < miter.length && hdr.nr_zones) {
-			WARN_ON(offset > miter.length);
-			buf = miter.addr + offset;
-			sd_zbc_parse_report(sdkp, buf, &zone);
-			memcpy(buf, &zone, sizeof(struct blk_zone));
-			offset += 64;
-			bytes += 64;
-			hdr.nr_zones--;
-		}
-
-		if (!hdr.nr_zones)
-			break;
-
+	nrz = min(nrz, get_unaligned_be32(&buf[0]) / 64);
+	for (i = 0; i < nrz; i++) {
+		offset += 64;
+		sd_zbc_parse_report(sdkp, buf + offset, zones);
+		zones++;
 	}
-	sg_miter_stop(&miter);
-	local_irq_restore(flags);
+
+	*nr_zones = nrz;
+
+out:
+	kvfree(buf);
+
+	return ret;
 }
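The reply layout drives the loop: a 64-byte header whose first 4 bytes give the length in bytes of the zone descriptors that follow, then one 64-byte descriptor per zone, which is why offset is advanced by 64 before each parse. A runnable sketch against a fabricated reply:

#include <stdint.h>
#include <stdio.h>

static uint32_t be32_get(const uint8_t *p)
{
	return (uint32_t)p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
}

int main(void)
{
	/* Fabricated REPORT ZONES reply: header says 3 descriptors. */
	uint8_t buf[64 * 4] = { 0 };
	uint32_t nrz_wanted = 8, offset = 0;

	buf[3] = 3 * 64;	/* zone list length in bytes */

	uint32_t nrz = be32_get(buf) / 64;	/* 3 */
	if (nrz > nrz_wanted)
		nrz = nrz_wanted;

	for (uint32_t i = 0; i < nrz; i++) {
		offset += 64;	/* skip header, then walk descriptors */
		printf("descriptor %u at byte %u\n", i, offset);
	}
	return 0;
}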
 
 /**
@@ -226,10 +209,11 @@
 /**
  * sd_zbc_setup_reset_cmnd - Prepare a RESET WRITE POINTER scsi command.
  * @cmd: the command to setup
+ * @all: Reset all zones control.
  *
  * Called from sd_init_command() for a REQ_OP_ZONE_RESET request.
  */
-int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd)
+blk_status_t sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd, bool all)
 {
 	struct request *rq = cmd->request;
 	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
@@ -238,27 +222,30 @@
 
 	if (!sd_is_zoned(sdkp))
 		/* Not a zoned device */
-		return BLKPREP_KILL;
+		return BLK_STS_IOERR;
 
 	if (sdkp->device->changed)
-		return BLKPREP_KILL;
+		return BLK_STS_IOERR;
 
 	if (sector & (sd_zbc_zone_sectors(sdkp) - 1))
 		/* Unaligned request */
-		return BLKPREP_KILL;
+		return BLK_STS_IOERR;
 
 	cmd->cmd_len = 16;
 	memset(cmd->cmnd, 0, cmd->cmd_len);
 	cmd->cmnd[0] = ZBC_OUT;
 	cmd->cmnd[1] = ZO_RESET_WRITE_POINTER;
-	put_unaligned_be64(block, &cmd->cmnd[2]);
+	if (all)
+		cmd->cmnd[14] = 0x1;
+	else
+		put_unaligned_be64(block, &cmd->cmnd[2]);
 
 	rq->timeout = SD_TIMEOUT;
 	cmd->sc_data_direction = DMA_NONE;
 	cmd->transfersize = 0;
 	cmd->allowed = 0;
 
-	return BLKPREP_OK;
+	return BLK_STS_OK;
 }
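For REQ_OP_ZONE_RESET_ALL the CDB carries the ALL bit in byte 14 and the zone start LBA is left at zero, since the device ignores it. A runnable sketch of the resulting 16-byte CDB, assuming the standard ZBC opcode values (ZBC_OUT = 0x94, ZO_RESET_WRITE_POINTER service action = 0x04):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t cdb[16] = { 0 };

	cdb[0] = 0x94;	/* ZBC_OUT */
	cdb[1] = 0x04;	/* ZO_RESET_WRITE_POINTER */
	cdb[14] = 0x01;	/* ALL: reset every zone, start LBA ignored */

	for (int i = 0; i < 16; i++)
		printf("%02x ", cdb[i]);
	printf("\n");
	return 0;
}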
 
 /**
@@ -276,48 +263,33 @@
 	int result = cmd->result;
 	struct request *rq = cmd->request;
 
-	switch (req_op(rq)) {
-	case REQ_OP_ZONE_RESET:
-
-		if (result &&
-		    sshdr->sense_key == ILLEGAL_REQUEST &&
-		    sshdr->asc == 0x24)
-			/*
-			 * INVALID FIELD IN CDB error: reset of a conventional
-			 * zone was attempted. Nothing to worry about, so be
-			 * quiet about the error.
-			 */
-			rq->rq_flags |= RQF_QUIET;
-		break;
-
-	case REQ_OP_WRITE:
-	case REQ_OP_WRITE_ZEROES:
-	case REQ_OP_WRITE_SAME:
-		break;
-
-	case REQ_OP_ZONE_REPORT:
-
-		if (!result)
-			sd_zbc_report_zones_complete(cmd, good_bytes);
-		break;
-
+	if (req_op(rq) == REQ_OP_ZONE_RESET &&
+	    result &&
+	    sshdr->sense_key == ILLEGAL_REQUEST &&
+	    sshdr->asc == 0x24) {
+		/*
+		 * INVALID FIELD IN CDB error: reset of a conventional
+		 * zone was attempted. Nothing to worry about, so be
+		 * quiet about the error.
+		 */
+		rq->rq_flags |= RQF_QUIET;
 	}
 }
 
 /**
- * sd_zbc_read_zoned_characteristics - Read zoned block device characteristics
+ * sd_zbc_check_zoned_characteristics - Check zoned block device characteristics
  * @sdkp: Target disk
  * @buf: Buffer where to store the VPD page data
  *
- * Read VPD page B6.
+ * Read VPD page B6, get the zone information, and check that reads are unconstrained.
  */
-static int sd_zbc_read_zoned_characteristics(struct scsi_disk *sdkp,
-					     unsigned char *buf)
+static int sd_zbc_check_zoned_characteristics(struct scsi_disk *sdkp,
+					      unsigned char *buf)
 {
 
 	if (scsi_get_vpd_page(sdkp->device, 0xb6, buf, 64)) {
 		sd_printk(KERN_NOTICE, sdkp,
-			  "Unconstrained-read check failed\n");
+			  "Read zoned characteristics VPD page failed\n");
 		return -ENODEV;
 	}
 
@@ -335,80 +307,78 @@
 		sdkp->zones_max_open = get_unaligned_be32(&buf[16]);
 	}
 
-	return 0;
-}
-
-/**
- * sd_zbc_check_capacity - Check reported capacity.
- * @sdkp: Target disk
- * @buf: Buffer to use for commands
- *
- * ZBC drive may report only the capacity of the first conventional zones at
- * LBA 0. This is indicated by the RC_BASIS field of the read capacity reply.
- * Check this here. If the disk reported only its conventional zones capacity,
- * get the total capacity by doing a report zones.
- */
-static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf)
-{
-	sector_t lba;
-	int ret;
-
-	if (sdkp->rc_basis != 0)
-		return 0;
-
-	/* Do a report zone to get the maximum LBA to check capacity */
-	ret = sd_zbc_report_zones(sdkp, buf, SD_BUF_SIZE, 0);
-	if (ret)
-		return ret;
-
-	/* The max_lba field is the capacity of this device */
-	lba = get_unaligned_be64(&buf[8]);
-	if (lba + 1 == sdkp->capacity)
-		return 0;
-
-	if (sdkp->first_scan)
-		sd_printk(KERN_WARNING, sdkp,
-			  "Changing capacity from %llu to max LBA+1 %llu\n",
-			  (unsigned long long)sdkp->capacity,
-			  (unsigned long long)lba + 1);
-	sdkp->capacity = lba + 1;
+	/*
+	 * Check for unconstrained reads: host-managed devices with
+	 * constrained reads (drives failing read after write pointer)
+	 * are not supported.
+	 */
+	if (!sdkp->urswrz) {
+		if (sdkp->first_scan)
+			sd_printk(KERN_NOTICE, sdkp,
+			  "constrained-read devices are not supported\n");
+		return -ENODEV;
+	}
 
 	return 0;
 }
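
The check above reads the Zoned Block Device Characteristics VPD page (B6h). A hedged sketch of the two fields it actually consumes — URSWRZ in bit 0 of byte 4, and the maximum number of open zones at bytes 16-19; field offsets per SPC/ZBC, struct name illustrative:

    #include <linux/types.h>
    #include <asm/unaligned.h>

    struct example_zbd_characteristics {
    	bool urswrz;		/* unrestricted reads in write-required zones */
    	u32 max_open_zones;	/* 0 if the device reports no limit */
    };

    /* Sketch: decode the relevant parts of a raw 64-byte VPD B6h reply. */
    static void example_parse_vpd_b6(const u8 *buf,
    				 struct example_zbd_characteristics *c)
    {
    	c->urswrz = buf[4] & 0x1;
    	c->max_open_zones = get_unaligned_be32(&buf[16]);
    }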
 
-#define SD_ZBC_BUF_SIZE 131072U
-
 /**
- * sd_zbc_check_zone_size - Check the device zone sizes
+ * sd_zbc_check_zones - Check the device capacity and zone sizes
  * @sdkp: Target disk
  *
- * Check that all zones of the device are equal. The last zone can however
- * be smaller. The zone size must also be a power of two number of LBAs.
+ * Check that the device capacity as reported by READ CAPACITY matches the
+ * max_lba value (plus one) of the report zones command reply. Also check that
+ * all zones of the device have an equal size, only allowing the last zone of
+ * the disk to have a smaller size (runt zone). The zone size must also be a
+ * power of two.
  *
  * Returns the zone size in number of blocks upon success or an error code
  * upon failure.
  */
-static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp)
+static int sd_zbc_check_zones(struct scsi_disk *sdkp, u32 *zblocks)
 {
+	size_t bufsize, buflen;
+	unsigned int noio_flag;
 	u64 zone_blocks = 0;
-	sector_t block = 0;
+	sector_t max_lba, block = 0;
 	unsigned char *buf;
 	unsigned char *rec;
-	unsigned int buf_len;
-	unsigned int list_length;
-	s64 ret;
+	int ret;
 	u8 same;
 
-	/* Get a buffer */
-	buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
+	/* Do all memory allocations as if GFP_NOIO was specified */
+	noio_flag = memalloc_noio_save();
 
-	/* Do a report zone to get the same field */
-	ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, 0);
+	/* Get a buffer */
+	buf = sd_zbc_alloc_report_buffer(sdkp, SD_ZBC_REPORT_MAX_ZONES,
+					 &bufsize);
+	if (!buf) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	/* Do a report zone to get max_lba and the same field */
+	ret = sd_zbc_do_report_zones(sdkp, buf, bufsize, 0, false);
 	if (ret)
 		goto out_free;
 
+	if (sdkp->rc_basis == 0) {
+		/* The max_lba field is the capacity of this device */
+		max_lba = get_unaligned_be64(&buf[8]);
+		if (sdkp->capacity != max_lba + 1) {
+			if (sdkp->first_scan)
+				sd_printk(KERN_WARNING, sdkp,
+					"Changing capacity from %llu to max LBA+1 %llu\n",
+					(unsigned long long)sdkp->capacity,
+					(unsigned long long)max_lba + 1);
+			sdkp->capacity = max_lba + 1;
+		}
+	}
+
+	/*
+	 * Check same field: for any value other than 0, we know that all zones
+	 * have the same size.
+	 */
 	same = buf[4] & 0x0f;
 	if (same > 0) {
 		rec = &buf[64];
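
Note the memalloc_noio_save() scope opened above (and restored at the end of this function): it makes every allocation in the section behave as GFP_NOIO, which is what you want on a path that can run while I/O to the disk being revalidated is held off. The generic pattern, as a sketch:

    #include <linux/sched/mm.h>
    #include <linux/mm.h>

    /* Sketch: scope a block of allocations as GFP_NOIO without threading
     * gfp flags through every callee. */
    static void *example_alloc_noio(size_t size)
    {
    	unsigned int noio_flag;
    	void *buf;

    	noio_flag = memalloc_noio_save();
    	buf = kvzalloc(size, GFP_KERNEL);	/* behaves as GFP_NOIO here */
    	memalloc_noio_restore(noio_flag);

    	return buf;
    }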
@@ -424,12 +394,12 @@
 	do {
 
 		/* Parse REPORT ZONES header */
-		list_length = get_unaligned_be32(&buf[0]) + 64;
+		buflen = min_t(size_t, get_unaligned_be32(&buf[0]) + 64,
+			       bufsize);
 		rec = buf + 64;
-		buf_len = min(list_length, SD_ZBC_BUF_SIZE);
 
 		/* Parse zone descriptors */
-		while (rec < buf + buf_len) {
+		while (rec < buf + buflen) {
 			u64 this_zone_blocks = get_unaligned_be64(&rec[8]);
 
 			if (zone_blocks == 0) {
@@ -445,8 +415,8 @@
 		}
 
 		if (block < sdkp->capacity) {
-			ret = sd_zbc_report_zones(sdkp, buf,
-						  SD_ZBC_BUF_SIZE, block);
+			ret = sd_zbc_do_report_zones(sdkp, buf, bufsize, block,
+						     true);
 			if (ret)
 				goto out_free;
 		}
@@ -470,202 +440,24 @@
 		if (sdkp->first_scan)
 			sd_printk(KERN_NOTICE, sdkp,
 				  "Zone size too large\n");
-		ret = -ENODEV;
+		ret = -EFBIG;
 	} else {
-		ret = zone_blocks;
+		*zblocks = zone_blocks;
+		ret = 0;
 	}
 
 out_free:
-	kfree(buf);
+	memalloc_noio_restore(noio_flag);
+	kvfree(buf);
 
 	return ret;
 }
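
The restriction enforced here — a constant power-of-two zone size, with at most one smaller runt zone at the end — is what lets the block layer turn zone lookups into shifts. A small sketch of the derived zone count, matching the round_up()/ilog2() arithmetic used further down in sd_zbc_read_zones():

    #include <linux/kernel.h>
    #include <linux/log2.h>

    /* Sketch: zone count for a validated power-of-two zone size, with a
     * trailing runt zone rounded up into the count. */
    static u32 example_nr_zones(sector_t capacity, u32 zone_blocks)
    {
    	if (!is_power_of_2(zone_blocks))
    		return 0;	/* such devices are rejected above */

    	return round_up(capacity, zone_blocks) >> ilog2(zone_blocks);
    }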
 
-/**
- * sd_zbc_alloc_zone_bitmap - Allocate a zone bitmap (one bit per zone).
- * @nr_zones: Number of zones to allocate space for.
- * @numa_node: NUMA node to allocate the memory from.
- */
-static inline unsigned long *
-sd_zbc_alloc_zone_bitmap(u32 nr_zones, int numa_node)
-{
-	return kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(unsigned long),
-			    GFP_KERNEL, numa_node);
-}
-
-/**
- * sd_zbc_get_seq_zones - Parse report zones reply to identify sequential zones
- * @sdkp: disk used
- * @buf: report reply buffer
- * @buflen: length of @buf
- * @zone_shift: logarithm base 2 of the number of blocks in a zone
- * @seq_zones_bitmap: bitmap of sequential zones to set
- *
- * Parse reported zone descriptors in @buf to identify sequential zones and
- * set the reported zone bit in @seq_zones_bitmap accordingly.
- * Since read-only and offline zones cannot be written, do not
- * mark them as sequential in the bitmap.
- * Return the LBA after the last zone reported.
- */
-static sector_t sd_zbc_get_seq_zones(struct scsi_disk *sdkp, unsigned char *buf,
-				     unsigned int buflen, u32 zone_shift,
-				     unsigned long *seq_zones_bitmap)
-{
-	sector_t lba, next_lba = sdkp->capacity;
-	unsigned int buf_len, list_length;
-	unsigned char *rec;
-	u8 type, cond;
-
-	list_length = get_unaligned_be32(&buf[0]) + 64;
-	buf_len = min(list_length, buflen);
-	rec = buf + 64;
-
-	while (rec < buf + buf_len) {
-		type = rec[0] & 0x0f;
-		cond = (rec[1] >> 4) & 0xf;
-		lba = get_unaligned_be64(&rec[16]);
-		if (type != ZBC_ZONE_TYPE_CONV &&
-		    cond != ZBC_ZONE_COND_READONLY &&
-		    cond != ZBC_ZONE_COND_OFFLINE)
-			set_bit(lba >> zone_shift, seq_zones_bitmap);
-		next_lba = lba + get_unaligned_be64(&rec[8]);
-		rec += 64;
-	}
-
-	return next_lba;
-}
-
-/**
- * sd_zbc_setup_seq_zones_bitmap - Initialize a seq zone bitmap.
- * @sdkp: target disk
- * @zone_shift: logarithm base 2 of the number of blocks in a zone
- * @nr_zones: number of zones to set up a seq zone bitmap for
- *
- * Allocate a zone bitmap and initialize it by identifying sequential zones.
- */
-static unsigned long *
-sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp, u32 zone_shift,
-			      u32 nr_zones)
-{
-	struct request_queue *q = sdkp->disk->queue;
-	unsigned long *seq_zones_bitmap;
-	sector_t lba = 0;
-	unsigned char *buf;
-	int ret = -ENOMEM;
-
-	seq_zones_bitmap = sd_zbc_alloc_zone_bitmap(nr_zones, q->node);
-	if (!seq_zones_bitmap)
-		return ERR_PTR(-ENOMEM);
-
-	buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL);
-	if (!buf)
-		goto out;
-
-	while (lba < sdkp->capacity) {
-		ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, lba);
-		if (ret)
-			goto out;
-		lba = sd_zbc_get_seq_zones(sdkp, buf, SD_ZBC_BUF_SIZE,
-					   zone_shift, seq_zones_bitmap);
-	}
-
-	if (lba != sdkp->capacity) {
-		/* Something went wrong */
-		ret = -EIO;
-	}
-
-out:
-	kfree(buf);
-	if (ret) {
-		kfree(seq_zones_bitmap);
-		return ERR_PTR(ret);
-	}
-	return seq_zones_bitmap;
-}
-
-static void sd_zbc_cleanup(struct scsi_disk *sdkp)
-{
-	struct request_queue *q = sdkp->disk->queue;
-
-	kfree(q->seq_zones_bitmap);
-	q->seq_zones_bitmap = NULL;
-
-	kfree(q->seq_zones_wlock);
-	q->seq_zones_wlock = NULL;
-
-	q->nr_zones = 0;
-}
-
-static int sd_zbc_setup(struct scsi_disk *sdkp, u32 zone_blocks)
-{
-	struct request_queue *q = sdkp->disk->queue;
-	u32 zone_shift = ilog2(zone_blocks);
-	u32 nr_zones;
-	int ret;
-
-	/* chunk_sectors indicates the zone size */
-	blk_queue_chunk_sectors(q,
-			logical_to_sectors(sdkp->device, zone_blocks));
-	nr_zones = round_up(sdkp->capacity, zone_blocks) >> zone_shift;
-
-	/*
-	 * Initialize the device request queue information if the number
-	 * of zones changed.
-	 */
-	if (nr_zones != sdkp->nr_zones || nr_zones != q->nr_zones) {
-		unsigned long *seq_zones_wlock = NULL, *seq_zones_bitmap = NULL;
-		size_t zone_bitmap_size;
-
-		if (nr_zones) {
-			seq_zones_wlock = sd_zbc_alloc_zone_bitmap(nr_zones,
-								   q->node);
-			if (!seq_zones_wlock) {
-				ret = -ENOMEM;
-				goto err;
-			}
-
-			seq_zones_bitmap = sd_zbc_setup_seq_zones_bitmap(sdkp,
-							zone_shift, nr_zones);
-			if (IS_ERR(seq_zones_bitmap)) {
-				ret = PTR_ERR(seq_zones_bitmap);
-				kfree(seq_zones_wlock);
-				goto err;
-			}
-		}
-		zone_bitmap_size = BITS_TO_LONGS(nr_zones) *
-			sizeof(unsigned long);
-		blk_mq_freeze_queue(q);
-		if (q->nr_zones != nr_zones) {
-			/* READ16/WRITE16 is mandatory for ZBC disks */
-			sdkp->device->use_16_for_rw = 1;
-			sdkp->device->use_10_for_rw = 0;
-
-			sdkp->zone_blocks = zone_blocks;
-			sdkp->zone_shift = zone_shift;
-			sdkp->nr_zones = nr_zones;
-			q->nr_zones = nr_zones;
-			swap(q->seq_zones_wlock, seq_zones_wlock);
-			swap(q->seq_zones_bitmap, seq_zones_bitmap);
-		} else if (memcmp(q->seq_zones_bitmap, seq_zones_bitmap,
-				  zone_bitmap_size) != 0) {
-			memcpy(q->seq_zones_bitmap, seq_zones_bitmap,
-			       zone_bitmap_size);
-		}
-		blk_mq_unfreeze_queue(q);
-		kfree(seq_zones_wlock);
-		kfree(seq_zones_bitmap);
-	}
-
-	return 0;
-
-err:
-	sd_zbc_cleanup(sdkp);
-	return ret;
-}
-
 int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
 {
-	int64_t zone_blocks;
+	struct gendisk *disk = sdkp->disk;
+	unsigned int nr_zones;
+	u32 zone_blocks = 0;
 	int ret;
 
 	if (!sd_is_zoned(sdkp))
@@ -675,26 +467,8 @@
 		 */
 		return 0;
 
-	/* Get zoned block device characteristics */
-	ret = sd_zbc_read_zoned_characteristics(sdkp, buf);
-	if (ret)
-		goto err;
-
-	/*
-	 * Check for unconstrained reads: host-managed devices with
-	 * constrained reads (drives failing read after write pointer)
-	 * are not supported.
-	 */
-	if (!sdkp->urswrz) {
-		if (sdkp->first_scan)
-			sd_printk(KERN_NOTICE, sdkp,
-			  "constrained reads devices are not supported\n");
-		ret = -ENODEV;
-		goto err;
-	}
-
-	/* Check capacity */
-	ret = sd_zbc_check_capacity(sdkp, buf);
+	/* Check zoned block device characteristics (unconstrained reads) */
+	ret = sd_zbc_check_zoned_characteristics(sdkp, buf);
 	if (ret)
 		goto err;
 
@@ -702,33 +476,51 @@
 	 * Check zone size: only devices with a constant zone size (except
 	 * an eventual last runt zone) that is a power of 2 are supported.
 	 */
-	zone_blocks = sd_zbc_check_zone_size(sdkp);
-	ret = -EFBIG;
-	if (zone_blocks != (u32)zone_blocks)
-		goto err;
-	ret = zone_blocks;
-	if (ret < 0)
+	ret = sd_zbc_check_zones(sdkp, &zone_blocks);
+	if (ret != 0)
 		goto err;
 
 	/* The drive satisfies the kernel restrictions: set it up */
-	ret = sd_zbc_setup(sdkp, zone_blocks);
-	if (ret)
-		goto err;
+	blk_queue_chunk_sectors(sdkp->disk->queue,
+			logical_to_sectors(sdkp->device, zone_blocks));
+	blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, sdkp->disk->queue);
+	blk_queue_required_elevator_features(sdkp->disk->queue,
+					     ELEVATOR_F_ZBD_SEQ_WRITE);
+	nr_zones = round_up(sdkp->capacity, zone_blocks) >> ilog2(zone_blocks);
+
+	/* READ16/WRITE16 is mandatory for ZBC disks */
+	sdkp->device->use_16_for_rw = 1;
+	sdkp->device->use_10_for_rw = 0;
+
+	/*
+	 * Revalidate the disk zone bitmaps once the block device capacity is
+	 * set on the second revalidate execution during disk scan and if
+	 * something changed when executing a normal revalidate.
+	 */
+	if (sdkp->first_scan) {
+		sdkp->zone_blocks = zone_blocks;
+		sdkp->nr_zones = nr_zones;
+		return 0;
+	}
+
+	if (sdkp->zone_blocks != zone_blocks ||
+	    sdkp->nr_zones != nr_zones ||
+	    disk->queue->nr_zones != nr_zones) {
+		ret = blk_revalidate_disk_zones(disk);
+		if (ret != 0)
+			goto err;
+		sdkp->zone_blocks = zone_blocks;
+		sdkp->nr_zones = nr_zones;
+	}
 
 	return 0;
 
 err:
 	sdkp->capacity = 0;
-	sd_zbc_cleanup(sdkp);
 
 	return ret;
 }
 
-void sd_zbc_remove(struct scsi_disk *sdkp)
-{
-	sd_zbc_cleanup(sdkp);
-}
-
 void sd_zbc_print_zones(struct scsi_disk *sdkp)
 {
 	if (!sd_is_zoned(sdkp) || !sdkp->capacity)
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 0fc3922..c2afba2 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -1,25 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * SCSI Enclosure Services
  *
  * Copyright (C) 2008 James Bottomley <James.Bottomley@HansenPartnership.com>
- *
-**-----------------------------------------------------------------------------
-**
-**  This program is free software; you can redistribute it and/or
-**  modify it under the terms of the GNU General Public License
-**  version 2 as published by the Free Software Foundation.
-**
-**  This program is distributed in the hope that it will be useful,
-**  but WITHOUT ANY WARRANTY; without even the implied warranty of
-**  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-**  GNU General Public License for more details.
-**
-**  You should have received a copy of the GNU General Public License
-**  along with this program; if not, write to the Free Software
-**  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-**
-**-----------------------------------------------------------------------------
-*/
+ */
 
 #include <linux/slab.h>
 #include <linux/module.h>
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 8a254bb..cce7575 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  *  History:
  *  Started: Aug 9 by Lawrence Foard (entropy@world.std.com),
@@ -8,12 +9,6 @@
  *        Copyright (C) 1992 Lawrence Foard
  * Version 2 and 3 extensions to driver:
  *        Copyright (C) 1998 - 2014 Douglas Gilbert
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
  */
 
 static int sg_version_num = 30536;	/* 2 digits for each component */
@@ -434,7 +429,7 @@
 	SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
 				      "sg_read: count=%d\n", (int) count));
 
-	if (!access_ok(VERIFY_WRITE, buf, count))
+	if (!access_ok(buf, count))
 		return -EFAULT;
 	if (sfp->force_packid && (count >= SZ_SG_HEADER)) {
 		old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
@@ -632,7 +627,7 @@
 	      scsi_block_when_processing_errors(sdp->device)))
 		return -ENXIO;
 
-	if (!access_ok(VERIFY_READ, buf, count))
+	if (!access_ok(buf, count))
 		return -EFAULT;	/* protects following copy_from_user()s + get_user()s */
 	if (count < SZ_SG_HEADER)
 		return -EIO;
@@ -729,7 +724,7 @@
 
 	if (count < SZ_SG_IO_HDR)
 		return -EINVAL;
-	if (!access_ok(VERIFY_READ, buf, count))
+	if (!access_ok(buf, count))
 		return -EFAULT; /* protects following copy_from_user()s + get_user()s */
 
 	sfp->cmd_q = 1;	/* when sg_io_hdr seen, set command queuing on */
@@ -768,7 +763,7 @@
 		sg_remove_request(sfp, srp);
 		return -EMSGSIZE;
 	}
-	if (!access_ok(VERIFY_READ, hp->cmdp, hp->cmd_len)) {
+	if (!access_ok(hp->cmdp, hp->cmd_len)) {
 		sg_remove_request(sfp, srp);
 		return -EFAULT;	/* protects following copy_from_user()s + get_user()s */
 	}
@@ -822,7 +817,7 @@
 	if (atomic_read(&sdp->detaching)) {
 		if (srp->bio) {
 			scsi_req_free_cmd(scsi_req(srp->rq));
-			blk_end_request_all(srp->rq, BLK_STS_IOERR);
+			blk_put_request(srp->rq);
 			srp->rq = NULL;
 		}
 
@@ -922,7 +917,7 @@
 			return -ENODEV;
 		if (!scsi_block_when_processing_errors(sdp->device))
 			return -ENXIO;
-		if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR))
+		if (!access_ok(p, SZ_SG_IO_HDR))
 			return -EFAULT;
 		result = sg_new_write(sfp, filp, p, SZ_SG_IO_HDR,
 				 1, read_only, 1, &srp);
@@ -968,7 +963,7 @@
 	case SG_GET_LOW_DMA:
 		return put_user((int) sdp->device->host->unchecked_isa_dma, ip);
 	case SG_GET_SCSI_ID:
-		if (!access_ok(VERIFY_WRITE, p, sizeof (sg_scsi_id_t)))
+		if (!access_ok(p, sizeof (sg_scsi_id_t)))
 			return -EFAULT;
 		else {
 			sg_scsi_id_t __user *sg_idp = p;
@@ -997,7 +992,7 @@
 		sfp->force_packid = val ? 1 : 0;
 		return 0;
 	case SG_GET_PACK_ID:
-		if (!access_ok(VERIFY_WRITE, ip, sizeof (int)))
+		if (!access_ok(ip, sizeof (int)))
 			return -EFAULT;
 		read_lock_irqsave(&sfp->rq_list_lock, iflags);
 		list_for_each_entry(srp, &sfp->rq_list, entry) {
@@ -1078,7 +1073,7 @@
 		val = (sdp->device ? 1 : 0);
 		return put_user(val, ip);
 	case SG_GET_REQUEST_TABLE:
-		if (!access_ok(VERIFY_WRITE, p, SZ_SG_REQ_INFO * SG_MAX_QUEUE))
+		if (!access_ok(p, SZ_SG_REQ_INFO * SG_MAX_QUEUE))
 			return -EFAULT;
 		else {
 			sg_req_info_t *rinfo;
@@ -1390,7 +1385,7 @@
 	 */
 	srp->rq = NULL;
 	scsi_req_free_cmd(scsi_req(rq));
-	__blk_put_request(rq->q, rq);
+	blk_put_request(rq);
 
 	write_lock_irqsave(&sfp->rq_list_lock, iflags);
 	if (unlikely(srp->orphan)) {
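
The access_ok() hunks above are the mechanical fallout of the kernel 5.0 uaccess change: the VERIFY_READ/VERIFY_WRITE first argument was dropped because the check never distinguished directions on any remaining architecture. The new calling convention, as a sketch:

    #include <linux/errno.h>
    #include <linux/uaccess.h>

    /* Sketch: the 5.0+ form takes only the user pointer and length; the
     * check is pure range validation, identical for reads and writes. */
    static int example_check_user_range(const void __user *ubuf, size_t count)
    {
    	if (!access_ok(ubuf, count))
    		return -EFAULT;	/* callers still use copy_*_user() after */

    	return 0;
    }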
diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c
index 5ed696d..713bce9 100644
--- a/drivers/scsi/sgiwd93.c
+++ b/drivers/scsi/sgiwd93.c
@@ -208,7 +208,7 @@
 	.this_id		= 7,
 	.sg_tablesize		= SG_ALL,
 	.cmd_per_lun		= 8,
-	.use_clustering		= DISABLE_CLUSTERING,
+	.dma_boundary		= PAGE_SIZE - 1,
 };
 
 static int sgiwd93_probe(struct platform_device *pdev)
diff --git a/drivers/scsi/sim710.c b/drivers/scsi/sim710.c
index 82ed998..2230261 100644
--- a/drivers/scsi/sim710.c
+++ b/drivers/scsi/sim710.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * sim710.c - Copyright (C) 1999 Richard Hirst <richard@sleepie.demon.co.uk>
  *
  *----------------------------------------------------------------------------
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by 
- *  the Free Software Foundation; either version 2 of the License, or
- *  (at your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  *----------------------------------------------------------------------------
  *
  * MCA card detection code by Trent McNair. (now deleted)
@@ -23,7 +11,6 @@
  * Auto probing of EISA config space from Trevor Hemsley.
  *
  * Rewritten to use 53c700.c by James.Bottomley@SteelEye.com
- *
  */
 
 #include <linux/module.h>
diff --git a/drivers/scsi/smartpqi/Kconfig b/drivers/scsi/smartpqi/Kconfig
index 97e159c..bc65068 100644
--- a/drivers/scsi/smartpqi/Kconfig
+++ b/drivers/scsi/smartpqi/Kconfig
@@ -1,6 +1,8 @@
 #
 # Kernel configuration file for the SMARTPQI
 #
+# Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
+# Copyright (c) 2017-2018 Microsemi Corporation
 # Copyright (c) 2016 Microsemi Corporation
 # Copyright (c) 2016 PMC-Sierra, Inc.
 #  (mailto:esc.storagedev@microsemi.com)
diff --git a/drivers/scsi/smartpqi/Makefile b/drivers/scsi/smartpqi/Makefile
index e6b7799..28985e5 100644
--- a/drivers/scsi/smartpqi/Makefile
+++ b/drivers/scsi/smartpqi/Makefile
@@ -1,3 +1,3 @@
-ccflags-y += -I.
+# SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_SCSI_SMARTPQI) += smartpqi.o
 smartpqi-objs := smartpqi_init.o smartpqi_sis.o smartpqi_sas_transport.o
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index e97bf26..79d2af3 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -1,18 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  *    driver for Microsemi PQI-based storage controllers
- *    Copyright (c) 2016-2017 Microsemi Corporation
+ *    Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
+ *    Copyright (c) 2016-2018 Microsemi Corporation
  *    Copyright (c) 2016 PMC-Sierra, Inc.
  *
- *    This program is free software; you can redistribute it and/or modify
- *    it under the terms of the GNU General Public License as published by
- *    the Free Software Foundation; version 2 of the License.
- *
- *    This program is distributed in the hope that it will be useful,
- *    but WITHOUT ANY WARRANTY; without even the implied warranty of
- *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- *    NON INFRINGEMENT.  See the GNU General Public License for more details.
- *
- *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
+ *    Questions/Comments/Bugfixes to storagedev@microchip.com
  *
  */
 
@@ -21,6 +14,9 @@
 #if !defined(_SMARTPQI_H)
 #define _SMARTPQI_H
 
+#include <scsi/scsi_host.h>
+#include <linux/bsg-lib.h>
+
 #pragma pack(1)
 
 #define PQI_DEVICE_SIGNATURE	"PQI DREG"
@@ -97,6 +93,12 @@
 	struct pqi_device_registers pqi_registers;	/* 4000h */
 };
 
+#if ((HZ) < 1000)
+#define PQI_HZ  1000
+#else
+#define PQI_HZ  (HZ)
+#endif
+
 #define PQI_DEVICE_REGISTERS_OFFSET	0x4000
 
 enum pqi_io_path {
@@ -347,6 +349,10 @@
 
 #define PQI_MAX_EVENT_DESCRIPTORS	255
 
+#define PQI_EVENT_OFA_MEMORY_ALLOCATION	0x0
+#define PQI_EVENT_OFA_QUIESCE		0x1
+#define PQI_EVENT_OFA_CANCELLED		0x2
+
 struct pqi_event_response {
 	struct pqi_iu_header header;
 	u8	event_type;
@@ -354,7 +360,17 @@
 	u8	request_acknowlege : 1;
 	__le16	event_id;
 	__le32	additional_event_id;
-	u8	data[16];
+	union {
+		struct {
+			__le32	bytes_requested;
+			u8	reserved[12];
+		} ofa_memory_allocation;
+
+		struct {
+			__le16	reason;		/* reason for cancellation */
+			u8	reserved[14];
+		} ofa_cancelled;
+	} data;
 };
 
 struct pqi_event_acknowledge_request {
@@ -389,6 +405,54 @@
 	u8	response_code;
 };
 
+struct pqi_vendor_general_request {
+	struct pqi_iu_header header;
+	__le16	request_id;
+	__le16	function_code;
+	union {
+		struct {
+			__le16	first_section;
+			__le16	last_section;
+			u8	reserved[48];
+		} config_table_update;
+
+		struct {
+			__le64	buffer_address;
+			__le32	buffer_length;
+			u8	reserved[40];
+		} ofa_memory_allocation;
+	} data;
+};
+
+struct pqi_vendor_general_response {
+	struct pqi_iu_header header;
+	__le16	request_id;
+	__le16	function_code;
+	__le16	status;
+	u8	reserved[2];
+};
+
+#define PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE	0
+#define PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE	1
+
+#define PQI_OFA_VERSION			1
+#define PQI_OFA_SIGNATURE		"OFA_QRM"
+#define PQI_OFA_MAX_SG_DESCRIPTORS	64
+
+#define PQI_OFA_MEMORY_DESCRIPTOR_LENGTH \
+	(offsetof(struct pqi_ofa_memory, sg_descriptor) + \
+	(PQI_OFA_MAX_SG_DESCRIPTORS * sizeof(struct pqi_sg_descriptor)))
+
+struct pqi_ofa_memory {
+	__le64	signature;	/* "OFA_QRM" */
+	__le16	version;	/* version of this struct (1 = 1st version) */
+	u8	reserved[62];
+	__le32	bytes_allocated;	/* total allocated memory in bytes */
+	__le16	num_memory_descriptors;
+	u8	reserved1[2];
+	struct pqi_sg_descriptor sg_descriptor[1];
+};
+
 struct pqi_aio_error_info {
 	u8	status;
 	u8	service_response;
@@ -419,6 +483,7 @@
 #define PQI_REQUEST_IU_GENERAL_ADMIN			0x60
 #define PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG	0x72
 #define PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG		0x73
+#define PQI_REQUEST_IU_VENDOR_GENERAL			0x75
 #define PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT		0xf6
 
 #define PQI_RESPONSE_IU_GENERAL_MANAGEMENT		0x81
@@ -430,6 +495,7 @@
 #define PQI_RESPONSE_IU_AIO_PATH_IO_ERROR		0xf3
 #define PQI_RESPONSE_IU_AIO_PATH_DISABLED		0xf4
 #define PQI_RESPONSE_IU_VENDOR_EVENT			0xf5
+#define PQI_RESPONSE_IU_VENDOR_GENERAL			0xf7
 
 #define PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY	0x0
 #define PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ			0x10
@@ -492,6 +558,7 @@
 #define PQI_EVENT_TYPE_HARDWARE			0x2
 #define PQI_EVENT_TYPE_PHYSICAL_DEVICE		0x4
 #define PQI_EVENT_TYPE_LOGICAL_DEVICE		0x5
+#define PQI_EVENT_TYPE_OFA			0xfb
 #define PQI_EVENT_TYPE_AIO_STATE_CHANGE		0xfd
 #define PQI_EVENT_TYPE_AIO_CONFIG_CHANGE	0xfe
 
@@ -556,6 +623,7 @@
 #define SOP_TASK_ATTRIBUTE_ACA			4
 
 #define SOP_TMF_COMPLETE		0x0
+#define SOP_TMF_REJECTED		0x4
 #define SOP_TMF_FUNCTION_SUCCEEDED	0x8
 
 /* additional CDB bytes usage field codes */
@@ -644,11 +712,13 @@
 #define PQI_CONFIG_TABLE_MAX_LENGTH	((u16)~0)
 
 /* configuration table section IDs */
+#define PQI_CONFIG_TABLE_ALL_SECTIONS			(-1)
 #define PQI_CONFIG_TABLE_SECTION_GENERAL_INFO		0
 #define PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES	1
 #define PQI_CONFIG_TABLE_SECTION_FIRMWARE_ERRATA	2
 #define PQI_CONFIG_TABLE_SECTION_DEBUG			3
 #define PQI_CONFIG_TABLE_SECTION_HEARTBEAT		4
+#define PQI_CONFIG_TABLE_SECTION_SOFT_RESET		5
 
 struct pqi_config_table {
 	u8	signature[8];		/* "CFGTABLE" */
@@ -680,6 +750,18 @@
 					/* command */
 };
 
+struct pqi_config_table_firmware_features {
+	struct pqi_config_table_section_header header;
+	__le16	num_elements;
+	u8	features_supported[];
+/*	u8	features_requested_by_host[]; */
+/*	u8	features_enabled[]; */
+};
+
+#define PQI_FIRMWARE_FEATURE_OFA			0
+#define PQI_FIRMWARE_FEATURE_SMP			1
+#define PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE	11
+
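
features_supported[] above is a variable-length bitmap: feature N lives in bit N%8 of byte N/8. Assuming num_elements counts the bytes of that array (which is how the driver consumes the table elsewhere), a lookup sketch against the struct just defined:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    /* Sketch: test one firmware feature bit; num_elements is assumed to
     * be the byte length of features_supported[]. */
    static inline bool example_feature_supported(
    	const struct pqi_config_table_firmware_features *features,
    	unsigned int bit_position)
    {
    	unsigned int byte_index = bit_position / 8;

    	if (byte_index >= le16_to_cpu(features->num_elements))
    		return false;

    	return features->features_supported[byte_index] &
    		(1 << (bit_position % 8));
    }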
 struct pqi_config_table_debug {
 	struct pqi_config_table_section_header header;
 	__le32	scratchpad;
@@ -690,6 +772,22 @@
 	__le32	heartbeat_counter;
 };
 
+struct pqi_config_table_soft_reset {
+	struct pqi_config_table_section_header header;
+	u8 soft_reset_status;
+};
+
+#define PQI_SOFT_RESET_INITIATE		0x1
+#define PQI_SOFT_RESET_ABORT		0x2
+
+enum pqi_soft_reset_status {
+	RESET_INITIATE_FIRMWARE,
+	RESET_INITIATE_DRIVER,
+	RESET_ABORT,
+	RESET_NORESPONSE,
+	RESET_TIMEDOUT
+};
+
 union pqi_reset_register {
 	struct {
 		u32	reset_type : 3;
@@ -724,6 +822,7 @@
 #define PQI_HBA_BUS			2
 #define PQI_EXTERNAL_RAID_VOLUME_BUS	3
 #define PQI_MAX_BUS			PQI_EXTERNAL_RAID_VOLUME_BUS
+#define PQI_VSEP_CISS_BTL		379
 
 struct report_lun_header {
 	__be32	list_length;
@@ -808,8 +907,10 @@
 	u8	scsi3addr[8];
 	__be64	wwid;
 	u8	volume_id[16];
+	u8	unique_id[16];
 	u8	is_physical_device : 1;
 	u8	is_external_raid_device : 1;
+	u8	is_expander_smp_device : 1;
 	u8	target_lun_valid : 1;
 	u8	device_gone : 1;
 	u8	new_device : 1;
@@ -817,6 +918,7 @@
 	u8	volume_offline : 1;
 	bool	aio_enabled;		/* only valid for physical disks */
 	bool	in_reset;
+	bool	in_remove;
 	bool	device_offline;
 	u8	vendor[8];		/* bytes 8-15 of inquiry data */
 	u8	model[16];		/* bytes 16-31 of inquiry data */
@@ -829,6 +931,9 @@
 	u8	active_path_index;
 	u8	path_map;
 	u8	bay;
+	u8	box_index;
+	u8	phys_box_on_bus;
+	u8	phy_connected_dev_type;
 	u8	box[8];
 	u16	phys_connector[8];
 	bool	raid_bypass_configured;	/* RAID bypass configured */
@@ -854,6 +959,8 @@
 #define CISS_VPD_LV_DEVICE_GEOMETRY	0xc1	/* vendor-specific page */
 #define CISS_VPD_LV_BYPASS_STATUS	0xc2	/* vendor-specific page */
 #define CISS_VPD_LV_STATUS		0xc3	/* vendor-specific page */
+#define SCSI_VPD_HEADER_SZ		4
+#define SCSI_VPD_DEVICE_ID_IDX		8	/* Index of device id in page */
 
 #define VPD_PAGE	(1 << 8)
 
@@ -916,6 +1023,7 @@
 struct pqi_sas_port {
 	struct list_head port_list_entry;
 	u64	sas_address;
+	struct pqi_scsi_dev *device;
 	struct sas_port *port;
 	int	next_phy_index;
 	struct list_head phy_list_head;
@@ -947,13 +1055,15 @@
 	struct list_head request_list_entry;
 };
 
-#define PQI_NUM_SUPPORTED_EVENTS	6
+#define PQI_NUM_SUPPORTED_EVENTS	7
 
 struct pqi_event {
 	bool	pending;
 	u8	event_type;
 	__le16	event_id;
 	__le32	additional_event_id;
+	__le32	ofa_bytes_requested;
+	__le16	ofa_cancel_reason;
 };
 
 #define PQI_RESERVED_IO_SLOTS_LUN_RESET			1
@@ -967,6 +1077,9 @@
 	unsigned int	ctrl_id;
 	struct pci_dev	*pci_dev;
 	char		firmware_version[11];
+	char		serial_number[17];
+	char		model[17];
+	char		vendor[9];
 	void __iomem	*iomem_base;
 	struct pqi_ctrl_registers __iomem *registers;
 	struct pqi_device_registers __iomem *pqi_registers;
@@ -1014,12 +1127,16 @@
 
 	struct mutex	scan_mutex;
 	struct mutex	lun_reset_mutex;
+	struct mutex	ofa_mutex; /* serialize ofa */
 	bool		controller_online;
 	bool		block_requests;
+	bool		in_shutdown;
+	bool		in_ofa;
 	u8		inbound_spanning_supported : 1;
 	u8		outbound_spanning_supported : 1;
 	u8		pqi_mode_enabled : 1;
 	u8		pqi_reset_quiesce_supported : 1;
+	u8		soft_reset_handshake_supported : 1;
 
 	struct list_head scsi_device_list;
 	spinlock_t	scsi_device_list_lock;
@@ -1040,6 +1157,7 @@
 	int		previous_num_interrupts;
 	u32		previous_heartbeat_count;
 	__le32 __iomem	*heartbeat_counter;
+	u8 __iomem	*soft_reset_status;
 	struct timer_list heartbeat_timer;
 	struct work_struct ctrl_offline_work;
 
@@ -1051,6 +1169,10 @@
 	struct list_head raid_bypass_retry_list;
 	spinlock_t	raid_bypass_retry_list_lock;
 	struct work_struct raid_bypass_retry_work;
+
+	struct          pqi_ofa_memory *pqi_ofa_mem_virt_addr;
+	dma_addr_t      pqi_ofa_mem_dma_handle;
+	void            **pqi_ofa_chunk_virt_addr;
 };
 
 enum pqi_ctrl_mode {
@@ -1080,8 +1202,13 @@
 #define BMIC_WRITE				0x27
 #define BMIC_SENSE_CONTROLLER_PARAMETERS	0x64
 #define BMIC_SENSE_SUBSYSTEM_INFORMATION	0x66
+#define BMIC_CSMI_PASSTHRU			0x68
 #define BMIC_WRITE_HOST_WELLNESS		0xa5
 #define BMIC_FLUSH_CACHE			0xc2
+#define BMIC_SET_DIAG_OPTIONS			0xf4
+#define BMIC_SENSE_DIAG_OPTIONS			0xf5
+
+#define CSMI_CC_SAS_SMP_PASSTHRU		0x17
 
 #define SA_FLUSH_CACHE				0x1
 
@@ -1104,11 +1231,24 @@
 	__le16	extended_logical_unit_count;
 	u8	reserved1[34];
 	__le16	firmware_build_number;
-	u8	reserved2[100];
+	u8	reserved2[8];
+	u8	vendor_id[8];
+	u8	product_id[16];
+	u8	reserved3[68];
 	u8	controller_mode;
-	u8	reserved3[32];
+	u8	reserved4[32];
 };
 
+struct bmic_sense_subsystem_info {
+	u8	reserved[44];
+	u8	ctrl_serial_number[16];
+};
+
+#define SA_EXPANDER_SMP_DEVICE		0x05
+#define SA_CONTROLLER_DEVICE		0x07
+/* SCSI invalid device type for SAS devices */
+#define PQI_SAS_SCSI_INVALID_DEVTYPE	0xff
+
 struct bmic_identify_physical_device {
 	u8	scsi_bus;		/* SCSI Bus number on controller */
 	u8	scsi_id;		/* SCSI ID on this bus */
@@ -1189,6 +1329,50 @@
 	u8	padding_to_multiple_of_512[9];
 };
 
+struct bmic_smp_request {
+	u8	frame_type;
+	u8	function;
+	u8	allocated_response_length;
+	u8	request_length;
+	u8	additional_request_bytes[1016];
+};
+
+struct bmic_smp_response {
+	u8	frame_type;
+	u8	function;
+	u8	function_result;
+	u8	response_length;
+	u8	additional_response_bytes[1016];
+};
+
+struct bmic_csmi_ioctl_header {
+	__le32	header_length;
+	u8	signature[8];
+	__le32	timeout;
+	__le32	control_code;
+	__le32	return_code;
+	__le32	length;
+};
+
+struct bmic_csmi_smp_passthru {
+	u8	phy_identifier;
+	u8	port_identifier;
+	u8	connection_rate;
+	u8	reserved;
+	__be64	destination_sas_address;
+	__le32	request_length;
+	struct bmic_smp_request request;
+	u8	connection_status;
+	u8	reserved1[3];
+	__le32	response_length;
+	struct bmic_smp_response response;
+};
+
+struct bmic_csmi_smp_passthru_buffer {
+	struct bmic_csmi_ioctl_header ioctl_header;
+	struct bmic_csmi_smp_passthru parameters;
+};
+
 struct bmic_flush_cache {
 	u8	disable_flag;
 	u8	system_power_action;
@@ -1206,8 +1390,42 @@
 	RESTART = 4
 };
 
+struct bmic_diag_options {
+	__le32 options;
+};
+
 #pragma pack()
 
+static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
+{
+	void *hostdata = shost_priv(shost);
+
+	return *((struct pqi_ctrl_info **)hostdata);
+}
+
+static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
+{
+	return !ctrl_info->controller_online;
+}
+
+static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
+{
+	atomic_inc(&ctrl_info->num_busy_threads);
+}
+
+static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
+{
+	atomic_dec(&ctrl_info->num_busy_threads);
+}
+
+static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
+{
+	return ctrl_info->block_requests;
+}
+
+void pqi_sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
+	struct sas_rphy *rphy);
+
 int pqi_add_sas_host(struct Scsi_Host *shost, struct pqi_ctrl_info *ctrl_info);
 void pqi_delete_sas_host(struct pqi_ctrl_info *ctrl_info);
 int pqi_add_sas_device(struct pqi_sas_node *pqi_sas_node,
@@ -1216,6 +1434,9 @@
 struct pqi_scsi_dev *pqi_find_device_by_sas_rphy(
 	struct pqi_ctrl_info *ctrl_info, struct sas_rphy *rphy);
 void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd);
+int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
+	struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
+	struct pqi_raid_error_info *error_info);
 
 extern struct sas_function_template pqi_sas_transport_functions;
 
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 2112ea6..ea5409b 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -1,18 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    driver for Microsemi PQI-based storage controllers
- *    Copyright (c) 2016-2017 Microsemi Corporation
+ *    Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
+ *    Copyright (c) 2016-2018 Microsemi Corporation
  *    Copyright (c) 2016 PMC-Sierra, Inc.
  *
- *    This program is free software; you can redistribute it and/or modify
- *    it under the terms of the GNU General Public License as published by
- *    the Free Software Foundation; version 2 of the License.
- *
- *    This program is distributed in the hope that it will be useful,
- *    but WITHOUT ANY WARRANTY; without even the implied warranty of
- *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- *    NON INFRINGEMENT.  See the GNU General Public License for more details.
- *
- *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
+ *    Questions/Comments/Bugfixes to storagedev@microchip.com
  *
  */
 
@@ -40,11 +33,11 @@
 #define BUILD_TIMESTAMP
 #endif
 
-#define DRIVER_VERSION		"1.1.4-130"
+#define DRIVER_VERSION		"1.2.8-026"
 #define DRIVER_MAJOR		1
-#define DRIVER_MINOR		1
-#define DRIVER_RELEASE		4
-#define DRIVER_REVISION		130
+#define DRIVER_MINOR		2
+#define DRIVER_RELEASE		8
+#define DRIVER_REVISION		26
 
 #define DRIVER_NAME		"Microsemi PQI Driver (v" \
 				DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -74,6 +67,15 @@
 	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
 	unsigned int cdb_length, struct pqi_queue_group *queue_group,
 	struct pqi_encryption_info *encryption_info, bool raid_bypass);
+static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
+static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
+static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info);
+static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
+	u32 bytes_requested);
+static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
+static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
+static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_scsi_dev *device, unsigned long timeout_secs);
 
 /* for flags argument to pqi_submit_raid_request_synchronous() */
 #define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1
@@ -113,6 +115,7 @@
 	PQI_EVENT_TYPE_HARDWARE,
 	PQI_EVENT_TYPE_PHYSICAL_DEVICE,
 	PQI_EVENT_TYPE_LOGICAL_DEVICE,
+	PQI_EVENT_TYPE_OFA,
 	PQI_EVENT_TYPE_AIO_STATE_CHANGE,
 	PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
 };
@@ -142,6 +145,18 @@
 	"\t\tSupported: none, reboot, panic\n"
 	"\t\tDefault: none");
 
+static int pqi_expose_ld_first;
+module_param_named(expose_ld_first,
+	pqi_expose_ld_first, int, 0644);
+MODULE_PARM_DESC(expose_ld_first,
+	"Expose logical drives before physical drives.");
+
+static int pqi_hide_vsep;
+module_param_named(hide_vsep,
+	pqi_hide_vsep, int, 0644);
+MODULE_PARM_DESC(hide_vsep,
+	"Hide the virtual SEP for direct attached drives.");
+
 static char *raid_levels[] = {
 	"RAID-0",
 	"RAID-4",
@@ -176,18 +191,16 @@
 	scmd->scsi_done(scmd);
 }
 
+static inline void pqi_disable_write_same(struct scsi_device *sdev)
+{
+	sdev->no_write_same = 1;
+}
+
 static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
 {
 	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
 }
 
-static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
-{
-	void *hostdata = shost_priv(shost);
-
-	return *((struct pqi_ctrl_info **)hostdata);
-}
-
 static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
 {
 	return !device->is_physical_device;
@@ -198,11 +211,6 @@
 	return scsi3addr[2] != 0;
 }
 
-static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
-{
-	return !ctrl_info->controller_online;
-}
-
 static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
 {
 	if (ctrl_info->controller_online)
@@ -241,11 +249,6 @@
 	scsi_unblock_requests(ctrl_info->scsi_host);
 }
 
-static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
-{
-	return ctrl_info->block_requests;
-}
-
 static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
 	unsigned long timeout_msecs)
 {
@@ -275,16 +278,6 @@
 	return remaining_msecs;
 }
 
-static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
-{
-	atomic_inc(&ctrl_info->num_busy_threads);
-}
-
-static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
-{
-	atomic_dec(&ctrl_info->num_busy_threads);
-}
-
 static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
 {
 	while (atomic_read(&ctrl_info->num_busy_threads) >
@@ -312,11 +305,39 @@
 	return device->in_reset;
 }
 
+static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
+{
+	ctrl_info->in_ofa = true;
+}
+
+static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
+{
+	ctrl_info->in_ofa = false;
+}
+
+static inline bool pqi_ctrl_in_ofa(struct pqi_ctrl_info *ctrl_info)
+{
+	return ctrl_info->in_ofa;
+}
+
+static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
+{
+	device->in_remove = true;
+}
+
+static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info,
+					struct pqi_scsi_dev *device)
+{
+	return device->in_remove && !ctrl_info->in_shutdown;
+}
+
 static inline void pqi_schedule_rescan_worker_with_delay(
 	struct pqi_ctrl_info *ctrl_info, unsigned long delay)
 {
 	if (pqi_ctrl_offline(ctrl_info))
 		return;
+	if (pqi_ctrl_in_ofa(ctrl_info))
+		return;
 
 	schedule_delayed_work(&ctrl_info->rescan_work, delay);
 }
@@ -326,7 +347,7 @@
 	pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
 }
 
-#define PQI_RESCAN_WORK_DELAY  (10 * HZ)
+#define PQI_RESCAN_WORK_DELAY	(10 * PQI_HZ)
 
 static inline void pqi_schedule_rescan_worker_delayed(
 	struct pqi_ctrl_info *ctrl_info)
@@ -347,18 +368,39 @@
 	return readl(ctrl_info->heartbeat_counter);
 }
 
+static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
+{
+	if (!ctrl_info->soft_reset_status)
+		return 0;
+
+	return readb(ctrl_info->soft_reset_status);
+}
+
+static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info,
+						u8 clear)
+{
+	u8 status;
+
+	if (!ctrl_info->soft_reset_status)
+		return;
+
+	status = pqi_read_soft_reset_status(ctrl_info);
+	status &= ~clear;
+	writeb(status, ctrl_info->soft_reset_status);
+}
+
 static int pqi_map_single(struct pci_dev *pci_dev,
 	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
-	size_t buffer_length, int data_direction)
+	size_t buffer_length, enum dma_data_direction data_direction)
 {
 	dma_addr_t bus_address;
 
-	if (!buffer || buffer_length == 0 || data_direction == PCI_DMA_NONE)
+	if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
 		return 0;
 
-	bus_address = pci_map_single(pci_dev, buffer, buffer_length,
+	bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
 		data_direction);
-	if (pci_dma_mapping_error(pci_dev, bus_address))
+	if (dma_mapping_error(&pci_dev->dev, bus_address))
 		return -ENOMEM;
 
 	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
@@ -370,15 +412,15 @@
 
 static void pqi_pci_unmap(struct pci_dev *pci_dev,
 	struct pqi_sg_descriptor *descriptors, int num_descriptors,
-	int data_direction)
+	enum dma_data_direction data_direction)
 {
 	int i;
 
-	if (data_direction == PCI_DMA_NONE)
+	if (data_direction == DMA_NONE)
 		return;
 
 	for (i = 0; i < num_descriptors; i++)
-		pci_unmap_single(pci_dev,
+		dma_unmap_single(&pci_dev->dev,
 			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
 			get_unaligned_le32(&descriptors[i].length),
 			data_direction);
@@ -387,10 +429,10 @@
 static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
 	struct pqi_raid_path_request *request, u8 cmd,
 	u8 *scsi3addr, void *buffer, size_t buffer_length,
-	u16 vpd_page, int *pci_direction)
+	u16 vpd_page, enum dma_data_direction *dir)
 {
 	u8 *cdb;
-	int pci_dir;
+	size_t cdb_length = buffer_length;
 
 	memset(request, 0, sizeof(*request));
 
@@ -413,7 +455,7 @@
 			cdb[1] = 0x1;
 			cdb[2] = (u8)vpd_page;
 		}
-		cdb[4] = (u8)buffer_length;
+		cdb[4] = (u8)cdb_length;
 		break;
 	case CISS_REPORT_LOG:
 	case CISS_REPORT_PHYS:
@@ -423,32 +465,46 @@
 			cdb[1] = CISS_REPORT_PHYS_EXTENDED;
 		else
 			cdb[1] = CISS_REPORT_LOG_EXTENDED;
-		put_unaligned_be32(buffer_length, &cdb[6]);
+		put_unaligned_be32(cdb_length, &cdb[6]);
 		break;
 	case CISS_GET_RAID_MAP:
 		request->data_direction = SOP_READ_FLAG;
 		cdb[0] = CISS_READ;
 		cdb[1] = CISS_GET_RAID_MAP;
-		put_unaligned_be32(buffer_length, &cdb[6]);
+		put_unaligned_be32(cdb_length, &cdb[6]);
 		break;
 	case SA_FLUSH_CACHE:
 		request->data_direction = SOP_WRITE_FLAG;
 		cdb[0] = BMIC_WRITE;
 		cdb[6] = BMIC_FLUSH_CACHE;
-		put_unaligned_be16(buffer_length, &cdb[7]);
+		put_unaligned_be16(cdb_length, &cdb[7]);
 		break;
+	case BMIC_SENSE_DIAG_OPTIONS:
+		cdb_length = 0;
+		/* fall through */
 	case BMIC_IDENTIFY_CONTROLLER:
 	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
+	case BMIC_SENSE_SUBSYSTEM_INFORMATION:
 		request->data_direction = SOP_READ_FLAG;
 		cdb[0] = BMIC_READ;
 		cdb[6] = cmd;
-		put_unaligned_be16(buffer_length, &cdb[7]);
+		put_unaligned_be16(cdb_length, &cdb[7]);
 		break;
+	case BMIC_SET_DIAG_OPTIONS:
+		cdb_length = 0;
+		/* fall through */
 	case BMIC_WRITE_HOST_WELLNESS:
 		request->data_direction = SOP_WRITE_FLAG;
 		cdb[0] = BMIC_WRITE;
 		cdb[6] = cmd;
-		put_unaligned_be16(buffer_length, &cdb[7]);
+		put_unaligned_be16(cdb_length, &cdb[7]);
+		break;
+	case BMIC_CSMI_PASSTHRU:
+		request->data_direction = SOP_BIDIRECTIONAL;
+		cdb[0] = BMIC_WRITE;
+		cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU;
+		cdb[6] = cmd;
+		put_unaligned_be16(cdb_length, &cdb[7]);
 		break;
 	default:
 		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n",
@@ -458,23 +514,21 @@
 
 	switch (request->data_direction) {
 	case SOP_READ_FLAG:
-		pci_dir = PCI_DMA_FROMDEVICE;
+		*dir = DMA_FROM_DEVICE;
 		break;
 	case SOP_WRITE_FLAG:
-		pci_dir = PCI_DMA_TODEVICE;
+		*dir = DMA_TO_DEVICE;
 		break;
 	case SOP_NO_DIRECTION_FLAG:
-		pci_dir = PCI_DMA_NONE;
+		*dir = DMA_NONE;
 		break;
 	default:
-		pci_dir = PCI_DMA_BIDIRECTIONAL;
+		*dir = DMA_BIDIRECTIONAL;
 		break;
 	}
 
-	*pci_direction = pci_dir;
-
 	return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
-		buffer, buffer_length, pci_dir);
+		buffer, buffer_length, *dir);
 }
 
 static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
@@ -512,46 +566,137 @@
 	atomic_dec(&io_request->refcount);
 }
 
-static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
-	struct bmic_identify_controller *buffer)
+static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
+		u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
+		struct pqi_raid_error_info *error_info,
+		unsigned long timeout_msecs)
 {
 	int rc;
-	int pci_direction;
+	enum dma_data_direction dir;
 	struct pqi_raid_path_request request;
 
 	rc = pqi_build_raid_path_request(ctrl_info, &request,
-		BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer,
-		sizeof(*buffer), 0, &pci_direction);
+		cmd, scsi3addr, buffer,
+		buffer_length, vpd_page, &dir);
 	if (rc)
 		return rc;
 
-	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
-		NULL, NO_TIMEOUT);
+	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
+		 0, error_info, timeout_msecs);
 
-	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
-		pci_direction);
-
+	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
 	return rc;
 }
 
-static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
+/* Helper functions for pqi_send_scsi_raid_request */
+
+static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
+		u8 cmd, void *buffer, size_t buffer_length)
+{
+	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
+			buffer, buffer_length, 0, NULL, NO_TIMEOUT);
+}
+
+static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
+		u8 cmd, void *buffer, size_t buffer_length,
+		struct pqi_raid_error_info *error_info)
+{
+	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
+			buffer, buffer_length, 0, error_info, NO_TIMEOUT);
+}
+
+
+		struct bmic_identify_controller *buffer)
+{
+	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
+			buffer, sizeof(*buffer));
+}
+
+static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info,
+		struct bmic_sense_subsystem_info *sense_info)
+{
+	return pqi_send_ctrl_raid_request(ctrl_info,
+			BMIC_SENSE_SUBSYSTEM_INFORMATION,
+			sense_info, sizeof(*sense_info));
+}
+
+static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
 	u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
 {
+	return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
+		buffer, buffer_length, vpd_page, NULL, NO_TIMEOUT);
+}
+
+static bool pqi_vpd_page_supported(struct pqi_ctrl_info *ctrl_info,
+	u8 *scsi3addr, u16 vpd_page)
+{
 	int rc;
-	int pci_direction;
-	struct pqi_raid_path_request request;
+	int i;
+	int pages;
+	unsigned char *buf, bufsize;
 
-	rc = pqi_build_raid_path_request(ctrl_info, &request,
-		INQUIRY, scsi3addr, buffer, buffer_length, vpd_page,
-		&pci_direction);
-	if (rc)
-		return rc;
+	buf = kzalloc(256, GFP_KERNEL);
+	if (!buf)
+		return false;
 
-	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
-		NULL, NO_TIMEOUT);
+	/* Get the size of the page list first */
+	rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
+				VPD_PAGE | SCSI_VPD_SUPPORTED_PAGES,
+				buf, SCSI_VPD_HEADER_SZ);
+	if (rc != 0)
+		goto exit_unsupported;
 
-	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
-		pci_direction);
+	pages = buf[3];
+	if ((pages + SCSI_VPD_HEADER_SZ) <= 255)
+		bufsize = pages + SCSI_VPD_HEADER_SZ;
+	else
+		bufsize = 255;
+
+	/* Get the whole VPD page list */
+	rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
+				VPD_PAGE | SCSI_VPD_SUPPORTED_PAGES,
+				buf, bufsize);
+	if (rc != 0)
+		goto exit_unsupported;
+
+	pages = buf[3];
+	for (i = 1; i <= pages; i++)
+		if (buf[3 + i] == vpd_page)
+			goto exit_supported;
+
+exit_unsupported:
+	kfree(buf);
+	return false;
+
+exit_supported:
+	kfree(buf);
+	return true;
+}
+
+static int pqi_get_device_id(struct pqi_ctrl_info *ctrl_info,
+	u8 *scsi3addr, u8 *device_id, int buflen)
+{
+	int rc;
+	unsigned char *buf;
+
+	if (!pqi_vpd_page_supported(ctrl_info, scsi3addr, SCSI_VPD_DEVICE_ID))
+		return 1; /* function not supported */
+
+	buf = kzalloc(64, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
+				VPD_PAGE | SCSI_VPD_DEVICE_ID,
+				buf, 64);
+	if (rc == 0) {
+		if (buflen > 16)
+			buflen = 16;
+		memcpy(device_id, &buf[SCSI_VPD_DEVICE_ID_IDX], buflen);
+	}
+
+	kfree(buf);
 
 	return rc;
 }
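
pqi_vpd_page_supported() above is the standard SPC probe: INQUIRY for VPD page 00h, whose byte 3 gives the page-list length and whose supported page codes start at byte 4 (SCSI_VPD_HEADER_SZ). The core scan, reduced to a sketch over a raw reply buffer:

    #include <linux/types.h>

    /* Sketch: scan an INQUIRY VPD page-00h reply for a given page code. */
    static bool example_vpd_page_listed(const u8 *buf, size_t buflen,
    				    u8 vpd_page)
    {
    	unsigned int i, pages = buf[3];

    	for (i = 0; i < pages && 4 + i < buflen; i++)
    		if (buf[4 + i] == vpd_page)
    			return true;

    	return false;
    }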
@@ -562,13 +707,13 @@
 	size_t buffer_length)
 {
 	int rc;
-	int pci_direction;
+	enum dma_data_direction dir;
 	u16 bmic_device_index;
 	struct pqi_raid_path_request request;
 
 	rc = pqi_build_raid_path_request(ctrl_info, &request,
 		BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
-		buffer_length, 0, &pci_direction);
+		buffer_length, 0, &dir);
 	if (rc)
 		return rc;
 
@@ -579,9 +724,7 @@
 	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
 		0, NULL, NO_TIMEOUT);
 
-	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
-		pci_direction);
-
+	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
 	return rc;
 }
 
@@ -589,8 +732,6 @@
 	enum bmic_flush_cache_shutdown_event shutdown_event)
 {
 	int rc;
-	struct pqi_raid_path_request request;
-	int pci_direction;
 	struct bmic_flush_cache *flush_cache;
 
 	/*
@@ -606,46 +747,55 @@
 
 	flush_cache->shutdown_event = shutdown_event;
 
-	rc = pqi_build_raid_path_request(ctrl_info, &request,
-		SA_FLUSH_CACHE, RAID_CTLR_LUNID, flush_cache,
-		sizeof(*flush_cache), 0, &pci_direction);
-	if (rc)
-		goto out;
+	rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache,
+		sizeof(*flush_cache));
 
-	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
-		0, NULL, NO_TIMEOUT);
-
-	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
-		pci_direction);
-
-out:
 	kfree(flush_cache);
 
 	return rc;
 }
 
-static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
-	void *buffer, size_t buffer_length)
+int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
+	struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
+	struct pqi_raid_error_info *error_info)
+{
+	return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU,
+		buffer, buffer_length, error_info);
+}
+
+#define PQI_FETCH_PTRAID_DATA (1UL<<31)
+
+static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
 {
 	int rc;
-	struct pqi_raid_path_request request;
-	int pci_direction;
+	struct bmic_diag_options *diag;
 
-	rc = pqi_build_raid_path_request(ctrl_info, &request,
-		BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer,
-		buffer_length, 0, &pci_direction);
+	diag = kzalloc(sizeof(*diag), GFP_KERNEL);
+	if (!diag)
+		return -ENOMEM;
+
+	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
+					diag, sizeof(*diag));
 	if (rc)
-		return rc;
+		goto out;
 
-	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
-		0, NULL, NO_TIMEOUT);
+	diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);
 
-	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
-		pci_direction);
+	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS,
+					diag, sizeof(*diag));
+out:
+	kfree(diag);
 
 	return rc;
 }
 
+static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
+	void *buffer, size_t buffer_length)
+{
+	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
+					buffer, buffer_length);
+}
+
 #pragma pack(1)
 
 struct bmic_host_wellness_driver_version {
@@ -653,6 +803,7 @@
 	u8	driver_version_tag[2];
 	__le16	driver_version_length;
 	char	driver_version[32];
+	u8	dont_write_tag[2];
 	u8	end_tag[2];
 };
 
@@ -682,6 +833,8 @@
 	strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
 		sizeof(buffer->driver_version) - 1);
 	buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
+	buffer->dont_write_tag[0] = 'D';
+	buffer->dont_write_tag[1] = 'W';
 	buffer->end_tag[0] = 'Z';
 	buffer->end_tag[1] = 'Z';
 
@@ -755,7 +908,7 @@
 	return rc;
 }
 
-#define PQI_UPDATE_TIME_WORK_INTERVAL	(24UL * 60 * 60 * HZ)
+#define PQI_UPDATE_TIME_WORK_INTERVAL	(24UL * 60 * 60 * PQI_HZ)
 
 static void pqi_update_time_worker(struct work_struct *work)
 {
@@ -789,25 +942,11 @@
 	cancel_delayed_work_sync(&ctrl_info->update_time_work);
 }
 
-static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
+static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
 	void *buffer, size_t buffer_length)
 {
-	int rc;
-	int pci_direction;
-	struct pqi_raid_path_request request;
-
-	rc = pqi_build_raid_path_request(ctrl_info, &request,
-		cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &pci_direction);
-	if (rc)
-		return rc;
-
-	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
-		NULL, NO_TIMEOUT);
-
-	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
-		pci_direction);
-
-	return rc;
+	return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer,
+					buffer_length);
 }
 
 static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
@@ -1025,8 +1164,6 @@
 	char *err_msg;
 	u32 raid_map_size;
 	u32 r5or6_blocks_per_row;
-	unsigned int num_phys_disks;
-	unsigned int num_raid_map_entries;
 
 	raid_map_size = get_unaligned_le32(&raid_map->structure_size);
 
@@ -1035,22 +1172,6 @@
 		goto bad_raid_map;
 	}
 
-	if (raid_map_size > sizeof(*raid_map)) {
-		err_msg = "RAID map too large";
-		goto bad_raid_map;
-	}
-
-	num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
-		(get_unaligned_le16(&raid_map->data_disks_per_row) +
-		get_unaligned_le16(&raid_map->metadata_disks_per_row));
-	num_raid_map_entries = num_phys_disks *
-		get_unaligned_le16(&raid_map->row_cnt);
-
-	if (num_raid_map_entries > RAID_MAP_MAX_ENTRIES) {
-		err_msg = "invalid number of map entries in RAID map";
-		goto bad_raid_map;
-	}
-
 	if (device->raid_level == SA_RAID_1) {
 		if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
 			err_msg = "invalid RAID-1 map";
@@ -1089,29 +1210,46 @@
 	struct pqi_scsi_dev *device)
 {
 	int rc;
-	int pci_direction;
-	struct pqi_raid_path_request request;
+	u32 raid_map_size;
 	struct raid_map *raid_map;
 
 	raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
 	if (!raid_map)
 		return -ENOMEM;
 
-	rc = pqi_build_raid_path_request(ctrl_info, &request,
-		CISS_GET_RAID_MAP, device->scsi3addr, raid_map,
-		sizeof(*raid_map), 0, &pci_direction);
-	if (rc)
-		goto error;
-
-	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
-		NULL, NO_TIMEOUT);
-
-	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
-		pci_direction);
+	rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
+		device->scsi3addr, raid_map, sizeof(*raid_map),
+		0, NULL, NO_TIMEOUT);
 
 	if (rc)
 		goto error;
 
+	raid_map_size = get_unaligned_le32(&raid_map->structure_size);
+
+	if (raid_map_size > sizeof(*raid_map)) {
+
+		kfree(raid_map);
+
+		raid_map = kmalloc(raid_map_size, GFP_KERNEL);
+		if (!raid_map)
+			return -ENOMEM;
+
+		rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
+			device->scsi3addr, raid_map, raid_map_size,
+			0, NULL, NO_TIMEOUT);
+		if (rc)
+			goto error;
+
+		if (get_unaligned_le32(&raid_map->structure_size)
+			!= raid_map_size) {
+			dev_warn(&ctrl_info->pci_dev->dev,
+				"Requested %d bytes, received %d bytes",
+				"Requested %u bytes, received %u bytes\n",
+				get_unaligned_le32(&raid_map->structure_size));
+			goto error;
+		}
+	}
+
 	rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
 	if (rc)
 		goto error;
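
The RAID map read above replaces the old hard size cap with a two-pass fetch: read into a default-sized buffer, and if the structure_size field reports more, reallocate and read again. The shape of that pattern, with a hypothetical fetch callback standing in for pqi_send_scsi_raid_request() and assuming the 32-bit LE size field leads the structure, as it does for struct raid_map:

    #include <linux/slab.h>
    #include <asm/unaligned.h>

    /* Sketch: two-pass fetch of a self-sizing structure. */
    static void *example_fetch_sized(int (*fetch)(void *buf, size_t len),
    				 size_t initial)
    {
    	void *buf = kmalloc(initial, GFP_KERNEL);
    	u32 full;

    	if (!buf)
    		return NULL;
    	if (fetch(buf, initial))
    		goto err;

    	full = get_unaligned_le32(buf);	/* reported full length */
    	if (full > initial) {
    		kfree(buf);
    		buf = kmalloc(full, GFP_KERNEL);
    		if (!buf)
    			return NULL;
    		if (fetch(buf, full))
    			goto err;
    	}

    	return buf;

    err:
    	kfree(buf);
    	return NULL;
    }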
@@ -1181,6 +1319,9 @@
 	if (rc)
 		goto out;
 
+	if (vpd->page_code != CISS_VPD_LV_STATUS)
+		goto out;
+
 	page_length = offsetof(struct ciss_vpd_logical_volume_status,
 		volume_status) + vpd->page_length;
 	if (page_length < sizeof(*vpd))
@@ -1206,6 +1347,9 @@
 	u8 *buffer;
 	unsigned int retries;
 
+	if (device->is_expander_smp_device)
+		return 0;
+
 	buffer = kmalloc(64, GFP_KERNEL);
 	if (!buffer)
 		return -ENOMEM;
@@ -1241,6 +1385,14 @@
 		}
 	}
 
+	if (pqi_get_device_id(ctrl_info, device->scsi3addr,
+		device->unique_id, sizeof(device->unique_id)) < 0)
+		dev_warn(&ctrl_info->pci_dev->dev,
+			"Can't get device id for scsi %d:%d:%d:%d\n",
+			ctrl_info->scsi_host->host_no,
+			device->bus, device->target,
+			device->lun);
+
 out:
 	kfree(buffer);
 
@@ -1261,7 +1413,9 @@
 		device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
 		return;
 	}
-
+	device->box_index = id_phys->box_index;
+	device->phys_box_on_bus = id_phys->phys_box_on_bus;
+	device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
 	device->queue_depth =
 		get_unaligned_le16(&id_phys->current_queue_depth_limit);
 	device->device_type = id_phys->device_type;
@@ -1403,9 +1557,24 @@
 	return rc;
 }
 
+#define PQI_PENDING_IO_TIMEOUT_SECS	20
+
 static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
 	struct pqi_scsi_dev *device)
 {
+	int rc;
+
+	pqi_device_remove_start(device);
+
+	rc = pqi_device_wait_for_pending_io(ctrl_info, device,
+		PQI_PENDING_IO_TIMEOUT_SECS);
+	if (rc)
+		dev_err(&ctrl_info->pci_dev->dev,
+			"scsi %d:%d:%d:%d removing device with %d outstanding commands\n",
+			ctrl_info->scsi_host->host_no, device->bus,
+			device->target, device->lun,
+			atomic_read(&device->scsi_cmds_outstanding));
+
 	if (pqi_is_logical_device(device))
 		scsi_remove_device(device->sdev);
 	else
@@ -1470,6 +1639,14 @@
 	return DEVICE_NOT_FOUND;
 }
 
+static inline const char *pqi_device_type(struct pqi_scsi_dev *device)
+{
+	if (device->is_expander_smp_device)
+		return "Enclosure SMP    ";
+
+	return scsi_device_type(device->devtype);
+}
+
 #define PQI_DEV_INFO_BUFFER_LENGTH	128
 
 static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
@@ -1505,7 +1682,7 @@
 
 	count += snprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
 		" %s %.8s %.16s ",
-		scsi_device_type(device->devtype),
+		pqi_device_type(device),
 		device->vendor,
 		device->model);
 
@@ -1550,6 +1727,8 @@
 	existing_device->is_physical_device = new_device->is_physical_device;
 	existing_device->is_external_raid_device =
 		new_device->is_external_raid_device;
+	existing_device->is_expander_smp_device =
+		new_device->is_expander_smp_device;
 	existing_device->aio_enabled = new_device->aio_enabled;
 	memcpy(existing_device->vendor, new_device->vendor,
 		sizeof(existing_device->vendor));
@@ -1563,6 +1742,10 @@
 	existing_device->active_path_index = new_device->active_path_index;
 	existing_device->path_map = new_device->path_map;
 	existing_device->bay = new_device->bay;
+	existing_device->box_index = new_device->box_index;
+	existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
+	existing_device->phy_connected_dev_type =
+		new_device->phy_connected_dev_type;
 	memcpy(existing_device->box, new_device->box,
 		sizeof(existing_device->box));
 	memcpy(existing_device->phys_connector, new_device->phys_connector,
@@ -1574,6 +1757,7 @@
 		new_device->raid_bypass_configured;
 	existing_device->raid_bypass_enabled =
 		new_device->raid_bypass_enabled;
+	existing_device->device_offline = false;
 
 	/* To prevent this from being freed later. */
 	new_device->raid_map = NULL;
@@ -1605,6 +1789,14 @@
 	device->keep_device = false;
 }
 
+static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
+{
+	if (device->is_expander_smp_device)
+		return device->sas_port != NULL;
+
+	return device->sdev != NULL;
+}
+
 static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
 	struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
 {
@@ -1690,6 +1882,9 @@
 
 	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
 
+	if (pqi_ctrl_in_ofa(ctrl_info))
+		pqi_ctrl_ofa_done(ctrl_info);
+
 	/* Remove all devices that have gone away. */
 	list_for_each_entry_safe(device, next, &delete_list,
 		delete_list_entry) {
@@ -1699,7 +1894,7 @@
 		} else {
 			pqi_dev_info(ctrl_info, "removed", device);
 		}
-		if (device->sdev)
+		if (pqi_is_device_added(device))
 			pqi_remove_device(ctrl_info, device);
 		list_del(&device->delete_list_entry);
 		pqi_free_device(device);
@@ -1721,7 +1916,7 @@
 
 	/* Expose any new devices. */
 	list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
-		if (!device->sdev) {
+		if (!pqi_is_device_added(device)) {
 			pqi_dev_info(ctrl_info, "added", device);
 			rc = pqi_add_device(ctrl_info, device);
 			if (rc) {
@@ -1738,7 +1933,12 @@
 
 static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
 {
-	bool is_supported = false;
+	bool is_supported;
+
+	if (device->is_expander_smp_device)
+		return true;
+
+	is_supported = false;
 
 	switch (device->devtype) {
 	case TYPE_DISK:
@@ -1772,6 +1972,35 @@
 	return false;
 }
 
+static inline void pqi_mask_device(u8 *scsi3addr)
+{
+	scsi3addr[3] |= 0xc0;
+}
+
+static inline bool pqi_is_device_with_sas_address(struct pqi_scsi_dev *device)
+{
+	if (!device->is_physical_device)
+		return false;
+
+	if (device->is_expander_smp_device)
+		return true;
+
+	switch (device->devtype) {
+	case TYPE_DISK:
+	case TYPE_ZBC:
+	case TYPE_ENCLOSURE:
+		return true;
+	}
+
+	return false;
+}
+
+static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
+{
+	return !device->is_physical_device ||
+		!pqi_skip_device(device->scsi3addr);
+}
+
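
pqi_mask_device() hides a LUN from the OS by setting the two high bits of byte 3 of its 8-byte CISS LUN address; pqi_expose_device() then consults pqi_skip_device(), which is defined elsewhere in the driver and not visible in this hunk. A sketch of the convention as I read it, the test below being an assumption inferred from the 0xc0 mask:

#include <linux/types.h>

static inline void mask_device(u8 *scsi3addr)
{
	scsi3addr[3] |= 0xc0;	/* mark the LUN as driver-hidden */
}

/* Assumed inverse check; pqi_skip_device() itself is not in this hunk. */
static inline bool device_is_masked(const u8 *scsi3addr)
{
	return (scsi3addr[3] & 0xc0) == 0xc0;
}
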
 static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
 {
 	int i;
@@ -1791,6 +2020,8 @@
 	unsigned int num_valid_devices;
 	bool is_physical_device;
 	u8 *scsi3addr;
+	unsigned int physical_index;
+	unsigned int logical_index;
 	static char *out_of_memory_msg =
 		"failed to allocate memory, device discovery stopped";
 
@@ -1826,6 +2057,21 @@
 			rc = -ENOMEM;
 			goto out;
 		}
+		if (pqi_hide_vsep) {
+			int i;
+
+			for (i = num_physicals - 1; i >= 0; i--) {
+				phys_lun_ext_entry =
+						&physdev_list->lun_entries[i];
+				if (CISS_GET_DRIVE_NUMBER(
+					phys_lun_ext_entry->lunid) ==
+						PQI_VSEP_CISS_BTL) {
+					pqi_mask_device(
+						phys_lun_ext_entry->lunid);
+					break;
+				}
+			}
+		}
 	}
 
 	num_new_devices = num_physicals + num_logicals;
@@ -1853,19 +2099,23 @@
 
 	device = NULL;
 	num_valid_devices = 0;
+	physical_index = 0;
+	logical_index = 0;
 
 	for (i = 0; i < num_new_devices; i++) {
 
-		if (i < num_physicals) {
+		if ((!pqi_expose_ld_first && i < num_physicals) ||
+			(pqi_expose_ld_first && i >= num_logicals)) {
 			is_physical_device = true;
-			phys_lun_ext_entry = &physdev_list->lun_entries[i];
+			phys_lun_ext_entry =
+				&physdev_list->lun_entries[physical_index++];
 			log_lun_ext_entry = NULL;
 			scsi3addr = phys_lun_ext_entry->lunid;
 		} else {
 			is_physical_device = false;
 			phys_lun_ext_entry = NULL;
 			log_lun_ext_entry =
-				&logdev_list->lun_entries[i - num_physicals];
+				&logdev_list->lun_entries[logical_index++];
 			scsi3addr = log_lun_ext_entry->lunid;
 		}
 
@@ -1880,9 +2130,14 @@
 
 		memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
 		device->is_physical_device = is_physical_device;
-		if (!is_physical_device)
+		if (is_physical_device) {
+			if (phys_lun_ext_entry->device_type ==
+				SA_EXPANDER_SMP_DEVICE)
+				device->is_expander_smp_device = true;
+		} else {
 			device->is_external_raid_device =
 				pqi_is_external_raid_addr(scsi3addr);
+		}
 
 		/* Gather information about the device. */
 		rc = pqi_get_device_info(ctrl_info, device);
@@ -1915,30 +2170,22 @@
 			device->wwid = phys_lun_ext_entry->wwid;
 			if ((phys_lun_ext_entry->device_flags &
 				REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
-				phys_lun_ext_entry->aio_handle)
+				phys_lun_ext_entry->aio_handle) {
 				device->aio_enabled = true;
+				device->aio_handle =
+					phys_lun_ext_entry->aio_handle;
+			}
+
+			pqi_get_physical_disk_info(ctrl_info,
+				device, id_phys);
+
 		} else {
 			memcpy(device->volume_id, log_lun_ext_entry->volume_id,
 				sizeof(device->volume_id));
 		}
 
-		switch (device->devtype) {
-		case TYPE_DISK:
-		case TYPE_ZBC:
-		case TYPE_ENCLOSURE:
-			if (device->is_physical_device) {
-				device->sas_address =
-					get_unaligned_be64(&device->wwid);
-				if (device->devtype == TYPE_DISK ||
-					device->devtype == TYPE_ZBC) {
-					device->aio_handle =
-						phys_lun_ext_entry->aio_handle;
-					pqi_get_physical_disk_info(ctrl_info,
-						device, id_phys);
-				}
-			}
-			break;
-		}
+		if (pqi_is_device_with_sas_address(device))
+			device->sas_address = get_unaligned_be64(&device->wwid);
 
 		new_device_list[num_valid_devices++] = device;
 	}
@@ -1981,7 +2228,7 @@
 		if (!device)
 			break;
 
-		if (device->sdev)
+		if (pqi_is_device_added(device))
 			pqi_remove_device(ctrl_info, device);
 		pqi_free_device(device);
 	}
@@ -1989,25 +2236,33 @@
 
 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
 {
-	int rc;
+	int rc = 0;
 
 	if (pqi_ctrl_offline(ctrl_info))
 		return -ENXIO;
 
-	mutex_lock(&ctrl_info->scan_mutex);
-
-	rc = pqi_update_scsi_devices(ctrl_info);
-	if (rc)
+	if (!mutex_trylock(&ctrl_info->scan_mutex)) {
 		pqi_schedule_rescan_worker_delayed(ctrl_info);
-
-	mutex_unlock(&ctrl_info->scan_mutex);
+		rc = -EINPROGRESS;
+	} else {
+		rc = pqi_update_scsi_devices(ctrl_info);
+		if (rc)
+			pqi_schedule_rescan_worker_delayed(ctrl_info);
+		mutex_unlock(&ctrl_info->scan_mutex);
+	}
 
 	return rc;
 }
 
 static void pqi_scan_start(struct Scsi_Host *shost)
 {
-	pqi_scan_scsi_devices(shost_to_hba(shost));
+	struct pqi_ctrl_info *ctrl_info;
+
+	ctrl_info = shost_to_hba(shost);
+	if (pqi_ctrl_in_ofa(ctrl_info))
+		return;
+
+	pqi_scan_scsi_devices(ctrl_info);
 }
 
 /* Returns TRUE if scan is finished. */
@@ -2034,6 +2289,12 @@
 	mutex_unlock(&ctrl_info->lun_reset_mutex);
 }
 
+static void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
+{
+	mutex_lock(&ctrl_info->ofa_mutex);
+	mutex_unlock(&ctrl_info->ofa_mutex);
+}
+
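
pqi_wait_until_ofa_finished() is the classic lock/unlock barrier: pqi_ofa_process_event() holds ofa_mutex for the whole activation sequence, so acquiring and immediately releasing the mutex blocks the caller until any in-flight OFA is done. In isolation:

#include <linux/mutex.h>

static DEFINE_MUTEX(op_mutex);

static void do_long_operation(void)
{
	mutex_lock(&op_mutex);
	/* ... long-running work owns the mutex throughout ... */
	mutex_unlock(&op_mutex);
}

/* Returns only once any in-flight do_long_operation() has finished. */
static void wait_until_operation_finished(void)
{
	mutex_lock(&op_mutex);
	mutex_unlock(&op_mutex);
}
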
 static inline void pqi_set_encryption_info(
 	struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
 	u64 first_block)
@@ -2341,9 +2602,6 @@
 			(map_row * total_disks_per_row) + first_column;
 	}
 
-	if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
-		return PQI_RAID_BYPASS_INELIGIBLE;
-
 	aio_handle = raid_map->disk_data[map_index].aio_handle;
 	disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
 		first_row * strip_size +
@@ -2413,7 +2671,7 @@
 	u8 status;
 
 	pqi_registers = ctrl_info->pqi_registers;
-	timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
+	timeout = (PQI_MODE_READY_TIMEOUT_SECS * PQI_HZ) + jiffies;
 
 	while (1) {
 		signature = readq(&pqi_registers->signature);
@@ -2474,10 +2732,9 @@
 		return;
 
 	device->device_offline = true;
-	scsi_device_set_state(sdev, SDEV_OFFLINE);
 	ctrl_info = shost_to_hba(sdev->host);
 	pqi_schedule_rescan_worker(ctrl_info);
-	dev_err(&ctrl_info->pci_dev->dev, "offlined %s scsi %d:%d:%d:%d\n",
+	dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n",
 		path, ctrl_info->scsi_host->host_no, device->bus,
 		device->target, device->lun);
 }
@@ -2552,10 +2809,25 @@
 			scsi_normalize_sense(error_info->data,
 				sense_data_length, &sshdr) &&
 				sshdr.sense_key == HARDWARE_ERROR &&
-				sshdr.asc == 0x3e &&
-				sshdr.ascq == 0x1) {
-			pqi_take_device_offline(scmd->device, "RAID");
-			host_byte = DID_NO_CONNECT;
+				sshdr.asc == 0x3e) {
+			struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host);
+			struct pqi_scsi_dev *device = scmd->device->hostdata;
+
+			switch (sshdr.ascq) {
+			case 0x1: /* LOGICAL UNIT FAILURE */
+				if (printk_ratelimit())
+					scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n",
+						ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
+				pqi_take_device_offline(scmd->device, "RAID");
+				host_byte = DID_NO_CONNECT;
+				break;
+
+			default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */
+				if (printk_ratelimit())
+					scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n",
+						sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
+				break;
+			}
 		}
 
 		if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
@@ -2681,6 +2953,9 @@
 	case SOP_TMF_FUNCTION_SUCCEEDED:
 		rc = 0;
 		break;
+	case SOP_TMF_REJECTED:
+		rc = -EAGAIN;
+		break;
 	default:
 		rc = -EIO;
 		break;
@@ -2720,8 +2995,17 @@
 		switch (response->header.iu_type) {
 		case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
 		case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
+			if (io_request->scmd)
+				io_request->scmd->result = 0;
+			/* fall through */
 		case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
 			break;
+		case PQI_RESPONSE_IU_VENDOR_GENERAL:
+			io_request->status =
+				get_unaligned_le16(
+				&((struct pqi_vendor_general_response *)
+					response)->status);
+			break;
 		case PQI_RESPONSE_IU_TASK_MANAGEMENT:
 			io_request->status =
 				pqi_interpret_task_management_response(
@@ -2841,6 +3125,111 @@
 	pqi_send_event_ack(ctrl_info, &request, sizeof(request));
 }
 
+#define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS		30
+#define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS	1
+
+static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
+	struct pqi_ctrl_info *ctrl_info)
+{
+	unsigned long timeout;
+	u8 status;
+
+	timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * PQI_HZ) + jiffies;
+
+	while (1) {
+		status = pqi_read_soft_reset_status(ctrl_info);
+		if (status & PQI_SOFT_RESET_INITIATE)
+			return RESET_INITIATE_DRIVER;
+
+		if (status & PQI_SOFT_RESET_ABORT)
+			return RESET_ABORT;
+
+		if (time_after(jiffies, timeout)) {
+			dev_err(&ctrl_info->pci_dev->dev,
+				"timed out waiting for soft reset status\n");
+			return RESET_TIMEDOUT;
+		}
+
+		if (!sis_is_firmware_running(ctrl_info))
+			return RESET_NORESPONSE;
+
+		ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS);
+	}
+}
+
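
pqi_poll_for_soft_reset_status() is a standard bounded poll: compute an absolute jiffies deadline, re-read the status once per second, and bail out via time_after(). (PQI_HZ is a driver-local stand-in for HZ.) The bare shape:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

#define POLL_TIMEOUT_SECS	30

static int poll_until_done(bool (*done)(void))
{
	unsigned long timeout = jiffies + POLL_TIMEOUT_SECS * HZ;

	while (!done()) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		ssleep(1);	/* one-second poll interval, as above */
	}

	return 0;
}
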
+static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info,
+		enum pqi_soft_reset_status reset_status)
+{
+	int rc;
+
+	switch (reset_status) {
+	case RESET_INITIATE_DRIVER:
+		/* fall through */
+	case RESET_TIMEDOUT:
+		dev_info(&ctrl_info->pci_dev->dev,
+			"resetting controller %u\n", ctrl_info->ctrl_id);
+		sis_soft_reset(ctrl_info);
+		/* fall through */
+	case RESET_INITIATE_FIRMWARE:
+		rc = pqi_ofa_ctrl_restart(ctrl_info);
+		pqi_ofa_free_host_buffer(ctrl_info);
+		dev_info(&ctrl_info->pci_dev->dev,
+			"Online Firmware Activation for controller %u: %s\n",
+			ctrl_info->ctrl_id, rc == 0 ? "SUCCESS" : "FAILED");
+		break;
+	case RESET_ABORT:
+		pqi_ofa_ctrl_unquiesce(ctrl_info);
+		dev_info(&ctrl_info->pci_dev->dev,
+			"Online Firmware Activation for controller %u: %s\n",
+			ctrl_info->ctrl_id, "ABORTED");
+		break;
+	case RESET_NORESPONSE:
+		pqi_ofa_free_host_buffer(ctrl_info);
+		pqi_take_ctrl_offline(ctrl_info);
+		break;
+	}
+}
+
+static void pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_event *event)
+{
+	u16 event_id;
+	enum pqi_soft_reset_status status;
+
+	event_id = get_unaligned_le16(&event->event_id);
+
+	mutex_lock(&ctrl_info->ofa_mutex);
+
+	if (event_id == PQI_EVENT_OFA_QUIESCE) {
+		dev_info(&ctrl_info->pci_dev->dev,
+			 "Received Online Firmware Activation quiesce event for controller %u\n",
+			 ctrl_info->ctrl_id);
+		pqi_ofa_ctrl_quiesce(ctrl_info);
+		pqi_acknowledge_event(ctrl_info, event);
+		if (ctrl_info->soft_reset_handshake_supported) {
+			status = pqi_poll_for_soft_reset_status(ctrl_info);
+			pqi_process_soft_reset(ctrl_info, status);
+		} else {
+			pqi_process_soft_reset(ctrl_info,
+					RESET_INITIATE_FIRMWARE);
+		}
+
+	} else if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) {
+		pqi_acknowledge_event(ctrl_info, event);
+		pqi_ofa_setup_host_buffer(ctrl_info,
+			le32_to_cpu(event->ofa_bytes_requested));
+		pqi_ofa_host_memory_update(ctrl_info);
+	} else if (event_id == PQI_EVENT_OFA_CANCELLED) {
+		pqi_ofa_free_host_buffer(ctrl_info);
+		pqi_acknowledge_event(ctrl_info, event);
+		dev_info(&ctrl_info->pci_dev->dev,
+			 "Online Firmware Activation (%u) cancel reason: %u\n",
+			 ctrl_info->ctrl_id, event->ofa_cancel_reason);
+	}
+
+	mutex_unlock(&ctrl_info->ofa_mutex);
+}
+
 static void pqi_event_worker(struct work_struct *work)
 {
 	unsigned int i;
@@ -2860,6 +3249,11 @@
 	for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
 		if (event->pending) {
 			event->pending = false;
+			if (event->event_type == PQI_EVENT_TYPE_OFA) {
+				pqi_ctrl_unbusy(ctrl_info);
+				pqi_ofa_process_event(ctrl_info, event);
+				return;
+			}
 			pqi_acknowledge_event(ctrl_info, event);
 		}
 		event++;
@@ -2869,7 +3263,7 @@
 	pqi_ctrl_unbusy(ctrl_info);
 }
 
-#define PQI_HEARTBEAT_TIMER_INTERVAL	(10 * HZ)
+#define PQI_HEARTBEAT_TIMER_INTERVAL	(10 * PQI_HZ)
 
 static void pqi_heartbeat_timer_handler(struct timer_list *t)
 {
@@ -2938,6 +3332,24 @@
 	return pqi_event_type_to_event_index(event_type) != -1;
 }
 
+static void pqi_ofa_capture_event_payload(struct pqi_event *event,
+	struct pqi_event_response *response)
+{
+	u16 event_id;
+
+	event_id = get_unaligned_le16(&event->event_id);
+
+	if (event->event_type == PQI_EVENT_TYPE_OFA) {
+		if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) {
+			event->ofa_bytes_requested =
+			response->data.ofa_memory_allocation.bytes_requested;
+		} else if (event_id == PQI_EVENT_OFA_CANCELLED) {
+			event->ofa_cancel_reason =
+			response->data.ofa_cancelled.reason;
+		}
+	}
+}
+
 static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
 {
 	unsigned int num_events;
@@ -2972,6 +3384,7 @@
 				event->event_id = response->event_id;
 				event->additional_event_id =
 					response->additional_event_id;
+				pqi_ofa_capture_event_payload(event, response);
 			}
 		}
 
@@ -3225,9 +3638,9 @@
 	alloc_length += PQI_EXTRA_SGL_MEMORY;
 
 	ctrl_info->queue_memory_base =
-		dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
-			alloc_length,
-			&ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL);
+		dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
+				   &ctrl_info->queue_memory_base_dma_handle,
+				   GFP_KERNEL);
 
 	if (!ctrl_info->queue_memory_base)
 		return -ENOMEM;
@@ -3364,10 +3777,9 @@
 		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
 
 	ctrl_info->admin_queue_memory_base =
-		dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
-			alloc_length,
-			&ctrl_info->admin_queue_memory_base_dma_handle,
-			GFP_KERNEL);
+		dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
+				   &ctrl_info->admin_queue_memory_base_dma_handle,
+				   GFP_KERNEL);
 
 	if (!ctrl_info->admin_queue_memory_base)
 		return -ENOMEM;
@@ -3405,7 +3817,7 @@
 	return 0;
 }
 
-#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES		HZ
+#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES		PQI_HZ
 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS	1
 
 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
@@ -3498,7 +3910,7 @@
 	admin_queues = &ctrl_info->admin_queues;
 	oq_ci = admin_queues->oq_ci_copy;
 
-	timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies;
+	timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * PQI_HZ) + jiffies;
 
 	while (1) {
 		oq_pi = readl(admin_queues->oq_pi);
@@ -3613,7 +4025,7 @@
 
 	while (1) {
 		if (wait_for_completion_io_timeout(wait,
-			PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) {
+			PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * PQI_HZ)) {
 			rc = 0;
 			break;
 		}
@@ -3688,8 +4100,10 @@
 				return -ETIMEDOUT;
 			msecs_blocked =
 				jiffies_to_msecs(jiffies - start_jiffies);
-			if (msecs_blocked >= timeout_msecs)
-				return -ETIMEDOUT;
+			if (msecs_blocked >= timeout_msecs) {
+				rc = -ETIMEDOUT;
+				goto out;
+			}
 			timeout_msecs -= msecs_blocked;
 		}
 	}
@@ -3822,7 +4236,7 @@
 	rc = pqi_map_single(ctrl_info->pci_dev,
 		&request.data.report_device_capability.sg_descriptor,
 		capability, sizeof(*capability),
-		PCI_DMA_FROMDEVICE);
+		DMA_FROM_DEVICE);
 	if (rc)
 		goto out;
 
@@ -3831,7 +4245,7 @@
 
 	pqi_pci_unmap(ctrl_info->pci_dev,
 		&request.data.report_device_capability.sg_descriptor, 1,
-		PCI_DMA_FROMDEVICE);
+		DMA_FROM_DEVICE);
 
 	if (rc)
 		goto out;
@@ -4158,7 +4572,7 @@
 	rc = pqi_map_single(ctrl_info->pci_dev,
 		request.data.report_event_configuration.sg_descriptors,
 		event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
-		PCI_DMA_FROMDEVICE);
+		DMA_FROM_DEVICE);
 	if (rc)
 		goto out;
 
@@ -4167,7 +4581,7 @@
 
 	pqi_pci_unmap(ctrl_info->pci_dev,
 		request.data.report_event_configuration.sg_descriptors, 1,
-		PCI_DMA_FROMDEVICE);
+		DMA_FROM_DEVICE);
 
 	if (rc)
 		goto out;
@@ -4194,7 +4608,7 @@
 	rc = pqi_map_single(ctrl_info->pci_dev,
 		request.data.report_event_configuration.sg_descriptors,
 		event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
-		PCI_DMA_TODEVICE);
+		DMA_TO_DEVICE);
 	if (rc)
 		goto out;
 
@@ -4203,7 +4617,7 @@
 
 	pqi_pci_unmap(ctrl_info->pci_dev,
 		request.data.report_event_configuration.sg_descriptors, 1,
-		PCI_DMA_TODEVICE);
+		DMA_TO_DEVICE);
 
 out:
 	kfree(event_config);
@@ -4251,9 +4665,10 @@
 
 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
 {
-	ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
-		ctrl_info->error_buffer_length,
-		&ctrl_info->error_buffer_dma_handle, GFP_KERNEL);
+	ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
+						     ctrl_info->error_buffer_length,
+						     &ctrl_info->error_buffer_dma_handle,
+						     GFP_KERNEL);
 
 	if (!ctrl_info->error_buffer)
 		return -ENOMEM;
@@ -4943,7 +5358,17 @@
 {
 	struct pqi_scsi_dev *device;
 
+	if (!scmd->device) {
+		set_host_byte(scmd, DID_NO_CONNECT);
+		return;
+	}
+
 	device = scmd->device->hostdata;
+	if (!device) {
+		set_host_byte(scmd, DID_NO_CONNECT);
+		return;
+	}
+
 	atomic_dec(&device->scsi_cmds_outstanding);
 }
 
@@ -4960,16 +5385,24 @@
 	device = scmd->device->hostdata;
 	ctrl_info = shost_to_hba(shost);
 
+	if (!device) {
+		set_host_byte(scmd, DID_NO_CONNECT);
+		pqi_scsi_done(scmd);
+		return 0;
+	}
+
 	atomic_inc(&device->scsi_cmds_outstanding);
 
-	if (pqi_ctrl_offline(ctrl_info)) {
+	if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(ctrl_info,
+								device)) {
 		set_host_byte(scmd, DID_NO_CONNECT);
 		pqi_scsi_done(scmd);
 		return 0;
 	}
 
 	pqi_ctrl_busy(ctrl_info);
-	if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device)) {
+	if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device) ||
+	    pqi_ctrl_in_ofa(ctrl_info)) {
 		rc = SCSI_MLQUEUE_HOST_BUSY;
 		goto out;
 	}
@@ -5114,25 +5547,75 @@
 	}
 }
 
-static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
-	struct pqi_scsi_dev *device)
+static void pqi_fail_io_queued_for_all_devices(struct pqi_ctrl_info *ctrl_info)
 {
+	unsigned int i;
+	unsigned int path;
+	struct pqi_queue_group *queue_group;
+	unsigned long flags;
+	struct pqi_io_request *io_request;
+	struct pqi_io_request *next;
+	struct scsi_cmnd *scmd;
+
+	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
+		queue_group = &ctrl_info->queue_groups[i];
+
+		for (path = 0; path < 2; path++) {
+			spin_lock_irqsave(&queue_group->submit_lock[path],
+						flags);
+
+			list_for_each_entry_safe(io_request, next,
+				&queue_group->request_list[path],
+				request_list_entry) {
+
+				scmd = io_request->scmd;
+				if (!scmd)
+					continue;
+
+				list_del(&io_request->request_list_entry);
+				set_host_byte(scmd, DID_RESET);
+				pqi_scsi_done(scmd);
+			}
+
+			spin_unlock_irqrestore(
+				&queue_group->submit_lock[path], flags);
+		}
+	}
+}
+
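
pqi_fail_io_queued_for_all_devices() walks every submit list with list_for_each_entry_safe() because it unlinks entries mid-walk; the plain iterator would chase an unlinked node. Minimal form of the idiom, with placeholder types:

#include <linux/list.h>
#include <linux/spinlock.h>

struct pending_req {
	struct list_head entry;
	void (*complete)(struct pending_req *req);
};

static void fail_all_pending(struct list_head *list, spinlock_t *lock)
{
	struct pending_req *req, *next;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_for_each_entry_safe(req, next, list, entry) {
		list_del(&req->entry);	/* safe: 'next' was saved first */
		req->complete(req);
	}
	spin_unlock_irqrestore(lock, flags);
}
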
+static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_scsi_dev *device, unsigned long timeout_secs)
+{
+	unsigned long timeout;
+
+	timeout = (timeout_secs * PQI_HZ) + jiffies;
+
 	while (atomic_read(&device->scsi_cmds_outstanding)) {
 		pqi_check_ctrl_health(ctrl_info);
 		if (pqi_ctrl_offline(ctrl_info))
 			return -ENXIO;
+		if (timeout_secs != NO_TIMEOUT) {
+			if (time_after(jiffies, timeout)) {
+				dev_err(&ctrl_info->pci_dev->dev,
+					"timed out waiting for pending IO\n");
+				return -ETIMEDOUT;
+			}
+		}
 		usleep_range(1000, 2000);
 	}
 
 	return 0;
 }
 
-static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info)
+static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
+	unsigned long timeout_secs)
 {
 	bool io_pending;
 	unsigned long flags;
+	unsigned long timeout;
 	struct pqi_scsi_dev *device;
 
+	timeout = (timeout_secs * PQI_HZ) + jiffies;
 	while (1) {
 		io_pending = false;
 
@@ -5154,6 +5637,13 @@
 		if (pqi_ctrl_offline(ctrl_info))
 			return -ENXIO;
 
+		if (timeout_secs != NO_TIMEOUT) {
+			if (time_after(jiffies, timeout)) {
+				dev_err(&ctrl_info->pci_dev->dev,
+					"timed out waiting for pending IO\n");
+				return -ETIMEDOUT;
+			}
+		}
 		usleep_range(1000, 2000);
 	}
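
Both wait helpers now take a timeout in seconds, with NO_TIMEOUT as a wait-forever sentinel (I am assuming the driver defines it as all-ones, per the usual convention). Note the deadline is computed up front even in the NO_TIMEOUT case; the wrapped value is simply never consulted. The shape:

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

#define NO_TIMEOUT	((unsigned long)-1)	/* assumed sentinel value */

static int wait_for_zero(atomic_t *count, unsigned long timeout_secs)
{
	unsigned long timeout = jiffies + timeout_secs * HZ;

	while (atomic_read(count)) {
		if (timeout_secs != NO_TIMEOUT &&
		    time_after(jiffies, timeout))
			return -ETIMEDOUT;
		usleep_range(1000, 2000);
	}

	return 0;
}
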
 
@@ -5177,7 +5667,7 @@
 
 	while (1) {
 		if (wait_for_completion_io_timeout(wait,
-			PQI_LUN_RESET_TIMEOUT_SECS * HZ)) {
+			PQI_LUN_RESET_TIMEOUT_SECS * PQI_HZ)) {
 			rc = 0;
 			break;
 		}
@@ -5230,16 +5720,55 @@
 
 /* Performs a reset at the LUN level. */
 
+#define PQI_LUN_RESET_RETRIES			3
+#define PQI_LUN_RESET_RETRY_INTERVAL_MSECS	10000
+#define PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS	120
+
+static int _pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_scsi_dev *device)
+{
+	int rc;
+	unsigned int retries;
+	unsigned long timeout_secs;
+
+	for (retries = 0;;) {
+		rc = pqi_lun_reset(ctrl_info, device);
+		if (rc != -EAGAIN || ++retries > PQI_LUN_RESET_RETRIES)
+			break;
+		msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
+	}
+
+	timeout_secs = rc ? PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS : NO_TIMEOUT;
+
+	rc |= pqi_device_wait_for_pending_io(ctrl_info, device, timeout_secs);
+
+	return rc == 0 ? SUCCESS : FAILED;
+}
+
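
Tying two hunks together: pqi_interpret_task_management_response() now maps SOP_TMF_REJECTED to -EAGAIN, and _pqi_device_reset() above retries exactly that case, up to three extra attempts with a 10 s pause. Generic form, try_once() standing in for pqi_lun_reset():

#include <linux/delay.h>
#include <linux/errno.h>

#define MAX_RETRIES		3
#define RETRY_INTERVAL_MSECS	10000

static int retry_on_eagain(int (*try_once)(void))
{
	unsigned int retries;
	int rc;

	for (retries = 0;;) {
		rc = try_once();
		if (rc != -EAGAIN || ++retries > MAX_RETRIES)
			break;
		msleep(RETRY_INTERVAL_MSECS);
	}

	return rc;
}
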
 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
 	struct pqi_scsi_dev *device)
 {
 	int rc;
 
-	rc = pqi_lun_reset(ctrl_info, device);
-	if (rc == 0)
-		rc = pqi_device_wait_for_pending_io(ctrl_info, device);
+	mutex_lock(&ctrl_info->lun_reset_mutex);
 
-	return rc == 0 ? SUCCESS : FAILED;
+	pqi_ctrl_block_requests(ctrl_info);
+	pqi_ctrl_wait_until_quiesced(ctrl_info);
+	pqi_fail_io_queued_for_device(ctrl_info, device);
+	rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
+	pqi_device_reset_start(device);
+	pqi_ctrl_unblock_requests(ctrl_info);
+
+	if (rc)
+		rc = FAILED;
+	else
+		rc = _pqi_device_reset(ctrl_info, device);
+
+	pqi_device_reset_done(device);
+
+	mutex_unlock(&ctrl_info->lun_reset_mutex);
+
+	return rc;
 }
 
 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
@@ -5259,27 +5788,16 @@
 
 	pqi_check_ctrl_health(ctrl_info);
 	if (pqi_ctrl_offline(ctrl_info)) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"controller %u offlined - cannot send device reset\n",
+			ctrl_info->ctrl_id);
 		rc = FAILED;
 		goto out;
 	}
 
-	mutex_lock(&ctrl_info->lun_reset_mutex);
+	pqi_wait_until_ofa_finished(ctrl_info);
 
-	pqi_ctrl_block_requests(ctrl_info);
-	pqi_ctrl_wait_until_quiesced(ctrl_info);
-	pqi_fail_io_queued_for_device(ctrl_info, device);
-	rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
-	pqi_device_reset_start(device);
-	pqi_ctrl_unblock_requests(ctrl_info);
-
-	if (rc)
-		rc = FAILED;
-	else
-		rc = pqi_device_reset(ctrl_info, device);
-
-	pqi_device_reset_done(device);
-
-	mutex_unlock(&ctrl_info->lun_reset_mutex);
+	rc = pqi_device_reset(ctrl_info, device);
 
 out:
 	dev_err(&ctrl_info->pci_dev->dev,
@@ -5324,6 +5842,10 @@
 			scsi_change_queue_depth(sdev,
 				device->advertised_queue_depth);
 		}
+		if (pqi_is_logical_device(device))
+			pqi_disable_write_same(sdev);
+		else
+			sdev->allow_restart = 1;
 	}
 
 	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
@@ -5335,7 +5857,8 @@
 {
 	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
 
-	return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev, 0);
+	return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
+					ctrl_info->pci_dev, 0);
 }
 
 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
@@ -5534,7 +6057,7 @@
 
 		rc = pqi_map_single(ctrl_info->pci_dev,
 			&request.sg_descriptors[0], kernel_buffer,
-			iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
+			iocommand.buf_size, DMA_BIDIRECTIONAL);
 		if (rc)
 			goto out;
 
@@ -5548,7 +6071,7 @@
 
 	if (iocommand.buf_size > 0)
 		pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
-			PCI_DMA_BIDIRECTIONAL);
+			DMA_BIDIRECTIONAL);
 
 	memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
 
@@ -5588,13 +6111,17 @@
 	return rc;
 }
 
-static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
+static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
+		     void __user *arg)
 {
 	int rc;
 	struct pqi_ctrl_info *ctrl_info;
 
 	ctrl_info = shost_to_hba(sdev->host);
 
+	if (pqi_ctrl_in_ofa(ctrl_info))
+		return -EBUSY;
+
 	switch (cmd) {
 	case CCISS_DEREGDISK:
 	case CCISS_REGNEWDISK:
@@ -5618,23 +6145,65 @@
 	return rc;
 }
 
-static ssize_t pqi_version_show(struct device *dev,
+static ssize_t pqi_firmware_version_show(struct device *dev,
 	struct device_attribute *attr, char *buffer)
 {
-	ssize_t count = 0;
 	struct Scsi_Host *shost;
 	struct pqi_ctrl_info *ctrl_info;
 
 	shost = class_to_shost(dev);
 	ctrl_info = shost_to_hba(shost);
 
-	count += snprintf(buffer + count, PAGE_SIZE - count,
-		"  driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP);
+	return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version);
+}
 
-	count += snprintf(buffer + count, PAGE_SIZE - count,
-		"firmware: %s\n", ctrl_info->firmware_version);
+static ssize_t pqi_driver_version_show(struct device *dev,
+	struct device_attribute *attr, char *buffer)
+{
+	struct Scsi_Host *shost;
+	struct pqi_ctrl_info *ctrl_info;
 
-	return count;
+	shost = class_to_shost(dev);
+	ctrl_info = shost_to_hba(shost);
+
+	return snprintf(buffer, PAGE_SIZE,
+		"%s\n", DRIVER_VERSION BUILD_TIMESTAMP);
+}
+
+static ssize_t pqi_serial_number_show(struct device *dev,
+	struct device_attribute *attr, char *buffer)
+{
+	struct Scsi_Host *shost;
+	struct pqi_ctrl_info *ctrl_info;
+
+	shost = class_to_shost(dev);
+	ctrl_info = shost_to_hba(shost);
+
+	return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number);
+}
+
+static ssize_t pqi_model_show(struct device *dev,
+	struct device_attribute *attr, char *buffer)
+{
+	struct Scsi_Host *shost;
+	struct pqi_ctrl_info *ctrl_info;
+
+	shost = class_to_shost(dev);
+	ctrl_info = shost_to_hba(shost);
+
+	return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model);
+}
+
+static ssize_t pqi_vendor_show(struct device *dev,
+	struct device_attribute *attr, char *buffer)
+{
+	struct Scsi_Host *shost;
+	struct pqi_ctrl_info *ctrl_info;
+
+	shost = class_to_shost(dev);
+	ctrl_info = shost_to_hba(shost);
+
+	return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor);
 }
 
 static ssize_t pqi_host_rescan_store(struct device *dev,
@@ -5687,18 +6256,170 @@
 	return -EINVAL;
 }
 
-static DEVICE_ATTR(version, 0444, pqi_version_show, NULL);
+static DEVICE_ATTR(driver_version, 0444, pqi_driver_version_show, NULL);
+static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL);
+static DEVICE_ATTR(model, 0444, pqi_model_show, NULL);
+static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL);
+static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL);
 static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
 static DEVICE_ATTR(lockup_action, 0644,
 	pqi_lockup_action_show, pqi_lockup_action_store);
 
 static struct device_attribute *pqi_shost_attrs[] = {
-	&dev_attr_version,
+	&dev_attr_driver_version,
+	&dev_attr_firmware_version,
+	&dev_attr_model,
+	&dev_attr_serial_number,
+	&dev_attr_vendor,
 	&dev_attr_rescan,
 	&dev_attr_lockup_action,
 	NULL
 };
 
+static ssize_t pqi_unique_id_show(struct device *dev,
+	struct device_attribute *attr, char *buffer)
+{
+	struct pqi_ctrl_info *ctrl_info;
+	struct scsi_device *sdev;
+	struct pqi_scsi_dev *device;
+	unsigned long flags;
+	unsigned char uid[16];
+
+	sdev = to_scsi_device(dev);
+	ctrl_info = shost_to_hba(sdev->host);
+
+	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+	device = sdev->hostdata;
+	if (!device) {
+		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
+			flags);
+		return -ENODEV;
+	}
+	memcpy(uid, device->unique_id, sizeof(uid));
+
+	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+	return snprintf(buffer, PAGE_SIZE,
+		"%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n",
+		uid[0], uid[1], uid[2], uid[3],
+		uid[4], uid[5], uid[6], uid[7],
+		uid[8], uid[9], uid[10], uid[11],
+		uid[12], uid[13], uid[14], uid[15]);
+}
+
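
pqi_unique_id_show() above, and the lunid and path_info attributes just below, all follow the same discipline: take the device-list spinlock, copy the field into a stack buffer, drop the lock, and only then format. That keeps snprintf() out of the critical section. A stripped-down sketch with placeholder types; "%16phN" is a compact alternative to the open-coded %02X list:

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>

struct dev_state {
	spinlock_t lock;
	u8 unique_id[16];
};

static ssize_t unique_id_show(struct dev_state *s, char *buf)
{
	unsigned long flags;
	u8 uid[16];

	spin_lock_irqsave(&s->lock, flags);
	memcpy(uid, s->unique_id, sizeof(uid));	/* snapshot under lock */
	spin_unlock_irqrestore(&s->lock, flags);

	return snprintf(buf, PAGE_SIZE, "%16phN\n", uid);
}
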
+static ssize_t pqi_lunid_show(struct device *dev,
+	struct device_attribute *attr, char *buffer)
+{
+	struct pqi_ctrl_info *ctrl_info;
+	struct scsi_device *sdev;
+	struct pqi_scsi_dev *device;
+	unsigned long flags;
+	u8 lunid[8];
+
+	sdev = to_scsi_device(dev);
+	ctrl_info = shost_to_hba(sdev->host);
+
+	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+	device = sdev->hostdata;
+	if (!device) {
+		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
+			flags);
+		return -ENODEV;
+	}
+	memcpy(lunid, device->scsi3addr, sizeof(lunid));
+
+	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+	return snprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
+}
+
+#define MAX_PATHS 8
+static ssize_t pqi_path_info_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct pqi_ctrl_info *ctrl_info;
+	struct scsi_device *sdev;
+	struct pqi_scsi_dev *device;
+	unsigned long flags;
+	int i;
+	int output_len = 0;
+	u8 box;
+	u8 bay;
+	u8 path_map_index = 0;
+	char *active;
+	unsigned char phys_connector[2];
+
+	sdev = to_scsi_device(dev);
+	ctrl_info = shost_to_hba(sdev->host);
+
+	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+	device = sdev->hostdata;
+	if (!device) {
+		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
+			flags);
+		return -ENODEV;
+	}
+
+	bay = device->bay;
+	for (i = 0; i < MAX_PATHS; i++) {
+		path_map_index = 1 << i;
+		if (i == device->active_path_index)
+			active = "Active";
+		else if (device->path_map & path_map_index)
+			active = "Inactive";
+		else
+			continue;
+
+		output_len += scnprintf(buf + output_len,
+					PAGE_SIZE - output_len,
+					"[%d:%d:%d:%d] %20.20s ",
+					ctrl_info->scsi_host->host_no,
+					device->bus, device->target,
+					device->lun,
+					scsi_device_type(device->devtype));
+
+		if (device->devtype == TYPE_RAID ||
+			pqi_is_logical_device(device))
+			goto end_buffer;
+
+		memcpy(&phys_connector, &device->phys_connector[i],
+			sizeof(phys_connector));
+		if (phys_connector[0] < '0')
+			phys_connector[0] = '0';
+		if (phys_connector[1] < '0')
+			phys_connector[1] = '0';
+
+		output_len += scnprintf(buf + output_len,
+					PAGE_SIZE - output_len,
+					"PORT: %.2s ", phys_connector);
+
+		box = device->box[i];
+		if (box != 0 && box != 0xFF)
+			output_len += scnprintf(buf + output_len,
+						PAGE_SIZE - output_len,
+						"BOX: %hhu ", box);
+
+		if ((device->devtype == TYPE_DISK ||
+			device->devtype == TYPE_ZBC) &&
+			pqi_expose_device(device))
+			output_len += scnprintf(buf + output_len,
+						PAGE_SIZE - output_len,
+						"BAY: %hhu ", bay);
+
+end_buffer:
+		output_len += scnprintf(buf + output_len,
+					PAGE_SIZE - output_len,
+					"%s\n", active);
+	}
+
+	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+	return output_len;
+}
+
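
pqi_path_info_show() leans on the scnprintf() return convention: unlike snprintf(), it returns the number of bytes actually written, so chained appends of the form buf + len, PAGE_SIZE - len can never run past the sysfs page. Reduced to its core:

#include <linux/kernel.h>
#include <linux/mm.h>

static ssize_t paths_show(char *buf, int npaths)
{
	int i;
	int len = 0;

	for (i = 0; i < npaths; i++)
		len += scnprintf(buf + len, PAGE_SIZE - len,
				 "path %d\n", i);

	return len;
}
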
 static ssize_t pqi_sas_address_show(struct device *dev,
 	struct device_attribute *attr, char *buffer)
 {
@@ -5775,12 +6496,18 @@
 	return snprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
 }
 
+static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
+static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
+static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
 static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
 static DEVICE_ATTR(ssd_smart_path_enabled, 0444,
 	pqi_ssd_smart_path_enabled_show, NULL);
 static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
 
 static struct device_attribute *pqi_sdev_attrs[] = {
+	&dev_attr_lunid,
+	&dev_attr_unique_id,
+	&dev_attr_path_info,
 	&dev_attr_sas_address,
 	&dev_attr_ssd_smart_path_enabled,
 	&dev_attr_raid_level,
@@ -5795,7 +6522,6 @@
 	.scan_start = pqi_scan_start,
 	.scan_finished = pqi_scan_finished,
 	.this_id = -1,
-	.use_clustering = ENABLE_CLUSTERING,
 	.eh_device_reset_handler = pqi_eh_device_reset_handler,
 	.ioctl = pqi_ioctl,
 	.slave_alloc = pqi_slave_alloc,
@@ -5936,7 +6662,30 @@
 	return rc;
 }
 
-static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info)
+static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info)
+{
+	int rc;
+	struct bmic_sense_subsystem_info *sense_info;
+
+	sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL);
+	if (!sense_info)
+		return -ENOMEM;
+
+	rc = pqi_sense_subsystem_info(ctrl_info, sense_info);
+	if (rc)
+		goto out;
+
+	memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number,
+		sizeof(sense_info->ctrl_serial_number));
+	ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0';
+
+out:
+	kfree(sense_info);
+
+	return rc;
+}
+
+static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info)
 {
 	int rc;
 	struct bmic_identify_controller *identify;
@@ -5957,12 +6706,258 @@
 		sizeof(ctrl_info->firmware_version),
 		"-%u", get_unaligned_le16(&identify->firmware_build_number));
 
+	memcpy(ctrl_info->model, identify->product_id,
+		sizeof(identify->product_id));
+	ctrl_info->model[sizeof(identify->product_id)] = '\0';
+
+	memcpy(ctrl_info->vendor, identify->vendor_id,
+		sizeof(identify->vendor_id));
+	ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0';
+
 out:
 	kfree(identify);
 
 	return rc;
 }
 
+struct pqi_config_table_section_info {
+	struct pqi_ctrl_info *ctrl_info;
+	void		*section;
+	u32		section_offset;
+	void __iomem	*section_iomem_addr;
+};
+
+static inline bool pqi_is_firmware_feature_supported(
+	struct pqi_config_table_firmware_features *firmware_features,
+	unsigned int bit_position)
+{
+	unsigned int byte_index;
+
+	byte_index = bit_position / BITS_PER_BYTE;
+
+	if (byte_index >= le16_to_cpu(firmware_features->num_elements))
+		return false;
+
+	return firmware_features->features_supported[byte_index] &
+		(1 << (bit_position % BITS_PER_BYTE)) ? true : false;
+}
+
+static inline bool pqi_is_firmware_feature_enabled(
+	struct pqi_config_table_firmware_features *firmware_features,
+	void __iomem *firmware_features_iomem_addr,
+	unsigned int bit_position)
+{
+	unsigned int byte_index;
+	u8 __iomem *features_enabled_iomem_addr;
+
+	byte_index = (bit_position / BITS_PER_BYTE) +
+		(le16_to_cpu(firmware_features->num_elements) * 2);
+
+	features_enabled_iomem_addr = firmware_features_iomem_addr +
+		offsetof(struct pqi_config_table_firmware_features,
+			features_supported) + byte_index;
+
+	return *((__force u8 *)features_enabled_iomem_addr) &
+		(1 << (bit_position % BITS_PER_BYTE)) ? true : false;
+}
+
+static inline void pqi_request_firmware_feature(
+	struct pqi_config_table_firmware_features *firmware_features,
+	unsigned int bit_position)
+{
+	unsigned int byte_index;
+
+	byte_index = (bit_position / BITS_PER_BYTE) +
+		le16_to_cpu(firmware_features->num_elements);
+
+	firmware_features->features_supported[byte_index] |=
+		(1 << (bit_position % BITS_PER_BYTE));
+}
+
+static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info,
+	u16 first_section, u16 last_section)
+{
+	struct pqi_vendor_general_request request;
+
+	memset(&request, 0, sizeof(request));
+
+	request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
+	put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
+		&request.header.iu_length);
+	put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE,
+		&request.function_code);
+	put_unaligned_le16(first_section,
+		&request.data.config_table_update.first_section);
+	put_unaligned_le16(last_section,
+		&request.data.config_table_update.last_section);
+
+	return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
+		0, NULL, NO_TIMEOUT);
+}
+
+static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_config_table_firmware_features *firmware_features,
+	void __iomem *firmware_features_iomem_addr)
+{
+	void *features_requested;
+	void __iomem *features_requested_iomem_addr;
+
+	features_requested = firmware_features->features_supported +
+		le16_to_cpu(firmware_features->num_elements);
+
+	features_requested_iomem_addr = firmware_features_iomem_addr +
+		(features_requested - (void *)firmware_features);
+
+	memcpy_toio(features_requested_iomem_addr, features_requested,
+		le16_to_cpu(firmware_features->num_elements));
+
+	return pqi_config_table_update(ctrl_info,
+		PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES,
+		PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES);
+}
+
+struct pqi_firmware_feature {
+	char		*feature_name;
+	unsigned int	feature_bit;
+	bool		supported;
+	bool		enabled;
+	void (*feature_status)(struct pqi_ctrl_info *ctrl_info,
+		struct pqi_firmware_feature *firmware_feature);
+};
+
+static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_firmware_feature *firmware_feature)
+{
+	if (!firmware_feature->supported) {
+		dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n",
+			firmware_feature->feature_name);
+		return;
+	}
+
+	if (firmware_feature->enabled) {
+		dev_info(&ctrl_info->pci_dev->dev,
+			"%s enabled\n", firmware_feature->feature_name);
+		return;
+	}
+
+	dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n",
+		firmware_feature->feature_name);
+}
+
+static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_firmware_feature *firmware_feature)
+{
+	if (firmware_feature->feature_status)
+		firmware_feature->feature_status(ctrl_info, firmware_feature);
+}
+
+static DEFINE_MUTEX(pqi_firmware_features_mutex);
+
+static struct pqi_firmware_feature pqi_firmware_features[] = {
+	{
+		.feature_name = "Online Firmware Activation",
+		.feature_bit = PQI_FIRMWARE_FEATURE_OFA,
+		.feature_status = pqi_firmware_feature_status,
+	},
+	{
+		.feature_name = "Serial Management Protocol",
+		.feature_bit = PQI_FIRMWARE_FEATURE_SMP,
+		.feature_status = pqi_firmware_feature_status,
+	},
+	{
+		.feature_name = "New Soft Reset Handshake",
+		.feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
+		.feature_status = pqi_firmware_feature_status,
+	},
+};
+
+static void pqi_process_firmware_features(
+	struct pqi_config_table_section_info *section_info)
+{
+	int rc;
+	struct pqi_ctrl_info *ctrl_info;
+	struct pqi_config_table_firmware_features *firmware_features;
+	void __iomem *firmware_features_iomem_addr;
+	unsigned int i;
+	unsigned int num_features_supported;
+
+	ctrl_info = section_info->ctrl_info;
+	firmware_features = section_info->section;
+	firmware_features_iomem_addr = section_info->section_iomem_addr;
+
+	for (i = 0, num_features_supported = 0;
+		i < ARRAY_SIZE(pqi_firmware_features); i++) {
+		if (pqi_is_firmware_feature_supported(firmware_features,
+			pqi_firmware_features[i].feature_bit)) {
+			pqi_firmware_features[i].supported = true;
+			num_features_supported++;
+		} else {
+			pqi_firmware_feature_update(ctrl_info,
+				&pqi_firmware_features[i]);
+		}
+	}
+
+	if (num_features_supported == 0)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
+		if (!pqi_firmware_features[i].supported)
+			continue;
+		pqi_request_firmware_feature(firmware_features,
+			pqi_firmware_features[i].feature_bit);
+	}
+
+	rc = pqi_enable_firmware_features(ctrl_info, firmware_features,
+		firmware_features_iomem_addr);
+	if (rc) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"failed to enable firmware features in PQI configuration table\n");
+		for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
+			if (!pqi_firmware_features[i].supported)
+				continue;
+			pqi_firmware_feature_update(ctrl_info,
+				&pqi_firmware_features[i]);
+		}
+		return;
+	}
+
+	ctrl_info->soft_reset_handshake_supported = false;
+	for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
+		if (!pqi_firmware_features[i].supported)
+			continue;
+		if (pqi_is_firmware_feature_enabled(firmware_features,
+			firmware_features_iomem_addr,
+			pqi_firmware_features[i].feature_bit)) {
+			pqi_firmware_features[i].enabled = true;
+			if (pqi_firmware_features[i].feature_bit ==
+			    PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE)
+				ctrl_info->soft_reset_handshake_supported =
+									true;
+		}
+		pqi_firmware_feature_update(ctrl_info,
+			&pqi_firmware_features[i]);
+	}
+}
+
+static void pqi_init_firmware_features(void)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
+		pqi_firmware_features[i].supported = false;
+		pqi_firmware_features[i].enabled = false;
+	}
+}
+
+static void pqi_process_firmware_features_section(
+	struct pqi_config_table_section_info *section_info)
+{
+	mutex_lock(&pqi_firmware_features_mutex);
+	pqi_init_firmware_features();
+	pqi_process_firmware_features(section_info);
+	mutex_unlock(&pqi_firmware_features_mutex);
+}
+
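
Reading the three bit helpers together, features_supported[] in the config table is laid out as three consecutive byte arrays of num_elements bytes each: bits the firmware supports, bits the host requests, and bits the firmware actually enabled (the enabled array is read back through the iomem mapping). Only the base offset differs per array; a sketch of the indexing, with the 0/1/2 selector as my own shorthand:

#include <linux/bits.h>
#include <linux/types.h>

/* array: 0 = supported, 1 = host-requested, 2 = firmware-enabled */
static inline unsigned int feature_byte_index(unsigned int bit_position,
					      unsigned int num_elements,
					      unsigned int array)
{
	return array * num_elements + bit_position / BITS_PER_BYTE;
}

static inline u8 feature_bit_mask(unsigned int bit_position)
{
	return 1 << (bit_position % BITS_PER_BYTE);
}
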
 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
 {
 	u32 table_length;
@@ -5970,8 +6965,11 @@
 	void __iomem *table_iomem_addr;
 	struct pqi_config_table *config_table;
 	struct pqi_config_table_section_header *section;
+	struct pqi_config_table_section_info section_info;
 
 	table_length = ctrl_info->config_table_length;
+	if (table_length == 0)
+		return 0;
 
 	config_table = kmalloc(table_length, GFP_KERNEL);
 	if (!config_table) {
@@ -5988,13 +6986,22 @@
 		ctrl_info->config_table_offset;
 	memcpy_fromio(config_table, table_iomem_addr, table_length);
 
+	section_info.ctrl_info = ctrl_info;
 	section_offset =
 		get_unaligned_le32(&config_table->first_section_offset);
 
 	while (section_offset) {
 		section = (void *)config_table + section_offset;
 
+		section_info.section = section;
+		section_info.section_offset = section_offset;
+		section_info.section_iomem_addr =
+			table_iomem_addr + section_offset;
+
 		switch (get_unaligned_le16(&section->section_id)) {
+		case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES:
+			pqi_process_firmware_features_section(&section_info);
+			break;
 		case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
 			if (pqi_disable_heartbeat)
 				dev_warn(&ctrl_info->pci_dev->dev,
@@ -6007,6 +7014,13 @@
 					struct pqi_config_table_heartbeat,
 						heartbeat_counter);
 			break;
+		case PQI_CONFIG_TABLE_SECTION_SOFT_RESET:
+			ctrl_info->soft_reset_status =
+				table_iomem_addr +
+				section_offset +
+				offsetof(struct pqi_config_table_soft_reset,
+						soft_reset_status);
+			break;
 		}
 
 		section_offset =
@@ -6139,10 +7153,6 @@
 	ctrl_info->pqi_mode_enabled = true;
 	pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
 
-	rc = pqi_process_config_table(ctrl_info);
-	if (rc)
-		return rc;
-
 	rc = pqi_alloc_admin_queues(ctrl_info);
 	if (rc) {
 		dev_err(&ctrl_info->pci_dev->dev,
@@ -6204,6 +7214,11 @@
 	pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
 
 	ctrl_info->controller_online = true;
+
+	rc = pqi_process_config_table(ctrl_info);
+	if (rc)
+		return rc;
+
 	pqi_start_heartbeat_timer(ctrl_info);
 
 	rc = pqi_enable_events(ctrl_info);
@@ -6218,10 +7233,24 @@
 	if (rc)
 		return rc;
 
-	rc = pqi_get_ctrl_firmware_version(ctrl_info);
+	rc = pqi_get_ctrl_product_details(ctrl_info);
 	if (rc) {
 		dev_err(&ctrl_info->pci_dev->dev,
-			"error obtaining firmware version\n");
+			"error obtaining product details\n");
+		return rc;
+	}
+
+	rc = pqi_get_ctrl_serial_number(ctrl_info);
+	if (rc) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"error obtaining ctrl serial number\n");
+		return rc;
+	}
+
+	rc = pqi_set_diag_rescan(ctrl_info);
+	if (rc) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"error enabling multi-lun rescan\n");
 		return rc;
 	}
 
@@ -6282,6 +7311,24 @@
 		return rc;
 
 	/*
+	 * Get the controller properties.  This allows us to determine
+	 * whether or not it supports PQI mode.
+	 */
+	rc = sis_get_ctrl_properties(ctrl_info);
+	if (rc) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"error obtaining controller properties\n");
+		return rc;
+	}
+
+	rc = sis_get_pqi_capabilities(ctrl_info);
+	if (rc) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"error obtaining controller capabilities\n");
+		return rc;
+	}
+
+	/*
 	 * If the function we are about to call succeeds, the
 	 * controller will transition from legacy SIS mode
 	 * into PQI mode.
@@ -6321,9 +7368,14 @@
 	pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
 
 	ctrl_info->controller_online = true;
-	pqi_start_heartbeat_timer(ctrl_info);
 	pqi_ctrl_unblock_requests(ctrl_info);
 
+	rc = pqi_process_config_table(ctrl_info);
+	if (rc)
+		return rc;
+
+	pqi_start_heartbeat_timer(ctrl_info);
+
 	rc = pqi_enable_events(ctrl_info);
 	if (rc) {
 		dev_err(&ctrl_info->pci_dev->dev,
@@ -6331,6 +7383,20 @@
 		return rc;
 	}
 
+	rc = pqi_get_ctrl_product_details(ctrl_info);
+	if (rc) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"error obtaining product details\n");
+		return rc;
+	}
+
+	rc = pqi_set_diag_rescan(ctrl_info);
+	if (rc) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"error enabling multi-lun rescan\n");
+		return rc;
+	}
+
 	rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
 	if (rc) {
 		dev_err(&ctrl_info->pci_dev->dev,
@@ -6369,7 +7435,7 @@
 	else
 		mask = DMA_BIT_MASK(32);
 
-	rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask);
+	rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
 	if (rc) {
 		dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
 		goto disable_device;
@@ -6441,6 +7507,7 @@
 
 	mutex_init(&ctrl_info->scan_mutex);
 	mutex_init(&ctrl_info->lun_reset_mutex);
+	mutex_init(&ctrl_info->ofa_mutex);
 
 	INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
 	spin_lock_init(&ctrl_info->scsi_device_list_lock);
@@ -6517,6 +7584,217 @@
 	pqi_free_ctrl_resources(ctrl_info);
 }
 
+static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info)
+{
+	pqi_cancel_update_time_worker(ctrl_info);
+	pqi_cancel_rescan_worker(ctrl_info);
+	pqi_wait_until_lun_reset_finished(ctrl_info);
+	pqi_wait_until_scan_finished(ctrl_info);
+	pqi_ctrl_ofa_start(ctrl_info);
+	pqi_ctrl_block_requests(ctrl_info);
+	pqi_ctrl_wait_until_quiesced(ctrl_info);
+	pqi_ctrl_wait_for_pending_io(ctrl_info, PQI_PENDING_IO_TIMEOUT_SECS);
+	pqi_fail_io_queued_for_all_devices(ctrl_info);
+	pqi_wait_until_inbound_queues_empty(ctrl_info);
+	pqi_stop_heartbeat_timer(ctrl_info);
+	ctrl_info->pqi_mode_enabled = false;
+	pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
+}
+
+static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
+{
+	pqi_ofa_free_host_buffer(ctrl_info);
+	ctrl_info->pqi_mode_enabled = true;
+	pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
+	ctrl_info->controller_online = true;
+	pqi_ctrl_unblock_requests(ctrl_info);
+	pqi_start_heartbeat_timer(ctrl_info);
+	pqi_schedule_update_time_worker(ctrl_info);
+	pqi_clear_soft_reset_status(ctrl_info,
+		PQI_SOFT_RESET_ABORT);
+	pqi_scan_scsi_devices(ctrl_info);
+}
+
+static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info,
+	u32 total_size, u32 chunk_size)
+{
+	u32 sg_count;
+	u32 size;
+	int i;
+	struct pqi_sg_descriptor *mem_descriptor = NULL;
+	struct device *dev;
+	struct pqi_ofa_memory *ofap;
+
+	dev = &ctrl_info->pci_dev->dev;
+
+	sg_count = DIV_ROUND_UP(total_size, chunk_size);
+
+	ofap = ctrl_info->pqi_ofa_mem_virt_addr;
+
+	/* guards against u32 overflow in the rounded-up division */
+	if (sg_count * chunk_size < total_size)
+		goto out;
+
+	ctrl_info->pqi_ofa_chunk_virt_addr =
+				kcalloc(sg_count, sizeof(void *), GFP_KERNEL);
+	if (!ctrl_info->pqi_ofa_chunk_virt_addr)
+		goto out;
+
+	for (size = 0, i = 0; size < total_size; size += chunk_size, i++) {
+		dma_addr_t dma_handle;
+
+		ctrl_info->pqi_ofa_chunk_virt_addr[i] =
+			dma_alloc_coherent(dev, chunk_size, &dma_handle,
+					   GFP_KERNEL);
+
+		if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
+			break;
+
+		mem_descriptor = &ofap->sg_descriptor[i];
+		put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
+		put_unaligned_le32(chunk_size, &mem_descriptor->length);
+	}
+
+	if (!size || size < total_size)
+		goto out_free_chunks;
+
+	put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
+	put_unaligned_le16(sg_count, &ofap->num_memory_descriptors);
+	put_unaligned_le32(size, &ofap->bytes_allocated);
+
+	return 0;
+
+out_free_chunks:
+	while (--i >= 0) {
+		mem_descriptor = &ofap->sg_descriptor[i];
+		dma_free_coherent(dev, chunk_size,
+				ctrl_info->pqi_ofa_chunk_virt_addr[i],
+				get_unaligned_le64(&mem_descriptor->address));
+	}
+	kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
+
+out:
+	put_unaligned_le32(0, &ofap->bytes_allocated);
+	return -ENOMEM;
+}
+
+static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
+{
+	u32 total_size;
+	u32 min_chunk_size;
+	u32 chunk_sz;
+
+	total_size = le32_to_cpu(
+			ctrl_info->pqi_ofa_mem_virt_addr->bytes_allocated);
+	min_chunk_size = total_size / PQI_OFA_MAX_SG_DESCRIPTORS;
+
+	for (chunk_sz = total_size; chunk_sz >= min_chunk_size; chunk_sz /= 2)
+		if (!pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_sz))
+			return 0;
+
+	return -ENOMEM;
+}
+
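
pqi_ofa_alloc_host_buffer() pairs with pqi_ofa_alloc_mem() above: try to cover the requested size with coherent-DMA chunks, halving the chunk size after each failure, with a floor chosen so the scatter-gather descriptor count stays within PQI_OFA_MAX_SG_DESCRIPTORS. The strategy in isolation, with alloc_chunks() as a placeholder for pqi_ofa_alloc_mem():

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/types.h>

static int alloc_host_buffer(struct device *dev, u32 total, u32 max_sg,
			     int (*alloc_chunks)(struct device *dev,
						 u32 total, u32 chunk))
{
	u32 chunk;
	/* assumes total >> max_sg, so the floor never reaches zero */
	u32 min_chunk = total / max_sg;

	for (chunk = total; chunk >= min_chunk; chunk /= 2)
		if (!alloc_chunks(dev, total, chunk))
			return 0;	/* covered total_size at this chunk size */

	return -ENOMEM;
}
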
+static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
+	u32 bytes_requested)
+{
+	struct pqi_ofa_memory *pqi_ofa_memory;
+	struct device *dev;
+
+	dev = &ctrl_info->pci_dev->dev;
+	pqi_ofa_memory = dma_alloc_coherent(dev,
+					    PQI_OFA_MEMORY_DESCRIPTOR_LENGTH,
+					    &ctrl_info->pqi_ofa_mem_dma_handle,
+					    GFP_KERNEL);
+
+	if (!pqi_ofa_memory)
+		return;
+
+	put_unaligned_le16(PQI_OFA_VERSION, &pqi_ofa_memory->version);
+	memcpy(&pqi_ofa_memory->signature, PQI_OFA_SIGNATURE,
+					sizeof(pqi_ofa_memory->signature));
+	pqi_ofa_memory->bytes_allocated = cpu_to_le32(bytes_requested);
+
+	ctrl_info->pqi_ofa_mem_virt_addr = pqi_ofa_memory;
+
+	if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0)
+		dev_err(dev, "failed to allocate host buffer of size %u\n",
+			bytes_requested);
+}
+
+static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
+{
+	int i;
+	struct pqi_sg_descriptor *mem_descriptor;
+	struct pqi_ofa_memory *ofap;
+
+	ofap = ctrl_info->pqi_ofa_mem_virt_addr;
+
+	if (!ofap)
+		return;
+
+	if (!ofap->bytes_allocated)
+		goto out;
+
+	mem_descriptor = ofap->sg_descriptor;
+
+	for (i = 0; i < get_unaligned_le16(&ofap->num_memory_descriptors);
+		i++) {
+		dma_free_coherent(&ctrl_info->pci_dev->dev,
+			get_unaligned_le32(&mem_descriptor[i].length),
+			ctrl_info->pqi_ofa_chunk_virt_addr[i],
+			get_unaligned_le64(&mem_descriptor[i].address));
+	}
+	kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
+
+out:
+	dma_free_coherent(&ctrl_info->pci_dev->dev,
+			PQI_OFA_MEMORY_DESCRIPTOR_LENGTH, ofap,
+			ctrl_info->pqi_ofa_mem_dma_handle);
+	ctrl_info->pqi_ofa_mem_virt_addr = NULL;
+}
+
+static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
+{
+	struct pqi_vendor_general_request request;
+	size_t size;
+	struct pqi_ofa_memory *ofap;
+
+	memset(&request, 0, sizeof(request));
+
+	ofap = ctrl_info->pqi_ofa_mem_virt_addr;
+
+	request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
+	put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
+		&request.header.iu_length);
+	put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
+		&request.function_code);
+
+	if (ofap) {
+		size = offsetof(struct pqi_ofa_memory, sg_descriptor) +
+			get_unaligned_le16(&ofap->num_memory_descriptors) *
+			sizeof(struct pqi_sg_descriptor);
+
+		put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
+			&request.data.ofa_memory_allocation.buffer_address);
+		put_unaligned_le32(size,
+			&request.data.ofa_memory_allocation.buffer_length);
+
+	}
+
+	return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
+		0, NULL, NO_TIMEOUT);
+}
+
+#define PQI_POST_RESET_DELAY_B4_MSGU_READY	5000
+
+static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info)
+{
+	msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
+	return pqi_ctrl_init_resume(ctrl_info);
+}
+
 static void pqi_perform_lockup_action(void)
 {
 	switch (pqi_lockup_action) {
@@ -6615,7 +7893,7 @@
 	const struct pci_device_id *id)
 {
 	int rc;
-	int node;
+	int node, cp_node;
 	struct pqi_ctrl_info *ctrl_info;
 
 	pqi_print_ctrl_info(pci_dev, id);
@@ -6633,8 +7911,12 @@
 			"controller device ID matched using wildcards\n");
 
 	node = dev_to_node(&pci_dev->dev);
-	if (node == NUMA_NO_NODE)
-		set_dev_node(&pci_dev->dev, 0);
+	if (node == NUMA_NO_NODE) {
+		cp_node = cpu_to_node(0);
+		if (cp_node == NUMA_NO_NODE)
+			cp_node = 0;
+		set_dev_node(&pci_dev->dev, cp_node);
+	}
 
 	ctrl_info = pqi_alloc_ctrl_info(node);
 	if (!ctrl_info) {
@@ -6669,6 +7951,8 @@
 	if (!ctrl_info)
 		return;
 
+	ctrl_info->in_shutdown = true;
+
 	pqi_remove_ctrl(ctrl_info);
 }
 
@@ -6686,6 +7970,7 @@
 	 * storage.
 	 */
 	rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
+	pqi_free_interrupts(ctrl_info);
 	pqi_reset(ctrl_info);
 	if (rc == 0)
 		return;
@@ -6730,11 +8015,12 @@
 	pqi_cancel_rescan_worker(ctrl_info);
 	pqi_wait_until_scan_finished(ctrl_info);
 	pqi_wait_until_lun_reset_finished(ctrl_info);
+	pqi_wait_until_ofa_finished(ctrl_info);
 	pqi_flush_cache(ctrl_info, SUSPEND);
 	pqi_ctrl_block_requests(ctrl_info);
 	pqi_ctrl_wait_until_quiesced(ctrl_info);
 	pqi_wait_until_inbound_queues_empty(ctrl_info);
-	pqi_ctrl_wait_for_pending_io(ctrl_info);
+	pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
 	pqi_stop_heartbeat_timer(ctrl_info);
 
 	if (state.event == PM_EVENT_FREEZE)
@@ -6812,6 +8098,22 @@
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x193d, 0x1104)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x193d, 0x1105)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x193d, 0x1106)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x193d, 0x1107)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       0x193d, 0x8460)
 	},
 	{
@@ -6820,6 +8122,14 @@
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x193d, 0xc460)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x193d, 0xc461)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       0x193d, 0xf460)
 	},
 	{
@@ -6856,6 +8166,34 @@
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1bd4, 0x004f)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x19e5, 0xd227)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x19e5, 0xd228)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x19e5, 0xd229)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x19e5, 0xd22a)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x19e5, 0xd22b)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x19e5, 0xd22c)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       PCI_VENDOR_ID_ADAPTEC2, 0x0110)
 	},
 	{
@@ -6896,6 +8234,14 @@
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x0808)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x0809)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       PCI_VENDOR_ID_ADAPTEC2, 0x0900)
 	},
 	{
@@ -7052,6 +8398,26 @@
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1d8d, 0x0800)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1d8d, 0x0908)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1d8d, 0x0806)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1d8d, 0x0916)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_GIGABYTE, 0x1000)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       PCI_ANY_ID, PCI_ANY_ID)
 	},
 	{ 0 }
diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
index b209a35..6776dfc 100644
--- a/drivers/scsi/smartpqi/smartpqi_sas_transport.c
+++ b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
@@ -1,25 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    driver for Microsemi PQI-based storage controllers
- *    Copyright (c) 2016-2017 Microsemi Corporation
+ *    Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
+ *    Copyright (c) 2016-2018 Microsemi Corporation
  *    Copyright (c) 2016 PMC-Sierra, Inc.
  *
- *    This program is free software; you can redistribute it and/or modify
- *    it under the terms of the GNU General Public License as published by
- *    the Free Software Foundation; version 2 of the License.
- *
- *    This program is distributed in the hope that it will be useful,
- *    but WITHOUT ANY WARRANTY; without even the implied warranty of
- *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- *    NON INFRINGEMENT.  See the GNU General Public License for more details.
- *
- *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
+ *    Questions/Comments/Bugfixes to storagedev@microchip.com
  *
  */
 
 #include <linux/kernel.h>
+#include <linux/bsg-lib.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_transport_sas.h>
+#include <asm/unaligned.h>
 #include "smartpqi.h"
 
 static struct pqi_sas_phy *pqi_alloc_sas_phy(struct pqi_sas_port *pqi_sas_port)
@@ -97,14 +92,32 @@
 
 	identify = &rphy->identify;
 	identify->sas_address = pqi_sas_port->sas_address;
-	identify->initiator_port_protocols = SAS_PROTOCOL_STP;
-	identify->target_port_protocols = SAS_PROTOCOL_STP;
+
+	if (pqi_sas_port->device &&
+		pqi_sas_port->device->is_expander_smp_device) {
+		identify->initiator_port_protocols = SAS_PROTOCOL_SMP;
+		identify->target_port_protocols = SAS_PROTOCOL_SMP;
+	} else {
+		identify->initiator_port_protocols = SAS_PROTOCOL_STP;
+		identify->target_port_protocols = SAS_PROTOCOL_STP;
+	}
 
 	return sas_rphy_add(rphy);
 }
 
+static struct sas_rphy *pqi_sas_rphy_alloc(struct pqi_sas_port *pqi_sas_port)
+{
+	if (pqi_sas_port->device &&
+		pqi_sas_port->device->is_expander_smp_device)
+		return sas_expander_alloc(pqi_sas_port->port,
+				SAS_FANOUT_EXPANDER_DEVICE);
+
+	return sas_end_device_alloc(pqi_sas_port->port);
+}
+
 static struct pqi_sas_port *pqi_alloc_sas_port(
-	struct pqi_sas_node *pqi_sas_node, u64 sas_address)
+	struct pqi_sas_node *pqi_sas_node, u64 sas_address,
+	struct pqi_scsi_dev *device)
 {
 	int rc;
 	struct pqi_sas_port *pqi_sas_port;
@@ -127,6 +140,7 @@
 
 	pqi_sas_port->port = port;
 	pqi_sas_port->sas_address = sas_address;
+	pqi_sas_port->device = device;
 	list_add_tail(&pqi_sas_port->port_list_entry,
 		&pqi_sas_node->port_list_head);
 
@@ -146,7 +160,7 @@
 	struct pqi_sas_phy *next;
 
 	list_for_each_entry_safe(pqi_sas_phy, next,
-			&pqi_sas_port->phy_list_head, phy_list_entry)
+		&pqi_sas_port->phy_list_head, phy_list_entry)
 		pqi_free_sas_phy(pqi_sas_phy);
 
 	sas_port_delete(pqi_sas_port->port);
@@ -176,7 +190,7 @@
 		return;
 
 	list_for_each_entry_safe(pqi_sas_port, next,
-			&pqi_sas_node->port_list_head, port_list_entry)
+		&pqi_sas_node->port_list_head, port_list_entry)
 		pqi_free_sas_port(pqi_sas_port);
 
 	kfree(pqi_sas_node);
@@ -206,13 +220,14 @@
 	struct pqi_sas_port *pqi_sas_port;
 	struct pqi_sas_phy *pqi_sas_phy;
 
-	parent_dev = &shost->shost_gendev;
+	parent_dev = &shost->shost_dev;
 
 	pqi_sas_node = pqi_alloc_sas_node(parent_dev);
 	if (!pqi_sas_node)
 		return -ENOMEM;
 
-	pqi_sas_port = pqi_alloc_sas_port(pqi_sas_node, ctrl_info->sas_address);
+	pqi_sas_port = pqi_alloc_sas_port(pqi_sas_node,
+		ctrl_info->sas_address, NULL);
 	if (!pqi_sas_port) {
 		rc = -ENODEV;
 		goto free_sas_node;
@@ -254,11 +269,12 @@
 	struct pqi_sas_port *pqi_sas_port;
 	struct sas_rphy *rphy;
 
-	pqi_sas_port = pqi_alloc_sas_port(pqi_sas_node, device->sas_address);
+	pqi_sas_port = pqi_alloc_sas_port(pqi_sas_node,
+		device->sas_address, device);
 	if (!pqi_sas_port)
 		return -ENOMEM;
 
-	rphy = sas_end_device_alloc(pqi_sas_port->port);
+	rphy = pqi_sas_rphy_alloc(pqi_sas_port);
 	if (!rphy) {
 		rc = -ENODEV;
 		goto free_sas_port;
@@ -296,12 +312,110 @@
 static int pqi_sas_get_enclosure_identifier(struct sas_rphy *rphy,
 	u64 *identifier)
 {
-	return 0;
+	int rc;
+	unsigned long flags;
+	struct Scsi_Host *shost;
+	struct pqi_ctrl_info *ctrl_info;
+	struct pqi_scsi_dev *found_device;
+	struct pqi_scsi_dev *device;
+
+	if (!rphy)
+		return -ENODEV;
+
+	shost = rphy_to_shost(rphy);
+	ctrl_info = shost_to_hba(shost);
+	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+	found_device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
+
+	if (!found_device) {
+		rc = -ENODEV;
+		goto out;
+	}
+
+	if (found_device->devtype == TYPE_ENCLOSURE) {
+		*identifier = get_unaligned_be64(&found_device->wwid);
+		rc = 0;
+		goto out;
+	}
+
+	if (found_device->box_index == 0xff ||
+		found_device->phys_box_on_bus == 0 ||
+		found_device->bay == 0xff) {
+		rc = -EINVAL;
+		goto out;
+	}
+
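+	/* Look for an enclosure on the same box and physical connector. */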
+	list_for_each_entry(device, &ctrl_info->scsi_device_list,
+		scsi_device_list_entry) {
+		if (device->devtype == TYPE_ENCLOSURE &&
+			device->box_index == found_device->box_index &&
+			device->phys_box_on_bus ==
+				found_device->phys_box_on_bus &&
+			memcmp(device->phys_connector,
+				found_device->phys_connector, 2) == 0) {
+			*identifier =
+				get_unaligned_be64(&device->wwid);
+			rc = 0;
+			goto out;
+		}
+	}
+
+	if (found_device->phy_connected_dev_type != SA_CONTROLLER_DEVICE) {
+		rc = -EINVAL;
+		goto out;
+	}
+
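+	/*
+	 * Fall back to the VSEP enclosure for devices whose phy is
+	 * connected directly to the controller.
+	 */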
+	list_for_each_entry(device, &ctrl_info->scsi_device_list,
+		scsi_device_list_entry) {
+		if (device->devtype == TYPE_ENCLOSURE &&
+			CISS_GET_DRIVE_NUMBER(device->scsi3addr) ==
+				PQI_VSEP_CISS_BTL) {
+			*identifier = get_unaligned_be64(&device->wwid);
+			rc = 0;
+			goto out;
+		}
+	}
+
+	rc = -EINVAL;
+out:
+	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+	return rc;
 }
 
 static int pqi_sas_get_bay_identifier(struct sas_rphy *rphy)
 {
-	return -ENXIO;
+	int rc;
+	unsigned long flags;
+	struct pqi_ctrl_info *ctrl_info;
+	struct pqi_scsi_dev *device;
+	struct Scsi_Host *shost;
+
+	if (!rphy)
+		return -ENODEV;
+
+	shost = rphy_to_shost(rphy);
+	ctrl_info = shost_to_hba(shost);
+	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+	device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
+
+	if (!device) {
+		rc = -ENODEV;
+		goto out;
+	}
+
+	if (device->bay == 0xff)
+		rc = -EINVAL;
+	else
+		rc = device->bay;
+
+out:
+	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+	return rc;
 }
 
 static int pqi_sas_phy_reset(struct sas_phy *phy, int hard_reset)
@@ -329,6 +443,128 @@
 	return -EINVAL;
 }
 
+#define CSMI_IOCTL_TIMEOUT	60
+#define SMP_CRC_FIELD_LENGTH	4
+
+static struct bmic_csmi_smp_passthru_buffer *
+pqi_build_csmi_smp_passthru_buffer(struct sas_rphy *rphy,
+	struct bsg_job *job)
+{
+	struct bmic_csmi_smp_passthru_buffer *smp_buf;
+	struct bmic_csmi_ioctl_header *ioctl_header;
+	struct bmic_csmi_smp_passthru *parameters;
+	u32 req_size;
+	u32 resp_size;
+
+	smp_buf = kzalloc(sizeof(*smp_buf), GFP_KERNEL);
+	if (!smp_buf)
+		return NULL;
+
+	req_size = job->request_payload.payload_len;
+	resp_size = job->reply_payload.payload_len;
+
+	ioctl_header = &smp_buf->ioctl_header;
+	put_unaligned_le32(sizeof(smp_buf->ioctl_header),
+		&ioctl_header->header_length);
+	put_unaligned_le32(CSMI_IOCTL_TIMEOUT, &ioctl_header->timeout);
+	put_unaligned_le32(CSMI_CC_SAS_SMP_PASSTHRU,
+		&ioctl_header->control_code);
+	put_unaligned_le32(sizeof(smp_buf->parameters), &ioctl_header->length);
+
+	parameters = &smp_buf->parameters;
+	parameters->phy_identifier = rphy->identify.phy_identifier;
+	parameters->port_identifier = 0;
+	parameters->connection_rate = 0;
+	put_unaligned_be64(rphy->identify.sas_address,
+		&parameters->destination_sas_address);
+
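+	/* Exclude the trailing 4-byte SMP CRC field from the request length. */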
+	if (req_size > SMP_CRC_FIELD_LENGTH)
+		req_size -= SMP_CRC_FIELD_LENGTH;
+
+	put_unaligned_le32(req_size, &parameters->request_length);
+
+	put_unaligned_le32(resp_size, &parameters->response_length);
+
+	sg_copy_to_buffer(job->request_payload.sg_list,
+		job->request_payload.sg_cnt, &parameters->request,
+		req_size);
+
+	return smp_buf;
+}
+
+static unsigned int pqi_build_sas_smp_handler_reply(
+	struct bmic_csmi_smp_passthru_buffer *smp_buf, struct bsg_job *job,
+	struct pqi_raid_error_info *error_info)
+{
+	sg_copy_from_buffer(job->reply_payload.sg_list,
+		job->reply_payload.sg_cnt, &smp_buf->parameters.response,
+		le32_to_cpu(smp_buf->parameters.response_length));
+
+	job->reply_len = le16_to_cpu(error_info->sense_data_length);
+	memcpy(job->reply, error_info->data,
+			le16_to_cpu(error_info->sense_data_length));
+
+	return job->reply_payload.payload_len -
+		get_unaligned_le32(&error_info->data_in_transferred);
+}
+
+void pqi_sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
+	struct sas_rphy *rphy)
+{
+	int rc;
+	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+	struct bmic_csmi_smp_passthru_buffer *smp_buf;
+	struct pqi_raid_error_info error_info;
+	unsigned int reslen = 0;
+
+	pqi_ctrl_busy(ctrl_info);
+
+	if (job->reply_payload.payload_len == 0) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	if (!rphy) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	if (rphy->identify.device_type != SAS_FANOUT_EXPANDER_DEVICE) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	if (job->request_payload.sg_cnt > 1 || job->reply_payload.sg_cnt > 1) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	if (pqi_ctrl_offline(ctrl_info)) {
+		rc = -ENXIO;
+		goto out;
+	}
+
+	if (pqi_ctrl_blocked(ctrl_info)) {
+		rc = -EBUSY;
+		goto out;
+	}
+
+	smp_buf = pqi_build_csmi_smp_passthru_buffer(rphy, job);
+	if (!smp_buf) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	rc = pqi_csmi_smp_passthru(ctrl_info, smp_buf, sizeof(*smp_buf),
+		&error_info);
+	if (rc)
+		goto out;
+
+	reslen = pqi_build_sas_smp_handler_reply(smp_buf, job, &error_info);
+out:
+	bsg_job_done(job, rc, reslen);
+	pqi_ctrl_unbusy(ctrl_info);
+}
+
 struct sas_function_template pqi_sas_transport_functions = {
 	.get_linkerrors = pqi_sas_get_linkerrors,
 	.get_enclosure_identifier = pqi_sas_get_enclosure_identifier,
@@ -338,4 +574,5 @@
 	.phy_setup = pqi_sas_phy_setup,
 	.phy_release = pqi_sas_phy_release,
 	.set_phy_speed = pqi_sas_phy_speed,
+	.smp_handler = pqi_sas_smp_handler,
 };
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c
index 5141bd4..f0d6e88 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.c
+++ b/drivers/scsi/smartpqi/smartpqi_sis.c
@@ -1,18 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    driver for Microsemi PQI-based storage controllers
- *    Copyright (c) 2016-2017 Microsemi Corporation
+ *    Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
+ *    Copyright (c) 2016-2018 Microsemi Corporation
  *    Copyright (c) 2016 PMC-Sierra, Inc.
  *
- *    This program is free software; you can redistribute it and/or modify
- *    it under the terms of the GNU General Public License as published by
- *    the Free Software Foundation; version 2 of the License.
- *
- *    This program is distributed in the hope that it will be useful,
- *    but WITHOUT ANY WARRANTY; without even the implied warranty of
- *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- *    NON INFRINGEMENT.  See the GNU General Public License for more details.
- *
- *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
+ *    Questions/Comments/Bugfixes to storagedev@microchip.com
  *
  */
 
@@ -34,6 +27,7 @@
 #define SIS_REENABLE_SIS_MODE			0x1
 #define SIS_ENABLE_MSIX				0x40
 #define SIS_ENABLE_INTX				0x80
+#define SIS_SOFT_RESET				0x100
 #define SIS_CMD_READY				0x200
 #define SIS_TRIGGER_SHUTDOWN			0x800000
 #define SIS_PQI_RESET_QUIESCE			0x1000000
@@ -59,7 +53,7 @@
 
 #define SIS_CTRL_KERNEL_UP			0x80
 #define SIS_CTRL_KERNEL_PANIC			0x100
-#define SIS_CTRL_READY_TIMEOUT_SECS		30
+#define SIS_CTRL_READY_TIMEOUT_SECS		180
 #define SIS_CTRL_READY_RESUME_TIMEOUT_SECS	90
 #define SIS_CTRL_READY_POLL_INTERVAL_MSECS	10
 
@@ -90,7 +84,7 @@
 	unsigned long timeout;
 	u32 status;
 
-	timeout = (timeout_secs * HZ) + jiffies;
+	timeout = (timeout_secs * PQI_HZ) + jiffies;
 
 	while (1) {
 		status = readl(&ctrl_info->registers->sis_firmware_status);
@@ -202,7 +196,7 @@
 	 * the top of the loop in order to give the controller time to start
 	 * processing the command before we start polling.
 	 */
-	timeout = (SIS_CMD_COMPLETE_TIMEOUT_SECS * HZ) + jiffies;
+	timeout = (SIS_CMD_COMPLETE_TIMEOUT_SECS * PQI_HZ) + jiffies;
 	while (1) {
 		msleep(SIS_CMD_COMPLETE_POLL_INTERVAL_MSECS);
 		doorbell = readl(&registers->sis_ctrl_to_host_doorbell);
@@ -316,9 +310,9 @@
 	put_unaligned_le32(ctrl_info->max_io_slots,
 		&base_struct->error_buffer_num_elements);
 
-	bus_address = pci_map_single(ctrl_info->pci_dev, base_struct,
-		sizeof(*base_struct), PCI_DMA_TODEVICE);
-	if (pci_dma_mapping_error(ctrl_info->pci_dev, bus_address)) {
+	bus_address = dma_map_single(&ctrl_info->pci_dev->dev, base_struct,
+		sizeof(*base_struct), DMA_TO_DEVICE);
+	if (dma_mapping_error(&ctrl_info->pci_dev->dev, bus_address)) {
 		rc = -ENOMEM;
 		goto out;
 	}
@@ -331,9 +325,8 @@
 	rc = sis_send_sync_cmd(ctrl_info, SIS_CMD_INIT_BASE_STRUCT_ADDRESS,
 		&params);
 
-	pci_unmap_single(ctrl_info->pci_dev, bus_address, sizeof(*base_struct),
-		PCI_DMA_TODEVICE);
-
+	dma_unmap_single(&ctrl_info->pci_dev->dev, bus_address,
+			sizeof(*base_struct), DMA_TO_DEVICE);
 out:
 	kfree(base_struct_unaligned);
 
@@ -349,7 +342,7 @@
 	u32 doorbell_register;
 	unsigned long timeout;
 
-	timeout = (SIS_DOORBELL_BIT_CLEAR_TIMEOUT_SECS * HZ) + jiffies;
+	timeout = (SIS_DOORBELL_BIT_CLEAR_TIMEOUT_SECS * PQI_HZ) + jiffies;
 
 	while (1) {
 		doorbell_register =
@@ -421,6 +414,12 @@
 	return readl(&ctrl_info->registers->sis_driver_scratch);
 }
 
+void sis_soft_reset(struct pqi_ctrl_info *ctrl_info)
+{
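+	/* Ring the host-to-controller doorbell to request a soft reset. */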
+	writel(SIS_SOFT_RESET,
+		&ctrl_info->registers->sis_host_to_ctrl_doorbell);
+}
+
 static void __attribute__((unused)) verify_structures(void)
 {
 	BUILD_BUG_ON(offsetof(struct sis_base_struct,
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.h b/drivers/scsi/smartpqi/smartpqi_sis.h
index 2bf889d..86b0e48 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.h
+++ b/drivers/scsi/smartpqi/smartpqi_sis.h
@@ -1,18 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  *    driver for Microsemi PQI-based storage controllers
- *    Copyright (c) 2016-2017 Microsemi Corporation
+ *    Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
+ *    Copyright (c) 2016-2018 Microsemi Corporation
  *    Copyright (c) 2016 PMC-Sierra, Inc.
  *
- *    This program is free software; you can redistribute it and/or modify
- *    it under the terms of the GNU General Public License as published by
- *    the Free Software Foundation; version 2 of the License.
- *
- *    This program is distributed in the hope that it will be useful,
- *    but WITHOUT ANY WARRANTY; without even the implied warranty of
- *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- *    NON INFRINGEMENT.  See the GNU General Public License for more details.
- *
- *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
+ *    Questions/Comments/Bugfixes to storagedev@microchip.com
  *
  */
 
@@ -33,5 +26,6 @@
 int sis_reenable_sis_mode(struct pqi_ctrl_info *ctrl_info);
 void sis_write_driver_scratch(struct pqi_ctrl_info *ctrl_info, u32 value);
 u32 sis_read_driver_scratch(struct pqi_ctrl_info *ctrl_info);
+void sis_soft_reset(struct pqi_ctrl_info *ctrl_info);
 
 #endif	/* _SMARTPQI_SIS_H */
diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c
index 1f9a087..a85d52b 100644
--- a/drivers/scsi/sni_53c710.c
+++ b/drivers/scsi/sni_53c710.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /* -*- mode: c; c-basic-offset: 8 -*- */
 
 /* SNI RM driver
@@ -5,19 +6,6 @@
  * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com
 **-----------------------------------------------------------------------------
 **
-**  This program is free software; you can redistribute it and/or modify
-**  it under the terms of the GNU General Public License as published by
-**  the Free Software Foundation; either version 2 of the License, or
-**  (at your option) any later version.
-**
-**  This program is distributed in the hope that it will be useful,
-**  but WITHOUT ANY WARRANTY; without even the implied warranty of
-**  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-**  GNU General Public License for more details.
-**
-**  You should have received a copy of the GNU General Public License
-**  along with this program; if not, write to the Free Software
-**  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 **
 **-----------------------------------------------------------------------------
  */
@@ -78,10 +66,8 @@
 
 	base = res->start;
 	hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
-	if (!hostdata) {
-		dev_printk(KERN_ERR, dev, "Failed to allocate host data\n");
+	if (!hostdata)
 		return -ENOMEM;
-	}
 
 	hostdata->dev = &dev->dev;
 	dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
diff --git a/drivers/scsi/snic/snic_debugfs.c b/drivers/scsi/snic/snic_debugfs.c
index 0abe17c..2b34936 100644
--- a/drivers/scsi/snic/snic_debugfs.c
+++ b/drivers/scsi/snic/snic_debugfs.c
@@ -30,33 +30,13 @@
  * snic directory and statistics directory for trace buffer and
  * stats logging
  */
-
-int
-snic_debugfs_init(void)
+void snic_debugfs_init(void)
 {
-	int rc = -1;
-	struct dentry *de = NULL;
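+	/*
+	 * Failures of debugfs_create_*() are deliberately ignored;
+	 * debugfs support is best effort.
+	 */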
+	snic_glob->trc_root = debugfs_create_dir("snic", NULL);
 
-	de = debugfs_create_dir("snic", NULL);
-	if (!de) {
-		SNIC_DBG("Cannot create debugfs root\n");
-
-		return rc;
-	}
-	snic_glob->trc_root = de;
-
-	de = debugfs_create_dir("statistics", snic_glob->trc_root);
-	if (!de) {
-		SNIC_DBG("Cannot create Statistics directory\n");
-
-		return rc;
-	}
-	snic_glob->stats_root = de;
-
-	rc = 0;
-
-	return rc;
-} /* end of snic_debugfs_init */
+	snic_glob->stats_root = debugfs_create_dir("statistics",
+						   snic_glob->trc_root);
+}
 
 /*
  * snic_debugfs_term - Tear down debugfs infrastructure
@@ -391,56 +371,23 @@
  * It will create file stats and reset_stats under statistics/host# directory
  * to log per snic stats
  */
-int
-snic_stats_debugfs_init(struct snic *snic)
+void snic_stats_debugfs_init(struct snic *snic)
 {
-	int rc = -1;
 	char name[16];
-	struct dentry *de = NULL;
 
 	snprintf(name, sizeof(name), "host%d", snic->shost->host_no);
-	if (!snic_glob->stats_root) {
-		SNIC_DBG("snic_stats root doesn't exist\n");
 
-		return rc;
-	}
+	snic->stats_host = debugfs_create_dir(name, snic_glob->stats_root);
 
-	de = debugfs_create_dir(name, snic_glob->stats_root);
-	if (!de) {
-		SNIC_DBG("Cannot create host directory\n");
+	snic->stats_file = debugfs_create_file("stats", S_IFREG|S_IRUGO,
+					       snic->stats_host, snic,
+					       &snic_stats_fops);
 
-		return rc;
-	}
-	snic->stats_host = de;
-
-	de = debugfs_create_file("stats",
-				S_IFREG|S_IRUGO,
-				snic->stats_host,
-				snic,
-				&snic_stats_fops);
-	if (!de) {
-		SNIC_DBG("Cannot create host's stats file\n");
-
-		return rc;
-	}
-	snic->stats_file = de;
-
-	de = debugfs_create_file("reset_stats",
-				S_IFREG|S_IRUGO|S_IWUSR,
-				snic->stats_host,
-				snic,
-				&snic_reset_stats_fops);
-
-	if (!de) {
-		SNIC_DBG("Cannot create host's reset_stats file\n");
-
-		return rc;
-	}
-	snic->reset_stats_file = de;
-	rc = 0;
-
-	return rc;
-} /* end of snic_stats_debugfs_init */
+	snic->reset_stats_file = debugfs_create_file("reset_stats",
+						     S_IFREG|S_IRUGO|S_IWUSR,
+						     snic->stats_host, snic,
+						     &snic_reset_stats_fops);
+}
 
 /*
  * snic_stats_debugfs_remove - Tear down debugfs infrastructure of stats
@@ -517,46 +464,18 @@
  * snic_trc_debugfs_init : creates trace/tracing_enable files for trace
  * under debugfs
  */
-int
-snic_trc_debugfs_init(void)
+void snic_trc_debugfs_init(void)
 {
-	struct dentry *de = NULL;
-	int ret = -1;
+	snic_glob->trc.trc_enable = debugfs_create_bool("tracing_enable",
+							S_IFREG | S_IRUGO | S_IWUSR,
+							snic_glob->trc_root,
+							&snic_glob->trc.enable);
 
-	if (!snic_glob->trc_root) {
-		SNIC_ERR("Debugfs root directory for snic doesn't exist.\n");
-
-		return ret;
-	}
-
-	de = debugfs_create_bool("tracing_enable",
-				 S_IFREG | S_IRUGO | S_IWUSR,
-				 snic_glob->trc_root,
-				 &snic_glob->trc.enable);
-
-	if (!de) {
-		SNIC_ERR("Can't create trace_enable file.\n");
-
-		return ret;
-	}
-	snic_glob->trc.trc_enable = de;
-
-	de = debugfs_create_file("trace",
-				 S_IFREG | S_IRUGO | S_IWUSR,
-				 snic_glob->trc_root,
-				 NULL,
-				 &snic_trc_fops);
-
-	if (!de) {
-		SNIC_ERR("Cannot create trace file.\n");
-
-		return ret;
-	}
-	snic_glob->trc.trc_file = de;
-	ret = 0;
-
-	return ret;
-} /* end of snic_trc_debugfs_init */
+	snic_glob->trc.trc_file = debugfs_create_file("trace",
+						      S_IFREG | S_IRUGO | S_IWUSR,
+						      snic_glob->trc_root, NULL,
+						      &snic_trc_fops);
+}
 
 /*
  * snic_trc_debugfs_term : cleans up the files created for trace under debugfs
diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c
index b106596..e9ccfb9 100644
--- a/drivers/scsi/snic/snic_disc.c
+++ b/drivers/scsi/snic/snic_disc.c
@@ -111,8 +111,8 @@
 
 	SNIC_BUG_ON((((unsigned long)buf) % SNIC_SG_DESC_ALIGN) != 0);
 
-	pa = pci_map_single(snic->pdev, buf, buf_len, PCI_DMA_FROMDEVICE);
-	if (pci_dma_mapping_error(snic->pdev, pa)) {
+	pa = dma_map_single(&snic->pdev->dev, buf, buf_len, DMA_FROM_DEVICE);
+	if (dma_mapping_error(&snic->pdev->dev, pa)) {
 		SNIC_HOST_ERR(snic->shost,
 			      "Rpt-tgt rspbuf %p: PCI DMA Mapping Failed\n",
 			      buf);
@@ -138,7 +138,8 @@
 
 	ret = snic_queue_wq_desc(snic, rqi->req, rqi->req_len);
 	if (ret) {
-		pci_unmap_single(snic->pdev, pa, buf_len, PCI_DMA_FROMDEVICE);
+		dma_unmap_single(&snic->pdev->dev, pa, buf_len,
+				 DMA_FROM_DEVICE);
 		kfree(buf);
 		rqi->sge_va = 0;
 		snic_release_untagged_req(snic, rqi);
diff --git a/drivers/scsi/snic/snic_io.c b/drivers/scsi/snic/snic_io.c
index 8e69548..159ee94 100644
--- a/drivers/scsi/snic/snic_io.c
+++ b/drivers/scsi/snic/snic_io.c
@@ -102,7 +102,8 @@
 	struct snic_req_info *rqi = NULL;
 	unsigned long flags;
 
-	pci_unmap_single(snic->pdev, buf->dma_addr, buf->len, PCI_DMA_TODEVICE);
+	dma_unmap_single(&snic->pdev->dev, buf->dma_addr, buf->len,
+			 DMA_TO_DEVICE);
 
 	rqi = req_to_rqi(req);
 	spin_lock_irqsave(&snic->spl_cmd_lock, flags);
@@ -172,8 +173,8 @@
 	snic_print_desc(__func__, os_buf, len);
 
 	/* Map request buffer */
-	pa = pci_map_single(snic->pdev, os_buf, len, PCI_DMA_TODEVICE);
-	if (pci_dma_mapping_error(snic->pdev, pa)) {
+	pa = dma_map_single(&snic->pdev->dev, os_buf, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(&snic->pdev->dev, pa)) {
 		SNIC_HOST_ERR(snic->shost, "qdesc: PCI DMA Mapping Fail.\n");
 
 		return -ENOMEM;
@@ -186,7 +187,7 @@
 	spin_lock_irqsave(&snic->wq_lock[q_num], flags);
 	desc_avail = snic_wqdesc_avail(snic, q_num, req->hdr.type);
 	if (desc_avail <= 0) {
-		pci_unmap_single(snic->pdev, pa, len, PCI_DMA_TODEVICE);
+		dma_unmap_single(&snic->pdev->dev, pa, len, DMA_TO_DEVICE);
 		req->req_pa = 0;
 		spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);
 		atomic64_inc(&snic->s_stats.misc.wq_alloc_fail);
@@ -350,29 +351,29 @@
 
 	if (rqi->abort_req) {
 		if (rqi->abort_req->req_pa)
-			pci_unmap_single(snic->pdev,
+			dma_unmap_single(&snic->pdev->dev,
 					 rqi->abort_req->req_pa,
 					 sizeof(struct snic_host_req),
-					 PCI_DMA_TODEVICE);
+					 DMA_TO_DEVICE);
 
 		mempool_free(rqi->abort_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
 	}
 
 	if (rqi->dr_req) {
 		if (rqi->dr_req->req_pa)
-			pci_unmap_single(snic->pdev,
+			dma_unmap_single(&snic->pdev->dev,
 					 rqi->dr_req->req_pa,
 					 sizeof(struct snic_host_req),
-					 PCI_DMA_TODEVICE);
+					 DMA_TO_DEVICE);
 
 		mempool_free(rqi->dr_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
 	}
 
 	if (rqi->req->req_pa)
-		pci_unmap_single(snic->pdev,
+		dma_unmap_single(&snic->pdev->dev,
 				 rqi->req->req_pa,
 				 rqi->req_len,
-				 PCI_DMA_TODEVICE);
+				 DMA_TO_DEVICE);
 
 	mempool_free(rqi, snic->req_pool[rqi->rq_pool_type]);
 }
@@ -384,10 +385,10 @@
 
 	sgd = req_to_sgl(rqi_to_req(rqi));
 	SNIC_BUG_ON(sgd[0].addr == 0);
-	pci_unmap_single(snic->pdev,
+	dma_unmap_single(&snic->pdev->dev,
 			 le64_to_cpu(sgd[0].addr),
 			 le32_to_cpu(sgd[0].len),
-			 PCI_DMA_FROMDEVICE);
+			 DMA_FROM_DEVICE);
 }
 
 /*
diff --git a/drivers/scsi/snic/snic_main.c b/drivers/scsi/snic/snic_main.c
index 7cf70aa..14f4ce6 100644
--- a/drivers/scsi/snic/snic_main.c
+++ b/drivers/scsi/snic/snic_main.c
@@ -127,7 +127,6 @@
 	.this_id = -1,
 	.cmd_per_lun = SNIC_DFLT_QUEUE_DEPTH,
 	.can_queue = SNIC_MAX_IO_REQ,
-	.use_clustering = ENABLE_CLUSTERING,
 	.sg_tablesize = SNIC_MAX_SG_DESC_CNT,
 	.max_sectors = 0x800,
 	.shost_attrs = snic_attrs,
@@ -398,12 +397,7 @@
 		       PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
 #ifdef CONFIG_SCSI_SNIC_DEBUG_FS
 	/* Per snic debugfs init */
-	ret = snic_stats_debugfs_init(snic);
-	if (ret) {
-		SNIC_HOST_ERR(snic->shost,
-			      "Failed to initialize debugfs stats\n");
-		snic_stats_debugfs_remove(snic);
-	}
+	snic_stats_debugfs_init(snic);
 #endif
 
 	/* Setup PCI Resources */
@@ -435,37 +429,17 @@
 	 * limitation for the device. Try 43-bit first, and
 	 * fail to 32-bit.
 	 */
-	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(43));
+	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(43));
 	if (ret) {
-		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 		if (ret) {
 			SNIC_HOST_ERR(shost,
 				      "No Usable DMA Configuration, aborting %d\n",
 				      ret);
-
-			goto err_rel_regions;
-		}
-
-		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-		if (ret) {
-			SNIC_HOST_ERR(shost,
-				      "Unable to obtain 32-bit DMA for consistent allocations, aborting: %d\n",
-				      ret);
-
-			goto err_rel_regions;
-		}
-	} else {
-		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(43));
-		if (ret) {
-			SNIC_HOST_ERR(shost,
-				      "Unable to obtain 43-bit DMA for consistent allocations. aborting: %d\n",
-				      ret);
-
 			goto err_rel_regions;
 		}
 	}
 
-
 	/* Map vNIC resources from BAR0 */
 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
 		SNIC_HOST_ERR(shost, "BAR0 not memory mappable aborting.\n");
@@ -871,12 +845,7 @@
 #ifdef CONFIG_SCSI_SNIC_DEBUG_FS
 	/* Debugfs related Initialization */
 	/* Create debugfs entries for snic */
-	ret = snic_debugfs_init();
-	if (ret < 0) {
-		SNIC_ERR("Failed to create sysfs dir for tracing and stats.\n");
-		snic_debugfs_term();
-		/* continue even if it fails */
-	}
+	snic_debugfs_init();
 
 	/* Trace related Initialization */
 	/* Allocate memory for trace buffer */
diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c
index d9b2e46..b3650c9 100644
--- a/drivers/scsi/snic/snic_scsi.c
+++ b/drivers/scsi/snic/snic_scsi.c
@@ -146,10 +146,10 @@
 		      CMD_FLAGS(sc));
 
 	if (req->u.icmnd.sense_addr)
-		pci_unmap_single(snic->pdev,
+		dma_unmap_single(&snic->pdev->dev,
 				 le64_to_cpu(req->u.icmnd.sense_addr),
 				 SCSI_SENSE_BUFFERSIZE,
-				 PCI_DMA_FROMDEVICE);
+				 DMA_FROM_DEVICE);
 
 	scsi_dma_unmap(sc);
 
@@ -185,12 +185,11 @@
 		}
 	}
 
-	pa = pci_map_single(snic->pdev,
+	pa = dma_map_single(&snic->pdev->dev,
 			    sc->sense_buffer,
 			    SCSI_SENSE_BUFFERSIZE,
-			    PCI_DMA_FROMDEVICE);
-
-	if (pci_dma_mapping_error(snic->pdev, pa)) {
+			    DMA_FROM_DEVICE);
+	if (dma_mapping_error(&snic->pdev->dev, pa)) {
 		SNIC_HOST_ERR(snic->shost,
 			      "QIcmnd:PCI Map Failed for sns buf %p tag %x\n",
 			      sc->sense_buffer, snic_cmd_tag(sc));
@@ -2001,7 +2000,7 @@
 	}
 
 dr_failed:
-	SNIC_BUG_ON(!spin_is_locked(io_lock));
+	lockdep_assert_held(io_lock);
 	if (rqi)
 		CMD_SP(sc) = NULL;
 	spin_unlock_irqrestore(io_lock, flags);
@@ -2604,7 +2603,7 @@
 	ret = SUCCESS;
 
 skip_internal_abts:
-	SNIC_BUG_ON(!spin_is_locked(io_lock));
+	lockdep_assert_held(io_lock);
 	spin_unlock_irqrestore(io_lock, flags);
 
 	return ret;
diff --git a/drivers/scsi/snic/snic_stats.h b/drivers/scsi/snic/snic_stats.h
index fd1066b..faf0cb6 100644
--- a/drivers/scsi/snic/snic_stats.h
+++ b/drivers/scsi/snic/snic_stats.h
@@ -99,7 +99,7 @@
 	atomic64_t io_cmpl_skip;
 };
 
-int snic_stats_debugfs_init(struct snic *);
+void snic_stats_debugfs_init(struct snic *);
 void snic_stats_debugfs_remove(struct snic *);
 
 /* Auxiliary function to update the active IO counter */
diff --git a/drivers/scsi/snic/snic_trc.c b/drivers/scsi/snic/snic_trc.c
index fc60c93..f23fe2f 100644
--- a/drivers/scsi/snic/snic_trc.c
+++ b/drivers/scsi/snic/snic_trc.c
@@ -126,7 +126,7 @@
 	int tbuf_sz = 0, ret;
 
 	tbuf_sz = (snic_trace_max_pages * PAGE_SIZE);
-	tbuf = vmalloc(tbuf_sz);
+	tbuf = vzalloc(tbuf_sz);
 	if (!tbuf) {
 		SNIC_ERR("Failed to Allocate Trace Buffer Size. %d\n", tbuf_sz);
 		SNIC_ERR("Trace Facility not enabled.\n");
@@ -135,16 +135,10 @@
 		return ret;
 	}
 
-	memset(tbuf, 0, tbuf_sz);
 	trc->buf = (struct snic_trc_data *) tbuf;
 	spin_lock_init(&trc->lock);
 
-	ret = snic_trc_debugfs_init();
-	if (ret) {
-		SNIC_ERR("Failed to create Debugfs Files.\n");
-
-		goto error;
-	}
+	snic_trc_debugfs_init();
 
 	trc->max_idx = (tbuf_sz / SNIC_TRC_ENTRY_SZ);
 	trc->rd_idx = trc->wr_idx = 0;
@@ -154,11 +148,6 @@
 	ret = 0;
 
 	return ret;
-
-error:
-	snic_trc_free();
-
-	return ret;
 } /* end of snic_trc_init */
 
 /*
diff --git a/drivers/scsi/snic/snic_trc.h b/drivers/scsi/snic/snic_trc.h
index b37f886..87dcc74 100644
--- a/drivers/scsi/snic/snic_trc.h
+++ b/drivers/scsi/snic/snic_trc.h
@@ -53,12 +53,12 @@
 
 int snic_trc_init(void);
 void snic_trc_free(void);
-int snic_trc_debugfs_init(void);
+void snic_trc_debugfs_init(void);
 void snic_trc_debugfs_term(void);
 struct snic_trc_data *snic_get_trc_buf(void);
 int snic_get_trc_data(char *buf, int buf_sz);
 
-int snic_debugfs_init(void);
+void snic_debugfs_init(void);
 void snic_debugfs_term(void);
 
 static inline void
diff --git a/drivers/scsi/snic/vnic_dev.c b/drivers/scsi/snic/vnic_dev.c
index dad5fc6..05e374f 100644
--- a/drivers/scsi/snic/vnic_dev.c
+++ b/drivers/scsi/snic/vnic_dev.c
@@ -225,10 +225,9 @@
 {
 	svnic_dev_desc_ring_size(ring, desc_count, desc_size);
 
-	ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
-		ring->size_unaligned,
-		&ring->base_addr_unaligned);
-
+	ring->descs_unaligned = dma_alloc_coherent(&vdev->pdev->dev,
+			ring->size_unaligned, &ring->base_addr_unaligned,
+			GFP_KERNEL);
 	if (!ring->descs_unaligned) {
 		pr_err("Failed to allocate ring (size=%d), aborting\n",
 			(int)ring->size);
@@ -251,7 +250,7 @@
 void svnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
 {
 	if (ring->descs) {
-		pci_free_consistent(vdev->pdev,
+		dma_free_coherent(&vdev->pdev->dev,
 			ring->size_unaligned,
 			ring->descs_unaligned,
 			ring->base_addr_unaligned);
@@ -470,9 +469,9 @@
 	int err = 0;
 
 	if (!vdev->fw_info) {
-		vdev->fw_info = pci_alloc_consistent(vdev->pdev,
+		vdev->fw_info = dma_alloc_coherent(&vdev->pdev->dev,
 			sizeof(struct vnic_devcmd_fw_info),
-			&vdev->fw_info_pa);
+			&vdev->fw_info_pa, GFP_KERNEL);
 		if (!vdev->fw_info)
 			return -ENOMEM;
 
@@ -534,8 +533,8 @@
 	int wait = VNIC_DVCMD_TMO;
 
 	if (!vdev->stats) {
-		vdev->stats = pci_alloc_consistent(vdev->pdev,
-			sizeof(struct vnic_stats), &vdev->stats_pa);
+		vdev->stats = dma_alloc_coherent(&vdev->pdev->dev,
+			sizeof(struct vnic_stats), &vdev->stats_pa, GFP_KERNEL);
 		if (!vdev->stats)
 			return -ENOMEM;
 	}
@@ -607,9 +606,9 @@
 	int wait = VNIC_DVCMD_TMO;
 
 	if (!vdev->notify) {
-		vdev->notify = pci_alloc_consistent(vdev->pdev,
+		vdev->notify = dma_alloc_coherent(&vdev->pdev->dev,
 			sizeof(struct vnic_devcmd_notify),
-			&vdev->notify_pa);
+			&vdev->notify_pa, GFP_KERNEL);
 		if (!vdev->notify)
 			return -ENOMEM;
 	}
@@ -697,21 +696,21 @@
 {
 	if (vdev) {
 		if (vdev->notify)
-			pci_free_consistent(vdev->pdev,
+			dma_free_coherent(&vdev->pdev->dev,
 				sizeof(struct vnic_devcmd_notify),
 				vdev->notify,
 				vdev->notify_pa);
 		if (vdev->linkstatus)
-			pci_free_consistent(vdev->pdev,
+			dma_free_coherent(&vdev->pdev->dev,
 				sizeof(u32),
 				vdev->linkstatus,
 				vdev->linkstatus_pa);
 		if (vdev->stats)
-			pci_free_consistent(vdev->pdev,
+			dma_free_coherent(&vdev->pdev->dev,
 				sizeof(struct vnic_stats),
 				vdev->stats, vdev->stats_pa);
 		if (vdev->fw_info)
-			pci_free_consistent(vdev->pdev,
+			dma_free_coherent(&vdev->pdev->dev,
 				sizeof(struct vnic_devcmd_fw_info),
 				vdev->fw_info, vdev->fw_info_pa);
 		if (vdev->devcmd2)
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index d0389b2..4664fdf 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  *  sr.c Copyright (C) 1992 David Giller
  *           Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
@@ -43,6 +44,7 @@
 #include <linux/interrupt.h>
 #include <linux/init.h>
 #include <linux/blkdev.h>
+#include <linux/blk-pm.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
 #include <linux/pm_runtime.h>
@@ -79,7 +81,7 @@
 static DEFINE_MUTEX(sr_mutex);
 static int sr_probe(struct device *);
 static int sr_remove(struct device *);
-static int sr_init_command(struct scsi_cmnd *SCpnt);
+static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt);
 static int sr_done(struct scsi_cmnd *);
 static int sr_runtime_suspend(struct device *dev);
 
@@ -383,22 +385,21 @@
 	return good_bytes;
 }
 
-static int sr_init_command(struct scsi_cmnd *SCpnt)
+static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt)
 {
 	int block = 0, this_count, s_size;
 	struct scsi_cd *cd;
 	struct request *rq = SCpnt->request;
-	int ret;
+	blk_status_t ret;
 
 	ret = scsi_init_io(SCpnt);
-	if (ret != BLKPREP_OK)
+	if (ret != BLK_STS_OK)
 		goto out;
-	WARN_ON_ONCE(SCpnt != rq->special);
 	cd = scsi_cd(rq->rq_disk);
 
 	/* from here on until we're complete, any goto out
 	 * is used for a killable error condition */
-	ret = BLKPREP_KILL;
+	ret = BLK_STS_IOERR;
 
 	SCSI_LOG_HLQUEUE(1, scmd_printk(KERN_INFO, SCpnt,
 		"Doing sr request, block = %d\n", block));
@@ -515,7 +516,7 @@
 	 * This indicates that the command is ready from our end to be
 	 * queued.
 	 */
-	ret = BLKPREP_OK;
+	ret = BLK_STS_OK;
  out:
 	return ret;
 }
@@ -716,6 +717,7 @@
 	disk->fops = &sr_bdops;
 	disk->flags = GENHD_FL_CD | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
 	disk->events = DISK_EVENT_MEDIA_CHANGE | DISK_EVENT_EJECT_REQUEST;
+	disk->event_flags = DISK_EVENT_FLAG_POLL | DISK_EVENT_FLAG_UEVENT;
 
 	blk_queue_rq_timeout(sdev->request_queue, SR_TIMEOUT);
 
@@ -758,7 +760,7 @@
 
 	dev_set_drvdata(dev, cd);
 	disk->flags |= GENHD_FL_REMOVABLE;
-	device_add_disk(&sdev->sdev_gendev, disk);
+	device_add_disk(&sdev->sdev_gendev, disk, NULL);
 
 	sdev_printk(KERN_DEBUG, sdev,
 		    "Attached scsi CD-ROM %s\n", cd->cdi.name);
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 307df2f..e3266a6 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
    SCSI Tape Driver for Linux version 1.1 and newer. See the accompanying
    file Documentation/scsi/st.txt for more information.
@@ -169,7 +170,7 @@
 
 /* Remove mode bits and auto-rewind bit (7) */
 #define TAPE_NR(x) ( ((iminor(x) & ~255) >> (ST_NBR_MODE_BITS + 1)) | \
-    (iminor(x) & ~(-1 << ST_MODE_SHIFT)) )
+	(iminor(x) & ((1 << ST_MODE_SHIFT)-1)))
 #define TAPE_MODE(x) ((iminor(x) & ST_MODE_MASK) >> ST_MODE_SHIFT)
 
 /* Construct the minor number from the device (d), mode (m), and non-rewind (n) data */
@@ -227,7 +228,6 @@
 
 
 
-#include "osst_detect.h"
 #ifndef SIGS_FROM_OSST
 #define SIGS_FROM_OSST \
 	{"OnStream", "SC-", "", "osst"}, \
@@ -337,12 +337,14 @@
 		switch (sense[0] & 0x7f) {
 		case 0x71:
 			s->deferred = 1;
+			/* fall through */
 		case 0x70:
 			s->fixed_format = 1;
 			s->flags = sense[2] & 0xe0;
 			break;
 		case 0x73:
 			s->deferred = 1;
+			/* fall through */
 		case 0x72:
 			s->fixed_format = 0;
 			ucp = scsi_sense_desc_find(sense, SCSI_SENSE_BUFFERSIZE, 4);
@@ -530,7 +532,7 @@
 		complete(SRpnt->waiting);
 
 	blk_rq_unmap_user(tmp);
-	__blk_put_request(req->q, req);
+	blk_put_request(req);
 }
 
 static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
@@ -2721,6 +2723,7 @@
 	switch (cmd_in) {
 	case MTFSFM:
 		chg_eof = 0;	/* Changed from the FSF after this */
+		/* fall through */
 	case MTFSF:
 		cmd[0] = SPACE;
 		cmd[1] = 0x01;	/* Space FileMarks */
@@ -2735,6 +2738,7 @@
 		break;
 	case MTBSFM:
 		chg_eof = 0;	/* Changed from the FSF after this */
+		/* fall through */
 	case MTBSF:
 		cmd[0] = SPACE;
 		cmd[1] = 0x01;	/* Space FileMarks */
@@ -4262,9 +4266,10 @@
 	if (SDp->type != TYPE_TAPE)
 		return -ENODEV;
 	if ((stp = st_incompatible(SDp))) {
-		sdev_printk(KERN_INFO, SDp, "Found incompatible tape\n");
 		sdev_printk(KERN_INFO, SDp,
-			    "st: The suggested driver is %s.\n", stp);
+			    "OnStream tapes are no longer supported;\n");
+		sdev_printk(KERN_INFO, SDp,
+			    "please mail to linux-scsi@vger.kernel.org.\n");
 		return -ENODEV;
 	}
 
@@ -4918,7 +4923,8 @@
 
         /* Try to fault in all of the necessary pages */
         /* rw==READ means read from drive, write into memory area */
-	res = get_user_pages_fast(uaddr, nr_pages, rw == READ, pages);
+	res = get_user_pages_fast(uaddr, nr_pages, rw == READ ? FOLL_WRITE : 0,
+				  pages);
 
 	/* Errors and no page mapped should return here */
 	if (res < nr_pages)
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 9b20643..33287b6 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -1,16 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * SuperTrak EX Series Storage Controller driver for Linux
  *
  *	Copyright (C) 2005-2015 Promise Technology Inc.
  *
- *	This program is free software; you can redistribute it and/or
- *	modify it under the terms of the GNU General Public License
- *	as published by the Free Software Foundation; either version
- *	2 of the License, or (at your option) any later version.
- *
  *	Written By:
  *		Ed Lin <promise_linux@promise.com>
- *
  */
 
 #include <linux/init.h>
@@ -1489,6 +1484,7 @@
 	.eh_abort_handler		= stex_abort,
 	.eh_host_reset_handler		= stex_reset,
 	.this_id			= -1,
+	.dma_boundary			= PAGE_SIZE - 1,
 };
 
 static struct pci_device_id stex_pci_tbl[] = {
@@ -1617,19 +1613,6 @@
 	},
 };
 
-static int stex_set_dma_mask(struct pci_dev * pdev)
-{
-	int ret;
-
-	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
-		&& !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
-		return 0;
-	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-	if (!ret)
-		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-	return ret;
-}
-
 static int stex_request_irq(struct st_hba *hba)
 {
 	struct pci_dev *pdev = hba->pdev;
@@ -1710,7 +1693,9 @@
 		goto out_release_regions;
 	}
 
-	err = stex_set_dma_mask(pdev);
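+	/* Prefer 64-bit DMA; fall back to 32-bit if that fails. */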
+	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (err)
+		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 	if (err) {
 		printk(KERN_ERR DRV_NAME "(%s): set dma mask failed\n",
 			pci_name(pdev));
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index f03dc03..542d2ba 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2009, Microsoft Corporation.
  *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
  * Authors:
  *   Haiyang Zhang <haiyangz@microsoft.com>
  *   Hank Janssen  <hjanssen@microsoft.com>
@@ -385,8 +373,9 @@
  * This is the end of Protocol specific defines.
  */
 
-static int storvsc_ringbuffer_size = (256 * PAGE_SIZE);
+static int storvsc_ringbuffer_size = (128 * 1024);
 static u32 max_outstanding_req_per_channel;
+static int storvsc_change_queue_depth(struct scsi_device *sdev, int queue_depth);
 
 static int storvsc_vcpus_per_sub_channel = 4;
 
@@ -446,7 +435,6 @@
 
 	bool	 destroy;
 	bool	 drain_notify;
-	bool	 open_sub_channel;
 	atomic_t num_outstanding_req;
 	struct Scsi_Host *host;
 
@@ -636,41 +624,55 @@
 static void handle_sc_creation(struct vmbus_channel *new_sc)
 {
 	struct hv_device *device = new_sc->primary_channel->device_obj;
+	struct device *dev = &device->device;
 	struct storvsc_device *stor_device;
 	struct vmstorage_channel_properties props;
+	int ret;
 
 	stor_device = get_out_stor_device(device);
 	if (!stor_device)
 		return;
 
-	if (stor_device->open_sub_channel == false)
-		return;
-
 	memset(&props, 0, sizeof(struct vmstorage_channel_properties));
 
-	vmbus_open(new_sc,
-		   storvsc_ringbuffer_size,
-		   storvsc_ringbuffer_size,
-		   (void *)&props,
-		   sizeof(struct vmstorage_channel_properties),
-		   storvsc_on_channel_callback, new_sc);
+	ret = vmbus_open(new_sc,
+			 storvsc_ringbuffer_size,
+			 storvsc_ringbuffer_size,
+			 (void *)&props,
+			 sizeof(struct vmstorage_channel_properties),
+			 storvsc_on_channel_callback, new_sc);
 
-	if (new_sc->state == CHANNEL_OPENED_STATE) {
-		stor_device->stor_chns[new_sc->target_cpu] = new_sc;
-		cpumask_set_cpu(new_sc->target_cpu, &stor_device->alloced_cpus);
+	/* In case vmbus_open() fails, we don't use the sub-channel. */
+	if (ret != 0) {
+		dev_err(dev, "Failed to open sub-channel: err=%d\n", ret);
+		return;
 	}
+
+	/* Add the sub-channel to the array of available channels. */
+	stor_device->stor_chns[new_sc->target_cpu] = new_sc;
+	cpumask_set_cpu(new_sc->target_cpu, &stor_device->alloced_cpus);
 }
 
 static void  handle_multichannel_storage(struct hv_device *device, int max_chns)
 {
+	struct device *dev = &device->device;
 	struct storvsc_device *stor_device;
-	int num_cpus = num_online_cpus();
 	int num_sc;
 	struct storvsc_cmd_request *request;
 	struct vstor_packet *vstor_packet;
 	int ret, t;
 
-	num_sc = ((max_chns > num_cpus) ? num_cpus : max_chns);
+	/*
+	 * If the number of CPUs is artificially restricted, such as
+	 * with maxcpus=1 on the kernel boot line, Hyper-V could offer
+	 * sub-channels >= the number of CPUs. These sub-channels
+	 * should not be created. The primary channel is already created
+	 * and assigned to one CPU, so check against # CPUs - 1.
+	 */
+	num_sc = min((int)(num_online_cpus() - 1), max_chns);
+	if (!num_sc)
+		return;
+
 	stor_device = get_out_stor_device(device);
 	if (!stor_device)
 		return;
@@ -679,22 +681,12 @@
 	request = &stor_device->init_request;
 	vstor_packet = &request->vstor_packet;
 
-	stor_device->open_sub_channel = true;
 	/*
 	 * Establish a handler for dealing with subchannels.
 	 */
 	vmbus_set_sc_create_callback(device->channel, handle_sc_creation);
 
 	/*
-	 * Check to see if sub-channels have already been created. This
-	 * can happen when this driver is re-loaded after unloading.
-	 */
-
-	if (vmbus_are_subchannels_present(device->channel))
-		return;
-
-	stor_device->open_sub_channel = false;
-	/*
 	 * Request the host to create sub-channels.
 	 */
 	memset(request, 0, sizeof(struct storvsc_cmd_request));
@@ -710,23 +702,29 @@
 			       VM_PKT_DATA_INBAND,
 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 
-	if (ret != 0)
+	if (ret != 0) {
+		dev_err(dev, "Failed to create sub-channel: err=%d\n", ret);
 		return;
+	}
 
 	t = wait_for_completion_timeout(&request->wait_event, 10*HZ);
-	if (t == 0)
+	if (t == 0) {
+		dev_err(dev, "Failed to create sub-channel: timed out\n");
 		return;
+	}
 
 	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
-	    vstor_packet->status != 0)
+	    vstor_packet->status != 0) {
+		dev_err(dev, "Failed to create sub-channel: op=%d, sts=%d\n",
+			vstor_packet->operation, vstor_packet->status);
 		return;
+	}
 
 	/*
-	 * Now that we created the sub-channels, invoke the check; this
-	 * may trigger the callback.
+	 * We need to do nothing here, because vmbus_process_offer()
+	 * invokes channel->sc_creation_callback, which will open and use
+	 * the sub-channel(s).
 	 */
-	stor_device->open_sub_channel = true;
-	vmbus_are_subchannels_present(device->channel);
 }
 
 static void cache_wwn(struct storvsc_device *stor_device,
@@ -1425,9 +1423,6 @@
 {
 	blk_queue_rq_timeout(sdevice->request_queue, (storvsc_timeout * HZ));
 
-	/* Ensure there are no gaps in presented sgls */
-	blk_queue_virt_boundary(sdevice->request_queue, PAGE_SIZE - 1);
-
 	sdevice->no_write_same = 1;
 
 	/*
@@ -1698,11 +1693,13 @@
 	.slave_configure =	storvsc_device_configure,
 	.cmd_per_lun =		2048,
 	.this_id =		-1,
-	.use_clustering =	ENABLE_CLUSTERING,
 	/* Make sure we don't get an sg segment that crosses a page boundary */
 	.dma_boundary =		PAGE_SIZE-1,
+	/* Ensure there are no gaps in presented sgls */
+	.virt_boundary_mask =	PAGE_SIZE-1,
 	.no_write_same =	1,
 	.track_queue_depth =	1,
+	.change_queue_depth =	storvsc_change_queue_depth,
 };
 
 enum {
@@ -1794,7 +1791,6 @@
 	}
 
 	stor_device->destroy = false;
-	stor_device->open_sub_channel = false;
 	init_waitqueue_head(&stor_device->waiting_to_drain);
 	stor_device->device = device;
 	stor_device->host = host;
@@ -1841,8 +1837,7 @@
 	/*
 	 * Set the number of HW queues we are supporting.
 	 */
-	if (stor_device->num_sc != 0)
-		host->nr_hw_queues = stor_device->num_sc + 1;
+	host->nr_hw_queues = num_present_cpus();
 
 	/*
 	 * Set the error handler work queue.
@@ -1910,6 +1905,15 @@
 	return ret;
 }
 
+/* Change a scsi target's queue depth */
+static int storvsc_change_queue_depth(struct scsi_device *sdev, int queue_depth)
+{
+	if (queue_depth > scsi_driver.can_queue)
+		queue_depth = scsi_driver.can_queue;
+
+	return scsi_change_queue_depth(sdev, queue_depth);
+}
+
 static int storvsc_remove(struct hv_device *dev)
 {
 	struct storvsc_device *stor_device = hv_get_drvdata(dev);
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index 9492638..955e4c9 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Sun3 SCSI stuff by Erik Verbruggen (erik@bigmama.xtdnet.nl)
  *
@@ -396,10 +397,12 @@
 		case CSR_LEFT_3:
 			*vaddr = (dregs->bpack_lo & 0xff00) >> 8;
 			vaddr--;
+			/* Fall through */
 
 		case CSR_LEFT_2:
 			*vaddr = (dregs->bpack_hi & 0x00ff);
 			vaddr--;
+			/* Fall through */
 
 		case CSR_LEFT_1:
 			*vaddr = (dregs->bpack_hi & 0xff00) >> 8;
@@ -500,7 +503,7 @@
 	.this_id		= 7,
 	.sg_tablesize		= SG_NONE,
 	.cmd_per_lun		= 2,
-	.use_clustering		= DISABLE_CLUSTERING,
+	.dma_boundary		= PAGE_SIZE - 1,
 	.cmd_size		= NCR5380_CMD_SIZE,
 };
 
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
index 0b1421c..440a73e 100644
--- a/drivers/scsi/sun3x_esp.c
+++ b/drivers/scsi/sun3x_esp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /* sun3x_esp.c: ESP front-end for Sun3x systems.
  *
  * Copyright (C) 2007,2008 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
@@ -60,30 +61,6 @@
 	return readb(esp->regs + (reg * 4UL));
 }
 
-static dma_addr_t sun3x_esp_map_single(struct esp *esp, void *buf,
-				      size_t sz, int dir)
-{
-	return dma_map_single(esp->dev, buf, sz, dir);
-}
-
-static int sun3x_esp_map_sg(struct esp *esp, struct scatterlist *sg,
-				  int num_sg, int dir)
-{
-	return dma_map_sg(esp->dev, sg, num_sg, dir);
-}
-
-static void sun3x_esp_unmap_single(struct esp *esp, dma_addr_t addr,
-				  size_t sz, int dir)
-{
-	dma_unmap_single(esp->dev, addr, sz, dir);
-}
-
-static void sun3x_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
-			      int num_sg, int dir)
-{
-	dma_unmap_sg(esp->dev, sg, num_sg, dir);
-}
-
 static int sun3x_esp_irq_pending(struct esp *esp)
 {
 	if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
@@ -182,10 +159,6 @@
 static const struct esp_driver_ops sun3x_esp_ops = {
 	.esp_write8	=	sun3x_esp_write8,
 	.esp_read8	=	sun3x_esp_read8,
-	.map_single	=	sun3x_esp_map_single,
-	.map_sg		=	sun3x_esp_map_sg,
-	.unmap_single	=	sun3x_esp_unmap_single,
-	.unmap_sg	=	sun3x_esp_unmap_sg,
 	.irq_pending	=	sun3x_esp_irq_pending,
 	.reset_dma	=	sun3x_esp_reset_dma,
 	.dma_drain	=	sun3x_esp_dma_drain,
@@ -246,7 +219,7 @@
 
 	dev_set_drvdata(&dev->dev, esp);
 
-	err = scsi_esp_register(esp, &dev->dev);
+	err = scsi_esp_register(esp);
 	if (err)
 		goto fail_free_irq;
 
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c
index 747ee64..964130d 100644
--- a/drivers/scsi/sun_esp.c
+++ b/drivers/scsi/sun_esp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /* sun_esp.c: ESP front-end for Sparc SBUS systems.
  *
  * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
@@ -80,7 +81,7 @@
 
 static int esp_sbus_map_regs(struct esp *esp, int hme)
 {
-	struct platform_device *op = esp->dev;
+	struct platform_device *op = to_platform_device(esp->dev);
 	struct resource *res;
 
 	/* On HME, two reg sets exist, first is DVMA,
@@ -100,11 +101,9 @@
 
 static int esp_sbus_map_command_block(struct esp *esp)
 {
-	struct platform_device *op = esp->dev;
-
-	esp->command_block = dma_alloc_coherent(&op->dev, 16,
+	esp->command_block = dma_alloc_coherent(esp->dev, 16,
 						&esp->command_block_dma,
-						GFP_ATOMIC);
+						GFP_KERNEL);
 	if (!esp->command_block)
 		return -ENOMEM;
 	return 0;
@@ -113,7 +112,7 @@
 static int esp_sbus_register_irq(struct esp *esp)
 {
 	struct Scsi_Host *host = esp->host;
-	struct platform_device *op = esp->dev;
+	struct platform_device *op = to_platform_device(esp->dev);
 
 	host->irq = op->archdata.irqs[0];
 	return request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
@@ -121,7 +120,7 @@
 
 static void esp_get_scsi_id(struct esp *esp, struct platform_device *espdma)
 {
-	struct platform_device *op = esp->dev;
+	struct platform_device *op = to_platform_device(esp->dev);
 	struct device_node *dp;
 
 	dp = op->dev.of_node;
@@ -143,7 +142,7 @@
 
 static void esp_get_differential(struct esp *esp)
 {
-	struct platform_device *op = esp->dev;
+	struct platform_device *op = to_platform_device(esp->dev);
 	struct device_node *dp;
 
 	dp = op->dev.of_node;
@@ -155,7 +154,7 @@
 
 static void esp_get_clock_params(struct esp *esp)
 {
-	struct platform_device *op = esp->dev;
+	struct platform_device *op = to_platform_device(esp->dev);
 	struct device_node *bus_dp, *dp;
 	int fmhz;
 
@@ -172,7 +171,7 @@
 static void esp_get_bursts(struct esp *esp, struct platform_device *dma_of)
 {
 	struct device_node *dma_dp = dma_of->dev.of_node;
-	struct platform_device *op = esp->dev;
+	struct platform_device *op = to_platform_device(esp->dev);
 	struct device_node *dp;
 	u8 bursts, val;
 
@@ -212,38 +211,6 @@
 	return sbus_readb(esp->regs + (reg * 4UL));
 }
 
-static dma_addr_t sbus_esp_map_single(struct esp *esp, void *buf,
-				      size_t sz, int dir)
-{
-	struct platform_device *op = esp->dev;
-
-	return dma_map_single(&op->dev, buf, sz, dir);
-}
-
-static int sbus_esp_map_sg(struct esp *esp, struct scatterlist *sg,
-				  int num_sg, int dir)
-{
-	struct platform_device *op = esp->dev;
-
-	return dma_map_sg(&op->dev, sg, num_sg, dir);
-}
-
-static void sbus_esp_unmap_single(struct esp *esp, dma_addr_t addr,
-				  size_t sz, int dir)
-{
-	struct platform_device *op = esp->dev;
-
-	dma_unmap_single(&op->dev, addr, sz, dir);
-}
-
-static void sbus_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
-			      int num_sg, int dir)
-{
-	struct platform_device *op = esp->dev;
-
-	dma_unmap_sg(&op->dev, sg, num_sg, dir);
-}
-
 static int sbus_esp_irq_pending(struct esp *esp)
 {
 	if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
@@ -255,14 +222,13 @@
 {
 	int can_do_burst16, can_do_burst32, can_do_burst64;
 	int can_do_sbus64, lim;
-	struct platform_device *op;
+	struct platform_device *op = to_platform_device(esp->dev);
 	u32 val;
 
 	can_do_burst16 = (esp->bursts & DMA_BURST16) != 0;
 	can_do_burst32 = (esp->bursts & DMA_BURST32) != 0;
 	can_do_burst64 = 0;
 	can_do_sbus64 = 0;
-	op = esp->dev;
 	if (sbus_can_dma_64bit())
 		can_do_sbus64 = 1;
 	if (sbus_can_burst64())
@@ -474,10 +440,6 @@
 static const struct esp_driver_ops sbus_esp_ops = {
 	.esp_write8	=	sbus_esp_write8,
 	.esp_read8	=	sbus_esp_read8,
-	.map_single	=	sbus_esp_map_single,
-	.map_sg		=	sbus_esp_map_sg,
-	.unmap_single	=	sbus_esp_unmap_single,
-	.unmap_sg	=	sbus_esp_unmap_sg,
 	.irq_pending	=	sbus_esp_irq_pending,
 	.reset_dma	=	sbus_esp_reset_dma,
 	.dma_drain	=	sbus_esp_dma_drain,
@@ -504,7 +466,7 @@
 	esp = shost_priv(host);
 
 	esp->host = host;
-	esp->dev = op;
+	esp->dev = &op->dev;
 	esp->ops = &sbus_esp_ops;
 
 	if (hme)
@@ -540,7 +502,7 @@
 
 	dev_set_drvdata(&op->dev, esp);
 
-	err = scsi_esp_register(esp, &op->dev);
+	err = scsi_esp_register(esp);
 	if (err)
 		goto fail_free_irq;
 
@@ -568,11 +530,10 @@
 	int hme = 0;
 	int ret;
 
-	if (dp->parent &&
-	    (!strcmp(dp->parent->name, "espdma") ||
-	     !strcmp(dp->parent->name, "dma")))
+	if (of_node_name_eq(dp->parent, "espdma") ||
+	    of_node_name_eq(dp->parent, "dma"))
 		dma_node = dp->parent;
-	else if (!strcmp(dp->name, "SUNW,fas")) {
+	else if (of_node_name_eq(dp, "SUNW,fas")) {
 		dma_node = op->dev.of_node;
 		hme = 1;
 	}
diff --git a/drivers/scsi/sym53c8xx_2/Makefile b/drivers/scsi/sym53c8xx_2/Makefile
index 873e8ce..0751e2a 100644
--- a/drivers/scsi/sym53c8xx_2/Makefile
+++ b/drivers/scsi/sym53c8xx_2/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 # Makefile for the NCR/SYMBIOS/LSI 53C8XX PCI SCSI controllers driver.
 
 sym53c8xx-objs := sym_fw.o sym_glue.o sym_hipd.o sym_malloc.o sym_nvram.o
diff --git a/drivers/scsi/sym53c8xx_2/sym53c8xx.h b/drivers/scsi/sym53c8xx_2/sym53c8xx.h
index 62d29cf..11f5dc2 100644
--- a/drivers/scsi/sym53c8xx_2/sym53c8xx.h
+++ b/drivers/scsi/sym53c8xx_2/sym53c8xx.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family 
  * of PCI-SCSI IO processors.
@@ -21,20 +22,6 @@
  * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
  *
  *-----------------------------------------------------------------------------
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 #ifndef SYM53C8XX_H
diff --git a/drivers/scsi/sym53c8xx_2/sym_defs.h b/drivers/scsi/sym53c8xx_2/sym_defs.h
index defccc4..317289e 100644
--- a/drivers/scsi/sym53c8xx_2/sym_defs.h
+++ b/drivers/scsi/sym53c8xx_2/sym_defs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family 
  * of PCI-SCSI IO processors.
@@ -21,20 +22,6 @@
  * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
  *
  *-----------------------------------------------------------------------------
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 #ifndef SYM_DEFS_H
diff --git a/drivers/scsi/sym53c8xx_2/sym_fw.c b/drivers/scsi/sym53c8xx_2/sym_fw.c
index 91db177..6d7651a 100644
--- a/drivers/scsi/sym53c8xx_2/sym_fw.c
+++ b/drivers/scsi/sym53c8xx_2/sym_fw.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family 
  * of PCI-SCSI IO processors.
@@ -21,20 +22,6 @@
  * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
  *
  *-----------------------------------------------------------------------------
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 #include "sym_glue.h"
diff --git a/drivers/scsi/sym53c8xx_2/sym_fw.h b/drivers/scsi/sym53c8xx_2/sym_fw.h
index ae7e0f9..bbba011 100644
--- a/drivers/scsi/sym53c8xx_2/sym_fw.h
+++ b/drivers/scsi/sym53c8xx_2/sym_fw.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family 
  * of PCI-SCSI IO processors.
@@ -21,20 +22,6 @@
  * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
  *
  *-----------------------------------------------------------------------------
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 #ifndef	SYM_FW_H
diff --git a/drivers/scsi/sym53c8xx_2/sym_fw1.h b/drivers/scsi/sym53c8xx_2/sym_fw1.h
index 63952ee..d98ec67 100644
--- a/drivers/scsi/sym53c8xx_2/sym_fw1.h
+++ b/drivers/scsi/sym53c8xx_2/sym_fw1.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family 
  * of PCI-SCSI IO processors.
@@ -21,20 +22,6 @@
  * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
  *
  *-----------------------------------------------------------------------------
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 /*
diff --git a/drivers/scsi/sym53c8xx_2/sym_fw2.h b/drivers/scsi/sym53c8xx_2/sym_fw2.h
index c87d724..4d1779b 100644
--- a/drivers/scsi/sym53c8xx_2/sym_fw2.h
+++ b/drivers/scsi/sym53c8xx_2/sym_fw2.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family 
  * of PCI-SCSI IO processors.
@@ -21,20 +22,6 @@
  * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
  *
  *-----------------------------------------------------------------------------
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 /*
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 5f10aa9..2ca018c 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family 
  * of PCI-SCSI IO processors.
@@ -22,20 +23,6 @@
  * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
  *
  *-----------------------------------------------------------------------------
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 #include <linux/ctype.h>
 #include <linux/init.h>
@@ -1312,9 +1299,9 @@
 	sprintf(np->s.inst_name, "sym%d", np->s.unit);
 
 	if ((SYM_CONF_DMA_ADDRESSING_MODE > 0) && (np->features & FE_DAC) &&
-			!pci_set_dma_mask(pdev, DMA_DAC_MASK)) {
+			!dma_set_mask(&pdev->dev, DMA_DAC_MASK)) {
 		set_dac(np);
-	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
 		printf_warning("%s: No suitable DMA available\n", sym_name(np));
 		goto attach_failed;
 	}
@@ -1660,7 +1647,6 @@
 	.eh_bus_reset_handler	= sym53c8xx_eh_bus_reset_handler,
 	.eh_host_reset_handler	= sym53c8xx_eh_host_reset_handler,
 	.this_id		= 7,
-	.use_clustering		= ENABLE_CLUSTERING,
 	.max_sectors		= 0xFFFF,
 #ifdef SYM_LINUX_PROC_INFO_SUPPORT
 	.show_info		= sym_show_info,
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.h b/drivers/scsi/sym53c8xx_2/sym_glue.h
index e34801a..7d5c9b9 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.h
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family 
  * of PCI-SCSI IO processors.
@@ -21,20 +22,6 @@
  * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
  *
  *-----------------------------------------------------------------------------
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 #ifndef SYM_GLUE_H
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index bd3f6e2..a428cae 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family 
  * of PCI-SCSI IO processors.
@@ -22,20 +23,6 @@
  * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
  *
  *-----------------------------------------------------------------------------
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 #include <linux/slab.h>
@@ -3072,6 +3059,7 @@
 			sym_print_addr(cp->cmd, "%s\n",
 			        s_status == S_BUSY ? "BUSY" : "QUEUE FULL");
 		}
+		/* fall through */
 	default:	/* S_INT, S_INT_COND_MET, S_CONFLICT */
 		sym_complete_error (np, cp);
 		break;
@@ -4370,6 +4358,13 @@
 	OUTB(np, HS_PRT, HS_BUSY);
 }
 
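+/*
+ * Log through the SCSI command when the interrupt has one attached and
+ * through the target otherwise: interrupts raised without a live
+ * command (cp == NULL) must not dereference cp->cmd.
+ */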
+#define sym_printk(lvl, tp, cp, fmt, v...) do { \
+	if (cp)							\
+		scmd_printk(lvl, cp->cmd, fmt, ##v);		\
+	else							\
+		starget_printk(lvl, tp->starget, fmt, ##v);	\
+} while (0)
+
 /*
  *  chip exception handler for programmed interrupts.
  */
@@ -4415,7 +4410,7 @@
 	 *  been selected with ATN.  We do not want to handle that.
 	 */
 	case SIR_SEL_ATN_NO_MSG_OUT:
-		scmd_printk(KERN_WARNING, cp->cmd,
+		sym_printk(KERN_WARNING, tp, cp,
 				"No MSG OUT phase after selection with ATN\n");
 		goto out_stuck;
 	/*
@@ -4423,7 +4418,7 @@
 	 *  having reselected the initiator.
 	 */
 	case SIR_RESEL_NO_MSG_IN:
-		scmd_printk(KERN_WARNING, cp->cmd,
+		sym_printk(KERN_WARNING, tp, cp,
 				"No MSG IN phase after reselection\n");
 		goto out_stuck;
 	/*
@@ -4431,7 +4426,7 @@
 	 *  an IDENTIFY.
 	 */
 	case SIR_RESEL_NO_IDENTIFY:
-		scmd_printk(KERN_WARNING, cp->cmd,
+		sym_printk(KERN_WARNING, tp, cp,
 				"No IDENTIFY after reselection\n");
 		goto out_stuck;
 	/*
@@ -4460,7 +4455,7 @@
 	case SIR_RESEL_ABORTED:
 		np->lastmsg = np->msgout[0];
 		np->msgout[0] = M_NOOP;
-		scmd_printk(KERN_WARNING, cp->cmd,
+		sym_printk(KERN_WARNING, tp, cp,
 			"message %x sent on bad reselection\n", np->lastmsg);
 		goto out;
 	/*
@@ -4625,6 +4620,7 @@
 	 *  Negotiation failed.
 	 *  Target does not want answer message.
 	 */
+	/* fall through */
 	case SIR_NEGO_PROTO:
 		sym_nego_default(np, tp, cp);
 		goto out;
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.h b/drivers/scsi/sym53c8xx_2/sym_hipd.h
index a141b17..9231a28 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.h
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family 
  * of PCI-SCSI IO processors.
@@ -21,20 +22,6 @@
  * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
  *
  *-----------------------------------------------------------------------------
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 #include <linux/gfp.h>
diff --git a/drivers/scsi/sym53c8xx_2/sym_malloc.c b/drivers/scsi/sym53c8xx_2/sym_malloc.c
index 6f9af0d..eb5c045 100644
--- a/drivers/scsi/sym53c8xx_2/sym_malloc.c
+++ b/drivers/scsi/sym53c8xx_2/sym_malloc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family 
  * of PCI-SCSI IO processors.
@@ -21,20 +22,6 @@
  * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
  *
  *-----------------------------------------------------------------------------
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 #include "sym_glue.h"
diff --git a/drivers/scsi/sym53c8xx_2/sym_misc.h b/drivers/scsi/sym53c8xx_2/sym_misc.h
index 96c1514..ef419b7 100644
--- a/drivers/scsi/sym53c8xx_2/sym_misc.h
+++ b/drivers/scsi/sym53c8xx_2/sym_misc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family 
  * of PCI-SCSI IO processors.
@@ -21,20 +22,6 @@
  * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
  *
  *-----------------------------------------------------------------------------
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 #ifndef SYM_MISC_H
diff --git a/drivers/scsi/sym53c8xx_2/sym_nvram.c b/drivers/scsi/sym53c8xx_2/sym_nvram.c
index 5662fbb..9dc17f1 100644
--- a/drivers/scsi/sym53c8xx_2/sym_nvram.c
+++ b/drivers/scsi/sym53c8xx_2/sym_nvram.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family 
  * of PCI-SCSI IO processors.
@@ -21,20 +22,6 @@
  * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
  *
  *-----------------------------------------------------------------------------
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 #include "sym_glue.h"
@@ -661,7 +648,7 @@
 {
 	u_char gpcntl, gpreg;
 	u_char old_gpcntl, old_gpreg;
-	int retv = 1;
+	int retv;
 
 	/* save current state of GPCNTL and GPREG */
 	old_gpreg	= INB(np, nc_gpreg);
@@ -708,6 +695,7 @@
 					  data, len);
 		if (!x)
 			break;
+		/* fall through */
 	default:
 		x = sym_read_T93C46_nvram(np, nvram);
 		break;
diff --git a/drivers/scsi/sym53c8xx_2/sym_nvram.h b/drivers/scsi/sym53c8xx_2/sym_nvram.h
index bdfbbb0..d07da39 100644
--- a/drivers/scsi/sym53c8xx_2/sym_nvram.h
+++ b/drivers/scsi/sym53c8xx_2/sym_nvram.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family 
  * of PCI-SCSI IO processors.
@@ -21,20 +22,6 @@
  * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
  *
  *-----------------------------------------------------------------------------
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 #ifndef SYM_NVRAM_H
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index e09fe6a..0b845ab 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -80,6 +80,14 @@
 
 	  If unsure, say N.
 
+config SCSI_UFS_CDNS_PLATFORM
+	tristate "Cadence UFS Controller platform driver"
+	depends on SCSI_UFSHCD_PLATFORM
+	help
+	  This selects the Cadence-specific additions to the UFSHCD platform driver.
+
+	  If unsure, say N.
+
 config SCSI_UFS_DWC_TC_PLATFORM
 	tristate "DesignWare platform support using a G210 Test Chip"
 	depends on SCSI_UFSHCD_PLATFORM
@@ -91,7 +99,7 @@
 config SCSI_UFS_QCOM
 	tristate "QCOM specific hooks to UFS controller platform driver"
 	depends on SCSI_UFSHCD_PLATFORM && ARCH_QCOM
-	select PHY_QCOM_UFS
+	select RESET_CONTROLLER
 	help
 	  This selects the QCOM specific additions to UFSHCD platform driver.
 	  UFS host on QCOM needs some vendor specific configuration before
@@ -101,6 +109,20 @@
 	  Select this if you have UFS controller on QCOM chipset.
 	  If unsure, say N.
 
+config SCSI_UFS_MEDIATEK
+	tristate "Mediatek specific hooks to UFS controller platform driver"
+	depends on SCSI_UFSHCD_PLATFORM && ARCH_MEDIATEK
+	select PHY_MTK_UFS
+	help
+	  This selects the MediaTek-specific additions to the UFSHCD platform
+	  driver. The UFS host on MediaTek chipsets needs some vendor-specific
+	  configuration before accessing the hardware; this includes PHY
+	  configuration and vendor-specific registers.
+
+	  Select this if you have UFS controller on Mediatek chipset.
+
+	  If unsure, say N.
+
 config SCSI_UFS_HISI
 	tristate "Hisilicon specific hooks to UFS controller platform driver"
 	depends on (ARCH_HISI || COMPILE_TEST) && SCSI_UFSHCD_PLATFORM
@@ -109,3 +131,22 @@
 
 	  Select this if you have UFS controller on Hisilicon chipset.
 	  If unsure, say N.
+
+config SCSI_UFS_BSG
+	bool "Universal Flash Storage BSG device node"
+	depends on SCSI_UFSHCD
+	select BLK_DEV_BSGLIB
+	help
+	  Universal Flash Storage (UFS) is a SCSI transport specification for
+	  accessing flash storage on digital cameras, mobile phones and
+	  consumer electronic devices.
+	  A UFS controller communicates with a UFS device by exchanging
+	  UFS Protocol Information Units (UPIUs).
+	  UPIUs can not only be used as a transport layer for the SCSI protocol
+	  but are also used by the UFS native command set.
+	  This transport driver supports exchanging UFS protocol information units
+	  with a UFS device. See also the ufshcd driver, which is a SCSI driver
+	  that supports UFS devices.
+
+	  Select this if you need a bsg device node for your UFS controller.
+	  If unsure, say N.
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
index 2c50f03..2a90979 100644
--- a/drivers/scsi/ufs/Makefile
+++ b/drivers/scsi/ufs/Makefile
@@ -2,9 +2,12 @@
 # UFSHCD makefile
 obj-$(CONFIG_SCSI_UFS_DWC_TC_PCI) += tc-dwc-g210-pci.o ufshcd-dwc.o tc-dwc-g210.o
 obj-$(CONFIG_SCSI_UFS_DWC_TC_PLATFORM) += tc-dwc-g210-pltfrm.o ufshcd-dwc.o tc-dwc-g210.o
+obj-$(CONFIG_SCSI_UFS_CDNS_PLATFORM) += cdns-pltfrm.o
 obj-$(CONFIG_SCSI_UFS_QCOM) += ufs-qcom.o
 obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o
-ufshcd-core-objs := ufshcd.o ufs-sysfs.o
+ufshcd-core-y				+= ufshcd.o ufs-sysfs.o
+ufshcd-core-$(CONFIG_SCSI_UFS_BSG)	+= ufs_bsg.o
 obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
 obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
 obj-$(CONFIG_SCSI_UFS_HISI) += ufs-hisi.o
+obj-$(CONFIG_SCSI_UFS_MEDIATEK) += ufs-mediatek.o
diff --git a/drivers/scsi/ufs/cdns-pltfrm.c b/drivers/scsi/ufs/cdns-pltfrm.c
new file mode 100644
index 0000000..b2af04c
--- /dev/null
+++ b/drivers/scsi/ufs/cdns-pltfrm.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Platform UFS Host driver for Cadence controller
+ *
+ * Copyright (C) 2018 Cadence Design Systems, Inc.
+ *
+ * Authors:
+ *	Jan Kotas <jank@cadence.com>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/time.h>
+
+#include "ufshcd-pltfrm.h"
+
+#define CDNS_UFS_REG_HCLKDIV	0xFC
+#define CDNS_UFS_REG_PHY_XCFGD1	0x113C
+
+/**
+ * cdns_ufs_set_hclkdiv - set the HCLKDIV register value based on the core_clk
+ * @hba: host controller instance
+ *
+ * Return zero for success and non-zero for failure
+ */
+static int cdns_ufs_set_hclkdiv(struct ufs_hba *hba)
+{
+	struct ufs_clk_info *clki;
+	struct list_head *head = &hba->clk_list_head;
+	unsigned long core_clk_rate = 0;
+	u32 core_clk_div = 0;
+
+	if (list_empty(head))
+		return 0;
+
+	list_for_each_entry(clki, head, list) {
+		if (IS_ERR_OR_NULL(clki->clk))
+			continue;
+		if (!strcmp(clki->name, "core_clk"))
+			core_clk_rate = clk_get_rate(clki->clk);
+	}
+
+	if (!core_clk_rate) {
+		dev_err(hba->dev, "%s: unable to find core_clk rate\n",
+			__func__);
+		return -EINVAL;
+	}
+
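+	/*
+	 * HCLKDIV is assumed to hold the number of core_clk cycles per
+	 * microsecond (the rate in MHz), hence the divide by USEC_PER_SEC.
+	 */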
+	core_clk_div = core_clk_rate / USEC_PER_SEC;
+
+	ufshcd_writel(hba, core_clk_div, CDNS_UFS_REG_HCLKDIV);
+	/*
+	 * Make sure the register was updated; the UniPro layer will not
+	 * work with an incorrect value.
+	 */
+	mb();
+
+	return 0;
+}
+
+/**
+ * cdns_ufs_hce_enable_notify - called before and after HCE enable bit is set
+ * @hba: host controller instance
+ * @status: notify stage (pre, post change)
+ *
+ * Return zero for success and non-zero for failure
+ */
+static int cdns_ufs_hce_enable_notify(struct ufs_hba *hba,
+				      enum ufs_notify_change_status status)
+{
+	if (status != PRE_CHANGE)
+		return 0;
+
+	return cdns_ufs_set_hclkdiv(hba);
+}
+
+/**
+ * cdns_ufs_link_startup_notify - called before and after link startup
+ * @hba: host controller instance
+ * @status: notify stage (pre, post change)
+ *
+ * Return zero for success and non-zero for failure
+ */
+static int cdns_ufs_link_startup_notify(struct ufs_hba *hba,
+					enum ufs_notify_change_status status)
+{
+	if (status != PRE_CHANGE)
+		return 0;
+
+	/*
+	 * Some UFS devices have issues if LCC is enabled.
+	 * So we are setting PA_Local_TX_LCC_Enable to 0
+	 * before link startup which will make sure that both host
+	 * and device TX LCC are disabled once link startup is
+	 * completed.
+	 */
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0);
+
+	return 0;
+}
+
+/**
+ * cdns_ufs_init - performs additional ufs initialization
+ * @hba: host controller instance
+ *
+ * Returns status of initialization
+ */
+static int cdns_ufs_init(struct ufs_hba *hba)
+{
+	int status = 0;
+
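+	/* Defer to the variant PHY init hook (the m31 16nm one) when set. */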
+	if (hba->vops && hba->vops->phy_initialization)
+		status = hba->vops->phy_initialization(hba);
+
+	return status;
+}
+
+/**
+ * cdns_ufs_m31_16nm_phy_initialization - performs m31 phy initialization
+ * @hba: host controller instance
+ *
+ * Always returns 0
+ */
+static int cdns_ufs_m31_16nm_phy_initialization(struct ufs_hba *hba)
+{
+	u32 data;
+
+	/* Increase RX_Advanced_Min_ActivateTime_Capability */
+	data = ufshcd_readl(hba, CDNS_UFS_REG_PHY_XCFGD1);
+	data |= BIT(24);
+	ufshcd_writel(hba, data, CDNS_UFS_REG_PHY_XCFGD1);
+
+	return 0;
+}
+
+static const struct ufs_hba_variant_ops cdns_ufs_pltfm_hba_vops = {
+	.name = "cdns-ufs-pltfm",
+	.hce_enable_notify = cdns_ufs_hce_enable_notify,
+	.link_startup_notify = cdns_ufs_link_startup_notify,
+};
+
+static const struct ufs_hba_variant_ops cdns_ufs_m31_16nm_pltfm_hba_vops = {
+	.name = "cdns-ufs-pltfm",
+	.init = cdns_ufs_init,
+	.hce_enable_notify = cdns_ufs_hce_enable_notify,
+	.link_startup_notify = cdns_ufs_link_startup_notify,
+	.phy_initialization = cdns_ufs_m31_16nm_phy_initialization,
+};
+
+static const struct of_device_id cdns_ufs_of_match[] = {
+	{
+		.compatible = "cdns,ufshc",
+		.data =  &cdns_ufs_pltfm_hba_vops,
+	},
+	{
+		.compatible = "cdns,ufshc-m31-16nm",
+		.data =  &cdns_ufs_m31_16nm_pltfm_hba_vops,
+	},
+	{ },
+};
+
+MODULE_DEVICE_TABLE(of, cdns_ufs_of_match);
+
+/**
+ * cdns_ufs_pltfrm_probe - probe routine of the driver
+ * @pdev: pointer to platform device handle
+ *
+ * Return zero for success and non-zero for failure
+ */
+static int cdns_ufs_pltfrm_probe(struct platform_device *pdev)
+{
+	int err;
+	const struct of_device_id *of_id;
+	struct ufs_hba_variant_ops *vops;
+	struct device *dev = &pdev->dev;
+
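+	/* Probing is gated by cdns_ufs_of_match, so a match is expected. */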
+	of_id = of_match_node(cdns_ufs_of_match, dev->of_node);
+	vops = (struct ufs_hba_variant_ops *)of_id->data;
+
+	/* Perform generic probe */
+	err = ufshcd_pltfrm_init(pdev, vops);
+	if (err)
+		dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
+
+	return err;
+}
+
+/**
+ * cdns_ufs_pltfrm_remove - removes the ufs driver
+ * @pdev: pointer to platform device handle
+ *
+ * Always returns 0
+ */
+static int cdns_ufs_pltfrm_remove(struct platform_device *pdev)
+{
+	struct ufs_hba *hba = platform_get_drvdata(pdev);
+
+	ufshcd_remove(hba);
+	return 0;
+}
+
+static const struct dev_pm_ops cdns_ufs_dev_pm_ops = {
+	.suspend         = ufshcd_pltfrm_suspend,
+	.resume          = ufshcd_pltfrm_resume,
+	.runtime_suspend = ufshcd_pltfrm_runtime_suspend,
+	.runtime_resume  = ufshcd_pltfrm_runtime_resume,
+	.runtime_idle    = ufshcd_pltfrm_runtime_idle,
+};
+
+static struct platform_driver cdns_ufs_pltfrm_driver = {
+	.probe	= cdns_ufs_pltfrm_probe,
+	.remove	= cdns_ufs_pltfrm_remove,
+	.driver	= {
+		.name   = "cdns-ufshcd",
+		.pm     = &cdns_ufs_dev_pm_ops,
+		.of_match_table = cdns_ufs_of_match,
+	},
+};
+
+module_platform_driver(cdns_ufs_pltfrm_driver);
+
+MODULE_AUTHOR("Jan Kotas <jank@cadence.com>");
+MODULE_DESCRIPTION("Cadence UFS host controller platform driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(UFSHCD_DRIVER_VERSION);
diff --git a/drivers/scsi/ufs/tc-dwc-g210-pci.c b/drivers/scsi/ufs/tc-dwc-g210-pci.c
index 2f41722..67a6a61 100644
--- a/drivers/scsi/ufs/tc-dwc-g210-pci.c
+++ b/drivers/scsi/ufs/tc-dwc-g210-pci.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Synopsys G210 Test Chip driver
  *
  * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
  *
  * Authors: Joao Pinto <jpinto@synopsys.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #include "ufshcd.h"
diff --git a/drivers/scsi/ufs/tc-dwc-g210-pltfrm.c b/drivers/scsi/ufs/tc-dwc-g210-pltfrm.c
index 6dfe5a9..a1268e4 100644
--- a/drivers/scsi/ufs/tc-dwc-g210-pltfrm.c
+++ b/drivers/scsi/ufs/tc-dwc-g210-pltfrm.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Synopsys G210 Test Chip driver
  *
  * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
  *
  * Authors: Joao Pinto <jpinto@synopsys.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #include <linux/kernel.h>
diff --git a/drivers/scsi/ufs/tc-dwc-g210.c b/drivers/scsi/ufs/tc-dwc-g210.c
index 3a8bc6d..f954a68 100644
--- a/drivers/scsi/ufs/tc-dwc-g210.c
+++ b/drivers/scsi/ufs/tc-dwc-g210.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Synopsys G210 Test Chip driver
  *
  * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
  *
  * Authors: Joao Pinto <jpinto@synopsys.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #include "ufshcd.h"
diff --git a/drivers/scsi/ufs/tc-dwc-g210.h b/drivers/scsi/ufs/tc-dwc-g210.h
index fb177db..5a506da 100644
--- a/drivers/scsi/ufs/tc-dwc-g210.h
+++ b/drivers/scsi/ufs/tc-dwc-g210.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Synopsys G210 Test Chip driver
  *
  * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
  *
  * Authors: Joao Pinto <jpinto@synopsys.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #ifndef _TC_DWC_G210_H
diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c
index 452e19f..6bbb167 100644
--- a/drivers/scsi/ufs/ufs-hisi.c
+++ b/drivers/scsi/ufs/ufs-hisi.c
@@ -1,11 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * HiSilicon Hixxxx UFS Driver
  *
  * Copyright (c) 2016-2017 Linaro Ltd.
  * Copyright (c) 2016-2017 HiSilicon Technologies Co., Ltd.
- *
- * Released under the GPLv2 only.
- * SPDX-License-Identifier: GPL-2.0
  */
 
 #include <linux/time.h>
@@ -66,7 +64,7 @@
 	return err;
 }
 
-static void ufs_hi3660_clk_init(struct ufs_hba *hba)
+static void ufs_hisi_clk_init(struct ufs_hba *hba)
 {
 	struct ufs_hisi_host *host = ufshcd_get_variant(hba);
 
@@ -80,7 +78,7 @@
 	ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
 }
 
-static void ufs_hi3660_soc_init(struct ufs_hba *hba)
+static void ufs_hisi_soc_init(struct ufs_hba *hba)
 {
 	struct ufs_hisi_host *host = ufshcd_get_variant(hba);
 	u32 reg;
@@ -139,6 +137,7 @@
 
 static int ufs_hisi_link_startup_pre_change(struct ufs_hba *hba)
 {
+	struct ufs_hisi_host *host = ufshcd_get_variant(hba);
 	int err;
 	uint32_t value;
 	uint32_t reg;
@@ -153,6 +152,14 @@
 	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8121, 0x0), 0x2D);
 	/* MPHY CBOVRCTRL3 */
 	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8122, 0x0), 0x1);
+
+	if (host->caps & UFS_HISI_CAP_PHY10nm) {
+		/* MPHY CBOVRCTRL4 */
+		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8127, 0x0), 0x98);
+		/* MPHY CBOVRCTRL5 */
+		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8128, 0x0), 0x1);
+	}
+
 	/* Unipro VS_MphyCfgUpdt */
 	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
 	/* MPHY RXOVRCTRL4 rx0 */
@@ -173,10 +180,21 @@
 	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8113, 0x0), 0x1);
 	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
 
-	/* Tactive RX */
-	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x4), 0x7);
-	/* Tactive RX */
-	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x5), 0x7);
+	if (host->caps & UFS_HISI_CAP_PHY10nm) {
+		/* RX_Hibern8Time_Capability */
+		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0092, 0x4), 0xA);
+		/* RX_Hibern8Time_Capability */
+		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0092, 0x5), 0xA);
+		/* RX_Min_ActivateTime */
+		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008f, 0x4), 0xA);
+		/* RX_Min_ActivateTime */
+		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008f, 0x5), 0xA);
+	} else {
+		/* Tactive RX */
+		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x4), 0x7);
+		/* Tactive RX */
+		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x5), 0x7);
+	}
 
 	/* Gear3 Synclength */
 	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0095, 0x4), 0x4F);
@@ -208,7 +226,8 @@
 	if (err)
 		dev_err(hba->dev, "ufs_hisi_check_hibern8 error\n");
 
-	ufshcd_writel(hba, UFS_HCLKDIV_NORMAL_VALUE, UFS_REG_HCLKDIV);
+	if (!(host->caps & UFS_HISI_CAP_PHY10nm))
+		ufshcd_writel(hba, UFS_HCLKDIV_NORMAL_VALUE, UFS_REG_HCLKDIV);
 
 	/* disable auto H8 */
 	reg = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
@@ -253,7 +272,7 @@
 	return 0;
 }
 
-static int ufs_hi3660_link_startup_notify(struct ufs_hba *hba,
+static int ufs_hisi_link_startup_notify(struct ufs_hba *hba,
 					  enum ufs_notify_change_status status)
 {
 	int err = 0;
@@ -272,108 +291,7 @@
 	return err;
 }
 
-struct ufs_hisi_dev_params {
-	u32 pwm_rx_gear; /* pwm rx gear to work in */
-	u32 pwm_tx_gear; /* pwm tx gear to work in */
-	u32 hs_rx_gear;  /* hs rx gear to work in */
-	u32 hs_tx_gear;  /* hs tx gear to work in */
-	u32 rx_lanes;    /* number of rx lanes */
-	u32 tx_lanes;    /* number of tx lanes */
-	u32 rx_pwr_pwm;  /* rx pwm working pwr */
-	u32 tx_pwr_pwm;  /* tx pwm working pwr */
-	u32 rx_pwr_hs;   /* rx hs working pwr */
-	u32 tx_pwr_hs;   /* tx hs working pwr */
-	u32 hs_rate;     /* rate A/B to work in HS */
-	u32 desired_working_mode;
-};
-
-static int ufs_hisi_get_pwr_dev_param(
-				    struct ufs_hisi_dev_params *hisi_param,
-				    struct ufs_pa_layer_attr *dev_max,
-				    struct ufs_pa_layer_attr *agreed_pwr)
-{
-	int min_hisi_gear;
-	int min_dev_gear;
-	bool is_dev_sup_hs = false;
-	bool is_hisi_max_hs = false;
-
-	if (dev_max->pwr_rx == FASTAUTO_MODE || dev_max->pwr_rx == FAST_MODE)
-		is_dev_sup_hs = true;
-
-	if (hisi_param->desired_working_mode == FAST) {
-		is_hisi_max_hs = true;
-		min_hisi_gear = min_t(u32, hisi_param->hs_rx_gear,
-				       hisi_param->hs_tx_gear);
-	} else {
-		min_hisi_gear = min_t(u32, hisi_param->pwm_rx_gear,
-				       hisi_param->pwm_tx_gear);
-	}
-
-	/*
-	 * device doesn't support HS but
-	 * hisi_param->desired_working_mode is HS,
-	 * thus device and hisi_param don't agree
-	 */
-	if (!is_dev_sup_hs && is_hisi_max_hs) {
-		pr_err("%s: device not support HS\n", __func__);
-		return -ENOTSUPP;
-	} else if (is_dev_sup_hs && is_hisi_max_hs) {
-		/*
-		 * since device supports HS, it supports FAST_MODE.
-		 * since hisi_param->desired_working_mode is also HS
-		 * then final decision (FAST/FASTAUTO) is done according
-		 * to hisi_params as it is the restricting factor
-		 */
-		agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
-			hisi_param->rx_pwr_hs;
-	} else {
-		/*
-		 * here hisi_param->desired_working_mode is PWM.
-		 * it doesn't matter whether device supports HS or PWM,
-		 * in both cases hisi_param->desired_working_mode will
-		 * determine the mode
-		 */
-		agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
-			hisi_param->rx_pwr_pwm;
-	}
-
-	/*
-	 * we would like tx to work in the minimum number of lanes
-	 * between device capability and vendor preferences.
-	 * the same decision will be made for rx
-	 */
-	agreed_pwr->lane_tx =
-		min_t(u32, dev_max->lane_tx, hisi_param->tx_lanes);
-	agreed_pwr->lane_rx =
-		min_t(u32, dev_max->lane_rx, hisi_param->rx_lanes);
-
-	/* device maximum gear is the minimum between device rx and tx gears */
-	min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);
-
-	/*
-	 * if both device capabilities and vendor pre-defined preferences are
-	 * both HS or both PWM then set the minimum gear to be the chosen
-	 * working gear.
-	 * if one is PWM and one is HS then the one that is PWM get to decide
-	 * what is the gear, as it is the one that also decided previously what
-	 * pwr the device will be configured to.
-	 */
-	if ((is_dev_sup_hs && is_hisi_max_hs) ||
-	    (!is_dev_sup_hs && !is_hisi_max_hs))
-		agreed_pwr->gear_rx = agreed_pwr->gear_tx =
-			min_t(u32, min_dev_gear, min_hisi_gear);
-	else
-		agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_hisi_gear;
-
-	agreed_pwr->hs_rate = hisi_param->hs_rate;
-
-	pr_info("ufs final power mode: gear = %d, lane = %d, pwr = %d, rate = %d\n",
-		agreed_pwr->gear_rx, agreed_pwr->lane_rx, agreed_pwr->pwr_rx,
-		agreed_pwr->hs_rate);
-	return 0;
-}
-
-static void ufs_hisi_set_dev_cap(struct ufs_hisi_dev_params *hisi_param)
+static void ufs_hisi_set_dev_cap(struct ufs_dev_params *hisi_param)
 {
 	hisi_param->rx_lanes = UFS_HISI_LIMIT_NUM_LANES_RX;
 	hisi_param->tx_lanes = UFS_HISI_LIMIT_NUM_LANES_TX;
@@ -391,6 +309,28 @@
 
 static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba)
 {
+	struct ufs_hisi_host *host = ufshcd_get_variant(hba);
+
+	if (host->caps & UFS_HISI_CAP_PHY10nm) {
+		/*
+		 * The Boston platform needs SaveConfigTime set to 0x13 and
+		 * the sync length changed to its maximum value.
+		 */
+		/* VS_DebugSaveConfigTime */
+		ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0xD0A0), 0x13);
+		/* g1 sync length */
+		ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x1552), 0x4f);
+		/* g2 sync length */
+		ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x1554), 0x4f);
+		/* g3 sync length */
+		ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x1556), 0x4f);
+		/* PA_Hibern8Time */
+		ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x15a7), 0xA);
+		/* PA_Tactivate */
+		ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x15a8), 0xA);
+		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xd085, 0x0), 0x01);
+	}
+
 	if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME) {
 		pr_info("ufs flash device must set VS_DebugSaveConfigTime 0x10\n");
 		/* VS_DebugSaveConfigTime */
@@ -429,12 +369,12 @@
 	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd046), 32767);
 }
 
-static int ufs_hi3660_pwr_change_notify(struct ufs_hba *hba,
+static int ufs_hisi_pwr_change_notify(struct ufs_hba *hba,
 				       enum ufs_notify_change_status status,
 				       struct ufs_pa_layer_attr *dev_max_params,
 				       struct ufs_pa_layer_attr *dev_req_params)
 {
-	struct ufs_hisi_dev_params ufs_hisi_cap;
+	struct ufs_dev_params ufs_hisi_cap;
 	int ret = 0;
 
 	if (!dev_req_params) {
@@ -447,8 +387,8 @@
 	switch (status) {
 	case PRE_CHANGE:
 		ufs_hisi_set_dev_cap(&ufs_hisi_cap);
-		ret = ufs_hisi_get_pwr_dev_param(
-			&ufs_hisi_cap, dev_max_params, dev_req_params);
+		ret = ufshcd_get_pwr_dev_param(&ufs_hisi_cap,
+					       dev_max_params, dev_req_params);
 		if (ret) {
 			dev_err(hba->dev,
 			    "%s: failed to determine capabilities\n", __func__);
@@ -507,13 +447,11 @@
 
 static int ufs_hisi_get_resource(struct ufs_hisi_host *host)
 {
-	struct resource *mem_res;
 	struct device *dev = host->hba->dev;
 	struct platform_device *pdev = to_platform_device(dev);
 
 	/* get resource of ufs sys ctrl */
-	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	host->ufs_sys_ctrl = devm_ioremap_resource(dev, mem_res);
+	host->ufs_sys_ctrl = devm_platform_ioremap_resource(pdev, 1);
 	if (IS_ERR(host->ufs_sys_ctrl))
 		return PTR_ERR(host->ufs_sys_ctrl);
 
@@ -544,6 +482,10 @@
 	ufshcd_set_variant(hba, host);
 
 	host->rst  = devm_reset_control_get(dev, "rst");
+	if (IS_ERR(host->rst)) {
+		dev_err(dev, "%s: failed to get reset control\n", __func__);
+		return PTR_ERR(host->rst);
+	}
 
 	ufs_hisi_set_pm_lvl(hba);
 
@@ -567,25 +509,69 @@
 		return ret;
 	}
 
-	ufs_hi3660_clk_init(hba);
+	ufs_hisi_clk_init(hba);
 
-	ufs_hi3660_soc_init(hba);
+	ufs_hisi_soc_init(hba);
 
 	return 0;
 }
 
-static struct ufs_hba_variant_ops ufs_hba_hisi_vops = {
+static int ufs_hi3670_init(struct ufs_hba *hba)
+{
+	int ret = 0;
+	struct device *dev = hba->dev;
+	struct ufs_hisi_host *host;
+
+	ret = ufs_hisi_init_common(hba);
+	if (ret) {
+		dev_err(dev, "%s: ufs common init fail\n", __func__);
+		return ret;
+	}
+
+	ufs_hisi_clk_init(hba);
+
+	ufs_hisi_soc_init(hba);
+
+	/* Add cap for 10nm PHY variant on HI3670 SoC */
+	host = ufshcd_get_variant(hba);
+	host->caps |= UFS_HISI_CAP_PHY10nm;
+
+	return 0;
+}
+
+static const struct ufs_hba_variant_ops ufs_hba_hi3660_vops = {
 	.name = "hi3660",
 	.init = ufs_hi3660_init,
-	.link_startup_notify = ufs_hi3660_link_startup_notify,
-	.pwr_change_notify = ufs_hi3660_pwr_change_notify,
+	.link_startup_notify = ufs_hisi_link_startup_notify,
+	.pwr_change_notify = ufs_hisi_pwr_change_notify,
 	.suspend = ufs_hisi_suspend,
 	.resume = ufs_hisi_resume,
 };
 
+static const struct ufs_hba_variant_ops ufs_hba_hi3670_vops = {
+	.name = "hi3670",
+	.init = ufs_hi3670_init,
+	.link_startup_notify = ufs_hisi_link_startup_notify,
+	.pwr_change_notify = ufs_hisi_pwr_change_notify,
+	.suspend = ufs_hisi_suspend,
+	.resume = ufs_hisi_resume,
+};
+
+static const struct of_device_id ufs_hisi_of_match[] = {
+	{ .compatible = "hisilicon,hi3660-ufs", .data = &ufs_hba_hi3660_vops },
+	{ .compatible = "hisilicon,hi3670-ufs", .data = &ufs_hba_hi3670_vops },
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, ufs_hisi_of_match);
+
 static int ufs_hisi_probe(struct platform_device *pdev)
 {
-	return ufshcd_pltfrm_init(pdev, &ufs_hba_hisi_vops);
+	const struct of_device_id *of_id;
+
+	of_id = of_match_node(ufs_hisi_of_match, pdev->dev.of_node);
+
+	return ufshcd_pltfrm_init(pdev, of_id->data);
 }
 
 static int ufs_hisi_remove(struct platform_device *pdev)
@@ -596,13 +582,6 @@
 	return 0;
 }
 
-static const struct of_device_id ufs_hisi_of_match[] = {
-	{ .compatible = "hisilicon,hi3660-ufs" },
-	{},
-};
-
-MODULE_DEVICE_TABLE(of, ufs_hisi_of_match);
-
 static const struct dev_pm_ops ufs_hisi_pm_ops = {
 	.suspend	= ufshcd_pltfrm_suspend,
 	.resume		= ufshcd_pltfrm_resume,
diff --git a/drivers/scsi/ufs/ufs-hisi.h b/drivers/scsi/ufs/ufs-hisi.h
index 3df9cd7..3231d3d 100644
--- a/drivers/scsi/ufs/ufs-hisi.h
+++ b/drivers/scsi/ufs/ufs-hisi.h
@@ -1,8 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2017, HiSilicon. All rights reserved.
- *
- * Released under the GPLv2 only.
- * SPDX-License-Identifier: GPL-2.0
  */
 
 #ifndef UFS_HISI_H_
@@ -91,6 +89,9 @@
 #define UFS_HISI_LIMIT_HS_RATE	PA_HS_MODE_B
 #define UFS_HISI_LIMIT_DESIRED_MODE	FAST
 
+#define UFS_HISI_CAP_RESERVED		BIT(0)
+#define UFS_HISI_CAP_PHY10nm		BIT(1)
+
 struct ufs_hisi_host {
 	struct ufs_hba *hba;
 	void __iomem *ufs_sys_ctrl;
@@ -112,4 +113,5 @@
 	ufs_sys_ctrl_writel((host),                                            \
 			    ((~(mask)) & (ufs_sys_ctrl_readl((host), (reg)))), \
 			    (reg))
+
 #endif /* UFS_HISI_H_ */
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
new file mode 100644
index 0000000..0f6ff33
--- /dev/null
+++ b/drivers/scsi/ufs/ufs-mediatek.c
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 MediaTek Inc.
+ * Authors:
+ *	Stanley Chu <stanley.chu@mediatek.com>
+ *	Peter Wang <peter.wang@mediatek.com>
+ */
+
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+
+#include "ufshcd.h"
+#include "ufshcd-pltfrm.h"
+#include "unipro.h"
+#include "ufs-mediatek.h"
+
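+/*
+ * ufs_mtk_cfg_unipro_cg - toggle vendor-specific UniPro clock gating.
+ * Enabling opens the RX symbol, system and TX clock gates in
+ * VS_SAVEPOWERCONTROL and releases the forced TX symbol clock request
+ * in VS_DEBUGCLOCKENABLE; disabling reverses both settings.
+ */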
+static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
+{
+	u32 tmp;
+
+	if (enable) {
+		ufshcd_dme_get(hba,
+			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
+		tmp = tmp |
+		      (1 << RX_SYMBOL_CLK_GATE_EN) |
+		      (1 << SYS_CLK_GATE_EN) |
+		      (1 << TX_CLK_GATE_EN);
+		ufshcd_dme_set(hba,
+			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
+
+		ufshcd_dme_get(hba,
+			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
+		tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
+		ufshcd_dme_set(hba,
+			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
+	} else {
+		ufshcd_dme_get(hba,
+			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
+		tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) |
+			      (1 << SYS_CLK_GATE_EN) |
+			      (1 << TX_CLK_GATE_EN));
+		ufshcd_dme_set(hba,
+			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
+
+		ufshcd_dme_get(hba,
+			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
+		tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE);
+		ufshcd_dme_set(hba,
+			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
+	}
+}
+
+static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
+{
+	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+	struct device *dev = hba->dev;
+	struct device_node *np = dev->of_node;
+	int err = 0;
+
+	host->mphy = devm_of_phy_get_by_index(dev, np, 0);
+
+	if (host->mphy == ERR_PTR(-EPROBE_DEFER)) {
+		/*
+		 * UFS driver might be probed before the phy driver does.
+		 * In that case we would like to return EPROBE_DEFER code.
+		 */
+		err = -EPROBE_DEFER;
+		dev_info(dev,
+			 "%s: required phy hasn't probed yet. err = %d\n",
+			 __func__, err);
+	} else if (IS_ERR(host->mphy)) {
+		err = PTR_ERR(host->mphy);
+		dev_info(dev, "%s: PHY get failed %d\n", __func__, err);
+	}
+
+	if (err)
+		host->mphy = NULL;
+
+	return err;
+}
+
+/**
+ * ufs_mtk_setup_clocks - enables/disables clocks
+ * @hba: host controller instance
+ * @on: If true, enable clocks else disable them.
+ * @status: PRE_CHANGE or POST_CHANGE notify
+ *
+ * Returns 0 on success, non-zero on failure.
+ */
+static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
+				enum ufs_notify_change_status status)
+{
+	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+	int ret = 0;
+
+	/*
+	 * In case ufs_mtk_init() is not yet done, simply ignore.
+	 * This ufs_mtk_setup_clocks() shall be called from
+	 * ufs_mtk_init() after init is done.
+	 */
+	if (!host)
+		return 0;
+
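+	/*
+	 * Order matters here: the mphy is powered off before the
+	 * controller clocks are gated (PRE_CHANGE of a disable) and
+	 * powered on only once they run again (POST_CHANGE of an enable).
+	 */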
+	switch (status) {
+	case PRE_CHANGE:
+		if (!on)
+			ret = phy_power_off(host->mphy);
+		break;
+	case POST_CHANGE:
+		if (on)
+			ret = phy_power_on(host->mphy);
+		break;
+	}
+
+	return ret;
+}
+
+/**
+ * ufs_mtk_init - find other essential mmio bases
+ * @hba: host controller instance
+ *
+ * Binds PHY with controller and powers up PHY enabling clocks
+ * and regulators.
+ *
+ * Returns -EPROBE_DEFER if binding fails, returns negative error
+ * on phy power up failure and returns zero on success.
+ */
+static int ufs_mtk_init(struct ufs_hba *hba)
+{
+	struct ufs_mtk_host *host;
+	struct device *dev = hba->dev;
+	int err = 0;
+
+	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+	if (!host) {
+		err = -ENOMEM;
+		dev_info(dev, "%s: no memory for mtk ufs host\n", __func__);
+		goto out;
+	}
+
+	host->hba = hba;
+	ufshcd_set_variant(hba, host);
+
+	err = ufs_mtk_bind_mphy(hba);
+	if (err)
+		goto out_variant_clear;
+
+	/*
+	 * ufshcd_vops_init() is invoked after
+	 * ufshcd_setup_clock(true) in ufshcd_hba_init() thus
+	 * phy clock setup is skipped.
+	 *
+	 * Enable phy clocks specifically here.
+	 */
+	ufs_mtk_setup_clocks(hba, true, POST_CHANGE);
+
+	goto out;
+
+out_variant_clear:
+	ufshcd_set_variant(hba, NULL);
+out:
+	return err;
+}
+
+static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
+				  struct ufs_pa_layer_attr *dev_max_params,
+				  struct ufs_pa_layer_attr *dev_req_params)
+{
+	struct ufs_dev_params host_cap;
+	int ret;
+
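+	/*
+	 * Advertise the host's own limits; ufshcd_get_pwr_dev_param()
+	 * intersects them with the device capabilities in dev_max_params.
+	 */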
+	host_cap.tx_lanes = UFS_MTK_LIMIT_NUM_LANES_TX;
+	host_cap.rx_lanes = UFS_MTK_LIMIT_NUM_LANES_RX;
+	host_cap.hs_rx_gear = UFS_MTK_LIMIT_HSGEAR_RX;
+	host_cap.hs_tx_gear = UFS_MTK_LIMIT_HSGEAR_TX;
+	host_cap.pwm_rx_gear = UFS_MTK_LIMIT_PWMGEAR_RX;
+	host_cap.pwm_tx_gear = UFS_MTK_LIMIT_PWMGEAR_TX;
+	host_cap.rx_pwr_pwm = UFS_MTK_LIMIT_RX_PWR_PWM;
+	host_cap.tx_pwr_pwm = UFS_MTK_LIMIT_TX_PWR_PWM;
+	host_cap.rx_pwr_hs = UFS_MTK_LIMIT_RX_PWR_HS;
+	host_cap.tx_pwr_hs = UFS_MTK_LIMIT_TX_PWR_HS;
+	host_cap.hs_rate = UFS_MTK_LIMIT_HS_RATE;
+	host_cap.desired_working_mode =
+				UFS_MTK_LIMIT_DESIRED_MODE;
+
+	ret = ufshcd_get_pwr_dev_param(&host_cap,
+				       dev_max_params,
+				       dev_req_params);
+	if (ret) {
+		pr_info("%s: failed to determine capabilities\n",
+			__func__);
+	}
+
+	return ret;
+}
+
+static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
+				     enum ufs_notify_change_status stage,
+				     struct ufs_pa_layer_attr *dev_max_params,
+				     struct ufs_pa_layer_attr *dev_req_params)
+{
+	int ret = 0;
+
+	switch (stage) {
+	case PRE_CHANGE:
+		ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
+					     dev_req_params);
+		break;
+	case POST_CHANGE:
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int ufs_mtk_pre_link(struct ufs_hba *hba)
+{
+	int ret;
+	u32 tmp;
+
+	/* disable deep stall */
+	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
+	if (ret)
+		return ret;
+
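+	/* Bit 6 is taken to be the deep-stall enable; no named enum exists. */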
+	tmp &= ~(1 << 6);
+
+	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
+
+	return ret;
+}
+
+static int ufs_mtk_post_link(struct ufs_hba *hba)
+{
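+	/*
+	 * As on other hosts, some UFS devices misbehave with LCC enabled,
+	 * so clear PA_Local_TX_LCC_Enable before the link is used.
+	 */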
+	/* disable device LCC */
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0);
+
+	/* enable unipro clock gating feature */
+	ufs_mtk_cfg_unipro_cg(hba, true);
+
+	return 0;
+}
+
+static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
+				       enum ufs_notify_change_status stage)
+{
+	int ret = 0;
+
+	switch (stage) {
+	case PRE_CHANGE:
+		ret = ufs_mtk_pre_link(hba);
+		break;
+	case POST_CHANGE:
+		ret = ufs_mtk_post_link(hba);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+{
+	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+	if (ufshcd_is_link_hibern8(hba))
+		phy_power_off(host->mphy);
+
+	return 0;
+}
+
+static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+{
+	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+	if (ufshcd_is_link_hibern8(hba))
+		phy_power_on(host->mphy);
+
+	return 0;
+}
+
+/**
+ * ufs_hba_mtk_vops - UFS MTK specific variant operations
+ *
+ * The variant operations configure the necessary controller and PHY
+ * handshake during initialization.
+ */
+static struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
+	.name                = "mediatek.ufshci",
+	.init                = ufs_mtk_init,
+	.setup_clocks        = ufs_mtk_setup_clocks,
+	.link_startup_notify = ufs_mtk_link_startup_notify,
+	.pwr_change_notify   = ufs_mtk_pwr_change_notify,
+	.suspend             = ufs_mtk_suspend,
+	.resume              = ufs_mtk_resume,
+};
+
+/**
+ * ufs_mtk_probe - probe routine of the driver
+ * @pdev: pointer to platform device handle
+ *
+ * Return zero for success and non-zero for failure
+ */
+static int ufs_mtk_probe(struct platform_device *pdev)
+{
+	int err;
+	struct device *dev = &pdev->dev;
+
+	/* perform generic probe */
+	err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);
+	if (err)
+		dev_info(dev, "probe failed %d\n", err);
+
+	return err;
+}
+
+/**
+ * ufs_mtk_remove - set driver_data of the device to NULL
+ * @pdev: pointer to platform device handle
+ *
+ * Always return 0
+ */
+static int ufs_mtk_remove(struct platform_device *pdev)
+{
+	struct ufs_hba *hba = platform_get_drvdata(pdev);
+
+	pm_runtime_get_sync(&(pdev)->dev);
+	ufshcd_remove(hba);
+	return 0;
+}
+
+static const struct of_device_id ufs_mtk_of_match[] = {
+	{ .compatible = "mediatek,mt8183-ufshci"},
+	{},
+};
+
+static const struct dev_pm_ops ufs_mtk_pm_ops = {
+	.suspend         = ufshcd_pltfrm_suspend,
+	.resume          = ufshcd_pltfrm_resume,
+	.runtime_suspend = ufshcd_pltfrm_runtime_suspend,
+	.runtime_resume  = ufshcd_pltfrm_runtime_resume,
+	.runtime_idle    = ufshcd_pltfrm_runtime_idle,
+};
+
+static struct platform_driver ufs_mtk_pltform = {
+	.probe      = ufs_mtk_probe,
+	.remove     = ufs_mtk_remove,
+	.shutdown   = ufshcd_pltfrm_shutdown,
+	.driver = {
+		.name   = "ufshcd-mtk",
+		.pm     = &ufs_mtk_pm_ops,
+		.of_match_table = ufs_mtk_of_match,
+	},
+};
+
+MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>");
+MODULE_AUTHOR("Peter Wang <peter.wang@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek UFS Host Driver");
+MODULE_LICENSE("GPL v2");
+
+module_platform_driver(ufs_mtk_pltform);
diff --git a/drivers/scsi/ufs/ufs-mediatek.h b/drivers/scsi/ufs/ufs-mediatek.h
new file mode 100644
index 0000000..19f8c42
--- /dev/null
+++ b/drivers/scsi/ufs/ufs-mediatek.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 MediaTek Inc.
+ */
+
+#ifndef _UFS_MEDIATEK_H
+#define _UFS_MEDIATEK_H
+
+/*
+ * Vendor specific pre-defined parameters
+ */
+#define UFS_MTK_LIMIT_NUM_LANES_RX  1
+#define UFS_MTK_LIMIT_NUM_LANES_TX  1
+#define UFS_MTK_LIMIT_HSGEAR_RX     UFS_HS_G3
+#define UFS_MTK_LIMIT_HSGEAR_TX     UFS_HS_G3
+#define UFS_MTK_LIMIT_PWMGEAR_RX    UFS_PWM_G4
+#define UFS_MTK_LIMIT_PWMGEAR_TX    UFS_PWM_G4
+#define UFS_MTK_LIMIT_RX_PWR_PWM    SLOW_MODE
+#define UFS_MTK_LIMIT_TX_PWR_PWM    SLOW_MODE
+#define UFS_MTK_LIMIT_RX_PWR_HS     FAST_MODE
+#define UFS_MTK_LIMIT_TX_PWR_HS     FAST_MODE
+#define UFS_MTK_LIMIT_HS_RATE       PA_HS_MODE_B
+#define UFS_MTK_LIMIT_DESIRED_MODE  UFS_HS_MODE
+
+/*
+ * Other attributes
+ */
+#define VS_DEBUGCLOCKENABLE         0xD0A1
+#define VS_SAVEPOWERCONTROL         0xD0A6
+#define VS_UNIPROPOWERDOWNCONTROL   0xD0A8
+
+/*
+ * VS_DEBUGCLOCKENABLE
+ */
+enum {
+	TX_SYMBOL_CLK_REQ_FORCE = 5,
+};
+
+/*
+ * VS_SAVEPOWERCONTROL
+ */
+enum {
+	RX_SYMBOL_CLK_GATE_EN   = 0,
+	SYS_CLK_GATE_EN         = 2,
+	TX_CLK_GATE_EN          = 3,
+};
+
+struct ufs_mtk_host {
+	struct ufs_hba *hba;
+	struct phy *mphy;
+};
+
+#endif /* !_UFS_MEDIATEK_H */
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 75ee590..a5b7148 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -1,22 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  */
 
+#include <linux/acpi.h>
 #include <linux/time.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/phy/phy.h>
-#include <linux/phy/phy-qcom-ufs.h>
+#include <linux/gpio/consumer.h>
+#include <linux/reset-controller.h>
 
 #include "ufshcd.h"
 #include "ufshcd-pltfrm.h"
@@ -50,6 +43,11 @@
 static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
 						       u32 clk_cycles);
 
+static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd)
+{
+	return container_of(rcd, struct ufs_qcom_host, rcdev);
+}
+
 static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len,
 				       const char *prefix, void *priv)
 {
@@ -70,20 +68,27 @@
 }
 
 static int ufs_qcom_host_clk_get(struct device *dev,
-		const char *name, struct clk **clk_out)
+		const char *name, struct clk **clk_out, bool optional)
 {
 	struct clk *clk;
 	int err = 0;
 
 	clk = devm_clk_get(dev, name);
-	if (IS_ERR(clk)) {
-		err = PTR_ERR(clk);
-		dev_err(dev, "%s: failed to get %s err %d",
-				__func__, name, err);
-	} else {
+	if (!IS_ERR(clk)) {
 		*clk_out = clk;
+		return 0;
 	}
 
+	err = PTR_ERR(clk);
+
+	if (optional && err == -ENOENT) {
+		*clk_out = NULL;
+		return 0;
+	}
+
+	if (err != -EPROBE_DEFER)
+		dev_err(dev, "failed to get %s err %d\n", name, err);
+
 	return err;
 }
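
The reworked getter treats a missing clock as fatal only when it is required: -ENOENT on an optional clock yields a NULL *clk_out, which the clk API accepts as a no-op, and -EPROBE_DEFER propagates silently. The same pattern can also be expressed with devm_clk_get_optional(), available since v5.1; a minimal caller-side sketch (clock name taken from the lane-clock code below):

    /* Sketch: optional clock lookup without the helper above. */
    struct clk *c = devm_clk_get_optional(dev, "tx_lane1_sync_clk");

    if (IS_ERR(c))
            return PTR_ERR(c);      /* real errors, including -EPROBE_DEFER */
    /* c may be NULL here; clk_prepare_enable(NULL) succeeds as a no-op. */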
 
@@ -104,11 +109,9 @@
 	if (!host->is_lane_clks_enabled)
 		return;
 
-	if (host->hba->lanes_per_direction > 1)
-		clk_disable_unprepare(host->tx_l1_sync_clk);
+	clk_disable_unprepare(host->tx_l1_sync_clk);
 	clk_disable_unprepare(host->tx_l0_sync_clk);
-	if (host->hba->lanes_per_direction > 1)
-		clk_disable_unprepare(host->rx_l1_sync_clk);
+	clk_disable_unprepare(host->rx_l1_sync_clk);
 	clk_disable_unprepare(host->rx_l0_sync_clk);
 
 	host->is_lane_clks_enabled = false;
@@ -132,24 +135,21 @@
 	if (err)
 		goto disable_rx_l0;
 
-	if (host->hba->lanes_per_direction > 1) {
-		err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
+	err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
 			host->rx_l1_sync_clk);
-		if (err)
-			goto disable_tx_l0;
+	if (err)
+		goto disable_tx_l0;
 
-		err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
+	err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
 			host->tx_l1_sync_clk);
-		if (err)
-			goto disable_rx_l1;
-	}
+	if (err)
+		goto disable_rx_l1;
 
 	host->is_lane_clks_enabled = true;
 	goto out;
 
 disable_rx_l1:
-	if (host->hba->lanes_per_direction > 1)
-		clk_disable_unprepare(host->rx_l1_sync_clk);
+	clk_disable_unprepare(host->rx_l1_sync_clk);
 disable_tx_l0:
 	clk_disable_unprepare(host->tx_l0_sync_clk);
 disable_rx_l0:
@@ -163,25 +163,28 @@
 	int err = 0;
 	struct device *dev = host->hba->dev;
 
-	err = ufs_qcom_host_clk_get(dev,
-			"rx_lane0_sync_clk", &host->rx_l0_sync_clk);
+	if (has_acpi_companion(dev))
+		return 0;
+
+	err = ufs_qcom_host_clk_get(dev, "rx_lane0_sync_clk",
+					&host->rx_l0_sync_clk, false);
 	if (err)
 		goto out;
 
-	err = ufs_qcom_host_clk_get(dev,
-			"tx_lane0_sync_clk", &host->tx_l0_sync_clk);
+	err = ufs_qcom_host_clk_get(dev, "tx_lane0_sync_clk",
+					&host->tx_l0_sync_clk, false);
 	if (err)
 		goto out;
 
 	/* In case of single lane per direction, don't read lane1 clocks */
 	if (host->hba->lanes_per_direction > 1) {
 		err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
-			&host->rx_l1_sync_clk);
+			&host->rx_l1_sync_clk, false);
 		if (err)
 			goto out;
 
 		err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
-			&host->tx_l1_sync_clk);
+			&host->tx_l1_sync_clk, true);
 	}
 out:
 	return err;
@@ -189,22 +192,9 @@
 
 static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
 {
-	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-	struct phy *phy = host->generic_phy;
 	u32 tx_lanes;
-	int err = 0;
 
-	err = ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
-	if (err)
-		goto out;
-
-	err = ufs_qcom_phy_set_tx_lane_enable(phy, tx_lanes);
-	if (err)
-		dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable failed\n",
-			__func__);
-
-out:
-	return err;
+	return ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
 }
 
 static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
@@ -267,11 +257,6 @@
 	if (is_rate_B)
 		phy_set_mode(phy, PHY_MODE_UFS_HS_B);
 
-	/* Assert PHY reset and apply PHY calibration values */
-	ufs_qcom_assert_reset(hba);
-	/* provide 1ms delay to let the reset pulse propagate */
-	usleep_range(1000, 1100);
-
 	/* phy initialization - calibrate the phy */
 	ret = phy_init(phy);
 	if (ret) {
@@ -280,15 +265,6 @@
 		goto out;
 	}
 
-	/* De-assert PHY reset and start serdes */
-	ufs_qcom_deassert_reset(hba);
-
-	/*
-	 * after reset deassertion, phy will need all ref clocks,
-	 * voltage, current to settle down before starting serdes.
-	 */
-	usleep_range(1000, 1100);
-
 	/* power on phy - start serdes and phy's power and clocks */
 	ret = phy_power_on(phy);
 	if (ret) {
@@ -302,7 +278,6 @@
 	return 0;
 
 out_disable_phy:
-	ufs_qcom_assert_reset(hba);
 	phy_exit(phy);
 out:
 	return ret;
@@ -566,21 +541,10 @@
 		ufs_qcom_disable_lane_clks(host);
 		phy_power_off(phy);
 
-		/* Assert PHY soft reset */
-		ufs_qcom_assert_reset(hba);
-		goto out;
-	}
-
-	/*
-	 * If UniPro link is not active, PHY ref_clk, main PHY analog power
-	 * rail and low noise analog power rail for PLL can be switched off.
-	 */
-	if (!ufs_qcom_is_link_active(hba)) {
+	} else if (!ufs_qcom_is_link_active(hba)) {
 		ufs_qcom_disable_lane_clks(host);
-		phy_power_off(phy);
 	}
 
-out:
 	return ret;
 }
 
@@ -590,118 +554,25 @@
 	struct phy *phy = host->generic_phy;
 	int err;
 
-	err = phy_power_on(phy);
-	if (err) {
-		dev_err(hba->dev, "%s: failed enabling regs, err = %d\n",
-			__func__, err);
-		goto out;
-	}
+	if (ufs_qcom_is_link_off(hba)) {
+		err = phy_power_on(phy);
+		if (err) {
+			dev_err(hba->dev, "%s: failed PHY power on: %d\n",
+				__func__, err);
+			return err;
+		}
 
-	err = ufs_qcom_enable_lane_clks(host);
-	if (err)
-		goto out;
+		err = ufs_qcom_enable_lane_clks(host);
+		if (err)
+			return err;
+
+	} else if (!ufs_qcom_is_link_active(hba)) {
+		err = ufs_qcom_enable_lane_clks(host);
+		if (err)
+			return err;
+	}
 
 	hba->is_sys_suspended = false;
-
-out:
-	return err;
-}
-
-struct ufs_qcom_dev_params {
-	u32 pwm_rx_gear;	/* pwm rx gear to work in */
-	u32 pwm_tx_gear;	/* pwm tx gear to work in */
-	u32 hs_rx_gear;		/* hs rx gear to work in */
-	u32 hs_tx_gear;		/* hs tx gear to work in */
-	u32 rx_lanes;		/* number of rx lanes */
-	u32 tx_lanes;		/* number of tx lanes */
-	u32 rx_pwr_pwm;		/* rx pwm working pwr */
-	u32 tx_pwr_pwm;		/* tx pwm working pwr */
-	u32 rx_pwr_hs;		/* rx hs working pwr */
-	u32 tx_pwr_hs;		/* tx hs working pwr */
-	u32 hs_rate;		/* rate A/B to work in HS */
-	u32 desired_working_mode;
-};
-
-static int ufs_qcom_get_pwr_dev_param(struct ufs_qcom_dev_params *qcom_param,
-				      struct ufs_pa_layer_attr *dev_max,
-				      struct ufs_pa_layer_attr *agreed_pwr)
-{
-	int min_qcom_gear;
-	int min_dev_gear;
-	bool is_dev_sup_hs = false;
-	bool is_qcom_max_hs = false;
-
-	if (dev_max->pwr_rx == FAST_MODE)
-		is_dev_sup_hs = true;
-
-	if (qcom_param->desired_working_mode == FAST) {
-		is_qcom_max_hs = true;
-		min_qcom_gear = min_t(u32, qcom_param->hs_rx_gear,
-				      qcom_param->hs_tx_gear);
-	} else {
-		min_qcom_gear = min_t(u32, qcom_param->pwm_rx_gear,
-				      qcom_param->pwm_tx_gear);
-	}
-
-	/*
-	 * device doesn't support HS but qcom_param->desired_working_mode is
-	 * HS, thus device and qcom_param don't agree
-	 */
-	if (!is_dev_sup_hs && is_qcom_max_hs) {
-		pr_err("%s: failed to agree on power mode (device doesn't support HS but requested power is HS)\n",
-			__func__);
-		return -ENOTSUPP;
-	} else if (is_dev_sup_hs && is_qcom_max_hs) {
-		/*
-		 * since device supports HS, it supports FAST_MODE.
-		 * since qcom_param->desired_working_mode is also HS
-		 * then final decision (FAST/FASTAUTO) is done according
-		 * to qcom_params as it is the restricting factor
-		 */
-		agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
-						qcom_param->rx_pwr_hs;
-	} else {
-		/*
-		 * here qcom_param->desired_working_mode is PWM.
-		 * it doesn't matter whether device supports HS or PWM,
-		 * in both cases qcom_param->desired_working_mode will
-		 * determine the mode
-		 */
-		 agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
-						qcom_param->rx_pwr_pwm;
-	}
-
-	/*
-	 * we would like tx to work in the minimum number of lanes
-	 * between device capability and vendor preferences.
-	 * the same decision will be made for rx
-	 */
-	agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx,
-						qcom_param->tx_lanes);
-	agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx,
-						qcom_param->rx_lanes);
-
-	/* device maximum gear is the minimum between device rx and tx gears */
-	min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);
-
-	/*
-	 * if both device capabilities and vendor pre-defined preferences are
-	 * both HS or both PWM then set the minimum gear to be the chosen
-	 * working gear.
-	 * if one is PWM and one is HS then the one that is PWM get to decide
-	 * what is the gear, as it is the one that also decided previously what
-	 * pwr the device will be configured to.
-	 */
-	if ((is_dev_sup_hs && is_qcom_max_hs) ||
-	    (!is_dev_sup_hs && !is_qcom_max_hs))
-		agreed_pwr->gear_rx = agreed_pwr->gear_tx =
-			min_t(u32, min_dev_gear, min_qcom_gear);
-	else if (!is_dev_sup_hs)
-		agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_dev_gear;
-	else
-		agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_qcom_gear;
-
-	agreed_pwr->hs_rate = qcom_param->hs_rate;
 	return 0;
 }
 
@@ -930,12 +801,9 @@
 				struct ufs_pa_layer_attr *dev_max_params,
 				struct ufs_pa_layer_attr *dev_req_params)
 {
-	u32 val;
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-	struct phy *phy = host->generic_phy;
-	struct ufs_qcom_dev_params ufs_qcom_cap;
+	struct ufs_dev_params ufs_qcom_cap;
 	int ret = 0;
-	int res = 0;
 
 	if (!dev_req_params) {
 		pr_err("%s: incoming dev_req_params is NULL\n", __func__);
@@ -973,9 +841,9 @@
 				ufs_qcom_cap.hs_rx_gear = UFS_HS_G2;
 		}
 
-		ret = ufs_qcom_get_pwr_dev_param(&ufs_qcom_cap,
-						 dev_max_params,
-						 dev_req_params);
+		ret = ufshcd_get_pwr_dev_param(&ufs_qcom_cap,
+					       dev_max_params,
+					       dev_req_params);
 		if (ret) {
 			pr_err("%s: failed to determine capabilities\n",
 					__func__);
@@ -1001,14 +869,6 @@
 			ret = -EINVAL;
 		}
 
-		val = ~(MAX_U32 << dev_req_params->lane_tx);
-		res = ufs_qcom_phy_set_tx_lane_enable(phy, val);
-		if (res) {
-			dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable() failed res = %d\n",
-				__func__, res);
-			ret = res;
-		}
-
 		/* cache the power mode parameters to use internally */
 		memcpy(&host->dev_req_params,
 				dev_req_params, sizeof(*dev_req_params));
@@ -1138,8 +998,6 @@
 		return 0;
 
 	if (on && (status == POST_CHANGE)) {
-		phy_power_on(host->generic_phy);
-
 		/* enable the device ref clock for HS mode*/
 		if (ufshcd_is_hs_mode(&hba->pwr_info))
 			ufs_qcom_dev_ref_clk_ctrl(host, true);
@@ -1151,9 +1009,6 @@
 		if (!ufs_qcom_is_link_active(hba)) {
 			/* disable device ref_clk */
 			ufs_qcom_dev_ref_clk_ctrl(host, false);
-
-			/* powering off PHY during aggressive clk gating */
-			phy_power_off(host->generic_phy);
 		}
 
 		vote = host->bus_vote.min_bw_vote;
@@ -1167,6 +1022,41 @@
 	return err;
 }
 
+static int
+ufs_qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+	struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);
+
+	/* Currently this code only knows about a single reset. */
+	WARN_ON(id);
+	ufs_qcom_assert_reset(host->hba);
+	/* provide 1ms delay to let the reset pulse propagate. */
+	usleep_range(1000, 1100);
+	return 0;
+}
+
+static int
+ufs_qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+	struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);
+
+	/* Currently this code only knows about a single reset. */
+	WARN_ON(id);
+	ufs_qcom_deassert_reset(host->hba);
+
+	/*
+	 * After reset deassertion, the PHY needs all ref clocks, voltage
+	 * and current to settle before the SerDes can start.
+	 */
+	usleep_range(1000, 1100);
+	return 0;
+}
+
+static const struct reset_control_ops ufs_qcom_reset_ops = {
+	.assert = ufs_qcom_reset_assert,
+	.deassert = ufs_qcom_reset_deassert,
+};
+
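With the host now exposing a reset_controller_dev, the PHY driver can reach the controller-internal PHY reset through the standard reset API instead of the assert/deassert calls deleted from the power-up sequence above. A hedged consumer-side sketch (the reset name is an assumption, not taken from this patch):

    /* Sketch of a consumer, e.g. in a PHY driver's probe; names hypothetical. */
    #include <linux/reset.h>

    struct reset_control *ufs_reset;

    ufs_reset = devm_reset_control_get_exclusive(dev, "ufsphy");
    if (IS_ERR(ufs_reset))
            return PTR_ERR(ufs_reset);

    reset_control_assert(ufs_reset);    /* lands in ufs_qcom_reset_assert() */
    /* ... apply PHY calibration ... */
    reset_control_deassert(ufs_reset);  /* lands in ufs_qcom_reset_deassert() */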
 #define	ANDROID_BOOT_DEV_MAX	30
 static char android_boot_dev[ANDROID_BOOT_DEV_MAX];
 
@@ -1211,6 +1101,17 @@
 	host->hba = hba;
 	ufshcd_set_variant(hba, host);
 
+	/* Fire up the reset controller. Failure here is non-fatal. */
+	host->rcdev.of_node = dev->of_node;
+	host->rcdev.ops = &ufs_qcom_reset_ops;
+	host->rcdev.owner = dev->driver->owner;
+	host->rcdev.nr_resets = 1;
+	err = devm_reset_controller_register(dev, &host->rcdev);
+	if (err) {
+		dev_warn(dev, "Failed to register reset controller\n");
+		err = 0;
+	}
+
 	/*
 	 * voting/devoting device ref_clk source is time consuming hence
 	 * skip devoting it during aggressive clock gating. This clock
@@ -1228,8 +1129,21 @@
 			__func__, err);
 		goto out_variant_clear;
 	} else if (IS_ERR(host->generic_phy)) {
-		err = PTR_ERR(host->generic_phy);
-		dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
+		if (has_acpi_companion(dev)) {
+			host->generic_phy = NULL;
+		} else {
+			err = PTR_ERR(host->generic_phy);
+			dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
+			goto out_variant_clear;
+		}
+	}
+
+	host->device_reset = devm_gpiod_get_optional(dev, "reset",
+						     GPIOD_OUT_HIGH);
+	if (IS_ERR(host->device_reset)) {
+		err = PTR_ERR(host->device_reset);
+		if (err != -EPROBE_DEFER)
+			dev_err(dev, "failed to acquire reset gpio: %d\n", err);
 		goto out_variant_clear;
 	}
 
@@ -1264,10 +1178,6 @@
 		}
 	}
 
-	/* update phy revision information before calling phy_init() */
-	ufs_qcom_phy_save_controller_version(host->generic_phy,
-		host->hw_ver.major, host->hw_ver.minor, host->hw_ver.step);
-
 	err = ufs_qcom_init_lane_clks(host);
 	if (err)
 		goto out_variant_clear;
@@ -1643,12 +1553,37 @@
 }
 
 /**
+ * ufs_qcom_device_reset() - toggle the (optional) device reset line
+ * @hba: per-adapter instance
+ *
+ * Toggles the (optional) reset line to reset the attached device.
+ */
+static void ufs_qcom_device_reset(struct ufs_hba *hba)
+{
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+	/* reset gpio is optional */
+	if (!host->device_reset)
+		return;
+
+	/*
+	 * The UFS device shall detect reset pulses of 1us; sleep for 10us
+	 * to be on the safe side.
+	 */
+	gpiod_set_value_cansleep(host->device_reset, 1);
+	usleep_range(10, 15);
+
+	gpiod_set_value_cansleep(host->device_reset, 0);
+	usleep_range(10, 15);
+}
+
+/**
  * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
  *
  * The variant operations configure the necessary controller and PHY
  * handshake during initialization.
  */
-static struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
+static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
 	.name                   = "qcom",
 	.init                   = ufs_qcom_init,
 	.exit                   = ufs_qcom_exit,
@@ -1662,6 +1597,7 @@
 	.suspend		= ufs_qcom_suspend,
 	.resume			= ufs_qcom_resume,
 	.dbg_register_dump	= ufs_qcom_dump_dbg_regs,
+	.device_reset		= ufs_qcom_device_reset,
 };
 
 /**
@@ -1704,6 +1640,14 @@
 };
 MODULE_DEVICE_TABLE(of, ufs_qcom_of_match);
 
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id ufs_qcom_acpi_match[] = {
+	{ "QCOM24A5" },
+	{ },
+};
+MODULE_DEVICE_TABLE(acpi, ufs_qcom_acpi_match);
+#endif
+
 static const struct dev_pm_ops ufs_qcom_pm_ops = {
 	.suspend	= ufshcd_pltfrm_suspend,
 	.resume		= ufshcd_pltfrm_resume,
@@ -1720,6 +1664,7 @@
 		.name	= "ufshcd-qcom",
 		.pm	= &ufs_qcom_pm_ops,
 		.of_match_table = of_match_ptr(ufs_qcom_of_match),
+		.acpi_match_table = ACPI_PTR(ufs_qcom_acpi_match),
 	},
 };
 module_platform_driver(ufs_qcom_pltform);
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index 295f4be..d401f17 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -1,19 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  */
 
 #ifndef UFS_QCOM_H_
 #define UFS_QCOM_H_
 
+#include <linux/reset-controller.h>
+
 #define MAX_UFS_QCOM_HOSTS	1
 #define MAX_U32                 (~(u32)0)
 #define MPHY_TX_FSM_STATE       0x41
@@ -129,11 +122,6 @@
 	MASK_CLK_NS_REG                     = 0xFFFC00,
 };
 
-enum ufs_qcom_phy_init_type {
-	UFS_PHY_INIT_FULL,
-	UFS_PHY_INIT_CFG_RESTORE,
-};
-
 /* QCOM UFS debug print bit mask */
 #define UFS_QCOM_DBG_PRINT_REGS_EN	BIT(0)
 #define UFS_QCOM_DBG_PRINT_ICE_REGS_EN	BIT(1)
@@ -207,6 +195,8 @@
 	u8 select_minor;
 };
 
+struct gpio_desc;
+
 struct ufs_qcom_host {
 	/*
 	 * Set this capability if host controller supports the QUniPro mode
@@ -242,6 +232,10 @@
 	/* Bitmask for enabling debug prints */
 	u32 dbg_print_en;
 	struct ufs_qcom_testbus testbus;
+
+	struct reset_controller_dev rcdev;
+
+	struct gpio_desc *device_reset;
 };
 
 static inline u32
diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c
index 8d9332b..969a36b 100644
--- a/drivers/scsi/ufs/ufs-sysfs.c
+++ b/drivers/scsi/ufs/ufs-sysfs.c
@@ -122,7 +122,7 @@
 {
 	unsigned long flags;
 
-	if (!(hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT))
+	if (!ufshcd_is_auto_hibern8_supported(hba))
 		return;
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
@@ -164,7 +164,7 @@
 {
 	struct ufs_hba *hba = dev_get_drvdata(dev);
 
-	if (!(hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT))
+	if (!ufshcd_is_auto_hibern8_supported(hba))
 		return -EOPNOTSUPP;
 
 	return snprintf(buf, PAGE_SIZE, "%d\n", ufshcd_ahit_to_us(hba->ahit));
@@ -177,7 +177,7 @@
 	struct ufs_hba *hba = dev_get_drvdata(dev);
 	unsigned int timer;
 
-	if (!(hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT))
+	if (!ufshcd_is_auto_hibern8_supported(hba))
 		return -EOPNOTSUPP;
 
 	if (kstrtouint(buf, 0, &timer))
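These checks now go through a helper rather than open-coding the capability mask. The helper is defined outside this hunk (in ufshcd.h); presumably it is a thin wrapper over the old test, roughly:

    /* Assumed shape of the helper; the real definition lives in ufshcd.h. */
    static inline bool ufshcd_is_auto_hibern8_supported(struct ufs_hba *hba)
    {
            return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT);
    }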
@@ -571,9 +571,10 @@
 	int ret;							\
 	int desc_len = QUERY_DESC_MAX_SIZE;				\
 	u8 *desc_buf;							\
+									\
 	desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_ATOMIC);		\
-	if (!desc_buf)							\
-		return -ENOMEM;						\
+	if (!desc_buf)                                                  \
+		return -ENOMEM;                                         \
 	ret = ufshcd_query_descriptor_retry(hba,			\
 		UPIU_QUERY_OPCODE_READ_DESC, QUERY_DESC_IDN_DEVICE,	\
 		0, 0, desc_buf, &desc_len);				\
@@ -582,14 +583,13 @@
 		goto out;						\
 	}								\
 	index = desc_buf[DEVICE_DESC_PARAM##_pname];			\
-	memset(desc_buf, 0, QUERY_DESC_MAX_SIZE);			\
-	if (ufshcd_read_string_desc(hba, index, desc_buf,		\
-		QUERY_DESC_MAX_SIZE, true)) {				\
-		ret = -EINVAL;						\
+	kfree(desc_buf);						\
+	desc_buf = NULL;						\
+	ret = ufshcd_read_string_desc(hba, index, &desc_buf,		\
+				      SD_ASCII_STD);			\
+	if (ret < 0)							\
 		goto out;						\
-	}								\
-	ret = snprintf(buf, PAGE_SIZE, "%s\n",				\
-		desc_buf + QUERY_DESC_HDR_SIZE);			\
+	ret = snprintf(buf, PAGE_SIZE, "%s\n", desc_buf);		\
 out:									\
 	kfree(desc_buf);						\
 	return ret;							\
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 14e5bf7..3327981 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -38,15 +38,15 @@
 
 #include <linux/mutex.h>
 #include <linux/types.h>
+#include <uapi/scsi/scsi_bsg_ufs.h>
 
-#define MAX_CDB_SIZE	16
-#define GENERAL_UPIU_REQUEST_SIZE 32
+#define GENERAL_UPIU_REQUEST_SIZE (sizeof(struct utp_upiu_req))
 #define QUERY_DESC_MAX_SIZE       255
 #define QUERY_DESC_MIN_SIZE       2
 #define QUERY_DESC_HDR_SIZE       2
 #define QUERY_OSF_SIZE            (GENERAL_UPIU_REQUEST_SIZE - \
 					(sizeof(struct utp_upiu_header)))
-#define RESPONSE_UPIU_SENSE_DATA_LENGTH	18
+#define UFS_SENSE_SIZE	18
 
 #define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\
 			cpu_to_be32((byte3 << 24) | (byte2 << 16) |\
@@ -195,7 +195,7 @@
 	QUERY_DESC_CONFIGURATION_DEF_SIZE	= 0x90,
 	QUERY_DESC_UNIT_DEF_SIZE		= 0x23,
 	QUERY_DESC_INTERCONNECT_DEF_SIZE	= 0x06,
-	QUERY_DESC_GEOMETRY_DEF_SIZE		= 0x44,
+	QUERY_DESC_GEOMETRY_DEF_SIZE		= 0x48,
 	QUERY_DESC_POWER_DEF_SIZE		= 0x62,
 	QUERY_DESC_HEALTH_DEF_SIZE		= 0x25,
 };
@@ -378,6 +378,20 @@
 	UPIU_QUERY_OPCODE_TOGGLE_FLAG	= 0x8,
 };
 
+/* bRefClkFreq attribute values */
+enum ufs_ref_clk_freq {
+	REF_CLK_FREQ_19_2_MHZ	= 0,
+	REF_CLK_FREQ_26_MHZ	= 1,
+	REF_CLK_FREQ_38_4_MHZ	= 2,
+	REF_CLK_FREQ_52_MHZ	= 3,
+	REF_CLK_FREQ_INVAL	= -1,
+};
+
+struct ufs_ref_clk {
+	unsigned long freq_hz;
+	enum ufs_ref_clk_freq val;
+};
+
 /* Query response result code */
 enum {
 	QUERY_RESULT_SUCCESS                    = 0x00,
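
Pairing a raw frequency with its bRefClkFreq code suggests a lookup table when the driver programs the attribute from the actual ref-clk rate. A plausible sketch (the table itself is not part of this hunk):

    /* Hypothetical table driven off enum ufs_ref_clk_freq above. */
    static struct ufs_ref_clk ufs_ref_clk_freqs[] = {
            {19200000, REF_CLK_FREQ_19_2_MHZ},
            {26000000, REF_CLK_FREQ_26_MHZ},
            {38400000, REF_CLK_FREQ_38_4_MHZ},
            {52000000, REF_CLK_FREQ_52_MHZ},
            {0,        REF_CLK_FREQ_INVAL},
    };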
@@ -414,6 +428,7 @@
 	MASK_RSP_UPIU_DATA_SEG_LEN	= 0xFFFF,
 	MASK_RSP_EXCEPTION_EVENT        = 0x10000,
 	MASK_TM_SERVICE_RESP		= 0xFF,
+	MASK_TM_FUNC			= 0xFF,
 };
 
 /* Task management service response */
@@ -433,65 +448,6 @@
 };
 
 /**
- * struct utp_upiu_header - UPIU header structure
- * @dword_0: UPIU header DW-0
- * @dword_1: UPIU header DW-1
- * @dword_2: UPIU header DW-2
- */
-struct utp_upiu_header {
-	__be32 dword_0;
-	__be32 dword_1;
-	__be32 dword_2;
-};
-
-/**
- * struct utp_upiu_cmd - Command UPIU structure
- * @data_transfer_len: Data Transfer Length DW-3
- * @cdb: Command Descriptor Block CDB DW-4 to DW-7
- */
-struct utp_upiu_cmd {
-	__be32 exp_data_transfer_len;
-	u8 cdb[MAX_CDB_SIZE];
-};
-
-/**
- * struct utp_upiu_query - upiu request buffer structure for
- * query request.
- * @opcode: command to perform B-0
- * @idn: a value that indicates the particular type of data B-1
- * @index: Index to further identify data B-2
- * @selector: Index to further identify data B-3
- * @reserved_osf: spec reserved field B-4,5
- * @length: number of descriptor bytes to read/write B-6,7
- * @value: Attribute value to be written DW-5
- * @reserved: spec reserved DW-6,7
- */
-struct utp_upiu_query {
-	u8 opcode;
-	u8 idn;
-	u8 index;
-	u8 selector;
-	__be16 reserved_osf;
-	__be16 length;
-	__be32 value;
-	__be32 reserved[2];
-};
-
-/**
- * struct utp_upiu_req - general upiu request structure
- * @header:UPIU header structure DW-0 to DW-2
- * @sc: fields structure for scsi command DW-3 to DW-7
- * @qr: fields structure for query request DW-3 to DW-7
- */
-struct utp_upiu_req {
-	struct utp_upiu_header header;
-	union {
-		struct utp_upiu_cmd sc;
-		struct utp_upiu_query qr;
-	};
-};
-
-/**
  * struct utp_cmd_rsp - Response UPIU structure
  * @residual_transfer_count: Residual transfer count DW-3
  * @reserved: Reserved double words DW-4 to DW-7
@@ -502,7 +458,7 @@
 	__be32 residual_transfer_count;
 	__be32 reserved[4];
 	__be16 sense_data_len;
-	u8 sense_data[RESPONSE_UPIU_SENSE_DATA_LENGTH];
+	u8 sense_data[UFS_SENSE_SIZE];
 };
 
 /**
@@ -520,36 +476,6 @@
 };
 
 /**
- * struct utp_upiu_task_req - Task request UPIU structure
- * @header - UPIU header structure DW0 to DW-2
- * @input_param1: Input parameter 1 DW-3
- * @input_param2: Input parameter 2 DW-4
- * @input_param3: Input parameter 3 DW-5
- * @reserved: Reserved double words DW-6 to DW-7
- */
-struct utp_upiu_task_req {
-	struct utp_upiu_header header;
-	__be32 input_param1;
-	__be32 input_param2;
-	__be32 input_param3;
-	__be32 reserved[2];
-};
-
-/**
- * struct utp_upiu_task_rsp - Task Management Response UPIU structure
- * @header: UPIU header structure DW0-DW-2
- * @output_param1: Ouput parameter 1 DW3
- * @output_param2: Output parameter 2 DW4
- * @reserved: Reserved double words DW-5 to DW-7
- */
-struct utp_upiu_task_rsp {
-	struct utp_upiu_header header;
-	__be32 output_param1;
-	__be32 output_param2;
-	__be32 reserved[3];
-};
-
-/**
  * struct ufs_query_req - parameters for building a query request
  * @query_func: UPIU header query function
  * @upiu_req: the query request data
@@ -588,10 +514,8 @@
 	struct regulator *reg;
 	const char *name;
 	bool enabled;
-	bool unused;
 	int min_uV;
 	int max_uV;
-	int min_uA;
 	int max_uA;
 };
 
@@ -617,7 +541,7 @@
  */
 struct ufs_dev_desc {
 	u16 wmanufacturerid;
-	char model[MAX_MODEL_LEN + 1];
+	u8 *model;
 };
 
 /**
diff --git a/drivers/scsi/ufs/ufs_bsg.c b/drivers/scsi/ufs/ufs_bsg.c
new file mode 100644
index 0000000..dc2f6d2
--- /dev/null
+++ b/drivers/scsi/ufs/ufs_bsg.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * bsg endpoint that supports UPIUs
+ *
+ * Copyright (C) 2018 Western Digital Corporation
+ */
+#include "ufs_bsg.h"
+
+static int ufs_bsg_get_query_desc_size(struct ufs_hba *hba, int *desc_len,
+				       struct utp_upiu_query *qr)
+{
+	int desc_size = be16_to_cpu(qr->length);
+	int desc_id = qr->idn;
+	int ret;
+
+	if (desc_size <= 0)
+		return -EINVAL;
+
+	ret = ufshcd_map_desc_id_to_length(hba, desc_id, desc_len);
+	if (ret || !*desc_len)
+		return -EINVAL;
+
+	*desc_len = min_t(int, *desc_len, desc_size);
+
+	return 0;
+}
+
+static int ufs_bsg_verify_query_size(struct ufs_hba *hba,
+				     unsigned int request_len,
+				     unsigned int reply_len)
+{
+	int min_req_len = sizeof(struct ufs_bsg_request);
+	int min_rsp_len = sizeof(struct ufs_bsg_reply);
+
+	if (min_req_len > request_len || min_rsp_len > reply_len) {
+		dev_err(hba->dev, "not enough space assigned\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ufs_bsg_alloc_desc_buffer(struct ufs_hba *hba, struct bsg_job *job,
+				     uint8_t **desc_buff, int *desc_len,
+				     enum query_opcode desc_op)
+{
+	struct ufs_bsg_request *bsg_request = job->request;
+	struct utp_upiu_query *qr;
+	u8 *descp;
+
+	if (desc_op != UPIU_QUERY_OPCODE_WRITE_DESC &&
+	    desc_op != UPIU_QUERY_OPCODE_READ_DESC)
+		goto out;
+
+	qr = &bsg_request->upiu_req.qr;
+	if (ufs_bsg_get_query_desc_size(hba, desc_len, qr)) {
+		dev_err(hba->dev, "Illegal desc size\n");
+		return -EINVAL;
+	}
+
+	if (*desc_len > job->request_payload.payload_len) {
+		dev_err(hba->dev, "Illegal desc size\n");
+		return -EINVAL;
+	}
+
+	descp = kzalloc(*desc_len, GFP_KERNEL);
+	if (!descp)
+		return -ENOMEM;
+
+	if (desc_op == UPIU_QUERY_OPCODE_WRITE_DESC)
+		sg_copy_to_buffer(job->request_payload.sg_list,
+				  job->request_payload.sg_cnt, descp,
+				  *desc_len);
+
+	*desc_buff = descp;
+
+out:
+	return 0;
+}
+
+static int ufs_bsg_request(struct bsg_job *job)
+{
+	struct ufs_bsg_request *bsg_request = job->request;
+	struct ufs_bsg_reply *bsg_reply = job->reply;
+	struct ufs_hba *hba = shost_priv(dev_to_shost(job->dev->parent));
+	unsigned int req_len = job->request_len;
+	unsigned int reply_len = job->reply_len;
+	struct uic_command uc = {};
+	int msgcode;
+	uint8_t *desc_buff = NULL;
+	int desc_len = 0;
+	enum query_opcode desc_op = UPIU_QUERY_OPCODE_NOP;
+	int ret;
+
+	ret = ufs_bsg_verify_query_size(hba, req_len, reply_len);
+	if (ret)
+		goto out;
+
+	bsg_reply->reply_payload_rcv_len = 0;
+
+	pm_runtime_get_sync(hba->dev);
+
+	msgcode = bsg_request->msgcode;
+	switch (msgcode) {
+	case UPIU_TRANSACTION_QUERY_REQ:
+		desc_op = bsg_request->upiu_req.qr.opcode;
+		ret = ufs_bsg_alloc_desc_buffer(hba, job, &desc_buff,
+						&desc_len, desc_op);
+		if (ret)
+			goto out;
+
+		/* fall through */
+	case UPIU_TRANSACTION_NOP_OUT:
+	case UPIU_TRANSACTION_TASK_REQ:
+		ret = ufshcd_exec_raw_upiu_cmd(hba, &bsg_request->upiu_req,
+					       &bsg_reply->upiu_rsp, msgcode,
+					       desc_buff, &desc_len, desc_op);
+		if (ret)
+			dev_err(hba->dev,
+				"exe raw upiu: error code %d\n", ret);
+
+		break;
+	case UPIU_TRANSACTION_UIC_CMD:
+		memcpy(&uc, &bsg_request->upiu_req.uc, UIC_CMD_SIZE);
+		ret = ufshcd_send_uic_cmd(hba, &uc);
+		if (ret)
+			dev_err(hba->dev,
+				"send uic cmd: error code %d\n", ret);
+
+		memcpy(&bsg_reply->upiu_rsp.uc, &uc, UIC_CMD_SIZE);
+
+		break;
+	default:
+		ret = -ENOTSUPP;
+		dev_err(hba->dev, "unsupported msgcode 0x%x\n", msgcode);
+
+		break;
+	}
+
+	pm_runtime_put_sync(hba->dev);
+
+	if (!desc_buff)
+		goto out;
+
+	if (desc_op == UPIU_QUERY_OPCODE_READ_DESC && desc_len)
+		bsg_reply->reply_payload_rcv_len =
+			sg_copy_from_buffer(job->request_payload.sg_list,
+					    job->request_payload.sg_cnt,
+					    desc_buff, desc_len);
+
+	kfree(desc_buff);
+
+out:
+	bsg_reply->result = ret;
+	job->reply_len = sizeof(struct ufs_bsg_reply);
+	/* complete the job here only if no error */
+	if (ret == 0)
+		bsg_job_done(job, ret, bsg_reply->reply_payload_rcv_len);
+
+	return ret;
+}
+
+/**
+ * ufs_bsg_remove - detach and remove the added ufs-bsg node
+ * @hba: per adapter object
+ *
+ * Should be called when unloading the driver.
+ */
+void ufs_bsg_remove(struct ufs_hba *hba)
+{
+	struct device *bsg_dev = &hba->bsg_dev;
+
+	if (!hba->bsg_queue)
+		return;
+
+	bsg_remove_queue(hba->bsg_queue);
+
+	device_del(bsg_dev);
+	put_device(bsg_dev);
+}
+
+static inline void ufs_bsg_node_release(struct device *dev)
+{
+	put_device(dev->parent);
+}
+
+/**
+ * ufs_bsg_probe - Add ufs bsg device node
+ * @hba: per adapter object
+ *
+ * Called during initial loading of the driver, and before scsi_scan_host.
+ */
+int ufs_bsg_probe(struct ufs_hba *hba)
+{
+	struct device *bsg_dev = &hba->bsg_dev;
+	struct Scsi_Host *shost = hba->host;
+	struct device *parent = &shost->shost_gendev;
+	struct request_queue *q;
+	int ret;
+
+	device_initialize(bsg_dev);
+
+	bsg_dev->parent = get_device(parent);
+	bsg_dev->release = ufs_bsg_node_release;
+
+	dev_set_name(bsg_dev, "ufs-bsg");
+
+	ret = device_add(bsg_dev);
+	if (ret)
+		goto out;
+
+	q = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), ufs_bsg_request, NULL, 0);
+	if (IS_ERR(q)) {
+		ret = PTR_ERR(q);
+		goto out;
+	}
+
+	hba->bsg_queue = q;
+
+	return 0;
+
+out:
+	dev_err(bsg_dev, "fail to initialize a bsg dev %d\n", shost->host_no);
+	put_device(bsg_dev);
+	return ret;
+}
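
User space reaches this endpoint through the SG_IO v4 interface on the bsg node registered above (typically /dev/bsg/ufs-bsg; the exact path depends on the bsg class layout). A minimal, hedged user-space sketch using the uapi structs, error handling elided:

    /* Sketch only; assumes the v5.4 uapi scsi_bsg_ufs.h layout. */
    #include <fcntl.h>
    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/bsg.h>             /* struct sg_io_v4, BSG_PROTOCOL_* */
    #include <scsi/sg.h>               /* SG_IO */
    #include <scsi/scsi_bsg_ufs.h>     /* struct ufs_bsg_request/reply */

    #define UPIU_TRANSACTION_NOP_OUT 0x00  /* JEDEC UFS; not in uapi headers */

    int ufs_bsg_nop(int fd)
    {
            struct ufs_bsg_request req = { .msgcode = UPIU_TRANSACTION_NOP_OUT };
            struct ufs_bsg_reply rsp;
            struct sg_io_v4 io;

            memset(&rsp, 0, sizeof(rsp));
            memset(&io, 0, sizeof(io));
            io.guard = 'Q';                 /* marks a v4 request */
            io.protocol = BSG_PROTOCOL_SCSI;
            io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
            io.request_len = sizeof(req);
            io.request = (uint64_t)(uintptr_t)&req;
            io.max_response_len = sizeof(rsp);
            io.response = (uint64_t)(uintptr_t)&rsp;

            /* fd comes from open("/dev/bsg/ufs-bsg", O_RDWR) */
            return ioctl(fd, SG_IO, &io);
    }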
diff --git a/drivers/scsi/ufs/ufs_bsg.h b/drivers/scsi/ufs/ufs_bsg.h
new file mode 100644
index 0000000..d099187
--- /dev/null
+++ b/drivers/scsi/ufs/ufs_bsg.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Western Digital Corporation
+ */
+#ifndef UFS_BSG_H
+#define UFS_BSG_H
+
+#include <linux/bsg-lib.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+
+#include "ufshcd.h"
+#include "ufs.h"
+
+#ifdef CONFIG_SCSI_UFS_BSG
+void ufs_bsg_remove(struct ufs_hba *hba);
+int ufs_bsg_probe(struct ufs_hba *hba);
+#else
+static inline void ufs_bsg_remove(struct ufs_hba *hba) {}
+static inline int ufs_bsg_probe(struct ufs_hba *hba) { return 0; }
+#endif
+
+#endif /* UFS_BSG_H */
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
index 5d2dfdb..fe6cad9 100644
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ b/drivers/scsi/ufs/ufs_quirks.h
@@ -1,15 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  */
 
 #ifndef _UFS_QUIRKS_H_
@@ -45,21 +36,6 @@
 }
 
 /*
- * If UFS device is having issue in processing LCC (Line Control
- * Command) coming from UFS host controller then enable this quirk.
- * When this quirk is enabled, host controller driver should disable
- * the LCC transmission on UFS host controller (by clearing
- * TX_LCC_ENABLE attribute of host to 0).
- */
-#define UFS_DEVICE_QUIRK_BROKEN_LCC (1 << 0)
-
-/*
- * Some UFS devices don't need VCCQ rail for device operations. Enabling this
- * quirk for such devices will make sure that VCCQ rail is not voted.
- */
-#define UFS_DEVICE_NO_VCCQ (1 << 1)
-
-/*
  * Some vendor's UFS device sends back to back NACs for the DL data frames
  * causing the host controller to raise the DFES error status. Sometimes
  * such UFS devices send back to back NAC without waiting for new
@@ -85,13 +61,6 @@
 #define UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS (1 << 2)
 
 /*
- * Some UFS devices may not work properly after resume if the link was kept
- * in off state during suspend. Enabling this quirk will not allow the
- * link to be kept in off state during suspend.
- */
-#define UFS_DEVICE_QUIRK_NO_LINK_OFF	(1 << 3)
-
-/*
  * Few Toshiba UFS device models advertise RX_MIN_ACTIVATETIME_CAPABILITY as
  * 600us which may not be enough for reliable hibern8 exit hardware sequence
  * from UFS device.
@@ -101,13 +70,6 @@
 #define UFS_DEVICE_QUIRK_PA_TACTIVATE	(1 << 4)
 
 /*
- * Some UFS memory devices may have really low read/write throughput in
- * FAST AUTO mode, enable this quirk to make sure that FAST AUTO mode is
- * never enabled for such devices.
- */
-#define UFS_DEVICE_NO_FASTAUTO		(1 << 5)
-
-/*
  * It seems some UFS devices may keep drawing more than sleep current
  * (atleast for 500us) from UFS rails (especially from VCCQ rail).
  * To avoid this situation, add 2ms delay before putting these UFS
diff --git a/drivers/scsi/ufs/ufshcd-dwc.c b/drivers/scsi/ufs/ufshcd-dwc.c
index 5fd16c7..fb9e2ff 100644
--- a/drivers/scsi/ufs/ufshcd-dwc.c
+++ b/drivers/scsi/ufs/ufshcd-dwc.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * UFS Host driver for Synopsys Designware Core
  *
  * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
  *
  * Authors: Joao Pinto <jpinto@synopsys.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #include "ufshcd.h"
@@ -50,7 +47,7 @@
 /**
  * ufshcd_dwc_link_is_up()
  * Check if link is up
- * @hba: private structure poitner
+ * @hba: private structure pointer
  *
  * Returns 0 on success, non-zero value on failure
  */
@@ -110,7 +107,7 @@
 /**
  * ufshcd_dwc_link_startup_notify()
  * UFS Host DWC specific link startup sequence
- * @hba: private structure poitner
+ * @hba: private structure pointer
  * @status: Callback notify status
  *
  * Returns 0 on success, non-zero value on failure
diff --git a/drivers/scsi/ufs/ufshcd-dwc.h b/drivers/scsi/ufs/ufshcd-dwc.h
index c8be295..4268ca2 100644
--- a/drivers/scsi/ufs/ufshcd-dwc.h
+++ b/drivers/scsi/ufs/ufshcd-dwc.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * UFS Host driver for Synopsys Designware Core
  *
  * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
  *
  * Authors: Joao Pinto <jpinto@synopsys.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #ifndef _UFSHCD_DWC_H
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index ffe6f82..3b19de3 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -200,6 +200,8 @@
 static const struct pci_device_id ufshcd_pci_tbl[] = {
 	{ PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
 	{ PCI_VDEVICE(INTEL, 0x9DFA), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
+	{ PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
+	{ PCI_VDEVICE(INTEL, 0x4B43), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
 	{ }	/* terminate list */
 };
 
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index 895a9b5..8d40dc9 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -39,6 +39,7 @@
 
 #include "ufshcd.h"
 #include "ufshcd-pltfrm.h"
+#include "unipro.h"
 
 #define UFSHCD_DEFAULT_LANES_PER_DIRECTION		2
 
@@ -151,20 +152,12 @@
 
 	vreg->name = kstrdup(name, GFP_KERNEL);
 
-	/* if fixed regulator no need further initialization */
-	snprintf(prop_name, MAX_PROP_SIZE, "%s-fixed-regulator", name);
-	if (of_property_read_bool(np, prop_name))
-		goto out;
-
 	snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name);
-	ret = of_property_read_u32(np, prop_name, &vreg->max_uA);
-	if (ret) {
-		dev_err(dev, "%s: unable to find %s err %d\n",
-				__func__, prop_name, ret);
-		goto out;
+	if (of_property_read_u32(np, prop_name, &vreg->max_uA)) {
+		dev_info(dev, "%s: unable to find %s\n", __func__, prop_name);
+		vreg->max_uA = 0;
 	}
 
-	vreg->min_uA = 0;
 	if (!strcmp(name, "vcc")) {
 		if (of_property_read_bool(np, "vcc-supply-1p8")) {
 			vreg->min_uV = UFS_VREG_VCC_1P8_MIN_UV;
@@ -290,6 +283,103 @@
 }
 
 /**
+ * ufshcd_get_pwr_dev_param - get the final agreed attributes for
+ *                            a power mode change
+ * @pltfrm_param: pointer to platform parameters
+ * @dev_max: pointer to device attributes
+ * @agreed_pwr: returned agreed attributes
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_get_pwr_dev_param(struct ufs_dev_params *pltfrm_param,
+			     struct ufs_pa_layer_attr *dev_max,
+			     struct ufs_pa_layer_attr *agreed_pwr)
+{
+	int min_pltfrm_gear;
+	int min_dev_gear;
+	bool is_dev_sup_hs = false;
+	bool is_pltfrm_max_hs = false;
+
+	if (dev_max->pwr_rx == FAST_MODE)
+		is_dev_sup_hs = true;
+
+	if (pltfrm_param->desired_working_mode == UFS_HS_MODE) {
+		is_pltfrm_max_hs = true;
+		min_pltfrm_gear = min_t(u32, pltfrm_param->hs_rx_gear,
+					pltfrm_param->hs_tx_gear);
+	} else {
+		min_pltfrm_gear = min_t(u32, pltfrm_param->pwm_rx_gear,
+					pltfrm_param->pwm_tx_gear);
+	}
+
+	/*
+	 * device doesn't support HS but
+	 * pltfrm_param->desired_working_mode is HS,
+	 * thus device and pltfrm_param don't agree
+	 */
+	if (!is_dev_sup_hs && is_pltfrm_max_hs) {
+		pr_info("%s: device doesn't support HS\n",
+			__func__);
+		return -ENOTSUPP;
+	} else if (is_dev_sup_hs && is_pltfrm_max_hs) {
+		/*
+		 * since device supports HS, it supports FAST_MODE.
+		 * since pltfrm_param->desired_working_mode is also HS
+		 * then final decision (FAST/FASTAUTO) is done according
+		 * to pltfrm_params as it is the restricting factor
+		 */
+		agreed_pwr->pwr_rx = pltfrm_param->rx_pwr_hs;
+		agreed_pwr->pwr_tx = agreed_pwr->pwr_rx;
+	} else {
+		/*
+		 * here pltfrm_param->desired_working_mode is PWM.
+		 * it doesn't matter whether device supports HS or PWM,
+		 * in both cases pltfrm_param->desired_working_mode will
+		 * determine the mode
+		 */
+		agreed_pwr->pwr_rx = pltfrm_param->rx_pwr_pwm;
+		agreed_pwr->pwr_tx = agreed_pwr->pwr_rx;
+	}
+
+	/*
+	 * we would like tx to work in the minimum number of lanes
+	 * between device capability and vendor preferences.
+	 * the same decision will be made for rx
+	 */
+	agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx,
+				    pltfrm_param->tx_lanes);
+	agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx,
+				    pltfrm_param->rx_lanes);
+
+	/* device maximum gear is the minimum between device rx and tx gears */
+	min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);
+
+	/*
+	 * If device capabilities and vendor pre-defined preferences are
+	 * both HS or both PWM, set the minimum gear as the chosen working
+	 * gear.
+	 * If one is PWM and one is HS, the PWM side gets to decide the
+	 * gear, as it also decided above what pwr mode to configure.
+	 */
+	if ((is_dev_sup_hs && is_pltfrm_max_hs) ||
+	    (!is_dev_sup_hs && !is_pltfrm_max_hs)) {
+		agreed_pwr->gear_rx =
+			min_t(u32, min_dev_gear, min_pltfrm_gear);
+	} else if (!is_dev_sup_hs) {
+		agreed_pwr->gear_rx = min_dev_gear;
+	} else {
+		agreed_pwr->gear_rx = min_pltfrm_gear;
+	}
+	agreed_pwr->gear_tx = agreed_pwr->gear_rx;
+
+	agreed_pwr->hs_rate = pltfrm_param->hs_rate;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ufshcd_get_pwr_dev_param);
+
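A concrete reading of the negotiation: if the platform caps HS at G3 while the device advertises FAST_MODE at G4, both sides are HS, so the agreed result is FAST_MODE at gear min(4, 3) = 3. A caller-side sketch with hypothetical values:

    /* Illustrative only; all field values are hypothetical. */
    struct ufs_dev_params plat = {
            .hs_rx_gear = UFS_HS_G3, .hs_tx_gear = UFS_HS_G3,
            .rx_lanes = 2, .tx_lanes = 2,
            .rx_pwr_hs = FAST_MODE, .tx_pwr_hs = FAST_MODE,
            .hs_rate = PA_HS_MODE_B,
            .desired_working_mode = UFS_HS_MODE,
    };
    struct ufs_pa_layer_attr dev_max = {
            .pwr_rx = FAST_MODE, .pwr_tx = FAST_MODE,
            .gear_rx = 4, .gear_tx = 4,
            .lane_rx = 2, .lane_tx = 2,
    };
    struct ufs_pa_layer_attr agreed;
    int err = ufshcd_get_pwr_dev_param(&plat, &dev_max, &agreed);
    /* on success: agreed = FAST_MODE, gear 3, 2 lanes, HS rate B */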
+/**
  * ufshcd_pltfrm_init - probe routine of the driver
  * @pdev: pointer to Platform device handle
  * @vops: pointer to variant ops
@@ -297,16 +387,14 @@
  * Returns 0 on success, non-zero value on failure
  */
 int ufshcd_pltfrm_init(struct platform_device *pdev,
-		       struct ufs_hba_variant_ops *vops)
+		       const struct ufs_hba_variant_ops *vops)
 {
 	struct ufs_hba *hba;
 	void __iomem *mmio_base;
-	struct resource *mem_res;
 	int irq, err;
 	struct device *dev = &pdev->dev;
 
-	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	mmio_base = devm_ioremap_resource(dev, mem_res);
+	mmio_base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(mmio_base)) {
 		err = PTR_ERR(mmio_base);
 		goto out;
@@ -340,24 +428,21 @@
 		goto dealloc_host;
 	}
 
-	pm_runtime_set_active(&pdev->dev);
-	pm_runtime_enable(&pdev->dev);
-
 	ufshcd_init_lanes_per_dir(hba);
 
 	err = ufshcd_init(hba, mmio_base, irq);
 	if (err) {
 		dev_err(dev, "Initialization failed\n");
-		goto out_disable_rpm;
+		goto dealloc_host;
 	}
 
 	platform_set_drvdata(pdev, hba);
 
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+
 	return 0;
 
-out_disable_rpm:
-	pm_runtime_disable(&pdev->dev);
-	pm_runtime_set_suspended(&pdev->dev);
 dealloc_host:
 	ufshcd_dealloc_host(hba);
 out:
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.h b/drivers/scsi/ufs/ufshcd-pltfrm.h
index df64c41..b79cdf9 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.h
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.h
@@ -1,14 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  */
 
 #ifndef UFSHCD_PLTFRM_H_
@@ -16,8 +7,29 @@
 
 #include "ufshcd.h"
 
+#define UFS_PWM_MODE 1
+#define UFS_HS_MODE  2
+
+struct ufs_dev_params {
+	u32 pwm_rx_gear;        /* pwm rx gear to work in */
+	u32 pwm_tx_gear;        /* pwm tx gear to work in */
+	u32 hs_rx_gear;         /* hs rx gear to work in */
+	u32 hs_tx_gear;         /* hs tx gear to work in */
+	u32 rx_lanes;           /* number of rx lanes */
+	u32 tx_lanes;           /* number of tx lanes */
+	u32 rx_pwr_pwm;         /* rx pwm working pwr */
+	u32 tx_pwr_pwm;         /* tx pwm working pwr */
+	u32 rx_pwr_hs;          /* rx hs working pwr */
+	u32 tx_pwr_hs;          /* tx hs working pwr */
+	u32 hs_rate;            /* rate A/B to work in HS */
+	u32 desired_working_mode;
+};
+
+int ufshcd_get_pwr_dev_param(struct ufs_dev_params *dev_param,
+			     struct ufs_pa_layer_attr *dev_max,
+			     struct ufs_pa_layer_attr *agreed_pwr);
 int ufshcd_pltfrm_init(struct platform_device *pdev,
-		       struct ufs_hba_variant_ops *vops);
+		       const struct ufs_hba_variant_ops *vops);
 void ufshcd_pltfrm_shutdown(struct platform_device *pdev);
 
 #ifdef CONFIG_PM
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 0b81d9d..11a87f5 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -46,12 +46,11 @@
 #include "ufs_quirks.h"
 #include "unipro.h"
 #include "ufs-sysfs.h"
+#include "ufs_bsg.h"
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/ufs.h>
 
-#define UFSHCD_REQ_SENSE_SIZE	18
-
 #define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
 				 UTP_TASK_REQ_COMPL |\
 				 UFSHCD_ERROR_MASK)
@@ -109,13 +108,19 @@
 int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
 		     const char *prefix)
 {
-	u8 *regs;
+	u32 *regs;
+	size_t pos;
+
+	if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
+		return -EINVAL;
 
 	regs = kzalloc(len, GFP_KERNEL);
 	if (!regs)
 		return -ENOMEM;
 
-	memcpy_fromio(regs, hba->mmio_base + offset, len);
+	for (pos = 0; pos < len; pos += 4)
+		regs[pos / 4] = ufshcd_readl(hba, offset + pos);
+
 	ufshcd_hex_dump(prefix, regs, len);
 	kfree(regs);
 
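memcpy_fromio() makes no promise about access width, while the UFSHCI register window only tolerates aligned 32-bit accesses, hence the readl() loop and the alignment guard above. The same pattern in isolation:

    /* Dword-wise MMIO snapshot; mirrors the loop above. */
    static int mmio_snapshot(void __iomem *base, size_t offset, size_t len,
                             u32 *out)
    {
            size_t pos;

            if (offset % 4 != 0 || len % 4 != 0)    /* readl needs dword units */
                    return -EINVAL;
            for (pos = 0; pos < len; pos += 4)
                    out[pos / 4] = readl(base + offset + pos);
            return 0;
    }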
@@ -214,12 +219,9 @@
 	/* UFS cards deviations table */
 	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
 		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
-	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
 	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
 		UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
 	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
-		UFS_DEVICE_NO_FASTAUTO),
-	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
 		UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
 	UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
 		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
@@ -227,7 +229,6 @@
 		UFS_DEVICE_QUIRK_PA_TACTIVATE),
 	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
 		UFS_DEVICE_QUIRK_PA_TACTIVATE),
-	UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
 	UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
 		UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
 	UFS_FIX(UFS_VENDOR_SKHYNIX, "hB8aL1" /*H28U62301AMR*/,
@@ -246,7 +247,6 @@
 static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
 				 bool skip_ref_clk);
 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
-static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
 static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
@@ -299,16 +299,6 @@
 		scsi_block_requests(hba->host);
 }
 
-/* replace non-printable or non-ASCII characters with spaces */
-static inline void ufshcd_remove_non_printable(char *val)
-{
-	if (!val)
-		return;
-
-	if (*val < 0x20 || *val > 0x7e)
-		*val = ' ';
-}
-
 static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
 		const char *str)
 {
@@ -328,14 +318,11 @@
 static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
 		const char *str)
 {
-	struct utp_task_req_desc *descp;
-	struct utp_upiu_task_req *task_req;
 	int off = (int)tag - hba->nutrs;
+	struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[off];
 
-	descp = &hba->utmrdl_base_addr[off];
-	task_req = (struct utp_upiu_task_req *)descp->task_req_upiu;
-	trace_ufshcd_upiu(dev_name(hba->dev), str, &task_req->header,
-			&task_req->input_param1);
+	trace_ufshcd_upiu(dev_name(hba->dev), str, &descp->req_header,
+			&descp->input_param1);
 }
 
 static void ufshcd_add_command_trace(struct ufs_hba *hba,
@@ -393,19 +380,25 @@
 	}
 }
 
-static void ufshcd_print_uic_err_hist(struct ufs_hba *hba,
-		struct ufs_uic_err_reg_hist *err_hist, char *err_name)
+static void ufshcd_print_err_hist(struct ufs_hba *hba,
+				  struct ufs_err_reg_hist *err_hist,
+				  char *err_name)
 {
 	int i;
+	bool found = false;
 
-	for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) {
-		int p = (i + err_hist->pos - 1) % UIC_ERR_REG_HIST_LENGTH;
+	for (i = 0; i < UFS_ERR_REG_HIST_LENGTH; i++) {
+		int p = (i + err_hist->pos) % UFS_ERR_REG_HIST_LENGTH;
 
 		if (err_hist->reg[p] == 0)
 			continue;
-		dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, i,
+		dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
 			err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
+		found = true;
 	}
+
+	if (!found)
+		dev_err(hba->dev, "No record of %s errors\n", err_name);
 }
 
 static void ufshcd_print_host_regs(struct ufs_hba *hba)
@@ -421,11 +414,22 @@
 		ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
 		hba->ufs_stats.hibern8_exit_cnt);
 
-	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
-	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
-	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
-	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
-	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");
+	ufshcd_print_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
+	ufshcd_print_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
+	ufshcd_print_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
+	ufshcd_print_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
+	ufshcd_print_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");
+	ufshcd_print_err_hist(hba, &hba->ufs_stats.auto_hibern8_err,
+			      "auto_hibern8_err");
+	ufshcd_print_err_hist(hba, &hba->ufs_stats.fatal_err, "fatal_err");
+	ufshcd_print_err_hist(hba, &hba->ufs_stats.link_startup_err,
+			      "link_startup_fail");
+	ufshcd_print_err_hist(hba, &hba->ufs_stats.resume_err, "resume_fail");
+	ufshcd_print_err_hist(hba, &hba->ufs_stats.suspend_err,
+			      "suspend_fail");
+	ufshcd_print_err_hist(hba, &hba->ufs_stats.dev_reset, "dev_reset");
+	ufshcd_print_err_hist(hba, &hba->ufs_stats.host_reset, "host_reset");
+	ufshcd_print_err_hist(hba, &hba->ufs_stats.task_abort, "task_abort");
 
 	ufshcd_print_clk_freqs(hba);
 
@@ -477,22 +481,13 @@
 
 static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
 {
-	struct utp_task_req_desc *tmrdp;
 	int tag;
 
 	for_each_set_bit(tag, &bitmap, hba->nutmrs) {
-		tmrdp = &hba->utmrdl_base_addr[tag];
+		struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];
+
 		dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
-		ufshcd_hex_dump("TM TRD: ", &tmrdp->header,
-				sizeof(struct request_desc_header));
-		dev_err(hba->dev, "TM[%d] - Task Management Request UPIU\n",
-				tag);
-		ufshcd_hex_dump("TM REQ: ", tmrdp->task_req_upiu,
-				sizeof(struct utp_upiu_req));
-		dev_err(hba->dev, "TM[%d] - Task Management Response UPIU\n",
-				tag);
-		ufshcd_hex_dump("TM RSP: ", tmrdp->task_rsp_upiu,
-				sizeof(struct utp_task_req_desc));
+		ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
 	}
 }
 
@@ -648,19 +643,6 @@
 }
 
 /**
- * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
- * @task_req_descp: pointer to utp_task_req_desc structure
- *
- * This function is used to get the OCS field from UTMRD
- * Returns the OCS field in the UTMRD
- */
-static inline int
-ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
-{
-	return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
-}
-
-/**
  * ufshcd_get_tm_free_slot - get a free slot for task management request
  * @hba: per adapter instance
  * @free_slot: pointer to variable with available slot value
@@ -1575,6 +1557,7 @@
 		 * currently running. Hence, fall through to cancel gating
 		 * work and to enable clocks.
 		 */
+		/* fallthrough */
 	case CLKS_OFF:
 		ufshcd_scsi_block_requests(hba);
 		hba->clk_gating.state = REQ_CLKS_ON;
@@ -1586,6 +1569,7 @@
 		 * fall through to check if we should wait for this
 		 * work to be done or not.
 		 */
+		/* fallthrough */
 	case REQ_CLKS_ON:
 		if (async) {
 			rc = -EAGAIN;
@@ -1766,6 +1750,34 @@
 	return count;
 }
 
+static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
+{
+	char wq_name[sizeof("ufs_clkscaling_00")];
+
+	if (!ufshcd_is_clkscaling_supported(hba))
+		return;
+
+	INIT_WORK(&hba->clk_scaling.suspend_work,
+		  ufshcd_clk_scaling_suspend_work);
+	INIT_WORK(&hba->clk_scaling.resume_work,
+		  ufshcd_clk_scaling_resume_work);
+
+	snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
+		 hba->host->host_no);
+	hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
+
+	ufshcd_clkscaling_init_sysfs(hba);
+}
+
+static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
+{
+	if (!ufshcd_is_clkscaling_supported(hba))
+		return;
+
+	destroy_workqueue(hba->clk_scaling.workq);
+	ufshcd_devfreq_remove(hba);
+}
+
 static void ufshcd_init_clk_gating(struct ufs_hba *hba)
 {
 	char wq_name[sizeof("ufs_clk_gating_00")];
@@ -1886,11 +1898,10 @@
 		int len_to_copy;
 
 		len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
-		len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len);
+		len_to_copy = min_t(int, UFS_SENSE_SIZE, len);
 
-		memcpy(lrbp->sense_buffer,
-			lrbp->ucd_rsp_ptr->sr.sense_data,
-			min_t(int, len_to_copy, UFSHCD_REQ_SENSE_SIZE));
+		memcpy(lrbp->sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data,
+		       len_to_copy);
 	}
 }
 
@@ -1908,7 +1919,8 @@
 	memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
 
 	/* Get the descriptor */
-	if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
+	if (hba->dev_cmd.query.descriptor &&
+	    lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
 		u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
 				GENERAL_UPIU_REQUEST_SIZE;
 		u16 resp_len;
@@ -2058,8 +2070,7 @@
  *
  * Returns 0 only if success.
  */
-static int
-ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
+int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
 {
 	int ret;
 	unsigned long flags;
@@ -2241,8 +2252,8 @@
 	ucd_req_ptr->sc.exp_data_transfer_len =
 		cpu_to_be32(lrbp->cmd->sdb.length);
 
-	cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
-	memset(ucd_req_ptr->sc.cdb, 0, MAX_CDB_SIZE);
+	cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, UFS_CDB_SIZE);
+	memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
 	memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
 
 	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
@@ -2261,7 +2272,6 @@
 	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
 	struct ufs_query *query = &hba->dev_cmd.query;
 	u16 len = be16_to_cpu(query->request.upiu_req.length);
-	u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;
 
 	/* Query request header */
 	ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
@@ -2283,7 +2293,7 @@
 
 	/* Copy the Descriptor */
 	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
-		memcpy(descp, query->descriptor, len);
+		memcpy(ucd_req_ptr + 1, query->descriptor, len);
 
 	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
 }
@@ -2454,7 +2464,7 @@
 
 	WARN_ON(lrbp->cmd);
 	lrbp->cmd = cmd;
-	lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE;
+	lrbp->sense_bufflen = UFS_SENSE_SIZE;
 	lrbp->sense_buffer = cmd->sense_buffer;
 	lrbp->task_tag = tag;
 	lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
@@ -3191,7 +3201,7 @@
 static inline int ufshcd_read_desc(struct ufs_hba *hba,
 				   enum desc_idn desc_id,
 				   int desc_index,
-				   u8 *buf,
+				   void *buf,
 				   u32 size)
 {
 	return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
@@ -3210,48 +3220,77 @@
 }
 
 /**
+ * struct uc_string_id - unicode string
+ *
+ * @len: size of this descriptor inclusive
+ * @type: descriptor type
+ * @uc: unicode string character
+ */
+struct uc_string_id {
+	u8 len;
+	u8 type;
+	wchar_t uc[0];
+} __packed;
+
+/* replace non-printable or non-ASCII characters with spaces */
+static inline char ufshcd_remove_non_printable(u8 ch)
+{
+	return (ch >= 0x20 && ch <= 0x7e) ? ch : ' ';
+}
+
+/**
  * ufshcd_read_string_desc - read string descriptor
  * @hba: pointer to adapter instance
  * @desc_index: descriptor index
- * @buf: pointer to buffer where descriptor would be read
- * @size: size of buf
+ * @buf: pointer to buffer where descriptor would be read,
+ *       the caller should free the memory.
  * @ascii: if true convert from unicode to ascii characters
+ *         forming a null-terminated ascii string.
  *
- * Return 0 in case of success, non-zero otherwise
+ * Return:
+ * *      string size on success.
+ * *      -ENOMEM: on allocation failure
+ * *      -EINVAL: on a wrong parameter
  */
-int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
-			    u8 *buf, u32 size, bool ascii)
+int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
+			    u8 **buf, bool ascii)
 {
-	int err = 0;
+	struct uc_string_id *uc_str;
+	u8 *str;
+	int ret;
 
-	err = ufshcd_read_desc(hba,
-				QUERY_DESC_IDN_STRING, desc_index, buf, size);
+	if (!buf)
+		return -EINVAL;
 
-	if (err) {
-		dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
-			__func__, QUERY_REQ_RETRIES, err);
+	uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
+	if (!uc_str)
+		return -ENOMEM;
+
+	ret = ufshcd_read_desc(hba, QUERY_DESC_IDN_STRING,
+			       desc_index, uc_str,
+			       QUERY_DESC_MAX_SIZE);
+	if (ret < 0) {
+		dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n",
+			QUERY_REQ_RETRIES, ret);
+		str = NULL;
+		goto out;
+	}
+
+	if (uc_str->len <= QUERY_DESC_HDR_SIZE) {
+		dev_dbg(hba->dev, "String Desc is of zero length\n");
+		str = NULL;
+		ret = 0;
 		goto out;
 	}
 
 	if (ascii) {
-		int desc_len;
-		int ascii_len;
+		ssize_t ascii_len;
 		int i;
-		char *buff_ascii;
-
-		desc_len = buf[0];
 		/* remove header and divide by 2 to move from UTF16 to UTF8 */
-		ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
-		if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
-			dev_err(hba->dev, "%s: buffer allocated size is too small\n",
-					__func__);
-			err = -ENOMEM;
-			goto out;
-		}
-
-		buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
-		if (!buff_ascii) {
-			err = -ENOMEM;
+		ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1;
+		str = kzalloc(ascii_len, GFP_KERNEL);
+		if (!str) {
+			ret = -ENOMEM;
 			goto out;
 		}
 
@@ -3259,22 +3298,28 @@
 		 * the descriptor contains string in UTF16 format
 		 * we need to convert to utf-8 so it can be displayed
 		 */
-		utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE],
-				desc_len - QUERY_DESC_HDR_SIZE,
-				UTF16_BIG_ENDIAN, buff_ascii, ascii_len);
+		ret = utf16s_to_utf8s(uc_str->uc,
+				      uc_str->len - QUERY_DESC_HDR_SIZE,
+				      UTF16_BIG_ENDIAN, str, ascii_len);
 
 		/* replace non-printable or non-ASCII characters with spaces */
-		for (i = 0; i < ascii_len; i++)
-			ufshcd_remove_non_printable(&buff_ascii[i]);
+		for (i = 0; i < ret; i++)
+			str[i] = ufshcd_remove_non_printable(str[i]);
 
-		memset(buf + QUERY_DESC_HDR_SIZE, 0,
-				size - QUERY_DESC_HDR_SIZE);
-		memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
-		buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
-		kfree(buff_ascii);
+		str[ret++] = '\0';
+
+	} else {
+		str = kmemdup(uc_str, uc_str->len, GFP_KERNEL);
+		if (!str) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		ret = uc_str->len;
 	}
 out:
-	return err;
+	*buf = str;
+	kfree(uc_str);
+	return ret;
 }
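
With the new contract the function allocates the result buffer itself and returns its length, so the caller owns (and must free) the string. A hedged sketch of a caller, assuming `hba` and `desc_index` come from the surrounding driver context; note that a zero return with `*buf == NULL` signals an empty descriptor:

	u8 *str = NULL;
	int len;

	len = ufshcd_read_string_desc(hba, desc_index, &str, SD_ASCII_STD);
	if (len < 0)
		return len;		/* -ENOMEM or -EINVAL */

	if (str) {
		dev_info(hba->dev, "string desc: %s (%d bytes)\n", str, len);
		kfree(str);		/* caller frees, per the kernel-doc above */
	}
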
 
 /**
@@ -3900,7 +3945,7 @@
 {
 	unsigned long flags;
 
-	if (!(hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) || !hba->ahit)
+	if (!ufshcd_is_auto_hibern8_supported(hba) || !hba->ahit)
 		return;
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
@@ -4206,12 +4251,6 @@
 {
 	int retry;
 
-	/*
-	 * msleep of 1 and 5 used in this function might result in msleep(20),
-	 * but it was necessary to send the UFS FPGA to reset mode during
-	 * development and testing of this driver. msleep can be changed to
-	 * mdelay and retry count can be reduced based on the controller.
-	 */
 	if (!ufshcd_is_hba_active(hba))
 		/* change controller state to "reset state" */
 		ufshcd_hba_stop(hba, true);
@@ -4234,7 +4273,7 @@
 	 * instruction might be read back.
 	 * This delay can be changed based on the controller.
 	 */
-	msleep(1);
+	usleep_range(1000, 1100);
 
 	/* wait for the host controller to complete initialization */
 	retry = 10;
@@ -4246,7 +4285,7 @@
 				"Controller enable failed\n");
 			return -EIO;
 		}
-		msleep(5);
+		usleep_range(5000, 5100);
 	}
 
 	/* enable UIC related interrupts */
@@ -4318,6 +4357,14 @@
 	return ufshcd_disable_tx_lcc(hba, true);
 }
 
+static void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist,
+				   u32 reg)
+{
+	reg_hist->reg[reg_hist->pos] = reg;
+	reg_hist->tstamp[reg_hist->pos] = ktime_get();
+	reg_hist->pos = (reg_hist->pos + 1) % UFS_ERR_REG_HIST_LENGTH;
+}
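
The history is a fixed-length ring: once the buffer has wrapped, `pos` points at the oldest slot. A sketch of how such a ring could be walked oldest-to-newest, roughly mirroring the driver's own debug dump; treating a zero timestamp as an unused slot is an assumption here:

	static void example_err_hist_print(struct ufs_hba *hba,
					   struct ufs_err_reg_hist *h,
					   const char *name)
	{
		int i;

		for (i = 0; i < UFS_ERR_REG_HIST_LENGTH; i++) {
			int p = (h->pos + i) % UFS_ERR_REG_HIST_LENGTH;

			if (h->tstamp[p] == 0)	/* slot never written */
				continue;
			dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", name, p,
				h->reg[p], ktime_to_us(h->tstamp[p]));
		}
	}
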
+
 /**
  * ufshcd_link_startup - Initialize unipro link startup
  * @hba: per adapter instance
@@ -4345,6 +4392,8 @@
 
 		/* check if device is detected by inter-connect layer */
 		if (!ret && !ufshcd_is_device_present(hba)) {
+			ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
+					       0);
 			dev_err(hba->dev, "%s: Device not present\n", __func__);
 			ret = -ENXIO;
 			goto out;
@@ -4355,13 +4404,19 @@
 		 * but we can't be sure if the link is up until link startup
 		 * succeeds. So reset the local Uni-Pro and try again.
 		 */
-		if (ret && ufshcd_hba_enable(hba))
+		if (ret && ufshcd_hba_enable(hba)) {
+			ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
+					       (u32)ret);
 			goto out;
+		}
 	} while (ret && retries--);
 
-	if (ret)
+	if (ret) {
 		/* failed to get the link up... retire */
+		ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
+				       (u32)ret);
 		goto out;
+	}
 
 	if (link_startup_again) {
 		link_startup_again = false;
@@ -4579,8 +4634,6 @@
 	struct request_queue *q = sdev->request_queue;
 
 	blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
-	blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
-
 	return 0;
 }
 
@@ -4604,46 +4657,6 @@
 }
 
 /**
- * ufshcd_task_req_compl - handle task management request completion
- * @hba: per adapter instance
- * @index: index of the completed request
- * @resp: task management service response
- *
- * Returns non-zero value on error, zero on success
- */
-static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
-{
-	struct utp_task_req_desc *task_req_descp;
-	struct utp_upiu_task_rsp *task_rsp_upiup;
-	unsigned long flags;
-	int ocs_value;
-	int task_result;
-
-	spin_lock_irqsave(hba->host->host_lock, flags);
-
-	/* Clear completed tasks from outstanding_tasks */
-	__clear_bit(index, &hba->outstanding_tasks);
-
-	task_req_descp = hba->utmrdl_base_addr;
-	ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
-
-	if (ocs_value == OCS_SUCCESS) {
-		task_rsp_upiup = (struct utp_upiu_task_rsp *)
-				task_req_descp[index].task_rsp_upiu;
-		task_result = be32_to_cpu(task_rsp_upiup->output_param1);
-		task_result = task_result & MASK_TM_SERVICE_RESP;
-		if (resp)
-			*resp = (u8)task_result;
-	} else {
-		dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
-				__func__, ocs_value);
-	}
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
-
-	return ocs_value;
-}
-
-/**
  * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
  * @lrbp: pointer to local reference block of completed command
  * @scsi_status: SCSI command status
@@ -4658,6 +4671,7 @@
 	switch (scsi_status) {
 	case SAM_STAT_CHECK_CONDITION:
 		ufshcd_copy_sense_data(lrbp);
+		/* fallthrough */
 	case SAM_STAT_GOOD:
 		result |= DID_OK << 16 |
 			  COMMAND_COMPLETE << 8 |
@@ -4736,10 +4750,10 @@
 				"Reject UPIU not fully implemented\n");
 			break;
 		default:
-			result = DID_ERROR << 16;
 			dev_err(hba->dev,
 				"Unexpected request response code = %x\n",
 				result);
+			result = DID_ERROR << 16;
 			break;
 		}
 		break;
@@ -5286,6 +5300,7 @@
 			goto skip_err_handling;
 	}
 	if ((hba->saved_err & INT_FATAL_ERRORS) ||
+	    (hba->saved_err & UFSHCD_UIC_HIBERN8_MASK) ||
 	    ((hba->saved_err & UIC_ERROR) &&
 	    (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
 				   UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
@@ -5377,14 +5392,6 @@
 	pm_runtime_put_sync(hba->dev);
 }
 
-static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist,
-		u32 reg)
-{
-	reg_hist->reg[reg_hist->pos] = reg;
-	reg_hist->tstamp[reg_hist->pos] = ktime_get();
-	reg_hist->pos = (reg_hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH;
-}
-
 /**
  * ufshcd_update_uic_error - check and set fatal UIC error flags.
  * @hba: per-adapter instance
@@ -5403,13 +5410,13 @@
 		 * must be checked but this error is handled separately.
 		 */
 		dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
-		ufshcd_update_uic_reg_hist(&hba->ufs_stats.pa_err, reg);
+		ufshcd_update_reg_hist(&hba->ufs_stats.pa_err, reg);
 	}
 
 	/* PA_INIT_ERROR is fatal and needs UIC reset */
 	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
 	if (reg)
-		ufshcd_update_uic_reg_hist(&hba->ufs_stats.dl_err, reg);
+		ufshcd_update_reg_hist(&hba->ufs_stats.dl_err, reg);
 
 	if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
 		hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
@@ -5425,19 +5432,19 @@
 	/* UIC NL/TL/DME errors needs software retry */
 	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
 	if (reg) {
-		ufshcd_update_uic_reg_hist(&hba->ufs_stats.nl_err, reg);
+		ufshcd_update_reg_hist(&hba->ufs_stats.nl_err, reg);
 		hba->uic_error |= UFSHCD_UIC_NL_ERROR;
 	}
 
 	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
 	if (reg) {
-		ufshcd_update_uic_reg_hist(&hba->ufs_stats.tl_err, reg);
+		ufshcd_update_reg_hist(&hba->ufs_stats.tl_err, reg);
 		hba->uic_error |= UFSHCD_UIC_TL_ERROR;
 	}
 
 	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
 	if (reg) {
-		ufshcd_update_uic_reg_hist(&hba->ufs_stats.dme_err, reg);
+		ufshcd_update_reg_hist(&hba->ufs_stats.dme_err, reg);
 		hba->uic_error |= UFSHCD_UIC_DME_ERROR;
 	}
 
@@ -5445,6 +5452,23 @@
 			__func__, hba->uic_error);
 }
 
+static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
+					 u32 intr_mask)
+{
+	if (!ufshcd_is_auto_hibern8_supported(hba))
+		return false;
+
+	if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
+		return false;
+
+	if (hba->active_uic_cmd &&
+	    (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
+	    hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
+		return false;
+
+	return true;
+}
+
 /**
  * ufshcd_check_errors - Check for errors that need s/w attention
  * @hba: per-adapter instance
@@ -5453,8 +5477,10 @@
 {
 	bool queue_eh_work = false;
 
-	if (hba->errors & INT_FATAL_ERRORS)
+	if (hba->errors & INT_FATAL_ERRORS) {
+		ufshcd_update_reg_hist(&hba->ufs_stats.fatal_err, hba->errors);
 		queue_eh_work = true;
+	}
 
 	if (hba->errors & UIC_ERROR) {
 		hba->uic_error = 0;
@@ -5463,6 +5489,17 @@
 			queue_eh_work = true;
 	}
 
+	if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) {
+		dev_err(hba->dev,
+			"%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
+			__func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
+			"Enter" : "Exit",
+			hba->errors, ufshcd_get_upmcrs(hba));
+		ufshcd_update_reg_hist(&hba->ufs_stats.auto_hibern8_err,
+				       hba->errors);
+		queue_eh_work = true;
+	}
+
 	if (queue_eh_work) {
 		/*
 		 * update the transfer error masks to sticky bits, let's do this
@@ -5525,6 +5562,10 @@
 static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
 {
 	hba->errors = UFSHCD_ERROR_MASK & intr_status;
+
+	if (ufshcd_is_auto_hibern8_error(hba, intr_status))
+		hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
+
 	if (hba->errors)
 		ufshcd_check_errors(hba);
 
@@ -5600,28 +5641,12 @@
 	return err;
 }
 
-/**
- * ufshcd_issue_tm_cmd - issues task management commands to controller
- * @hba: per adapter instance
- * @lun_id: LUN ID to which TM command is sent
- * @task_id: task ID to which the TM command is applicable
- * @tm_function: task management function opcode
- * @tm_response: task management service response return value
- *
- * Returns non-zero value on error, zero on success.
- */
-static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
-		u8 tm_function, u8 *tm_response)
+static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
+		struct utp_task_req_desc *treq, u8 tm_function)
 {
-	struct utp_task_req_desc *task_req_descp;
-	struct utp_upiu_task_req *task_req_upiup;
-	struct Scsi_Host *host;
+	struct Scsi_Host *host = hba->host;
 	unsigned long flags;
-	int free_slot;
-	int err;
-	int task_tag;
-
-	host = hba->host;
+	int free_slot, task_tag, err;
 
 	/*
 	 * Get free slot, sleep if slots are unavailable.
@@ -5632,30 +5657,11 @@
 	ufshcd_hold(hba, false);
 
 	spin_lock_irqsave(host->host_lock, flags);
-	task_req_descp = hba->utmrdl_base_addr;
-	task_req_descp += free_slot;
-
-	/* Configure task request descriptor */
-	task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
-	task_req_descp->header.dword_2 =
-			cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
-
-	/* Configure task request UPIU */
-	task_req_upiup =
-		(struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
 	task_tag = hba->nutrs + free_slot;
-	task_req_upiup->header.dword_0 =
-		UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
-					      lun_id, task_tag);
-	task_req_upiup->header.dword_1 =
-		UPIU_HEADER_DWORD(0, tm_function, 0, 0);
-	/*
-	 * The host shall provide the same value for LUN field in the basic
-	 * header and for Input Parameter.
-	 */
-	task_req_upiup->input_param1 = cpu_to_be32(lun_id);
-	task_req_upiup->input_param2 = cpu_to_be32(task_id);
 
+	treq->req_header.dword_0 |= cpu_to_be32(task_tag);
+
+	memcpy(hba->utmrdl_base_addr + free_slot, treq, sizeof(*treq));
 	ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
 
 	/* send command to the controller */
@@ -5685,10 +5691,16 @@
 					__func__, free_slot);
 		err = -ETIMEDOUT;
 	} else {
-		err = ufshcd_task_req_compl(hba, free_slot, tm_response);
+		err = 0;
+		memcpy(treq, hba->utmrdl_base_addr + free_slot, sizeof(*treq));
+
 		ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
 	}
 
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	__clear_bit(free_slot, &hba->outstanding_tasks);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
 	clear_bit(free_slot, &hba->tm_condition);
 	ufshcd_put_tm_slot(hba, free_slot);
 	wake_up(&hba->tm_tag_wq);
@@ -5698,6 +5710,236 @@
 }
 
 /**
+ * ufshcd_issue_tm_cmd - issues task management commands to controller
+ * @hba: per adapter instance
+ * @lun_id: LUN ID to which TM command is sent
+ * @task_id: task ID to which the TM command is applicable
+ * @tm_function: task management function opcode
+ * @tm_response: task management service response return value
+ *
+ * Returns non-zero value on error, zero on success.
+ */
+static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
+		u8 tm_function, u8 *tm_response)
+{
+	struct utp_task_req_desc treq = { { 0 }, };
+	int ocs_value, err;
+
+	/* Configure task request descriptor */
+	treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
+	treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
+
+	/* Configure task request UPIU */
+	treq.req_header.dword_0 = cpu_to_be32(lun_id << 8) |
+				  cpu_to_be32(UPIU_TRANSACTION_TASK_REQ << 24);
+	treq.req_header.dword_1 = cpu_to_be32(tm_function << 16);
+
+	/*
+	 * The host shall provide the same value for LUN field in the basic
+	 * header and for Input Parameter.
+	 */
+	treq.input_param1 = cpu_to_be32(lun_id);
+	treq.input_param2 = cpu_to_be32(task_id);
+
+	err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
+	if (err == -ETIMEDOUT)
+		return err;
+
+	ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
+	if (ocs_value != OCS_SUCCESS)
+		dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
+				__func__, ocs_value);
+	else if (tm_response)
+		*tm_response = be32_to_cpu(treq.output_param1) &
+				MASK_TM_SERVICE_RESP;
+	return err;
+}
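
A sketch of how the wrapper is typically used, for instance for a logical unit reset as in the device reset error handler; `hba` and `lun` are assumed to come from the surrounding context, and the opcode/response constants are the ones defined in ufs.h:

	u8 resp = 0xF;
	int err;

	err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL)
		dev_err(hba->dev, "LU reset failed: err %d, resp 0x%x\n",
			err, resp);
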
+
+/**
+ * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests
+ * @hba:	per-adapter instance
+ * @req_upiu:	upiu request
+ * @rsp_upiu:	upiu reply
+ * @cmd_type:	device management command type, DEV_CMD_TYPE_NOP or DEV_CMD_TYPE_QUERY
+ * @desc_buff:	pointer to descriptor buffer, NULL if NA
+ * @buff_len:	descriptor size, 0 if NA
+ * @desc_op:	descriptor operation
+ *
+ * These types of requests use the UTP Transfer Request Descriptor (utrd).
+ * Therefore, they "ride" the device management infrastructure: they use its
+ * tag and its work queues.
+ *
+ * Since there is only one available tag for device management commands,
+ * the caller is expected to hold the hba->dev_cmd.lock mutex.
+ */
+static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
+					struct utp_upiu_req *req_upiu,
+					struct utp_upiu_req *rsp_upiu,
+					u8 *desc_buff, int *buff_len,
+					int cmd_type,
+					enum query_opcode desc_op)
+{
+	struct ufshcd_lrb *lrbp;
+	int err = 0;
+	int tag;
+	struct completion wait;
+	unsigned long flags;
+	u32 upiu_flags;
+
+	down_read(&hba->clk_scaling_lock);
+
+	wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
+
+	init_completion(&wait);
+	lrbp = &hba->lrb[tag];
+	WARN_ON(lrbp->cmd);
+
+	lrbp->cmd = NULL;
+	lrbp->sense_bufflen = 0;
+	lrbp->sense_buffer = NULL;
+	lrbp->task_tag = tag;
+	lrbp->lun = 0;
+	lrbp->intr_cmd = true;
+	hba->dev_cmd.type = cmd_type;
+
+	switch (hba->ufs_version) {
+	case UFSHCI_VERSION_10:
+	case UFSHCI_VERSION_11:
+		lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
+		break;
+	default:
+		lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
+		break;
+	}
+
+	/* update the task tag in the request upiu */
+	req_upiu->header.dword_0 |= cpu_to_be32(tag);
+
+	ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
+
+	/* just copy the upiu request as it is */
+	memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
+	if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
+		/* The Data Segment Area is optional depending upon the query
+		 * function value. For WRITE DESCRIPTOR, the data segment
+		 * follows right after the Transaction Specific Fields (tsf).
+		 */
+		memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len);
+		*buff_len = 0;
+	}
+
+	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
+
+	hba->dev_cmd.complete = &wait;
+
+	/* Make sure descriptors are ready before ringing the doorbell */
+	wmb();
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	ufshcd_send_command(hba, tag);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	/*
+	 * ignore the returning value here - ufshcd_check_query_response is
+	 * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
+	 * read the response directly ignoring all errors.
+	 */
+	ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);
+
+	/* just copy the upiu response as it is */
+	memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
+	if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
+		u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu);
+		u16 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
+			       MASK_QUERY_DATA_SEG_LEN;
+
+		if (*buff_len >= resp_len) {
+			memcpy(desc_buff, descp, resp_len);
+			*buff_len = resp_len;
+		} else {
+			dev_warn(hba->dev, "response size is bigger than the buffer\n");
+			*buff_len = 0;
+			err = -EINVAL;
+		}
+	}
+
+	ufshcd_put_dev_cmd_tag(hba, tag);
+	wake_up(&hba->dev_cmd.tag_wq);
+	up_read(&hba->clk_scaling_lock);
+	return err;
+}
+
+/**
+ * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
+ * @hba:	per-adapter instance
+ * @req_upiu:	upiu request
+ * @rsp_upiu:	upiu reply - only 8 DW as we do not support scsi commands
+ * @msgcode:	message code, one of UPIU Transaction Codes Initiator to Target
+ * @desc_buff:	pointer to descriptor buffer, NULL if NA
+ * @buff_len:	descriptor size, 0 if NA
+ * @desc_op:	descriptor operation
+ *
+ * Supports UTP Transfer requests (nop and query), and UTP Task
+ * Management requests.
+ * It is up to the caller to fill the upiu content properly, as it will
+ * be copied without any further input validation.
+ */
+int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
+			     struct utp_upiu_req *req_upiu,
+			     struct utp_upiu_req *rsp_upiu,
+			     int msgcode,
+			     u8 *desc_buff, int *buff_len,
+			     enum query_opcode desc_op)
+{
+	int err;
+	int cmd_type = DEV_CMD_TYPE_QUERY;
+	struct utp_task_req_desc treq = { { 0 }, };
+	int ocs_value;
+	u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC;
+
+	switch (msgcode) {
+	case UPIU_TRANSACTION_NOP_OUT:
+		cmd_type = DEV_CMD_TYPE_NOP;
+		/* fall through */
+	case UPIU_TRANSACTION_QUERY_REQ:
+		ufshcd_hold(hba, false);
+		mutex_lock(&hba->dev_cmd.lock);
+		err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
+						   desc_buff, buff_len,
+						   cmd_type, desc_op);
+		mutex_unlock(&hba->dev_cmd.lock);
+		ufshcd_release(hba);
+
+		break;
+	case UPIU_TRANSACTION_TASK_REQ:
+		treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
+		treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
+
+		memcpy(&treq.req_header, req_upiu, sizeof(*req_upiu));
+
+		err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
+		if (err == -ETIMEDOUT)
+			break;
+
+		ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
+		if (ocs_value != OCS_SUCCESS) {
+			dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
+				ocs_value);
+			break;
+		}
+
+		memcpy(rsp_upiu, &treq.rsp_header, sizeof(*rsp_upiu));
+
+		break;
+	default:
+		err = -EINVAL;
+
+		break;
+	}
+
+	return err;
+}
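
A hedged sketch of a NOP OUT round-trip through this entry point, roughly what the new ufs_bsg transport does. The task tag is filled in internally, so dword_0 only needs the transaction code, and the descriptor arguments can be NULL/unused for a NOP:

	struct utp_upiu_req req = {};
	struct utp_upiu_req rsp = {};
	int err;

	req.header.dword_0 = UPIU_HEADER_DWORD(UPIU_TRANSACTION_NOP_OUT,
					       0, 0, 0);

	err = ufshcd_exec_raw_upiu_cmd(hba, &req, &rsp,
				       UPIU_TRANSACTION_NOP_OUT,
				       NULL, NULL, UPIU_QUERY_OPCODE_NOP);
	/* on success, rsp holds the NOP IN response UPIU */
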
+
+/**
  * ufshcd_eh_device_reset_handler - device reset handler registered to
  *                                    scsi layer.
  * @cmd: SCSI command pointer
@@ -5741,6 +5983,7 @@
 
 out:
 	hba->req_abort_count = 0;
+	ufshcd_update_reg_hist(&hba->ufs_stats.dev_reset, (u32)err);
 	if (!err) {
 		err = SUCCESS;
 	} else {
@@ -5834,6 +6077,7 @@
 	 */
 	scsi_print_command(hba->lrb[tag].cmd);
 	if (!hba->req_abort_count) {
+		ufshcd_update_reg_hist(&hba->ufs_stats.task_abort, 0);
 		ufshcd_print_host_regs(hba);
 		ufshcd_print_host_state(hba);
 		ufshcd_print_pwr_info(hba);
@@ -5969,7 +6213,7 @@
 out:
 	if (err)
 		dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
-
+	ufshcd_update_reg_hist(&hba->ufs_stats.host_reset, (u32)err);
 	return err;
 }
 
@@ -5989,6 +6233,9 @@
 	int retries = MAX_HOST_RESET_RETRIES;
 
 	do {
+		/* Reset the attached device */
+		ufshcd_vops_device_reset(hba);
+
 		err = ufshcd_host_reset_and_restore(hba);
 	} while (err && --retries);
 
@@ -6124,19 +6371,19 @@
 		goto out;
 	}
 
-	if (hba->vreg_info.vcc)
+	if (hba->vreg_info.vcc && hba->vreg_info.vcc->max_uA)
 		icc_level = ufshcd_get_max_icc_level(
 				hba->vreg_info.vcc->max_uA,
 				POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
 				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
 
-	if (hba->vreg_info.vccq)
+	if (hba->vreg_info.vccq && hba->vreg_info.vccq->max_uA)
 		icc_level = ufshcd_get_max_icc_level(
 				hba->vreg_info.vccq->max_uA,
 				icc_level,
 				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
 
-	if (hba->vreg_info.vccq2)
+	if (hba->vreg_info.vccq2 && hba->vreg_info.vccq2->max_uA)
 		icc_level = ufshcd_get_max_icc_level(
 				hba->vreg_info.vccq2->max_uA,
 				icc_level,
@@ -6253,6 +6500,9 @@
 	u8 model_index;
 	u8 *desc_buf;
 
+	if (!dev_desc)
+		return -EINVAL;
+
 	buff_len = max_t(size_t, hba->desc_size.dev_desc,
 			 QUERY_DESC_MAX_SIZE + 1);
 	desc_buf = kmalloc(buff_len, GFP_KERNEL);
@@ -6276,31 +6526,31 @@
 				     desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
 
 	model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
-
-	/* Zero-pad entire buffer for string termination. */
-	memset(desc_buf, 0, buff_len);
-
-	err = ufshcd_read_string_desc(hba, model_index, desc_buf,
-				      QUERY_DESC_MAX_SIZE, true/*ASCII*/);
-	if (err) {
+	err = ufshcd_read_string_desc(hba, model_index,
+				      &dev_desc->model, SD_ASCII_STD);
+	if (err < 0) {
 		dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
 			__func__, err);
 		goto out;
 	}
 
-	desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
-	strlcpy(dev_desc->model, (desc_buf + QUERY_DESC_HDR_SIZE),
-		min_t(u8, desc_buf[QUERY_DESC_LENGTH_OFFSET],
-		      MAX_MODEL_LEN));
-
-	/* Null terminate the model string */
-	dev_desc->model[MAX_MODEL_LEN] = '\0';
+	/*
+	 * ufshcd_read_string_desc returns the size of the string;
+	 * reset the error value to report success.
+	 */
+	err = 0;
 
 out:
 	kfree(desc_buf);
 	return err;
 }
 
+static void ufs_put_device_desc(struct ufs_dev_desc *dev_desc)
+{
+	kfree(dev_desc->model);
+	dev_desc->model = NULL;
+}
+
 static void ufs_fixup_device_setup(struct ufs_hba *hba,
 				   struct ufs_dev_desc *dev_desc)
 {
@@ -6309,8 +6559,9 @@
 	for (f = ufs_fixups; f->quirk; f++) {
 		if ((f->card.wmanufacturerid == dev_desc->wmanufacturerid ||
 		     f->card.wmanufacturerid == UFS_ANY_VENDOR) &&
-		    (STR_PRFX_EQUAL(f->card.model, dev_desc->model) ||
-		     !strcmp(f->card.model, UFS_ANY_MODEL)))
+		     ((dev_desc->model &&
+		       STR_PRFX_EQUAL(f->card.model, dev_desc->model)) ||
+		      !strcmp(f->card.model, UFS_ANY_MODEL)))
 			hba->dev_quirks |= f->quirk;
 	}
 }
@@ -6481,17 +6732,8 @@
 
 static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
 {
-	int err_reg_hist_size = sizeof(struct ufs_uic_err_reg_hist);
-
 	hba->ufs_stats.hibern8_exit_cnt = 0;
 	hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
-
-	memset(&hba->ufs_stats.pa_err, 0, err_reg_hist_size);
-	memset(&hba->ufs_stats.dl_err, 0, err_reg_hist_size);
-	memset(&hba->ufs_stats.nl_err, 0, err_reg_hist_size);
-	memset(&hba->ufs_stats.tl_err, 0, err_reg_hist_size);
-	memset(&hba->ufs_stats.dme_err, 0, err_reg_hist_size);
-
 	hba->req_abort_count = 0;
 }
 
@@ -6545,6 +6787,74 @@
 	hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
 }
 
+static struct ufs_ref_clk ufs_ref_clk_freqs[] = {
+	{19200000, REF_CLK_FREQ_19_2_MHZ},
+	{26000000, REF_CLK_FREQ_26_MHZ},
+	{38400000, REF_CLK_FREQ_38_4_MHZ},
+	{52000000, REF_CLK_FREQ_52_MHZ},
+	{0, REF_CLK_FREQ_INVAL},
+};
+
+static enum ufs_ref_clk_freq
+ufs_get_bref_clk_from_hz(unsigned long freq)
+{
+	int i;
+
+	for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++)
+		if (ufs_ref_clk_freqs[i].freq_hz == freq)
+			return ufs_ref_clk_freqs[i].val;
+
+	return REF_CLK_FREQ_INVAL;
+}
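
For example, a 19.2 MHz "ref_clk" maps straight through the table, while any rate not listed falls out as invalid:

	enum ufs_ref_clk_freq val;

	val = ufs_get_bref_clk_from_hz(19200000);	/* REF_CLK_FREQ_19_2_MHZ */
	val = ufs_get_bref_clk_from_hz(24000000);	/* REF_CLK_FREQ_INVAL */
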
+
+void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk)
+{
+	unsigned long freq;
+
+	freq = clk_get_rate(refclk);
+
+	hba->dev_ref_clk_freq = ufs_get_bref_clk_from_hz(freq);
+
+	if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
+		dev_err(hba->dev,
+			"invalid ref_clk setting = %lu\n", freq);
+}
+
+static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
+{
+	int err;
+	u32 ref_clk;
+	u32 freq = hba->dev_ref_clk_freq;
+
+	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+			QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk);
+
+	if (err) {
+		dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n",
+			err);
+		goto out;
+	}
+
+	if (ref_clk == freq)
+		goto out; /* nothing to update */
+
+	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+			QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq);
+
+	if (err) {
+		dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n",
+			ufs_ref_clk_freqs[freq].freq_hz);
+		goto out;
+	}
+
+	dev_dbg(hba->dev, "bRefClkFreq set to %lu Hz\n",
+			ufs_ref_clk_freqs[freq].freq_hz);
+
+out:
+	return err;
+}
+
 /**
  * ufshcd_probe_hba - probe hba to detect device and initialize
  * @hba: per-adapter instance
@@ -6593,12 +6903,9 @@
 	}
 
 	ufs_fixup_device_setup(hba, &card);
-	ufshcd_tune_unipro_params(hba);
+	ufs_put_device_desc(&card);
 
-	ret = ufshcd_set_vccq_rail_unused(hba,
-		(hba->dev_quirks & UFS_DEVICE_NO_VCCQ) ? true : false);
-	if (ret)
-		goto out;
+	ufshcd_tune_unipro_params(hba);
 
 	/* UFS device is also active now */
 	ufshcd_set_ufs_dev_active(hba);
@@ -6610,6 +6917,12 @@
 			"%s: Failed getting max supported power mode\n",
 			__func__);
 	} else {
+		/*
+		 * Set the right value to bRefClkFreq before attempting to
+		 * switch to HS gears.
+		 */
+		if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
+			ufshcd_set_dev_ref_clk(hba);
 		ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
 		if (ret) {
 			dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
@@ -6655,6 +6968,8 @@
 			hba->clk_scaling.is_allowed = true;
 		}
 
+		ufs_bsg_probe(hba);
+
 		scsi_scan_host(hba->host);
 		pm_runtime_put_sync(hba->dev);
 	}
@@ -6669,6 +6984,7 @@
 	 */
 	if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
 		pm_runtime_put_sync(hba->dev);
+		ufshcd_exit_clk_scaling(hba);
 		ufshcd_hba_exit(hba);
 	}
 
@@ -6748,9 +7064,11 @@
 	.sg_tablesize		= SG_ALL,
 	.cmd_per_lun		= UFSHCD_CMD_PER_LUN,
 	.can_queue		= UFSHCD_CAN_QUEUE,
+	.max_segment_size	= PRDT_DATA_BYTE_COUNT_MAX,
 	.max_host_blocked	= 1,
 	.track_queue_depth	= 1,
 	.sdev_groups		= ufshcd_driver_groups,
+	.dma_boundary		= PAGE_SIZE - 1,
 };
 
 static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
@@ -6761,6 +7079,15 @@
 	if (!vreg)
 		return 0;
 
+	/*
+	 * "set_load" operation shall be required on those regulators
+	 * which specifically configured current limitation. Otherwise
+	 * zero max_uA may cause unexpected behavior when regulator is
+	 * enabled or set as high power mode.
+	 */
+	if (!vreg->max_uA)
+		return 0;
+
 	ret = regulator_set_load(vreg->reg, ua);
 	if (ret < 0) {
 		dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
@@ -6773,13 +7100,7 @@
 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
 					 struct ufs_vreg *vreg)
 {
-	if (!vreg)
-		return 0;
-	else if (vreg->unused)
-		return 0;
-	else
-		return ufshcd_config_vreg_load(hba->dev, vreg,
-					       UFS_VREG_LPM_LOAD_UA);
+	return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
 }
 
 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
@@ -6787,10 +7108,8 @@
 {
 	if (!vreg)
 		return 0;
-	else if (vreg->unused)
-		return 0;
-	else
-		return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
+
+	return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
 }
 
 static int ufshcd_config_vreg(struct device *dev,
@@ -6807,12 +7126,15 @@
 	name = vreg->name;
 
 	if (regulator_count_voltages(reg) > 0) {
-		min_uV = on ? vreg->min_uV : 0;
-		ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
-		if (ret) {
-			dev_err(dev, "%s: %s set voltage failed, err=%d\n",
+		if (vreg->min_uV && vreg->max_uV) {
+			min_uV = on ? vreg->min_uV : 0;
+			ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
+			if (ret) {
+				dev_err(dev,
+					"%s: %s set voltage failed, err=%d\n",
 					__func__, name, ret);
-			goto out;
+				goto out;
+			}
 		}
 
 		uA_load = on ? vreg->max_uA : 0;
@@ -6828,9 +7150,7 @@
 {
 	int ret = 0;
 
-	if (!vreg)
-		goto out;
-	else if (vreg->enabled || vreg->unused)
+	if (!vreg || vreg->enabled)
 		goto out;
 
 	ret = ufshcd_config_vreg(dev, vreg, true);
@@ -6850,9 +7170,7 @@
 {
 	int ret = 0;
 
-	if (!vreg)
-		goto out;
-	else if (!vreg->enabled || vreg->unused)
+	if (!vreg || !vreg->enabled)
 		goto out;
 
 	ret = regulator_disable(vreg->reg);
@@ -6875,9 +7193,6 @@
 	struct device *dev = hba->dev;
 	struct ufs_vreg_info *info = &hba->vreg_info;
 
-	if (!info)
-		goto out;
-
 	ret = ufshcd_toggle_vreg(dev, info->vcc, on);
 	if (ret)
 		goto out;
@@ -6903,10 +7218,7 @@
 {
 	struct ufs_vreg_info *info = &hba->vreg_info;
 
-	if (info)
-		return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
-
-	return 0;
+	return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
 }
 
 static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
@@ -6932,9 +7244,6 @@
 	struct device *dev = hba->dev;
 	struct ufs_vreg_info *info = &hba->vreg_info;
 
-	if (!info)
-		goto out;
-
 	ret = ufshcd_get_vreg(dev, info->vcc);
 	if (ret)
 		goto out;
@@ -6958,36 +7267,6 @@
 	return 0;
 }
 
-static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused)
-{
-	int ret = 0;
-	struct ufs_vreg_info *info = &hba->vreg_info;
-
-	if (!info)
-		goto out;
-	else if (!info->vccq)
-		goto out;
-
-	if (unused) {
-		/* shut off the rail here */
-		ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false);
-		/*
-		 * Mark this rail as no longer used, so it doesn't get enabled
-		 * later by mistake
-		 */
-		if (!ret)
-			info->vccq->unused = true;
-	} else {
-		/*
-		 * rail should have been already enabled hence just make sure
-		 * that unused flag is cleared.
-		 */
-		info->vccq->unused = false;
-	}
-out:
-	return ret;
-}
-
 static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
 					bool skip_ref_clk)
 {
@@ -7093,6 +7372,14 @@
 			goto out;
 		}
 
+		/*
+		 * Parse device ref clk freq as per device tree "ref_clk".
+		 * Default dev_ref_clk_freq is set to REF_CLK_FREQ_INVAL
+		 * in ufshcd_alloc_host().
+		 */
+		if (!strcmp(clki->name, "ref_clk"))
+			ufshcd_parse_dev_ref_clk_freq(hba, clki->clk);
+
 		if (clki->max_freq) {
 			ret = clk_set_rate(clki->clk, clki->max_freq);
 			if (ret) {
@@ -7204,12 +7491,9 @@
 		ufshcd_variant_hba_exit(hba);
 		ufshcd_setup_vreg(hba, false);
 		ufshcd_suspend_clkscaling(hba);
-		if (ufshcd_is_clkscaling_supported(hba)) {
+		if (ufshcd_is_clkscaling_supported(hba))
 			if (hba->devfreq)
 				ufshcd_suspend_clkscaling(hba);
-			destroy_workqueue(hba->clk_scaling.workq);
-			ufshcd_devfreq_remove(hba);
-		}
 		ufshcd_setup_clocks(hba, false);
 		ufshcd_setup_hba_vreg(hba, false);
 		hba->is_powered = false;
@@ -7223,19 +7507,19 @@
 				0,
 				0,
 				0,
-				UFSHCD_REQ_SENSE_SIZE,
+				UFS_SENSE_SIZE,
 				0};
 	char *buffer;
 	int ret;
 
-	buffer = kzalloc(UFSHCD_REQ_SENSE_SIZE, GFP_KERNEL);
+	buffer = kzalloc(UFS_SENSE_SIZE, GFP_KERNEL);
 	if (!buffer) {
 		ret = -ENOMEM;
 		goto out;
 	}
 
 	ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer,
-			UFSHCD_REQ_SENSE_SIZE, NULL, NULL,
+			UFS_SENSE_SIZE, NULL, NULL,
 			msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL);
 	if (ret)
 		pr_err("%s: failed with err %d\n", __func__, ret);
@@ -7583,6 +7867,8 @@
 	ufshcd_release(hba);
 out:
 	hba->pm_op_in_progress = 0;
+	if (ret)
+		ufshcd_update_reg_hist(&hba->ufs_stats.suspend_err, (u32)ret);
 	return ret;
 }
 
@@ -7685,6 +7971,8 @@
 	ufshcd_setup_clocks(hba, false);
 out:
 	hba->pm_op_in_progress = 0;
+	if (ret)
+		ufshcd_update_reg_hist(&hba->ufs_stats.resume_err, (u32)ret);
 	return ret;
 }
 
@@ -7762,6 +8050,8 @@
 	trace_ufshcd_system_resume(dev_name(hba->dev), ret,
 		ktime_to_us(ktime_sub(ktime_get(), start)),
 		hba->curr_dev_pwr_mode, hba->uic_link_state);
+	if (!ret)
+		hba->is_sys_suspended = false;
 	return ret;
 }
 EXPORT_SYMBOL(ufshcd_system_resume);
@@ -7853,6 +8143,9 @@
 {
 	int ret = 0;
 
+	if (!hba->is_powered)
+		goto out;
+
 	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
 		goto out;
 
@@ -7878,12 +8171,14 @@
  */
 void ufshcd_remove(struct ufs_hba *hba)
 {
+	ufs_bsg_remove(hba);
 	ufs_sysfs_remove_nodes(hba->dev);
 	scsi_remove_host(hba->host);
 	/* disable interrupts */
 	ufshcd_disable_intr(hba, hba->intr_mask);
 	ufshcd_hba_stop(hba, true);
 
+	ufshcd_exit_clk_scaling(hba);
 	ufshcd_exit_clk_gating(hba);
 	if (ufshcd_is_clkscaling_supported(hba))
 		device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
@@ -7943,17 +8238,11 @@
 		err = -ENOMEM;
 		goto out_error;
 	}
-
-	/*
-	 * Do not use blk-mq at this time because blk-mq does not support
-	 * runtime pm.
-	 */
-	host->use_blk_mq = false;
-
 	hba = shost_priv(host);
 	hba->host = host;
 	hba->dev = dev;
 	*hba_handle = hba;
+	hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
 
 	INIT_LIST_HEAD(&hba->clk_list_head);
 
@@ -8030,7 +8319,7 @@
 	host->max_lun = UFS_MAX_LUNS;
 	host->max_channel = UFSHCD_MAX_CHANNEL;
 	host->unique_id = host->host_no;
-	host->max_cmd_len = MAX_CDB_SIZE;
+	host->max_cmd_len = UFS_CDB_SIZE;
 
 	hba->max_pwr_info.is_valid = false;
 
@@ -8055,6 +8344,8 @@
 
 	ufshcd_init_clk_gating(hba);
 
+	ufshcd_init_clk_scaling(hba);
+
 	/*
 	 * In order to avoid any spurious interrupt immediately after
 	 * registering UFS controller interrupt handler, clear any pending UFS
@@ -8084,6 +8375,9 @@
 		goto exit_gating;
 	}
 
+	/* Reset the attached device */
+	ufshcd_vops_device_reset(hba);
+
 	/* Host controller enable */
 	err = ufshcd_hba_enable(hba);
 	if (err) {
@@ -8093,21 +8387,6 @@
 		goto out_remove_scsi_host;
 	}
 
-	if (ufshcd_is_clkscaling_supported(hba)) {
-		char wq_name[sizeof("ufs_clkscaling_00")];
-
-		INIT_WORK(&hba->clk_scaling.suspend_work,
-			  ufshcd_clk_scaling_suspend_work);
-		INIT_WORK(&hba->clk_scaling.resume_work,
-			  ufshcd_clk_scaling_resume_work);
-
-		snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
-			 host->host_no);
-		hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
-
-		ufshcd_clkscaling_init_sysfs(hba);
-	}
-
 	/*
 	 * Set the default power management level for runtime and system PM.
 	 * Default power saving mode is to keep UFS link in Hibern8 state
@@ -8121,7 +8400,7 @@
 						UIC_LINK_HIBERN8_STATE);
 
 	/* Set the default auto-hiberate idle timer value to 150 ms */
-	if (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) {
+	if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
 		hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
 			    FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
 	}
@@ -8145,6 +8424,7 @@
 out_remove_scsi_host:
 	scsi_remove_host(hba->host);
 exit_gating:
+	ufshcd_exit_clk_scaling(hba);
 	ufshcd_exit_clk_gating(hba);
 out_disable:
 	hba->is_irq_enabled = false;
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 33fdd3f..c94cfda 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -298,6 +298,7 @@
  * @resume: called during host controller PM callback
  * @dbg_register_dump: used to dump controller debug information
  * @phy_initialization: used to initialize phys
+ * @device_reset: called to issue a reset pulse on the UFS device
  */
 struct ufs_hba_variant_ops {
 	const char *name;
@@ -326,6 +327,7 @@
 	int     (*resume)(struct ufs_hba *, enum ufs_pm_op);
 	void	(*dbg_register_dump)(struct ufs_hba *hba);
 	int	(*phy_initialization)(struct ufs_hba *);
+	void	(*device_reset)(struct ufs_hba *hba);
 };
 
 /* clock gating state  */
@@ -412,17 +414,17 @@
 	u32 icc_level;
 };
 
-#define UIC_ERR_REG_HIST_LENGTH 8
+#define UFS_ERR_REG_HIST_LENGTH 8
 /**
- * struct ufs_uic_err_reg_hist - keeps history of uic errors
+ * struct ufs_err_reg_hist - keeps history of errors
  * @pos: index to indicate cyclic buffer position
  * @reg: cyclic buffer for registers value
  * @tstamp: cyclic buffer for time stamp
  */
-struct ufs_uic_err_reg_hist {
+struct ufs_err_reg_hist {
 	int pos;
-	u32 reg[UIC_ERR_REG_HIST_LENGTH];
-	ktime_t tstamp[UIC_ERR_REG_HIST_LENGTH];
+	u32 reg[UFS_ERR_REG_HIST_LENGTH];
+	ktime_t tstamp[UFS_ERR_REG_HIST_LENGTH];
 };
 
 /**
@@ -436,15 +438,37 @@
  * @nl_err: tracks nl-uic errors
  * @tl_err: tracks tl-uic errors
  * @dme_err: tracks dme errors
+ * @auto_hibern8_err: tracks auto-hibernate errors
+ * @fatal_err: tracks fatal errors
+ * @link_startup_err: tracks link-startup errors
+ * @resume_err: tracks resume errors
+ * @suspend_err: tracks suspend errors
+ * @dev_reset: tracks device reset events
+ * @host_reset: tracks host reset events
+ * @task_abort: tracks task abort events
  */
 struct ufs_stats {
 	u32 hibern8_exit_cnt;
 	ktime_t last_hibern8_exit_tstamp;
-	struct ufs_uic_err_reg_hist pa_err;
-	struct ufs_uic_err_reg_hist dl_err;
-	struct ufs_uic_err_reg_hist nl_err;
-	struct ufs_uic_err_reg_hist tl_err;
-	struct ufs_uic_err_reg_hist dme_err;
+
+	/* uic specific errors */
+	struct ufs_err_reg_hist pa_err;
+	struct ufs_err_reg_hist dl_err;
+	struct ufs_err_reg_hist nl_err;
+	struct ufs_err_reg_hist tl_err;
+	struct ufs_err_reg_hist dme_err;
+
+	/* fatal errors */
+	struct ufs_err_reg_hist auto_hibern8_err;
+	struct ufs_err_reg_hist fatal_err;
+	struct ufs_err_reg_hist link_startup_err;
+	struct ufs_err_reg_hist resume_err;
+	struct ufs_err_reg_hist suspend_err;
+
+	/* abnormal events */
+	struct ufs_err_reg_hist dev_reset;
+	struct ufs_err_reg_hist host_reset;
+	struct ufs_err_reg_hist task_abort;
 };
 
 /**
@@ -546,10 +570,11 @@
 	int nutrs;
 	int nutmrs;
 	u32 ufs_version;
-	struct ufs_hba_variant_ops *vops;
+	const struct ufs_hba_variant_ops *vops;
 	void *priv;
 	unsigned int irq;
 	bool is_irq_enabled;
+	enum ufs_ref_clk_freq dev_ref_clk_freq;
 
 	/* Interrupt aggregation support is broken */
 	#define UFSHCD_QUIRK_BROKEN_INTR_AGGR			0x1
@@ -702,6 +727,9 @@
 	struct rw_semaphore clk_scaling_lock;
 	struct ufs_desc_size desc_size;
 	atomic_t scsi_block_reqs_cnt;
+
+	struct device		bsg_dev;
+	struct request_queue	*bsg_queue;
 };
 
 /* Returns true if clocks can be gated. Otherwise false */
@@ -736,6 +764,11 @@
 #endif
 }
 
+static inline bool ufshcd_is_auto_hibern8_supported(struct ufs_hba *hba)
+{
+	return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT);
+}
+
 #define ufshcd_writel(hba, val, reg)	\
 	writel((val), (hba)->mmio_base + (reg))
 #define ufshcd_readl(hba, reg)	\
@@ -765,6 +798,7 @@
 int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
 				u32 val, unsigned long interval_us,
 				unsigned long timeout_ms, bool can_sleep);
+void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
 
 static inline void check_upiu_size(void)
 {
@@ -881,8 +915,11 @@
 		      enum attr_idn idn, u8 index, u8 selector, u32 *attr_val);
 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
 	enum flag_idn idn, bool *flag_res);
-int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
-			    u8 *buf, u32 size, bool ascii);
+
+#define SD_ASCII_STD true
+#define SD_RAW false
+int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
+			    u8 **buf, bool ascii);
 
 int ufshcd_hold(struct ufs_hba *hba, bool async);
 void ufshcd_release(struct ufs_hba *hba);
@@ -892,6 +929,15 @@
 
 u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
 
+int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
+
+int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
+			     struct utp_upiu_req *req_upiu,
+			     struct utp_upiu_req *rsp_upiu,
+			     int msgcode,
+			     u8 *desc_buff, int *buff_len,
+			     enum query_opcode desc_op);
+
 /* Wrapper functions for safely calling variant operations */
 static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
 {
@@ -1026,6 +1072,12 @@
 		hba->vops->dbg_register_dump(hba);
 }
 
+static inline void ufshcd_vops_device_reset(struct ufs_hba *hba)
+{
+	if (hba->vops && hba->vops->device_reset)
+		hba->vops->device_reset(hba);
+}
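
A hypothetical variant implementation of the new hook, toggling a device reset GPIO the way platform glue drivers tend to wire this up. The `example_ufs_host` struct, the gpio descriptor, and the hold times are illustrative assumptions, not part of this patch; `ufshcd_get_variant()` returns the hba->priv pointer:

	struct example_ufs_host {
		struct gpio_desc *device_reset_gpio;	/* hypothetical platform data */
	};

	static void example_ufs_device_reset(struct ufs_hba *hba)
	{
		struct example_ufs_host *host = ufshcd_get_variant(hba);

		/* assert the reset line, hold it briefly, then release it so
		 * the device is in a known state before link startup */
		gpiod_set_value_cansleep(host->device_reset_gpio, 1);
		usleep_range(10, 15);
		gpiod_set_value_cansleep(host->device_reset_gpio, 0);
		usleep_range(10, 15);
	}

	static const struct ufs_hba_variant_ops example_ufs_vops = {
		.name		= "example",
		.device_reset	= example_ufs_device_reset,
	};
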
+
 extern struct ufs_pm_lvl_states ufs_pm_lvl_states[];
 
 /*
diff --git a/drivers/scsi/ufs/ufshci-dwc.h b/drivers/scsi/ufs/ufshci-dwc.h
index ca341fe..6c290e2 100644
--- a/drivers/scsi/ufs/ufshci-dwc.h
+++ b/drivers/scsi/ufs/ufshci-dwc.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * UFS Host driver for Synopsys Designware Core
  *
  * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
  *
  * Authors: Joao Pinto <jpinto@synopsys.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #ifndef _UFSHCI_DWC_H
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index bb5d9c7..dbb75cd 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -144,8 +144,10 @@
 #define CONTROLLER_FATAL_ERROR			0x10000
 #define SYSTEM_BUS_FATAL_ERROR			0x20000
 
-#define UFSHCD_UIC_PWR_MASK	(UIC_HIBERNATE_ENTER |\
-				UIC_HIBERNATE_EXIT |\
+#define UFSHCD_UIC_HIBERN8_MASK	(UIC_HIBERNATE_ENTER |\
+				UIC_HIBERNATE_EXIT)
+
+#define UFSHCD_UIC_PWR_MASK	(UFSHCD_UIC_HIBERN8_MASK |\
 				UIC_POWER_MODE)
 
 #define UFSHCD_UIC_MASK		(UIC_COMMAND_COMPL | UFSHCD_UIC_PWR_MASK)
@@ -433,22 +435,25 @@
 	__le16  prd_table_offset;
 };
 
-/**
- * struct utp_task_req_desc - UTMRD structure
- * @header: UTMRD header DW-0 to DW-3
- * @task_req_upiu: Pointer to task request UPIU DW-4 to DW-11
- * @task_rsp_upiu: Pointer to task response UPIU DW12 to DW-19
+/*
+ * UTMRD structure.
  */
 struct utp_task_req_desc {
-
 	/* DW 0-3 */
 	struct request_desc_header header;
 
-	/* DW 4-11 */
-	__le32 task_req_upiu[TASK_REQ_UPIU_SIZE_DWORDS];
+	/* DW 4-11 - Task request UPIU structure */
+	struct utp_upiu_header	req_header;
+	__be32			input_param1;
+	__be32			input_param2;
+	__be32			input_param3;
+	__be32			__reserved1[2];
 
-	/* DW 12-19 */
-	__le32 task_rsp_upiu[TASK_RSP_UPIU_SIZE_DWORDS];
+	/* DW 12-19 - Task Management Response UPIU structure */
+	struct utp_upiu_header	rsp_header;
+	__be32			output_param1;
+	__be32			output_param2;
+	__be32			__reserved2[3];
 };
 
 #endif /* End of Header */
diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h
index 23129d7..f539f87 100644
--- a/drivers/scsi/ufs/unipro.h
+++ b/drivers/scsi/ufs/unipro.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * drivers/scsi/ufs/unipro.h
  *
  * Copyright (C) 2013 Samsung Electronics Co., Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
  */
 
 #ifndef _UNIPRO_H_
@@ -52,7 +48,7 @@
 #define RX_HS_UNTERMINATED_ENABLE		0x00A6
 #define RX_ENTER_HIBERN8			0x00A7
 #define RX_BYPASS_8B10B_ENABLE			0x00A8
-#define RX_TERMINATION_FORCE_ENABLE		0x0089
+#define RX_TERMINATION_FORCE_ENABLE		0x00A9
 #define RX_MIN_ACTIVATETIME_CAPABILITY		0x008F
 #define RX_HIBERN8TIME_CAPABILITY		0x0092
 #define RX_REFCLKFREQ				0x00EB
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 1c72db9..bfec84a 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Virtio SCSI HBA driver
  *
@@ -7,10 +8,6 @@
  * Authors:
  *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
  *  Paolo Bonzini   <pbonzini@redhat.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- *
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -33,6 +30,8 @@
 #include <linux/seqlock.h>
 #include <linux/blk-mq-virtio.h>
 
+#include "sd.h"
+
 #define VIRTIO_SCSI_MEMPOOL_SZ 64
 #define VIRTIO_SCSI_EVENT_LEN 8
 #define VIRTIO_SCSI_VQ_BASE 2
@@ -68,33 +67,6 @@
 	struct virtqueue *vq;
 };
 
-/*
- * Per-target queue state.
- *
- * This struct holds the data needed by the queue steering policy.  When a
- * target is sent multiple requests, we need to drive them to the same queue so
- * that FIFO processing order is kept.  However, if a target was idle, we can
- * choose a queue arbitrarily.  In this case the queue is chosen according to
- * the current VCPU, so the driver expects the number of request queues to be
- * equal to the number of VCPUs.  This makes it easy and fast to select the
- * queue, and also lets the driver optimize the IRQ affinity for the virtqueues
- * (each virtqueue's affinity is set to the CPU that "owns" the queue).
- *
- * tgt_seq is held to serialize reading and writing req_vq.
- *
- * Decrements of reqs are never concurrent with writes of req_vq: before the
- * decrement reqs will be != 0; after the decrement the virtqueue completion
- * routine will not use the req_vq so it can be changed by a new request.
- * Thus they can happen outside the tgt_seq, provided of course we make reqs
- * an atomic_t.
- */
-struct virtio_scsi_target_state {
-	seqcount_t tgt_seq;
-
-	/* Currently active virtqueue for requests sent to this target. */
-	struct virtio_scsi_vq *req_vq;
-};
-
 /* Driver instance state */
 struct virtio_scsi {
 	struct virtio_device *vdev;
@@ -104,9 +76,6 @@
 
 	u32 num_queues;
 
-	/* If the affinity hint is set for virtqueues */
-	bool affinity_hint_set;
-
 	struct hlist_node node;
 
 	/* Protected by event_vq lock */
@@ -127,16 +96,8 @@
 
 static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid)
 {
-	if (!resid)
-		return;
-
-	if (!scsi_bidi_cmnd(sc)) {
+	if (resid)
 		scsi_set_resid(sc, resid);
-		return;
-	}
-
-	scsi_in(sc)->resid = min(resid, scsi_in(sc)->length);
-	scsi_out(sc)->resid = resid - scsi_in(sc)->resid;
 }
 
 /**
@@ -365,6 +326,36 @@
 	scsi_device_put(sdev);
 }
 
+static void virtscsi_rescan_hotunplug(struct virtio_scsi *vscsi)
+{
+	struct scsi_device *sdev;
+	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
+	unsigned char scsi_cmd[MAX_COMMAND_SIZE];
+	int result, inquiry_len, inq_result_len = 256;
+	char *inq_result = kmalloc(inq_result_len, GFP_KERNEL);
+
+	if (!inq_result)
+		return;
+
+	shost_for_each_device(sdev, shost) {
+		inquiry_len = sdev->inquiry_len ? sdev->inquiry_len : 36;
+
+		memset(scsi_cmd, 0, sizeof(scsi_cmd));
+		scsi_cmd[0] = INQUIRY;
+		scsi_cmd[4] = (unsigned char) inquiry_len;
+
+		memset(inq_result, 0, inq_result_len);
+
+		result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
+					  inq_result, inquiry_len, NULL,
+					  SD_TIMEOUT, SD_MAX_RETRIES, NULL);
+
+		if (result == 0 && inq_result[0] >> 5) {
+			/* PQ indicates the LUN is not attached */
+			scsi_remove_device(sdev);
+		}
+	}
+
+	kfree(inq_result);
+}
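
The `inq_result[0] >> 5` test reads the Peripheral Qualifier from byte 0 of standard INQUIRY data (PQ in bits 7-5, device type in bits 4-0); a non-zero qualifier means no LUN is actually attached at that address. A small sketch of the byte layout:

	unsigned char byte0 = inq_result[0];
	unsigned int pq  = byte0 >> 5;		/* peripheral qualifier */
	unsigned int pdt = byte0 & 0x1f;	/* peripheral device type */

	/* pq == 0: device attached; pq == 1 or 3: not attached, so the
	 * stale scsi_device can be removed as above */
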
+
 static void virtscsi_handle_event(struct work_struct *work)
 {
 	struct virtio_scsi_event_node *event_node =
@@ -376,6 +367,7 @@
 	    cpu_to_virtio32(vscsi->vdev, VIRTIO_SCSI_T_EVENTS_MISSED)) {
 		event->event &= ~cpu_to_virtio32(vscsi->vdev,
 						   VIRTIO_SCSI_T_EVENTS_MISSED);
+		virtscsi_rescan_hotunplug(vscsi);
 		scsi_scan_host(virtio_scsi_host(vscsi->vdev));
 	}
 
@@ -410,14 +402,7 @@
 	virtscsi_vq_done(vscsi, &vscsi->event_vq, virtscsi_complete_event);
 };
 
-/**
- * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue
- * @vq		: the struct virtqueue we're talking about
- * @cmd		: command structure
- * @req_size	: size of the request buffer
- * @resp_size	: size of the response buffer
- */
-static int virtscsi_add_cmd(struct virtqueue *vq,
+static int __virtscsi_add_cmd(struct virtqueue *vq,
 			    struct virtio_scsi_cmd *cmd,
 			    size_t req_size, size_t resp_size)
 {
@@ -430,9 +415,9 @@
 
 	if (sc && sc->sc_data_direction != DMA_NONE) {
 		if (sc->sc_data_direction != DMA_FROM_DEVICE)
-			out = &scsi_out(sc)->table;
+			out = &sc->sdb.table;
 		if (sc->sc_data_direction != DMA_TO_DEVICE)
-			in = &scsi_in(sc)->table;
+			in = &sc->sdb.table;
 	}
 
 	/* Request header.  */
@@ -462,17 +447,39 @@
 	return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC);
 }
 
-static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq,
+static void virtscsi_kick_vq(struct virtio_scsi_vq *vq)
+{
+	bool needs_kick;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vq->vq_lock, flags);
+	needs_kick = virtqueue_kick_prepare(vq->vq);
+	spin_unlock_irqrestore(&vq->vq_lock, flags);
+
+	if (needs_kick)
+		virtqueue_notify(vq->vq);
+}
+
+/**
+ * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue, optionally kick it
+ * @vq		: the struct virtqueue we're talking about
+ * @cmd		: command structure
+ * @req_size	: size of the request buffer
+ * @resp_size	: size of the response buffer
+ * @kick	: whether to kick the virtqueue immediately
+ */
+static int virtscsi_add_cmd(struct virtio_scsi_vq *vq,
 			     struct virtio_scsi_cmd *cmd,
-			     size_t req_size, size_t resp_size)
+			     size_t req_size, size_t resp_size,
+			     bool kick)
 {
 	unsigned long flags;
 	int err;
 	bool needs_kick = false;
 
 	spin_lock_irqsave(&vq->vq_lock, flags);
-	err = virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size);
-	if (!err)
+	err = __virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size);
+	if (!err && kick)
 		needs_kick = virtqueue_kick_prepare(vq->vq);
 
 	spin_unlock_irqrestore(&vq->vq_lock, flags);
@@ -537,6 +544,7 @@
 	struct virtio_scsi *vscsi = shost_priv(shost);
 	struct virtio_scsi_vq *req_vq = virtscsi_pick_vq_mq(vscsi, sc);
 	struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
+	bool kick;
 	unsigned long flags;
 	int req_size;
 	int ret;
@@ -566,7 +574,8 @@
 		req_size = sizeof(cmd->req.cmd);
 	}
 
-	ret = virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd));
+	kick = (sc->flags & SCMD_LAST) != 0;
+	ret = virtscsi_add_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd), kick);
 	if (ret == -EIO) {
 		cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
 		spin_lock_irqsave(&req_vq->vq_lock, flags);
@@ -584,8 +593,8 @@
 	int ret = FAILED;
 
 	cmd->comp = &comp;
-	if (virtscsi_kick_cmd(&vscsi->ctrl_vq, cmd,
-			      sizeof cmd->req.tmf, sizeof cmd->resp.tmf) < 0)
+	if (virtscsi_add_cmd(&vscsi->ctrl_vq, cmd,
+			      sizeof cmd->req.tmf, sizeof cmd->resp.tmf, true) < 0)
 		goto out;
 
 	wait_for_completion(&comp);
@@ -621,7 +630,6 @@
 		return FAILED;
 
 	memset(cmd, 0, sizeof(*cmd));
-	cmd->sc = sc;
 	cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
 		.type = VIRTIO_SCSI_T_TMF,
 		.subtype = cpu_to_virtio32(vscsi->vdev,
@@ -680,7 +688,6 @@
 		return FAILED;
 
 	memset(cmd, 0, sizeof(*cmd));
-	cmd->sc = sc;
 	cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
 		.type = VIRTIO_SCSI_T_TMF,
 		.subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK,
@@ -693,34 +700,19 @@
 	return virtscsi_tmf(vscsi, cmd);
 }
 
-static int virtscsi_target_alloc(struct scsi_target *starget)
-{
-	struct Scsi_Host *sh = dev_to_shost(starget->dev.parent);
-	struct virtio_scsi *vscsi = shost_priv(sh);
-
-	struct virtio_scsi_target_state *tgt =
-				kmalloc(sizeof(*tgt), GFP_KERNEL);
-	if (!tgt)
-		return -ENOMEM;
-
-	seqcount_init(&tgt->tgt_seq);
-	tgt->req_vq = &vscsi->req_vqs[0];
-
-	starget->hostdata = tgt;
-	return 0;
-}
-
-static void virtscsi_target_destroy(struct scsi_target *starget)
-{
-	struct virtio_scsi_target_state *tgt = starget->hostdata;
-	kfree(tgt);
-}
-
 static int virtscsi_map_queues(struct Scsi_Host *shost)
 {
 	struct virtio_scsi *vscsi = shost_priv(shost);
+	struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
 
-	return blk_mq_virtio_map_queues(&shost->tag_set, vscsi->vdev, 2);
+	return blk_mq_virtio_map_queues(qmap, vscsi->vdev, 2);
+}
+
+static void virtscsi_commit_rqs(struct Scsi_Host *shost, u16 hwq)
+{
+	struct virtio_scsi *vscsi = shost_priv(shost);
+
+	virtscsi_kick_vq(&vscsi->req_vqs[hwq]);
 }
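
Together these implement blk-mq's doorbell batching: queuecommand passes kick=false for every command except one flagged SCMD_LAST, and ->commit_rqs() is the block layer's fallback notification when it cuts a batch short. A rough sketch of the resulting pattern, where cmd1..cmd3 and the sizes are placeholders for real dispatches to one hw queue:

	/* batch of three requests to the same hw queue */
	virtscsi_add_cmd(vq, cmd1, req_size, resp_size, false);
	virtscsi_add_cmd(vq, cmd2, req_size, resp_size, false);
	virtscsi_add_cmd(vq, cmd3, req_size, resp_size, true);	/* SCMD_LAST */

	/* if blk-mq ends the batch early instead, it calls ->commit_rqs(),
	 * i.e. virtscsi_commit_rqs() -> virtscsi_kick_vq(vq) */
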
 
 /*
@@ -740,6 +732,7 @@
 	.this_id = -1,
 	.cmd_size = sizeof(struct virtio_scsi_cmd),
 	.queuecommand = virtscsi_queuecommand,
+	.commit_rqs = virtscsi_commit_rqs,
 	.change_queue_depth = virtscsi_change_queue_depth,
 	.eh_abort_handler = virtscsi_abort,
 	.eh_device_reset_handler = virtscsi_device_reset,
@@ -747,9 +740,6 @@
 	.slave_alloc = virtscsi_device_alloc,
 
 	.dma_boundary = UINT_MAX,
-	.use_clustering = ENABLE_CLUSTERING,
-	.target_alloc = virtscsi_target_alloc,
-	.target_destroy = virtscsi_target_destroy,
 	.map_queues = virtscsi_map_queues,
 	.track_queue_depth = 1,
 	.force_blk_mq = 1,
@@ -855,11 +845,12 @@
 
 	/* We need to know how many queues before we allocate. */
 	num_queues = virtscsi_config_get(vdev, num_queues) ? : 1;
+	num_queues = min_t(unsigned int, nr_cpu_ids, num_queues);
 
 	num_targets = virtscsi_config_get(vdev, max_target) + 1;
 
 	shost = scsi_host_alloc(&virtscsi_host_template,
-		sizeof(*vscsi) + sizeof(vscsi->req_vqs[0]) * num_queues);
+				struct_size(vscsi, req_vqs, num_queues));
 	if (!shost)
 		return -ENOMEM;
 
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index 890b8aa..7000881 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -335,7 +335,7 @@
 	BUG_ON(count > PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT);
 
 	sge = &ctx->sgl->sge[0];
-	for (i = 0; i < count; i++, sg++) {
+	for (i = 0; i < count; i++, sg = sg_next(sg)) {
 		sge[i].addr   = sg_dma_address(sg);
 		sge[i].length = sg_dma_len(sg);
 		sge[i].flags  = 0;
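
The `sg++` to `sg_next(sg)` change matters because scatterlists can be chained: the last entry of one array may be a link to the next array, so plain pointer arithmetic walks off the end. The idiomatic form is the for_each_sg() helper, which advances with sg_next() internally; a sketch of the same loop written that way, with `sg`, `sge`, and `count` taken from the surrounding function:

	struct scatterlist *sg_iter;
	int i;

	for_each_sg(sg, sg_iter, count, i) {
		sge[i].addr   = sg_dma_address(sg_iter);
		sge[i].length = sg_dma_len(sg_iter);
		sge[i].flags  = 0;
	}
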
@@ -372,9 +372,9 @@
 			pvscsi_create_sg(ctx, sg, segs);
 
 			e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
-			ctx->sglPA = pci_map_single(adapter->dev, ctx->sgl,
-						    SGL_SIZE, PCI_DMA_TODEVICE);
-			if (pci_dma_mapping_error(adapter->dev, ctx->sglPA)) {
+			ctx->sglPA = dma_map_single(&adapter->dev->dev,
+					ctx->sgl, SGL_SIZE, DMA_TO_DEVICE);
+			if (dma_mapping_error(&adapter->dev->dev, ctx->sglPA)) {
 				scmd_printk(KERN_ERR, cmd,
 					    "vmw_pvscsi: Failed to map ctx sglist for DMA.\n");
 				scsi_dma_unmap(cmd);
@@ -389,9 +389,9 @@
 		 * In case there is no S/G list, scsi_sglist points
 		 * directly to the buffer.
 		 */
-		ctx->dataPA = pci_map_single(adapter->dev, sg, bufflen,
+		ctx->dataPA = dma_map_single(&adapter->dev->dev, sg, bufflen,
 					     cmd->sc_data_direction);
-		if (pci_dma_mapping_error(adapter->dev, ctx->dataPA)) {
+		if (dma_mapping_error(&adapter->dev->dev, ctx->dataPA)) {
 			scmd_printk(KERN_ERR, cmd,
 				    "vmw_pvscsi: Failed to map direct data buffer for DMA.\n");
 			return -ENOMEM;
@@ -417,23 +417,23 @@
 		if (count != 0) {
 			scsi_dma_unmap(cmd);
 			if (ctx->sglPA) {
-				pci_unmap_single(adapter->dev, ctx->sglPA,
-						 SGL_SIZE, PCI_DMA_TODEVICE);
+				dma_unmap_single(&adapter->dev->dev, ctx->sglPA,
+						 SGL_SIZE, DMA_TO_DEVICE);
 				ctx->sglPA = 0;
 			}
 		} else
-			pci_unmap_single(adapter->dev, ctx->dataPA, bufflen,
-					 cmd->sc_data_direction);
+			dma_unmap_single(&adapter->dev->dev, ctx->dataPA,
+					 bufflen, cmd->sc_data_direction);
 	}
 	if (cmd->sense_buffer)
-		pci_unmap_single(adapter->dev, ctx->sensePA,
-				 SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
+		dma_unmap_single(&adapter->dev->dev, ctx->sensePA,
+				 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
 }
 
 static int pvscsi_allocate_rings(struct pvscsi_adapter *adapter)
 {
-	adapter->rings_state = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
-						    &adapter->ringStatePA);
+	adapter->rings_state = dma_alloc_coherent(&adapter->dev->dev, PAGE_SIZE,
+			&adapter->ringStatePA, GFP_KERNEL);
 	if (!adapter->rings_state)
 		return -ENOMEM;
 
@@ -441,17 +441,17 @@
 				 pvscsi_ring_pages);
 	adapter->req_depth = adapter->req_pages
 					* PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
-	adapter->req_ring = pci_alloc_consistent(adapter->dev,
-						 adapter->req_pages * PAGE_SIZE,
-						 &adapter->reqRingPA);
+	adapter->req_ring = dma_alloc_coherent(&adapter->dev->dev,
+			adapter->req_pages * PAGE_SIZE, &adapter->reqRingPA,
+			GFP_KERNEL);
 	if (!adapter->req_ring)
 		return -ENOMEM;
 
 	adapter->cmp_pages = min(PVSCSI_MAX_NUM_PAGES_CMP_RING,
 				 pvscsi_ring_pages);
-	adapter->cmp_ring = pci_alloc_consistent(adapter->dev,
-						 adapter->cmp_pages * PAGE_SIZE,
-						 &adapter->cmpRingPA);
+	adapter->cmp_ring = dma_alloc_coherent(&adapter->dev->dev,
+			adapter->cmp_pages * PAGE_SIZE, &adapter->cmpRingPA,
+			GFP_KERNEL);
 	if (!adapter->cmp_ring)
 		return -ENOMEM;
 
@@ -464,9 +464,9 @@
 
 	adapter->msg_pages = min(PVSCSI_MAX_NUM_PAGES_MSG_RING,
 				 pvscsi_msg_ring_pages);
-	adapter->msg_ring = pci_alloc_consistent(adapter->dev,
-						 adapter->msg_pages * PAGE_SIZE,
-						 &adapter->msgRingPA);
+	adapter->msg_ring = dma_alloc_coherent(&adapter->dev->dev,
+			adapter->msg_pages * PAGE_SIZE, &adapter->msgRingPA,
+			GFP_KERNEL);
 	if (!adapter->msg_ring)
 		return -ENOMEM;
 	BUG_ON(!IS_ALIGNED(adapter->msgRingPA, PAGE_SIZE));
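These allocation hunks all follow one conversion shape: the legacy
pci_alloc_consistent() wrapper always allocated with GFP_ATOMIC, while the
generic DMA API takes the underlying struct device plus an explicit gfp mask,
letting probe-time paths use GFP_KERNEL. A condensed sketch (alloc_ring is
illustrative):

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    static void *alloc_ring(struct pci_dev *pdev, unsigned int pages,
                            dma_addr_t *handle)
    {
            /* was: pci_alloc_consistent(pdev, pages * PAGE_SIZE, handle) */
            return dma_alloc_coherent(&pdev->dev, pages * PAGE_SIZE,
                                      handle, GFP_KERNEL);
    }
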
@@ -708,10 +708,10 @@
 	e->lun[1] = sdev->lun;
 
 	if (cmd->sense_buffer) {
-		ctx->sensePA = pci_map_single(adapter->dev, cmd->sense_buffer,
-					      SCSI_SENSE_BUFFERSIZE,
-					      PCI_DMA_FROMDEVICE);
-		if (pci_dma_mapping_error(adapter->dev, ctx->sensePA)) {
+		ctx->sensePA = dma_map_single(&adapter->dev->dev,
+				cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
+				DMA_FROM_DEVICE);
+		if (dma_mapping_error(&adapter->dev->dev, ctx->sensePA)) {
 			scmd_printk(KERN_ERR, cmd,
 				    "vmw_pvscsi: Failed to map sense buffer for DMA.\n");
 			ctx->sensePA = 0;
@@ -740,9 +740,9 @@
 
 	if (pvscsi_map_buffers(adapter, ctx, cmd, e) != 0) {
 		if (cmd->sense_buffer) {
-			pci_unmap_single(adapter->dev, ctx->sensePA,
+			dma_unmap_single(&adapter->dev->dev, ctx->sensePA,
 					 SCSI_SENSE_BUFFERSIZE,
-					 PCI_DMA_FROMDEVICE);
+					 DMA_FROM_DEVICE);
 			ctx->sensePA = 0;
 		}
 		return -ENOMEM;
@@ -763,6 +763,7 @@
 	struct pvscsi_adapter *adapter = shost_priv(host);
 	struct pvscsi_ctx *ctx;
 	unsigned long flags;
+	unsigned char op;
 
 	spin_lock_irqsave(&adapter->hw_lock, flags);
 
@@ -775,13 +776,14 @@
 	}
 
 	cmd->scsi_done = done;
+	op = cmd->cmnd[0];
 
 	dev_dbg(&cmd->device->sdev_gendev,
-		"queued cmd %p, ctx %p, op=%x\n", cmd, ctx, cmd->cmnd[0]);
+		"queued cmd %p, ctx %p, op=%x\n", cmd, ctx, op);
 
 	spin_unlock_irqrestore(&adapter->hw_lock, flags);
 
-	pvscsi_kick_io(adapter, cmd->cmnd[0]);
+	pvscsi_kick_io(adapter, op);
 
 	return 0;
 }
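The point of latching op under the lock: once the request is handed to the
device, the command may complete and be recycled on another CPU, so reading
cmd->cmnd[0] after the unlock would be a use-after-completion. The general
shape, with my_lock / enqueue_to_hw / kick_hw as hypothetical stand-ins:

    #include <linux/spinlock.h>
    #include <scsi/scsi_cmnd.h>

    static DEFINE_SPINLOCK(my_lock);
    void enqueue_to_hw(struct scsi_cmnd *cmd);  /* hypothetical */
    void kick_hw(unsigned char op);             /* hypothetical */

    static void submit(struct scsi_cmnd *cmd)
    {
            unsigned long flags;
            unsigned char op;

            spin_lock_irqsave(&my_lock, flags);
            op = cmd->cmnd[0];      /* latch before handing off */
            enqueue_to_hw(cmd);     /* cmd may complete from here on */
            spin_unlock_irqrestore(&my_lock, flags);

            kick_hw(op);            /* safe: does not touch cmd */
    }
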
@@ -1007,7 +1009,6 @@
 	.sg_tablesize			= PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT,
 	.dma_boundary			= UINT_MAX,
 	.max_sectors			= 0xffff,
-	.use_clustering			= ENABLE_CLUSTERING,
 	.change_queue_depth		= pvscsi_change_queue_depth,
 	.eh_abort_handler		= pvscsi_abort,
 	.eh_device_reset_handler	= pvscsi_device_reset,
@@ -1216,21 +1217,21 @@
 	}
 
 	if (adapter->rings_state)
-		pci_free_consistent(adapter->dev, PAGE_SIZE,
+		dma_free_coherent(&adapter->dev->dev, PAGE_SIZE,
 				    adapter->rings_state, adapter->ringStatePA);
 
 	if (adapter->req_ring)
-		pci_free_consistent(adapter->dev,
+		dma_free_coherent(&adapter->dev->dev,
 				    adapter->req_pages * PAGE_SIZE,
 				    adapter->req_ring, adapter->reqRingPA);
 
 	if (adapter->cmp_ring)
-		pci_free_consistent(adapter->dev,
+		dma_free_coherent(&adapter->dev->dev,
 				    adapter->cmp_pages * PAGE_SIZE,
 				    adapter->cmp_ring, adapter->cmpRingPA);
 
 	if (adapter->msg_ring)
-		pci_free_consistent(adapter->dev,
+		dma_free_coherent(&adapter->dev->dev,
 				    adapter->msg_pages * PAGE_SIZE,
 				    adapter->msg_ring, adapter->msgRingPA);
 }
@@ -1289,8 +1290,8 @@
 	u32 numPhys = 16;
 
 	dev = pvscsi_dev(adapter);
-	config_page = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
-					   &configPagePA);
+	config_page = dma_alloc_coherent(&adapter->dev->dev, PAGE_SIZE,
+			&configPagePA, GFP_KERNEL);
 	if (!config_page) {
 		dev_warn(dev, "vmw_pvscsi: failed to allocate memory for config page\n");
 		goto exit;
@@ -1324,7 +1325,8 @@
 	} else
 		dev_warn(dev, "vmw_pvscsi: PVSCSI_CMD_CONFIG failed. hostStatus = 0x%x, scsiStatus = 0x%x\n",
 			 header->hostStatus, header->scsiStatus);
-	pci_free_consistent(adapter->dev, PAGE_SIZE, config_page, configPagePA);
+	dma_free_coherent(&adapter->dev->dev, PAGE_SIZE, config_page,
+			  configPagePA);
 exit:
 	return numPhys;
 }
@@ -1344,11 +1346,9 @@
 	if (pci_enable_device(pdev))
 		return error;
 
-	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 &&
-	    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
+	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
 		printk(KERN_INFO "vmw_pvscsi: using 64bit dma\n");
-	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 &&
-		   pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0) {
+	} else if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
 		printk(KERN_INFO "vmw_pvscsi: using 32bit dma\n");
 	} else {
 		printk(KERN_ERR "vmw_pvscsi: failed to set DMA mask\n");
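dma_set_mask_and_coherent() sets the streaming and coherent masks in one call
and returns 0 on success, collapsing the old four-call dance. The usual
probe-time fallback, as a sketch:

    #include <linux/device.h>
    #include <linux/dma-mapping.h>

    static int set_dma_masks(struct device *dev)
    {
            if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
                    dev_info(dev, "using 64bit dma\n");
            else if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
                    dev_info(dev, "using 32bit dma\n");
            else
                    return -EIO;    /* no usable DMA addressing */
            return 0;
    }
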
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index 74be04f..f81046f 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Copyright (c) 1996 John Shifflett, GeoLog Consulting
  *    john@geolog.com
  *    jshiffle@netcom.com
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
  */
 
 /*
@@ -744,7 +735,7 @@
  * source or destination for THIS transfer.
  */
 	if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
-		++cmd->SCp.buffer;
+		cmd->SCp.buffer = sg_next(cmd->SCp.buffer);
 		--cmd->SCp.buffers_residual;
 		cmd->SCp.this_residual = cmd->SCp.buffer->length;
 		cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
@@ -1863,6 +1854,7 @@
 		case 1: --x;
 			break;
 		case 2: ++x;
+			/* fall through */
 		case 3: ++x;
 	}
 	return x;
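The annotation exists for GCC's -Wimplicit-fallthrough: an intentional drop
from one case into the next must carry a marker comment (later kernels switch
to the fallthrough; pseudo-keyword tree-wide). A self-contained sketch of the
pattern above:

    /* mode 2 deliberately increments twice by falling into case 3 */
    static int adjust(int x, int mode)
    {
            switch (mode) {
            case 1:
                    --x;
                    break;
            case 2:
                    ++x;
                    /* fall through */
            case 3:
                    ++x;
            }
            return x;
    }
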
diff --git a/drivers/scsi/wd33c93.h b/drivers/scsi/wd33c93.h
index 08abe50..2edec34 100644
--- a/drivers/scsi/wd33c93.h
+++ b/drivers/scsi/wd33c93.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  *    wd33c93.h -  Linux device driver definitions for the
  *                 Commodore Amiga A2091/590 SCSI controller card
@@ -7,17 +8,6 @@
  * Copyright (c) 1996 John Shifflett, GeoLog Consulting
  *    john@geolog.com
  *    jshiffle@netcom.com
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  */
 #ifndef WD33C93_H
 #define WD33C93_H
diff --git a/drivers/scsi/wd719x.c b/drivers/scsi/wd719x.c
index 974bfb3..edc8a13 100644
--- a/drivers/scsi/wd719x.c
+++ b/drivers/scsi/wd719x.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Driver for Western Digital WD7193, WD7197 and WD7296 SCSI cards
  * Copyright 2013 Ondrej Zary
@@ -107,8 +108,15 @@
 	}
 
 	if (status != WD719X_INT_NOERRORS) {
+		u8 sue = wd719x_readb(wd, WD719X_AMR_SCB_ERROR);
+		/* we get this after wd719x_dev_reset; it's not an error */
+		if (sue == WD719X_SUE_TERM)
+			return 0;
+		/* we get this after wd719x_bus_reset; it's not an error */
+		if (sue == WD719X_SUE_RESET)
+			return 0;
 		dev_err(&wd->pdev->dev, "direct command failed, status 0x%02x, SUE 0x%02x\n",
-			status, wd719x_readb(wd, WD719X_AMR_SCB_ERROR));
+			status, sue);
 		return -EIO;
 	}
 
@@ -127,8 +135,10 @@
 	if (wd719x_wait_ready(wd))
 		return -ETIMEDOUT;
 
-	/* make sure we get NO interrupts */
-	dev |= WD719X_DISABLE_INT;
+	/* disable interrupts except for RESET/ABORT (disabling breaks them) */
+	if (opcode != WD719X_CMD_BUSRESET && opcode != WD719X_CMD_ABORT &&
+	    opcode != WD719X_CMD_ABORT_TAG && opcode != WD719X_CMD_RESET)
+		dev |= WD719X_DISABLE_INT;
 	wd719x_writeb(wd, WD719X_AMR_CMD_PARAM, dev);
 	wd719x_writeb(wd, WD719X_AMR_CMD_PARAM_2, lun);
 	wd719x_writeb(wd, WD719X_AMR_CMD_PARAM_3, tag);
@@ -153,8 +163,6 @@
 
 static void wd719x_destroy(struct wd719x *wd)
 {
-	struct wd719x_scb *scb;
-
 	/* stop the RISC */
 	if (wd719x_direct_cmd(wd, WD719X_CMD_SLEEP, 0, 0, 0, 0,
 			      WD719X_WAIT_FOR_RISC))
@@ -162,37 +170,35 @@
 	/* disable RISC */
 	wd719x_writeb(wd, WD719X_PCI_MODE_SELECT, 0);
 
-	/* free all SCBs */
-	list_for_each_entry(scb, &wd->active_scbs, list)
-		pci_free_consistent(wd->pdev, sizeof(struct wd719x_scb), scb,
-				    scb->phys);
-	list_for_each_entry(scb, &wd->free_scbs, list)
-		pci_free_consistent(wd->pdev, sizeof(struct wd719x_scb), scb,
-				    scb->phys);
+	WARN_ON_ONCE(!list_empty(&wd->active_scbs));
+
 	/* free internal buffers */
-	pci_free_consistent(wd->pdev, wd->fw_size, wd->fw_virt, wd->fw_phys);
+	dma_free_coherent(&wd->pdev->dev, wd->fw_size, wd->fw_virt,
+			  wd->fw_phys);
 	wd->fw_virt = NULL;
-	pci_free_consistent(wd->pdev, WD719X_HASH_TABLE_SIZE, wd->hash_virt,
-			    wd->hash_phys);
+	dma_free_coherent(&wd->pdev->dev, WD719X_HASH_TABLE_SIZE, wd->hash_virt,
+			  wd->hash_phys);
 	wd->hash_virt = NULL;
-	pci_free_consistent(wd->pdev, sizeof(struct wd719x_host_param),
-			    wd->params, wd->params_phys);
+	dma_free_coherent(&wd->pdev->dev, sizeof(struct wd719x_host_param),
+			  wd->params, wd->params_phys);
 	wd->params = NULL;
 	free_irq(wd->pdev->irq, wd);
 }
 
-/* finish a SCSI command, mark SCB (if any) as free, unmap buffers */
-static void wd719x_finish_cmd(struct scsi_cmnd *cmd, int result)
+/* finish a SCSI command, unmap buffers */
+static void wd719x_finish_cmd(struct wd719x_scb *scb, int result)
 {
+	struct scsi_cmnd *cmd = scb->cmd;
 	struct wd719x *wd = shost_priv(cmd->device->host);
-	struct wd719x_scb *scb = (struct wd719x_scb *) cmd->host_scribble;
 
-	if (scb) {
-		list_move(&scb->list, &wd->free_scbs);
-		dma_unmap_single(&wd->pdev->dev, cmd->SCp.dma_handle,
-				 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
-		scsi_dma_unmap(cmd);
-	}
+	list_del(&scb->list);
+
+	dma_unmap_single(&wd->pdev->dev, scb->phys,
+			sizeof(struct wd719x_scb), DMA_BIDIRECTIONAL);
+	scsi_dma_unmap(cmd);
+	dma_unmap_single(&wd->pdev->dev, cmd->SCp.dma_handle,
+			 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+
 	cmd->result = result << 16;
 	cmd->scsi_done(cmd);
 }
@@ -202,36 +208,10 @@
 {
 	int i, count_sg;
 	unsigned long flags;
-	struct wd719x_scb *scb;
+	struct wd719x_scb *scb = scsi_cmd_priv(cmd);
 	struct wd719x *wd = shost_priv(sh);
-	dma_addr_t phys;
 
-	cmd->host_scribble = NULL;
-
-	/* get a free SCB - either from existing ones or allocate a new one */
-	spin_lock_irqsave(wd->sh->host_lock, flags);
-	scb = list_first_entry_or_null(&wd->free_scbs, struct wd719x_scb, list);
-	if (scb) {
-		list_del(&scb->list);
-		phys = scb->phys;
-	} else {
-		spin_unlock_irqrestore(wd->sh->host_lock, flags);
-		scb = pci_alloc_consistent(wd->pdev, sizeof(struct wd719x_scb),
-					   &phys);
-		spin_lock_irqsave(wd->sh->host_lock, flags);
-		if (!scb) {
-			dev_err(&wd->pdev->dev, "unable to allocate SCB\n");
-			wd719x_finish_cmd(cmd, DID_ERROR);
-			spin_unlock_irqrestore(wd->sh->host_lock, flags);
-			return 0;
-		}
-	}
-	memset(scb, 0, sizeof(struct wd719x_scb));
-	list_add(&scb->list, &wd->active_scbs);
-
-	scb->phys = phys;
 	scb->cmd = cmd;
-	cmd->host_scribble = (char *) scb;
 
 	scb->CDB_tag = 0;	/* Tagged queueing not supported yet */
 	scb->devid = cmd->device->id;
@@ -240,10 +220,19 @@
 	/* copy the command */
 	memcpy(scb->CDB, cmd->cmnd, cmd->cmd_len);
 
+	/* map SCB */
+	scb->phys = dma_map_single(&wd->pdev->dev, scb, sizeof(*scb),
+				   DMA_BIDIRECTIONAL);
+
+	if (dma_mapping_error(&wd->pdev->dev, scb->phys))
+		goto out_error;
+
 	/* map sense buffer */
 	scb->sense_buf_length = SCSI_SENSE_BUFFERSIZE;
 	cmd->SCp.dma_handle = dma_map_single(&wd->pdev->dev, cmd->sense_buffer,
 			SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+	if (dma_mapping_error(&wd->pdev->dev, cmd->SCp.dma_handle))
+		goto out_unmap_scb;
 	scb->sense_buf = cpu_to_le32(cmd->SCp.dma_handle);
 
 	/* request autosense */
@@ -258,11 +247,8 @@
 
 	/* Scatter/gather */
 	count_sg = scsi_dma_map(cmd);
-	if (count_sg < 0) {
-		wd719x_finish_cmd(cmd, DID_ERROR);
-		spin_unlock_irqrestore(wd->sh->host_lock, flags);
-		return 0;
-	}
+	if (count_sg < 0)
+		goto out_unmap_sense;
 	BUG_ON(count_sg > WD719X_SG);
 
 	if (count_sg) {
@@ -283,19 +269,33 @@
 		scb->data_p = 0;
 	}
 
+	spin_lock_irqsave(wd->sh->host_lock, flags);
+
 	/* check if the Command register is free */
 	if (wd719x_readb(wd, WD719X_AMR_COMMAND) != WD719X_CMD_READY) {
 		spin_unlock_irqrestore(wd->sh->host_lock, flags);
 		return SCSI_MLQUEUE_HOST_BUSY;
 	}
 
+	list_add(&scb->list, &wd->active_scbs);
+
 	/* write pointer to the AMR */
 	wd719x_writel(wd, WD719X_AMR_SCB_IN, scb->phys);
 	/* send SCB opcode */
 	wd719x_writeb(wd, WD719X_AMR_COMMAND, WD719X_CMD_PROCESS_SCB);
 
 	spin_unlock_irqrestore(wd->sh->host_lock, flags);
+	return 0;
 
+out_unmap_sense:
+	dma_unmap_single(&wd->pdev->dev, cmd->SCp.dma_handle,
+			 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+out_unmap_scb:
+	dma_unmap_single(&wd->pdev->dev, scb->phys, sizeof(*scb),
+			 DMA_BIDIRECTIONAL);
+out_error:
+	cmd->result = DID_ERROR << 16;
+	cmd->scsi_done(cmd);
 	return 0;
 }
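The restructured queuecommand checks every dma_map_single() with
dma_mapping_error() and unwinds through goto labels in reverse mapping order,
instead of completing the command from inside the locked region. A condensed
sketch of that ladder (struct two_bufs and the lengths are illustrative):

    #include <linux/dma-mapping.h>

    struct two_bufs {
            void *scb;   dma_addr_t scb_dma;
            void *sense; dma_addr_t sense_dma;
    };

    static int map_for_io(struct device *dev, struct two_bufs *b,
                          size_t scb_len, size_t sense_len)
    {
            b->scb_dma = dma_map_single(dev, b->scb, scb_len,
                                        DMA_BIDIRECTIONAL);
            if (dma_mapping_error(dev, b->scb_dma))
                    goto out_error;

            b->sense_dma = dma_map_single(dev, b->sense, sense_len,
                                          DMA_FROM_DEVICE);
            if (dma_mapping_error(dev, b->sense_dma))
                    goto out_unmap_scb;

            return 0;

    out_unmap_scb:
            dma_unmap_single(dev, b->scb_dma, scb_len, DMA_BIDIRECTIONAL);
    out_error:
            return -ENOMEM;
    }
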
 
@@ -327,8 +327,8 @@
 	wd->fw_size = ALIGN(fw_wcs->size, 4) + fw_risc->size;
 
 	if (!wd->fw_virt)
-		wd->fw_virt = pci_alloc_consistent(wd->pdev, wd->fw_size,
-						   &wd->fw_phys);
+		wd->fw_virt = dma_alloc_coherent(&wd->pdev->dev, wd->fw_size,
+						 &wd->fw_phys, GFP_KERNEL);
 	if (!wd->fw_virt) {
 		ret = -ENOMEM;
 		goto wd719x_init_end;
@@ -464,7 +464,7 @@
 {
 	int action, result;
 	unsigned long flags;
-	struct wd719x_scb *scb = (struct wd719x_scb *)cmd->host_scribble;
+	struct wd719x_scb *scb = scsi_cmd_priv(cmd);
 	struct wd719x *wd = shost_priv(cmd->device->host);
 
 	dev_info(&wd->pdev->dev, "abort command, tag: %x\n", cmd->tag);
@@ -474,6 +474,7 @@
 	spin_lock_irqsave(wd->sh->host_lock, flags);
 	result = wd719x_direct_cmd(wd, action, cmd->device->id,
 				   cmd->device->lun, cmd->tag, scb->phys, 0);
+	wd719x_finish_cmd(scb, DID_ABORT);
 	spin_unlock_irqrestore(wd->sh->host_lock, flags);
 	if (result)
 		return FAILED;
@@ -486,6 +487,7 @@
 	int result;
 	unsigned long flags;
 	struct wd719x *wd = shost_priv(cmd->device->host);
+	struct wd719x_scb *scb, *tmp;
 
 	dev_info(&wd->pdev->dev, "%s reset requested\n",
 		 (opcode == WD719X_CMD_BUSRESET) ? "bus" : "device");
@@ -493,6 +495,12 @@
 	spin_lock_irqsave(wd->sh->host_lock, flags);
 	result = wd719x_direct_cmd(wd, opcode, device, 0, 0, 0,
 				   WD719X_WAIT_FOR_SCSI_RESET);
+	/* flush all SCBs on a bus reset, or only this device's on a dev reset */
+	list_for_each_entry_safe(scb, tmp, &wd->active_scbs, list) {
+		if (opcode == WD719X_CMD_BUSRESET ||
+		    scb->cmd->device->id == device)
+			wd719x_finish_cmd(scb, DID_RESET);
+	}
 	spin_unlock_irqrestore(wd->sh->host_lock, flags);
 	if (result)
 		return FAILED;
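Because wd719x_finish_cmd() now unlinks the SCB via list_del(), the reset
paths must use the _safe iterator, which caches the next pointer before the
loop body runs. The pattern in isolation (struct req and finish_one are
illustrative):

    #include <linux/list.h>
    #include <linux/types.h>

    struct req { struct list_head list; unsigned int id; };

    void finish_one(struct req *r); /* hypothetical: calls list_del(&r->list) */

    static void flush_matching(struct list_head *active, unsigned int id,
                               bool bus_wide)
    {
            struct req *r, *tmp;

            list_for_each_entry_safe(r, tmp, active, list)
                    if (bus_wide || r->id == id)
                            finish_one(r);
    }
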
@@ -515,24 +523,23 @@
 	struct wd719x *wd = shost_priv(cmd->device->host);
 	struct wd719x_scb *scb, *tmp;
 	unsigned long flags;
-	int result;
 
 	dev_info(&wd->pdev->dev, "host reset requested\n");
 	spin_lock_irqsave(wd->sh->host_lock, flags);
-	/* Try to reinit the RISC */
-	if (wd719x_chip_init(wd) == 0)
-		result = SUCCESS;
-	else
-		result = FAILED;
+	/* stop the RISC */
+	if (wd719x_direct_cmd(wd, WD719X_CMD_SLEEP, 0, 0, 0, 0,
+			      WD719X_WAIT_FOR_RISC))
+		dev_warn(&wd->pdev->dev, "RISC sleep command failed\n");
+	/* disable RISC */
+	wd719x_writeb(wd, WD719X_PCI_MODE_SELECT, 0);
 
 	/* flush all SCBs */
-	list_for_each_entry_safe(scb, tmp, &wd->active_scbs, list) {
-		struct scsi_cmnd *tmp_cmd = scb->cmd;
-		wd719x_finish_cmd(tmp_cmd, result);
-	}
+	list_for_each_entry_safe(scb, tmp, &wd->active_scbs, list)
+		wd719x_finish_cmd(scb, DID_RESET);
 	spin_unlock_irqrestore(wd->sh->host_lock, flags);
 
-	return result;
+	/* Try to reinit the RISC */
+	return wd719x_chip_init(wd) == 0 ? SUCCESS : FAILED;
 }
 
 static int wd719x_biosparam(struct scsi_device *sdev, struct block_device *bdev,
@@ -555,7 +562,6 @@
 					union wd719x_regs regs,
 					struct wd719x_scb *scb)
 {
-	struct scsi_cmnd *cmd;
 	int result;
 
 	/* now have to find result from card */
@@ -643,9 +649,8 @@
 		result = DID_ERROR;
 		break;
 	}
-	cmd = scb->cmd;
 
-	wd719x_finish_cmd(cmd, result);
+	wd719x_finish_cmd(scb, result);
 }
 
 static irqreturn_t wd719x_interrupt(int irq, void *dev_id)
@@ -686,7 +691,7 @@
 			else
 				dev_err(&wd->pdev->dev, "card returned invalid SCB pointer\n");
 		} else
-			dev_warn(&wd->pdev->dev, "direct command 0x%x completed\n",
+			dev_dbg(&wd->pdev->dev, "direct command 0x%x completed\n",
 				 regs.bytes.OPC);
 		break;
 	case WD719X_INT_PIOREADY:
@@ -809,7 +814,6 @@
 	int ret;
 
 	INIT_LIST_HEAD(&wd->active_scbs);
-	INIT_LIST_HEAD(&wd->free_scbs);
 
 	sh->base = pci_resource_start(wd->pdev, 0);
 
@@ -820,17 +824,18 @@
 	wd->fw_virt = NULL;
 
 	/* memory area for host (EEPROM) parameters */
-	wd->params = pci_alloc_consistent(wd->pdev,
-					  sizeof(struct wd719x_host_param),
-					  &wd->params_phys);
+	wd->params = dma_alloc_coherent(&wd->pdev->dev,
+					sizeof(struct wd719x_host_param),
+					&wd->params_phys, GFP_KERNEL);
 	if (!wd->params) {
 		dev_warn(&wd->pdev->dev, "unable to allocate parameter buffer\n");
 		return -ENOMEM;
 	}
 
 	/* memory area for the RISC for hash table of outstanding requests */
-	wd->hash_virt = pci_alloc_consistent(wd->pdev, WD719X_HASH_TABLE_SIZE,
-					     &wd->hash_phys);
+	wd->hash_virt = dma_alloc_coherent(&wd->pdev->dev,
+					   WD719X_HASH_TABLE_SIZE,
+					   &wd->hash_phys, GFP_KERNEL);
 	if (!wd->hash_virt) {
 		dev_warn(&wd->pdev->dev, "unable to allocate hash buffer\n");
 		ret = -ENOMEM;
@@ -862,10 +867,10 @@
 fail_free_irq:
 	free_irq(wd->pdev->irq, wd);
 fail_free_hash:
-	pci_free_consistent(wd->pdev, WD719X_HASH_TABLE_SIZE, wd->hash_virt,
+	dma_free_coherent(&wd->pdev->dev, WD719X_HASH_TABLE_SIZE, wd->hash_virt,
 			    wd->hash_phys);
 fail_free_params:
-	pci_free_consistent(wd->pdev, sizeof(struct wd719x_host_param),
+	dma_free_coherent(&wd->pdev->dev, sizeof(struct wd719x_host_param),
 			    wd->params, wd->params_phys);
 
 	return ret;
@@ -874,6 +879,7 @@
 static struct scsi_host_template wd719x_template = {
 	.module				= THIS_MODULE,
 	.name				= "Western Digital 719x",
+	.cmd_size			= sizeof(struct wd719x_scb),
 	.queuecommand			= wd719x_queuecommand,
 	.eh_abort_handler		= wd719x_abort,
 	.eh_device_reset_handler	= wd719x_dev_reset,
@@ -884,7 +890,6 @@
 	.can_queue			= 255,
 	.this_id			= 7,
 	.sg_tablesize			= WD719X_SG,
-	.use_clustering			= ENABLE_CLUSTERING,
 };
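Setting .cmd_size is what lets the free_scbs pool and the host_scribble
plumbing go away: the midlayer allocates cmd_size bytes of per-command driver
data next to each scsi_cmnd, retrievable with scsi_cmd_priv(). A minimal
sketch (my_scb / my_queuecommand are illustrative):

    #include <scsi/scsi_cmnd.h>
    #include <scsi/scsi_host.h>

    struct my_scb {
            dma_addr_t phys;
            /* ... per-command driver state ... */
    };

    static int my_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
    {
            struct my_scb *scb = scsi_cmd_priv(cmd); /* no pool, no kmalloc */

            scb->phys = 0;
            /* ... build and submit ... */
            return 0;
    }

    static struct scsi_host_template my_template = {
            .cmd_size     = sizeof(struct my_scb),
            .queuecommand = my_queuecommand,
            /* ... */
    };
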
 
 static int wd719x_pci_probe(struct pci_dev *pdev, const struct pci_device_id *d)
@@ -897,7 +902,7 @@
 	if (err)
 		goto fail;
 
-	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
 		dev_warn(&pdev->dev, "Unable to set 32-bit DMA mask\n");
 		goto disable_device;
 	}
diff --git a/drivers/scsi/wd719x.h b/drivers/scsi/wd719x.h
index 0455b16..abaabd4 100644
--- a/drivers/scsi/wd719x.h
+++ b/drivers/scsi/wd719x.h
@@ -74,7 +74,6 @@
 	void *hash_virt;	/* hash table CPU address */
 	dma_addr_t hash_phys;	/* hash table bus address */
 	struct list_head active_scbs;
-	struct list_head free_scbs;
 };
 
 /* timeout delays in microsecs */
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index 61389bd..f0068e9 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -696,7 +696,6 @@
 	.this_id		= -1,
 	.cmd_size		= sizeof(struct vscsifrnt_shadow),
 	.sg_tablesize		= VSCSIIF_SG_TABLESIZE,
-	.use_clustering		= DISABLE_CLUSTERING,
 	.proc_name		= "scsifront",
 };
 
@@ -1112,7 +1111,7 @@
 	case XenbusStateClosed:
 		if (dev->state == XenbusStateClosed)
 			break;
-		/* Missed the backend's Closing state -- fallthrough */
+		/* fall through - Missed the backend's Closing state */
 	case XenbusStateClosing:
 		scsifront_disconnect(info);
 		break;
diff --git a/drivers/scsi/zalon.c b/drivers/scsi/zalon.c
index 4722660..77bce20 100644
--- a/drivers/scsi/zalon.c
+++ b/drivers/scsi/zalon.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Zalon 53c7xx device driver.
  * By Richard Hirst (rhirst@linuxcare.com)
diff --git a/drivers/scsi/zorro7xx.c b/drivers/scsi/zorro7xx.c
index aff3199..27b9e2b 100644
--- a/drivers/scsi/zorro7xx.c
+++ b/drivers/scsi/zorro7xx.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Detection routine for the NCR53c710 based Amiga SCSI Controllers for Linux.
  *		Amiga MacroSystemUS WarpEngine SCSI controller.
diff --git a/drivers/scsi/zorro_esp.c b/drivers/scsi/zorro_esp.c
index bb70882..ca8e3ab 100644
--- a/drivers/scsi/zorro_esp.c
+++ b/drivers/scsi/zorro_esp.c
@@ -9,8 +9,6 @@
  *
  * Copyright (C) 2013 Tuomas Vainikka (tuomas.vainikka@aalto.fi) for
  *               Blizzard 1230 DMA and probe function fixes
- *
- * Copyright (C) 2017 Finn Thain for PIO code from Mac ESP driver adapted here
  */
 /*
  * ZORRO bus code from:
@@ -159,7 +157,6 @@
 struct zorro_esp_priv {
 	struct esp *esp;		/* our ESP instance - for Scsi_host* */
 	void __iomem *board_base;	/* virtual address (Zorro III board) */
-	int error;			/* PIO error flag */
 	int zorro3;			/* board is Zorro III */
 	unsigned char ctrl_data;	/* shadow copy of ctrl_reg */
 };
@@ -182,30 +179,6 @@
 	return readb(esp->regs + (reg * 4UL));
 }
 
-static dma_addr_t zorro_esp_map_single(struct esp *esp, void *buf,
-				      size_t sz, int dir)
-{
-	return dma_map_single(esp->dev, buf, sz, dir);
-}
-
-static int zorro_esp_map_sg(struct esp *esp, struct scatterlist *sg,
-				  int num_sg, int dir)
-{
-	return dma_map_sg(esp->dev, sg, num_sg, dir);
-}
-
-static void zorro_esp_unmap_single(struct esp *esp, dma_addr_t addr,
-				  size_t sz, int dir)
-{
-	dma_unmap_single(esp->dev, addr, sz, dir);
-}
-
-static void zorro_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
-			      int num_sg, int dir)
-{
-	dma_unmap_sg(esp->dev, sg, num_sg, dir);
-}
-
 static int zorro_esp_irq_pending(struct esp *esp)
 {
 	/* check ESP status register; DMA has no status reg. */
@@ -245,7 +218,7 @@
 static u32 zorro_esp_dma_length_limit(struct esp *esp, u32 dma_addr,
 					u32 dma_len)
 {
-	return dma_len > 0xFFFFFF ? 0xFFFFFF : dma_len;
+	return dma_len > 0xFFFF ? 0xFFFF : dma_len;
 }
 
 static void zorro_esp_reset_dma(struct esp *esp)
@@ -274,192 +247,29 @@
 	z_writel(0, zep->board_base);
 }
 
-/*
- * Programmed IO routines follow.
- */
-
-static inline unsigned int zorro_esp_wait_for_fifo(struct esp *esp)
-{
-	int i = 500000;
-
-	do {
-		unsigned int fbytes = zorro_esp_read8(esp, ESP_FFLAGS)
-							& ESP_FF_FBYTES;
-
-		if (fbytes)
-			return fbytes;
-
-		udelay(2);
-	} while (--i);
-
-	pr_err("FIFO is empty (sreg %02x)\n",
-	       zorro_esp_read8(esp, ESP_STATUS));
-	return 0;
-}
-
-static inline int zorro_esp_wait_for_intr(struct esp *esp)
-{
-	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
-	int i = 500000;
-
-	do {
-		esp->sreg = zorro_esp_read8(esp, ESP_STATUS);
-		if (esp->sreg & ESP_STAT_INTR)
-			return 0;
-
-		udelay(2);
-	} while (--i);
-
-	pr_err("IRQ timeout (sreg %02x)\n", esp->sreg);
-	zep->error = 1;
-	return 1;
-}
-
-/*
- * PIO macros as used in mac_esp.c.
- * Note that addr and fifo arguments are local-scope variables declared
- * in zorro_esp_send_pio_cmd(), the macros are only used in that function,
- * and addr and fifo are referenced in each use of the macros so there
- * is no need to pass them as macro parameters.
- */
-#define ZORRO_ESP_PIO_LOOP(operands, reg1) \
-	asm volatile ( \
-	     "1:     moveb " operands "\n" \
-	     "       subqw #1,%1       \n" \
-	     "       jbne 1b           \n" \
-	     : "+a" (addr), "+r" (reg1) \
-	     : "a" (fifo));
-
-#define ZORRO_ESP_PIO_FILL(operands, reg1) \
-	asm volatile ( \
-	     "       moveb " operands "\n" \
-	     "       moveb " operands "\n" \
-	     "       moveb " operands "\n" \
-	     "       moveb " operands "\n" \
-	     "       moveb " operands "\n" \
-	     "       moveb " operands "\n" \
-	     "       moveb " operands "\n" \
-	     "       moveb " operands "\n" \
-	     "       moveb " operands "\n" \
-	     "       moveb " operands "\n" \
-	     "       moveb " operands "\n" \
-	     "       moveb " operands "\n" \
-	     "       moveb " operands "\n" \
-	     "       moveb " operands "\n" \
-	     "       moveb " operands "\n" \
-	     "       moveb " operands "\n" \
-	     "       subqw #8,%1       \n" \
-	     "       subqw #8,%1       \n" \
-	     : "+a" (addr), "+r" (reg1) \
-	     : "a" (fifo));
-
-#define ZORRO_ESP_FIFO_SIZE 16
-
-static void zorro_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
-				 u32 dma_count, int write, u8 cmd)
-{
-	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
-	u8 __iomem *fifo = esp->regs + ESP_FDATA * 16;
-	u8 phase = esp->sreg & ESP_STAT_PMASK;
-
-	cmd &= ~ESP_CMD_DMA;
-
-	if (write) {
-		u8 *dst = (u8 *)addr;
-		u8 mask = ~(phase == ESP_MIP ? ESP_INTR_FDONE : ESP_INTR_BSERV);
-
-		scsi_esp_cmd(esp, cmd);
-
-		while (1) {
-			if (!zorro_esp_wait_for_fifo(esp))
-				break;
-
-			*dst++ = zorro_esp_read8(esp, ESP_FDATA);
-			--esp_count;
-
-			if (!esp_count)
-				break;
-
-			if (zorro_esp_wait_for_intr(esp))
-				break;
-
-			if ((esp->sreg & ESP_STAT_PMASK) != phase)
-				break;
-
-			esp->ireg = zorro_esp_read8(esp, ESP_INTRPT);
-			if (esp->ireg & mask) {
-				zep->error = 1;
-				break;
-			}
-
-			if (phase == ESP_MIP)
-				scsi_esp_cmd(esp, ESP_CMD_MOK);
-
-			scsi_esp_cmd(esp, ESP_CMD_TI);
-		}
-	} else {	/* unused, as long as we only handle MIP here */
-		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
-
-		if (esp_count >= ZORRO_ESP_FIFO_SIZE)
-			ZORRO_ESP_PIO_FILL("%0@+,%2@", esp_count)
-		else
-			ZORRO_ESP_PIO_LOOP("%0@+,%2@", esp_count)
-
-		scsi_esp_cmd(esp, cmd);
-
-		while (esp_count) {
-			unsigned int n;
-
-			if (zorro_esp_wait_for_intr(esp))
-				break;
-
-			if ((esp->sreg & ESP_STAT_PMASK) != phase)
-				break;
-
-			esp->ireg = zorro_esp_read8(esp, ESP_INTRPT);
-			if (esp->ireg & ~ESP_INTR_BSERV) {
-				zep->error = 1;
-				break;
-			}
-
-			n = ZORRO_ESP_FIFO_SIZE -
-			    (zorro_esp_read8(esp, ESP_FFLAGS) & ESP_FF_FBYTES);
-			if (n > esp_count)
-				n = esp_count;
-
-			if (n == ZORRO_ESP_FIFO_SIZE)
-				ZORRO_ESP_PIO_FILL("%0@+,%2@", esp_count)
-			else {
-				esp_count -= n;
-				ZORRO_ESP_PIO_LOOP("%0@+,%2@", n)
-			}
-
-			scsi_esp_cmd(esp, ESP_CMD_TI);
-		}
-	}
-}
-
 /* Blizzard 1230/60 SCSI-IV DMA */
 
 static void zorro_esp_send_blz1230_dma_cmd(struct esp *esp, u32 addr,
 			u32 esp_count, u32 dma_count, int write, u8 cmd)
 {
-	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
 	struct blz1230_dma_registers __iomem *dregs = esp->dma_regs;
 	u8 phase = esp->sreg & ESP_STAT_PMASK;
 
-	zep->error = 0;
 	/*
 	 * Use PIO if transferring message bytes to esp->command_block_dma.
 	 * PIO requires a virtual address, so substitute esp->command_block
 	 * for addr.
 	 */
 	if (phase == ESP_MIP && addr == esp->command_block_dma) {
-		zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
-					esp_count, dma_count, write, cmd);
+		esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
+				 dma_count, write, cmd);
 		return;
 	}
 
+	/* Clear the results of a possible prior esp->ops->send_dma_cmd() */
+	esp->send_cmd_error = 0;
+	esp->send_cmd_residual = 0;
+
 	if (write)
 		/* DMA receive */
 		dma_sync_single_for_device(esp->dev, addr, esp_count,
@@ -484,7 +294,6 @@
 	scsi_esp_cmd(esp, ESP_CMD_DMA);
 	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
 	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
-	zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
 
 	scsi_esp_cmd(esp, cmd);
 }
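Every zorro_esp send_dma_cmd variant now shares this prologue: message-in
bytes are pushed through the esp_scsi core's esp_send_pio_cmd(), and the
core's send_cmd_error/send_cmd_residual fields replace the driver-private
zep->error flag. Dropping the ESP_TCHI write pairs with the new 0xFFFF
dma_length_limit, since the transfer counter is now programmed as a 16-bit
value. A condensed sketch with the board-specific register setup elided:

    #include "esp_scsi.h"   /* struct esp, esp_send_pio_cmd() */

    static void send_cmd_skeleton(struct esp *esp, u32 addr, u32 esp_count,
                                  u32 dma_count, int write, u8 cmd)
    {
            u8 phase = esp->sreg & ESP_STAT_PMASK;

            /* message-in to the command block goes via PIO */
            if (phase == ESP_MIP && addr == esp->command_block_dma) {
                    esp_send_pio_cmd(esp, (u32)esp->command_block,
                                     esp_count, dma_count, write, cmd);
                    return;
            }

            /* clear results of a possible prior send_dma_cmd() */
            esp->send_cmd_error = 0;
            esp->send_cmd_residual = 0;

            /* ... program the board DMA engine and the 16-bit counter ... */
    }
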
@@ -494,18 +303,19 @@
 static void zorro_esp_send_blz1230II_dma_cmd(struct esp *esp, u32 addr,
 			u32 esp_count, u32 dma_count, int write, u8 cmd)
 {
-	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
 	struct blz1230II_dma_registers __iomem *dregs = esp->dma_regs;
 	u8 phase = esp->sreg & ESP_STAT_PMASK;
 
-	zep->error = 0;
 	/* Use PIO if transferring message bytes to esp->command_block_dma */
 	if (phase == ESP_MIP && addr == esp->command_block_dma) {
-		zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
-					esp_count, dma_count, write, cmd);
+		esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
+				 dma_count, write, cmd);
 		return;
 	}
 
+	esp->send_cmd_error = 0;
+	esp->send_cmd_residual = 0;
+
 	if (write)
 		/* DMA receive */
 		dma_sync_single_for_device(esp->dev, addr, esp_count,
@@ -529,7 +339,6 @@
 	scsi_esp_cmd(esp, ESP_CMD_DMA);
 	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
 	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
-	zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
 
 	scsi_esp_cmd(esp, cmd);
 }
@@ -539,18 +348,19 @@
 static void zorro_esp_send_blz2060_dma_cmd(struct esp *esp, u32 addr,
 			u32 esp_count, u32 dma_count, int write, u8 cmd)
 {
-	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
 	struct blz2060_dma_registers __iomem *dregs = esp->dma_regs;
 	u8 phase = esp->sreg & ESP_STAT_PMASK;
 
-	zep->error = 0;
 	/* Use PIO if transferring message bytes to esp->command_block_dma */
 	if (phase == ESP_MIP && addr == esp->command_block_dma) {
-		zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
-					esp_count, dma_count, write, cmd);
+		esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
+				 dma_count, write, cmd);
 		return;
 	}
 
+	esp->send_cmd_error = 0;
+	esp->send_cmd_residual = 0;
+
 	if (write)
 		/* DMA receive */
 		dma_sync_single_for_device(esp->dev, addr, esp_count,
@@ -574,7 +384,6 @@
 	scsi_esp_cmd(esp, ESP_CMD_DMA);
 	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
 	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
-	zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
 
 	scsi_esp_cmd(esp, cmd);
 }
@@ -589,17 +398,18 @@
 	u8 phase = esp->sreg & ESP_STAT_PMASK;
 	unsigned char *ctrl_data = &zep->ctrl_data;
 
-	zep->error = 0;
 	/* Use PIO if transferring message bytes to esp->command_block_dma */
 	if (phase == ESP_MIP && addr == esp->command_block_dma) {
-		zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
-					esp_count, dma_count, write, cmd);
+		esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
+				 dma_count, write, cmd);
 		return;
 	}
 
+	esp->send_cmd_error = 0;
+	esp->send_cmd_residual = 0;
+
 	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
 	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
-	zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
 
 	if (write) {
 		/* DMA receive */
@@ -635,21 +445,21 @@
 static void zorro_esp_send_cyberII_dma_cmd(struct esp *esp, u32 addr,
 			u32 esp_count, u32 dma_count, int write, u8 cmd)
 {
-	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
 	struct cyberII_dma_registers __iomem *dregs = esp->dma_regs;
 	u8 phase = esp->sreg & ESP_STAT_PMASK;
 
-	zep->error = 0;
 	/* Use PIO if transferring message bytes to esp->command_block_dma */
 	if (phase == ESP_MIP && addr == esp->command_block_dma) {
-		zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
-					esp_count, dma_count, write, cmd);
+		esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
+				 dma_count, write, cmd);
 		return;
 	}
 
+	esp->send_cmd_error = 0;
+	esp->send_cmd_residual = 0;
+
 	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
 	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
-	zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
 
 	if (write) {
 		/* DMA receive */
@@ -681,17 +491,18 @@
 	u8 phase = esp->sreg & ESP_STAT_PMASK;
 	unsigned char *ctrl_data = &zep->ctrl_data;
 
-	zep->error = 0;
 	/* Use PIO if transferring message bytes to esp->command_block_dma */
 	if (phase == ESP_MIP && addr == esp->command_block_dma) {
-		zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
-					esp_count, dma_count, write, cmd);
+		esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
+				 dma_count, write, cmd);
 		return;
 	}
 
+	esp->send_cmd_error = 0;
+	esp->send_cmd_residual = 0;
+
 	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
 	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
-	zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
 
 	if (write) {
 		/* DMA receive */
@@ -724,14 +535,7 @@
 
 static int zorro_esp_dma_error(struct esp *esp)
 {
-	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
-
-	/* check for error in case we've been doing PIO */
-	if (zep->error == 1)
-		return 1;
-
-	/* do nothing - there seems to be no way to check for DMA errors */
-	return 0;
+	return esp->send_cmd_error;
 }
 
 /* per-board ESP driver ops */
@@ -739,10 +543,6 @@
 static const struct esp_driver_ops blz1230_esp_ops = {
 	.esp_write8		= zorro_esp_write8,
 	.esp_read8		= zorro_esp_read8,
-	.map_single		= zorro_esp_map_single,
-	.map_sg			= zorro_esp_map_sg,
-	.unmap_single		= zorro_esp_unmap_single,
-	.unmap_sg		= zorro_esp_unmap_sg,
 	.irq_pending		= zorro_esp_irq_pending,
 	.dma_length_limit	= zorro_esp_dma_length_limit,
 	.reset_dma		= zorro_esp_reset_dma,
@@ -755,10 +555,6 @@
 static const struct esp_driver_ops blz1230II_esp_ops = {
 	.esp_write8		= zorro_esp_write8,
 	.esp_read8		= zorro_esp_read8,
-	.map_single		= zorro_esp_map_single,
-	.map_sg			= zorro_esp_map_sg,
-	.unmap_single		= zorro_esp_unmap_single,
-	.unmap_sg		= zorro_esp_unmap_sg,
 	.irq_pending		= zorro_esp_irq_pending,
 	.dma_length_limit	= zorro_esp_dma_length_limit,
 	.reset_dma		= zorro_esp_reset_dma,
@@ -771,10 +567,6 @@
 static const struct esp_driver_ops blz2060_esp_ops = {
 	.esp_write8		= zorro_esp_write8,
 	.esp_read8		= zorro_esp_read8,
-	.map_single		= zorro_esp_map_single,
-	.map_sg			= zorro_esp_map_sg,
-	.unmap_single		= zorro_esp_unmap_single,
-	.unmap_sg		= zorro_esp_unmap_sg,
 	.irq_pending		= zorro_esp_irq_pending,
 	.dma_length_limit	= zorro_esp_dma_length_limit,
 	.reset_dma		= zorro_esp_reset_dma,
@@ -787,10 +579,6 @@
 static const struct esp_driver_ops cyber_esp_ops = {
 	.esp_write8		= zorro_esp_write8,
 	.esp_read8		= zorro_esp_read8,
-	.map_single		= zorro_esp_map_single,
-	.map_sg			= zorro_esp_map_sg,
-	.unmap_single		= zorro_esp_unmap_single,
-	.unmap_sg		= zorro_esp_unmap_sg,
 	.irq_pending		= cyber_esp_irq_pending,
 	.dma_length_limit	= zorro_esp_dma_length_limit,
 	.reset_dma		= zorro_esp_reset_dma,
@@ -803,10 +591,6 @@
 static const struct esp_driver_ops cyberII_esp_ops = {
 	.esp_write8		= zorro_esp_write8,
 	.esp_read8		= zorro_esp_read8,
-	.map_single		= zorro_esp_map_single,
-	.map_sg			= zorro_esp_map_sg,
-	.unmap_single		= zorro_esp_unmap_single,
-	.unmap_sg		= zorro_esp_unmap_sg,
 	.irq_pending		= zorro_esp_irq_pending,
 	.dma_length_limit	= zorro_esp_dma_length_limit,
 	.reset_dma		= zorro_esp_reset_dma,
@@ -819,10 +603,6 @@
 static const struct esp_driver_ops fastlane_esp_ops = {
 	.esp_write8		= zorro_esp_write8,
 	.esp_read8		= zorro_esp_read8,
-	.map_single		= zorro_esp_map_single,
-	.map_sg			= zorro_esp_map_sg,
-	.unmap_single		= zorro_esp_unmap_single,
-	.unmap_sg		= zorro_esp_unmap_sg,
 	.irq_pending		= fastlane_esp_irq_pending,
 	.dma_length_limit	= zorro_esp_dma_length_limit,
 	.reset_dma		= zorro_esp_reset_dma,
@@ -1039,6 +819,8 @@
 		goto fail_unmap_fastlane;
 	}
 
+	esp->fifo_reg = esp->regs + ESP_FDATA * 4;
+
 	/* Check whether a Blizzard 12x0 or CyberstormII really has SCSI */
 	if (zdd->scsi_option) {
 		zorro_esp_write8(esp, (ESP_CONFIG1_PENABLE | 7), ESP_CFG1);
@@ -1082,7 +864,7 @@
 	}
 
 	/* register the chip */
-	err = scsi_esp_register(esp, &z->dev);
+	err = scsi_esp_register(esp);
 
 	if (err) {
 		err = -ENOMEM;