Update Linux to v5.4.2

Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 59d7844..fea3cb6 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -74,30 +74,55 @@
 #define MAX_HBA_QUEUE_DEPTH	30000
 #define MAX_CHAIN_DEPTH		100000
 static int max_queue_depth = -1;
-module_param(max_queue_depth, int, 0);
+module_param(max_queue_depth, int, 0444);
 MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
 
 static int max_sgl_entries = -1;
-module_param(max_sgl_entries, int, 0);
+module_param(max_sgl_entries, int, 0444);
 MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");
 
 static int msix_disable = -1;
-module_param(msix_disable, int, 0);
+module_param(msix_disable, int, 0444);
 MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
 
 static int smp_affinity_enable = 1;
-module_param(smp_affinity_enable, int, S_IRUGO);
+module_param(smp_affinity_enable, int, 0444);
 MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");
 
 static int max_msix_vectors = -1;
-module_param(max_msix_vectors, int, 0);
+module_param(max_msix_vectors, int, 0444);
 MODULE_PARM_DESC(max_msix_vectors,
 	" max msix vectors");
 
+static int irqpoll_weight = -1;
+module_param(irqpoll_weight, int, 0444);
+MODULE_PARM_DESC(irqpoll_weight,
+	"irq poll weight (default= one fourth of HBA queue depth)");
+
 static int mpt3sas_fwfault_debug;
 MODULE_PARM_DESC(mpt3sas_fwfault_debug,
 	" enable detection of firmware fault and halt firmware - (default=0)");
 
+static int perf_mode = -1;
+module_param(perf_mode, int, 0444);
+MODULE_PARM_DESC(perf_mode,
+	"Performance mode (only for Aero/Sea Generation), options:\n\t\t"
+	"0 - balanced: high iops mode is enabled &\n\t\t"
+	"interrupt coalescing is enabled only on high iops queues,\n\t\t"
+	"1 - iops: high iops mode is disabled &\n\t\t"
+	"interrupt coalescing is enabled on all queues,\n\t\t"
+	"2 - latency: high iops mode is disabled &\n\t\t"
+	"interrupt coalescing is enabled on all queues with timeout value 0xA,\n"
+	"\t\tdefault - default perf_mode is 'balanced'"
+	);
+
+enum mpt3sas_perf_mode {
+	MPT_PERF_MODE_DEFAULT	= -1,
+	MPT_PERF_MODE_BALANCED	= 0,
+	MPT_PERF_MODE_IOPS	= 1,
+	MPT_PERF_MODE_LATENCY	= 2,
+};
+
 static int
 _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
 
@@ -122,8 +147,8 @@
 	if (!(status & MPT3_CMD_RESET))
 		issue_reset = 1;
 
-	pr_err(MPT3SAS_FMT "Command %s\n", ioc->name,
-	    ((issue_reset == 0) ? "terminated due to Host Reset" : "Timeout"));
+	ioc_err(ioc, "Command %s\n",
+		issue_reset == 0 ? "terminated due to Host Reset" : "Timeout");
 	_debug_dump_mf(mpi_request, sz);
 
 	return issue_reset;
@@ -157,6 +182,32 @@
 	param_get_int, &mpt3sas_fwfault_debug, 0644);
 
 /**
+ * _base_readl_aero - retry readl up to three times.
+ * @addr: MPT Fusion system interface register address
+ *
+ * Retry the readl() up to three times if it returns a zero value
+ * while reading the system interface register.
+ */
+static inline u32
+_base_readl_aero(const volatile void __iomem *addr)
+{
+	u32 i = 0, ret_val;
+
+	do {
+		ret_val = readl(addr);
+		i++;
+	} while (ret_val == 0 && i < 3);
+
+	return ret_val;
+}
+
+static inline u32
+_base_readl(const volatile void __iomem *addr)
+{
+	return readl(addr);
+}
+
+/**
  * _base_clone_reply_to_sys_mem - copies reply to reply free iomem
  *				  in BAR0 space.
  *
@@ -336,9 +387,7 @@
 				return ct->chain_buffer;
 		}
 	}
-	pr_info(MPT3SAS_FMT
-	    "Provided chain_buffer_dma address is not in the lookup list\n",
-	    ioc->name);
+	ioc_info(ioc, "Provided chain_buffer_dma address is not in the lookup list\n");
 	return NULL;
 }
 
@@ -394,7 +443,7 @@
 		/* Get scsi_cmd using smid */
 		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
 		if (scmd == NULL) {
-			pr_err(MPT3SAS_FMT "scmd is NULL\n", ioc->name);
+			ioc_err(ioc, "scmd is NULL\n");
 			return;
 		}
 
@@ -532,11 +581,11 @@
 	struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
 	struct pci_dev *pdev;
 
-	if ((ioc == NULL))
+	if (!ioc)
 		return -1;
 
 	pdev = ioc->pdev;
-	if ((pdev == NULL))
+	if (!pdev)
 		return -1;
 	pci_stop_and_remove_bus_device_locked(pdev);
 	return 0;
@@ -566,8 +615,7 @@
 
 	doorbell = mpt3sas_base_get_iocstate(ioc, 0);
 	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
-		pr_err(MPT3SAS_FMT "SAS host is non-operational !!!!\n",
-		    ioc->name);
+		ioc_err(ioc, "SAS host is non-operational !!!!\n");
 
 		/* It may be possible that EEH recovery can resolve some of
 		 * pci bus failure issues rather removing the dead ioc function
@@ -600,13 +648,11 @@
 		p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
 		    "%s_dead_ioc_%d", ioc->driver_name, ioc->id);
 		if (IS_ERR(p))
-			pr_err(MPT3SAS_FMT
-			"%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
-			ioc->name, __func__);
+			ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
+				__func__);
 		else
-			pr_err(MPT3SAS_FMT
-			"%s: Running mpt3sas_dead_ioc thread success !!!!\n",
-			ioc->name, __func__);
+			ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
+				__func__);
 		return; /* don't rearm timer */
 	}
 
@@ -614,8 +660,8 @@
 
 	if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
 		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
-		pr_warn(MPT3SAS_FMT "%s: hard reset: %s\n", ioc->name,
-		    __func__, (rc == 0) ? "success" : "failed");
+		ioc_warn(ioc, "%s: hard reset: %s\n",
+			 __func__, rc == 0 ? "success" : "failed");
 		doorbell = mpt3sas_base_get_iocstate(ioc, 0);
 		if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
 			mpt3sas_base_fault_info(ioc, doorbell &
@@ -657,8 +703,7 @@
 	ioc->fault_reset_work_q =
 		create_singlethread_workqueue(ioc->fault_reset_work_q_name);
 	if (!ioc->fault_reset_work_q) {
-		pr_err(MPT3SAS_FMT "%s: failed (line=%d)\n",
-		    ioc->name, __func__, __LINE__);
+		ioc_err(ioc, "%s: failed (line=%d)\n", __func__, __LINE__);
 		return;
 	}
 	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
@@ -700,8 +745,7 @@
 void
 mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code)
 {
-	pr_err(MPT3SAS_FMT "fault_state(0x%04x)!\n",
-	    ioc->name, fault_code);
+	ioc_err(ioc, "fault_state(0x%04x)!\n", fault_code);
 }
 
 /**
@@ -723,13 +767,12 @@
 
 	dump_stack();
 
-	doorbell = readl(&ioc->chip->Doorbell);
+	doorbell = ioc->base_readl(&ioc->chip->Doorbell);
 	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
 		mpt3sas_base_fault_info(ioc , doorbell);
 	else {
 		writel(0xC0FFEE00, &ioc->chip->Doorbell);
-		pr_err(MPT3SAS_FMT "Firmware is halted due to command timeout\n",
-			ioc->name);
+		ioc_err(ioc, "Firmware is halted due to command timeout\n");
 	}
 
 	if (ioc->fwfault_debug == 2)
@@ -956,8 +999,8 @@
 		break;
 	}
 
-	pr_warn(MPT3SAS_FMT "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
-		ioc->name, desc, ioc_status, request_hdr, func_str);
+	ioc_warn(ioc, "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
+		 desc, ioc_status, request_hdr, func_str);
 
 	_debug_dump_mf(request_hdr, frame_sz/4);
 }
@@ -1003,9 +1046,9 @@
 	{
 		Mpi2EventDataSasDiscovery_t *event_data =
 		    (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
-		pr_info(MPT3SAS_FMT "Discovery: (%s)", ioc->name,
-		    (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
-		    "start" : "stop");
+		ioc_info(ioc, "Discovery: (%s)",
+			 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
+			 "start" : "stop");
 		if (event_data->DiscoveryStatus)
 			pr_cont(" discovery_status(0x%08x)",
 			    le32_to_cpu(event_data->DiscoveryStatus));
@@ -1059,14 +1102,13 @@
 	{
 		Mpi26EventDataPCIeEnumeration_t *event_data =
 			(Mpi26EventDataPCIeEnumeration_t *)mpi_reply->EventData;
-		pr_info(MPT3SAS_FMT "PCIE Enumeration: (%s)", ioc->name,
-			   (event_data->ReasonCode ==
-				MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
-				"start" : "stop");
+		ioc_info(ioc, "PCIE Enumeration: (%s)",
+			 event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED ?
+			 "start" : "stop");
 		if (event_data->EnumerationStatus)
-			pr_info("enumeration_status(0x%08x)",
-				   le32_to_cpu(event_data->EnumerationStatus));
-		pr_info("\n");
+			pr_cont("enumeration_status(0x%08x)",
+				le32_to_cpu(event_data->EnumerationStatus));
+		pr_cont("\n");
 		return;
 	}
 	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
@@ -1077,7 +1119,7 @@
 	if (!desc)
 		return;
 
-	pr_info(MPT3SAS_FMT "%s\n", ioc->name, desc);
+	ioc_info(ioc, "%s\n", desc);
 }
 
 /**
@@ -1128,11 +1170,9 @@
 		break;
 	}
 
-	pr_warn(MPT3SAS_FMT
-		"log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
-		ioc->name, log_info,
-	     originator_str, sas_loginfo.dw.code,
-	     sas_loginfo.dw.subcode);
+	ioc_warn(ioc, "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
+		 log_info,
+		 originator_str, sas_loginfo.dw.code, sas_loginfo.dw.subcode);
 }
 
 /**
@@ -1152,8 +1192,8 @@
 
 	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
 	if (unlikely(!mpi_reply)) {
-		pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return;
 	}
 	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
@@ -1249,9 +1289,9 @@
 		delayed_event_ack->EventContext = mpi_reply->EventContext;
 		list_add_tail(&delayed_event_ack->list,
 				&ioc->delayed_event_ack_list);
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT
-				"DELAYED: EVENT ACK: event (0x%04x)\n",
-				ioc->name, le16_to_cpu(mpi_reply->Event)));
+		dewtprintk(ioc,
+			   ioc_info(ioc, "DELAYED: EVENT ACK: event (0x%04x)\n",
+				    le16_to_cpu(mpi_reply->Event)));
 		goto out;
 	}
 
@@ -1262,7 +1302,7 @@
 	ack_request->EventContext = mpi_reply->EventContext;
 	ack_request->VF_ID = 0;  /* TODO */
 	ack_request->VP_ID = 0;
-	mpt3sas_base_put_smid_default(ioc, smid);
+	ioc->put_smid_default(ioc, smid);
 
  out:
 
@@ -1336,10 +1376,10 @@
 	u32 him_register;
 
 	ioc->mask_interrupts = 1;
-	him_register = readl(&ioc->chip->HostInterruptMask);
+	him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
 	him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
 	writel(him_register, &ioc->chip->HostInterruptMask);
-	readl(&ioc->chip->HostInterruptMask);
+	ioc->base_readl(&ioc->chip->HostInterruptMask);
 }
 
 /**
@@ -1353,7 +1393,7 @@
 {
 	u32 him_register;
 
-	him_register = readl(&ioc->chip->HostInterruptMask);
+	him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
 	him_register &= ~MPI2_HIM_RIM;
 	writel(him_register, &ioc->chip->HostInterruptMask);
 	ioc->mask_interrupts = 0;
@@ -1367,20 +1407,30 @@
 	} u;
 };
 
-/**
- * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
- * @irq: irq number (not used)
- * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
- *
- * Return: IRQ_HANDLED if processed, else IRQ_NONE.
- */
-static irqreturn_t
-_base_interrupt(int irq, void *bus_id)
+static u32 base_mod64(u64 dividend, u32 divisor)
 {
-	struct adapter_reply_queue *reply_q = bus_id;
+	u32 remainder;
+
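+	/* do_div() divides 'dividend' in place and returns the 32-bit remainder */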
+	if (!divisor)
+		pr_err("mpt3sas: DIVISOR is zero, in div fn\n");
+	remainder = do_div(dividend, divisor);
+	return remainder;
+}
+
+/**
+ * _base_process_reply_queue - Process reply descriptors from reply
+ *		descriptor post queue.
+ * @reply_q: per IRQ's reply queue object.
+ *
+ * Return: number of reply descriptors processed from reply
+ *		descriptor queue.
+ */
+static int
+_base_process_reply_queue(struct adapter_reply_queue *reply_q)
+{
 	union reply_descriptor rd;
-	u32 completed_cmds;
-	u8 request_desript_type;
+	u64 completed_cmds;
+	u8 request_descript_type;
 	u16 smid;
 	u8 cb_idx;
 	u32 reply;
@@ -1389,21 +1439,18 @@
 	Mpi2ReplyDescriptorsUnion_t *rpf;
 	u8 rc;
 
-	if (ioc->mask_interrupts)
-		return IRQ_NONE;
-
+	completed_cmds = 0;
 	if (!atomic_add_unless(&reply_q->busy, 1, 1))
-		return IRQ_NONE;
+		return completed_cmds;
 
 	rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
-	request_desript_type = rpf->Default.ReplyFlags
+	request_descript_type = rpf->Default.ReplyFlags
 	     & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
-	if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
+	if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
 		atomic_dec(&reply_q->busy);
-		return IRQ_NONE;
+		return completed_cmds;
 	}
 
-	completed_cmds = 0;
 	cb_idx = 0xFF;
 	do {
 		rd.word = le64_to_cpu(rpf->Words);
@@ -1411,11 +1458,11 @@
 			goto out;
 		reply = 0;
 		smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
-		if (request_desript_type ==
+		if (request_descript_type ==
 		    MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
-		    request_desript_type ==
+		    request_descript_type ==
 		    MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
-		    request_desript_type ==
+		    request_descript_type ==
 		    MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS) {
 			cb_idx = _base_get_cb_idx(ioc, smid);
 			if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
@@ -1425,7 +1472,7 @@
 				if (rc)
 					mpt3sas_base_free_smid(ioc, smid);
 			}
-		} else if (request_desript_type ==
+		} else if (request_descript_type ==
 		    MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
 			reply = le32_to_cpu(
 			    rpf->AddressReply.ReplyFrameAddress);
@@ -1471,7 +1518,7 @@
 		    (reply_q->reply_post_host_index ==
 		    (ioc->reply_post_queue_depth - 1)) ? 0 :
 		    reply_q->reply_post_host_index + 1;
-		request_desript_type =
+		request_descript_type =
 		    reply_q->reply_post_free[reply_q->reply_post_host_index].
 		    Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
 		completed_cmds++;
@@ -1480,7 +1527,7 @@
 		 * So that FW can find enough entries to post the Reply
 		 * Descriptors in the reply descriptor post queue.
 		 */
-		if (completed_cmds > ioc->hba_queue_depth/3) {
+		if (!base_mod64(completed_cmds, ioc->thresh_hold)) {
 			if (ioc->combined_reply_queue) {
 				writel(reply_q->reply_post_host_index |
 						((msix_index  & 7) <<
@@ -1492,9 +1539,14 @@
 						 MPI2_RPHI_MSIX_INDEX_SHIFT),
 						&ioc->chip->ReplyPostHostIndex);
 			}
-			completed_cmds = 1;
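+			/* Hand further processing off to irq_poll so the hard IRQ handler stays bounded */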
+			if (!reply_q->irq_poll_scheduled) {
+				reply_q->irq_poll_scheduled = true;
+				irq_poll_sched(&reply_q->irqpoll);
+			}
+			atomic_dec(&reply_q->busy);
+			return completed_cmds;
 		}
-		if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
+		if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
 			goto out;
 		if (!reply_q->reply_post_host_index)
 			rpf = reply_q->reply_post_free;
@@ -1506,14 +1558,14 @@
 
 	if (!completed_cmds) {
 		atomic_dec(&reply_q->busy);
-		return IRQ_NONE;
+		return completed_cmds;
 	}
 
 	if (ioc->is_warpdrive) {
 		writel(reply_q->reply_post_host_index,
 		ioc->reply_post_host_index[msix_index]);
 		atomic_dec(&reply_q->busy);
-		return IRQ_HANDLED;
+		return completed_cmds;
 	}
 
 	/* Update Reply Post Host Index.
@@ -1540,7 +1592,82 @@
 			MPI2_RPHI_MSIX_INDEX_SHIFT),
 			&ioc->chip->ReplyPostHostIndex);
 	atomic_dec(&reply_q->busy);
-	return IRQ_HANDLED;
+	return completed_cmds;
+}
+
+/**
+ * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
+ * @irq: irq number (not used)
+ * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
+ *
+ * Return: IRQ_HANDLED if processed, else IRQ_NONE.
+ */
+static irqreturn_t
+_base_interrupt(int irq, void *bus_id)
+{
+	struct adapter_reply_queue *reply_q = bus_id;
+	struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
+
+	if (ioc->mask_interrupts)
+		return IRQ_NONE;
+	if (reply_q->irq_poll_scheduled)
+		return IRQ_HANDLED;
+	return ((_base_process_reply_queue(reply_q) > 0) ?
+			IRQ_HANDLED : IRQ_NONE);
+}
+
+/**
+ * _base_irqpoll - IRQ poll callback handler
+ * @irqpoll: irq_poll object
+ * @budget: irq poll weight
+ *
+ * Return: number of reply descriptors processed.
+ */
+static int
+_base_irqpoll(struct irq_poll *irqpoll, int budget)
+{
+	struct adapter_reply_queue *reply_q;
+	int num_entries = 0;
+
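+	/* Poll with the IRQ line disabled; re-enable it once we complete under budget */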
+	reply_q = container_of(irqpoll, struct adapter_reply_queue,
+			irqpoll);
+	if (reply_q->irq_line_enable) {
+		disable_irq(reply_q->os_irq);
+		reply_q->irq_line_enable = false;
+	}
+	num_entries = _base_process_reply_queue(reply_q);
+	if (num_entries < budget) {
+		irq_poll_complete(irqpoll);
+		reply_q->irq_poll_scheduled = false;
+		reply_q->irq_line_enable = true;
+		enable_irq(reply_q->os_irq);
+	}
+
+	return num_entries;
+}
+
+/**
+ * _base_init_irqpolls - initialize IRQ polls
+ * @ioc: per adapter object
+ *
+ * Return: nothing.
+ */
+static void
+_base_init_irqpolls(struct MPT3SAS_ADAPTER *ioc)
+{
+	struct adapter_reply_queue *reply_q, *next;
+
+	if (list_empty(&ioc->reply_queue_list))
+		return;
+
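+	/* Each queue's poll weight defaults to one fourth of the HBA queue depth */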
+	list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
+		irq_poll_init(&reply_q->irqpoll,
+			ioc->hba_queue_depth/4, _base_irqpoll);
+		reply_q->irq_poll_scheduled = false;
+		reply_q->irq_line_enable = true;
+		reply_q->os_irq = pci_irq_vector(ioc->pdev,
+		    reply_q->msix_index);
+	}
 }
 
 /**
@@ -1581,6 +1708,17 @@
 		/* TMs are on msix_index == 0 */
 		if (reply_q->msix_index == 0)
 			continue;
+		if (reply_q->irq_poll_scheduled) {
+			/* Calling irq_poll_disable will wait for any pending
+			 * callbacks to have completed.
+			 */
+			irq_poll_disable(&reply_q->irqpoll);
+			irq_poll_enable(&reply_q->irqpoll);
+			reply_q->irq_poll_scheduled = false;
+			reply_q->irq_line_enable = true;
+			enable_irq(reply_q->os_irq);
+			continue;
+		}
 		synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
 	}
 }
@@ -2122,6 +2260,11 @@
 	bool build_prp = true;
 
 	data_length = scsi_bufflen(scmd);
+	if (pcie_device &&
+	    (mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))) {
+		build_prp = false;
+		return build_prp;
+	}
 
 	/* If Data length is <= 16K and number of SGE’s entries are <= 2
 	 * we build IEEE SGL
@@ -2270,7 +2413,7 @@
 	sges_left = scsi_dma_map(scmd);
 	if (sges_left < 0) {
 		sdev_printk(KERN_ERR, scmd->device,
-		 "pci_map_sg failed: request for %d bytes!\n",
+		 "scsi_dma_map failed: request for %d bytes!\n",
 		 scsi_bufflen(scmd));
 		return -ENOMEM;
 	}
@@ -2418,7 +2561,7 @@
 	sges_left = scsi_dma_map(scmd);
 	if (sges_left < 0) {
 		sdev_printk(KERN_ERR, scmd->device,
-			"pci_map_sg failed: request for %d bytes!\n",
+			"scsi_dma_map failed: request for %d bytes!\n",
 			scsi_bufflen(scmd));
 		return -ENOMEM;
 	}
@@ -2563,44 +2706,43 @@
 static int
 _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
 {
+	u64 required_mask, coherent_mask;
 	struct sysinfo s;
-	u64 consistent_dma_mask;
+	/* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
+	int dma_mask = (ioc->hba_mpi_version_belonged > MPI2_VERSION) ? 63 : 64;
 
 	if (ioc->is_mcpu_endpoint)
 		goto try_32bit;
 
-	if (ioc->dma_mask)
-		consistent_dma_mask = DMA_BIT_MASK(64);
-	else
-		consistent_dma_mask = DMA_BIT_MASK(32);
+	required_mask = dma_get_required_mask(&pdev->dev);
+	if (sizeof(dma_addr_t) == 4 || required_mask == 32)
+		goto try_32bit;
 
-	if (sizeof(dma_addr_t) > 4) {
-		const uint64_t required_mask =
-		    dma_get_required_mask(&pdev->dev);
-		if ((required_mask > DMA_BIT_MASK(32)) &&
-		    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
-		    !pci_set_consistent_dma_mask(pdev, consistent_dma_mask)) {
-			ioc->base_add_sg_single = &_base_add_sg_single_64;
-			ioc->sge_size = sizeof(Mpi2SGESimple64_t);
-			ioc->dma_mask = 64;
-			goto out;
-		}
-	}
+	if (ioc->dma_mask)
+		coherent_mask = DMA_BIT_MASK(dma_mask);
+	else
+		coherent_mask = DMA_BIT_MASK(32);
+
+	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)) ||
+	    dma_set_coherent_mask(&pdev->dev, coherent_mask))
+		goto try_32bit;
+
+	ioc->base_add_sg_single = &_base_add_sg_single_64;
+	ioc->sge_size = sizeof(Mpi2SGESimple64_t);
+	ioc->dma_mask = dma_mask;
+	goto out;
 
  try_32bit:
-	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
-	    && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
-		ioc->base_add_sg_single = &_base_add_sg_single_32;
-		ioc->sge_size = sizeof(Mpi2SGESimple32_t);
-		ioc->dma_mask = 32;
-	} else
+	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
 		return -ENODEV;
 
+	ioc->base_add_sg_single = &_base_add_sg_single_32;
+	ioc->sge_size = sizeof(Mpi2SGESimple32_t);
+	ioc->dma_mask = 32;
  out:
 	si_meminfo(&s);
-	pr_info(MPT3SAS_FMT
-		"%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
-		ioc->name, ioc->dma_mask, convert_to_kb(s.totalram));
+	ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
+		 ioc->dma_mask, convert_to_kb(s.totalram));
 
 	return 0;
 }
@@ -2609,7 +2751,7 @@
 _base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc,
 				      struct pci_dev *pdev)
 {
-	if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+	if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(ioc->dma_mask))) {
 		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
 			return -ENODEV;
 	}
@@ -2639,8 +2781,7 @@
 
 	base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
 	if (!base) {
-		dfailprintk(ioc, pr_info(MPT3SAS_FMT "msix not supported\n",
-			ioc->name));
+		dfailprintk(ioc, ioc_info(ioc, "msix not supported\n"));
 		return -EINVAL;
 	}
 
@@ -2658,9 +2799,8 @@
 		pci_read_config_word(ioc->pdev, base + 2, &message_control);
 		ioc->msix_vector_count = (message_control & 0x3FF) + 1;
 	}
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT
-		"msix is supported, vector_count(%d)\n",
-		ioc->name, ioc->msix_vector_count));
+	dinitprintk(ioc, ioc_info(ioc, "msix is supported, vector_count(%d)\n",
+				  ioc->msix_vector_count));
 	return 0;
 }
 
@@ -2680,6 +2820,9 @@
 
 	list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
 		list_del(&reply_q->list);
+		if (ioc->smp_affinity_enable)
+			irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
+			    reply_q->msix_index), NULL);
 		free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index),
 			 reply_q);
 		kfree(reply_q);
@@ -2702,8 +2845,8 @@
 
 	reply_q =  kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
 	if (!reply_q) {
-		pr_err(MPT3SAS_FMT "unable to allocate memory %d!\n",
-		    ioc->name, (int)sizeof(struct adapter_reply_queue));
+		ioc_err(ioc, "unable to allocate memory %zu!\n",
+			sizeof(struct adapter_reply_queue));
 		return -ENOMEM;
 	}
 	reply_q->ioc = ioc;
@@ -2719,7 +2862,7 @@
 	r = request_irq(pci_irq_vector(pdev, index), _base_interrupt,
 			IRQF_SHARED, reply_q->name, reply_q);
 	if (r) {
-		pr_err(MPT3SAS_FMT "unable to allocate interrupt %d!\n",
+		pr_err("%s: unable to allocate interrupt %d!\n",
 		       reply_q->name, pci_irq_vector(pdev, index));
 		kfree(reply_q);
 		return -EBUSY;
@@ -2744,10 +2887,14 @@
 {
 	unsigned int cpu, nr_cpus, nr_msix, index = 0;
 	struct adapter_reply_queue *reply_q;
+	int local_numa_node;
 
 	if (!_base_is_controller_msix_enabled(ioc))
 		return;
 
+	if (ioc->msix_load_balance)
+		return;
+
 	memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
 
 	nr_cpus = num_online_cpus();
@@ -2756,14 +2903,33 @@
 	if (!nr_msix)
 		return;
 
-	if (smp_affinity_enable) {
+	if (ioc->smp_affinity_enable) {
+
+		/*
+		 * set irq affinity to local numa node for those irqs
+		 * corresponding to high iops queues.
+		 */
+		if (ioc->high_iops_queues) {
+			local_numa_node = dev_to_node(&ioc->pdev->dev);
+			for (index = 0; index < ioc->high_iops_queues;
+			    index++) {
+				irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
+				    index), cpumask_of_node(local_numa_node));
+			}
+		}
+
 		list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
-			const cpumask_t *mask = pci_irq_get_affinity(ioc->pdev,
-							reply_q->msix_index);
-			if (!mask) {
-				pr_warn(MPT3SAS_FMT "no affinity for msi %x\n",
-					ioc->name, reply_q->msix_index);
+			const cpumask_t *mask;
+
+			if (reply_q->msix_index < ioc->high_iops_queues)
 				continue;
+
+			mask = pci_irq_get_affinity(ioc->pdev,
+			    reply_q->msix_index);
+			if (!mask) {
+				ioc_warn(ioc, "no affinity for msi %x\n",
+					 reply_q->msix_index);
+				goto fall_back;
 			}
 
 			for_each_cpu_and(cpu, mask, cpu_online_mask) {
@@ -2774,12 +2940,18 @@
 		}
 		return;
 	}
+
+fall_back:
 	cpu = cpumask_first(cpu_online_mask);
+	nr_msix -= ioc->high_iops_queues;
+	index = 0;
 
 	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
-
 		unsigned int i, group = nr_cpus / nr_msix;
 
+		if (reply_q->msix_index < ioc->high_iops_queues)
+			continue;
+
 		if (cpu >= nr_cpus)
 			break;
 
@@ -2795,6 +2967,52 @@
 }
 
 /**
+ * _base_check_and_enable_high_iops_queues - enable high iops mode
+ * @ioc: per adapter object
+ * @hba_msix_vector_count: msix vectors supported by HBA
+ *
+ * Enable high iops queues only if
+ *  - HBA is a SEA/AERO controller and
+ *  - MSI-X vectors supported by the HBA are 128 and
+ *  - total CPU count in the system is >= 16 and
+ *  - the driver is loaded with the default max_msix_vectors module parameter and
+ *  - the system is booted in non-kdump mode
+ *
+ * Return: nothing.
+ */
+static void
+_base_check_and_enable_high_iops_queues(struct MPT3SAS_ADAPTER *ioc,
+		int hba_msix_vector_count)
+{
+	u16 lnksta, speed;
+
+	if (perf_mode == MPT_PERF_MODE_IOPS ||
+	    perf_mode == MPT_PERF_MODE_LATENCY) {
+		ioc->high_iops_queues = 0;
+		return;
+	}
+
+	if (perf_mode == MPT_PERF_MODE_DEFAULT) {
+
+		pcie_capability_read_word(ioc->pdev, PCI_EXP_LNKSTA, &lnksta);
+		speed = lnksta & PCI_EXP_LNKSTA_CLS;
+
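+		/* Link speed below 0x4 (16.0 GT/s, PCIe Gen4): keep high iops queues disabled */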
+		if (speed < 0x4) {
+			ioc->high_iops_queues = 0;
+			return;
+		}
+	}
+
+	if (!reset_devices && ioc->is_aero_ioc &&
+	    hba_msix_vector_count == MPT3SAS_GEN35_MAX_MSIX_QUEUES &&
+	    num_online_cpus() >= MPT3SAS_HIGH_IOPS_REPLY_QUEUES &&
+	    max_msix_vectors == -1)
+		ioc->high_iops_queues = MPT3SAS_HIGH_IOPS_REPLY_QUEUES;
+	else
+		ioc->high_iops_queues = 0;
+}
+
+/**
  * _base_disable_msix - disables msix
  * @ioc: per adapter object
  *
@@ -2804,11 +3022,38 @@
 {
 	if (!ioc->msix_enable)
 		return;
-	pci_disable_msix(ioc->pdev);
+	pci_free_irq_vectors(ioc->pdev);
 	ioc->msix_enable = 0;
 }
 
 /**
+ * _base_alloc_irq_vectors - allocate msix vectors
+ * @ioc: per adapter object
+ *
+ */
+static int
+_base_alloc_irq_vectors(struct MPT3SAS_ADAPTER *ioc)
+{
+	int i, irq_flags = PCI_IRQ_MSIX;
+	struct irq_affinity desc = { .pre_vectors = ioc->high_iops_queues };
+	struct irq_affinity *descp = &desc;
+
+	if (ioc->smp_affinity_enable)
+		irq_flags |= PCI_IRQ_AFFINITY;
+	else
+		descp = NULL;
+
+	ioc_info(ioc, " %d %d\n", ioc->high_iops_queues,
+	    ioc->msix_vector_count);
+
+	i = pci_alloc_irq_vectors_affinity(ioc->pdev,
+	    ioc->high_iops_queues,
+	    ioc->msix_vector_count, irq_flags, descp);
+
+	return i;
+}
+
+/**
  * _base_enable_msix - enables msix, failback to io_apic
  * @ioc: per adapter object
  *
@@ -2819,7 +3064,8 @@
 	int r;
 	int i, local_max_msix_vectors;
 	u8 try_msix = 0;
-	unsigned int irq_flags = PCI_IRQ_MSIX;
+
+	ioc->msix_load_balance = false;
 
 	if (msix_disable == -1 || msix_disable == 0)
 		try_msix = 1;
@@ -2830,13 +3076,16 @@
 	if (_base_check_enable_msix(ioc) != 0)
 		goto try_ioapic;
 
-	ioc->reply_queue_count = min_t(int, ioc->cpu_count,
+	ioc_info(ioc, "MSI-X vectors supported: %d\n", ioc->msix_vector_count);
+	pr_info("\t no of cores: %d, max_msix_vectors: %d\n",
+		ioc->cpu_count, max_msix_vectors);
+	if (ioc->is_aero_ioc)
+		_base_check_and_enable_high_iops_queues(ioc,
+			ioc->msix_vector_count);
+	ioc->reply_queue_count =
+		min_t(int, ioc->cpu_count + ioc->high_iops_queues,
 		ioc->msix_vector_count);
 
-	printk(MPT3SAS_FMT "MSI-X vectors supported: %d, no of cores"
-	  ": %d, max_msix_vectors: %d\n", ioc->name, ioc->msix_vector_count,
-	  ioc->cpu_count, max_msix_vectors);
-
 	if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
 		local_max_msix_vectors = (reset_devices) ? 1 : 8;
 	else
@@ -2848,18 +3097,27 @@
 	else if (local_max_msix_vectors == 0)
 		goto try_ioapic;
 
-	if (ioc->msix_vector_count < ioc->cpu_count)
-		smp_affinity_enable = 0;
+	/*
+	 * Enable msix_load_balance only if combined reply queue mode is
+	 * disabled on SAS3 & above generation HBA devices.
+	 */
+	if (!ioc->combined_reply_queue &&
+	    ioc->hba_mpi_version_belonged != MPI2_VERSION) {
+		ioc->msix_load_balance = true;
+	}
 
-	if (smp_affinity_enable)
-		irq_flags |= PCI_IRQ_AFFINITY;
+	/*
+	 * smp affinity setting is not needed when msix load balance
+	 * is enabled.
+	 */
+	if (ioc->msix_load_balance)
+		ioc->smp_affinity_enable = 0;
 
-	r = pci_alloc_irq_vectors(ioc->pdev, 1, ioc->reply_queue_count,
-				  irq_flags);
+	r = _base_alloc_irq_vectors(ioc);
 	if (r < 0) {
-		dfailprintk(ioc, pr_info(MPT3SAS_FMT
-			"pci_alloc_irq_vectors failed (r=%d) !!!\n",
-			ioc->name, r));
+		dfailprintk(ioc,
+			    ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n",
+				     r));
 		goto try_ioapic;
 	}
 
@@ -2874,17 +3132,21 @@
 		}
 	}
 
+	ioc_info(ioc, "High IOPs queues : %s\n",
+			ioc->high_iops_queues ? "enabled" : "disabled");
+
 	return 0;
 
 /* failback to io_apic interrupt routing */
  try_ioapic:
-
+	ioc->high_iops_queues = 0;
+	ioc_info(ioc, "High IOPs queues : disabled\n");
 	ioc->reply_queue_count = 1;
 	r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY);
 	if (r < 0) {
-		dfailprintk(ioc, pr_info(MPT3SAS_FMT
-			"pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n",
-			ioc->name, r));
+		dfailprintk(ioc,
+			    ioc_info(ioc, "pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n",
+				     r));
 	} else
 		r = _base_request_irq(ioc, 0);
 
@@ -2900,8 +3162,7 @@
 {
 	struct pci_dev *pdev = ioc->pdev;
 
-	dexitprintk(ioc, printk(MPT3SAS_FMT "%s\n",
-		ioc->name, __func__));
+	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	_base_free_irq(ioc);
 	_base_disable_msix(ioc);
@@ -2922,6 +3183,37 @@
 	}
 }
 
+static int
+_base_diag_reset(struct MPT3SAS_ADAPTER *ioc);
+
+/**
+ * _base_check_for_fault_and_issue_reset - check if IOC is in fault state
+ *     and, if so, issue a diag reset.
+ * @ioc: per adapter object
+ *
+ * Returns: 0 for success, non-zero for failure.
+ */
+static int
+_base_check_for_fault_and_issue_reset(struct MPT3SAS_ADAPTER *ioc)
+{
+	u32 ioc_state;
+	int rc = -EFAULT;
+
+	dinitprintk(ioc, pr_info("%s\n", __func__));
+	if (ioc->pci_error_recovery)
+		return 0;
+	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
+	dhsprintk(ioc, pr_info("%s: ioc_state(0x%08x)\n", __func__, ioc_state));
+
+	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
+		mpt3sas_base_fault_info(ioc, ioc_state &
+		    MPI2_DOORBELL_DATA_MASK);
+		rc = _base_diag_reset(ioc);
+	}
+
+	return rc;
+}
+
 /**
  * mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
  * @ioc: per adapter object
@@ -2934,18 +3226,16 @@
 	struct pci_dev *pdev = ioc->pdev;
 	u32 memap_sz;
 	u32 pio_sz;
-	int i, r = 0;
+	int i, r = 0, rc;
 	u64 pio_chip = 0;
 	phys_addr_t chip_phys = 0;
 	struct adapter_reply_queue *reply_q;
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n",
-	    ioc->name, __func__));
+	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
 	if (pci_enable_device_mem(pdev)) {
-		pr_warn(MPT3SAS_FMT "pci_enable_device_mem: failed\n",
-			ioc->name);
+		ioc_warn(ioc, "pci_enable_device_mem: failed\n");
 		ioc->bars = 0;
 		return -ENODEV;
 	}
@@ -2953,8 +3243,7 @@
 
 	if (pci_request_selected_regions(pdev, ioc->bars,
 	    ioc->driver_name)) {
-		pr_warn(MPT3SAS_FMT "pci_request_selected_regions: failed\n",
-			ioc->name);
+		ioc_warn(ioc, "pci_request_selected_regions: failed\n");
 		ioc->bars = 0;
 		r = -ENODEV;
 		goto out_fail;
@@ -2967,8 +3256,7 @@
 
 
 	if (_base_config_dma_addressing(ioc, pdev) != 0) {
-		pr_warn(MPT3SAS_FMT "no suitable DMA mask for %s\n",
-		    ioc->name, pci_name(pdev));
+		ioc_warn(ioc, "no suitable DMA mask for %s\n", pci_name(pdev));
 		r = -ENODEV;
 		goto out_fail;
 	}
@@ -2991,8 +3279,7 @@
 	}
 
 	if (ioc->chip == NULL) {
-		pr_err(MPT3SAS_FMT "unable to map adapter memory! "
-			" or resource not found\n", ioc->name);
+		ioc_err(ioc, "unable to map adapter memory! or resource not found\n");
 		r = -EINVAL;
 		goto out_fail;
 	}
@@ -3000,8 +3287,11 @@
 	_base_mask_interrupts(ioc);
 
 	r = _base_get_ioc_facts(ioc);
-	if (r)
-		goto out_fail;
+	if (r) {
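+		/* The IOC may be in a recoverable fault state: try a diag reset and re-read the facts once */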
+		rc = _base_check_for_fault_and_issue_reset(ioc);
+		if (rc || (_base_get_ioc_facts(ioc)))
+			goto out_fail;
+	}
 
 	if (!ioc->rdpq_array_enable_assigned) {
 		ioc->rdpq_array_enable = ioc->rdpq_array_capable;
@@ -3012,6 +3302,8 @@
 	if (r)
 		goto out_fail;
 
+	if (!ioc->is_driver_loading)
+		_base_init_irqpolls(ioc);
 	/* Use the Combined reply queue feature only for SAS3 C0 & higher
 	 * revision HBAs and also only when reply queue count is greater than 8
 	 */
@@ -3026,9 +3318,8 @@
 		     ioc->combined_reply_index_count,
 		     sizeof(resource_size_t *), GFP_KERNEL);
 		if (!ioc->replyPostRegisterIndex) {
-			dfailprintk(ioc, printk(MPT3SAS_FMT
-			"allocation for reply Post Register Index failed!!!\n",
-								   ioc->name));
+			dfailprintk(ioc,
+				    ioc_warn(ioc, "allocation for reply Post Register Index failed!!!\n"));
 			r = -ENOMEM;
 			goto out_fail;
 		}
@@ -3053,15 +3344,15 @@
 	}
 
 	list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
-		pr_info(MPT3SAS_FMT "%s: IRQ %d\n",
-		    reply_q->name,  ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
-		    "IO-APIC enabled"),
-		    pci_irq_vector(ioc->pdev, reply_q->msix_index));
+		pr_info("%s: %s enabled: IRQ %d\n",
+			reply_q->name,
+			ioc->msix_enable ? "PCI-MSI-X" : "IO-APIC",
+			pci_irq_vector(ioc->pdev, reply_q->msix_index));
 
-	pr_info(MPT3SAS_FMT "iomem(%pap), mapped(0x%p), size(%d)\n",
-	    ioc->name, &chip_phys, ioc->chip, memap_sz);
-	pr_info(MPT3SAS_FMT "ioport(0x%016llx), size(%d)\n",
-	    ioc->name, (unsigned long long)pio_chip, pio_sz);
+	ioc_info(ioc, "iomem(%pap), mapped(0x%p), size(%d)\n",
+		 &chip_phys, ioc->chip, memap_sz);
+	ioc_info(ioc, "ioport(0x%016llx), size(%d)\n",
+		 (unsigned long long)pio_chip, pio_sz);
 
 	/* Save PCI configuration state for recovery from PCI AER/EEH errors */
 	pci_save_state(pdev);
@@ -3153,13 +3444,58 @@
 	return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
 }
 
+/**
+ * _base_get_msix_index - get the msix index
+ * @ioc: per adapter object
+ * @scmd: scsi_cmnd object
+ *
+ * Return: msix index of general reply queues,
+ * i.e. reply queue on which IO request's reply
+ * should be posted by the HBA firmware.
+ */
 static inline u8
-_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc)
+_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc,
+	struct scsi_cmnd *scmd)
 {
+	/* Enables reply_queue load balancing */
+	if (ioc->msix_load_balance)
+		return ioc->reply_queue_count ?
+		    base_mod64(atomic64_add_return(1,
+		    &ioc->total_io_cnt), ioc->reply_queue_count) : 0;
+
 	return ioc->cpu_msix_table[raw_smp_processor_id()];
 }
 
 /**
+ * _base_get_high_iops_msix_index - get the msix index of
+ *				high iops queues
+ * @ioc: per adapter object
+ * @scmd: scsi_cmnd object
+ *
+ * Returns: msix index of high iops reply queues.
+ * i.e. high iops reply queue on which IO request's
+ * reply should be posted by the HBA firmware.
+ */
+static inline u8
+_base_get_high_iops_msix_index(struct MPT3SAS_ADAPTER *ioc,
+	struct scsi_cmnd *scmd)
+{
+	/*
+	 * Round robin the IO interrupts among the high iops
+	 * reply queues in terms of batch count 16 when outstanding
+	 * IOs on the target device is >=8.
+	 */
+	if (atomic_read(&scmd->device->device_busy) >
+	    MPT3SAS_DEVICE_HIGH_IOPS_DEPTH)
+		return base_mod64((
+		    atomic64_add_return(1, &ioc->high_iops_outstanding) /
+		    MPT3SAS_HIGH_IOPS_BATCH_COUNT),
+		    MPT3SAS_HIGH_IOPS_REPLY_QUEUES);
+
+	return _base_get_msix_index(ioc, scmd);
+}
+
+/**
  * mpt3sas_base_get_smid - obtain a free smid from internal queue
  * @ioc: per adapter object
  * @cb_idx: callback index
@@ -3176,8 +3512,7 @@
 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
 	if (list_empty(&ioc->internal_free_list)) {
 		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
-		pr_err(MPT3SAS_FMT "%s: smid not available\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: smid not available\n", __func__);
 		return 0;
 	}
 
@@ -3208,8 +3543,8 @@
 
 	smid = tag + 1;
 	request->cb_idx = cb_idx;
-	request->msix_io = _base_get_msix_index(ioc);
 	request->smid = smid;
+	request->scmd = scmd;
 	INIT_LIST_HEAD(&request->chain_list);
 	return smid;
 }
@@ -3263,6 +3598,7 @@
 		return;
 	st->cb_idx = 0xFF;
 	st->direct_io = 0;
+	st->scmd = NULL;
 	atomic_set(&ioc->chain_lookup[st->smid - 1].chain_offset, 0);
 	st->smid = 0;
 }
@@ -3280,12 +3616,18 @@
 
 	if (smid < ioc->hi_priority_smid) {
 		struct scsiio_tracker *st;
+		void *request;
 
 		st = _get_st_from_smid(ioc, smid);
 		if (!st) {
 			_base_recovery_check(ioc);
 			return;
 		}
+
+		/* Clear MPI request frame */
+		request = mpt3sas_base_get_msg_frame(ioc, smid);
+		memset(request, 0, ioc->request_sz);
+
 		mpt3sas_base_clear_st(ioc, st);
 		_base_recovery_check(ioc);
 		return;
@@ -3326,7 +3668,6 @@
 	spin_lock_irqsave(writeq_lock, flags);
 	__raw_writel((u32)(b), addr);
 	__raw_writel((u32)(b >> 32), (addr + 4));
-	mmiowb();
 	spin_unlock_irqrestore(writeq_lock, flags);
 }
 
@@ -3344,8 +3685,9 @@
 static inline void
 _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
 {
+	wmb();
 	__raw_writeq(b, addr);
-	mmiowb();
+	barrier();
 }
 #else
 static inline void
@@ -3356,13 +3698,37 @@
 #endif
 
 /**
+ * _base_set_and_get_msix_index - get the msix index and assign to msix_io
+ *                                variable of scsi tracker
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Return: msix index.
+ */
+static u8
+_base_set_and_get_msix_index(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+	struct scsiio_tracker *st = NULL;
+
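+	/* Only SCSI IO smids (below hi_priority_smid) have a scsiio tracker */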
+	if (smid < ioc->hi_priority_smid)
+		st = _get_st_from_smid(ioc, smid);
+
+	if (st == NULL)
+		return  _base_get_msix_index(ioc, NULL);
+
+	st->msix_io = ioc->get_msix_index_for_smlio(ioc, st->scmd);
+	return st->msix_io;
+}
+
+/**
  * _base_put_smid_mpi_ep_scsi_io - send SCSI_IO request to firmware
  * @ioc: per adapter object
  * @smid: system request message index
  * @handle: device handle
  */
 static void
-_base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
+_base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc,
+	u16 smid, u16 handle)
 {
 	Mpi2RequestDescriptorUnion_t descriptor;
 	u64 *request = (u64 *)&descriptor;
@@ -3375,7 +3741,7 @@
 	_base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
 					ioc->request_sz);
 	descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
-	descriptor.SCSIIO.MSIxIndex =  _base_get_msix_index(ioc);
+	descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
 	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
 	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
 	descriptor.SCSIIO.LMID = 0;
@@ -3397,7 +3763,7 @@
 
 
 	descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
-	descriptor.SCSIIO.MSIxIndex =  _base_get_msix_index(ioc);
+	descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
 	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
 	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
 	descriptor.SCSIIO.LMID = 0;
@@ -3406,13 +3772,13 @@
 }
 
 /**
- * mpt3sas_base_put_smid_fast_path - send fast path request to firmware
+ * _base_put_smid_fast_path - send fast path request to firmware
  * @ioc: per adapter object
  * @smid: system request message index
  * @handle: device handle
  */
-void
-mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+static void
+_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
 	u16 handle)
 {
 	Mpi2RequestDescriptorUnion_t descriptor;
@@ -3420,7 +3786,7 @@
 
 	descriptor.SCSIIO.RequestFlags =
 	    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
-	descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
+	descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
 	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
 	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
 	descriptor.SCSIIO.LMID = 0;
@@ -3429,13 +3795,13 @@
 }
 
 /**
- * mpt3sas_base_put_smid_hi_priority - send Task Management request to firmware
+ * _base_put_smid_hi_priority - send Task Management request to firmware
  * @ioc: per adapter object
  * @smid: system request message index
  * @msix_task: msix_task will be same as msix of IO incase of task abort else 0.
  */
-void
-mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+static void
+_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
 	u16 msix_task)
 {
 	Mpi2RequestDescriptorUnion_t descriptor;
@@ -3484,7 +3850,7 @@
 
 	descriptor.Default.RequestFlags =
 		MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
-	descriptor.Default.MSIxIndex =  _base_get_msix_index(ioc);
+	descriptor.Default.MSIxIndex =  _base_set_and_get_msix_index(ioc, smid);
 	descriptor.Default.SMID = cpu_to_le16(smid);
 	descriptor.Default.LMID = 0;
 	descriptor.Default.DescriptorTypeDependent = 0;
@@ -3493,12 +3859,12 @@
 }
 
 /**
- * mpt3sas_base_put_smid_default - Default, primarily used for config pages
+ * _base_put_smid_default - Default, primarily used for config pages
  * @ioc: per adapter object
  * @smid: system request message index
  */
-void
-mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+static void
+_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
 {
 	Mpi2RequestDescriptorUnion_t descriptor;
 	void *mpi_req_iomem;
@@ -3516,7 +3882,7 @@
 	}
 	request = (u64 *)&descriptor;
 	descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
-	descriptor.Default.MSIxIndex =  _base_get_msix_index(ioc);
+	descriptor.Default.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
 	descriptor.Default.SMID = cpu_to_le16(smid);
 	descriptor.Default.LMID = 0;
 	descriptor.Default.DescriptorTypeDependent = 0;
@@ -3530,6 +3896,95 @@
 }
 
 /**
+ * _base_put_smid_scsi_io_atomic - send SCSI_IO request to firmware using
+ *   Atomic Request Descriptor
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @handle: device handle, unused in this function, for function type match
+ *
+ * Return nothing.
+ */
+static void
+_base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+	u16 handle)
+{
+	Mpi26AtomicRequestDescriptor_t descriptor;
+	u32 *request = (u32 *)&descriptor;
+
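+	/* The Atomic Request Descriptor is posted with a single 32-bit write */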
+	descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
+	descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
+	descriptor.SMID = cpu_to_le16(smid);
+
+	writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
+}
+
+/**
+ * _base_put_smid_fast_path_atomic - send fast path request to firmware
+ * using Atomic Request Descriptor
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @handle: device handle, unused in this function, for function type match
+ * Return nothing
+ */
+static void
+_base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+	u16 handle)
+{
+	Mpi26AtomicRequestDescriptor_t descriptor;
+	u32 *request = (u32 *)&descriptor;
+
+	descriptor.RequestFlags = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
+	descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
+	descriptor.SMID = cpu_to_le16(smid);
+
+	writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
+}
+
+/**
+ * _base_put_smid_hi_priority_atomic - send Task Management request to
+ * firmware using Atomic Request Descriptor
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_task: msix_task will be same as msix of IO in case of task abort, else 0
+ *
+ * Return nothing.
+ */
+static void
+_base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+	u16 msix_task)
+{
+	Mpi26AtomicRequestDescriptor_t descriptor;
+	u32 *request = (u32 *)&descriptor;
+
+	descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
+	descriptor.MSIxIndex = msix_task;
+	descriptor.SMID = cpu_to_le16(smid);
+
+	writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
+}
+
+/**
+ * _base_put_smid_default_atomic - Default, primarily used for config pages;
+ * uses Atomic Request Descriptor
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Return nothing.
+ */
+static void
+_base_put_smid_default_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+	Mpi26AtomicRequestDescriptor_t descriptor;
+	u32 *request = (u32 *)&descriptor;
+
+	descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+	descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
+	descriptor.SMID = cpu_to_le16(smid);
+
+	writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
+}
+
+/**
  * _base_display_OEMs_branding - Display branding string
  * @ioc: per adapter object
  */
@@ -3545,89 +4000,87 @@
 		case MPI2_MFGPAGE_DEVID_SAS2008:
 			switch (ioc->pdev->subsystem_device) {
 			case MPT2SAS_INTEL_RMS2LL080_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				    MPT2SAS_INTEL_RMS2LL080_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT2SAS_INTEL_RMS2LL080_BRANDING);
 				break;
 			case MPT2SAS_INTEL_RMS2LL040_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				    MPT2SAS_INTEL_RMS2LL040_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT2SAS_INTEL_RMS2LL040_BRANDING);
 				break;
 			case MPT2SAS_INTEL_SSD910_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				    MPT2SAS_INTEL_SSD910_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT2SAS_INTEL_SSD910_BRANDING);
 				break;
 			default:
-				pr_info(MPT3SAS_FMT
-				 "Intel(R) Controller: Subsystem ID: 0x%X\n",
-				 ioc->name, ioc->pdev->subsystem_device);
+				ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
+					 ioc->pdev->subsystem_device);
 				break;
 			}
+			break;
 		case MPI2_MFGPAGE_DEVID_SAS2308_2:
 			switch (ioc->pdev->subsystem_device) {
 			case MPT2SAS_INTEL_RS25GB008_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				    MPT2SAS_INTEL_RS25GB008_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT2SAS_INTEL_RS25GB008_BRANDING);
 				break;
 			case MPT2SAS_INTEL_RMS25JB080_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				    MPT2SAS_INTEL_RMS25JB080_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT2SAS_INTEL_RMS25JB080_BRANDING);
 				break;
 			case MPT2SAS_INTEL_RMS25JB040_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				    MPT2SAS_INTEL_RMS25JB040_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT2SAS_INTEL_RMS25JB040_BRANDING);
 				break;
 			case MPT2SAS_INTEL_RMS25KB080_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				    MPT2SAS_INTEL_RMS25KB080_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT2SAS_INTEL_RMS25KB080_BRANDING);
 				break;
 			case MPT2SAS_INTEL_RMS25KB040_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				    MPT2SAS_INTEL_RMS25KB040_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT2SAS_INTEL_RMS25KB040_BRANDING);
 				break;
 			case MPT2SAS_INTEL_RMS25LB040_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				    MPT2SAS_INTEL_RMS25LB040_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT2SAS_INTEL_RMS25LB040_BRANDING);
 				break;
 			case MPT2SAS_INTEL_RMS25LB080_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				    MPT2SAS_INTEL_RMS25LB080_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT2SAS_INTEL_RMS25LB080_BRANDING);
 				break;
 			default:
-				pr_info(MPT3SAS_FMT
-				 "Intel(R) Controller: Subsystem ID: 0x%X\n",
-				 ioc->name, ioc->pdev->subsystem_device);
+				ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
+					 ioc->pdev->subsystem_device);
 				break;
 			}
+			break;
 		case MPI25_MFGPAGE_DEVID_SAS3008:
 			switch (ioc->pdev->subsystem_device) {
 			case MPT3SAS_INTEL_RMS3JC080_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-					MPT3SAS_INTEL_RMS3JC080_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT3SAS_INTEL_RMS3JC080_BRANDING);
 				break;
 
 			case MPT3SAS_INTEL_RS3GC008_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-					MPT3SAS_INTEL_RS3GC008_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT3SAS_INTEL_RS3GC008_BRANDING);
 				break;
 			case MPT3SAS_INTEL_RS3FC044_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-					MPT3SAS_INTEL_RS3FC044_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT3SAS_INTEL_RS3FC044_BRANDING);
 				break;
 			case MPT3SAS_INTEL_RS3UC080_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-					MPT3SAS_INTEL_RS3UC080_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT3SAS_INTEL_RS3UC080_BRANDING);
 				break;
 			default:
-				pr_info(MPT3SAS_FMT
-				 "Intel(R) Controller: Subsystem ID: 0x%X\n",
-				 ioc->name, ioc->pdev->subsystem_device);
+				ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
+					 ioc->pdev->subsystem_device);
 				break;
 			}
 			break;
 		default:
-			pr_info(MPT3SAS_FMT
-			 "Intel(R) Controller: Subsystem ID: 0x%X\n",
-			 ioc->name, ioc->pdev->subsystem_device);
+			ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
+				 ioc->pdev->subsystem_device);
 			break;
 		}
 		break;
@@ -3636,57 +4089,54 @@
 		case MPI2_MFGPAGE_DEVID_SAS2008:
 			switch (ioc->pdev->subsystem_device) {
 			case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				 MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING);
 				break;
 			case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				 MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING);
 				break;
 			case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				 MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING);
 				break;
 			case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				 MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING);
 				break;
 			case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				 MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING);
 				break;
 			case MPT2SAS_DELL_PERC_H200_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				 MPT2SAS_DELL_PERC_H200_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT2SAS_DELL_PERC_H200_BRANDING);
 				break;
 			case MPT2SAS_DELL_6GBPS_SAS_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				 MPT2SAS_DELL_6GBPS_SAS_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT2SAS_DELL_6GBPS_SAS_BRANDING);
 				break;
 			default:
-				pr_info(MPT3SAS_FMT
-				   "Dell 6Gbps HBA: Subsystem ID: 0x%X\n",
-				   ioc->name, ioc->pdev->subsystem_device);
+				ioc_info(ioc, "Dell 6Gbps HBA: Subsystem ID: 0x%X\n",
+					 ioc->pdev->subsystem_device);
 				break;
 			}
 			break;
 		case MPI25_MFGPAGE_DEVID_SAS3008:
 			switch (ioc->pdev->subsystem_device) {
 			case MPT3SAS_DELL_12G_HBA_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-					MPT3SAS_DELL_12G_HBA_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT3SAS_DELL_12G_HBA_BRANDING);
 				break;
 			default:
-				pr_info(MPT3SAS_FMT
-				   "Dell 12Gbps HBA: Subsystem ID: 0x%X\n",
-				   ioc->name, ioc->pdev->subsystem_device);
+				ioc_info(ioc, "Dell 12Gbps HBA: Subsystem ID: 0x%X\n",
+					 ioc->pdev->subsystem_device);
 				break;
 			}
 			break;
 		default:
-			pr_info(MPT3SAS_FMT
-			   "Dell HBA: Subsystem ID: 0x%X\n", ioc->name,
-			   ioc->pdev->subsystem_device);
+			ioc_info(ioc, "Dell HBA: Subsystem ID: 0x%X\n",
+				 ioc->pdev->subsystem_device);
 			break;
 		}
 		break;
@@ -3695,46 +4145,42 @@
 		case MPI25_MFGPAGE_DEVID_SAS3008:
 			switch (ioc->pdev->subsystem_device) {
 			case MPT3SAS_CISCO_12G_8E_HBA_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-					MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
 				break;
 			case MPT3SAS_CISCO_12G_8I_HBA_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-					MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
 				break;
 			case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-					MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
 				break;
 			default:
-				pr_info(MPT3SAS_FMT
-				  "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
-				  ioc->name, ioc->pdev->subsystem_device);
+				ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
+					 ioc->pdev->subsystem_device);
 				break;
 			}
 			break;
 		case MPI25_MFGPAGE_DEVID_SAS3108_1:
 			switch (ioc->pdev->subsystem_device) {
 			case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
 				break;
 			case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING
-				);
+				ioc_info(ioc, "%s\n",
+					 MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING);
 				break;
 			default:
-				pr_info(MPT3SAS_FMT
-				 "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
-				 ioc->name, ioc->pdev->subsystem_device);
+				ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
+					 ioc->pdev->subsystem_device);
 				break;
 			}
 			break;
 		default:
-			pr_info(MPT3SAS_FMT
-			   "Cisco SAS HBA: Subsystem ID: 0x%X\n",
-			   ioc->name, ioc->pdev->subsystem_device);
+			ioc_info(ioc, "Cisco SAS HBA: Subsystem ID: 0x%X\n",
+				 ioc->pdev->subsystem_device);
 			break;
 		}
 		break;
@@ -3743,43 +4189,42 @@
 		case MPI2_MFGPAGE_DEVID_SAS2004:
 			switch (ioc->pdev->subsystem_device) {
 			case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				    MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
 				break;
 			default:
-				pr_info(MPT3SAS_FMT
-				   "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
-				   ioc->name, ioc->pdev->subsystem_device);
+				ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
+					 ioc->pdev->subsystem_device);
 				break;
 			}
+			break;
 		case MPI2_MFGPAGE_DEVID_SAS2308_2:
 			switch (ioc->pdev->subsystem_device) {
 			case MPT2SAS_HP_2_4_INTERNAL_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				    MPT2SAS_HP_2_4_INTERNAL_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT2SAS_HP_2_4_INTERNAL_BRANDING);
 				break;
 			case MPT2SAS_HP_2_4_EXTERNAL_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				    MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
 				break;
 			case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				 MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
 				break;
 			case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				    MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
+				ioc_info(ioc, "%s\n",
+					 MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
 				break;
 			default:
-				pr_info(MPT3SAS_FMT
-				   "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
-				   ioc->name, ioc->pdev->subsystem_device);
+				ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
+					 ioc->pdev->subsystem_device);
 				break;
 			}
+			break;
 		default:
-			pr_info(MPT3SAS_FMT
-			   "HP SAS HBA: Subsystem ID: 0x%X\n",
-			   ioc->name, ioc->pdev->subsystem_device);
+			ioc_info(ioc, "HP SAS HBA: Subsystem ID: 0x%X\n",
+				 ioc->pdev->subsystem_device);
 			break;
 		}
 	default:
@@ -3806,28 +4251,25 @@
 	u16 smid, ioc_status;
 	size_t data_length;
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-				__func__));
+	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
-		pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
-				ioc->name, __func__);
+		ioc_err(ioc, "%s: internal command already in use\n", __func__);
 		return -EAGAIN;
 	}
 
 	data_length = sizeof(Mpi2FWImageHeader_t);
-	fwpkg_data = pci_alloc_consistent(ioc->pdev, data_length,
-			&fwpkg_data_dma);
+	fwpkg_data = dma_alloc_coherent(&ioc->pdev->dev, data_length,
+			&fwpkg_data_dma, GFP_KERNEL);
 	if (!fwpkg_data) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-				ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return -ENOMEM;
 	}
 
 	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
 	if (!smid) {
-		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-				ioc->name, __func__);
+		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
 		r = -EAGAIN;
 		goto out;
 	}
@@ -3842,15 +4284,13 @@
 	ioc->build_sg(ioc, &mpi_request->SGL, 0, 0, fwpkg_data_dma,
 			data_length);
 	init_completion(&ioc->base_cmds.done);
-	mpt3sas_base_put_smid_default(ioc, smid);
+	ioc->put_smid_default(ioc, smid);
 	/* Wait for 15 seconds */
 	wait_for_completion_timeout(&ioc->base_cmds.done,
 			FW_IMG_HDR_READ_TIMEOUT*HZ);
-	pr_info(MPT3SAS_FMT "%s: complete\n",
-			ioc->name, __func__);
+	ioc_info(ioc, "%s: complete\n", __func__);
 	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
-		pr_err(MPT3SAS_FMT "%s: timeout\n",
-				ioc->name, __func__);
+		ioc_err(ioc, "%s: timeout\n", __func__);
 		_debug_dump_mf(mpi_request,
 				sizeof(Mpi25FWUploadRequest_t)/4);
 		r = -ETIME;
@@ -3864,13 +4304,11 @@
 			if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
 				FWImgHdr = (Mpi2FWImageHeader_t *)fwpkg_data;
 				if (FWImgHdr->PackageVersion.Word) {
-					pr_info(MPT3SAS_FMT "FW Package Version"
-					"(%02d.%02d.%02d.%02d)\n",
-					ioc->name,
-					FWImgHdr->PackageVersion.Struct.Major,
-					FWImgHdr->PackageVersion.Struct.Minor,
-					FWImgHdr->PackageVersion.Struct.Unit,
-					FWImgHdr->PackageVersion.Struct.Dev);
+					ioc_info(ioc, "FW Package Version (%02d.%02d.%02d.%02d)\n",
+						 FWImgHdr->PackageVersion.Struct.Major,
+						 FWImgHdr->PackageVersion.Struct.Minor,
+						 FWImgHdr->PackageVersion.Struct.Unit,
+						 FWImgHdr->PackageVersion.Struct.Dev);
 				}
 			} else {
 				_debug_dump_mf(&mpi_reply,
@@ -3881,7 +4319,7 @@
 	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
 out:
 	if (fwpkg_data)
-		pci_free_consistent(ioc->pdev, data_length, fwpkg_data,
+		dma_free_coherent(&ioc->pdev->dev, data_length, fwpkg_data,
 				fwpkg_data_dma);
 	return r;
 }
@@ -3900,18 +4338,17 @@
 
 	bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
 	strncpy(desc, ioc->manu_pg0.ChipName, 16);
-	pr_info(MPT3SAS_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "\
-	   "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
-	    ioc->name, desc,
-	   (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
-	   (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
-	   (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
-	   ioc->facts.FWVersion.Word & 0x000000FF,
-	   ioc->pdev->revision,
-	   (bios_version & 0xFF000000) >> 24,
-	   (bios_version & 0x00FF0000) >> 16,
-	   (bios_version & 0x0000FF00) >> 8,
-	    bios_version & 0x000000FF);
+	ioc_info(ioc, "%s: FWVersion(%02d.%02d.%02d.%02d), ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
+		 desc,
+		 (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
+		 (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
+		 (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
+		 ioc->facts.FWVersion.Word & 0x000000FF,
+		 ioc->pdev->revision,
+		 (bios_version & 0xFF000000) >> 24,
+		 (bios_version & 0x00FF0000) >> 16,
+		 (bios_version & 0x0000FF00) >> 8,
+		 bios_version & 0x000000FF);
 
 	_base_display_OEMs_branding(ioc);
 
@@ -3920,82 +4357,81 @@
 		i++;
 	}
 
-	pr_info(MPT3SAS_FMT "Protocol=(", ioc->name);
+	ioc_info(ioc, "Protocol=(");
 
 	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
-		pr_info("Initiator");
+		pr_cont("Initiator");
 		i++;
 	}
 
 	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
-		pr_info("%sTarget", i ? "," : "");
+		pr_cont("%sTarget", i ? "," : "");
 		i++;
 	}
 
 	i = 0;
-	pr_info("), ");
-	pr_info("Capabilities=(");
+	pr_cont("), Capabilities=(");
 
 	if (!ioc->hide_ir_msg) {
 		if (ioc->facts.IOCCapabilities &
 		    MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
-			pr_info("Raid");
+			pr_cont("Raid");
 			i++;
 		}
 	}
 
 	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
-		pr_info("%sTLR", i ? "," : "");
+		pr_cont("%sTLR", i ? "," : "");
 		i++;
 	}
 
 	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
-		pr_info("%sMulticast", i ? "," : "");
+		pr_cont("%sMulticast", i ? "," : "");
 		i++;
 	}
 
 	if (ioc->facts.IOCCapabilities &
 	    MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
-		pr_info("%sBIDI Target", i ? "," : "");
+		pr_cont("%sBIDI Target", i ? "," : "");
 		i++;
 	}
 
 	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
-		pr_info("%sEEDP", i ? "," : "");
+		pr_cont("%sEEDP", i ? "," : "");
 		i++;
 	}
 
 	if (ioc->facts.IOCCapabilities &
 	    MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
-		pr_info("%sSnapshot Buffer", i ? "," : "");
+		pr_cont("%sSnapshot Buffer", i ? "," : "");
 		i++;
 	}
 
 	if (ioc->facts.IOCCapabilities &
 	    MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
-		pr_info("%sDiag Trace Buffer", i ? "," : "");
+		pr_cont("%sDiag Trace Buffer", i ? "," : "");
 		i++;
 	}
 
 	if (ioc->facts.IOCCapabilities &
 	    MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
-		pr_info("%sDiag Extended Buffer", i ? "," : "");
+		pr_cont("%sDiag Extended Buffer", i ? "," : "");
 		i++;
 	}
 
 	if (ioc->facts.IOCCapabilities &
 	    MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
-		pr_info("%sTask Set Full", i ? "," : "");
+		pr_cont("%sTask Set Full", i ? "," : "");
 		i++;
 	}
 
 	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
 	if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
-		pr_info("%sNCQ", i ? "," : "");
+		pr_cont("%sNCQ", i ? "," : "");
 		i++;
 	}
 
-	pr_info(")\n");
+	pr_cont(")\n");
 }
 
 /**
@@ -4028,21 +4464,21 @@
 	    sizeof(Mpi2SasIOUnit1PhyData_t));
 	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
 	if (!sas_iounit_pg1) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		goto out;
 	}
 	if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
 	    sas_iounit_pg1, sz))) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		goto out;
 	}
 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 	    MPI2_IOCSTATUS_MASK;
 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		goto out;
 	}
 
@@ -4074,11 +4510,11 @@
 		else
 			dmd_new =
 		    dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
-		pr_info(MPT3SAS_FMT "device_missing_delay: old(%d), new(%d)\n",
-			ioc->name, dmd_orignal, dmd_new);
-		pr_info(MPT3SAS_FMT "ioc_missing_delay: old(%d), new(%d)\n",
-			ioc->name, io_missing_delay_original,
-		    io_missing_delay);
+		ioc_info(ioc, "device_missing_delay: old(%d), new(%d)\n",
+			 dmd_orignal, dmd_new);
+		ioc_info(ioc, "ioc_missing_delay: old(%d), new(%d)\n",
+			 io_missing_delay_original,
+			 io_missing_delay);
 		ioc->device_missing_delay = dmd_new;
 		ioc->io_missing_delay = io_missing_delay;
 	}
@@ -4088,6 +4524,71 @@
 }
 
 /**
+ * _base_update_ioc_page1_inlinewith_perf_mode - Update IOC Page1 fields
+ *    according to performance mode.
+ * @ioc: per adapter object
+ *
+ * Return: nothing.
+ */
+static void
+_base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc)
+{
+	Mpi2IOCPage1_t ioc_pg1;
+	Mpi2ConfigReply_t mpi_reply;
+
+	mpt3sas_config_get_ioc_pg1(ioc, &mpi_reply, &ioc->ioc_pg1_copy);
+	memcpy(&ioc_pg1, &ioc->ioc_pg1_copy, sizeof(Mpi2IOCPage1_t));
+
+	switch (perf_mode) {
+	case MPT_PERF_MODE_DEFAULT:
+	case MPT_PERF_MODE_BALANCED:
+		if (ioc->high_iops_queues) {
+			ioc_info(ioc,
+				"Enable interrupt coalescing only for first "
+				"%d reply queues\n",
+				MPT3SAS_HIGH_IOPS_REPLY_QUEUES);
+			/*
+			 * If the 31st bit is zero, interrupt coalescing is
+			 * enabled for all reply descriptor post queues.
+			 * If the 31st bit is set, interrupt coalescing can be
+			 * enabled/disabled per reply descriptor post queue
+			 * group (of 8). So to enable interrupt coalescing only
+			 * on the first reply descriptor post queue group, the
+			 * 31st bit and the zeroth bit are set.
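+			 *
+			 * For example, assuming MPT3SAS_HIGH_IOPS_REPLY_QUEUES
+			 * is 8, the value below works out to
+			 * 0x80000000 | ((1 << (8 / 8)) - 1) = 0x80000001,
+			 * i.e. bit 31 plus bit 0 (first queue group only).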
+			 */
+			ioc_pg1.ProductSpecific = cpu_to_le32(0x80000000 |
+			    ((1 << MPT3SAS_HIGH_IOPS_REPLY_QUEUES/8) - 1));
+			mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
+			ioc_info(ioc, "performance mode: balanced\n");
+			return;
+		}
+		/* Fall through */
+	case MPT_PERF_MODE_LATENCY:
+		/*
+		 * Enable interrupt coalescing on all reply queues
+		 * with timeout value 0xA
+		 */
+		ioc_pg1.CoalescingTimeout = cpu_to_le32(0xa);
+		ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
+		ioc_pg1.ProductSpecific = 0;
+		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
+		ioc_info(ioc, "performance mode: latency\n");
+		break;
+	case MPT_PERF_MODE_IOPS:
+		/*
+		 * Enable interrupt coalescing on all reply queues.
+		 */
+		ioc_info(ioc,
+		    "performance mode: iops with coalescing timeout: 0x%x\n",
+		    le32_to_cpu(ioc_pg1.CoalescingTimeout));
+		ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
+		ioc_pg1.ProductSpecific = 0;
+		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
+		break;
+	}
+}
+
+/**
  * _base_static_config_pages - static start of day config pages
  * @ioc: per adapter object
  */
@@ -4108,7 +4609,7 @@
 	 * flag unset in NVDATA.
 	 */
 	mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11);
-	if (ioc->manu_pg11.EEDPTagMode == 0) {
+	if (!ioc->is_gen35_ioc && ioc->manu_pg11.EEDPTagMode == 0) {
 		pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
 		    ioc->name);
 		ioc->manu_pg11.EEDPTagMode &= ~0x3;
@@ -4154,6 +4655,8 @@
 
 	if (ioc->iounit_pg8.NumSensors)
 		ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
+	if (ioc->is_aero_ioc)
+		_base_update_ioc_page1_inlinewith_perf_mode(ioc);
 }
 
 /**
@@ -4189,33 +4692,32 @@
 	struct chain_tracker *ct;
 	struct reply_post_struct *rps;
 
-	dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	if (ioc->request) {
-		pci_free_consistent(ioc->pdev, ioc->request_dma_sz,
+		dma_free_coherent(&ioc->pdev->dev, ioc->request_dma_sz,
 		    ioc->request,  ioc->request_dma);
-		dexitprintk(ioc, pr_info(MPT3SAS_FMT
-			"request_pool(0x%p): free\n",
-			ioc->name, ioc->request));
+		dexitprintk(ioc,
+			    ioc_info(ioc, "request_pool(0x%p): free\n",
+				     ioc->request));
 		ioc->request = NULL;
 	}
 
 	if (ioc->sense) {
 		dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
 		dma_pool_destroy(ioc->sense_dma_pool);
-		dexitprintk(ioc, pr_info(MPT3SAS_FMT
-			"sense_pool(0x%p): free\n",
-			ioc->name, ioc->sense));
+		dexitprintk(ioc,
+			    ioc_info(ioc, "sense_pool(0x%p): free\n",
+				     ioc->sense));
 		ioc->sense = NULL;
 	}
 
 	if (ioc->reply) {
 		dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
 		dma_pool_destroy(ioc->reply_dma_pool);
-		dexitprintk(ioc, pr_info(MPT3SAS_FMT
-			"reply_pool(0x%p): free\n",
-			ioc->name, ioc->reply));
+		dexitprintk(ioc,
+			    ioc_info(ioc, "reply_pool(0x%p): free\n",
+				     ioc->reply));
 		ioc->reply = NULL;
 	}
 
@@ -4223,9 +4725,9 @@
 		dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
 		    ioc->reply_free_dma);
 		dma_pool_destroy(ioc->reply_free_dma_pool);
-		dexitprintk(ioc, pr_info(MPT3SAS_FMT
-			"reply_free_pool(0x%p): free\n",
-			ioc->name, ioc->reply_free));
+		dexitprintk(ioc,
+			    ioc_info(ioc, "reply_free_pool(0x%p): free\n",
+				     ioc->reply_free));
 		ioc->reply_free = NULL;
 	}
 
@@ -4237,9 +4739,9 @@
 				    ioc->reply_post_free_dma_pool,
 				    rps->reply_post_free,
 				    rps->reply_post_free_dma);
-				dexitprintk(ioc, pr_info(MPT3SAS_FMT
-				    "reply_post_free_pool(0x%p): free\n",
-				    ioc->name, rps->reply_post_free));
+				dexitprintk(ioc,
+					    ioc_info(ioc, "reply_post_free_pool(0x%p): free\n",
+						     rps->reply_post_free));
 				rps->reply_post_free = NULL;
 			}
 		} while (ioc->rdpq_array_enable &&
@@ -4267,10 +4769,10 @@
 	}
 
 	if (ioc->config_page) {
-		dexitprintk(ioc, pr_info(MPT3SAS_FMT
-		    "config_page(0x%p): free\n", ioc->name,
-		    ioc->config_page));
-		pci_free_consistent(ioc->pdev, ioc->config_page_sz,
+		dexitprintk(ioc,
+			    ioc_info(ioc, "config_page(0x%p): free\n",
+				     ioc->config_page));
+		dma_free_coherent(&ioc->pdev->dev, ioc->config_page_sz,
 		    ioc->config_page, ioc->config_page_dma);
 	}
 
@@ -4338,8 +4840,7 @@
 	int i, j;
 	struct chain_tracker *ct;
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 
 	retry_sz = 0;
@@ -4368,10 +4869,8 @@
 		else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
 			sg_tablesize = min_t(unsigned short, sg_tablesize,
 					SG_MAX_SEGMENTS);
-			pr_warn(MPT3SAS_FMT
-				"sg_tablesize(%u) is bigger than kernel "
-				"defined SG_CHUNK_SIZE(%u)\n", ioc->name,
-				sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
+			ioc_warn(ioc, "sg_tablesize(%u) is bigger than kernel defined SG_CHUNK_SIZE(%u)\n",
+				 sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
 		}
 		ioc->shost->sg_tablesize = sg_tablesize;
 	}
@@ -4381,9 +4880,8 @@
 	if (ioc->internal_depth < INTERNAL_CMDS_COUNT) {
 		if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT +
 				INTERNAL_SCSIIO_CMDS_COUNT)) {
-			pr_err(MPT3SAS_FMT "IOC doesn't have enough Request \
-			    Credits, it has just %d number of credits\n",
-			    ioc->name, facts->RequestCredit);
+			ioc_err(ioc, "IOC doesn't have enough Request Credits, it has just %d number of credits\n",
+				facts->RequestCredit);
 			return -ENOMEM;
 		}
 		ioc->internal_depth = 10;
@@ -4482,11 +4980,12 @@
 		ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
 	}
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "scatter gather: " \
-	    "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
-	    "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message,
-	    ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize,
-	    ioc->chains_needed_per_io));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "scatter gather: sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), chains_per_io(%d)\n",
+			     ioc->max_sges_in_main_message,
+			     ioc->max_sges_in_chain_message,
+			     ioc->shost->sg_tablesize,
+			     ioc->chains_needed_per_io));
 
 	/* reply post queue, 16 byte align */
 	reply_post_free_sz = ioc->reply_post_queue_depth *
@@ -4501,48 +5000,40 @@
 	    sizeof(struct reply_post_struct), GFP_KERNEL);
 
 	if (!ioc->reply_post) {
-		pr_err(MPT3SAS_FMT "reply_post_free pool: kcalloc failed\n",
-			ioc->name);
+		ioc_err(ioc, "reply_post_free pool: kcalloc failed\n");
 		goto out;
 	}
 	ioc->reply_post_free_dma_pool = dma_pool_create("reply_post_free pool",
 	    &ioc->pdev->dev, sz, 16, 0);
 	if (!ioc->reply_post_free_dma_pool) {
-		pr_err(MPT3SAS_FMT
-		 "reply_post_free pool: dma_pool_create failed\n",
-		 ioc->name);
+		ioc_err(ioc, "reply_post_free pool: dma_pool_create failed\n");
 		goto out;
 	}
 	i = 0;
 	do {
 		ioc->reply_post[i].reply_post_free =
-		    dma_pool_alloc(ioc->reply_post_free_dma_pool,
+		    dma_pool_zalloc(ioc->reply_post_free_dma_pool,
 		    GFP_KERNEL,
 		    &ioc->reply_post[i].reply_post_free_dma);
 		if (!ioc->reply_post[i].reply_post_free) {
-			pr_err(MPT3SAS_FMT
-			"reply_post_free pool: dma_pool_alloc failed\n",
-			ioc->name);
+			ioc_err(ioc, "reply_post_free pool: dma_pool_alloc failed\n");
 			goto out;
 		}
-		memset(ioc->reply_post[i].reply_post_free, 0, sz);
-		dinitprintk(ioc, pr_info(MPT3SAS_FMT
-		    "reply post free pool (0x%p): depth(%d),"
-		    "element_size(%d), pool_size(%d kB)\n", ioc->name,
-		    ioc->reply_post[i].reply_post_free,
-		    ioc->reply_post_queue_depth, 8, sz/1024));
-		dinitprintk(ioc, pr_info(MPT3SAS_FMT
-		    "reply_post_free_dma = (0x%llx)\n", ioc->name,
-		    (unsigned long long)
-		    ioc->reply_post[i].reply_post_free_dma));
+		dinitprintk(ioc,
+			    ioc_info(ioc, "reply post free pool (0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
+				     ioc->reply_post[i].reply_post_free,
+				     ioc->reply_post_queue_depth,
+				     8, sz / 1024));
+		dinitprintk(ioc,
+			    ioc_info(ioc, "reply_post_free_dma = (0x%llx)\n",
+				     (u64)ioc->reply_post[i].reply_post_free_dma));
 		total_sz += sz;
 	} while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
 
-	if (ioc->dma_mask == 64) {
+	if (ioc->dma_mask > 32) {
 		if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
-			pr_warn(MPT3SAS_FMT
-			    "no suitable consistent DMA mask for %s\n",
-			    ioc->name, pci_name(ioc->pdev));
+			ioc_warn(ioc, "no suitable consistent DMA mask for %s\n",
+				 pci_name(ioc->pdev));
 			goto out;
 		}
 	}
@@ -4554,9 +5045,9 @@
 	 * with some internal commands that could be outstanding
 	 */
 	ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT;
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT
-		"scsi host: can_queue depth (%d)\n",
-		ioc->name, ioc->shost->can_queue));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "scsi host: can_queue depth (%d)\n",
+			     ioc->shost->can_queue));
 
 
 	/* contiguous pool for request and chains, 16 byte align, one extra "
@@ -4572,12 +5063,12 @@
 	sz += (ioc->internal_depth * ioc->request_sz);
 
 	ioc->request_dma_sz = sz;
-	ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma);
+	ioc->request = dma_alloc_coherent(&ioc->pdev->dev, sz,
+			&ioc->request_dma, GFP_KERNEL);
 	if (!ioc->request) {
-		pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
-		    "failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
-		    "total(%d kB)\n", ioc->name, ioc->hba_queue_depth,
-		    ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
+		ioc_err(ioc, "request pool: dma_alloc_coherent failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kB)\n",
+			ioc->hba_queue_depth, ioc->chains_needed_per_io,
+			ioc->request_sz, sz / 1024);
 		if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
 			goto out;
 		retry_sz = 64;
@@ -4585,12 +5076,12 @@
 		_base_release_memory_pools(ioc);
 		goto retry_allocation;
 	}
+	memset(ioc->request, 0, sz);
 
 	if (retry_sz)
-		pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
-		    "succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
-		    "total(%d kb)\n", ioc->name, ioc->hba_queue_depth,
-		    ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
+		ioc_err(ioc, "request pool: dma_alloc_coherent succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kb)\n",
+			ioc->hba_queue_depth, ioc->chains_needed_per_io,
+			ioc->request_sz, sz / 1024);
 
 	/* hi-priority queue */
 	ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
@@ -4604,24 +5095,26 @@
 	ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
 	    ioc->request_sz);
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT
-		"request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
-		ioc->name, ioc->request, ioc->hba_queue_depth, ioc->request_sz,
-	    (ioc->hba_queue_depth * ioc->request_sz)/1024));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
+			     ioc->request, ioc->hba_queue_depth,
+			     ioc->request_sz,
+			     (ioc->hba_queue_depth * ioc->request_sz) / 1024));
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "request pool: dma(0x%llx)\n",
-	    ioc->name, (unsigned long long) ioc->request_dma));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "request pool: dma(0x%llx)\n",
+			     (unsigned long long)ioc->request_dma));
 	total_sz += sz;
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "scsiio(0x%p): depth(%d)\n",
-		ioc->name, ioc->request, ioc->scsiio_depth));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "scsiio(0x%p): depth(%d)\n",
+			     ioc->request, ioc->scsiio_depth));
 
 	ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
 	sz = ioc->scsiio_depth * sizeof(struct chain_lookup);
 	ioc->chain_lookup = kzalloc(sz, GFP_KERNEL);
 	if (!ioc->chain_lookup) {
-		pr_err(MPT3SAS_FMT "chain_lookup: __get_free_pages "
-				"failed\n", ioc->name);
+		ioc_err(ioc, "chain_lookup: __get_free_pages failed\n");
 		goto out;
 	}
 
@@ -4629,8 +5122,7 @@
 	for (i = 0; i < ioc->scsiio_depth; i++) {
 		ioc->chain_lookup[i].chains_per_smid = kzalloc(sz, GFP_KERNEL);
 		if (!ioc->chain_lookup[i].chains_per_smid) {
-			pr_err(MPT3SAS_FMT "chain_lookup: "
-					" kzalloc failed\n", ioc->name);
+			ioc_err(ioc, "chain_lookup: kzalloc failed\n");
 			goto out;
 		}
 	}
@@ -4639,29 +5131,27 @@
 	ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
 	    sizeof(struct request_tracker), GFP_KERNEL);
 	if (!ioc->hpr_lookup) {
-		pr_err(MPT3SAS_FMT "hpr_lookup: kcalloc failed\n",
-		    ioc->name);
+		ioc_err(ioc, "hpr_lookup: kcalloc failed\n");
 		goto out;
 	}
 	ioc->hi_priority_smid = ioc->scsiio_depth + 1;
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT
-		"hi_priority(0x%p): depth(%d), start smid(%d)\n",
-		ioc->name, ioc->hi_priority,
-	    ioc->hi_priority_depth, ioc->hi_priority_smid));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "hi_priority(0x%p): depth(%d), start smid(%d)\n",
+			     ioc->hi_priority,
+			     ioc->hi_priority_depth, ioc->hi_priority_smid));
 
 	/* initialize internal queue smid's */
 	ioc->internal_lookup = kcalloc(ioc->internal_depth,
 	    sizeof(struct request_tracker), GFP_KERNEL);
 	if (!ioc->internal_lookup) {
-		pr_err(MPT3SAS_FMT "internal_lookup: kcalloc failed\n",
-		    ioc->name);
+		ioc_err(ioc, "internal_lookup: kcalloc failed\n");
 		goto out;
 	}
 	ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT
-		"internal(0x%p): depth(%d), start smid(%d)\n",
-		ioc->name, ioc->internal,
-	    ioc->internal_depth, ioc->internal_smid));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "internal(0x%p): depth(%d), start smid(%d)\n",
+			     ioc->internal,
+			     ioc->internal_depth, ioc->internal_smid));
 	/*
 	 * The number of NVMe page sized blocks needed is:
 	 *     (((sg_tablesize * 8) - 1) / (page_size - 8)) + 1
@@ -4685,17 +5175,14 @@
 		sz = sizeof(struct pcie_sg_list) * ioc->scsiio_depth;
 		ioc->pcie_sg_lookup = kzalloc(sz, GFP_KERNEL);
 		if (!ioc->pcie_sg_lookup) {
-			pr_info(MPT3SAS_FMT
-			    "PCIe SGL lookup: kzalloc failed\n", ioc->name);
+			ioc_info(ioc, "PCIe SGL lookup: kzalloc failed\n");
 			goto out;
 		}
 		sz = nvme_blocks_needed * ioc->page_size;
 		ioc->pcie_sgl_dma_pool =
 			dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, 16, 0);
 		if (!ioc->pcie_sgl_dma_pool) {
-			pr_info(MPT3SAS_FMT
-			    "PCIe SGL pool: dma_pool_create failed\n",
-			    ioc->name);
+			ioc_info(ioc, "PCIe SGL pool: dma_pool_create failed\n");
 			goto out;
 		}
 
@@ -4708,9 +5195,7 @@
 				ioc->pcie_sgl_dma_pool, GFP_KERNEL,
 				&ioc->pcie_sg_lookup[i].pcie_sgl_dma);
 			if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
-				pr_info(MPT3SAS_FMT
-				    "PCIe SGL pool: dma_pool_alloc failed\n",
-				    ioc->name);
+				ioc_info(ioc, "PCIe SGL pool: dma_pool_alloc failed\n");
 				goto out;
 			}
 			for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
@@ -4724,20 +5209,20 @@
 			}
 		}
 
-		dinitprintk(ioc, pr_info(MPT3SAS_FMT "PCIe sgl pool depth(%d), "
-			"element_size(%d), pool_size(%d kB)\n", ioc->name,
-			ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024));
-		dinitprintk(ioc, pr_info(MPT3SAS_FMT "Number of chains can "
-		    "fit in a PRP page(%d)\n", ioc->name,
-		    ioc->chains_per_prp_buffer));
+		dinitprintk(ioc,
+			    ioc_info(ioc, "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n",
+				     ioc->scsiio_depth, sz,
+				     (sz * ioc->scsiio_depth) / 1024));
+		dinitprintk(ioc,
+			    ioc_info(ioc, "Number of chains can fit in a PRP page(%d)\n",
+				     ioc->chains_per_prp_buffer));
 		total_sz += sz * ioc->scsiio_depth;
 	}
 
 	ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
 	    ioc->chain_segment_sz, 16, 0);
 	if (!ioc->chain_dma_pool) {
-		pr_err(MPT3SAS_FMT "chain_dma_pool: dma_pool_create failed\n",
-			ioc->name);
+		ioc_err(ioc, "chain_dma_pool: dma_pool_create failed\n");
 		goto out;
 	}
 	for (i = 0; i < ioc->scsiio_depth; i++) {
@@ -4748,8 +5233,7 @@
 					ioc->chain_dma_pool, GFP_KERNEL,
 					&ct->chain_buffer_dma);
 			if (!ct->chain_buffer) {
-				pr_err(MPT3SAS_FMT "chain_lookup: "
-				" pci_pool_alloc failed\n", ioc->name);
+				ioc_err(ioc, "chain_lookup: pci_pool_alloc failed\n");
 				_base_release_memory_pools(ioc);
 				goto out;
 			}
@@ -4757,25 +5241,23 @@
 		total_sz += ioc->chain_segment_sz;
 	}
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT
-		"chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
-		ioc->name, ioc->chain_depth, ioc->chain_segment_sz,
-		((ioc->chain_depth *  ioc->chain_segment_sz))/1024));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
+			     ioc->chain_depth, ioc->chain_segment_sz,
+			     (ioc->chain_depth * ioc->chain_segment_sz) / 1024));
 
 	/* sense buffers, 4 byte align */
 	sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
 	ioc->sense_dma_pool = dma_pool_create("sense pool", &ioc->pdev->dev, sz,
 					      4, 0);
 	if (!ioc->sense_dma_pool) {
-		pr_err(MPT3SAS_FMT "sense pool: dma_pool_create failed\n",
-		    ioc->name);
+		ioc_err(ioc, "sense pool: dma_pool_create failed\n");
 		goto out;
 	}
 	ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
 	    &ioc->sense_dma);
 	if (!ioc->sense) {
-		pr_err(MPT3SAS_FMT "sense pool: dma_pool_alloc failed\n",
-		    ioc->name);
+		ioc_err(ioc, "sense pool: dma_pool_alloc failed\n");
 		goto out;
 	}
 	/* sense buffer requires to be in same 4 gb region.
@@ -4797,24 +5279,23 @@
 			dma_pool_create("sense pool", &ioc->pdev->dev, sz,
 						roundup_pow_of_two(sz), 0);
 		if (!ioc->sense_dma_pool) {
-			pr_err(MPT3SAS_FMT "sense pool: pci_pool_create failed\n",
-					ioc->name);
+			ioc_err(ioc, "sense pool: pci_pool_create failed\n");
 			goto out;
 		}
 		ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
 				&ioc->sense_dma);
 		if (!ioc->sense) {
-			pr_err(MPT3SAS_FMT "sense pool: pci_pool_alloc failed\n",
-					ioc->name);
+			ioc_err(ioc, "sense pool: pci_pool_alloc failed\n");
 			goto out;
 		}
 	}
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT
-	    "sense pool(0x%p): depth(%d), element_size(%d), pool_size"
-	    "(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth,
-	    SCSI_SENSE_BUFFERSIZE, sz/1024));
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "sense_dma(0x%llx)\n",
-	    ioc->name, (unsigned long long)ioc->sense_dma));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "sense pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
+			     ioc->sense, ioc->scsiio_depth,
+			     SCSI_SENSE_BUFFERSIZE, sz / 1024));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "sense_dma(0x%llx)\n",
+			     (unsigned long long)ioc->sense_dma));
 	total_sz += sz;
 
 	/* reply pool, 4 byte align */
@@ -4822,25 +5303,24 @@
 	ioc->reply_dma_pool = dma_pool_create("reply pool", &ioc->pdev->dev, sz,
 					      4, 0);
 	if (!ioc->reply_dma_pool) {
-		pr_err(MPT3SAS_FMT "reply pool: dma_pool_create failed\n",
-		    ioc->name);
+		ioc_err(ioc, "reply pool: dma_pool_create failed\n");
 		goto out;
 	}
 	ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
 	    &ioc->reply_dma);
 	if (!ioc->reply) {
-		pr_err(MPT3SAS_FMT "reply pool: dma_pool_alloc failed\n",
-		    ioc->name);
+		ioc_err(ioc, "reply pool: dma_pool_alloc failed\n");
 		goto out;
 	}
 	ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
 	ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT
-		"reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
-		ioc->name, ioc->reply,
-	    ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024));
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_dma(0x%llx)\n",
-	    ioc->name, (unsigned long long)ioc->reply_dma));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
+			     ioc->reply, ioc->reply_free_queue_depth,
+			     ioc->reply_sz, sz / 1024));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "reply_dma(0x%llx)\n",
+			     (unsigned long long)ioc->reply_dma));
 	total_sz += sz;
 
 	/* reply free queue, 16 byte align */
@@ -4848,24 +5328,22 @@
 	ioc->reply_free_dma_pool = dma_pool_create("reply_free pool",
 	    &ioc->pdev->dev, sz, 16, 0);
 	if (!ioc->reply_free_dma_pool) {
-		pr_err(MPT3SAS_FMT "reply_free pool: dma_pool_create failed\n",
-			ioc->name);
+		ioc_err(ioc, "reply_free pool: dma_pool_create failed\n");
 		goto out;
 	}
-	ioc->reply_free = dma_pool_alloc(ioc->reply_free_dma_pool, GFP_KERNEL,
+	ioc->reply_free = dma_pool_zalloc(ioc->reply_free_dma_pool, GFP_KERNEL,
 	    &ioc->reply_free_dma);
 	if (!ioc->reply_free) {
-		pr_err(MPT3SAS_FMT "reply_free pool: dma_pool_alloc failed\n",
-			ioc->name);
+		ioc_err(ioc, "reply_free pool: dma_pool_alloc failed\n");
 		goto out;
 	}
-	memset(ioc->reply_free, 0, sz);
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_free pool(0x%p): " \
-	    "depth(%d), element_size(%d), pool_size(%d kB)\n", ioc->name,
-	    ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT
-		"reply_free_dma (0x%llx)\n",
-		ioc->name, (unsigned long long)ioc->reply_free_dma));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "reply_free pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
+			     ioc->reply_free, ioc->reply_free_queue_depth,
+			     4, sz / 1024));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "reply_free_dma (0x%llx)\n",
+			     (unsigned long long)ioc->reply_free_dma));
 	total_sz += sz;
 
 	if (ioc->rdpq_array_enable) {
@@ -4876,8 +5354,7 @@
 		    &ioc->pdev->dev, reply_post_free_array_sz, 16, 0);
 		if (!ioc->reply_post_free_array_dma_pool) {
 			dinitprintk(ioc,
-			    pr_info(MPT3SAS_FMT "reply_post_free_array pool: "
-			    "dma_pool_create failed\n", ioc->name));
+				    ioc_info(ioc, "reply_post_free_array pool: dma_pool_create failed\n"));
 			goto out;
 		}
 		ioc->reply_post_free_array =
@@ -4885,34 +5362,31 @@
 		    GFP_KERNEL, &ioc->reply_post_free_array_dma);
 		if (!ioc->reply_post_free_array) {
 			dinitprintk(ioc,
-			    pr_info(MPT3SAS_FMT "reply_post_free_array pool: "
-			    "dma_pool_alloc failed\n", ioc->name));
+				    ioc_info(ioc, "reply_post_free_array pool: dma_pool_alloc failed\n"));
 			goto out;
 		}
 	}
 	ioc->config_page_sz = 512;
-	ioc->config_page = pci_alloc_consistent(ioc->pdev,
-	    ioc->config_page_sz, &ioc->config_page_dma);
+	ioc->config_page = dma_alloc_coherent(&ioc->pdev->dev,
+			ioc->config_page_sz, &ioc->config_page_dma, GFP_KERNEL);
 	if (!ioc->config_page) {
-		pr_err(MPT3SAS_FMT
-			"config page: dma_pool_alloc failed\n",
-			ioc->name);
+		ioc_err(ioc, "config page: dma_pool_alloc failed\n");
 		goto out;
 	}
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT
-		"config page(0x%p): size(%d)\n",
-		ioc->name, ioc->config_page, ioc->config_page_sz));
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "config_page_dma(0x%llx)\n",
-		ioc->name, (unsigned long long)ioc->config_page_dma));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "config page(0x%p): size(%d)\n",
+			     ioc->config_page, ioc->config_page_sz));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "config_page_dma(0x%llx)\n",
+			     (unsigned long long)ioc->config_page_dma));
 	total_sz += ioc->config_page_sz;
 
-	pr_info(MPT3SAS_FMT "Allocated physical memory: size(%d kB)\n",
-	    ioc->name, total_sz/1024);
-	pr_info(MPT3SAS_FMT
-		"Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n",
-	    ioc->name, ioc->shost->can_queue, facts->RequestCredit);
-	pr_info(MPT3SAS_FMT "Scatter Gather Elements per IO(%d)\n",
-	    ioc->name, ioc->shost->sg_tablesize);
+	ioc_info(ioc, "Allocated physical memory: size(%d kB)\n",
+		 total_sz / 1024);
+	ioc_info(ioc, "Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n",
+		 ioc->shost->can_queue, facts->RequestCredit);
+	ioc_info(ioc, "Scatter Gather Elements per IO(%d)\n",
+		 ioc->shost->sg_tablesize);
 	return 0;
 
  out:
@@ -4932,7 +5406,7 @@
 {
 	u32 s, sc;
 
-	s = readl(&ioc->chip->Doorbell);
+	s = ioc->base_readl(&ioc->chip->Doorbell);
 	sc = s & MPI2_IOC_STATE_MASK;
 	return cooked ? sc : s;
 }
@@ -4976,8 +5450,6 @@
  *
  * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
  */
-static int
-_base_diag_reset(struct MPT3SAS_ADAPTER *ioc);
 
 static int
 _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
@@ -4988,11 +5460,11 @@
 	count = 0;
 	cntdn = 1000 * timeout;
 	do {
-		int_status = readl(&ioc->chip->HostInterruptStatus);
+		int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
 		if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
-			dhsprintk(ioc, pr_info(MPT3SAS_FMT
-				"%s: successful count(%d), timeout(%d)\n",
-				ioc->name, __func__, count, timeout));
+			dhsprintk(ioc,
+				  ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
+					   __func__, count, timeout));
 			return 0;
 		}
 
@@ -5000,9 +5472,8 @@
 		count++;
 	} while (--cntdn);
 
-	pr_err(MPT3SAS_FMT
-		"%s: failed due to timeout count(%d), int_status(%x)!\n",
-		ioc->name, __func__, count, int_status);
+	ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
+		__func__, count, int_status);
 	return -EFAULT;
 }
 
@@ -5015,11 +5486,11 @@
 	count = 0;
 	cntdn = 2000 * timeout;
 	do {
-		int_status = readl(&ioc->chip->HostInterruptStatus);
+		int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
 		if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
-			dhsprintk(ioc, pr_info(MPT3SAS_FMT
-				"%s: successful count(%d), timeout(%d)\n",
-				ioc->name, __func__, count, timeout));
+			dhsprintk(ioc,
+				  ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
+					   __func__, count, timeout));
 			return 0;
 		}
 
@@ -5027,9 +5498,8 @@
 		count++;
 	} while (--cntdn);
 
-	pr_err(MPT3SAS_FMT
-		"%s: failed due to timeout count(%d), int_status(%x)!\n",
-		ioc->name, __func__, count, int_status);
+	ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
+		__func__, count, int_status);
 	return -EFAULT;
 
 }
@@ -5054,14 +5524,14 @@
 	count = 0;
 	cntdn = 1000 * timeout;
 	do {
-		int_status = readl(&ioc->chip->HostInterruptStatus);
+		int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
 		if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
-			dhsprintk(ioc, pr_info(MPT3SAS_FMT
-				"%s: successful count(%d), timeout(%d)\n",
-				ioc->name, __func__, count, timeout));
+			dhsprintk(ioc,
+				  ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
+					   __func__, count, timeout));
 			return 0;
 		} else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
-			doorbell = readl(&ioc->chip->Doorbell);
+			doorbell = ioc->base_readl(&ioc->chip->Doorbell);
 			if ((doorbell & MPI2_IOC_STATE_MASK) ==
 			    MPI2_IOC_STATE_FAULT) {
 				mpt3sas_base_fault_info(ioc , doorbell);
@@ -5075,9 +5545,8 @@
 	} while (--cntdn);
 
  out:
-	pr_err(MPT3SAS_FMT
-	 "%s: failed due to timeout count(%d), int_status(%x)!\n",
-	 ioc->name, __func__, count, int_status);
+	ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
+		__func__, count, int_status);
 	return -EFAULT;
 }
 
@@ -5097,11 +5566,11 @@
 	count = 0;
 	cntdn = 1000 * timeout;
 	do {
-		doorbell_reg = readl(&ioc->chip->Doorbell);
+		doorbell_reg = ioc->base_readl(&ioc->chip->Doorbell);
 		if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
-			dhsprintk(ioc, pr_info(MPT3SAS_FMT
-				"%s: successful count(%d), timeout(%d)\n",
-				ioc->name, __func__, count, timeout));
+			dhsprintk(ioc,
+				  ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
+					   __func__, count, timeout));
 			return 0;
 		}
 
@@ -5109,9 +5578,8 @@
 		count++;
 	} while (--cntdn);
 
-	pr_err(MPT3SAS_FMT
-		"%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
-		ioc->name, __func__, count, doorbell_reg);
+	ioc_err(ioc, "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
+		__func__, count, doorbell_reg);
 	return -EFAULT;
 }
 
@@ -5130,8 +5598,7 @@
 	int r = 0;
 
 	if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
-		pr_err(MPT3SAS_FMT "%s: unknown reset_type\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: unknown reset_type\n", __func__);
 		return -EFAULT;
 	}
 
@@ -5139,7 +5606,7 @@
 	   MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
 		return -EFAULT;
 
-	pr_info(MPT3SAS_FMT "sending message unit reset !!\n", ioc->name);
+	ioc_info(ioc, "sending message unit reset !!\n");
 
 	writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
 	    &ioc->chip->Doorbell);
@@ -5149,19 +5616,51 @@
 	}
 	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
 	if (ioc_state) {
-		pr_err(MPT3SAS_FMT
-			"%s: failed going to ready state (ioc_state=0x%x)\n",
-			ioc->name, __func__, ioc_state);
+		ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
+			__func__, ioc_state);
 		r = -EFAULT;
 		goto out;
 	}
  out:
-	pr_info(MPT3SAS_FMT "message unit reset: %s\n",
-	    ioc->name, ((r == 0) ? "SUCCESS" : "FAILED"));
+	ioc_info(ioc, "message unit reset: %s\n",
+		 r == 0 ? "SUCCESS" : "FAILED");
 	return r;
 }
 
 /**
+ * mpt3sas_wait_for_ioc - wait for the IOC to reach operational state
+ * @ioc: per adapter object
+ * @timeout: timeout in seconds
+ *
+ * Waits up to @timeout seconds for the IOC to become operational.
+ *
+ * Return: 0 if the IOC is present and operational, else -EFAULT.
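+ *
+ * Callers typically poll for the operational state before issuing an
+ * internal command, e.g.:
+ *
+ *	rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
+ *	if (rc)
+ *		goto out;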
+ */
+
+int
+mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int timeout)
+{
+	int wait_state_count = 0;
+	u32 ioc_state;
+
+	do {
+		ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+		if (ioc_state == MPI2_IOC_STATE_OPERATIONAL)
+			break;
+		ssleep(1);
+		ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
+				__func__, ++wait_state_count);
+	} while (--timeout);
+	if (!timeout) {
+		ioc_err(ioc, "%s: failed due to ioc not operational\n", __func__);
+		return -EFAULT;
+	}
+	if (wait_state_count)
+		ioc_info(ioc, "ioc is operational\n");
+	return 0;
+}
+
+/**
  * _base_handshake_req_reply_wait - send request thru doorbell interface
  * @ioc: per adapter object
  * @request_bytes: request length
@@ -5182,15 +5681,13 @@
 	__le32 *mfp;
 
 	/* make sure doorbell is not in use */
-	if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
-		pr_err(MPT3SAS_FMT
-			"doorbell is in use (line=%d)\n",
-			ioc->name, __LINE__);
+	if ((ioc->base_readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
+		ioc_err(ioc, "doorbell is in use (line=%d)\n", __LINE__);
 		return -EFAULT;
 	}
 
 	/* clear pending doorbell interrupts from previous state changes */
-	if (readl(&ioc->chip->HostInterruptStatus) &
+	if (ioc->base_readl(&ioc->chip->HostInterruptStatus) &
 	    MPI2_HIS_IOC2SYS_DB_STATUS)
 		writel(0, &ioc->chip->HostInterruptStatus);
 
@@ -5200,17 +5697,15 @@
 	    &ioc->chip->Doorbell);
 
 	if ((_base_spin_on_doorbell_int(ioc, 5))) {
-		pr_err(MPT3SAS_FMT
-			"doorbell handshake int failed (line=%d)\n",
-			ioc->name, __LINE__);
+		ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
+			__LINE__);
 		return -EFAULT;
 	}
 	writel(0, &ioc->chip->HostInterruptStatus);
 
 	if ((_base_wait_for_doorbell_ack(ioc, 5))) {
-		pr_err(MPT3SAS_FMT
-			"doorbell handshake ack failed (line=%d)\n",
-			ioc->name, __LINE__);
+		ioc_err(ioc, "doorbell handshake ack failed (line=%d)\n",
+			__LINE__);
 		return -EFAULT;
 	}
 
@@ -5222,53 +5717,51 @@
 	}
 
 	if (failed) {
-		pr_err(MPT3SAS_FMT
-			"doorbell handshake sending request failed (line=%d)\n",
-			ioc->name, __LINE__);
+		ioc_err(ioc, "doorbell handshake sending request failed (line=%d)\n",
+			__LINE__);
 		return -EFAULT;
 	}
 
 	/* now wait for the reply */
 	if ((_base_wait_for_doorbell_int(ioc, timeout))) {
-		pr_err(MPT3SAS_FMT
-			"doorbell handshake int failed (line=%d)\n",
-			ioc->name, __LINE__);
+		ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
+			__LINE__);
 		return -EFAULT;
 	}
 
 	/* read the first two 16-bits, it gives the total length of the reply */
-	reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell)
+	reply[0] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
 	    & MPI2_DOORBELL_DATA_MASK);
 	writel(0, &ioc->chip->HostInterruptStatus);
 	if ((_base_wait_for_doorbell_int(ioc, 5))) {
-		pr_err(MPT3SAS_FMT
-			"doorbell handshake int failed (line=%d)\n",
-			ioc->name, __LINE__);
+		ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
+			__LINE__);
 		return -EFAULT;
 	}
-	reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell)
+	reply[1] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
 	    & MPI2_DOORBELL_DATA_MASK);
 	writel(0, &ioc->chip->HostInterruptStatus);
 
 	for (i = 2; i < default_reply->MsgLength * 2; i++)  {
 		if ((_base_wait_for_doorbell_int(ioc, 5))) {
-			pr_err(MPT3SAS_FMT
-				"doorbell handshake int failed (line=%d)\n",
-				ioc->name, __LINE__);
+			ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
+				__LINE__);
 			return -EFAULT;
 		}
 		if (i >=  reply_bytes/2) /* overflow case */
-			readl(&ioc->chip->Doorbell);
+			ioc->base_readl(&ioc->chip->Doorbell);
 		else
-			reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell)
+			reply[i] = le16_to_cpu(
+			    ioc->base_readl(&ioc->chip->Doorbell)
 			    & MPI2_DOORBELL_DATA_MASK);
 		writel(0, &ioc->chip->HostInterruptStatus);
 	}
 
 	_base_wait_for_doorbell_int(ioc, 5);
 	if (_base_wait_for_doorbell_not_used(ioc, 5) != 0) {
-		dhsprintk(ioc, pr_info(MPT3SAS_FMT
-			"doorbell is in use (line=%d)\n", ioc->name, __LINE__));
+		dhsprintk(ioc,
+			  ioc_info(ioc, "doorbell is in use (line=%d)\n",
+				   __LINE__));
 	}
 	writel(0, &ioc->chip->HostInterruptStatus);
 
@@ -5302,45 +5795,27 @@
 	Mpi2SasIoUnitControlRequest_t *mpi_request)
 {
 	u16 smid;
-	u32 ioc_state;
 	u8 issue_reset = 0;
 	int rc;
 	void *request;
-	u16 wait_state_count;
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	mutex_lock(&ioc->base_cmds.mutex);
 
 	if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
-		pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: base_cmd in use\n", __func__);
 		rc = -EAGAIN;
 		goto out;
 	}
 
-	wait_state_count = 0;
-	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
-	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
-		if (wait_state_count++ == 10) {
-			pr_err(MPT3SAS_FMT
-			    "%s: failed due to ioc not operational\n",
-			    ioc->name, __func__);
-			rc = -EFAULT;
-			goto out;
-		}
-		ssleep(1);
-		ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
-		pr_info(MPT3SAS_FMT
-			"%s: waiting for operational state(count=%d)\n",
-			ioc->name, __func__, wait_state_count);
-	}
+	rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
+	if (rc)
+		goto out;
 
 	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
 	if (!smid) {
-		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
 		rc = -EAGAIN;
 		goto out;
 	}
@@ -5354,7 +5829,7 @@
 	    mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
 		ioc->ioc_link_reset_in_progress = 1;
 	init_completion(&ioc->base_cmds.done);
-	mpt3sas_base_put_smid_default(ioc, smid);
+	ioc->put_smid_default(ioc, smid);
 	wait_for_completion_timeout(&ioc->base_cmds.done,
 	    msecs_to_jiffies(10000));
 	if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
@@ -5402,46 +5877,27 @@
 	Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
 {
 	u16 smid;
-	u32 ioc_state;
 	u8 issue_reset = 0;
 	int rc;
 	void *request;
-	u16 wait_state_count;
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	mutex_lock(&ioc->base_cmds.mutex);
 
 	if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
-		pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: base_cmd in use\n", __func__);
 		rc = -EAGAIN;
 		goto out;
 	}
 
-	wait_state_count = 0;
-	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
-	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
-		if (wait_state_count++ == 10) {
-			pr_err(MPT3SAS_FMT
-			    "%s: failed due to ioc not operational\n",
-			    ioc->name, __func__);
-			rc = -EFAULT;
-			goto out;
-		}
-		ssleep(1);
-		ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
-		pr_info(MPT3SAS_FMT
-			"%s: waiting for operational state(count=%d)\n",
-			ioc->name,
-		    __func__, wait_state_count);
-	}
+	rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
+	if (rc)
+		goto out;
 
 	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
 	if (!smid) {
-		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
 		rc = -EAGAIN;
 		goto out;
 	}
@@ -5450,9 +5906,10 @@
 	ioc->base_cmds.status = MPT3_CMD_PENDING;
 	request = mpt3sas_base_get_msg_frame(ioc, smid);
 	ioc->base_cmds.smid = smid;
+	memset(request, 0, ioc->request_sz);
 	memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
 	init_completion(&ioc->base_cmds.done);
-	mpt3sas_base_put_smid_default(ioc, smid);
+	ioc->put_smid_default(ioc, smid);
 	wait_for_completion_timeout(&ioc->base_cmds.done,
 	    msecs_to_jiffies(10000));
 	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -5495,8 +5952,7 @@
 	struct mpt3sas_port_facts *pfacts;
 	int mpi_reply_sz, mpi_request_sz, r;
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
 	mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
@@ -5507,8 +5963,7 @@
 	    (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
 
 	if (r != 0) {
-		pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
-		    ioc->name, __func__, r);
+		ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
 		return r;
 	}
 
@@ -5536,26 +5991,26 @@
 	u32 ioc_state;
 	int rc;
 
-	dinitprintk(ioc, printk(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	if (ioc->pci_error_recovery) {
-		dfailprintk(ioc, printk(MPT3SAS_FMT
-		    "%s: host in pci error recovery\n", ioc->name, __func__));
+		dfailprintk(ioc,
+			    ioc_info(ioc, "%s: host in pci error recovery\n",
+				     __func__));
 		return -EFAULT;
 	}
 
 	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
-	dhsprintk(ioc, printk(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
-	    ioc->name, __func__, ioc_state));
+	dhsprintk(ioc,
+		  ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
+			   __func__, ioc_state));
 
 	if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) ||
 	    (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
 		return 0;
 
 	if (ioc_state & MPI2_DOORBELL_USED) {
-		dhsprintk(ioc, printk(MPT3SAS_FMT
-		    "unexpected doorbell active!\n", ioc->name));
+		dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
 		goto issue_diag_reset;
 	}
 
@@ -5567,9 +6022,9 @@
 
 	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
 	if (ioc_state) {
-		dfailprintk(ioc, printk(MPT3SAS_FMT
-		    "%s: failed going to ready state (ioc_state=0x%x)\n",
-		    ioc->name, __func__, ioc_state));
+		dfailprintk(ioc,
+			    ioc_info(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
+				     __func__, ioc_state));
 		return -EFAULT;
 	}
 
@@ -5592,14 +6047,13 @@
 	struct mpt3sas_facts *facts;
 	int mpi_reply_sz, mpi_request_sz, r;
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	r = _base_wait_for_iocstate(ioc, 10);
 	if (r) {
-		dfailprintk(ioc, printk(MPT3SAS_FMT
-		    "%s: failed getting to correct state\n",
-		    ioc->name, __func__));
+		dfailprintk(ioc,
+			    ioc_info(ioc, "%s: failed getting to correct state\n",
+				     __func__));
 		return r;
 	}
 	mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
@@ -5610,8 +6064,7 @@
 	    (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
 
 	if (r != 0) {
-		pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
-		    ioc->name, __func__, r);
+		ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
 		return r;
 	}
 
@@ -5639,6 +6092,9 @@
 	if ((facts->IOCCapabilities &
 	      MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE) && (!reset_devices))
 		ioc->rdpq_array_capable = 1;
+	if ((facts->IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ)
+	    && ioc->is_aero_ioc)
+		ioc->atomic_desc_capable = 1;
 	facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
 	facts->IOCRequestFrameSize =
 	    le16_to_cpu(mpi_reply.IOCRequestFrameSize);
@@ -5663,20 +6119,20 @@
 	 */
 	ioc->page_size = 1 << facts->CurrentHostPageSize;
 	if (ioc->page_size == 1) {
-		pr_info(MPT3SAS_FMT "CurrentHostPageSize is 0: Setting "
-			"default host page size to 4k\n", ioc->name);
+		ioc_info(ioc, "CurrentHostPageSize is 0: Setting default host page size to 4k\n");
 		ioc->page_size = 1 << MPT3SAS_HOST_PAGE_SIZE_4K;
 	}
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "CurrentHostPageSize(%d)\n",
-		ioc->name, facts->CurrentHostPageSize));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "CurrentHostPageSize(%d)\n",
+			     facts->CurrentHostPageSize));
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT
-		"hba queue depth(%d), max chains per io(%d)\n",
-		ioc->name, facts->RequestCredit,
-	    facts->MaxChainDepth));
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT
-		"request frame size(%d), reply frame size(%d)\n", ioc->name,
-	    facts->IOCRequestFrameSize * 4, facts->ReplyFrameSize * 4));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "hba queue depth(%d), max chains per io(%d)\n",
+			     facts->RequestCredit, facts->MaxChainDepth));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "request frame size(%d), reply frame size(%d)\n",
+			     facts->IOCRequestFrameSize * 4,
+			     facts->ReplyFrameSize * 4));
 	return 0;
 }
 
@@ -5696,8 +6152,7 @@
 	u16 ioc_status;
 	u32 reply_post_free_array_sz = 0;
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
 	mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
@@ -5763,15 +6218,14 @@
 	    sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10);
 
 	if (r != 0) {
-		pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
-		    ioc->name, __func__, r);
+		ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
 		return r;
 	}
 
 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
 	    mpi_reply.IOCLogInfo) {
-		pr_err(MPT3SAS_FMT "%s: failed\n", ioc->name, __func__);
+		ioc_err(ioc, "%s: failed\n", __func__);
 		r = -EIO;
 	}
 
@@ -5842,18 +6296,16 @@
 	u16 smid;
 	u16 ioc_status;
 
-	pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);
+	ioc_info(ioc, "sending port enable !!\n");
 
 	if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
-		pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: internal command already in use\n", __func__);
 		return -EAGAIN;
 	}
 
 	smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
 	if (!smid) {
-		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
 		return -EAGAIN;
 	}
 
@@ -5864,11 +6316,10 @@
 	mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
 
 	init_completion(&ioc->port_enable_cmds.done);
-	mpt3sas_base_put_smid_default(ioc, smid);
+	ioc->put_smid_default(ioc, smid);
 	wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
 	if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
-		pr_err(MPT3SAS_FMT "%s: timeout\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: timeout\n", __func__);
 		_debug_dump_mf(mpi_request,
 		    sizeof(Mpi2PortEnableRequest_t)/4);
 		if (ioc->port_enable_cmds.status & MPT3_CMD_RESET)
@@ -5881,16 +6332,15 @@
 	mpi_reply = ioc->port_enable_cmds.reply;
 	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-		pr_err(MPT3SAS_FMT "%s: failed with (ioc_status=0x%08x)\n",
-		    ioc->name, __func__, ioc_status);
+		ioc_err(ioc, "%s: failed with (ioc_status=0x%08x)\n",
+			__func__, ioc_status);
 		r = -EFAULT;
 		goto out;
 	}
 
  out:
 	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
-	pr_info(MPT3SAS_FMT "port enable: %s\n", ioc->name, ((r == 0) ?
-	    "SUCCESS" : "FAILED"));
+	ioc_info(ioc, "port enable: %s\n", r == 0 ? "SUCCESS" : "FAILED");
 	return r;
 }
 
@@ -5906,18 +6356,16 @@
 	Mpi2PortEnableRequest_t *mpi_request;
 	u16 smid;
 
-	pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);
+	ioc_info(ioc, "sending port enable !!\n");
 
 	if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
-		pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: internal command already in use\n", __func__);
 		return -EAGAIN;
 	}
 
 	smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
 	if (!smid) {
-		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
 		return -EAGAIN;
 	}
 
@@ -5927,7 +6375,7 @@
 	memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
 	mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
 
-	mpt3sas_base_put_smid_default(ioc, smid);
+	ioc->put_smid_default(ioc, smid);
 	return 0;
 }
 
@@ -6020,19 +6468,16 @@
 	int r = 0;
 	int i;
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
-		pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: internal command already in use\n", __func__);
 		return -EAGAIN;
 	}
 
 	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
 	if (!smid) {
-		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
 		return -EAGAIN;
 	}
 	ioc->base_cmds.status = MPT3_CMD_PENDING;
@@ -6046,11 +6491,10 @@
 		mpi_request->EventMasks[i] =
 		    cpu_to_le32(ioc->event_masks[i]);
 	init_completion(&ioc->base_cmds.done);
-	mpt3sas_base_put_smid_default(ioc, smid);
+	ioc->put_smid_default(ioc, smid);
 	wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
 	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
-		pr_err(MPT3SAS_FMT "%s: timeout\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: timeout\n", __func__);
 		_debug_dump_mf(mpi_request,
 		    sizeof(Mpi2EventNotificationRequest_t)/4);
 		if (ioc->base_cmds.status & MPT3_CMD_RESET)
@@ -6058,8 +6502,7 @@
 		else
 			r = -ETIME;
 	} else
-		dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s: complete\n",
-		    ioc->name, __func__));
+		dinitprintk(ioc, ioc_info(ioc, "%s: complete\n", __func__));
 	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
 	return r;
 }
@@ -6115,18 +6558,16 @@
 	u32 count;
 	u32 hcb_size;
 
-	pr_info(MPT3SAS_FMT "sending diag reset !!\n", ioc->name);
+	ioc_info(ioc, "sending diag reset !!\n");
 
-	drsprintk(ioc, pr_info(MPT3SAS_FMT "clear interrupts\n",
-	    ioc->name));
+	drsprintk(ioc, ioc_info(ioc, "clear interrupts\n"));
 
 	count = 0;
 	do {
 		/* Write magic sequence to WriteSequence register
 		 * Loop until in diagnostic mode
 		 */
-		drsprintk(ioc, pr_info(MPT3SAS_FMT
-			"write magic sequence\n", ioc->name));
+		drsprintk(ioc, ioc_info(ioc, "write magic sequence\n"));
 		writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
 		writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
 		writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
@@ -6141,17 +6582,16 @@
 		if (count++ > 20)
 			goto out;
 
-		host_diagnostic = readl(&ioc->chip->HostDiagnostic);
-		drsprintk(ioc, pr_info(MPT3SAS_FMT
-			"wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
-		    ioc->name, count, host_diagnostic));
+		host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
+		drsprintk(ioc,
+			  ioc_info(ioc, "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
+				   count, host_diagnostic));
 
 	} while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
 
-	hcb_size = readl(&ioc->chip->HCBSize);
+	hcb_size = ioc->base_readl(&ioc->chip->HCBSize);
 
-	drsprintk(ioc, pr_info(MPT3SAS_FMT "diag reset: issued\n",
-	    ioc->name));
+	drsprintk(ioc, ioc_info(ioc, "diag reset: issued\n"));
 	writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
 	     &ioc->chip->HostDiagnostic);
 
@@ -6162,7 +6602,7 @@
 	for (count = 0; count < (300000000 /
 		MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {
 
-		host_diagnostic = readl(&ioc->chip->HostDiagnostic);
+		host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
 
 		if (host_diagnostic == 0xFFFFFFFF)
 			goto out;
@@ -6174,43 +6614,38 @@
 
 	if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
 
-		drsprintk(ioc, pr_info(MPT3SAS_FMT
-		"restart the adapter assuming the HCB Address points to good F/W\n",
-		    ioc->name));
+		drsprintk(ioc,
+			  ioc_info(ioc, "restart the adapter assuming the HCB Address points to good F/W\n"));
 		host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
 		host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
 		writel(host_diagnostic, &ioc->chip->HostDiagnostic);
 
-		drsprintk(ioc, pr_info(MPT3SAS_FMT
-		    "re-enable the HCDW\n", ioc->name));
+		drsprintk(ioc, ioc_info(ioc, "re-enable the HCDW\n"));
 		writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
 		    &ioc->chip->HCBSize);
 	}
 
-	drsprintk(ioc, pr_info(MPT3SAS_FMT "restart the adapter\n",
-	    ioc->name));
+	drsprintk(ioc, ioc_info(ioc, "restart the adapter\n"));
 	writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
 	    &ioc->chip->HostDiagnostic);
 
-	drsprintk(ioc, pr_info(MPT3SAS_FMT
-		"disable writes to the diagnostic register\n", ioc->name));
+	drsprintk(ioc,
+		  ioc_info(ioc, "disable writes to the diagnostic register\n"));
 	writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
 
-	drsprintk(ioc, pr_info(MPT3SAS_FMT
-		"Wait for FW to go to the READY state\n", ioc->name));
+	drsprintk(ioc, ioc_info(ioc, "Wait for FW to go to the READY state\n"));
 	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20);
 	if (ioc_state) {
-		pr_err(MPT3SAS_FMT
-			"%s: failed going to ready state (ioc_state=0x%x)\n",
-			ioc->name, __func__, ioc_state);
+		ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
+			__func__, ioc_state);
 		goto out;
 	}
 
-	pr_info(MPT3SAS_FMT "diag reset: SUCCESS\n", ioc->name);
+	ioc_info(ioc, "diag reset: SUCCESS\n");
 	return 0;
 
  out:
-	pr_err(MPT3SAS_FMT "diag reset: FAILED\n", ioc->name);
+	ioc_err(ioc, "diag reset: FAILED\n");
 	return -EFAULT;
 }
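
Every read of HostDiagnostic and HCBSize in the diag-reset sequence above now goes through ioc->base_readl, so Aero/Sea controllers get the retry-on-zero workaround selected at attach time while older controllers keep a plain readl(). A small sketch of the same idea with hypothetical names, showing a retrying read helper driving a bounded register poll:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

/* assumed workaround: re-read up to three times if the register
 * unexpectedly reads back as zero
 */
static u32 my_readl_retry(const void __iomem *addr)
{
	u32 val;
	int i = 0;

	do {
		val = readl(addr);
	} while (val == 0 && ++i < 3);

	return val;
}

/* poll @reg until @bit is set, giving up after @tries iterations */
static int my_poll_for_bit(void __iomem *reg, u32 bit, unsigned int tries)
{
	while (tries--) {
		u32 val = my_readl_retry(reg);

		if (val == 0xFFFFFFFF)		/* device gone / PCI error */
			return -ENODEV;
		if (val & bit)
			return 0;
		udelay(100);			/* arbitrary delay for the sketch */
	}
	return -ETIMEDOUT;
}
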
 
@@ -6228,15 +6663,15 @@
 	int rc;
 	int count;
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	if (ioc->pci_error_recovery)
 		return 0;
 
 	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
-	dhsprintk(ioc, pr_info(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
-	    ioc->name, __func__, ioc_state));
+	dhsprintk(ioc,
+		  ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
+			   __func__, ioc_state));
 
 	/* if in RESET state, it should move to READY state shortly */
 	count = 0;
@@ -6244,9 +6679,8 @@
 		while ((ioc_state & MPI2_IOC_STATE_MASK) !=
 		    MPI2_IOC_STATE_READY) {
 			if (count++ == 10) {
-				pr_err(MPT3SAS_FMT
-					"%s: failed going to ready state (ioc_state=0x%x)\n",
-				    ioc->name, __func__, ioc_state);
+				ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
+					__func__, ioc_state);
 				return -EFAULT;
 			}
 			ssleep(1);
@@ -6258,9 +6692,7 @@
 		return 0;
 
 	if (ioc_state & MPI2_DOORBELL_USED) {
-		dhsprintk(ioc, pr_info(MPT3SAS_FMT
-			"unexpected doorbell active!\n",
-			ioc->name));
+		dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
 		goto issue_diag_reset;
 	}
 
@@ -6293,7 +6725,7 @@
 static int
 _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
 {
-	int r, i, index;
+	int r, i, index, rc;
 	unsigned long	flags;
 	u32 reply_address;
 	u16 smid;
@@ -6304,8 +6736,7 @@
 	struct adapter_reply_queue *reply_q;
 	Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	/* clean the delayed target reset list */
 	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
@@ -6397,8 +6828,19 @@
  skip_init_reply_post_free_queue:
 
 	r = _base_send_ioc_init(ioc);
-	if (r)
-		return r;
+	if (r) {
+		/*
+		 * No need to check IOC state for fault state & issue
+		 * diag reset during host reset. This check is needed
+		 * only during driver load time.
+		 */
+		if (!ioc->is_driver_loading)
+			return r;
+
+		rc = _base_check_for_fault_and_issue_reset(ioc);
+		if (rc || (_base_send_ioc_init(ioc)))
+			return r;
+	}
 
 	/* initialize reply free host index */
 	ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
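
The hunk above is the first of three places in this change (IOC init here, IOC facts and port facts in mpt3sas_base_attach below) where an init-time step gets exactly one retry: if the step fails while the driver is loading, _base_check_for_fault_and_issue_reset() is consulted and, if it clears the fault, the step is attempted once more; on the host-reset path the original error is returned unchanged. A condensed sketch of that shape, with hypothetical names:

#include <linux/types.h>

struct my_adapter {
	bool is_driver_loading;
	/* ... */
};

static int my_check_fault_and_reset(struct my_adapter *ioc);	/* assumed helper */

/* run an init-time step, retrying it once after clearing a controller fault */
static int my_init_step_with_retry(struct my_adapter *ioc,
				   int (*step)(struct my_adapter *ioc))
{
	int r = step(ioc);

	if (!r)
		return 0;
	if (!ioc->is_driver_loading)		/* host reset path: no retry */
		return r;
	if (my_check_fault_and_reset(ioc))
		return r;
	return step(ioc) ? r : 0;		/* keep the original error code */
}
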
@@ -6465,8 +6907,7 @@
 void
 mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
 {
-	dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	/* synchronizing freeing resource with pci_access_mutex lock */
 	mutex_lock(&ioc->pci_access_mutex);
@@ -6491,11 +6932,10 @@
 int
 mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
 {
-	int r, i;
+	int r, i, rc;
 	int cpu_id, last_cpu_id = 0;
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	/* setup cpu_msix_table */
 	ioc->cpu_count = num_online_cpus();
@@ -6505,9 +6945,8 @@
 	ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
 	ioc->reply_queue_count = 1;
 	if (!ioc->cpu_msix_table) {
-		dfailprintk(ioc, pr_info(MPT3SAS_FMT
-			"allocation for cpu_msix_table failed!!!\n",
-			ioc->name));
+		dfailprintk(ioc,
+			    ioc_info(ioc, "allocation for cpu_msix_table failed!!!\n"));
 		r = -ENOMEM;
 		goto out_free_resources;
 	}
@@ -6516,30 +6955,39 @@
 		ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
 		    sizeof(resource_size_t *), GFP_KERNEL);
 		if (!ioc->reply_post_host_index) {
-			dfailprintk(ioc, pr_info(MPT3SAS_FMT "allocation "
-				"for reply_post_host_index failed!!!\n",
-				ioc->name));
+			dfailprintk(ioc,
+				    ioc_info(ioc, "allocation for reply_post_host_index failed!!!\n"));
 			r = -ENOMEM;
 			goto out_free_resources;
 		}
 	}
 
+	ioc->smp_affinity_enable = smp_affinity_enable;
+
 	ioc->rdpq_array_enable_assigned = 0;
 	ioc->dma_mask = 0;
+	if (ioc->is_aero_ioc)
+		ioc->base_readl = &_base_readl_aero;
+	else
+		ioc->base_readl = &_base_readl;
 	r = mpt3sas_base_map_resources(ioc);
 	if (r)
 		goto out_free_resources;
 
 	pci_set_drvdata(ioc->pdev, ioc->shost);
 	r = _base_get_ioc_facts(ioc);
-	if (r)
-		goto out_free_resources;
+	if (r) {
+		rc = _base_check_for_fault_and_issue_reset(ioc);
+		if (rc || (_base_get_ioc_facts(ioc)))
+			goto out_free_resources;
+	}
 
 	switch (ioc->hba_mpi_version_belonged) {
 	case MPI2_VERSION:
 		ioc->build_sg_scmd = &_base_build_sg_scmd;
 		ioc->build_sg = &_base_build_sg;
 		ioc->build_zero_len_sge = &_base_build_zero_len_sge;
+		ioc->get_msix_index_for_smlio = &_base_get_msix_index;
 		break;
 	case MPI25_VERSION:
 	case MPI26_VERSION:
@@ -6554,15 +7002,30 @@
 		ioc->build_nvme_prp = &_base_build_nvme_prp;
 		ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
 		ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
-
+		if (ioc->high_iops_queues)
+			ioc->get_msix_index_for_smlio =
+					&_base_get_high_iops_msix_index;
+		else
+			ioc->get_msix_index_for_smlio = &_base_get_msix_index;
 		break;
 	}
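
For MPI2.5/MPI2.6 controllers the attach path now also resolves get_msix_index_for_smlio: when the adapter exposes dedicated high-IOPS reply queues, a separate helper chooses among them, otherwise the ordinary per-CPU index helper is used for every command. The two helpers are added elsewhere in this change; a rough sketch of the selection with hypothetical names and an assumed steering policy (the real _base_get_high_iops_msix_index policy may differ):

#include <linux/sizes.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <scsi/scsi_cmnd.h>

struct my_adapter {
	u8 reply_queue_count;
	u8 high_iops_queues;	/* 0 when the feature is unavailable */
	u8 (*get_msix_index)(struct my_adapter *ioc, struct scsi_cmnd *scmd);
};

static u8 my_msix_index(struct my_adapter *ioc, struct scsi_cmnd *scmd)
{
	/* plain per-CPU spreading across all reply queues */
	return raw_smp_processor_id() % ioc->reply_queue_count;
}

static u8 my_high_iops_msix_index(struct my_adapter *ioc,
				  struct scsi_cmnd *scmd)
{
	/* assumed policy: steer small transfers to the high-IOPS queues */
	if (scsi_bufflen(scmd) <= SZ_8K)
		return raw_smp_processor_id() % ioc->high_iops_queues;
	return my_msix_index(ioc, scmd);
}

static void my_setup_msix_index_fn(struct my_adapter *ioc)
{
	ioc->get_msix_index = ioc->high_iops_queues ?
			my_high_iops_msix_index : my_msix_index;
}
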
-
-	if (ioc->is_mcpu_endpoint)
-		ioc->put_smid_scsi_io = &_base_put_smid_mpi_ep_scsi_io;
-	else
-		ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
-
+	if (ioc->atomic_desc_capable) {
+		ioc->put_smid_default = &_base_put_smid_default_atomic;
+		ioc->put_smid_scsi_io = &_base_put_smid_scsi_io_atomic;
+		ioc->put_smid_fast_path =
+				&_base_put_smid_fast_path_atomic;
+		ioc->put_smid_hi_priority =
+				&_base_put_smid_hi_priority_atomic;
+	} else {
+		ioc->put_smid_default = &_base_put_smid_default;
+		ioc->put_smid_fast_path = &_base_put_smid_fast_path;
+		ioc->put_smid_hi_priority = &_base_put_smid_hi_priority;
+		if (ioc->is_mcpu_endpoint)
+			ioc->put_smid_scsi_io =
+				&_base_put_smid_mpi_ep_scsi_io;
+		else
+			ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
+	}
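
The block above wires the put_smid_* hooks once, at attach time: adapters that report atomic descriptor support post requests as a single 32-bit Atomic Request Descriptor write, while everything else keeps the classic 64-bit descriptor post (with the mCPU endpoint retaining its special SCSI I/O variant). A rough sketch of the difference between the two posting styles, with hypothetical names and register fields; the real *_atomic helpers live elsewhere in this change, and the classic path in the driver takes care of writing the two halves consistently:

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/types.h>

struct my_adapter {
	void __iomem *req_post_low;	/* assumed: classic descriptor, low half */
	void __iomem *req_post_high;	/* assumed: classic descriptor, high half */
	void __iomem *atomic_req_post;	/* assumed: atomic descriptor register */
};

static u64 my_build_descriptor(u16 smid);		/* assumed helper */
static u32 my_build_atomic_descriptor(u16 smid);	/* assumed helper */

/* classic post: the 64-bit descriptor is written as two 32-bit halves */
static void my_put_smid_default(struct my_adapter *ioc, u16 smid)
{
	u64 desc = my_build_descriptor(smid);

	writel(lower_32_bits(desc), ioc->req_post_low);
	writel(upper_32_bits(desc), ioc->req_post_high);
}

/* atomic post: the whole descriptor fits in a single 32-bit write */
static void my_put_smid_default_atomic(struct my_adapter *ioc, u16 smid)
{
	writel(my_build_atomic_descriptor(smid), ioc->atomic_req_post);
}
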
 	/*
 	 * These function pointers are for other requests that don't
 	 * require IEEE scatter gather elements.
@@ -6585,14 +7048,23 @@
 
 	for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
 		r = _base_get_port_facts(ioc, i);
-		if (r)
-			goto out_free_resources;
+		if (r) {
+			rc = _base_check_for_fault_and_issue_reset(ioc);
+			if (rc || (_base_get_port_facts(ioc, i)))
+				goto out_free_resources;
+		}
 	}
 
 	r = _base_allocate_memory_pools(ioc);
 	if (r)
 		goto out_free_resources;
 
+	if (irqpoll_weight > 0)
+		ioc->thresh_hold = irqpoll_weight;
+	else
+		ioc->thresh_hold = ioc->hba_queue_depth/4;
+
+	_base_init_irqpolls(ioc);
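
The thresh_hold value computed above becomes the irq_poll budget for each reply queue: the new irqpoll_weight module parameter overrides it, otherwise a quarter of the HBA queue depth is used. _base_init_irqpolls() itself is added elsewhere in this change; a minimal sketch of how a per-reply-queue irq_poll is typically registered with such a budget, using hypothetical names for the queue structure and the reply-processing helper:

#include <linux/irq_poll.h>
#include <linux/kernel.h>

struct my_reply_queue {
	struct irq_poll irqpoll;
	/* ... */
};

/* assumed helper: drain up to @budget completed replies from @q */
static int my_process_replies(struct my_reply_queue *q, int budget);

static int my_irqpoll_handler(struct irq_poll *irqpoll, int budget)
{
	struct my_reply_queue *q =
		container_of(irqpoll, struct my_reply_queue, irqpoll);
	int done = my_process_replies(q, budget);

	if (done < budget)
		irq_poll_complete(irqpoll);	/* hand control back to the IRQ path */
	return done;
}

static void my_init_irqpoll(struct my_reply_queue *q, int weight)
{
	/* weight corresponds to ioc->thresh_hold in the driver */
	irq_poll_init(&q->irqpoll, weight, my_irqpoll_handler);
}

In the usual irq_poll flow the hard-IRQ handler calls irq_poll_sched() on the queue when it sees a burst of completions, and the poll callback above then processes replies in budget-sized chunks.
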
 	init_waitqueue_head(&ioc->reset_wq);
 
 	/* allocate memory pd handle bitmask list */
@@ -6702,6 +7174,13 @@
 	if (r)
 		goto out_free_resources;
 
+	/*
+	 * Save a copy of the current IOCFacts in prev_fw_facts;
+	 * it will be used during online firmware upgrade.
+	 */
+	memcpy(&ioc->prev_fw_facts, &ioc->facts,
+	    sizeof(struct mpt3sas_facts));
+
 	ioc->non_operational_loop = 0;
 	ioc->got_task_abort_from_ioctl = 0;
 	return 0;
@@ -6747,8 +7226,7 @@
 void
 mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
 {
-	dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	mpt3sas_base_stop_watchdog(ioc);
 	mpt3sas_base_free_resources(ioc);
@@ -6781,8 +7259,7 @@
 {
 	mpt3sas_scsih_pre_reset_handler(ioc);
 	mpt3sas_ctl_pre_reset_handler(ioc);
-	dtmprintk(ioc, pr_info(MPT3SAS_FMT
-			"%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
+	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
 }
 
 /**
@@ -6793,8 +7270,7 @@
 {
 	mpt3sas_scsih_after_reset_handler(ioc);
 	mpt3sas_ctl_after_reset_handler(ioc);
-	dtmprintk(ioc, pr_info(MPT3SAS_FMT
-			"%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
+	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__));
 	if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
 		ioc->transport_cmds.status |= MPT3_CMD_RESET;
 		mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
@@ -6835,8 +7311,7 @@
 {
 	mpt3sas_scsih_reset_done_handler(ioc);
 	mpt3sas_ctl_reset_done_handler(ioc);
-	dtmprintk(ioc, pr_info(MPT3SAS_FMT
-			"%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
+	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
 }
 
 /**
@@ -6868,6 +7343,85 @@
 }
 
 /**
+ * _base_check_ioc_facts_changes - Look for increase/decrease of IOCFacts
+ *     attributes during online firmware upgrade and update the corresponding
+ *     IOC variables accordingly.
+ *
+ * @ioc: Pointer to MPT_ADAPTER structure
+ */
+static int
+_base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc)
+{
+	u16 pd_handles_sz;
+	void *pd_handles = NULL, *blocking_handles = NULL;
+	void *pend_os_device_add = NULL, *device_remove_in_progress = NULL;
+	struct mpt3sas_facts *old_facts = &ioc->prev_fw_facts;
+
+	if (ioc->facts.MaxDevHandle > old_facts->MaxDevHandle) {
+		pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
+		if (ioc->facts.MaxDevHandle % 8)
+			pd_handles_sz++;
+
+		pd_handles = krealloc(ioc->pd_handles, pd_handles_sz,
+		    GFP_KERNEL);
+		if (!pd_handles) {
+			ioc_info(ioc,
+			    "Unable to allocate the memory for pd_handles of sz: %d\n",
+			    pd_handles_sz);
+			return -ENOMEM;
+		}
+		memset(pd_handles + ioc->pd_handles_sz, 0,
+		    (pd_handles_sz - ioc->pd_handles_sz));
+		ioc->pd_handles = pd_handles;
+
+		blocking_handles = krealloc(ioc->blocking_handles,
+		    pd_handles_sz, GFP_KERNEL);
+		if (!blocking_handles) {
+			ioc_info(ioc,
+			    "Unable to allocate the memory for blocking_handles of sz: %d\n",
+			    pd_handles_sz);
+			return -ENOMEM;
+		}
+		memset(blocking_handles + ioc->pd_handles_sz, 0,
+		    (pd_handles_sz - ioc->pd_handles_sz));
+		ioc->blocking_handles = blocking_handles;
+		ioc->pd_handles_sz = pd_handles_sz;
+
+		pend_os_device_add = krealloc(ioc->pend_os_device_add,
+		    pd_handles_sz, GFP_KERNEL);
+		if (!pend_os_device_add) {
+			ioc_info(ioc,
+			    "Unable to allocate the memory for pend_os_device_add of sz: %d\n",
+			    pd_handles_sz);
+			return -ENOMEM;
+		}
+		memset(pend_os_device_add + ioc->pend_os_device_add_sz, 0,
+		    (pd_handles_sz - ioc->pend_os_device_add_sz));
+		ioc->pend_os_device_add = pend_os_device_add;
+		ioc->pend_os_device_add_sz = pd_handles_sz;
+
+		device_remove_in_progress = krealloc(
+		    ioc->device_remove_in_progress, pd_handles_sz, GFP_KERNEL);
+		if (!device_remove_in_progress) {
+			ioc_info(ioc,
+			    "Unable to allocate the memory for device_remove_in_progress of sz: %d\n",
+			    pd_handles_sz);
+			return -ENOMEM;
+		}
+		memset(device_remove_in_progress +
+		    ioc->device_remove_in_progress_sz, 0,
+		    (pd_handles_sz - ioc->device_remove_in_progress_sz));
+		ioc->device_remove_in_progress = device_remove_in_progress;
+		ioc->device_remove_in_progress_sz = pd_handles_sz;
+	}
+
+	memcpy(&ioc->prev_fw_facts, &ioc->facts, sizeof(struct mpt3sas_facts));
+	return 0;
+}
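+
+/*
+ * The function above applies the same grow step to four per-device-handle
+ * tables whenever a new firmware image raises MaxDevHandle: krealloc() the
+ * buffer to the new size and zero only the newly added tail, since
+ * krealloc() preserves the existing contents but leaves the extension
+ * uninitialized. A condensed sketch of that step for a single table, with
+ * hypothetical names:
+ *
+ *	#include <linux/slab.h>
+ *	#include <linux/string.h>
+ *	#include <linux/types.h>
+ *
+ *	// grow a handle table from @old_sz to @new_sz bytes, zeroing the tail
+ *	static void *my_grow_handle_table(void *table, u16 old_sz, u16 new_sz)
+ *	{
+ *		void *p = krealloc(table, new_sz, GFP_KERNEL);
+ *
+ *		if (!p)		// the original allocation stays valid and owned
+ *			return NULL;
+ *
+ *		memset(p + old_sz, 0, new_sz - old_sz);
+ *		return p;
+ *	}
+ *
+ * As in the code above, a NULL return leaves the caller holding the old,
+ * smaller table, which is why the error paths only report -ENOMEM and do
+ * not free anything.
+ */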
+
+/**
  * mpt3sas_base_hard_reset_handler - reset controller
  * @ioc: Pointer to MPT_ADAPTER structure
  * @type: FORCE_BIG_HAMMER or SOFT_RESET
@@ -6883,12 +7437,10 @@
 	u32 ioc_state;
 	u8 is_fault = 0, is_trigger = 0;
 
-	dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
-	    __func__));
+	dtmprintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
 
 	if (ioc->pci_error_recovery) {
-		pr_err(MPT3SAS_FMT "%s: pci error recovery reset\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: pci error recovery reset\n", __func__);
 		r = 0;
 		goto out_unlocked;
 	}
@@ -6932,6 +7484,13 @@
 	if (r)
 		goto out;
 
+	r = _base_check_ioc_facts_changes(ioc);
+	if (r) {
+		ioc_info(ioc,
+		    "Some of the parameters changed in this new firmware image; a system reboot is required\n");
+		goto out;
+	}
 	if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
 		panic("%s: Issue occurred with flashing controller firmware. "
 		      "Please reboot the system and ensure that the correct"
@@ -6942,8 +7501,9 @@
 		_base_reset_done_handler(ioc);
 
  out:
-	dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: %s\n",
-	    ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")));
+	dtmprintk(ioc,
+		  ioc_info(ioc, "%s: %s\n",
+			   __func__, r == 0 ? "SUCCESS" : "FAILED"));
 
 	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
 	ioc->shost_recovery = 0;
@@ -6959,7 +7519,6 @@
 			mpt3sas_trigger_master(ioc,
 			    MASTER_TRIGGER_ADAPTER_RESET);
 	}
-	dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
-	    __func__));
+	dtmprintk(ioc, ioc_info(ioc, "%s: exit\n", __func__));
 	return r;
 }