Update Linux to v5.4.2
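
The megaraid portion of this update adds SPDX license identifiers, converts
megaraid_mbox/megaraid_mm from the legacy PCI DMA wrappers to the generic DMA
API, and brings megaraid_sas to 07.710.50.00-rc1 (Aero controller IDs,
irq_poll support, debugfs, snap dump and performance-mode tuning).

The DMA API conversion repeated throughout megaraid_mbox.c follows this
before/after pattern (a minimal sketch only; example_alloc/example_free and
their arguments are placeholder names, not identifiers from the driver):

    #include <linux/pci.h>
    #include <linux/dma-mapping.h>

    static void *example_alloc(struct pci_dev *pdev, size_t len, dma_addr_t *dma_h)
    {
    	/* old: return pci_zalloc_consistent(pdev, len, dma_h); */
    	return dma_alloc_coherent(&pdev->dev, len, dma_h, GFP_KERNEL);
    }

    static void example_free(struct pci_dev *pdev, size_t len, void *buf, dma_addr_t dma_h)
    {
    	/* old: pci_free_consistent(pdev, len, buf, dma_h); */
    	dma_free_coherent(&pdev->dev, len, buf, dma_h);
    }

dma_alloc_coherent() takes the struct device embedded in the pci_dev plus an
explicit GFP flag and, like pci_zalloc_consistent(), returns zeroed memory,
so the conversions below preserve the zeroing behaviour.
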
Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/drivers/scsi/megaraid/Kconfig.megaraid b/drivers/scsi/megaraid/Kconfig.megaraid
index 17419e3..2adc2af 100644
--- a/drivers/scsi/megaraid/Kconfig.megaraid
+++ b/drivers/scsi/megaraid/Kconfig.megaraid
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
config MEGARAID_NEWGEN
bool "LSI Logic New Generation RAID Device Drivers"
depends on PCI && SCSI
@@ -78,6 +79,7 @@
config MEGARAID_SAS
tristate "LSI Logic MegaRAID SAS RAID Module"
depends on PCI && SCSI
+ select IRQ_POLL
help
Module for LSI Logic's SAS based RAID controllers.
To compile this driver as a module, choose 'm' here.
diff --git a/drivers/scsi/megaraid/Makefile b/drivers/scsi/megaraid/Makefile
index 6e74d21..12177e4 100644
--- a/drivers/scsi/megaraid/Makefile
+++ b/drivers/scsi/megaraid/Makefile
@@ -3,4 +3,4 @@
obj-$(CONFIG_MEGARAID_MAILBOX) += megaraid_mbox.o
obj-$(CONFIG_MEGARAID_SAS) += megaraid_sas.o
megaraid_sas-objs := megaraid_sas_base.o megaraid_sas_fusion.o \
- megaraid_sas_fp.o
+ megaraid_sas_fp.o megaraid_sas_debugfs.o
diff --git a/drivers/scsi/megaraid/mbox_defs.h b/drivers/scsi/megaraid/mbox_defs.h
index e01c6f7..01a1bfb 100644
--- a/drivers/scsi/megaraid/mbox_defs.h
+++ b/drivers/scsi/megaraid/mbox_defs.h
@@ -1,16 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
*
* Linux MegaRAID Unified device driver
*
* Copyright (c) 2003-2004 LSI Logic Corporation.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* FILE : mbox_defs.h
- *
*/
#ifndef _MRAID_MBOX_DEFS_H_
#define _MRAID_MBOX_DEFS_H_
diff --git a/drivers/scsi/megaraid/mega_common.h b/drivers/scsi/megaraid/mega_common.h
index 1d037ed..3a7596e 100644
--- a/drivers/scsi/megaraid/mega_common.h
+++ b/drivers/scsi/megaraid/mega_common.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
*
* Linux MegaRAID device driver
*
* Copyright (c) 2003-2004 LSI Logic Corporation.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* FILE : mega_common.h
*
 * Library of common routines used by all low-level megaraid drivers
diff --git a/drivers/scsi/megaraid/megaraid_ioctl.h b/drivers/scsi/megaraid/megaraid_ioctl.h
index eedcbde..ae9c2ff 100644
--- a/drivers/scsi/megaraid/megaraid_ioctl.h
+++ b/drivers/scsi/megaraid/megaraid_ioctl.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
*
* Linux MegaRAID device driver
*
* Copyright (c) 2003-2004 LSI Logic Corporation.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* FILE : megaraid_ioctl.h
*
* Definitions to interface with user level applications
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 530358c..f6ac819 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Linux MegaRAID device driver
*
* Copyright (c) 2003-2004 LSI Logic Corporation.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* FILE : megaraid_mbox.c
* Version : v2.20.5.1 (Nov 16 2006)
*
@@ -38,7 +34,6 @@
* Dell PERC 4e/DC 1000 0408 1028 0002
* Dell PERC 4e/SC 1000 0408 1028 0001
*
- *
* LSI MegaRAID SCSI 320-0 1000 1960 1000 A520
* LSI MegaRAID SCSI 320-1 1000 1960 1000 0520
* LSI MegaRAID SCSI 320-2 1000 1960 1000 0518
@@ -202,13 +197,6 @@
MODULE_PARM_DESC(debug_level, "Debug level for driver (default=0)");
/*
- * ### global data ###
- */
-static uint8_t megaraid_mbox_version[8] =
- { 0x02, 0x20, 0x04, 0x06, 3, 7, 20, 5 };
-
-
-/*
* PCI table for all supported controllers.
*/
static struct pci_device_id pci_id_table_g[] = {
@@ -343,7 +331,6 @@
.eh_abort_handler = megaraid_abort_handler,
.eh_host_reset_handler = megaraid_reset_handler,
.change_queue_depth = scsi_change_queue_depth,
- .use_clustering = ENABLE_CLUSTERING,
.no_write_same = 1,
.sdev_attrs = megaraid_sdev_attrs,
.shost_attrs = megaraid_shost_attrs,
@@ -457,10 +444,9 @@
// Setup the default DMA mask. This would be changed later on
// depending on hardware capabilities
- if (pci_set_dma_mask(adapter->pdev, DMA_BIT_MASK(32)) != 0) {
-
+ if (dma_set_mask(&adapter->pdev->dev, DMA_BIT_MASK(32))) {
con_log(CL_ANN, (KERN_WARNING
- "megaraid: pci_set_dma_mask failed:%d\n", __LINE__));
+ "megaraid: dma_set_mask failed:%d\n", __LINE__));
goto out_free_adapter;
}
@@ -484,7 +470,7 @@
// Start the mailbox based controller
if (megaraid_init_mbox(adapter) != 0) {
con_log(CL_ANN, (KERN_WARNING
- "megaraid: maibox adapter did not initialize\n"));
+ "megaraid: mailbox adapter did not initialize\n"));
goto out_free_adapter;
}
@@ -878,11 +864,12 @@
adapter->pdev->device == PCI_DEVICE_ID_PERC4_DI_EVERGLADES) ||
(adapter->pdev->vendor == PCI_VENDOR_ID_DELL &&
adapter->pdev->device == PCI_DEVICE_ID_PERC4E_DI_KOBUK)) {
- if (pci_set_dma_mask(adapter->pdev, DMA_BIT_MASK(64))) {
+ if (dma_set_mask(&adapter->pdev->dev, DMA_BIT_MASK(64))) {
con_log(CL_ANN, (KERN_WARNING
"megaraid: DMA mask for 64-bit failed\n"));
- if (pci_set_dma_mask (adapter->pdev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&adapter->pdev->dev,
+ DMA_BIT_MASK(32))) {
con_log(CL_ANN, (KERN_WARNING
"megaraid: 32-bit DMA mask failed\n"));
goto out_free_sysfs_res;
@@ -950,7 +937,7 @@
* megaraid_alloc_cmd_packets - allocate shared mailbox
* @adapter : soft state of the raid controller
*
- * Allocate and align the shared mailbox. This maibox is used to issue
+ * Allocate and align the shared mailbox. This mailbox is used to issue
* all the commands. For IO based controllers, the mailbox is also registered
* with the FW. Allocate memory for all commands as well.
* This is our big allocator.
@@ -975,9 +962,10 @@
* Allocate the common 16-byte aligned memory for the handshake
* mailbox.
*/
- raid_dev->una_mbox64 = pci_zalloc_consistent(adapter->pdev,
- sizeof(mbox64_t),
- &raid_dev->una_mbox64_dma);
+ raid_dev->una_mbox64 = dma_alloc_coherent(&adapter->pdev->dev,
+ sizeof(mbox64_t),
+ &raid_dev->una_mbox64_dma,
+ GFP_KERNEL);
if (!raid_dev->una_mbox64) {
con_log(CL_ANN, (KERN_WARNING
@@ -1003,8 +991,8 @@
align;
// Allocate memory for commands issued internally
- adapter->ibuf = pci_zalloc_consistent(pdev, MBOX_IBUF_SIZE,
- &adapter->ibuf_dma_h);
+ adapter->ibuf = dma_alloc_coherent(&pdev->dev, MBOX_IBUF_SIZE,
+ &adapter->ibuf_dma_h, GFP_KERNEL);
if (!adapter->ibuf) {
con_log(CL_ANN, (KERN_WARNING
@@ -1082,7 +1070,7 @@
scb->scp = NULL;
scb->state = SCB_FREE;
- scb->dma_direction = PCI_DMA_NONE;
+ scb->dma_direction = DMA_NONE;
scb->dma_type = MRAID_DMA_NONE;
scb->dev_channel = -1;
scb->dev_target = -1;
@@ -1098,10 +1086,10 @@
out_free_scb_list:
kfree(adapter->kscb_list);
out_free_ibuf:
- pci_free_consistent(pdev, MBOX_IBUF_SIZE, (void *)adapter->ibuf,
+ dma_free_coherent(&pdev->dev, MBOX_IBUF_SIZE, (void *)adapter->ibuf,
adapter->ibuf_dma_h);
out_free_common_mbox:
- pci_free_consistent(adapter->pdev, sizeof(mbox64_t),
+ dma_free_coherent(&adapter->pdev->dev, sizeof(mbox64_t),
(caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma);
return -1;
@@ -1123,10 +1111,10 @@
kfree(adapter->kscb_list);
- pci_free_consistent(adapter->pdev, MBOX_IBUF_SIZE,
+ dma_free_coherent(&adapter->pdev->dev, MBOX_IBUF_SIZE,
(void *)adapter->ibuf, adapter->ibuf_dma_h);
- pci_free_consistent(adapter->pdev, sizeof(mbox64_t),
+ dma_free_coherent(&adapter->pdev->dev, sizeof(mbox64_t),
(caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma);
return;
}
@@ -1250,8 +1238,7 @@
dma_pool_free(raid_dev->sg_pool_handle, sg_pci_blk[i].vaddr,
sg_pci_blk[i].dma_addr);
}
- if (raid_dev->sg_pool_handle)
- dma_pool_destroy(raid_dev->sg_pool_handle);
+ dma_pool_destroy(raid_dev->sg_pool_handle);
epthru_pci_blk = raid_dev->epthru_pool;
@@ -1259,8 +1246,7 @@
dma_pool_free(raid_dev->epthru_pool_handle,
epthru_pci_blk[i].vaddr, epthru_pci_blk[i].dma_addr);
}
- if (raid_dev->epthru_pool_handle)
- dma_pool_destroy(raid_dev->epthru_pool_handle);
+ dma_pool_destroy(raid_dev->epthru_pool_handle);
mbox_pci_blk = raid_dev->mbox_pool;
@@ -1268,8 +1254,7 @@
dma_pool_free(raid_dev->mbox_pool_handle,
mbox_pci_blk[i].vaddr, mbox_pci_blk[i].dma_addr);
}
- if (raid_dev->mbox_pool_handle)
- dma_pool_destroy(raid_dev->mbox_pool_handle);
+ dma_pool_destroy(raid_dev->mbox_pool_handle);
return;
}
@@ -1428,12 +1413,6 @@
adapter->outstanding_cmds++;
- if (scb->dma_direction == PCI_DMA_TODEVICE)
- pci_dma_sync_sg_for_device(adapter->pdev,
- scsi_sglist(scb->scp),
- scsi_sg_count(scb->scp),
- PCI_DMA_TODEVICE);
-
mbox->busy = 1; // Set busy
mbox->poll = 0;
mbox->ack = 0;
@@ -2181,31 +2160,6 @@
/**
- * megaraid_mbox_sync_scb - sync kernel buffers
- * @adapter : controller's soft state
- * @scb : pointer to the resource packet
- *
- * DMA sync if required.
- */
-static void
-megaraid_mbox_sync_scb(adapter_t *adapter, scb_t *scb)
-{
- mbox_ccb_t *ccb;
-
- ccb = (mbox_ccb_t *)scb->ccb;
-
- if (scb->dma_direction == PCI_DMA_FROMDEVICE)
- pci_dma_sync_sg_for_cpu(adapter->pdev,
- scsi_sglist(scb->scp),
- scsi_sg_count(scb->scp),
- PCI_DMA_FROMDEVICE);
-
- scsi_dma_unmap(scb->scp);
- return;
-}
-
-
-/**
* megaraid_mbox_dpc - the tasklet to complete the commands from completed list
* @devp : pointer to HBA soft state
*
@@ -2403,9 +2357,7 @@
megaraid_mbox_display_scb(adapter, scb);
}
- // Free our internal resources and call the mid-layer callback
- // routine
- megaraid_mbox_sync_scb(adapter, scb);
+ scsi_dma_unmap(scp);
// remove from local clist
list_del_init(&scb->list);
@@ -2577,7 +2529,6 @@
uint8_t raw_mbox[sizeof(mbox_t)];
int rval;
int recovery_window;
- int recovering;
int i;
uioc_t *kioc;
@@ -2591,7 +2542,6 @@
return FAILED;
}
-
// Under exceptional conditions, FW can take up to 3 minutes to
// complete command processing. Wait for additional 2 minutes for the
// pending commands counter to go down to 0. If it doesn't, let the
@@ -2640,8 +2590,6 @@
recovery_window = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT;
- recovering = adapter->outstanding_cmds;
-
for (i = 0; i < recovery_window; i++) {
megaraid_ack_sequence(adapter);
@@ -2725,13 +2673,10 @@
mbox_post_sync_cmd(adapter_t *adapter, uint8_t raw_mbox[])
{
mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
- mbox64_t *mbox64;
mbox_t *mbox;
uint8_t status;
int i;
-
- mbox64 = raid_dev->mbox64;
mbox = raid_dev->mbox;
/*
@@ -2948,9 +2893,8 @@
* Issue an ENQUIRY3 command to find out certain adapter parameters,
* e.g., max channels, max commands etc.
*/
- pinfo = pci_zalloc_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
- &pinfo_dma_h);
-
+ pinfo = dma_alloc_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t),
+ &pinfo_dma_h, GFP_KERNEL);
if (pinfo == NULL) {
con_log(CL_ANN, (KERN_WARNING
"megaraid: out of memory, %s %d\n", __func__,
@@ -2971,7 +2915,7 @@
con_log(CL_ANN, (KERN_WARNING "megaraid: Inquiry3 failed\n"));
- pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
+ dma_free_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t),
pinfo, pinfo_dma_h);
return -1;
@@ -3002,7 +2946,7 @@
con_log(CL_ANN, (KERN_WARNING
"megaraid: product info failed\n"));
- pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
+ dma_free_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t),
pinfo, pinfo_dma_h);
return -1;
@@ -3038,7 +2982,7 @@
"megaraid: fw version:[%s] bios version:[%s]\n",
adapter->fw_version, adapter->bios_version));
- pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t), pinfo,
+ dma_free_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t), pinfo,
pinfo_dma_h);
return 0;
@@ -3135,7 +3079,6 @@
static int
megaraid_mbox_support_random_del(adapter_t *adapter)
{
- mbox_t *mbox;
uint8_t raw_mbox[sizeof(mbox_t)];
int rval;
@@ -3157,8 +3100,6 @@
return 0;
}
- mbox = (mbox_t *)raw_mbox;
-
memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
raw_mbox[0] = FC_DEL_LOGDRV;
@@ -3263,12 +3204,8 @@
static void
megaraid_mbox_flush_cache(adapter_t *adapter)
{
- mbox_t *mbox;
uint8_t raw_mbox[sizeof(mbox_t)];
-
- mbox = (mbox_t *)raw_mbox;
-
memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
raw_mbox[0] = FLUSH_ADAPTER;
@@ -3299,7 +3236,6 @@
mbox_t *mbox;
uint8_t raw_mbox[sizeof(mbox_t)];
mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
- mbox64_t *mbox64;
int status = 0;
int i;
uint32_t dword;
@@ -3310,7 +3246,6 @@
raw_mbox[0] = 0xFF;
- mbox64 = raid_dev->mbox64;
mbox = raid_dev->mbox;
/* Wait until mailbox is free */
@@ -3515,7 +3450,7 @@
scb->scp = NULL;
scb->state = SCB_FREE;
- scb->dma_direction = PCI_DMA_NONE;
+ scb->dma_direction = DMA_NONE;
scb->dma_type = MRAID_DMA_NONE;
scb->dev_channel = -1;
scb->dev_target = -1;
@@ -3653,7 +3588,7 @@
scb->state = SCB_ACTIVE;
scb->dma_type = MRAID_DMA_NONE;
- scb->dma_direction = PCI_DMA_NONE;
+ scb->dma_direction = DMA_NONE;
ccb = (mbox_ccb_t *)scb->ccb;
mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf;
@@ -3794,10 +3729,6 @@
static int
gather_hbainfo(adapter_t *adapter, mraid_hba_info_t *hinfo)
{
- uint8_t dmajor;
-
- dmajor = megaraid_mbox_version[0];
-
hinfo->pci_vendor_id = adapter->pdev->vendor;
hinfo->pci_device_id = adapter->pdev->device;
hinfo->subsys_vendor_id = adapter->pdev->subsystem_vendor;
@@ -3843,8 +3774,8 @@
raid_dev->sysfs_mbox64 = kmalloc(sizeof(mbox64_t), GFP_KERNEL);
- raid_dev->sysfs_buffer = pci_alloc_consistent(adapter->pdev,
- PAGE_SIZE, &raid_dev->sysfs_buffer_dma);
+ raid_dev->sysfs_buffer = dma_alloc_coherent(&adapter->pdev->dev,
+ PAGE_SIZE, &raid_dev->sysfs_buffer_dma, GFP_KERNEL);
if (!raid_dev->sysfs_uioc || !raid_dev->sysfs_mbox64 ||
!raid_dev->sysfs_buffer) {
@@ -3881,7 +3812,7 @@
kfree(raid_dev->sysfs_mbox64);
if (raid_dev->sysfs_buffer) {
- pci_free_consistent(adapter->pdev, PAGE_SIZE,
+ dma_free_coherent(&adapter->pdev->dev, PAGE_SIZE,
raid_dev->sysfs_buffer, raid_dev->sysfs_buffer_dma);
}
}
diff --git a/drivers/scsi/megaraid/megaraid_mbox.h b/drivers/scsi/megaraid/megaraid_mbox.h
index c1d86d9..3e4347c 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.h
+++ b/drivers/scsi/megaraid/megaraid_mbox.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
*
* Linux MegaRAID device driver
*
* Copyright (c) 2003-2004 LSI Logic Corporation.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* FILE : megaraid_mbox.h
*/
@@ -117,7 +113,7 @@
* @raw_mbox : raw mailbox pointer
* @mbox : mailbox
* @mbox64 : extended mailbox
- * @mbox_dma_h : maibox dma address
+ * @mbox_dma_h : mailbox dma address
* @sgl64 : 64-bit scatter-gather list
* @sgl32 : 32-bit scatter-gather list
* @sgl_dma_h : dma handle for the scatter-gather list
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index 8428247..59cca89 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Linux MegaRAID device driver
*
* Copyright (c) 2003-2004 LSI Logic Corporation.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* FILE : megaraid_mm.c
* Version : v2.20.2.7 (Jul 16 2006)
*
@@ -1017,8 +1013,7 @@
kfree(adapter->kioc_list);
kfree(adapter->mbox_list);
- if (adapter->pthru_dma_pool)
- dma_pool_destroy(adapter->pthru_dma_pool);
+ dma_pool_destroy(adapter->pthru_dma_pool);
kfree(adapter);
diff --git a/drivers/scsi/megaraid/megaraid_mm.h b/drivers/scsi/megaraid/megaraid_mm.h
index a30e725..bf40115 100644
--- a/drivers/scsi/megaraid/megaraid_mm.h
+++ b/drivers/scsi/megaraid/megaraid_mm.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
*
* Linux MegaRAID device driver
*
* Copyright (c) 2003-2004 LSI Logic Corporation.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* FILE : megaraid_mm.h
*/
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 67d356d..a6e788c 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -1,32 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Linux MegaRAID driver for SAS based RAID controllers
*
* Copyright (c) 2003-2013 LSI Corporation
- * Copyright (c) 2013-2014 Avago Technologies
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ * Copyright (c) 2013-2016 Avago Technologies
+ * Copyright (c) 2016-2018 Broadcom Inc.
*
* FILE: megaraid_sas.h
*
- * Authors: Avago Technologies
- * Kashyap Desai <kashyap.desai@avagotech.com>
- * Sumit Saxena <sumit.saxena@avagotech.com>
+ * Authors: Broadcom Inc.
+ * Kashyap Desai <kashyap.desai@broadcom.com>
+ * Sumit Saxena <sumit.saxena@broadcom.com>
*
- * Send feedback to: megaraidlinux.pdl@avagotech.com
- *
- * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
- * San Jose, California 95131
+ * Send feedback to: megaraidlinux.pdl@broadcom.com
*/
#ifndef LSI_MEGARAID_SAS_H
@@ -35,8 +21,8 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "07.706.03.00-rc1"
-#define MEGASAS_RELDATE "May 21, 2018"
+#define MEGASAS_VERSION "07.710.50.00-rc1"
+#define MEGASAS_RELDATE "June 28, 2019"
/*
* Device IDs
@@ -62,6 +48,14 @@
#define PCI_DEVICE_ID_LSI_TOMCAT 0x0017
#define PCI_DEVICE_ID_LSI_VENTURA_4PORT 0x001B
#define PCI_DEVICE_ID_LSI_CRUSADER_4PORT 0x001C
+#define PCI_DEVICE_ID_LSI_AERO_10E1 0x10e1
+#define PCI_DEVICE_ID_LSI_AERO_10E2 0x10e2
+#define PCI_DEVICE_ID_LSI_AERO_10E5 0x10e5
+#define PCI_DEVICE_ID_LSI_AERO_10E6 0x10e6
+#define PCI_DEVICE_ID_LSI_AERO_10E0 0x10e0
+#define PCI_DEVICE_ID_LSI_AERO_10E3 0x10e3
+#define PCI_DEVICE_ID_LSI_AERO_10E4 0x10e4
+#define PCI_DEVICE_ID_LSI_AERO_10E7 0x10e7
/*
* Intel HBA SSDIDs
@@ -133,6 +127,8 @@
#define MFI_RESET_ADAPTER 0x00000002
#define MEGAMFI_FRAME_SIZE 64
+#define MFI_STATE_FAULT_CODE 0x0FFF0000
+#define MFI_STATE_FAULT_SUBCODE 0x0000FF00
/*
* During FW init, clear pending cmds & reset state using inbound_msg_0
*
@@ -142,6 +138,7 @@
* CLR_HANDSHAKE: FW is waiting for HANDSHAKE from BIOS or Driver
* HOTPLUG : Resume from Hotplug
* MFI_STOP_ADP : Send signal to FW to stop processing
+ * MFI_ADP_TRIGGER_SNAP_DUMP: Inform firmware to initiate snap dump
*/
#define WRITE_SEQUENCE_OFFSET (0x0000000FC) /* I20 */
#define HOST_DIAGNOSTIC_OFFSET (0x000000F8) /* I20 */
@@ -158,6 +155,7 @@
#define MFI_RESET_FLAGS MFI_INIT_READY| \
MFI_INIT_MFIMODE| \
MFI_INIT_ABORT
+#define MFI_ADP_TRIGGER_SNAP_DUMP 0x00000100
#define MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE (0x01)
/*
@@ -198,6 +196,7 @@
MFI_CMD_SMP = 0x7,
MFI_CMD_STP = 0x8,
MFI_CMD_NVME = 0x9,
+ MFI_CMD_TOOLBOX = 0xa,
MFI_CMD_OP_COUNT,
MFI_CMD_INVALID = 0xff
};
@@ -786,6 +785,38 @@
u8 targetId[MAX_LOGICAL_DRIVES_EXT];
};
+struct MR_HOST_DEVICE_LIST_ENTRY {
+ struct {
+ union {
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u8 reserved:7;
+ u8 is_sys_pd:1;
+#else
+ u8 is_sys_pd:1;
+ u8 reserved:7;
+#endif
+ } bits;
+ u8 byte;
+ } u;
+ } flags;
+ u8 scsi_type;
+ __le16 target_id;
+ u8 reserved[4];
+ __le64 sas_addr[2];
+} __packed;
+
+struct MR_HOST_DEVICE_LIST {
+ __le32 size;
+ __le32 count;
+ __le32 reserved[2];
+ struct MR_HOST_DEVICE_LIST_ENTRY host_device_list[1];
+} __packed;
+
+#define HOST_DEVICE_LIST_SZ (sizeof(struct MR_HOST_DEVICE_LIST) + \
+ (sizeof(struct MR_HOST_DEVICE_LIST_ENTRY) * \
+ (MEGASAS_MAX_PD + MAX_LOGICAL_DRIVES_EXT - 1)))
+
/*
* SAS controller properties
@@ -860,8 +891,26 @@
u32 reserved:18;
#endif
} OnOffProperties;
- u8 autoSnapVDSpace;
- u8 viewSpace;
+
+ union {
+ u8 autoSnapVDSpace;
+ u8 viewSpace;
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u16 reserved3:9;
+ u16 enable_fw_dev_list:1;
+ u16 reserved2:1;
+ u16 enable_snap_dump:1;
+ u16 reserved1:4;
+#else
+ u16 reserved1:4;
+ u16 enable_snap_dump:1;
+ u16 reserved2:1;
+ u16 enable_fw_dev_list:1;
+ u16 reserved3:9;
+#endif
+ } on_off_properties2;
+ };
__le16 spinDownTime;
u8 reserved[24];
} __packed;
@@ -1407,7 +1456,39 @@
u8 reserved6[64];
- u32 rsvdForAdptOp[64];
+ struct {
+ #if defined(__BIG_ENDIAN_BITFIELD)
+ u32 reserved:19;
+ u32 support_pci_lane_margining: 1;
+ u32 support_psoc_update:1;
+ u32 support_force_personality_change:1;
+ u32 support_fde_type_mix:1;
+ u32 support_snap_dump:1;
+ u32 support_nvme_tm:1;
+ u32 support_oce_only:1;
+ u32 support_ext_mfg_vpd:1;
+ u32 support_pcie:1;
+ u32 support_cvhealth_info:1;
+ u32 support_profile_change:2;
+ u32 mr_config_ext2_supported:1;
+ #else
+ u32 mr_config_ext2_supported:1;
+ u32 support_profile_change:2;
+ u32 support_cvhealth_info:1;
+ u32 support_pcie:1;
+ u32 support_ext_mfg_vpd:1;
+ u32 support_oce_only:1;
+ u32 support_nvme_tm:1;
+ u32 support_snap_dump:1;
+ u32 support_fde_type_mix:1;
+ u32 support_force_personality_change:1;
+ u32 support_psoc_update:1;
+ u32 support_pci_lane_margining: 1;
+ u32 reserved:19;
+ #endif
+ } adapter_operations5;
+
+ u32 rsvdForAdptOp[63];
u8 reserved7[3];
@@ -1441,7 +1522,9 @@
#define MEGASAS_FW_BUSY 1
/* Driver's internal Logging levels*/
-#define OCR_LOGS (1 << 0)
+#define OCR_DEBUG (1 << 0)
+#define TM_DEBUG (1 << 1)
+#define LD_PD_DEBUG (1 << 2)
#define SCAN_PD_CHANNEL 0x1
#define SCAN_VD_CHANNEL 0x2
@@ -1485,7 +1568,6 @@
#define MEGASAS_IOCTL_CMD 0
#define MEGASAS_DEFAULT_CMD_TIMEOUT 90
#define MEGASAS_THROTTLE_QUEUE_DEPTH 16
-#define MEGASAS_BLOCKED_CMD_TIMEOUT 60
#define MEGASAS_DEFAULT_TM_TIMEOUT 50
/*
* FW reports the maximum of number of commands that it can accept (maximum
@@ -1518,6 +1600,7 @@
#define MFI_IO_TIMEOUT_SECS 180
#define MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF (5 * HZ)
#define MEGASAS_OCR_SETTLE_TIME_VF (1000 * 30)
+#define MEGASAS_SRIOV_MAX_RESET_TRIES_VF 1
#define MEGASAS_ROUTINE_WAIT_TIME_VF 300
#define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000
#define MFI_REPLY_GEN2_MESSAGE_INTERRUPT 0x00000001
@@ -1542,13 +1625,21 @@
#define MR_CAN_HANDLE_SYNC_CACHE_OFFSET 0X01000000
+#define MR_ATOMIC_DESCRIPTOR_SUPPORT_OFFSET (1 << 24)
+
#define MR_CAN_HANDLE_64_BIT_DMA_OFFSET (1 << 25)
+#define MR_INTR_COALESCING_SUPPORT_OFFSET (1 << 26)
+
+#define MEGASAS_WATCHDOG_THREAD_INTERVAL 1000
+#define MEGASAS_WAIT_FOR_NEXT_DMA_MSECS 20
+#define MEGASAS_WATCHDOG_WAIT_COUNT 50
enum MR_ADAPTER_TYPE {
MFI_SERIES = 1,
THUNDERBOLT_SERIES = 2,
INVADER_SERIES = 3,
VENTURA_SERIES = 4,
+ AERO_SERIES = 5,
};
/*
@@ -1588,11 +1679,10 @@
u32 reserved_3[3]; /*00A4h*/
- u32 outbound_scratch_pad ; /*00B0h*/
- u32 outbound_scratch_pad_2; /*00B4h*/
- u32 outbound_scratch_pad_3; /*00B8h*/
- u32 outbound_scratch_pad_4; /*00BCh*/
-
+ u32 outbound_scratch_pad_0; /*00B0h*/
+ u32 outbound_scratch_pad_1; /*00B4h*/
+ u32 outbound_scratch_pad_2; /*00B8h*/
+ u32 outbound_scratch_pad_3; /*00BCh*/
u32 inbound_low_queue_port ; /*00C0h*/
@@ -1664,7 +1754,8 @@
typedef union _MFI_CAPABILITIES {
struct {
#if defined(__BIG_ENDIAN_BITFIELD)
- u32 reserved:17;
+ u32 reserved:16;
+ u32 support_fw_exposed_dev_list:1;
u32 support_nvme_passthru:1;
u32 support_64bit_mode:1;
u32 support_pd_map_target_id:1;
@@ -1696,7 +1787,8 @@
u32 support_pd_map_target_id:1;
u32 support_64bit_mode:1;
u32 support_nvme_passthru:1;
- u32 reserved:17;
+ u32 support_fw_exposed_dev_list:1;
+ u32 reserved:16;
#endif
} mfi_capabilities;
__le32 reg;
@@ -1715,7 +1807,7 @@
__le32 pad_0; /*0Ch */
__le16 flags; /*10h */
- __le16 reserved_3; /*12h */
+ __le16 replyqueue_mask; /*12h */
__le32 data_xfer_len; /*14h */
__le32 queue_info_new_phys_addr_lo; /*18h */
@@ -2113,6 +2205,10 @@
struct megasas_irq_context {
struct megasas_instance *instance;
u32 MSIxIndex;
+ u32 os_irq;
+ struct irq_poll irqpoll;
+ bool irq_poll_scheduled;
+ bool irq_line_enable;
};
struct MR_DRV_SYSTEM_INFO {
@@ -2143,6 +2239,23 @@
#define MR_DEFAULT_NVME_MDTS_KB 128
#define MR_NVME_PAGE_SIZE_MASK 0x000000FF
+/*Aero performance parameters*/
+#define MR_HIGH_IOPS_QUEUE_COUNT 8
+#define MR_DEVICE_HIGH_IOPS_DEPTH 8
+#define MR_HIGH_IOPS_BATCH_COUNT 16
+
+enum MR_PERF_MODE {
+ MR_BALANCED_PERF_MODE = 0,
+ MR_IOPS_PERF_MODE = 1,
+ MR_LATENCY_PERF_MODE = 2,
+};
+
+#define MEGASAS_PERF_MODE_2STR(mode) \
+ ((mode) == MR_BALANCED_PERF_MODE ? "Balanced" : \
+ (mode) == MR_IOPS_PERF_MODE ? "IOPS" : \
+ (mode) == MR_LATENCY_PERF_MODE ? "Latency" : \
+ "Unknown")
+
struct megasas_instance {
unsigned int *reply_map;
@@ -2181,6 +2294,12 @@
struct MR_LD_TARGETID_LIST *ld_targetid_list_buf;
dma_addr_t ld_targetid_list_buf_h;
+ struct MR_HOST_DEVICE_LIST *host_device_list_buf;
+ dma_addr_t host_device_list_buf_h;
+
+ struct MR_SNAPDUMP_PROPERTIES *snapdump_prop;
+ dma_addr_t snapdump_prop_h;
+
void *crash_buf[MAX_CRASH_DUMP_SIZE];
unsigned int fw_crash_buffer_size;
unsigned int fw_crash_state;
@@ -2193,6 +2312,7 @@
u32 secure_jbod_support;
u32 support_morethan256jbod; /* FW support for more than 256 PD/JBOD */
bool use_seqnum_jbod_fp; /* Added for PD sequence */
+ bool smp_affinity_enable;
spinlock_t crashdump_lock;
struct megasas_register_set __iomem *reg_set;
@@ -2210,6 +2330,7 @@
u16 ldio_threshold;
u16 cur_can_queue;
u32 max_sectors_per_req;
+ bool msix_load_balance;
struct megasas_aen_event *ev;
struct megasas_cmd **cmd_list;
@@ -2237,20 +2358,20 @@
struct pci_dev *pdev;
u32 unique_id;
u32 fw_support_ieee;
+ u32 threshold_reply_count;
atomic_t fw_outstanding;
atomic_t ldio_outstanding;
atomic_t fw_reset_no_pci_access;
- atomic_t ieee_sgl;
- atomic_t prp_sgl;
- atomic_t sge_holes_type1;
- atomic_t sge_holes_type2;
- atomic_t sge_holes_type3;
+ atomic64_t total_io_count;
+ atomic64_t high_iops_outstanding;
struct megasas_instance_template *instancet;
struct tasklet_struct isr_tasklet;
struct work_struct work_init;
- struct work_struct crash_init;
+ struct delayed_work fw_fault_work;
+ struct workqueue_struct *fw_fault_work_q;
+ char fault_handler_work_q_name[48];
u8 flag;
u8 unload;
@@ -2308,9 +2429,22 @@
u8 adapter_type;
bool consistent_mask_64bit;
bool support_nvme_passthru;
+ bool enable_sdev_max_qd;
u8 task_abort_tmo;
u8 max_reset_tmo;
+ u8 snapdump_wait_time;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_root;
+ struct dentry *raidmap_dump;
+#endif
+ u8 enable_fw_dev_list;
+ bool atomic_desc_support;
+ bool support_seqnum_jbod_fp;
+ bool support_pci_lane_margining;
+ u8 low_latency_index_start;
+ int perf_mode;
};
+
struct MR_LD_VF_MAP {
u32 size;
union MR_LD_REF ref;
@@ -2386,9 +2520,9 @@
void (*enable_intr)(struct megasas_instance *);
void (*disable_intr)(struct megasas_instance *);
- int (*clear_intr)(struct megasas_register_set __iomem *);
+ int (*clear_intr)(struct megasas_instance *);
- u32 (*read_fw_status_reg)(struct megasas_register_set __iomem *);
+ u32 (*read_fw_status_reg)(struct megasas_instance *);
int (*adp_reset)(struct megasas_instance *, \
struct megasas_register_set __iomem *);
int (*check_reset)(struct megasas_instance *, \
@@ -2535,11 +2669,11 @@
bool is_target_prop);
int megasas_get_target_prop(struct megasas_instance *instance,
struct scsi_device *sdev);
+void megasas_get_snapdump_properties(struct megasas_instance *instance);
int megasas_set_crash_dump_params(struct megasas_instance *instance,
u8 crash_buf_state);
void megasas_free_host_crash_buffer(struct megasas_instance *instance);
-void megasas_fusion_crash_dump_wq(struct work_struct *work);
void megasas_return_cmd_fusion(struct megasas_instance *instance,
struct megasas_cmd_fusion *cmd);
@@ -2560,7 +2694,15 @@
u32 mega_mod64(u64 dividend, u32 divisor);
int megasas_alloc_fusion_context(struct megasas_instance *instance);
void megasas_free_fusion_context(struct megasas_instance *instance);
+int megasas_fusion_start_watchdog(struct megasas_instance *instance);
+void megasas_fusion_stop_watchdog(struct megasas_instance *instance);
+
void megasas_set_dma_settings(struct megasas_instance *instance,
struct megasas_dcmd_frame *dcmd,
dma_addr_t dma_addr, u32 dma_len);
+int megasas_adp_reset_wait_for_ready(struct megasas_instance *instance,
+ bool do_adp_reset,
+ int ocr_context);
+int megasas_irqpoll(struct irq_poll *irqpoll, int budget);
+void megasas_dump_fusion_io(struct scsi_cmnd *scmd);
#endif /*LSI_MEGARAID_SAS_H */
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index f6de752..42cf38c 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1,34 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Linux MegaRAID driver for SAS based RAID controllers
*
* Copyright (c) 2003-2013 LSI Corporation
- * Copyright (c) 2013-2014 Avago Technologies
+ * Copyright (c) 2013-2016 Avago Technologies
+ * Copyright (c) 2016-2018 Broadcom Inc.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * Authors: Avago Technologies
+ * Authors: Broadcom Inc.
* Sreenivas Bagalkote
* Sumant Patro
* Bo Yang
* Adam Radford
- * Kashyap Desai <kashyap.desai@avagotech.com>
- * Sumit Saxena <sumit.saxena@avagotech.com>
+ * Kashyap Desai <kashyap.desai@broadcom.com>
+ * Sumit Saxena <sumit.saxena@broadcom.com>
*
- * Send feedback to: megaraidlinux.pdl@avagotech.com
- *
- * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
- * San Jose, California 95131
+ * Send feedback to: megaraidlinux.pdl@broadcom.com
*/
#include <linux/kernel.h>
@@ -50,12 +36,14 @@
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/vmalloc.h>
+#include <linux/irq_poll.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_dbg.h>
#include "megaraid_sas_fusion.h"
#include "megaraid_sas.h"
@@ -64,52 +52,71 @@
* Will be set in megasas_init_mfi if user does not provide
*/
static unsigned int max_sectors;
-module_param_named(max_sectors, max_sectors, int, 0);
+module_param_named(max_sectors, max_sectors, int, 0444);
MODULE_PARM_DESC(max_sectors,
"Maximum number of sectors per IO command");
static int msix_disable;
-module_param(msix_disable, int, S_IRUGO);
+module_param(msix_disable, int, 0444);
MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");
static unsigned int msix_vectors;
-module_param(msix_vectors, int, S_IRUGO);
+module_param(msix_vectors, int, 0444);
MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");
static int allow_vf_ioctls;
-module_param(allow_vf_ioctls, int, S_IRUGO);
+module_param(allow_vf_ioctls, int, 0444);
MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");
static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
-module_param(throttlequeuedepth, int, S_IRUGO);
+module_param(throttlequeuedepth, int, 0444);
MODULE_PARM_DESC(throttlequeuedepth,
"Adapter queue depth when throttled due to I/O timeout. Default: 16");
unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
-module_param(resetwaittime, int, S_IRUGO);
-MODULE_PARM_DESC(resetwaittime, "Wait time in seconds after I/O timeout "
- "before resetting adapter. Default: 180");
+module_param(resetwaittime, int, 0444);
+MODULE_PARM_DESC(resetwaittime, "Wait time in (1-180s) after I/O timeout before resetting adapter. Default: 180s");
int smp_affinity_enable = 1;
-module_param(smp_affinity_enable, int, S_IRUGO);
+module_param(smp_affinity_enable, int, 0444);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");
int rdpq_enable = 1;
-module_param(rdpq_enable, int, S_IRUGO);
-MODULE_PARM_DESC(rdpq_enable, " Allocate reply queue in chunks for large queue depth enable/disable Default: disable(0)");
+module_param(rdpq_enable, int, 0444);
+MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable Default: enable(1)");
unsigned int dual_qdepth_disable;
-module_param(dual_qdepth_disable, int, S_IRUGO);
+module_param(dual_qdepth_disable, int, 0444);
MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");
unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
-module_param(scmd_timeout, int, S_IRUGO);
+module_param(scmd_timeout, int, 0444);
MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer.");
+int perf_mode = -1;
+module_param(perf_mode, int, 0444);
+MODULE_PARM_DESC(perf_mode, "Performance mode (only for Aero adapters), options:\n\t\t"
+ "0 - balanced: High iops and low latency queues are allocated &\n\t\t"
+ "interrupt coalescing is enabled only on high iops queues\n\t\t"
+ "1 - iops: High iops queues are not allocated &\n\t\t"
+ "interrupt coalescing is enabled on all queues\n\t\t"
+ "2 - latency: High iops queues are not allocated &\n\t\t"
+ "interrupt coalescing is disabled on all queues\n\t\t"
+ "default mode is 'balanced'"
+ );
+
+int event_log_level = MFI_EVT_CLASS_CRITICAL;
+module_param(event_log_level, int, 0644);
+MODULE_PARM_DESC(event_log_level, "Asynchronous event logging level- range is: -2(CLASS_DEBUG) to 4(CLASS_DEAD), Default: 2(CLASS_CRITICAL)");
+
+unsigned int enable_sdev_max_qd;
+module_param(enable_sdev_max_qd, int, 0444);
+MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. Default: 0");
+
MODULE_LICENSE("GPL");
MODULE_VERSION(MEGASAS_VERSION);
-MODULE_AUTHOR("megaraidlinux.pdl@avagotech.com");
-MODULE_DESCRIPTION("Avago MegaRAID SAS Driver");
+MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com");
+MODULE_DESCRIPTION("Broadcom MegaRAID SAS Driver");
int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
static int megasas_get_pd_list(struct megasas_instance *instance);
@@ -165,6 +172,14 @@
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)},
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)},
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER_4PORT)},
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E1)},
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E2)},
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E5)},
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E6)},
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E0)},
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E3)},
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E4)},
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E7)},
{}
};
@@ -181,15 +196,22 @@
u32 megasas_dbg_lvl;
static u32 support_device_change;
static bool support_nvme_encapsulation;
+static bool support_pci_lane_margining;
/* define lock for aen poll */
spinlock_t poll_aen_lock;
+extern struct dentry *megasas_debugfs_root;
+extern void megasas_init_debugfs(void);
+extern void megasas_exit_debugfs(void);
+extern void megasas_setup_debugfs(struct megasas_instance *instance);
+extern void megasas_destroy_debugfs(struct megasas_instance *instance);
+
void
megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
u8 alt_status);
static u32
-megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs);
+megasas_read_fw_status_reg_gen2(struct megasas_instance *instance);
static int
megasas_adp_reset_gen2(struct megasas_instance *instance,
struct megasas_register_set __iomem *reg_set);
@@ -219,6 +241,28 @@
static inline void
megasas_init_ctrl_params(struct megasas_instance *instance);
+u32 megasas_readl(struct megasas_instance *instance,
+ const volatile void __iomem *addr)
+{
+ u32 i = 0, ret_val;
+ /*
+ * Due to a HW errata in Aero controllers, reads to certain
+ * Fusion registers could intermittently return all zeroes.
+ * This behavior is transient in nature and subsequent reads will
+ * return valid value. As a workaround in driver, retry readl for
+ * up to three times until a non-zero value is read.
+ */
+ if (instance->adapter_type == AERO_SERIES) {
+ do {
+ ret_val = readl(addr);
+ i++;
+ } while (ret_val == 0 && i < 3);
+ return ret_val;
+ } else {
+ return readl(addr);
+ }
+}
+
/**
* megasas_set_dma_settings - Populate DMA address, length and flags for DCMDs
* @instance: Adapter soft state
@@ -244,7 +288,7 @@
}
}
-void
+static void
megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
instance->instancet->fire_cmd(instance,
@@ -368,7 +412,13 @@
union megasas_evt_class_locale class_locale;
class_locale.word = le32_to_cpu(evt_detail->cl.word);
- if (class_locale.members.class >= MFI_EVT_CLASS_CRITICAL)
+ if ((event_log_level < MFI_EVT_CLASS_DEBUG) ||
+ (event_log_level > MFI_EVT_CLASS_DEAD)) {
+ printk(KERN_WARNING "megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n");
+ event_log_level = MFI_EVT_CLASS_CRITICAL;
+ }
+
+ if (class_locale.members.class >= event_log_level)
dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n",
le32_to_cpu(evt_detail->seq_num),
format_timestamp(le32_to_cpu(evt_detail->time_stamp)),
@@ -419,19 +469,21 @@
* @regs: MFI register set
*/
static u32
-megasas_read_fw_status_reg_xscale(struct megasas_register_set __iomem * regs)
+megasas_read_fw_status_reg_xscale(struct megasas_instance *instance)
{
- return readl(&(regs)->outbound_msg_0);
+ return readl(&instance->reg_set->outbound_msg_0);
}
/**
* megasas_clear_interrupt_xscale - Check & clear interrupt
* @regs: MFI register set
*/
static int
-megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs)
+megasas_clear_intr_xscale(struct megasas_instance *instance)
{
u32 status;
u32 mfiStatus = 0;
+ struct megasas_register_set __iomem *regs;
+ regs = instance->reg_set;
/*
* Check if it is our interrupt
@@ -596,9 +648,9 @@
* @regs: MFI register set
*/
static u32
-megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs)
+megasas_read_fw_status_reg_ppc(struct megasas_instance *instance)
{
- return readl(&(regs)->outbound_scratch_pad);
+ return readl(&instance->reg_set->outbound_scratch_pad_0);
}
/**
@@ -606,9 +658,11 @@
* @regs: MFI register set
*/
static int
-megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
+megasas_clear_intr_ppc(struct megasas_instance *instance)
{
u32 status, mfiStatus = 0;
+ struct megasas_register_set __iomem *regs;
+ regs = instance->reg_set;
/*
* Check if it is our interrupt
@@ -721,9 +775,9 @@
* @regs: MFI register set
*/
static u32
-megasas_read_fw_status_reg_skinny(struct megasas_register_set __iomem *regs)
+megasas_read_fw_status_reg_skinny(struct megasas_instance *instance)
{
- return readl(&(regs)->outbound_scratch_pad);
+ return readl(&instance->reg_set->outbound_scratch_pad_0);
}
/**
@@ -731,10 +785,12 @@
* @regs: MFI register set
*/
static int
-megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
+megasas_clear_intr_skinny(struct megasas_instance *instance)
{
u32 status;
u32 mfiStatus = 0;
+ struct megasas_register_set __iomem *regs;
+ regs = instance->reg_set;
/*
* Check if it is our interrupt
@@ -748,7 +804,7 @@
/*
* Check if it is our interrupt
*/
- if ((megasas_read_fw_status_reg_skinny(regs) & MFI_STATE_MASK) ==
+ if ((megasas_read_fw_status_reg_skinny(instance) & MFI_STATE_MASK) ==
MFI_STATE_FAULT) {
mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
} else
@@ -786,7 +842,6 @@
&(regs)->inbound_high_queue_port);
writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
&(regs)->inbound_low_queue_port);
- mmiowb();
spin_unlock_irqrestore(&instance->hba_lock, flags);
}
@@ -866,9 +921,9 @@
* @regs: MFI register set
*/
static u32
-megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs)
+megasas_read_fw_status_reg_gen2(struct megasas_instance *instance)
{
- return readl(&(regs)->outbound_scratch_pad);
+ return readl(&instance->reg_set->outbound_scratch_pad_0);
}
/**
@@ -876,10 +931,12 @@
* @regs: MFI register set
*/
static int
-megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs)
+megasas_clear_intr_gen2(struct megasas_instance *instance)
{
u32 status;
u32 mfiStatus = 0;
+ struct megasas_register_set __iomem *regs;
+ regs = instance->reg_set;
/*
* Check if it is our interrupt
@@ -1080,8 +1137,9 @@
ret = wait_event_timeout(instance->int_cmd_wait_q,
cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
if (!ret) {
- dev_err(&instance->pdev->dev, "Failed from %s %d DCMD Timed out\n",
- __func__, __LINE__);
+ dev_err(&instance->pdev->dev,
+ "DCMD(opcode: 0x%x) is timed out, func:%s\n",
+ cmd->frame->dcmd.opcode, __func__);
return DCMD_TIMEOUT;
}
} else
@@ -1110,6 +1168,7 @@
struct megasas_cmd *cmd;
struct megasas_abort_frame *abort_fr;
int ret = 0;
+ u32 opcode;
cmd = megasas_get_cmd(instance);
@@ -1145,8 +1204,10 @@
ret = wait_event_timeout(instance->abort_cmd_wait_q,
cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
if (!ret) {
- dev_err(&instance->pdev->dev, "Failed from %s %d Abort Timed out\n",
- __func__, __LINE__);
+ opcode = cmd_to_abort->frame->dcmd.opcode;
+ dev_err(&instance->pdev->dev,
+ "Abort(to be aborted DCMD opcode: 0x%x) is timed out func:%s\n",
+ opcode, __func__);
return DCMD_TIMEOUT;
}
} else
@@ -1330,11 +1391,11 @@
device_id = MEGASAS_DEV_INDEX(scp);
pthru = (struct megasas_pthru_frame *)cmd->frame;
- if (scp->sc_data_direction == PCI_DMA_TODEVICE)
+ if (scp->sc_data_direction == DMA_TO_DEVICE)
flags = MFI_FRAME_DIR_WRITE;
- else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+ else if (scp->sc_data_direction == DMA_FROM_DEVICE)
flags = MFI_FRAME_DIR_READ;
- else if (scp->sc_data_direction == PCI_DMA_NONE)
+ else if (scp->sc_data_direction == DMA_NONE)
flags = MFI_FRAME_DIR_NONE;
if (instance->flag_ieee == 1) {
@@ -1428,9 +1489,9 @@
device_id = MEGASAS_DEV_INDEX(scp);
ldio = (struct megasas_io_frame *)cmd->frame;
- if (scp->sc_data_direction == PCI_DMA_TODEVICE)
+ if (scp->sc_data_direction == DMA_TO_DEVICE)
flags = MFI_FRAME_DIR_WRITE;
- else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+ else if (scp->sc_data_direction == DMA_FROM_DEVICE)
flags = MFI_FRAME_DIR_READ;
if (instance->flag_ieee == 1) {
@@ -1884,26 +1945,19 @@
blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1);
}
-
/*
- * megasas_set_static_target_properties -
- * Device property set by driver are static and it is not required to be
- * updated after OCR.
- *
- * set io timeout
- * set device queue depth
- * set nvme device properties. see - megasas_set_nvme_device_properties
+ * megasas_set_fw_assisted_qd -
+ * set device queue depth to can_queue
+ * set device queue depth to fw assisted qd
*
* @sdev: scsi device
* @is_target_prop true, if fw provided target properties.
*/
-static void megasas_set_static_target_properties(struct scsi_device *sdev,
+static void megasas_set_fw_assisted_qd(struct scsi_device *sdev,
bool is_target_prop)
{
- u16 target_index = 0;
u8 interface_type;
u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN;
- u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB;
u32 tgt_device_qd;
struct megasas_instance *instance;
struct MR_PRIV_DEVICE *mr_device_priv_data;
@@ -1912,13 +1966,6 @@
mr_device_priv_data = sdev->hostdata;
interface_type = mr_device_priv_data->interface_type;
- /*
- * The RAID firmware may require extended timeouts.
- */
- blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ);
-
- target_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
-
switch (interface_type) {
case SAS_PD:
device_qd = MEGASAS_SAS_QD;
@@ -1936,18 +1983,49 @@
if (tgt_device_qd &&
(tgt_device_qd <= instance->host->can_queue))
device_qd = tgt_device_qd;
-
- /* max_io_size_kb will be set to non zero for
- * nvme based vd and syspd.
- */
- max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb);
}
+ if (instance->enable_sdev_max_qd && interface_type != UNKNOWN_DRIVE)
+ device_qd = instance->host->can_queue;
+
+ scsi_change_queue_depth(sdev, device_qd);
+}
+
+/*
+ * megasas_set_static_target_properties -
+ * Device property set by driver are static and it is not required to be
+ * updated after OCR.
+ *
+ * set io timeout
+ * set device queue depth
+ * set nvme device properties. see - megasas_set_nvme_device_properties
+ *
+ * @sdev: scsi device
+ * @is_target_prop true, if fw provided target properties.
+ */
+static void megasas_set_static_target_properties(struct scsi_device *sdev,
+ bool is_target_prop)
+{
+ u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB;
+ struct megasas_instance *instance;
+
+ instance = megasas_lookup_instance(sdev->host->host_no);
+
+ /*
+ * The RAID firmware may require extended timeouts.
+ */
+ blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ);
+
+ /* max_io_size_kb will be set to non zero for
+ * nvme based vd and syspd.
+ */
+ if (is_target_prop)
+ max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb);
+
if (instance->nvme_page_size && max_io_size_kb)
megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10));
- scsi_change_queue_depth(sdev, device_qd);
-
+ megasas_set_fw_assisted_qd(sdev, is_target_prop);
}
@@ -2079,9 +2157,11 @@
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
(instance->adapter_type != MFI_SERIES)) {
- writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
- /* Flush */
- readl(&instance->reg_set->doorbell);
+ if (!instance->requestorId) {
+ writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
+ /* Flush */
+ readl(&instance->reg_set->doorbell);
+ }
if (instance->requestorId && instance->peerIsPresent)
memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
} else {
@@ -2191,7 +2271,7 @@
static void
process_fw_state_change_wq(struct work_struct *work);
-void megasas_do_ocr(struct megasas_instance *instance)
+static void megasas_do_ocr(struct megasas_instance *instance)
{
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
@@ -2240,9 +2320,9 @@
sizeof(struct MR_LD_VF_AFFILIATION_111));
else {
new_affiliation_111 =
- pci_zalloc_consistent(instance->pdev,
- sizeof(struct MR_LD_VF_AFFILIATION_111),
- &new_affiliation_111_h);
+ dma_alloc_coherent(&instance->pdev->dev,
+ sizeof(struct MR_LD_VF_AFFILIATION_111),
+ &new_affiliation_111_h, GFP_KERNEL);
if (!new_affiliation_111) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
"memory for new affiliation for scsi%d\n",
@@ -2302,7 +2382,7 @@
}
out:
if (new_affiliation_111) {
- pci_free_consistent(instance->pdev,
+ dma_free_coherent(&instance->pdev->dev,
sizeof(struct MR_LD_VF_AFFILIATION_111),
new_affiliation_111,
new_affiliation_111_h);
@@ -2347,10 +2427,9 @@
sizeof(struct MR_LD_VF_AFFILIATION));
else {
new_affiliation =
- pci_zalloc_consistent(instance->pdev,
- (MAX_LOGICAL_DRIVES + 1) *
- sizeof(struct MR_LD_VF_AFFILIATION),
- &new_affiliation_h);
+ dma_alloc_coherent(&instance->pdev->dev,
+ (MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION),
+ &new_affiliation_h, GFP_KERNEL);
if (!new_affiliation) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
"memory for new affiliation for scsi%d\n",
@@ -2470,7 +2549,7 @@
}
if (new_affiliation)
- pci_free_consistent(instance->pdev,
+ dma_free_coherent(&instance->pdev->dev,
(MAX_LOGICAL_DRIVES + 1) *
sizeof(struct MR_LD_VF_AFFILIATION),
new_affiliation, new_affiliation_h);
@@ -2513,9 +2592,10 @@
if (initial) {
instance->hb_host_mem =
- pci_zalloc_consistent(instance->pdev,
- sizeof(struct MR_CTRL_HB_HOST_MEM),
- &instance->hb_host_mem_h);
+ dma_alloc_coherent(&instance->pdev->dev,
+ sizeof(struct MR_CTRL_HB_HOST_MEM),
+ &instance->hb_host_mem_h,
+ GFP_KERNEL);
if (!instance->hb_host_mem) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate"
" memory for heartbeat host memory for scsi%d\n",
@@ -2682,7 +2762,7 @@
i = 0;
outstanding = atomic_read(&instance->fw_outstanding);
- fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
+ fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK;
if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
goto no_outstanding;
@@ -2692,7 +2772,7 @@
do {
if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) {
dev_info(&instance->pdev->dev,
- "%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, oustanding 0x%x\n",
+ "%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, outstanding 0x%x\n",
__func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding));
if (i == 3)
goto kill_hba_and_failed;
@@ -2711,7 +2791,7 @@
outstanding = atomic_read(&instance->fw_outstanding);
- fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
+ fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK;
if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
goto no_outstanding;
}
@@ -2802,21 +2882,108 @@
}
/**
- * megasas_dump_frame - This function will dump MPT/MFI frame
+ * megasas_dump - This function will print hexdump of provided buffer.
+ * @buf: Buffer to be dumped
+ * @sz: Size in bytes
+ * @format: Different formats of dumping e.g. format=n will
+ * cause only 'n' 32 bit words to be dumped in a single
+ * line.
*/
-static inline void
-megasas_dump_frame(void *mpi_request, int sz)
+inline void
+megasas_dump(void *buf, int sz, int format)
{
int i;
- __le32 *mfp = (__le32 *)mpi_request;
+ __le32 *buf_loc = (__le32 *)buf;
- printk(KERN_INFO "IO request frame:\n\t");
- for (i = 0; i < sz / sizeof(__le32); i++) {
- if (i && ((i % 8) == 0))
- printk("\n\t");
- printk("%08x ", le32_to_cpu(mfp[i]));
+ for (i = 0; i < (sz / sizeof(__le32)); i++) {
+ if ((i % format) == 0) {
+ if (i != 0)
+ printk(KERN_CONT "\n");
+ printk(KERN_CONT "%08x: ", (i * 4));
+ }
+ printk(KERN_CONT "%08x ", le32_to_cpu(buf_loc[i]));
}
- printk("\n");
+ printk(KERN_CONT "\n");
+}
+
+/**
+ * megasas_dump_reg_set - This function will print hexdump of register set
+ * @reg_set:	Register set to be dumped
+ */
+inline void
+megasas_dump_reg_set(void __iomem *reg_set)
+{
+ unsigned int i, sz = 256;
+ u32 __iomem *reg = (u32 __iomem *)reg_set;
+
+ for (i = 0; i < (sz / sizeof(u32)); i++)
+		printk("%08x: %08x\n", (i * 4), readl(&reg[i]));
+}
+
+/**
+ * megasas_dump_fusion_io - This function will print key details
+ * of SCSI IO
+ * @scmd: SCSI command pointer of SCSI IO
+ */
+void
+megasas_dump_fusion_io(struct scsi_cmnd *scmd)
+{
+ struct megasas_cmd_fusion *cmd;
+ union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+ struct megasas_instance *instance;
+
+ cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr;
+ instance = (struct megasas_instance *)scmd->device->host->hostdata;
+
+ scmd_printk(KERN_INFO, scmd,
+ "scmd: (0x%p) retries: 0x%x allowed: 0x%x\n",
+ scmd, scmd->retries, scmd->allowed);
+ scsi_print_command(scmd);
+
+ if (cmd) {
+ req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;
+ scmd_printk(KERN_INFO, scmd, "Request descriptor details:\n");
+ scmd_printk(KERN_INFO, scmd,
+ "RequestFlags:0x%x MSIxIndex:0x%x SMID:0x%x LMID:0x%x DevHandle:0x%x\n",
+ req_desc->SCSIIO.RequestFlags,
+ req_desc->SCSIIO.MSIxIndex, req_desc->SCSIIO.SMID,
+ req_desc->SCSIIO.LMID, req_desc->SCSIIO.DevHandle);
+
+ printk(KERN_INFO "IO request frame:\n");
+ megasas_dump(cmd->io_request,
+ MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE, 8);
+ printk(KERN_INFO "Chain frame:\n");
+ megasas_dump(cmd->sg_frame,
+ instance->max_chain_frame_sz, 8);
+ }
+
+}
+
+/*
+ * megasas_dump_sys_regs - This function will dump system registers through
+ * sysfs.
+ * @reg_set: Pointer to System register set.
+ * @buf: Buffer to which output is to be written.
+ * @return: Number of bytes written to buffer.
+ */
+static inline ssize_t
+megasas_dump_sys_regs(void __iomem *reg_set, char *buf)
+{
+ unsigned int i, sz = 256;
+ int bytes_wrote = 0;
+ char *loc = (char *)buf;
+ u32 __iomem *reg = (u32 __iomem *)reg_set;
+
+ for (i = 0; i < sz / sizeof(u32); i++) {
+ bytes_wrote += snprintf(loc + bytes_wrote, PAGE_SIZE,
+ "%08x: %08x\n", (i * 4),
+				readl(&reg[i]));
+ }
+ return bytes_wrote;
}
/**
@@ -2830,24 +2997,20 @@
instance = (struct megasas_instance *)scmd->device->host->hostdata;
scmd_printk(KERN_INFO, scmd,
- "Controller reset is requested due to IO timeout\n"
- "SCSI command pointer: (%p)\t SCSI host state: %d\t"
- " SCSI host busy: %d\t FW outstanding: %d\n",
- scmd, scmd->device->host->shost_state,
+ "OCR is requested due to IO timeout!!\n");
+
+ scmd_printk(KERN_INFO, scmd,
+ "SCSI host state: %d SCSI host busy: %d FW outstanding: %d\n",
+ scmd->device->host->shost_state,
scsi_host_busy(scmd->device->host),
atomic_read(&instance->fw_outstanding));
-
/*
* First wait for all commands to complete
*/
if (instance->adapter_type == MFI_SERIES) {
ret = megasas_generic_reset(scmd);
} else {
- struct megasas_cmd_fusion *cmd;
- cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr;
- if (cmd)
- megasas_dump_frame(cmd->io_request,
- MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
+ megasas_dump_fusion_io(scmd);
ret = megasas_reset_fusion(scmd->device->host,
SCSIIO_TIMEOUT_OCR);
}
@@ -2997,7 +3160,7 @@
}
static ssize_t
-megasas_fw_crash_buffer_store(struct device *cdev,
+fw_crash_buffer_store(struct device *cdev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -3016,15 +3179,15 @@
}
static ssize_t
-megasas_fw_crash_buffer_show(struct device *cdev,
+fw_crash_buffer_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct megasas_instance *instance =
(struct megasas_instance *) shost->hostdata;
u32 size;
- unsigned long buff_addr;
unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
+ unsigned long chunk_left_bytes;
unsigned long src_addr;
unsigned long flags;
u32 buff_offset;
@@ -3040,8 +3203,6 @@
return -EINVAL;
}
- buff_addr = (unsigned long) buf;
-
if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) {
dev_err(&instance->pdev->dev,
"Firmware crash dump offset is out of range\n");
@@ -3050,6 +3211,8 @@
}
size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
+ chunk_left_bytes = dmachunk - (buff_offset % dmachunk);
+ size = (size > chunk_left_bytes) ? chunk_left_bytes : size;
size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
@@ -3061,7 +3224,7 @@
}
static ssize_t
-megasas_fw_crash_buffer_size_show(struct device *cdev,
+fw_crash_buffer_size_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -3073,7 +3236,7 @@
}
static ssize_t
-megasas_fw_crash_state_store(struct device *cdev,
+fw_crash_state_store(struct device *cdev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -3108,7 +3271,7 @@
}
static ssize_t
-megasas_fw_crash_state_show(struct device *cdev,
+fw_crash_state_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -3119,14 +3282,14 @@
}
static ssize_t
-megasas_page_size_show(struct device *cdev,
+page_size_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1);
}
static ssize_t
-megasas_ldio_outstanding_show(struct device *cdev, struct device_attribute *attr,
+ldio_outstanding_show(struct device *cdev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -3136,7 +3299,7 @@
}
static ssize_t
-megasas_fw_cmds_outstanding_show(struct device *cdev,
+fw_cmds_outstanding_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -3145,26 +3308,91 @@
return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->fw_outstanding));
}
-static DEVICE_ATTR(fw_crash_buffer, S_IRUGO | S_IWUSR,
- megasas_fw_crash_buffer_show, megasas_fw_crash_buffer_store);
-static DEVICE_ATTR(fw_crash_buffer_size, S_IRUGO,
- megasas_fw_crash_buffer_size_show, NULL);
-static DEVICE_ATTR(fw_crash_state, S_IRUGO | S_IWUSR,
- megasas_fw_crash_state_show, megasas_fw_crash_state_store);
-static DEVICE_ATTR(page_size, S_IRUGO,
- megasas_page_size_show, NULL);
-static DEVICE_ATTR(ldio_outstanding, S_IRUGO,
- megasas_ldio_outstanding_show, NULL);
-static DEVICE_ATTR(fw_cmds_outstanding, S_IRUGO,
- megasas_fw_cmds_outstanding_show, NULL);
+static ssize_t
+enable_sdev_max_qd_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
-struct device_attribute *megaraid_host_attrs[] = {
+ return snprintf(buf, PAGE_SIZE, "%d\n", instance->enable_sdev_max_qd);
+}
+
+static ssize_t
+enable_sdev_max_qd_store(struct device *cdev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
+ u32 val = 0;
+ bool is_target_prop;
+ int ret_target_prop = DCMD_FAILED;
+ struct scsi_device *sdev;
+
+ if (kstrtou32(buf, 0, &val) != 0) {
+ pr_err("megasas: could not set enable_sdev_max_qd\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&instance->reset_mutex);
+ if (val)
+ instance->enable_sdev_max_qd = true;
+ else
+ instance->enable_sdev_max_qd = false;
+
+ shost_for_each_device(sdev, shost) {
+ ret_target_prop = megasas_get_target_prop(instance, sdev);
+ is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
+ megasas_set_fw_assisted_qd(sdev, is_target_prop);
+ }
+ mutex_unlock(&instance->reset_mutex);
+
+ return strlen(buf);
+}
+
+static ssize_t
+dump_system_regs_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct megasas_instance *instance =
+ (struct megasas_instance *)shost->hostdata;
+
+ return megasas_dump_sys_regs(instance->reg_set, buf);
+}
+
+static ssize_t
+raid_map_id_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct megasas_instance *instance =
+ (struct megasas_instance *)shost->hostdata;
+
+ return snprintf(buf, PAGE_SIZE, "%ld\n",
+ (unsigned long)instance->map_id);
+}
+
+static DEVICE_ATTR_RW(fw_crash_buffer);
+static DEVICE_ATTR_RO(fw_crash_buffer_size);
+static DEVICE_ATTR_RW(fw_crash_state);
+static DEVICE_ATTR_RO(page_size);
+static DEVICE_ATTR_RO(ldio_outstanding);
+static DEVICE_ATTR_RO(fw_cmds_outstanding);
+static DEVICE_ATTR_RW(enable_sdev_max_qd);
+static DEVICE_ATTR_RO(dump_system_regs);
+static DEVICE_ATTR_RO(raid_map_id);
+
+static struct device_attribute *megaraid_host_attrs[] = {
&dev_attr_fw_crash_buffer_size,
&dev_attr_fw_crash_buffer,
&dev_attr_fw_crash_state,
&dev_attr_page_size,
&dev_attr_ldio_outstanding,
&dev_attr_fw_cmds_outstanding,
+ &dev_attr_enable_sdev_max_qd,
+ &dev_attr_dump_system_regs,
+ &dev_attr_raid_map_id,
NULL,
};
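These host attributes are exposed under the SCSI host's sysfs directory. A minimal user-space sketch that reads one of them (the host number and the attribute chosen are arbitrary examples):

#include <stdio.h>

int main(void)
{
	char line[64];
	/* hypothetical path; the host number depends on the system */
	FILE *f = fopen("/sys/class/scsi_host/host0/fw_cmds_outstanding", "r");

	if (!f)
		return 1;
	if (fgets(line, sizeof(line), f))
		printf("FW outstanding commands: %s", line);
	fclose(f);
	return 0;
}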
@@ -3186,8 +3414,8 @@
.eh_timed_out = megasas_reset_timer,
.shost_attrs = megaraid_host_attrs,
.bios_param = megasas_bios_param,
- .use_clustering = ENABLE_CLUSTERING,
.change_queue_depth = scsi_change_queue_depth,
+ .max_segment_size = 0xffffffff,
.no_write_same = 1,
};
@@ -3278,6 +3506,7 @@
megasas_complete_int_cmd(instance, cmd);
break;
}
+ /* fall through */
case MFI_CMD_LD_READ:
case MFI_CMD_LD_WRITE:
@@ -3348,6 +3577,7 @@
case MFI_CMD_SMP:
case MFI_CMD_STP:
case MFI_CMD_NVME:
+ case MFI_CMD_TOOLBOX:
megasas_complete_int_cmd(instance, cmd);
break;
@@ -3665,9 +3895,8 @@
return IRQ_HANDLED;
}
- if ((mfiStatus = instance->instancet->clear_intr(
- instance->reg_set)
- ) == 0) {
+ mfiStatus = instance->instancet->clear_intr(instance);
+ if (mfiStatus == 0) {
/* Hardware may not set outbound_intr_status in MSI-X mode */
if (!instance->msix_vectors)
return IRQ_NONE;
@@ -3677,7 +3906,7 @@
if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) {
fw_state = instance->instancet->read_fw_status_reg(
- instance->reg_set) & MFI_STATE_MASK;
+ instance) & MFI_STATE_MASK;
if (fw_state != MFI_STATE_FAULT) {
dev_notice(&instance->pdev->dev, "fw state:%x\n",
@@ -3757,10 +3986,9 @@
int i;
u8 max_wait;
u32 fw_state;
- u32 cur_state;
u32 abs_state, curr_abs_state;
- abs_state = instance->instancet->read_fw_status_reg(instance->reg_set);
+ abs_state = instance->instancet->read_fw_status_reg(instance);
fw_state = abs_state & MFI_STATE_MASK;
if (fw_state != MFI_STATE_READY)
@@ -3772,13 +4000,18 @@
switch (fw_state) {
case MFI_STATE_FAULT:
- dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW in FAULT state!!\n");
+ dev_printk(KERN_ERR, &instance->pdev->dev,
+ "FW in FAULT state, Fault code:0x%x subcode:0x%x func:%s\n",
+ abs_state & MFI_STATE_FAULT_CODE,
+ abs_state & MFI_STATE_FAULT_SUBCODE, __func__);
if (ocr) {
max_wait = MEGASAS_RESET_WAIT_TIME;
- cur_state = MFI_STATE_FAULT;
break;
- } else
+ } else {
+ dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
+ megasas_dump_reg_set(instance->reg_set);
return -ENODEV;
+ }
case MFI_STATE_WAIT_HANDSHAKE:
/*
@@ -3798,7 +4031,6 @@
&instance->reg_set->inbound_doorbell);
max_wait = MEGASAS_RESET_WAIT_TIME;
- cur_state = MFI_STATE_WAIT_HANDSHAKE;
break;
case MFI_STATE_BOOT_MESSAGE_PENDING:
@@ -3814,7 +4046,6 @@
&instance->reg_set->inbound_doorbell);
max_wait = MEGASAS_RESET_WAIT_TIME;
- cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
break;
case MFI_STATE_OPERATIONAL:
@@ -3832,7 +4063,8 @@
if (instance->adapter_type != MFI_SERIES) {
for (i = 0; i < (10 * 1000); i += 20) {
- if (readl(
+ if (megasas_readl(
+ instance,
&instance->
reg_set->
doorbell) & 1)
@@ -3846,7 +4078,6 @@
&instance->reg_set->inbound_doorbell);
max_wait = MEGASAS_RESET_WAIT_TIME;
- cur_state = MFI_STATE_OPERATIONAL;
break;
case MFI_STATE_UNDEFINED:
@@ -3854,49 +4085,45 @@
* This state should not last for more than 2 seconds
*/
max_wait = MEGASAS_RESET_WAIT_TIME;
- cur_state = MFI_STATE_UNDEFINED;
break;
case MFI_STATE_BB_INIT:
max_wait = MEGASAS_RESET_WAIT_TIME;
- cur_state = MFI_STATE_BB_INIT;
break;
case MFI_STATE_FW_INIT:
max_wait = MEGASAS_RESET_WAIT_TIME;
- cur_state = MFI_STATE_FW_INIT;
break;
case MFI_STATE_FW_INIT_2:
max_wait = MEGASAS_RESET_WAIT_TIME;
- cur_state = MFI_STATE_FW_INIT_2;
break;
case MFI_STATE_DEVICE_SCAN:
max_wait = MEGASAS_RESET_WAIT_TIME;
- cur_state = MFI_STATE_DEVICE_SCAN;
break;
case MFI_STATE_FLUSH_CACHE:
max_wait = MEGASAS_RESET_WAIT_TIME;
- cur_state = MFI_STATE_FLUSH_CACHE;
break;
default:
dev_printk(KERN_DEBUG, &instance->pdev->dev, "Unknown state 0x%x\n",
fw_state);
+ dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
+ megasas_dump_reg_set(instance->reg_set);
return -ENODEV;
}
/*
* The cur_state should not last for more than max_wait secs
*/
- for (i = 0; i < (max_wait * 1000); i++) {
+ for (i = 0; i < max_wait * 50; i++) {
curr_abs_state = instance->instancet->
- read_fw_status_reg(instance->reg_set);
+ read_fw_status_reg(instance);
if (abs_state == curr_abs_state) {
- msleep(1);
+ msleep(20);
} else
break;
}
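The overall timeout is unchanged by this rework; the loop simply wakes up far less often while it waits for the firmware state to move:

	old: max_wait * 1000 iterations * msleep(1)  ~= max_wait seconds
	new: max_wait * 50   iterations * msleep(20) ~= max_wait seconds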
@@ -3907,6 +4134,8 @@
if (curr_abs_state == abs_state) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed "
"in %d secs\n", fw_state, max_wait);
+ dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
+ megasas_dump_reg_set(instance->reg_set);
return -ENODEV;
}
@@ -3970,23 +4199,12 @@
{
int i;
u16 max_cmd;
- u32 sge_sz;
u32 frame_count;
struct megasas_cmd *cmd;
max_cmd = instance->max_mfi_cmds;
/*
- * Size of our frame is 64 bytes for MFI frame, followed by max SG
- * elements and finally SCSI_SENSE_BUFFERSIZE bytes for sense buffer
- */
- sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
- sizeof(struct megasas_sge32);
-
- if (instance->flag_ieee)
- sge_sz = sizeof(struct megasas_sge_skinny);
-
- /*
* For MFI controllers.
* max_num_sge = 60
* max_sge_sz = 16 byte (sizeof megasas_sge_skinny)
@@ -4155,6 +4373,7 @@
if (megasas_create_frame_pool(instance)) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
megasas_free_cmds(instance);
+ return -ENOMEM;
}
return 0;
@@ -4234,8 +4453,10 @@
switch (dcmd_timeout_ocr_possible(instance)) {
case INITIATE_OCR:
cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+ mutex_unlock(&instance->reset_mutex);
megasas_reset_fusion(instance->host,
MFI_IO_TIMEOUT_OCR);
+ mutex_lock(&instance->reset_mutex);
break;
case KILL_ADAPTER:
megaraid_sas_kill_hba(instance);
@@ -4271,7 +4492,6 @@
struct megasas_dcmd_frame *dcmd;
struct MR_PD_LIST *ci;
struct MR_PD_ADDRESS *pd_addr;
- dma_addr_t ci_h = 0;
if (instance->pd_list_not_supported) {
dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
@@ -4280,7 +4500,6 @@
}
ci = instance->pd_list_buf;
- ci_h = instance->pd_list_buf_h;
cmd = megasas_get_cmd(instance);
@@ -4353,6 +4572,9 @@
case DCMD_SUCCESS:
pd_addr = ci->addr;
+ if (megasas_dbg_lvl & LD_PD_DEBUG)
+ dev_info(&instance->pdev->dev, "%s, sysPD count: 0x%x\n",
+ __func__, le32_to_cpu(ci->count));
if ((le32_to_cpu(ci->count) >
(MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL)))
@@ -4368,6 +4590,11 @@
pd_addr->scsiDevType;
instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState =
MR_PD_STATE_SYSTEM;
+ if (megasas_dbg_lvl & LD_PD_DEBUG)
+ dev_info(&instance->pdev->dev,
+ "PD%d: targetID: 0x%03x deviceType:0x%x\n",
+ pd_index, le16_to_cpu(pd_addr->deviceId),
+ pd_addr->scsiDevType);
pd_addr++;
}
@@ -4471,6 +4698,10 @@
break;
case DCMD_SUCCESS:
+ if (megasas_dbg_lvl & LD_PD_DEBUG)
+ dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n",
+ __func__, ld_count);
+
if (ld_count > instance->fw_supported_vd_count)
break;
@@ -4480,6 +4711,10 @@
if (ci->ldList[ld_index].state != 0) {
ids = ci->ldList[ld_index].ref.targetId;
instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId;
+ if (megasas_dbg_lvl & LD_PD_DEBUG)
+ dev_info(&instance->pdev->dev,
+ "LD%d: targetID: 0x%03x\n",
+ ld_index, ids);
}
}
@@ -4583,6 +4818,10 @@
case DCMD_SUCCESS:
tgtid_count = le32_to_cpu(ci->count);
+ if (megasas_dbg_lvl & LD_PD_DEBUG)
+ dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n",
+ __func__, tgtid_count);
+
if ((tgtid_count > (instance->fw_supported_vd_count)))
break;
@@ -4590,6 +4829,9 @@
for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
ids = ci->targetId[ld_index];
instance->ld_ids[ids] = ci->targetId[ld_index];
+ if (megasas_dbg_lvl & LD_PD_DEBUG)
+ dev_info(&instance->pdev->dev, "LD%d: targetID: 0x%03x\n",
+ ld_index, ci->targetId[ld_index]);
}
break;
@@ -4601,6 +4843,140 @@
return ret;
}
+/**
+ * dcmd.opcode - MR_DCMD_CTRL_DEVICE_LIST_GET
+ * dcmd.mbox - reserved
+ * dcmd.sge IN - ptr to return MR_HOST_DEVICE_LIST structure
+ * Desc: This DCMD will return the combined device list
+ * Status: MFI_STAT_OK - List returned successfully
+ * MFI_STAT_INVALID_CMD - Firmware support for the feature has been
+ * disabled
+ * @instance: Adapter soft state
+ * @is_probe: Driver probe check
+ * Return: 0 if DCMD succeeded
+ * non-zero if failed
+ */
+static int
+megasas_host_device_list_query(struct megasas_instance *instance,
+ bool is_probe)
+{
+ int ret, i, target_id;
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ struct MR_HOST_DEVICE_LIST *ci;
+ u32 count;
+ dma_addr_t ci_h;
+
+ ci = instance->host_device_list_buf;
+ ci_h = instance->host_device_list_buf_h;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ dev_warn(&instance->pdev->dev,
+ "%s: failed to get cmd\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ memset(ci, 0, sizeof(*ci));
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->mbox.b[0] = is_probe ? 0 : 1;
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
+ dcmd->sge_count = 1;
+ dcmd->flags = MFI_FRAME_DIR_READ;
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = cpu_to_le32(HOST_DEVICE_LIST_SZ);
+ dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_DEVICE_LIST_GET);
+
+ megasas_set_dma_settings(instance, dcmd, ci_h, HOST_DEVICE_LIST_SZ);
+
+ if (!instance->mask_interrupts) {
+ ret = megasas_issue_blocked_cmd(instance, cmd,
+ MFI_IO_TIMEOUT_SECS);
+ } else {
+ ret = megasas_issue_polled(instance, cmd);
+ cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+ }
+
+ switch (ret) {
+ case DCMD_SUCCESS:
+ /* Fill the internal pd_list and ld_ids array based on
+ * targetIds returned by FW
+ */
+ count = le32_to_cpu(ci->count);
+
+ if (count > (MEGASAS_MAX_PD + MAX_LOGICAL_DRIVES_EXT))
+ break;
+
+ if (megasas_dbg_lvl & LD_PD_DEBUG)
+ dev_info(&instance->pdev->dev, "%s, Device count: 0x%x\n",
+ __func__, count);
+
+ memset(instance->local_pd_list, 0,
+ MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
+ memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
+ for (i = 0; i < count; i++) {
+ target_id = le16_to_cpu(ci->host_device_list[i].target_id);
+ if (ci->host_device_list[i].flags.u.bits.is_sys_pd) {
+ instance->local_pd_list[target_id].tid = target_id;
+ instance->local_pd_list[target_id].driveType =
+ ci->host_device_list[i].scsi_type;
+ instance->local_pd_list[target_id].driveState =
+ MR_PD_STATE_SYSTEM;
+ if (megasas_dbg_lvl & LD_PD_DEBUG)
+ dev_info(&instance->pdev->dev,
+ "Device %d: PD targetID: 0x%03x deviceType:0x%x\n",
+ i, target_id, ci->host_device_list[i].scsi_type);
+ } else {
+ instance->ld_ids[target_id] = target_id;
+ if (megasas_dbg_lvl & LD_PD_DEBUG)
+ dev_info(&instance->pdev->dev,
+ "Device %d: LD targetID: 0x%03x\n",
+ i, target_id);
+ }
+ }
+
+ memcpy(instance->pd_list, instance->local_pd_list,
+ sizeof(instance->pd_list));
+ break;
+
+ case DCMD_TIMEOUT:
+ switch (dcmd_timeout_ocr_possible(instance)) {
+ case INITIATE_OCR:
+ cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+ mutex_unlock(&instance->reset_mutex);
+ megasas_reset_fusion(instance->host,
+ MFI_IO_TIMEOUT_OCR);
+ mutex_lock(&instance->reset_mutex);
+ break;
+ case KILL_ADAPTER:
+ megaraid_sas_kill_hba(instance);
+ break;
+ case IGNORE_TIMEOUT:
+ dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
+ __func__, __LINE__);
+ break;
+ }
+ break;
+ case DCMD_FAILED:
+ dev_err(&instance->pdev->dev,
+ "%s: MR_DCMD_CTRL_DEVICE_LIST_GET failed\n",
+ __func__);
+ break;
+ }
+
+ if (ret != DCMD_TIMEOUT)
+ megasas_return_cmd(instance, cmd);
+
+ return ret;
+}
+
/*
* megasas_update_ext_vd_details : Update details w.r.t Extended VD
* instance : Controller's instance
@@ -4634,9 +5010,9 @@
}
dev_info(&instance->pdev->dev,
- "firmware type\t: %s\n",
- instance->supportmax256vd ? "Extended VD(240 VD)firmware" :
- "Legacy(64 VD) firmware");
+ "FW provided supportMaxExtLDs: %d\tmax_lds: %d\n",
+ instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs ? 1 : 0,
+ instance->ctrl_info_buf->max_lds);
if (instance->max_raid_mapsize) {
ventura_map_sz = instance->max_raid_mapsize *
@@ -4661,6 +5037,89 @@
fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL);
}
+/*
+ * dcmd.opcode - MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES
+ * dcmd.hdr.length - number of bytes to read
+ * dcmd.sge - Ptr to MR_SNAPDUMP_PROPERTIES
+ * Desc: Fill in snapdump properties
+ * Status: MFI_STAT_OK- Command successful
+ */
+void megasas_get_snapdump_properties(struct megasas_instance *instance)
+{
+ int ret = 0;
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ struct MR_SNAPDUMP_PROPERTIES *ci;
+ dma_addr_t ci_h = 0;
+
+ ci = instance->snapdump_prop;
+ ci_h = instance->snapdump_prop_h;
+
+ if (!ci)
+ return;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ dev_dbg(&instance->pdev->dev, "Failed to get a free cmd\n");
+ return;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ memset(ci, 0, sizeof(*ci));
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
+ dcmd->sge_count = 1;
+ dcmd->flags = MFI_FRAME_DIR_READ;
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_SNAPDUMP_PROPERTIES));
+ dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES);
+
+ megasas_set_dma_settings(instance, dcmd, ci_h,
+ sizeof(struct MR_SNAPDUMP_PROPERTIES));
+
+ if (!instance->mask_interrupts) {
+ ret = megasas_issue_blocked_cmd(instance, cmd,
+ MFI_IO_TIMEOUT_SECS);
+ } else {
+ ret = megasas_issue_polled(instance, cmd);
+ cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+ }
+
+ switch (ret) {
+ case DCMD_SUCCESS:
+ instance->snapdump_wait_time =
+ min_t(u8, ci->trigger_min_num_sec_before_ocr,
+ MEGASAS_MAX_SNAP_DUMP_WAIT_TIME);
+ break;
+
+ case DCMD_TIMEOUT:
+ switch (dcmd_timeout_ocr_possible(instance)) {
+ case INITIATE_OCR:
+ cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+ mutex_unlock(&instance->reset_mutex);
+ megasas_reset_fusion(instance->host,
+ MFI_IO_TIMEOUT_OCR);
+ mutex_lock(&instance->reset_mutex);
+ break;
+ case KILL_ADAPTER:
+ megaraid_sas_kill_hba(instance);
+ break;
+ case IGNORE_TIMEOUT:
+ dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
+ __func__, __LINE__);
+ break;
+ }
+ }
+
+ if (ret != DCMD_TIMEOUT)
+ megasas_return_cmd(instance, cmd);
+}
+
/**
* megasas_get_controller_info - Returns FW's controller structure
* @instance: Adapter soft state
@@ -4720,9 +5179,11 @@
* CPU endianness format.
*/
le32_to_cpus((u32 *)&ci->properties.OnOffProperties);
+ le16_to_cpus((u16 *)&ci->properties.on_off_properties2);
le32_to_cpus((u32 *)&ci->adapterOperations2);
le32_to_cpus((u32 *)&ci->adapterOperations3);
le16_to_cpus((u16 *)&ci->adapter_operations4);
+ le32_to_cpus((u32 *)&ci->adapter_operations5);
/* Update the latest Ext VD info.
* From Init path, store current firmware details.
@@ -4730,17 +5191,27 @@
* in case of Firmware upgrade without system reboot.
*/
megasas_update_ext_vd_details(instance);
- instance->use_seqnum_jbod_fp =
+ instance->support_seqnum_jbod_fp =
ci->adapterOperations3.useSeqNumJbodFP;
instance->support_morethan256jbod =
ci->adapter_operations4.support_pd_map_target_id;
instance->support_nvme_passthru =
ci->adapter_operations4.support_nvme_passthru;
+ instance->support_pci_lane_margining =
+ ci->adapter_operations5.support_pci_lane_margining;
instance->task_abort_tmo = ci->TaskAbortTO;
instance->max_reset_tmo = ci->MaxResetTO;
/*Check whether controller is iMR or MR */
instance->is_imr = (ci->memory_size ? 0 : 1);
+
+ instance->snapdump_wait_time =
+ (ci->properties.on_off_properties2.enable_snap_dump ?
+ MEGASAS_DEFAULT_SNAP_DUMP_WAIT_TIME : 0);
+
+ instance->enable_fw_dev_list =
+ ci->properties.on_off_properties2.enable_fw_dev_list;
+
dev_info(&instance->pdev->dev,
"controller type\t: %s(%dMB)\n",
instance->is_imr ? "iMR" : "MR",
@@ -4759,6 +5230,10 @@
dev_info(&instance->pdev->dev,
"FW provided TM TaskAbort/Reset timeout\t: %d secs/%d secs\n",
instance->task_abort_tmo, instance->max_reset_tmo);
+ dev_info(&instance->pdev->dev, "JBOD sequence map support\t: %s\n",
+ instance->support_seqnum_jbod_fp ? "Yes" : "No");
+ dev_info(&instance->pdev->dev, "PCI Lane Margining support\t: %s\n",
+ instance->support_pci_lane_margining ? "Yes" : "No");
break;
@@ -4766,8 +5241,10 @@
switch (dcmd_timeout_ocr_possible(instance)) {
case INITIATE_OCR:
cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+ mutex_unlock(&instance->reset_mutex);
megasas_reset_fusion(instance->host,
MFI_IO_TIMEOUT_OCR);
+ mutex_lock(&instance->reset_mutex);
break;
case KILL_ADAPTER:
megaraid_sas_kill_hba(instance);
@@ -4942,16 +5419,13 @@
static u32
megasas_init_adapter_mfi(struct megasas_instance *instance)
{
- struct megasas_register_set __iomem *reg_set;
u32 context_sz;
u32 reply_q_sz;
- reg_set = instance->reg_set;
-
/*
* Get various operational parameters from status register
*/
- instance->max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
+ instance->max_fw_cmds = instance->instancet->read_fw_status_reg(instance) & 0x00FFFF;
/*
* Reduce the max supported cmds by 1. This is to ensure that the
* reply_q_sz (1 more than the max cmd that driver may send)
@@ -4959,7 +5433,7 @@
*/
instance->max_fw_cmds = instance->max_fw_cmds-1;
instance->max_mfi_cmds = instance->max_fw_cmds;
- instance->max_num_sge = (instance->instancet->read_fw_status_reg(reg_set) & 0xFF0000) >>
+ instance->max_num_sge = (instance->instancet->read_fw_status_reg(instance) & 0xFF0000) >>
0x10;
/*
* For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands
@@ -4995,9 +5469,8 @@
context_sz = sizeof(u32);
reply_q_sz = context_sz * (instance->max_fw_cmds + 1);
- instance->reply_queue = pci_alloc_consistent(instance->pdev,
- reply_q_sz,
- &instance->reply_queue_h);
+ instance->reply_queue = dma_alloc_coherent(&instance->pdev->dev,
+ reply_q_sz, &instance->reply_queue_h, GFP_KERNEL);
if (!instance->reply_queue) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n");
@@ -5016,7 +5489,7 @@
instance->fw_support_ieee = 0;
instance->fw_support_ieee =
- (instance->instancet->read_fw_status_reg(reg_set) &
+ (instance->instancet->read_fw_status_reg(instance) &
0x04000000);
dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d",
@@ -5029,7 +5502,7 @@
fail_fw_init:
- pci_free_consistent(instance->pdev, reply_q_sz,
+ dma_free_coherent(&instance->pdev->dev, reply_q_sz,
instance->reply_queue, instance->reply_queue_h);
fail_reply_queue:
megasas_free_cmds(instance);
@@ -5038,6 +5511,25 @@
return 1;
}
+static
+void megasas_setup_irq_poll(struct megasas_instance *instance)
+{
+ struct megasas_irq_context *irq_ctx;
+ u32 count, i;
+
+ count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+
+ /* Initialize IRQ poll */
+ for (i = 0; i < count; i++) {
+ irq_ctx = &instance->irq_context[i];
+ irq_ctx->os_irq = pci_irq_vector(instance->pdev, i);
+ irq_ctx->irq_poll_scheduled = false;
+ irq_poll_init(&irq_ctx->irqpoll,
+ instance->threshold_reply_count,
+ megasas_irqpoll);
+ }
+}
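A hedged sketch of the irq_poll pattern being initialized here: the poll callback drains replies in softirq context and calls irq_poll_complete() once it finishes under budget, after which the interrupt handler can schedule it again with irq_poll_sched() when it sees a burst of replies. Everything except the irq_poll_* API names below is hypothetical.

#include <linux/irq_poll.h>
#include <linux/kernel.h>

/* hypothetical per-queue context, loosely mirroring struct megasas_irq_context */
struct example_irq_ctx {
	struct irq_poll irqpoll;
	bool irq_poll_scheduled;
};

static int example_drain_replies(int budget);	/* hypothetical: returns replies consumed */

static int example_irqpoll(struct irq_poll *iop, int budget)
{
	struct example_irq_ctx *ctx = container_of(iop, struct example_irq_ctx, irqpoll);
	int done = example_drain_replies(budget);

	if (done < budget) {
		/* queue drained: stop polling so the regular IRQ path takes over again */
		irq_poll_complete(iop);
		ctx->irq_poll_scheduled = false;
	}
	return done;
}

static void example_setup(struct example_irq_ctx *ctx, int weight)
{
	irq_poll_init(&ctx->irqpoll, weight, example_irqpoll);
}

static void example_on_reply_burst(struct example_irq_ctx *ctx)
{
	/* interrupt handler defers further reply processing to the poller */
	ctx->irq_poll_scheduled = true;
	irq_poll_sched(&ctx->irqpoll);
}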
+
/*
* megasas_setup_irqs_ioapic - register legacy interrupts.
* @instance: Adapter soft state
@@ -5062,6 +5554,8 @@
__func__, __LINE__);
return -1;
}
+ instance->perf_mode = MR_LATENCY_PERF_MODE;
+ instance->low_latency_index_start = 0;
return 0;
}
@@ -5096,6 +5590,7 @@
&instance->irq_context[j]);
/* Retry irq register for IO_APIC*/
instance->msix_vectors = 0;
+ instance->msix_load_balance = false;
if (is_probe) {
pci_free_irq_vectors(instance->pdev);
return megasas_setup_irqs_ioapic(instance);
@@ -5104,6 +5599,7 @@
}
}
}
+
return 0;
}
@@ -5116,6 +5612,16 @@
megasas_destroy_irqs(struct megasas_instance *instance) {
int i;
+ int count;
+ struct megasas_irq_context *irq_ctx;
+
+ count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+ if (instance->adapter_type != MFI_SERIES) {
+ for (i = 0; i < count; i++) {
+ irq_ctx = &instance->irq_context[i];
+ irq_poll_disable(&irq_ctx->irqpoll);
+ }
+ }
if (instance->msix_vectors)
for (i = 0; i < instance->msix_vectors; i++) {
@@ -5144,10 +5650,12 @@
pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
(sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
+ instance->use_seqnum_jbod_fp =
+ instance->support_seqnum_jbod_fp;
if (reset_devices || !fusion ||
- !instance->ctrl_info_buf->adapterOperations3.useSeqNumJbodFP) {
+ !instance->support_seqnum_jbod_fp) {
dev_info(&instance->pdev->dev,
- "Jbod map is not supported %s %d\n",
+ "JBOD sequence map is disabled %s %d\n",
__func__, __LINE__);
instance->use_seqnum_jbod_fp = false;
return;
@@ -5186,9 +5694,11 @@
static void megasas_setup_reply_map(struct megasas_instance *instance)
{
const struct cpumask *mask;
- unsigned int queue, cpu;
+ unsigned int queue, cpu, low_latency_index_start;
- for (queue = 0; queue < instance->msix_vectors; queue++) {
+ low_latency_index_start = instance->low_latency_index_start;
+
+ for (queue = low_latency_index_start; queue < instance->msix_vectors; queue++) {
mask = pci_irq_get_affinity(instance->pdev, queue);
if (!mask)
goto fallback;
@@ -5199,8 +5709,131 @@
return;
fallback:
- for_each_possible_cpu(cpu)
- instance->reply_map[cpu] = cpu % instance->msix_vectors;
+ queue = low_latency_index_start;
+ for_each_possible_cpu(cpu) {
+ instance->reply_map[cpu] = queue;
+ if (queue == (instance->msix_vectors - 1))
+ queue = low_latency_index_start;
+ else
+ queue++;
+ }
+}
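For the fallback path, a worked example with hypothetical numbers: 8 possible CPUs, msix_vectors = 4 and low_latency_index_start = 1. Queue 0 is skipped and the CPUs are dealt round-robin over the remaining IO queues:

	cpu0 -> q1   cpu1 -> q2   cpu2 -> q3   cpu3 -> q1
	cpu4 -> q2   cpu5 -> q3   cpu6 -> q1   cpu7 -> q2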
+
+/**
+ * megasas_get_device_list - Get the PD and LD device list from FW.
+ * @instance: Adapter soft state
+ * @return: Success or failure
+ *
+ * Issue DCMDs to Firmware to get the PD and LD list.
+ * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination
+ * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list.
+ */
+static
+int megasas_get_device_list(struct megasas_instance *instance)
+{
+ memset(instance->pd_list, 0,
+ (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
+ memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
+
+ if (instance->enable_fw_dev_list) {
+ if (megasas_host_device_list_query(instance, true))
+ return FAILED;
+ } else {
+ if (megasas_get_pd_list(instance) < 0) {
+ dev_err(&instance->pdev->dev, "failed to get PD list\n");
+ return FAILED;
+ }
+
+ if (megasas_ld_list_query(instance,
+ MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) {
+ dev_err(&instance->pdev->dev, "failed to get LD list\n");
+ return FAILED;
+ }
+ }
+
+ return SUCCESS;
+}
+
+/**
+ * megasas_set_high_iops_queue_affinity_hint - Set affinity hint for high IOPS queues
+ * @instance: Adapter soft state
+ * return: void
+ */
+static inline void
+megasas_set_high_iops_queue_affinity_hint(struct megasas_instance *instance)
+{
+ int i;
+ int local_numa_node;
+
+ if (instance->perf_mode == MR_BALANCED_PERF_MODE) {
+ local_numa_node = dev_to_node(&instance->pdev->dev);
+
+ for (i = 0; i < instance->low_latency_index_start; i++)
+ irq_set_affinity_hint(pci_irq_vector(instance->pdev, i),
+ cpumask_of_node(local_numa_node));
+ }
+}
+
+static int
+__megasas_alloc_irq_vectors(struct megasas_instance *instance)
+{
+ int i, irq_flags;
+ struct irq_affinity desc = { .pre_vectors = instance->low_latency_index_start };
+ struct irq_affinity *descp = &desc;
+
+ irq_flags = PCI_IRQ_MSIX;
+
+ if (instance->smp_affinity_enable)
+ irq_flags |= PCI_IRQ_AFFINITY;
+ else
+ descp = NULL;
+
+ i = pci_alloc_irq_vectors_affinity(instance->pdev,
+ instance->low_latency_index_start,
+ instance->msix_vectors, irq_flags, descp);
+
+ return i;
+}
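The .pre_vectors field is what keeps the reserved queues out of the automatic affinity spreading. For example (numbers hypothetical): in balanced mode with low_latency_index_start = 8 and msix_vectors = 24, the call requests between 8 and 24 MSI-X vectors; the first 8 keep their default affinity (and are later pinned to the adapter's NUMA node by megasas_set_high_iops_queue_affinity_hint()), while the remaining vectors are spread one per CPU by the PCI core and read back through pci_irq_get_affinity() in megasas_setup_reply_map(). When smp_affinity_enable is off, descp is NULL and no spreading is requested at all.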
+
+/**
+ * megasas_alloc_irq_vectors - Allocate IRQ vectors/enable MSI-x vectors
+ * @instance: Adapter soft state
+ * return: void
+ */
+static void
+megasas_alloc_irq_vectors(struct megasas_instance *instance)
+{
+ int i;
+ unsigned int num_msix_req;
+
+ i = __megasas_alloc_irq_vectors(instance);
+
+ if ((instance->perf_mode == MR_BALANCED_PERF_MODE) &&
+ (i != instance->msix_vectors)) {
+ if (instance->msix_vectors)
+ pci_free_irq_vectors(instance->pdev);
+ /* Disable Balanced IOPS mode and try realloc vectors */
+ instance->perf_mode = MR_LATENCY_PERF_MODE;
+ instance->low_latency_index_start = 1;
+ num_msix_req = num_online_cpus() + instance->low_latency_index_start;
+
+ instance->msix_vectors = min(num_msix_req,
+ instance->msix_vectors);
+
+ i = __megasas_alloc_irq_vectors(instance);
+
+ }
+
+ dev_info(&instance->pdev->dev,
+ "requested/available msix %d/%d\n", instance->msix_vectors, i);
+
+ if (i > 0)
+ instance->msix_vectors = i;
+ else
+ instance->msix_vectors = 0;
+
+ if (instance->smp_affinity_enable)
+ megasas_set_high_iops_queue_affinity_hint(instance);
}
/**
@@ -5214,14 +5847,17 @@
{
u32 max_sectors_1;
u32 max_sectors_2, tmp_sectors, msix_enable;
- u32 scratch_pad_2, scratch_pad_3, scratch_pad_4;
+ u32 scratch_pad_1, scratch_pad_2, scratch_pad_3, status_reg;
resource_size_t base_addr;
- struct megasas_register_set __iomem *reg_set;
+ void *base_addr_phys;
struct megasas_ctrl_info *ctrl_info = NULL;
unsigned long bar_list;
- int i, j, loop, fw_msix_count = 0;
+ int i, j, loop;
struct IOV_111 *iovPtr;
struct fusion_context *fusion;
+ bool intr_coalescing;
+ unsigned int num_msix_req;
+ u16 lnksta, speed;
fusion = instance->ctrl_context;
@@ -5242,7 +5878,10 @@
goto fail_ioremap;
}
- reg_set = instance->reg_set;
+ base_addr_phys = &base_addr;
+ dev_printk(KERN_DEBUG, &instance->pdev->dev,
+ "BAR:0x%lx BAR's base_addr(phys):%pa mapped virt_addr:0x%p\n",
+ instance->bar, base_addr_phys, instance->reg_set);
if (instance->adapter_type != MFI_SERIES)
instance->instancet = &megasas_instance_template_fusion;
@@ -5270,19 +5909,35 @@
}
if (megasas_transition_to_ready(instance, 0)) {
- atomic_set(&instance->fw_reset_no_pci_access, 1);
- instance->instancet->adp_reset
- (instance, instance->reg_set);
- atomic_set(&instance->fw_reset_no_pci_access, 0);
dev_info(&instance->pdev->dev,
- "FW restarted successfully from %s!\n",
- __func__);
+ "Failed to transition controller to ready from %s!\n",
+ __func__);
+ if (instance->adapter_type != MFI_SERIES) {
+ status_reg = instance->instancet->read_fw_status_reg(
+ instance);
+ if (status_reg & MFI_RESET_ADAPTER) {
+ if (megasas_adp_reset_wait_for_ready
+ (instance, true, 0) == FAILED)
+ goto fail_ready_state;
+ } else {
+ goto fail_ready_state;
+ }
+ } else {
+ atomic_set(&instance->fw_reset_no_pci_access, 1);
+ instance->instancet->adp_reset
+ (instance, instance->reg_set);
+ atomic_set(&instance->fw_reset_no_pci_access, 0);
- /*waitting for about 30 second before retry*/
- ssleep(30);
+ /* waiting for about 30 seconds before retry */
+ ssleep(30);
- if (megasas_transition_to_ready(instance, 0))
- goto fail_ready_state;
+ if (megasas_transition_to_ready(instance, 0))
+ goto fail_ready_state;
+ }
+
+ dev_info(&instance->pdev->dev,
+ "FW restarted successfully from %s!\n",
+ __func__);
}
megasas_init_ctrl_params(instance);
@@ -5298,40 +5953,76 @@
fusion = instance->ctrl_context;
- if (instance->adapter_type == VENTURA_SERIES) {
- scratch_pad_3 =
- readl(&instance->reg_set->outbound_scratch_pad_3);
- instance->max_raid_mapsize = ((scratch_pad_3 >>
+ if (instance->adapter_type >= VENTURA_SERIES) {
+ scratch_pad_2 =
+ megasas_readl(instance,
+ &instance->reg_set->outbound_scratch_pad_2);
+ instance->max_raid_mapsize = ((scratch_pad_2 >>
MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
MR_MAX_RAID_MAP_SIZE_MASK);
}
+ instance->enable_sdev_max_qd = enable_sdev_max_qd;
+
+ switch (instance->adapter_type) {
+ case VENTURA_SERIES:
+ fusion->pcie_bw_limitation = true;
+ break;
+ case AERO_SERIES:
+ fusion->r56_div_offload = true;
+ break;
+ default:
+ break;
+ }
+
/* Check if MSI-X is supported while in ready state */
- msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
+ msix_enable = (instance->instancet->read_fw_status_reg(instance) &
0x4000000) >> 0x1a;
if (msix_enable && !msix_disable) {
- int irq_flags = PCI_IRQ_MSIX;
- scratch_pad_2 = readl
- (&instance->reg_set->outbound_scratch_pad_2);
+ scratch_pad_1 = megasas_readl
+ (instance, &instance->reg_set->outbound_scratch_pad_1);
/* Check max MSI-X vectors */
if (fusion) {
if (instance->adapter_type == THUNDERBOLT_SERIES) {
/* Thunderbolt Series*/
- instance->msix_vectors = (scratch_pad_2
+ instance->msix_vectors = (scratch_pad_1
& MR_MAX_REPLY_QUEUES_OFFSET) + 1;
- fw_msix_count = instance->msix_vectors;
- } else { /* Invader series supports more than 8 MSI-x vectors*/
- instance->msix_vectors = ((scratch_pad_2
+ } else {
+ instance->msix_vectors = ((scratch_pad_1
& MR_MAX_REPLY_QUEUES_EXT_OFFSET)
>> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
- if (instance->msix_vectors > 16)
- instance->msix_combined = true;
+
+ /*
+ * For Invader series, > 8 MSI-x vectors
+ * supported by FW/HW implies combined
+ * reply queue mode is enabled.
+ * For Ventura series, > 16 MSI-x vectors
+ * supported by FW/HW implies combined
+ * reply queue mode is enabled.
+ */
+ switch (instance->adapter_type) {
+ case INVADER_SERIES:
+ if (instance->msix_vectors > 8)
+ instance->msix_combined = true;
+ break;
+ case AERO_SERIES:
+ case VENTURA_SERIES:
+ if (instance->msix_vectors > 16)
+ instance->msix_combined = true;
+ break;
+ }
if (rdpq_enable)
- instance->is_rdpq = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ?
+ instance->is_rdpq = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ?
1 : 0;
- fw_msix_count = instance->msix_vectors;
+
+ if (instance->adapter_type >= INVADER_SERIES &&
+ !instance->msix_combined) {
+ instance->msix_load_balance = true;
+ instance->smp_affinity_enable = false;
+ }
+
/* Save 1-15 reply post index address to local memory
* Index 0 is already saved from reg offset
* MPI2_REPLY_POST_HOST_INDEX_OFFSET
@@ -5344,22 +6035,91 @@
+ (loop * 0x10));
}
}
+
+ dev_info(&instance->pdev->dev,
+ "firmware supports msix\t: (%d)",
+ instance->msix_vectors);
if (msix_vectors)
instance->msix_vectors = min(msix_vectors,
instance->msix_vectors);
} else /* MFI adapters */
instance->msix_vectors = 1;
- /* Don't bother allocating more MSI-X vectors than cpus */
- instance->msix_vectors = min(instance->msix_vectors,
- (unsigned int)num_online_cpus());
- if (smp_affinity_enable)
- irq_flags |= PCI_IRQ_AFFINITY;
- i = pci_alloc_irq_vectors(instance->pdev, 1,
- instance->msix_vectors, irq_flags);
- if (i > 0)
- instance->msix_vectors = i;
+
+
+ /*
+ * For Aero (if some conditions are met), driver will configure a
+ * few additional reply queues with interrupt coalescing enabled.
+ * These queues with interrupt coalescing enabled are called
+ * High IOPS queues, and the rest of the reply queues (based on the
+ * number of logical CPUs) are termed Low latency queues.
+ *
+ * Total Number of reply queues = High IOPS queues + low latency queues
+ *
+ * For the rest of the fusion adapters, 1 additional reply queue is
+ * reserved for management commands, and the remaining reply queues
+ * (based on the number of logical CPUs) are used for IOs and
+ * referred to as IO queues.
+ * Total Number of reply queues = 1 + IO queues
+ *
+ * MFI adapters support a single MSI-x vector, so a single reply queue
+ * is used for both IO and management commands.
+ */
+
+ intr_coalescing = (scratch_pad_1 & MR_INTR_COALESCING_SUPPORT_OFFSET) ?
+ true : false;
+ if (intr_coalescing &&
+ (num_online_cpus() >= MR_HIGH_IOPS_QUEUE_COUNT) &&
+ (instance->msix_vectors == MEGASAS_MAX_MSIX_QUEUES))
+ instance->perf_mode = MR_BALANCED_PERF_MODE;
else
- instance->msix_vectors = 0;
+ instance->perf_mode = MR_LATENCY_PERF_MODE;
+
+
+ if (instance->adapter_type == AERO_SERIES) {
+ pcie_capability_read_word(instance->pdev, PCI_EXP_LNKSTA, &lnksta);
+ speed = lnksta & PCI_EXP_LNKSTA_CLS;
+
+ /*
+ * For Aero, if the PCIe link speed is < 16 GT/s, the driver should operate
+ * in latency perf mode and enable the R1 PCI bandwidth algorithm
+ */
+ if (speed < 0x4) {
+ instance->perf_mode = MR_LATENCY_PERF_MODE;
+ fusion->pcie_bw_limitation = true;
+ }
+
+ /*
+ * Performance mode settings provided through the module parameter perf_mode
+ * take effect only for:
+ * 1. The Aero family of adapters.
+ * 2. When the user sets the module parameter perf_mode in the range 0-2.
+ */
+ if ((perf_mode >= MR_BALANCED_PERF_MODE) &&
+ (perf_mode <= MR_LATENCY_PERF_MODE))
+ instance->perf_mode = perf_mode;
+ /*
+ * If intr coalescing is not supported by controller FW, then IOPS
+ * and Balanced modes are not feasible.
+ */
+ if (!intr_coalescing)
+ instance->perf_mode = MR_LATENCY_PERF_MODE;
+
+ }
+
+ if (instance->perf_mode == MR_BALANCED_PERF_MODE)
+ instance->low_latency_index_start =
+ MR_HIGH_IOPS_QUEUE_COUNT;
+ else
+ instance->low_latency_index_start = 1;
+
+ num_msix_req = num_online_cpus() + instance->low_latency_index_start;
+
+ instance->msix_vectors = min(num_msix_req,
+ instance->msix_vectors);
+
+ megasas_alloc_irq_vectors(instance);
+ if (!instance->msix_vectors)
+ instance->msix_load_balance = false;
}
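Putting the above together, symbolically (the actual macro values are not shown in this hunk): with N online CPUs, balanced mode sets low_latency_index_start = MR_HIGH_IOPS_QUEUE_COUNT and asks for N + MR_HIGH_IOPS_QUEUE_COUNT reply queues, while latency mode asks for N + 1. In both cases the request is capped by the vector count the firmware advertised, and megasas_alloc_irq_vectors() falls back to latency mode if the balanced-mode request cannot be satisfied in full.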
/*
* MSI-X host index 0 is common for all adapter.
@@ -5378,14 +6138,12 @@
if (!instance->msix_vectors) {
i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
if (i < 0)
- goto fail_setup_irqs;
+ goto fail_init_adapter;
}
megasas_setup_reply_map(instance);
dev_info(&instance->pdev->dev,
- "firmware supports msix\t: (%d)", fw_msix_count);
- dev_info(&instance->pdev->dev,
"current msix/online cpus\t: (%d/%d)\n",
instance->msix_vectors, (unsigned int)num_online_cpus());
dev_info(&instance->pdev->dev,
@@ -5404,13 +6162,14 @@
if (instance->instancet->init_adapter(instance))
goto fail_init_adapter;
- if (instance->adapter_type == VENTURA_SERIES) {
- scratch_pad_4 =
- readl(&instance->reg_set->outbound_scratch_pad_4);
- if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >=
+ if (instance->adapter_type >= VENTURA_SERIES) {
+ scratch_pad_3 =
+ megasas_readl(instance,
+ &instance->reg_set->outbound_scratch_pad_3);
+ if ((scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK) >=
MR_DEFAULT_NVME_PAGE_SHIFT)
instance->nvme_page_size =
- (1 << (scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK));
+ (1 << (scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK));
dev_info(&instance->pdev->dev,
"NVME page size\t: (%d)\n", instance->nvme_page_size);
@@ -5421,26 +6180,24 @@
megasas_setup_irqs_ioapic(instance))
goto fail_init_adapter;
+ if (instance->adapter_type != MFI_SERIES)
+ megasas_setup_irq_poll(instance);
+
instance->instancet->enable_intr(instance);
dev_info(&instance->pdev->dev, "INIT adapter done\n");
megasas_setup_jbod_map(instance);
- /** for passthrough
- * the following function will get the PD LIST.
- */
- memset(instance->pd_list, 0,
- (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
- if (megasas_get_pd_list(instance) < 0) {
- dev_err(&instance->pdev->dev, "failed to get PD list\n");
+ if (megasas_get_device_list(instance) != SUCCESS) {
+ dev_err(&instance->pdev->dev,
+ "%s: megasas_get_device_list failed\n",
+ __func__);
goto fail_get_ld_pd_list;
}
- memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
-
/* stream detection initialization */
- if (instance->adapter_type == VENTURA_SERIES) {
+ if (instance->adapter_type >= VENTURA_SERIES) {
fusion->stream_detect_by_ld =
kcalloc(MAX_LOGICAL_DRIVES_EXT,
sizeof(struct LD_STREAM_DETECT *),
@@ -5468,10 +6225,6 @@
}
}
- if (megasas_ld_list_query(instance,
- MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
- goto fail_get_ld_pd_list;
-
/*
* Compute the max allowed sectors per IO: The controller info has two
* limits on max sectors. Driver should use the minimum of these two.
@@ -5533,13 +6286,18 @@
else {
if (instance->crash_dump_buf)
- pci_free_consistent(instance->pdev,
+ dma_free_coherent(&instance->pdev->dev,
CRASH_DMA_BUF_SIZE,
instance->crash_dump_buf,
instance->crash_dump_h);
instance->crash_dump_buf = NULL;
}
+ if (instance->snapdump_wait_time) {
+ megasas_get_snapdump_properties(instance);
+ dev_info(&instance->pdev->dev, "Snap dump wait time\t: %d\n",
+ instance->snapdump_wait_time);
+ }
dev_info(&instance->pdev->dev,
"pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n",
@@ -5551,9 +6309,8 @@
instance->UnevenSpanSupport ? "yes" : "no");
dev_info(&instance->pdev->dev, "firmware crash dump : %s\n",
instance->crash_dump_drv_support ? "yes" : "no");
- dev_info(&instance->pdev->dev, "jbod sync map : %s\n",
- instance->use_seqnum_jbod_fp ? "yes" : "no");
-
+ dev_info(&instance->pdev->dev, "JBOD sequence map : %s\n",
+ instance->use_seqnum_jbod_fp ? "enabled" : "disabled");
instance->max_sectors_per_req = instance->max_num_sge *
SGE_BUFFER_SIZE / 512;
@@ -5577,19 +6334,32 @@
/* Launch SR-IOV heartbeat timer */
if (instance->requestorId) {
- if (!megasas_sriov_start_heartbeat(instance, 1))
+ if (!megasas_sriov_start_heartbeat(instance, 1)) {
megasas_start_timer(instance);
- else
+ } else {
instance->skip_heartbeat_timer_del = 1;
+ goto fail_get_ld_pd_list;
+ }
}
+ /*
+ * Create and start watchdog thread which will monitor
+ * controller state every 1 sec and trigger OCR when
+ * it enters fault state
+ */
+ if (instance->adapter_type != MFI_SERIES)
+ if (megasas_fusion_start_watchdog(instance) != SUCCESS)
+ goto fail_start_watchdog;
+
return 0;
+fail_start_watchdog:
+ if (instance->requestorId && !instance->skip_heartbeat_timer_del)
+ del_timer_sync(&instance->sriov_heartbeat_timer);
fail_get_ld_pd_list:
instance->instancet->disable_intr(instance);
-fail_init_adapter:
megasas_destroy_irqs(instance);
-fail_setup_irqs:
+fail_init_adapter:
if (instance->msix_vectors)
pci_free_irq_vectors(instance->pdev);
instance->msix_vectors = 0;
@@ -5616,7 +6386,7 @@
u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1);
if (instance->reply_queue)
- pci_free_consistent(instance->pdev, reply_q_sz,
+ dma_free_coherent(&instance->pdev->dev, reply_q_sz,
instance->reply_queue, instance->reply_queue_h);
megasas_free_cmds(instance);
@@ -5655,10 +6425,9 @@
}
dcmd = &cmd->frame->dcmd;
- el_info = pci_zalloc_consistent(instance->pdev,
- sizeof(struct megasas_evt_log_info),
- &el_info_h);
-
+ el_info = dma_alloc_coherent(&instance->pdev->dev,
+ sizeof(struct megasas_evt_log_info),
+ &el_info_h, GFP_KERNEL);
if (!el_info) {
megasas_return_cmd(instance, cmd);
return -ENOMEM;
@@ -5695,8 +6464,9 @@
eli->boot_seq_num = el_info->boot_seq_num;
dcmd_failed:
- pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
- el_info, el_info_h);
+ dma_free_coherent(&instance->pdev->dev,
+ sizeof(struct megasas_evt_log_info),
+ el_info, el_info_h);
megasas_return_cmd(instance, cmd);
@@ -5861,7 +6631,8 @@
int ret;
struct megasas_cmd *cmd;
struct megasas_dcmd_frame *dcmd;
- u16 targetId = (sdev->channel % 2) + sdev->id;
+ u16 targetId = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) +
+ sdev->id;
cmd = megasas_get_cmd(instance);
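The widened targetId calculation keeps IDs on the two VD channels from colliding. Assuming MEGASAS_MAX_DEV_PER_CHANNEL is 128 (its usual value in the driver header, not shown in this hunk): a device at channel 3, id 5 now maps to targetId 1 * 128 + 5 = 133, whereas the old formula produced 1 + 5 = 6 and could clash with id 6 on the even channel.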
@@ -5903,8 +6674,10 @@
switch (dcmd_timeout_ocr_possible(instance)) {
case INITIATE_OCR:
cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+ mutex_unlock(&instance->reset_mutex);
megasas_reset_fusion(instance->host,
MFI_IO_TIMEOUT_OCR);
+ mutex_lock(&instance->reset_mutex);
break;
case KILL_ADAPTER:
megaraid_sas_kill_hba(instance);
@@ -6023,13 +6796,13 @@
* @instance: Adapter soft state
* Description:
*
- * For Ventura, driver/FW will operate in 64bit DMA addresses.
+ * For Ventura, driver/FW will operate in 63bit DMA addresses.
*
* For invader-
* By default, driver/FW will operate in 32bit DMA addresses
* for consistent DMA mapping but if 32 bit consistent
- * DMA mask fails, driver will try with 64 bit consistent
- * mask provided FW is true 64bit DMA capable
+ * DMA mask fails, the driver will try a 63 bit consistent
+ * mask, provided the FW is truly 63 bit DMA capable
*
* For older controllers(Thunderbolt and MFI based adapters)-
* driver/FW will operate in 32 bit consistent DMA addresses.
@@ -6039,31 +6812,31 @@
{
u64 consistent_mask;
struct pci_dev *pdev;
- u32 scratch_pad_2;
+ u32 scratch_pad_1;
pdev = instance->pdev;
- consistent_mask = (instance->adapter_type == VENTURA_SERIES) ?
- DMA_BIT_MASK(64) : DMA_BIT_MASK(32);
+ consistent_mask = (instance->adapter_type >= VENTURA_SERIES) ?
+ DMA_BIT_MASK(63) : DMA_BIT_MASK(32);
if (IS_DMA64) {
- if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(63)) &&
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
goto fail_set_dma_mask;
- if ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) &&
+ if ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) &&
(dma_set_coherent_mask(&pdev->dev, consistent_mask) &&
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) {
/*
* If 32 bit DMA mask fails, then try for 64 bit mask
* for FW capable of handling 64 bit DMA.
*/
- scratch_pad_2 = readl
- (&instance->reg_set->outbound_scratch_pad_2);
+ scratch_pad_1 = megasas_readl
+ (instance, &instance->reg_set->outbound_scratch_pad_1);
- if (!(scratch_pad_2 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET))
+ if (!(scratch_pad_1 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET))
goto fail_set_dma_mask;
else if (dma_set_mask_and_coherent(&pdev->dev,
- DMA_BIT_MASK(64)))
+ DMA_BIT_MASK(63)))
goto fail_set_dma_mask;
}
} else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
@@ -6075,8 +6848,8 @@
instance->consistent_mask_64bit = true;
dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n",
- ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) ? "64" : "32"),
- (instance->consistent_mask_64bit ? "64" : "32"));
+ ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) ? "63" : "32"),
+ (instance->consistent_mask_64bit ? "63" : "32"));
return 0;
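The same try-wide-then-fall-back pattern, reduced to a hedged stand-alone sketch (example_setup_dma() and its argument are illustrative only; the driver's real logic additionally consults scratch_pad_1 as shown above):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int example_setup_dma(struct pci_dev *pdev, bool fw_is_63bit_capable)
{
	u64 mask = fw_is_63bit_capable ? DMA_BIT_MASK(63) : DMA_BIT_MASK(32);

	if (dma_set_mask_and_coherent(&pdev->dev, mask)) {
		/* wide mask rejected by the platform: retry with 32-bit DMA */
		return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	}

	return 0;
}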
@@ -6089,12 +6862,14 @@
/*
* megasas_set_adapter_type - Set adapter type.
* Supported controllers can be divided in
- * 4 categories- enum MR_ADAPTER_TYPE {
- * MFI_SERIES = 1,
- * THUNDERBOLT_SERIES = 2,
- * INVADER_SERIES = 3,
- * VENTURA_SERIES = 4,
- * };
+ * different categories-
+ * enum MR_ADAPTER_TYPE {
+ * MFI_SERIES = 1,
+ * THUNDERBOLT_SERIES = 2,
+ * INVADER_SERIES = 3,
+ * VENTURA_SERIES = 4,
+ * AERO_SERIES = 5,
+ * };
* @instance: Adapter soft state
* return: void
*/
@@ -6105,6 +6880,12 @@
instance->adapter_type = MFI_SERIES;
} else {
switch (instance->pdev->device) {
+ case PCI_DEVICE_ID_LSI_AERO_10E1:
+ case PCI_DEVICE_ID_LSI_AERO_10E2:
+ case PCI_DEVICE_ID_LSI_AERO_10E5:
+ case PCI_DEVICE_ID_LSI_AERO_10E6:
+ instance->adapter_type = AERO_SERIES;
+ break;
case PCI_DEVICE_ID_LSI_VENTURA:
case PCI_DEVICE_ID_LSI_CRUSADER:
case PCI_DEVICE_ID_LSI_HARPOON:
@@ -6134,10 +6915,10 @@
static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance)
{
- instance->producer = pci_alloc_consistent(instance->pdev, sizeof(u32),
- &instance->producer_h);
- instance->consumer = pci_alloc_consistent(instance->pdev, sizeof(u32),
- &instance->consumer_h);
+ instance->producer = dma_alloc_coherent(&instance->pdev->dev,
+ sizeof(u32), &instance->producer_h, GFP_KERNEL);
+ instance->consumer = dma_alloc_coherent(&instance->pdev->dev,
+ sizeof(u32), &instance->consumer_h, GFP_KERNEL);
if (!instance->producer || !instance->consumer) {
dev_err(&instance->pdev->dev,
@@ -6172,6 +6953,7 @@
if (megasas_alloc_mfi_ctrl_mem(instance))
goto fail;
break;
+ case AERO_SERIES:
case VENTURA_SERIES:
case THUNDERBOLT_SERIES:
case INVADER_SERIES:
@@ -6199,11 +6981,11 @@
kfree(instance->reply_map);
if (instance->adapter_type == MFI_SERIES) {
if (instance->producer)
- pci_free_consistent(instance->pdev, sizeof(u32),
+ dma_free_coherent(&instance->pdev->dev, sizeof(u32),
instance->producer,
instance->producer_h);
if (instance->consumer)
- pci_free_consistent(instance->pdev, sizeof(u32),
+ dma_free_coherent(&instance->pdev->dev, sizeof(u32),
instance->consumer,
instance->consumer_h);
} else {
@@ -6224,10 +7006,9 @@
struct pci_dev *pdev = instance->pdev;
struct fusion_context *fusion = instance->ctrl_context;
- instance->evt_detail =
- pci_alloc_consistent(pdev,
- sizeof(struct megasas_evt_detail),
- &instance->evt_detail_h);
+ instance->evt_detail = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct megasas_evt_detail),
+ &instance->evt_detail_h, GFP_KERNEL);
if (!instance->evt_detail) {
dev_err(&instance->pdev->dev,
@@ -6247,12 +7028,32 @@
"Failed to allocate PD list buffer\n");
return -ENOMEM;
}
+
+ instance->snapdump_prop = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct MR_SNAPDUMP_PROPERTIES),
+ &instance->snapdump_prop_h, GFP_KERNEL);
+
+ if (!instance->snapdump_prop)
+ dev_err(&pdev->dev,
+ "Failed to allocate snapdump properties buffer\n");
+
+ instance->host_device_list_buf = dma_alloc_coherent(&pdev->dev,
+ HOST_DEVICE_LIST_SZ,
+ &instance->host_device_list_buf_h,
+ GFP_KERNEL);
+
+ if (!instance->host_device_list_buf) {
+ dev_err(&pdev->dev,
+ "Failed to allocate targetid list buffer\n");
+ return -ENOMEM;
+ }
+
}
instance->pd_list_buf =
- pci_alloc_consistent(pdev,
+ dma_alloc_coherent(&pdev->dev,
MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
- &instance->pd_list_buf_h);
+ &instance->pd_list_buf_h, GFP_KERNEL);
if (!instance->pd_list_buf) {
dev_err(&pdev->dev, "Failed to allocate PD list buffer\n");
@@ -6260,9 +7061,9 @@
}
instance->ctrl_info_buf =
- pci_alloc_consistent(pdev,
+ dma_alloc_coherent(&pdev->dev,
sizeof(struct megasas_ctrl_info),
- &instance->ctrl_info_buf_h);
+ &instance->ctrl_info_buf_h, GFP_KERNEL);
if (!instance->ctrl_info_buf) {
dev_err(&pdev->dev,
@@ -6271,9 +7072,9 @@
}
instance->ld_list_buf =
- pci_alloc_consistent(pdev,
+ dma_alloc_coherent(&pdev->dev,
sizeof(struct MR_LD_LIST),
- &instance->ld_list_buf_h);
+ &instance->ld_list_buf_h, GFP_KERNEL);
if (!instance->ld_list_buf) {
dev_err(&pdev->dev, "Failed to allocate LD list buffer\n");
@@ -6281,9 +7082,9 @@
}
instance->ld_targetid_list_buf =
- pci_alloc_consistent(pdev,
- sizeof(struct MR_LD_TARGETID_LIST),
- &instance->ld_targetid_list_buf_h);
+ dma_alloc_coherent(&pdev->dev,
+ sizeof(struct MR_LD_TARGETID_LIST),
+ &instance->ld_targetid_list_buf_h, GFP_KERNEL);
if (!instance->ld_targetid_list_buf) {
dev_err(&pdev->dev,
@@ -6293,21 +7094,20 @@
if (!reset_devices) {
instance->system_info_buf =
- pci_alloc_consistent(pdev,
- sizeof(struct MR_DRV_SYSTEM_INFO),
- &instance->system_info_h);
+ dma_alloc_coherent(&pdev->dev,
+ sizeof(struct MR_DRV_SYSTEM_INFO),
+ &instance->system_info_h, GFP_KERNEL);
instance->pd_info =
- pci_alloc_consistent(pdev,
- sizeof(struct MR_PD_INFO),
- &instance->pd_info_h);
+ dma_alloc_coherent(&pdev->dev,
+ sizeof(struct MR_PD_INFO),
+ &instance->pd_info_h, GFP_KERNEL);
instance->tgt_prop =
- pci_alloc_consistent(pdev,
- sizeof(struct MR_TARGET_PROPERTIES),
- &instance->tgt_prop_h);
+ dma_alloc_coherent(&pdev->dev,
+ sizeof(struct MR_TARGET_PROPERTIES),
+ &instance->tgt_prop_h, GFP_KERNEL);
instance->crash_dump_buf =
- pci_alloc_consistent(pdev,
- CRASH_DMA_BUF_SIZE,
- &instance->crash_dump_h);
+ dma_alloc_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE,
+ &instance->crash_dump_h, GFP_KERNEL);
if (!instance->system_info_buf)
dev_err(&instance->pdev->dev,
@@ -6343,7 +7143,7 @@
struct fusion_context *fusion = instance->ctrl_context;
if (instance->evt_detail)
- pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
+ dma_free_coherent(&pdev->dev, sizeof(struct megasas_evt_detail),
instance->evt_detail,
instance->evt_detail_h);
@@ -6354,43 +7154,56 @@
fusion->ioc_init_request_phys);
if (instance->pd_list_buf)
- pci_free_consistent(pdev,
+ dma_free_coherent(&pdev->dev,
MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
instance->pd_list_buf,
instance->pd_list_buf_h);
if (instance->ld_list_buf)
- pci_free_consistent(pdev, sizeof(struct MR_LD_LIST),
+ dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_LIST),
instance->ld_list_buf,
instance->ld_list_buf_h);
if (instance->ld_targetid_list_buf)
- pci_free_consistent(pdev, sizeof(struct MR_LD_TARGETID_LIST),
+ dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_TARGETID_LIST),
instance->ld_targetid_list_buf,
instance->ld_targetid_list_buf_h);
if (instance->ctrl_info_buf)
- pci_free_consistent(pdev, sizeof(struct megasas_ctrl_info),
+ dma_free_coherent(&pdev->dev, sizeof(struct megasas_ctrl_info),
instance->ctrl_info_buf,
instance->ctrl_info_buf_h);
if (instance->system_info_buf)
- pci_free_consistent(pdev, sizeof(struct MR_DRV_SYSTEM_INFO),
+ dma_free_coherent(&pdev->dev, sizeof(struct MR_DRV_SYSTEM_INFO),
instance->system_info_buf,
instance->system_info_h);
if (instance->pd_info)
- pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
+ dma_free_coherent(&pdev->dev, sizeof(struct MR_PD_INFO),
instance->pd_info, instance->pd_info_h);
if (instance->tgt_prop)
- pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
+ dma_free_coherent(&pdev->dev, sizeof(struct MR_TARGET_PROPERTIES),
instance->tgt_prop, instance->tgt_prop_h);
if (instance->crash_dump_buf)
- pci_free_consistent(pdev, CRASH_DMA_BUF_SIZE,
+ dma_free_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE,
instance->crash_dump_buf,
instance->crash_dump_h);
+
+ if (instance->snapdump_prop)
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct MR_SNAPDUMP_PROPERTIES),
+ instance->snapdump_prop,
+ instance->snapdump_prop_h);
+
+ if (instance->host_device_list_buf)
+ dma_free_coherent(&pdev->dev,
+ HOST_DEVICE_LIST_SZ,
+ instance->host_device_list_buf,
+ instance->host_device_list_buf_h);
+
}
/*
@@ -6414,6 +7227,7 @@
INIT_LIST_HEAD(&instance->internal_reset_pending_q);
atomic_set(&instance->fw_outstanding, 0);
+ atomic64_set(&instance->total_io_count, 0);
init_waitqueue_head(&instance->int_cmd_wait_q);
init_waitqueue_head(&instance->abort_cmd_wait_q);
@@ -6436,13 +7250,13 @@
instance->last_time = 0;
instance->disableOnlineCtrlReset = 1;
instance->UnevenSpanSupport = 0;
+ instance->smp_affinity_enable = smp_affinity_enable ? true : false;
+ instance->msix_load_balance = false;
- if (instance->adapter_type != MFI_SERIES) {
+ if (instance->adapter_type != MFI_SERIES)
INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
- INIT_WORK(&instance->crash_init, megasas_fusion_crash_dump_wq);
- } else {
+ else
INIT_WORK(&instance->work_init, process_fw_state_change_wq);
- }
}
/**
@@ -6458,6 +7272,19 @@
struct megasas_instance *instance;
u16 control = 0;
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_LSI_AERO_10E0:
+ case PCI_DEVICE_ID_LSI_AERO_10E3:
+ case PCI_DEVICE_ID_LSI_AERO_10E4:
+ case PCI_DEVICE_ID_LSI_AERO_10E7:
+ dev_err(&pdev->dev, "Adapter is in non secure mode\n");
+ return 1;
+ case PCI_DEVICE_ID_LSI_AERO_10E1:
+ case PCI_DEVICE_ID_LSI_AERO_10E5:
+ dev_info(&pdev->dev, "Adapter is in configurable secure mode\n");
+ break;
+ }
+
/* Reset MSI-X in the kdump kernel */
if (reset_devices) {
pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
@@ -6516,17 +7343,20 @@
if (instance->requestorId) {
if (instance->PlasmaFW111) {
instance->vf_affiliation_111 =
- pci_alloc_consistent(pdev, sizeof(struct MR_LD_VF_AFFILIATION_111),
- &instance->vf_affiliation_111_h);
+ dma_alloc_coherent(&pdev->dev,
+ sizeof(struct MR_LD_VF_AFFILIATION_111),
+ &instance->vf_affiliation_111_h,
+ GFP_KERNEL);
if (!instance->vf_affiliation_111)
dev_warn(&pdev->dev, "Can't allocate "
"memory for VF affiliation buffer\n");
} else {
instance->vf_affiliation =
- pci_alloc_consistent(pdev,
- (MAX_LOGICAL_DRIVES + 1) *
- sizeof(struct MR_LD_VF_AFFILIATION),
- &instance->vf_affiliation_h);
+ dma_alloc_coherent(&pdev->dev,
+ (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION),
+ &instance->vf_affiliation_h,
+ GFP_KERNEL);
if (!instance->vf_affiliation)
dev_warn(&pdev->dev, "Can't allocate "
"memory for VF affiliation buffer\n");
@@ -6556,7 +7386,9 @@
/*
* Trigger SCSI to scan our drives
*/
- scsi_scan_host(host);
+ if (!instance->enable_fw_dev_list ||
+ (instance->host_device_list_buf->count > 0))
+ scsi_scan_host(host);
/*
* Initiate AEN (Asynchronous Event Notification)
@@ -6566,6 +7398,8 @@
goto fail_start_aen;
}
+ megasas_setup_debugfs(instance);
+
/* Get current SR-IOV LD/VF affiliation */
if (instance->requestorId)
megasas_get_ld_vf_affiliation(instance, 1);
@@ -6697,17 +7531,25 @@
static int
megasas_suspend(struct pci_dev *pdev, pm_message_t state)
{
- struct Scsi_Host *host;
struct megasas_instance *instance;
instance = pci_get_drvdata(pdev);
- host = instance->host;
+
+ if (!instance)
+ return 0;
+
instance->unload = 1;
+ dev_info(&pdev->dev, "%s is called\n", __func__);
+
/* Shutdown SR-IOV heartbeat timer */
if (instance->requestorId && !instance->skip_heartbeat_timer_del)
del_timer_sync(&instance->sriov_heartbeat_timer);
+ /* Stop the FW fault detection watchdog */
+ if (instance->adapter_type != MFI_SERIES)
+ megasas_fusion_stop_watchdog(instance);
+
megasas_flush_cache(instance);
megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
@@ -6749,11 +7591,16 @@
int irq_flags = PCI_IRQ_LEGACY;
instance = pci_get_drvdata(pdev);
+
+ if (!instance)
+ return 0;
+
host = instance->host;
pci_set_power_state(pdev, PCI_D0);
pci_enable_wake(pdev, PCI_D0, 0);
pci_restore_state(pdev);
+ dev_info(&pdev->dev, "%s is called\n", __func__);
/*
* PCI prepping: enable device set bus mastering and dma mask
*/
@@ -6785,7 +7632,7 @@
/* Now re-enable MSI-X */
if (instance->msix_vectors) {
irq_flags = PCI_IRQ_MSIX;
- if (smp_affinity_enable)
+ if (instance->smp_affinity_enable)
irq_flags |= PCI_IRQ_AFFINITY;
}
rval = pci_alloc_irq_vectors(instance->pdev, 1,
@@ -6823,6 +7670,9 @@
megasas_setup_irqs_ioapic(instance))
goto fail_init_mfi;
+ if (instance->adapter_type != MFI_SERIES)
+ megasas_setup_irq_poll(instance);
+
/* Re-launch SR-IOV heartbeat timer */
if (instance->requestorId) {
if (!megasas_sriov_start_heartbeat(instance, 0))
@@ -6843,8 +7693,16 @@
if (megasas_start_aen(instance))
dev_err(&instance->pdev->dev, "Start AEN failed\n");
+ /* Re-launch FW fault watchdog */
+ if (instance->adapter_type != MFI_SERIES)
+ if (megasas_fusion_start_watchdog(instance) != SUCCESS)
+ goto fail_start_watchdog;
+
return 0;
+fail_start_watchdog:
+ if (instance->requestorId && !instance->skip_heartbeat_timer_del)
+ del_timer_sync(&instance->sriov_heartbeat_timer);
fail_init_mfi:
megasas_free_ctrl_dma_buffers(instance);
megasas_free_ctrl_mem(instance);
@@ -6905,6 +7763,10 @@
u32 pd_seq_map_sz;
instance = pci_get_drvdata(pdev);
+
+ if (!instance)
+ return;
+
host = instance->host;
fusion = instance->ctrl_context;
@@ -6912,6 +7774,10 @@
if (instance->requestorId && !instance->skip_heartbeat_timer_del)
del_timer_sync(&instance->sriov_heartbeat_timer);
+ /* Stop the FW fault detection watchdog */
+ if (instance->adapter_type != MFI_SERIES)
+ megasas_fusion_stop_watchdog(instance);
+
if (instance->fw_crash_state != UNAVAILABLE)
megasas_free_host_crash_buffer(instance);
scsi_remove_host(instance->host);
@@ -6956,7 +7822,7 @@
if (instance->msix_vectors)
pci_free_irq_vectors(instance->pdev);
- if (instance->adapter_type == VENTURA_SERIES) {
+ if (instance->adapter_type >= VENTURA_SERIES) {
for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
kfree(fusion->stream_detect_by_ld[i]);
kfree(fusion->stream_detect_by_ld);
@@ -6994,19 +7860,19 @@
}
if (instance->vf_affiliation)
- pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) *
+ dma_free_coherent(&pdev->dev, (MAX_LOGICAL_DRIVES + 1) *
sizeof(struct MR_LD_VF_AFFILIATION),
instance->vf_affiliation,
instance->vf_affiliation_h);
if (instance->vf_affiliation_111)
- pci_free_consistent(pdev,
+ dma_free_coherent(&pdev->dev,
sizeof(struct MR_LD_VF_AFFILIATION_111),
instance->vf_affiliation_111,
instance->vf_affiliation_111_h);
if (instance->hb_host_mem)
- pci_free_consistent(pdev, sizeof(struct MR_CTRL_HB_HOST_MEM),
+ dma_free_coherent(&pdev->dev, sizeof(struct MR_CTRL_HB_HOST_MEM),
instance->hb_host_mem,
instance->hb_host_mem_h);
@@ -7014,6 +7880,8 @@
megasas_free_ctrl_mem(instance);
+ megasas_destroy_debugfs(instance);
+
scsi_host_put(host);
pci_disable_device(pdev);
@@ -7027,6 +7895,9 @@
{
struct megasas_instance *instance = pci_get_drvdata(pdev);
+ if (!instance)
+ return;
+
instance->unload = 1;
if (megasas_wait_for_adapter_operational(instance))
@@ -7172,7 +8043,9 @@
if ((ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) ||
((ioc->frame.hdr.cmd == MFI_CMD_NVME) &&
- !instance->support_nvme_passthru)) {
+ !instance->support_nvme_passthru) ||
+ ((ioc->frame.hdr.cmd == MFI_CMD_TOOLBOX) &&
+ !instance->support_pci_lane_margining)) {
dev_err(&instance->pdev->dev,
"Received invalid ioctl command 0x%x\n",
ioc->frame.hdr.cmd);
@@ -7208,10 +8081,13 @@
opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
if (opcode == MR_DCMD_CTRL_SHUTDOWN) {
+ mutex_lock(&instance->reset_mutex);
if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) {
megasas_return_cmd(instance, cmd);
+ mutex_unlock(&instance->reset_mutex);
return -1;
}
+ mutex_unlock(&instance->reset_mutex);
}
if (opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) {
@@ -7254,7 +8130,7 @@
/*
* We don't change the dma_coherent_mask, so
- * pci_alloc_consistent only returns 32bit addresses
+ * dma_alloc_coherent only returns 32bit addresses
*/
if (instance->consistent_mask_64bit) {
kern_sge64[i].phys_addr = cpu_to_le64(buf_handle);
@@ -7653,6 +8529,14 @@
static DRIVER_ATTR_RO(support_nvme_encapsulation);
+static ssize_t
+support_pci_lane_margining_show(struct device_driver *dd, char *buf)
+{
+ return sprintf(buf, "%u\n", support_pci_lane_margining);
+}
+
+static DRIVER_ATTR_RO(support_pci_lane_margining);
+
static inline void megasas_remove_scsi_device(struct scsi_device *sdev)
{
sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n");
@@ -7660,102 +8544,103 @@
scsi_device_put(sdev);
}
-static void
-megasas_aen_polling(struct work_struct *work)
+/**
+ * megasas_update_device_list - Update the PD and LD device list from FW
+ * after an AEN event notification
+ * @instance: Adapter soft state
+ * @event_type: Indicates type of event (PD or LD event)
+ *
+ * @return: Success or failure
+ *
+ * Issue DCMDs to Firmware to update the internal device list in driver.
+ * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination
+ * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list.
+ */
+static
+int megasas_update_device_list(struct megasas_instance *instance,
+ int event_type)
{
- struct megasas_aen_event *ev =
- container_of(work, struct megasas_aen_event, hotplug_work.work);
- struct megasas_instance *instance = ev->instance;
- union megasas_evt_class_locale class_locale;
- struct Scsi_Host *host;
- struct scsi_device *sdev1;
- u16 pd_index = 0;
- u16 ld_index = 0;
- int i, j, doscan = 0;
- u32 seq_num, wait_time = MEGASAS_RESET_WAIT_TIME;
- int error;
- u8 dcmd_ret = DCMD_SUCCESS;
+ int dcmd_ret = DCMD_SUCCESS;
- if (!instance) {
- printk(KERN_ERR "invalid instance!\n");
- kfree(ev);
- return;
- }
-
- /* Adjust event workqueue thread wait time for VF mode */
- if (instance->requestorId)
- wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
-
- /* Don't run the event workqueue thread if OCR is running */
- mutex_lock(&instance->reset_mutex);
-
- instance->ev = NULL;
- host = instance->host;
- if (instance->evt_detail) {
- megasas_decode_evt(instance);
-
- switch (le32_to_cpu(instance->evt_detail->code)) {
-
- case MR_EVT_PD_INSERTED:
- case MR_EVT_PD_REMOVED:
- dcmd_ret = megasas_get_pd_list(instance);
- if (dcmd_ret == DCMD_SUCCESS)
- doscan = SCAN_PD_CHANNEL;
- break;
-
- case MR_EVT_LD_OFFLINE:
- case MR_EVT_CFG_CLEARED:
- case MR_EVT_LD_DELETED:
- case MR_EVT_LD_CREATED:
- if (!instance->requestorId ||
- (instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0)))
- dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
-
- if (dcmd_ret == DCMD_SUCCESS)
- doscan = SCAN_VD_CHANNEL;
-
- break;
-
- case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
- case MR_EVT_FOREIGN_CFG_IMPORTED:
- case MR_EVT_LD_STATE_CHANGE:
- dcmd_ret = megasas_get_pd_list(instance);
-
- if (dcmd_ret != DCMD_SUCCESS)
- break;
-
- if (!instance->requestorId ||
- (instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0)))
- dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
-
- if (dcmd_ret != DCMD_SUCCESS)
- break;
-
- doscan = SCAN_VD_CHANNEL | SCAN_PD_CHANNEL;
- dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
- instance->host->host_no);
- break;
-
- case MR_EVT_CTRL_PROP_CHANGED:
- dcmd_ret = megasas_get_ctrl_info(instance);
- break;
- default:
- doscan = 0;
- break;
- }
+ if (instance->enable_fw_dev_list) {
+ dcmd_ret = megasas_host_device_list_query(instance, false);
+ if (dcmd_ret != DCMD_SUCCESS)
+ goto out;
} else {
- dev_err(&instance->pdev->dev, "invalid evt_detail!\n");
- mutex_unlock(&instance->reset_mutex);
- kfree(ev);
- return;
+ if (event_type & SCAN_PD_CHANNEL) {
+ dcmd_ret = megasas_get_pd_list(instance);
+
+ if (dcmd_ret != DCMD_SUCCESS)
+ goto out;
+ }
+
+ if (event_type & SCAN_VD_CHANNEL) {
+ if (!instance->requestorId ||
+ (instance->requestorId &&
+ megasas_get_ld_vf_affiliation(instance, 0))) {
+ dcmd_ret = megasas_ld_list_query(instance,
+ MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
+ if (dcmd_ret != DCMD_SUCCESS)
+ goto out;
+ }
+ }
}
- mutex_unlock(&instance->reset_mutex);
+out:
+ return dcmd_ret;
+}
- if (doscan & SCAN_PD_CHANNEL) {
+/**
+ * megasas_add_remove_devices - Add/remove devices to SCSI mid-layer
+ * after an AEN event notification
+ * @instance: Adapter soft state
+ * @scan_type: Indicates type of devices (PD/LD) to add
+ * @return void
+ */
+static
+void megasas_add_remove_devices(struct megasas_instance *instance,
+ int scan_type)
+{
+ int i, j;
+ u16 pd_index = 0;
+ u16 ld_index = 0;
+ u16 channel = 0, id = 0;
+ struct Scsi_Host *host;
+ struct scsi_device *sdev1;
+ struct MR_HOST_DEVICE_LIST *targetid_list = NULL;
+ struct MR_HOST_DEVICE_LIST_ENTRY *targetid_entry = NULL;
+
+ host = instance->host;
+
+ if (instance->enable_fw_dev_list) {
+ targetid_list = instance->host_device_list_buf;
+ for (i = 0; i < targetid_list->count; i++) {
+ targetid_entry = &targetid_list->host_device_list[i];
+ if (targetid_entry->flags.u.bits.is_sys_pd) {
+ channel = le16_to_cpu(targetid_entry->target_id) /
+ MEGASAS_MAX_DEV_PER_CHANNEL;
+ id = le16_to_cpu(targetid_entry->target_id) %
+ MEGASAS_MAX_DEV_PER_CHANNEL;
+ } else {
+ channel = MEGASAS_MAX_PD_CHANNELS +
+ (le16_to_cpu(targetid_entry->target_id) /
+ MEGASAS_MAX_DEV_PER_CHANNEL);
+ id = le16_to_cpu(targetid_entry->target_id) %
+ MEGASAS_MAX_DEV_PER_CHANNEL;
+ }
+ sdev1 = scsi_device_lookup(host, channel, id, 0);
+ if (!sdev1) {
+ scsi_add_device(host, channel, id, 0);
+ } else {
+ scsi_device_put(sdev1);
+ }
+ }
+ }
+
+ if (scan_type & SCAN_PD_CHANNEL) {
for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
- pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
+ pd_index = i * MEGASAS_MAX_DEV_PER_CHANNEL + j;
sdev1 = scsi_device_lookup(host, i, j, 0);
if (instance->pd_list[pd_index].driveState ==
MR_PD_STATE_SYSTEM) {
@@ -7771,11 +8656,12 @@
}
}
- if (doscan & SCAN_VD_CHANNEL) {
+ if (scan_type & SCAN_VD_CHANNEL) {
for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
- sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+ sdev1 = scsi_device_lookup(host,
+ MEGASAS_MAX_PD_CHANNELS + i, j, 0);
if (instance->ld_ids[ld_index] != 0xff) {
if (!sdev1)
scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
@@ -7789,6 +8675,84 @@
}
}
+}
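
For reference, the target-ID to (channel, id) split that megasas_add_remove_devices() applies to the firmware-exposed device list can be reproduced with the small standalone sketch below. The channel and per-channel device counts are assumptions modelled on MEGASAS_MAX_PD_CHANNELS / MEGASAS_MAX_DEV_PER_CHANNEL and are not taken from this patch.

#include <stdio.h>
#include <stdbool.h>

#define MAX_PD_CHANNELS   2    /* assumed stand-in for MEGASAS_MAX_PD_CHANNELS */
#define DEV_PER_CHANNEL   128  /* assumed stand-in for MEGASAS_MAX_DEV_PER_CHANNEL */

static void map_target(unsigned int target_id, bool is_sys_pd,
		       unsigned int *channel, unsigned int *id)
{
	if (is_sys_pd)
		/* system PDs stay on the PD channels */
		*channel = target_id / DEV_PER_CHANNEL;
	else
		/* LDs are offset past the PD channels */
		*channel = MAX_PD_CHANNELS + target_id / DEV_PER_CHANNEL;
	*id = target_id % DEV_PER_CHANNEL;
}

int main(void)
{
	unsigned int ch, id;

	map_target(5, true, &ch, &id);     /* PD  5  -> channel 0, id 5 */
	printf("PD   5 -> C%u:T%u\n", ch, id);
	map_target(130, false, &ch, &id);  /* LD 130 -> channel 3, id 2 */
	printf("LD 130 -> C%u:T%u\n", ch, id);
	return 0;
}
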
+
+static void
+megasas_aen_polling(struct work_struct *work)
+{
+ struct megasas_aen_event *ev =
+ container_of(work, struct megasas_aen_event, hotplug_work.work);
+ struct megasas_instance *instance = ev->instance;
+ union megasas_evt_class_locale class_locale;
+ int event_type = 0;
+ u32 seq_num;
+ int error;
+ u8 dcmd_ret = DCMD_SUCCESS;
+
+ if (!instance) {
+ printk(KERN_ERR "invalid instance!\n");
+ kfree(ev);
+ return;
+ }
+
+ /* Don't run the event workqueue thread if OCR is running */
+ mutex_lock(&instance->reset_mutex);
+
+ instance->ev = NULL;
+ if (instance->evt_detail) {
+ megasas_decode_evt(instance);
+
+ switch (le32_to_cpu(instance->evt_detail->code)) {
+
+ case MR_EVT_PD_INSERTED:
+ case MR_EVT_PD_REMOVED:
+ event_type = SCAN_PD_CHANNEL;
+ break;
+
+ case MR_EVT_LD_OFFLINE:
+ case MR_EVT_CFG_CLEARED:
+ case MR_EVT_LD_DELETED:
+ case MR_EVT_LD_CREATED:
+ event_type = SCAN_VD_CHANNEL;
+ break;
+
+ case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
+ case MR_EVT_FOREIGN_CFG_IMPORTED:
+ case MR_EVT_LD_STATE_CHANGE:
+ event_type = SCAN_PD_CHANNEL | SCAN_VD_CHANNEL;
+ dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
+ instance->host->host_no);
+ break;
+
+ case MR_EVT_CTRL_PROP_CHANGED:
+ dcmd_ret = megasas_get_ctrl_info(instance);
+ if (dcmd_ret == DCMD_SUCCESS &&
+ instance->snapdump_wait_time) {
+ megasas_get_snapdump_properties(instance);
+ dev_info(&instance->pdev->dev,
+ "Snap dump wait time\t: %d\n",
+ instance->snapdump_wait_time);
+ }
+ break;
+ default:
+ event_type = 0;
+ break;
+ }
+ } else {
+ dev_err(&instance->pdev->dev, "invalid evt_detail!\n");
+ mutex_unlock(&instance->reset_mutex);
+ kfree(ev);
+ return;
+ }
+
+ if (event_type)
+ dcmd_ret = megasas_update_device_list(instance, event_type);
+
+ mutex_unlock(&instance->reset_mutex);
+
+ if (event_type && dcmd_ret == DCMD_SUCCESS)
+ megasas_add_remove_devices(instance, event_type);
+
if (dcmd_ret == DCMD_SUCCESS)
seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
else
@@ -7842,6 +8806,7 @@
support_poll_for_event = 2;
support_device_change = 1;
support_nvme_encapsulation = true;
+ support_pci_lane_margining = true;
memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
@@ -7857,6 +8822,8 @@
megasas_mgmt_majorno = rval;
+ megasas_init_debugfs();
+
/*
* Register ourselves as PCI hotplug module
*/
@@ -7867,6 +8834,12 @@
goto err_pcidrv;
}
+ if ((event_log_level < MFI_EVT_CLASS_DEBUG) ||
+ (event_log_level > MFI_EVT_CLASS_DEAD)) {
+ pr_warn("megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n");
+ event_log_level = MFI_EVT_CLASS_CRITICAL;
+ }
+
rval = driver_create_file(&megasas_pci_driver.driver,
&driver_attr_version);
if (rval)
@@ -7896,8 +8869,17 @@
if (rval)
goto err_dcf_support_nvme_encapsulation;
+ rval = driver_create_file(&megasas_pci_driver.driver,
+ &driver_attr_support_pci_lane_margining);
+ if (rval)
+ goto err_dcf_support_pci_lane_margining;
+
return rval;
+err_dcf_support_pci_lane_margining:
+ driver_remove_file(&megasas_pci_driver.driver,
+ &driver_attr_support_nvme_encapsulation);
+
err_dcf_support_nvme_encapsulation:
driver_remove_file(&megasas_pci_driver.driver,
&driver_attr_support_device_change);
@@ -7916,6 +8898,7 @@
err_dcf_attr_ver:
pci_unregister_driver(&megasas_pci_driver);
err_pcidrv:
+ megasas_exit_debugfs();
unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
return rval;
}
@@ -7936,8 +8919,11 @@
driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
driver_remove_file(&megasas_pci_driver.driver,
&driver_attr_support_nvme_encapsulation);
+ driver_remove_file(&megasas_pci_driver.driver,
+ &driver_attr_support_pci_lane_margining);
pci_unregister_driver(&megasas_pci_driver);
+ megasas_exit_debugfs();
unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
}
diff --git a/drivers/scsi/megaraid/megaraid_sas_debugfs.c b/drivers/scsi/megaraid/megaraid_sas_debugfs.c
new file mode 100644
index 0000000..c697607
--- /dev/null
+++ b/drivers/scsi/megaraid/megaraid_sas_debugfs.c
@@ -0,0 +1,179 @@
+/*
+ * Linux MegaRAID driver for SAS based RAID controllers
+ *
+ * Copyright (c) 2003-2018 LSI Corporation.
+ * Copyright (c) 2003-2018 Avago Technologies.
+ * Copyright (c) 2003-2018 Broadcom Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Authors: Broadcom Inc.
+ * Kashyap Desai <kashyap.desai@broadcom.com>
+ * Sumit Saxena <sumit.saxena@broadcom.com>
+ * Shivasharan S <shivasharan.srikanteshwara@broadcom.com>
+ *
+ * Send feedback to: megaraidlinux.pdl@broadcom.com
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/compat.h>
+#include <linux/irq_poll.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+#include "megaraid_sas_fusion.h"
+#include "megaraid_sas.h"
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+
+struct dentry *megasas_debugfs_root;
+
+static ssize_t
+megasas_debugfs_read(struct file *filp, char __user *ubuf, size_t cnt,
+ loff_t *ppos)
+{
+ struct megasas_debugfs_buffer *debug = filp->private_data;
+
+ if (!debug || !debug->buf)
+ return 0;
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, debug->buf, debug->len);
+}
+
+static int
+megasas_debugfs_raidmap_open(struct inode *inode, struct file *file)
+{
+ struct megasas_instance *instance = inode->i_private;
+ struct megasas_debugfs_buffer *debug;
+ struct fusion_context *fusion;
+
+ fusion = instance->ctrl_context;
+
+ debug = kzalloc(sizeof(struct megasas_debugfs_buffer), GFP_KERNEL);
+ if (!debug)
+ return -ENOMEM;
+
+ debug->buf = (void *)fusion->ld_drv_map[(instance->map_id & 1)];
+ debug->len = fusion->drv_map_sz;
+ file->private_data = debug;
+
+ return 0;
+}
+
+static int
+megasas_debugfs_release(struct inode *inode, struct file *file)
+{
+ struct megasas_debugfs_buffer *debug = file->private_data;
+
+ if (!debug)
+ return 0;
+
+ file->private_data = NULL;
+ kfree(debug);
+ return 0;
+}
+
+static const struct file_operations megasas_debugfs_raidmap_fops = {
+ .owner = THIS_MODULE,
+ .open = megasas_debugfs_raidmap_open,
+ .read = megasas_debugfs_read,
+ .release = megasas_debugfs_release,
+};
+
+/*
+ * megasas_init_debugfs : Create debugfs root for megaraid_sas driver
+ */
+void megasas_init_debugfs(void)
+{
+ megasas_debugfs_root = debugfs_create_dir("megaraid_sas", NULL);
+ if (!megasas_debugfs_root)
+ pr_info("Cannot create debugfs root\n");
+}
+
+/*
+ * megasas_exit_debugfs : Remove debugfs root for megaraid_sas driver
+ */
+void megasas_exit_debugfs(void)
+{
+ debugfs_remove_recursive(megasas_debugfs_root);
+}
+
+/*
+ * megasas_setup_debugfs : Setup debugfs per Fusion adapter
+ * instance: Soft instance of adapter
+ */
+void
+megasas_setup_debugfs(struct megasas_instance *instance)
+{
+ char name[64];
+ struct fusion_context *fusion;
+
+ fusion = instance->ctrl_context;
+
+ if (fusion) {
+ snprintf(name, sizeof(name),
+ "scsi_host%d", instance->host->host_no);
+ if (!instance->debugfs_root) {
+ instance->debugfs_root =
+ debugfs_create_dir(name, megasas_debugfs_root);
+ if (!instance->debugfs_root) {
+ dev_err(&instance->pdev->dev,
+ "Cannot create per adapter debugfs directory\n");
+ return;
+ }
+ }
+
+ snprintf(name, sizeof(name), "raidmap_dump");
+ instance->raidmap_dump =
+ debugfs_create_file(name, S_IRUGO,
+ instance->debugfs_root, instance,
+ &megasas_debugfs_raidmap_fops);
+ if (!instance->raidmap_dump) {
+ dev_err(&instance->pdev->dev,
+ "Cannot create raidmap debugfs file\n");
+ debugfs_remove(instance->debugfs_root);
+ return;
+ }
+ }
+
+}
+
+/*
+ * megasas_destroy_debugfs : Destroy debugfs per Fusion adapter
+ * instance: Soft instance of adapter
+ */
+void megasas_destroy_debugfs(struct megasas_instance *instance)
+{
+ debugfs_remove_recursive(instance->debugfs_root);
+}
+
+#else
+void megasas_init_debugfs(void)
+{
+}
+void megasas_exit_debugfs(void)
+{
+}
+void megasas_setup_debugfs(struct megasas_instance *instance)
+{
+}
+void megasas_destroy_debugfs(struct megasas_instance *instance)
+{
+}
+#endif /*CONFIG_DEBUG_FS*/
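
As a usage illustration for the new raidmap_dump node: the file simply returns the in-memory RAID map via simple_read_from_buffer(), so a plain sequential read from userspace is enough. The debugfs mount point and host number in this sketch are assumptions, not values from the patch.

#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/kernel/debug/megaraid_sas/scsi_host0/raidmap_dump";
	unsigned char buf[4096];
	size_t n, total = 0;
	FILE *f = fopen(path, "rb");

	if (!f) {
		perror(path);
		return 1;
	}
	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		total += n;
	fclose(f);
	printf("raid map dump: %zu bytes\n", total);
	return 0;
}
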
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 59ecbb3..50b8c1b 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -1,35 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Linux MegaRAID driver for SAS based RAID controllers
*
* Copyright (c) 2009-2013 LSI Corporation
- * Copyright (c) 2013-2014 Avago Technologies
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ * Copyright (c) 2013-2016 Avago Technologies
+ * Copyright (c) 2016-2018 Broadcom Inc.
*
* FILE: megaraid_sas_fp.c
*
- * Authors: Avago Technologies
+ * Authors: Broadcom Inc.
* Sumant Patro
* Varad Talamacki
* Manoj Jose
- * Kashyap Desai <kashyap.desai@avagotech.com>
- * Sumit Saxena <sumit.saxena@avagotech.com>
+ * Kashyap Desai <kashyap.desai@broadcom.com>
+ * Sumit Saxena <sumit.saxena@broadcom.com>
*
- * Send feedback to: megaraidlinux.pdl@avagotech.com
- *
- * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
- * San Jose, California 95131
+ * Send feedback to: megaraidlinux.pdl@broadcom.com
*/
#include <linux/kernel.h>
@@ -47,6 +33,7 @@
#include <linux/compat.h>
#include <linux/blkdev.h>
#include <linux/poll.h>
+#include <linux/irq_poll.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -59,7 +46,7 @@
#define LB_PENDING_CMDS_DEFAULT 4
static unsigned int lb_pending_cmds = LB_PENDING_CMDS_DEFAULT;
-module_param(lb_pending_cmds, int, S_IRUGO);
+module_param(lb_pending_cmds, int, 0444);
MODULE_PARM_DESC(lb_pending_cmds, "Change raid-1 load balancing outstanding "
"threshold. Valid Values are 1-128. Default: 4");
@@ -745,7 +732,7 @@
*pDevHandle = MR_PdDevHandleGet(pd, map);
*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
/* get second pd also for raid 1/10 fast path writes*/
- if ((instance->adapter_type == VENTURA_SERIES) &&
+ if ((instance->adapter_type >= VENTURA_SERIES) &&
(raid->level == 1) &&
!io_info->isRead) {
r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
@@ -770,7 +757,7 @@
}
*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
- if (instance->adapter_type == VENTURA_SERIES) {
+ if (instance->adapter_type >= VENTURA_SERIES) {
((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm =
(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
io_info->span_arm =
@@ -861,7 +848,7 @@
*pDevHandle = MR_PdDevHandleGet(pd, map);
*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
/* get second pd also for raid 1/10 fast path writes*/
- if ((instance->adapter_type == VENTURA_SERIES) &&
+ if ((instance->adapter_type >= VENTURA_SERIES) &&
(raid->level == 1) &&
!io_info->isRead) {
r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
@@ -888,7 +875,7 @@
}
*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
- if (instance->adapter_type == VENTURA_SERIES) {
+ if (instance->adapter_type >= VENTURA_SERIES) {
((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm =
(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
io_info->span_arm =
@@ -903,6 +890,77 @@
}
/*
+ * mr_get_phy_params_r56_rmw - Calculate parameters for R56 CTIO write operation
+ * @instance: Adapter soft state
+ * @ld: LD index
+ * @stripNo: Strip Number
+ * @io_info: IO info structure pointer
+ * pRAID_Context: RAID context pointer
+ * map: RAID map pointer
+ *
+ * This routine calculates the logical arm, data Arm, row number and parity arm
+ * for R56 CTIO write operation.
+ */
+static void mr_get_phy_params_r56_rmw(struct megasas_instance *instance,
+ u32 ld, u64 stripNo,
+ struct IO_REQUEST_INFO *io_info,
+ struct RAID_CONTEXT_G35 *pRAID_Context,
+ struct MR_DRV_RAID_MAP_ALL *map)
+{
+ struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ u8 span, dataArms, arms, dataArm, logArm;
+ s8 rightmostParityArm, PParityArm;
+ u64 rowNum;
+ u64 *pdBlock = &io_info->pdBlock;
+
+ dataArms = raid->rowDataSize;
+ arms = raid->rowSize;
+
+ rowNum = mega_div64_32(stripNo, dataArms);
+ /* parity disk arm, first arm is 0 */
+ rightmostParityArm = (arms - 1) - mega_mod64(rowNum, arms);
+
+ /* logical arm within row */
+ logArm = mega_mod64(stripNo, dataArms);
+ /* physical arm for data */
+ dataArm = mega_mod64((rightmostParityArm + 1 + logArm), arms);
+
+ if (raid->spanDepth == 1) {
+ span = 0;
+ } else {
+ span = (u8)MR_GetSpanBlock(ld, rowNum, pdBlock, map);
+ if (span == SPAN_INVALID)
+ return;
+ }
+
+ if (raid->level == 6) {
+ /* P Parity arm, note this can go negative adjust if negative */
+ PParityArm = (arms - 2) - mega_mod64(rowNum, arms);
+
+ if (PParityArm < 0)
+ PParityArm += arms;
+
+ /* rightmostParityArm is P-Parity for RAID 5 and Q-Parity for RAID 6 */
+ pRAID_Context->flow_specific.r56_arm_map = rightmostParityArm;
+ pRAID_Context->flow_specific.r56_arm_map |=
+ (u16)(PParityArm << RAID_CTX_R56_P_ARM_SHIFT);
+ } else {
+ pRAID_Context->flow_specific.r56_arm_map |=
+ (u16)(rightmostParityArm << RAID_CTX_R56_P_ARM_SHIFT);
+ }
+
+ pRAID_Context->reg_lock_row_lba = cpu_to_le64(rowNum);
+ pRAID_Context->flow_specific.r56_arm_map |=
+ (u16)(logArm << RAID_CTX_R56_LOG_ARM_SHIFT);
+ cpu_to_le16s(&pRAID_Context->flow_specific.r56_arm_map);
+ pRAID_Context->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | dataArm;
+ pRAID_Context->raid_flags = (MR_RAID_FLAGS_IO_SUB_TYPE_R56_DIV_OFFLOAD <<
+ MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
+
+ return;
+}
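
A worked example of the arm arithmetic in mr_get_phy_params_r56_rmw(), using a hypothetical 5-arm RAID-6 span (3 data arms plus P and Q), is sketched below; the geometry and strip number are illustrative only.

#include <stdio.h>

int main(void)
{
	unsigned long long stripNo = 7;      /* hypothetical strip number    */
	unsigned int dataArms = 3, arms = 5; /* RAID-6 span: 3 data + P + Q  */
	unsigned long long rowNum = stripNo / dataArms;
	int qArm = (int)(arms - 1) - (int)(rowNum % arms); /* rightmost parity */
	unsigned int logArm = stripNo % dataArms;          /* logical arm in row */
	unsigned int dataArm = ((unsigned int)qArm + 1 + logArm) % arms;
	int pArm = (int)(arms - 2) - (int)(rowNum % arms);

	if (pArm < 0)
		pArm += arms;

	printf("strip %llu -> row %llu, data arm %u, P arm %d, Q arm %d\n",
	       stripNo, rowNum, dataArm, pArm, qArm);
	return 0;
}
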
+
+/*
******************************************************************************
*
* MR_BuildRaidContext function
@@ -968,6 +1026,7 @@
stripSize = 1 << raid->stripeShift;
stripe_mask = stripSize-1;
+ io_info->data_arms = raid->rowDataSize;
/*
* calculate starting row and stripe, and number of strips and rows
@@ -1109,6 +1168,13 @@
/* save pointer to raid->LUN array */
*raidLUN = raid->LUN;
+ /* Aero R5/6 Division Offload for WRITE */
+ if (fusion->r56_div_offload && (raid->level >= 5) && !isRead) {
+ mr_get_phy_params_r56_rmw(instance, ld, start_strip, io_info,
+ (struct RAID_CONTEXT_G35 *)pRAID_Context,
+ map);
+ return true;
+ }
/*Get Phy Params only if FP capable, or else leave it to MR firmware
to do the calculation.*/
@@ -1266,7 +1332,7 @@
for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
ld = MR_TargetIdToLdGet(ldCount, drv_map);
- if (ld >= MAX_LOGICAL_DRIVES_EXT) {
+ if (ld >= MAX_LOGICAL_DRIVES_EXT - 1) {
lbInfo[ldCount].loadBalanceFlag = 0;
continue;
}
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index c7f95ba..e301458 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -1,34 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Linux MegaRAID driver for SAS based RAID controllers
*
* Copyright (c) 2009-2013 LSI Corporation
- * Copyright (c) 2013-2014 Avago Technologies
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ * Copyright (c) 2013-2016 Avago Technologies
+ * Copyright (c) 2016-2018 Broadcom Inc.
*
* FILE: megaraid_sas_fusion.c
*
- * Authors: Avago Technologies
+ * Authors: Broadcom Inc.
* Sumant Patro
* Adam Radford
- * Kashyap Desai <kashyap.desai@avagotech.com>
- * Sumit Saxena <sumit.saxena@avagotech.com>
+ * Kashyap Desai <kashyap.desai@broadcom.com>
+ * Sumit Saxena <sumit.saxena@broadcom.com>
*
- * Send feedback to: megaraidlinux.pdl@avagotech.com
- *
- * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
- * San Jose, California 95131
+ * Send feedback to: megaraidlinux.pdl@broadcom.com
*/
#include <linux/kernel.h>
@@ -48,6 +34,8 @@
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/vmalloc.h>
+#include <linux/workqueue.h>
+#include <linux/irq_poll.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -74,7 +62,7 @@
megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd);
int megasas_alloc_cmds(struct megasas_instance *instance);
int
-megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs);
+megasas_clear_intr_fusion(struct megasas_instance *instance);
int
megasas_issue_polled(struct megasas_instance *instance,
struct megasas_cmd *cmd);
@@ -95,6 +83,65 @@
static void megasas_free_reply_fusion(struct megasas_instance *instance);
static inline
void megasas_configure_queue_sizes(struct megasas_instance *instance);
+static void megasas_fusion_crash_dump(struct megasas_instance *instance);
+extern u32 megasas_readl(struct megasas_instance *instance,
+ const volatile void __iomem *addr);
+
+/**
+ * megasas_adp_reset_wait_for_ready - initiate chip reset and wait for
+ * controller to come to ready state
+ * @instance - adapter's soft state
+ * @do_adp_reset - If true, do a chip reset
+ * @ocr_context - If called from OCR context this will
+ * be set to 1, else 0
+ *
+ * This function initiates a chip reset followed by a wait for controller to
+ * transition to ready state.
+ * During this, driver will block all access to PCI config space from userspace
+ */
+int
+megasas_adp_reset_wait_for_ready(struct megasas_instance *instance,
+ bool do_adp_reset,
+ int ocr_context)
+{
+ int ret = FAILED;
+
+ /*
+ * Block access to PCI config space from userspace
+ * when diag reset is initiated from driver
+ */
+ if (megasas_dbg_lvl & OCR_DEBUG)
+ dev_info(&instance->pdev->dev,
+ "Block access to PCI config space %s %d\n",
+ __func__, __LINE__);
+
+ pci_cfg_access_lock(instance->pdev);
+
+ if (do_adp_reset) {
+ if (instance->instancet->adp_reset
+ (instance, instance->reg_set))
+ goto out;
+ }
+
+ /* Wait for FW to become ready */
+ if (megasas_transition_to_ready(instance, ocr_context)) {
+ dev_warn(&instance->pdev->dev,
+ "Failed to transition controller to ready for scsi%d.\n",
+ instance->host->host_no);
+ goto out;
+ }
+
+ ret = SUCCESS;
+out:
+ if (megasas_dbg_lvl & OCR_DEBUG)
+ dev_info(&instance->pdev->dev,
+ "Unlock access to PCI config space %s %d\n",
+ __func__, __LINE__);
+
+ pci_cfg_access_unlock(instance->pdev);
+
+ return ret;
+}
/**
* megasas_check_same_4gb_region - check if allocation
@@ -143,7 +190,8 @@
writel(~MFI_FUSION_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
/* Dummy readl to force pci flush */
- readl(&regs->outbound_intr_mask);
+ dev_info(&instance->pdev->dev, "%s is called outbound_intr_mask:0x%08x\n",
+ __func__, readl(&regs->outbound_intr_mask));
}
/**
@@ -154,24 +202,27 @@
megasas_disable_intr_fusion(struct megasas_instance *instance)
{
u32 mask = 0xFFFFFFFF;
- u32 status;
struct megasas_register_set __iomem *regs;
regs = instance->reg_set;
instance->mask_interrupts = 1;
writel(mask, &regs->outbound_intr_mask);
/* Dummy readl to force pci flush */
- status = readl(&regs->outbound_intr_mask);
+ dev_info(&instance->pdev->dev, "%s is called outbound_intr_mask:0x%08x\n",
+ __func__, readl(&regs->outbound_intr_mask));
}
int
-megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs)
+megasas_clear_intr_fusion(struct megasas_instance *instance)
{
u32 status;
+ struct megasas_register_set __iomem *regs;
+ regs = instance->reg_set;
/*
* Check if it is our interrupt
*/
- status = readl(&regs->outbound_intr_status);
+ status = megasas_readl(instance,
+ &regs->outbound_intr_status);
if (status & 1) {
writel(status, &regs->outbound_intr_status);
@@ -214,21 +265,17 @@
}
/**
- * megasas_fire_cmd_fusion - Sends command to the FW
- * @instance: Adapter soft state
- * @req_desc: 64bit Request descriptor
- *
- * Perform PCI Write.
+ * megasas_write_64bit_req_desc - PCI writes 64bit request descriptor
+ * @instance: Adapter soft state
+ * @req_desc: 64bit Request descriptor
*/
-
static void
-megasas_fire_cmd_fusion(struct megasas_instance *instance,
+megasas_write_64bit_req_desc(struct megasas_instance *instance,
union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc)
{
#if defined(writeq) && defined(CONFIG_64BIT)
u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) |
le32_to_cpu(req_desc->u.low));
-
writeq(req_data, &instance->reg_set->inbound_low_queue_port);
#else
unsigned long flags;
@@ -237,12 +284,30 @@
&instance->reg_set->inbound_low_queue_port);
writel(le32_to_cpu(req_desc->u.high),
&instance->reg_set->inbound_high_queue_port);
- mmiowb();
spin_unlock_irqrestore(&instance->hba_lock, flags);
#endif
}
/**
+ * megasas_fire_cmd_fusion - Sends command to the FW
+ * @instance: Adapter soft state
+ * @req_desc: 32bit or 64bit Request descriptor
+ *
+ * Perform PCI Write. AERO SERIES supports 32 bit Descriptor.
+ * Controllers prior to AERO_SERIES support only 64 bit Descriptors.
+ */
+static void
+megasas_fire_cmd_fusion(struct megasas_instance *instance,
+ union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc)
+{
+ if (instance->atomic_desc_support)
+ writel(le32_to_cpu(req_desc->u.low),
+ &instance->reg_set->inbound_single_queue_port);
+ else
+ megasas_write_64bit_req_desc(instance, req_desc);
+}
+
+/**
* megasas_fusion_update_can_queue - Do all Adapter Queue depth related calculations here
* @instance: Adapter soft state
* fw_boot_context: Whether this function called during probe or after OCR
@@ -258,20 +323,18 @@
{
u16 cur_max_fw_cmds = 0;
u16 ldio_threshold = 0;
- struct megasas_register_set __iomem *reg_set;
- reg_set = instance->reg_set;
-
- /* ventura FW does not fill outbound_scratch_pad_3 with queue depth */
+ /* ventura FW does not fill outbound_scratch_pad_2 with queue depth */
if (instance->adapter_type < VENTURA_SERIES)
cur_max_fw_cmds =
- readl(&instance->reg_set->outbound_scratch_pad_3) & 0x00FFFF;
+ megasas_readl(instance,
+ &instance->reg_set->outbound_scratch_pad_2) & 0x00FFFF;
if (dual_qdepth_disable || !cur_max_fw_cmds)
- cur_max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
+ cur_max_fw_cmds = instance->instancet->read_fw_status_reg(instance) & 0x00FFFF;
else
ldio_threshold =
- (instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF) - MEGASAS_FUSION_IOCTL_CMDS;
+ (instance->instancet->read_fw_status_reg(instance) & 0x00FFFF) - MEGASAS_FUSION_IOCTL_CMDS;
dev_info(&instance->pdev->dev,
"Current firmware supports maximum commands: %d\t LDIO threshold: %d\n",
@@ -471,7 +534,7 @@
return 0;
}
-int
+static int
megasas_alloc_cmdlist_fusion(struct megasas_instance *instance)
{
u32 max_mpt_cmd, i, j;
@@ -510,7 +573,8 @@
return 0;
}
-int
+
+static int
megasas_alloc_request_fusion(struct megasas_instance *instance)
{
struct fusion_context *fusion;
@@ -591,7 +655,7 @@
return 0;
}
-int
+static int
megasas_alloc_reply_fusion(struct megasas_instance *instance)
{
int i, count;
@@ -668,7 +732,7 @@
return 0;
}
-int
+static int
megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
{
int i, j, k, msix_count;
@@ -684,8 +748,9 @@
array_size = sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) *
MAX_MSIX_QUEUES_FUSION;
- fusion->rdpq_virt = pci_zalloc_consistent(instance->pdev, array_size,
- &fusion->rdpq_phys);
+ fusion->rdpq_virt = dma_alloc_coherent(&instance->pdev->dev,
+ array_size, &fusion->rdpq_phys,
+ GFP_KERNEL);
if (!fusion->rdpq_virt) {
dev_err(&instance->pdev->dev,
"Failed from %s %d\n", __func__, __LINE__);
@@ -807,13 +872,11 @@
}
- if (fusion->reply_frames_desc_pool)
- dma_pool_destroy(fusion->reply_frames_desc_pool);
- if (fusion->reply_frames_desc_pool_align)
- dma_pool_destroy(fusion->reply_frames_desc_pool_align);
+ dma_pool_destroy(fusion->reply_frames_desc_pool);
+ dma_pool_destroy(fusion->reply_frames_desc_pool_align);
if (fusion->rdpq_virt)
- pci_free_consistent(instance->pdev,
+ dma_free_coherent(&instance->pdev->dev,
sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION,
fusion->rdpq_virt, fusion->rdpq_phys);
}
@@ -830,8 +893,7 @@
fusion->reply_frames_desc[0],
fusion->reply_frames_desc_phys[0]);
- if (fusion->reply_frames_desc_pool)
- dma_pool_destroy(fusion->reply_frames_desc_pool);
+ dma_pool_destroy(fusion->reply_frames_desc_pool);
}
@@ -852,7 +914,7 @@
* and is used as SMID of the cmd.
* SMID value range is from 1 to max_fw_cmds.
*/
-int
+static int
megasas_alloc_cmds_fusion(struct megasas_instance *instance)
{
int i;
@@ -933,17 +995,22 @@
{
int i;
struct megasas_header *frame_hdr = &cmd->frame->hdr;
- struct fusion_context *fusion;
+ u32 status_reg;
u32 msecs = seconds * 1000;
- fusion = instance->ctrl_context;
/*
* Wait for cmd_status to change
*/
for (i = 0; (i < msecs) && (frame_hdr->cmd_status == 0xff); i += 20) {
rmb();
msleep(20);
+ if (!(i % 5000)) {
+ status_reg = instance->instancet->read_fw_status_reg(instance)
+ & MFI_STATE_MASK;
+ if (status_reg == MFI_STATE_FAULT)
+ break;
+ }
}
if (frame_hdr->cmd_status == MFI_STAT_INVALID_STATUS)
@@ -974,9 +1041,10 @@
struct megasas_header *frame_hdr;
const char *sys_info;
MFI_CAPABILITIES *drv_ops;
- u32 scratch_pad_2;
+ u32 scratch_pad_1;
ktime_t time;
bool cur_fw_64bit_dma_capable;
+ bool cur_intr_coalescing;
fusion = instance->ctrl_context;
@@ -985,14 +1053,14 @@
cmd = fusion->ioc_init_cmd;
- scratch_pad_2 = readl
- (&instance->reg_set->outbound_scratch_pad_2);
+ scratch_pad_1 = megasas_readl
+ (instance, &instance->reg_set->outbound_scratch_pad_1);
- cur_rdpq_mode = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ? 1 : 0;
+ cur_rdpq_mode = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ? 1 : 0;
if (instance->adapter_type == INVADER_SERIES) {
cur_fw_64bit_dma_capable =
- (scratch_pad_2 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET) ? true : false;
+ (scratch_pad_1 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET) ? true : false;
if (instance->consistent_mask_64bit && !cur_fw_64bit_dma_capable) {
dev_err(&instance->pdev->dev, "Driver was operating on 64bit "
@@ -1010,7 +1078,17 @@
goto fail_fw_init;
}
- instance->fw_sync_cache_support = (scratch_pad_2 &
+ cur_intr_coalescing = (scratch_pad_1 & MR_INTR_COALESCING_SUPPORT_OFFSET) ?
+ true : false;
+
+ if ((instance->low_latency_index_start ==
+ MR_HIGH_IOPS_QUEUE_COUNT) && cur_intr_coalescing)
+ instance->perf_mode = MR_BALANCED_PERF_MODE;
+
+ dev_info(&instance->pdev->dev, "Performance mode :%s\n",
+ MEGASAS_PERF_MODE_2STR(instance->perf_mode));
+
+ instance->fw_sync_cache_support = (scratch_pad_1 &
MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
dev_info(&instance->pdev->dev, "FW supports sync cache\t: %s\n",
instance->fw_sync_cache_support ? "Yes" : "No");
@@ -1043,9 +1121,7 @@
frame_hdr = &cmd->frame->hdr;
frame_hdr->cmd_status = 0xFF;
- frame_hdr->flags = cpu_to_le16(
- le16_to_cpu(frame_hdr->flags) |
- MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
+ frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
init_frame->cmd = MFI_CMD_INIT;
init_frame->cmd_status = 0xFF;
@@ -1072,6 +1148,7 @@
drv_ops->mfi_capabilities.support_qd_throttling = 1;
drv_ops->mfi_capabilities.support_pd_map_target_id = 1;
drv_ops->mfi_capabilities.support_nvme_passthru = 1;
+ drv_ops->mfi_capabilities.support_fw_exposed_dev_list = 1;
if (instance->consistent_mask_64bit)
drv_ops->mfi_capabilities.support_64bit_mode = 1;
@@ -1095,6 +1172,22 @@
cpu_to_le32(lower_32_bits(ioc_init_handle));
init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST));
+ /*
+ * Each bit in replyqueue_mask represents one group of MSI-x vectors
+ * (each group has 8 vectors)
+ */
+ switch (instance->perf_mode) {
+ case MR_BALANCED_PERF_MODE:
+ init_frame->replyqueue_mask =
+ cpu_to_le16(~(~0 << instance->low_latency_index_start/8));
+ break;
+ case MR_IOPS_PERF_MODE:
+ init_frame->replyqueue_mask =
+ cpu_to_le16(~(~0 << instance->msix_vectors/8));
+ break;
+ }
+
+
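
The replyqueue_mask encoding described in the comment above (one mask bit per group of 8 MSI-x vectors) can be sanity-checked with the minimal sketch below; the vector counts used are hypothetical.

#include <stdio.h>

static unsigned short replyqueue_mask(unsigned int vectors)
{
	/* one bit per group of 8 MSI-x vectors, as in megasas_ioc_init_fusion() */
	return (unsigned short)~(~0u << (vectors / 8));
}

int main(void)
{
	printf("8 vectors  -> mask 0x%x\n", replyqueue_mask(8));   /* 0x1  */
	printf("32 vectors -> mask 0x%x\n", replyqueue_mask(32));  /* 0xf  */
	printf("64 vectors -> mask 0x%x\n", replyqueue_mask(64));  /* 0xff */
	return 0;
}
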
req_desc.u.low = cpu_to_le32(lower_32_bits(cmd->frame_phys_addr));
req_desc.u.high = cpu_to_le32(upper_32_bits(cmd->frame_phys_addr));
req_desc.MFAIo.RequestFlags =
@@ -1107,15 +1200,16 @@
instance->instancet->disable_intr(instance);
for (i = 0; i < (10 * 1000); i += 20) {
- if (readl(&instance->reg_set->doorbell) & 1)
+ if (megasas_readl(instance, &instance->reg_set->doorbell) & 1)
msleep(20);
else
break;
}
- megasas_fire_cmd_fusion(instance, &req_desc);
+ /* For AERO also, IOC_INIT requires 64 bit descriptor write */
+ megasas_write_64bit_req_desc(instance, &req_desc);
- wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS);
+ wait_and_poll(instance, cmd, MFI_IO_TIMEOUT_SECS);
frame_hdr = &cmd->frame->hdr;
if (frame_hdr->cmd_status != 0) {
@@ -1123,6 +1217,17 @@
goto fail_fw_init;
}
+ if (instance->adapter_type >= AERO_SERIES) {
+ scratch_pad_1 = megasas_readl
+ (instance, &instance->reg_set->outbound_scratch_pad_1);
+
+ instance->atomic_desc_support =
+ (scratch_pad_1 & MR_ATOMIC_DESCRIPTOR_SUPPORT_OFFSET) ? 1 : 0;
+
+ dev_info(&instance->pdev->dev, "FW supports atomic descriptor\t: %s\n",
+ instance->atomic_desc_support ? "Yes" : "No");
+ }
+
return 0;
fail_fw_init:
@@ -1145,7 +1250,7 @@
int
megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) {
int ret = 0;
- u32 pd_seq_map_sz;
+ size_t pd_seq_map_sz;
struct megasas_cmd *cmd;
struct megasas_dcmd_frame *dcmd;
struct fusion_context *fusion = instance->ctrl_context;
@@ -1154,9 +1259,7 @@
pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id & 1)];
pd_seq_h = fusion->pd_seq_phys[(instance->pd_seq_map_id & 1)];
- pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
- (sizeof(struct MR_PD_CFG_SEQ) *
- (MAX_PHYSICAL_DEVICES - 1));
+ pd_seq_map_sz = struct_size(pd_sync, seq, MAX_PHYSICAL_DEVICES - 1);
cmd = megasas_get_cmd(instance);
if (!cmd) {
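
The struct_size() conversion a few lines above replaces open-coded sizeof arithmetic for a header-plus-trailing-array buffer. A minimal sketch of the equivalent computation (minus the overflow saturation the kernel helper adds) is shown below; the struct shape is a simplified stand-in for MR_PD_CFG_SEQ_NUM_SYNC, not the real layout.

#include <stdio.h>
#include <stddef.h>

struct seq_entry {
	unsigned int   seqNum;
	unsigned short devHandle;
};

struct seq_sync {                 /* simplified stand-in, not the real layout  */
	unsigned int     count;
	struct seq_entry seq[1];  /* one entry declared, rest allocated behind */
};

int main(void)
{
	struct seq_sync *ps = NULL;
	size_t n = 256;           /* stand-in for MAX_PHYSICAL_DEVICES */

	/* struct_size(ps, seq, n - 1) boils down to this sum, plus
	 * saturation to SIZE_MAX if the multiply/add would overflow: */
	size_t sz = sizeof(*ps) + (n - 1) * sizeof(ps->seq[0]);

	printf("buffer size for %zu entries: %zu bytes\n", n, sz);
	return 0;
}
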
@@ -1328,7 +1431,6 @@
struct megasas_cmd *cmd;
struct megasas_dcmd_frame *dcmd;
u16 num_lds;
- u32 size_sync_info;
struct fusion_context *fusion;
struct MR_LD_TARGET_SYNC *ci = NULL;
struct MR_DRV_RAID_MAP_ALL *map;
@@ -1357,8 +1459,6 @@
dcmd = &cmd->frame->dcmd;
- size_sync_info = sizeof(struct MR_LD_TARGET_SYNC) *num_lds;
-
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
ci = (struct MR_LD_TARGET_SYNC *)
@@ -1559,14 +1659,12 @@
fusion = instance->ctrl_context;
max_cmd = instance->max_fw_cmds;
- if (instance->adapter_type == VENTURA_SERIES)
+ if (instance->adapter_type >= VENTURA_SERIES)
instance->max_mpt_cmds = instance->max_fw_cmds * RAID_1_PEER_CMDS;
else
instance->max_mpt_cmds = instance->max_fw_cmds;
- instance->max_scsi_cmds = instance->max_fw_cmds -
- (MEGASAS_FUSION_INTERNAL_CMDS +
- MEGASAS_FUSION_IOCTL_CMDS);
+ instance->max_scsi_cmds = instance->max_fw_cmds - instance->max_mfi_cmds;
instance->cur_can_queue = instance->max_scsi_cmds;
instance->host->can_queue = instance->cur_can_queue;
@@ -1627,8 +1725,7 @@
fusion->ioc_init_cmd->frame,
fusion->ioc_init_cmd->frame_phys_addr);
- if (fusion->ioc_init_cmd)
- kfree(fusion->ioc_init_cmd);
+ kfree(fusion->ioc_init_cmd);
}
/**
@@ -1637,18 +1734,16 @@
*
* This is the main function for initializing firmware.
*/
-u32
+static u32
megasas_init_adapter_fusion(struct megasas_instance *instance)
{
- struct megasas_register_set __iomem *reg_set;
struct fusion_context *fusion;
- u32 scratch_pad_2;
+ u32 scratch_pad_1;
int i = 0, count;
+ u32 status_reg;
fusion = instance->ctrl_context;
- reg_set = instance->reg_set;
-
megasas_fusion_update_can_queue(instance, PROBE_CONTEXT);
/*
@@ -1659,20 +1754,21 @@
megasas_configure_queue_sizes(instance);
- scratch_pad_2 = readl(&instance->reg_set->outbound_scratch_pad_2);
- /* If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
+ scratch_pad_1 = megasas_readl(instance,
+ &instance->reg_set->outbound_scratch_pad_1);
+ /* If scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
* Firmware support extended IO chain frame which is 4 times more than
* legacy Firmware.
* Legacy Firmware - Frame size is (8 * 128) = 1K
* 1M IO Firmware - Frame size is (8 * 128 * 4) = 4K
*/
- if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
+ if (scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
instance->max_chain_frame_sz =
- ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
+ ((scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_1MB_IO;
else
instance->max_chain_frame_sz =
- ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
+ ((scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_256K_IO;
if (instance->max_chain_frame_sz < MEGASAS_CHAIN_FRAME_SZ_MIN) {
@@ -1727,8 +1823,21 @@
if (megasas_alloc_cmds_fusion(instance))
goto fail_alloc_cmds;
- if (megasas_ioc_init_fusion(instance))
- goto fail_ioc_init;
+ if (megasas_ioc_init_fusion(instance)) {
+ status_reg = instance->instancet->read_fw_status_reg(instance);
+ if (((status_reg & MFI_STATE_MASK) == MFI_STATE_FAULT) &&
+ (status_reg & MFI_RESET_ADAPTER)) {
+ /* Do a chip reset and then retry IOC INIT once */
+ if (megasas_adp_reset_wait_for_ready
+ (instance, true, 0) == FAILED)
+ goto fail_ioc_init;
+
+ if (megasas_ioc_init_fusion(instance))
+ goto fail_ioc_init;
+ } else {
+ goto fail_ioc_init;
+ }
+ }
megasas_display_intel_branding(instance);
if (megasas_get_ctrl_info(instance)) {
@@ -1740,6 +1849,7 @@
instance->flag_ieee = 1;
instance->r1_ldio_hint_default = MR_R1_LDIO_PIGGYBACK_DEFAULT;
+ instance->threshold_reply_count = instance->max_fw_cmds / 4;
fusion->fast_path_io = 0;
if (megasas_allocate_raid_maps(instance))
@@ -1760,13 +1870,97 @@
}
/**
+ * megasas_fault_detect_work - Worker function of
+ * FW fault handling workqueue.
+ */
+static void
+megasas_fault_detect_work(struct work_struct *work)
+{
+ struct megasas_instance *instance =
+ container_of(work, struct megasas_instance,
+ fw_fault_work.work);
+ u32 fw_state, dma_state, status;
+
+ /* Check the fw state */
+ fw_state = instance->instancet->read_fw_status_reg(instance) &
+ MFI_STATE_MASK;
+
+ if (fw_state == MFI_STATE_FAULT) {
+ dma_state = instance->instancet->read_fw_status_reg(instance) &
+ MFI_STATE_DMADONE;
+ /* Start collecting crash, if DMA bit is done */
+ if (instance->crash_dump_drv_support &&
+ instance->crash_dump_app_support && dma_state) {
+ megasas_fusion_crash_dump(instance);
+ } else {
+ if (instance->unload == 0) {
+ status = megasas_reset_fusion(instance->host, 0);
+ if (status != SUCCESS) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d, do not re-arm timer\n",
+ __func__, __LINE__);
+ return;
+ }
+ }
+ }
+ }
+
+ if (instance->fw_fault_work_q)
+ queue_delayed_work(instance->fw_fault_work_q,
+ &instance->fw_fault_work,
+ msecs_to_jiffies(MEGASAS_WATCHDOG_THREAD_INTERVAL));
+}
+
+int
+megasas_fusion_start_watchdog(struct megasas_instance *instance)
+{
+ /* Check if the Fault WQ is already started */
+ if (instance->fw_fault_work_q)
+ return SUCCESS;
+
+ INIT_DELAYED_WORK(&instance->fw_fault_work, megasas_fault_detect_work);
+
+ snprintf(instance->fault_handler_work_q_name,
+ sizeof(instance->fault_handler_work_q_name),
+ "poll_megasas%d_status", instance->host->host_no);
+
+ instance->fw_fault_work_q =
+ create_singlethread_workqueue(instance->fault_handler_work_q_name);
+ if (!instance->fw_fault_work_q) {
+ dev_err(&instance->pdev->dev, "Failed from %s %d\n",
+ __func__, __LINE__);
+ return FAILED;
+ }
+
+ queue_delayed_work(instance->fw_fault_work_q,
+ &instance->fw_fault_work,
+ msecs_to_jiffies(MEGASAS_WATCHDOG_THREAD_INTERVAL));
+
+ return SUCCESS;
+}
+
+void
+megasas_fusion_stop_watchdog(struct megasas_instance *instance)
+{
+ struct workqueue_struct *wq;
+
+ if (instance->fw_fault_work_q) {
+ wq = instance->fw_fault_work_q;
+ instance->fw_fault_work_q = NULL;
+ if (!cancel_delayed_work_sync(&instance->fw_fault_work))
+ flush_workqueue(wq);
+ destroy_workqueue(wq);
+ }
+}
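
The watchdog start/stop pair above follows the usual self-rearming delayed-work pattern: a dedicated single-threaded workqueue, a handler that re-queues itself, and a teardown path that clears the queue pointer before cancel_delayed_work_sync(). A minimal module-style sketch of that pattern, with an arbitrary 1-second interval, is below; it is a standalone illustration, not driver code.

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *wq;
static struct delayed_work poll_work;

static void poll_fn(struct work_struct *work)
{
	pr_info("watchdog tick\n");
	if (wq)		/* re-arm only while the queue is still alive */
		queue_delayed_work(wq, &poll_work, msecs_to_jiffies(1000));
}

static int __init demo_init(void)
{
	wq = create_singlethread_workqueue("demo_watchdog");
	if (!wq)
		return -ENOMEM;
	INIT_DELAYED_WORK(&poll_work, poll_fn);
	queue_delayed_work(wq, &poll_work, msecs_to_jiffies(1000));
	return 0;
}

static void __exit demo_exit(void)
{
	struct workqueue_struct *tmp = wq;

	wq = NULL;	/* stop the handler from re-arming itself */
	if (!cancel_delayed_work_sync(&poll_work))
		flush_workqueue(tmp);
	destroy_workqueue(tmp);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
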
+
+/**
* map_cmd_status - Maps FW cmd status to OS cmd status
* @cmd : Pointer to cmd
* @status : status of cmd returned by FW
* @ext_status : ext status of cmd returned by FW
*/
-void
+static void
map_cmd_status(struct fusion_context *fusion,
struct scsi_cmnd *scmd, u8 status, u8 ext_status,
u32 data_length, u8 *sense)
@@ -1842,7 +2036,6 @@
megasas_is_prp_possible(struct megasas_instance *instance,
struct scsi_cmnd *scmd, int sge_count)
{
- struct fusion_context *fusion;
int i;
u32 data_length = 0;
struct scatterlist *sg_scmd;
@@ -1851,7 +2044,6 @@
mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
MR_DEFAULT_NVME_PAGE_SIZE);
- fusion = instance->ctrl_context;
data_length = scsi_bufflen(scmd);
sg_scmd = scsi_sglist(scmd);
@@ -1908,7 +2100,6 @@
mega_mod64(sg_dma_address(sg_scmd),
mr_nvme_pg_size)) {
build_prp = false;
- atomic_inc(&instance->sge_holes_type1);
break;
}
}
@@ -1918,7 +2109,6 @@
sg_dma_len(sg_scmd)),
mr_nvme_pg_size))) {
build_prp = false;
- atomic_inc(&instance->sge_holes_type2);
break;
}
}
@@ -1927,7 +2117,6 @@
if (mega_mod64(sg_dma_address(sg_scmd),
mr_nvme_pg_size)) {
build_prp = false;
- atomic_inc(&instance->sge_holes_type3);
break;
}
}
@@ -1964,12 +2153,9 @@
u32 first_prp_len;
bool build_prp = false;
int data_len = scsi_bufflen(scmd);
- struct fusion_context *fusion;
u32 mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
MR_DEFAULT_NVME_PAGE_SIZE);
- fusion = instance->ctrl_context;
-
build_prp = megasas_is_prp_possible(instance, scmd, sge_count);
if (!build_prp)
@@ -2063,7 +2249,6 @@
main_chain_element->Length =
cpu_to_le32(num_prp_in_chain * sizeof(u64));
- atomic_inc(&instance->prp_sgl);
return build_prp;
}
@@ -2138,7 +2323,6 @@
memset(sgl_ptr, 0, instance->max_chain_frame_sz);
}
}
- atomic_inc(&instance->ieee_sgl);
}
/**
@@ -2189,7 +2373,7 @@
*
* Used to set the PD LBA in CDB for FP IOs
*/
-void
+static void
megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
struct IO_REQUEST_INFO *io_info, struct scsi_cmnd *scp,
struct MR_DRV_RAID_MAP_ALL *local_map_ptr, u32 ref_tag)
@@ -2209,7 +2393,7 @@
cdb[0] = MEGASAS_SCSI_VARIABLE_LENGTH_CMD;
cdb[7] = MEGASAS_SCSI_ADDL_CDB_LEN;
- if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+ if (scp->sc_data_direction == DMA_FROM_DEVICE)
cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_READ32;
else
cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_WRITE32;
@@ -2238,7 +2422,7 @@
cdb[31] = (u8)(num_blocks & 0xff);
/* set SCSI IO EEDPFlags */
- if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) {
+ if (scp->sc_data_direction == DMA_FROM_DEVICE) {
io_request->EEDPFlags = cpu_to_le16(
MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
@@ -2450,9 +2634,10 @@
*
*/
static void
-megasas_set_raidflag_cpu_affinity(union RAID_CONTEXT_UNION *praid_context,
- struct MR_LD_RAID *raid, bool fp_possible,
- u8 is_read, u32 scsi_buff_len)
+megasas_set_raidflag_cpu_affinity(struct fusion_context *fusion,
+ union RAID_CONTEXT_UNION *praid_context,
+ struct MR_LD_RAID *raid, bool fp_possible,
+ u8 is_read, u32 scsi_buff_len)
{
u8 cpu_sel = MR_RAID_CTX_CPUSEL_0;
struct RAID_CONTEXT_G35 *rctx_g35;
@@ -2510,11 +2695,11 @@
* vs MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS.
* IO Subtype is not bitmap.
*/
- if ((raid->level == 1) && (!is_read)) {
- if (scsi_buff_len > MR_LARGE_IO_MIN_SIZE)
- praid_context->raid_context_g35.raid_flags =
- (MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT
- << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
+ if ((fusion->pcie_bw_limitation) && (raid->level == 1) && (!is_read) &&
+ (scsi_buff_len > MR_LARGE_IO_MIN_SIZE)) {
+ praid_context->raid_context_g35.raid_flags =
+ (MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT
+ << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
}
}
@@ -2527,7 +2712,7 @@
* Prepares the io_request and chain elements (sg_frame) for IO
* The IO can be for PD (Fast Path) or LD
*/
-void
+static void
megasas_build_ldio_fusion(struct megasas_instance *instance,
struct scsi_cmnd *scp,
struct megasas_cmd_fusion *cmd)
@@ -2537,27 +2722,27 @@
u32 start_lba_lo, start_lba_hi, device_id, datalength = 0;
u32 scsi_buff_len;
struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
- union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
struct IO_REQUEST_INFO io_info;
struct fusion_context *fusion;
struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
u8 *raidLUN;
unsigned long spinlock_flags;
- union RAID_CONTEXT_UNION *praid_context;
struct MR_LD_RAID *raid = NULL;
struct MR_PRIV_DEVICE *mrdev_priv;
+ struct RAID_CONTEXT *rctx;
+ struct RAID_CONTEXT_G35 *rctx_g35;
device_id = MEGASAS_DEV_INDEX(scp);
fusion = instance->ctrl_context;
io_request = cmd->io_request;
- io_request->RaidContext.raid_context.virtual_disk_tgt_id =
- cpu_to_le16(device_id);
- io_request->RaidContext.raid_context.status = 0;
- io_request->RaidContext.raid_context.ex_status = 0;
+ rctx = &io_request->RaidContext.raid_context;
+ rctx_g35 = &io_request->RaidContext.raid_context_g35;
- req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;
+ rctx->virtual_disk_tgt_id = cpu_to_le16(device_id);
+ rctx->status = 0;
+ rctx->ex_status = 0;
start_lba_lo = 0;
start_lba_hi = 0;
@@ -2620,8 +2805,9 @@
io_info.r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
scsi_buff_len = scsi_bufflen(scp);
io_request->DataLength = cpu_to_le32(scsi_buff_len);
+ io_info.data_arms = 1;
- if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+ if (scp->sc_data_direction == DMA_FROM_DEVICE)
io_info.isRead = 1;
local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
@@ -2631,21 +2817,29 @@
raid = MR_LdRaidGet(ld, local_map_ptr);
if (!raid || (!fusion->fast_path_io)) {
- io_request->RaidContext.raid_context.reg_lock_flags = 0;
+ rctx->reg_lock_flags = 0;
fp_possible = false;
} else {
- if (MR_BuildRaidContext(instance, &io_info,
- &io_request->RaidContext.raid_context,
+ if (MR_BuildRaidContext(instance, &io_info, rctx,
local_map_ptr, &raidLUN))
fp_possible = (io_info.fpOkForIo > 0) ? true : false;
}
- cmd->request_desc->SCSIIO.MSIxIndex =
- instance->reply_map[raw_smp_processor_id()];
+ if ((instance->perf_mode == MR_BALANCED_PERF_MODE) &&
+ atomic_read(&scp->device->device_busy) >
+ (io_info.data_arms * MR_DEVICE_HIGH_IOPS_DEPTH))
+ cmd->request_desc->SCSIIO.MSIxIndex =
+ mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) /
+ MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start);
+ else if (instance->msix_load_balance)
+ cmd->request_desc->SCSIIO.MSIxIndex =
+ (mega_mod64(atomic64_add_return(1, &instance->total_io_count),
+ instance->msix_vectors));
+ else
+ cmd->request_desc->SCSIIO.MSIxIndex =
+ instance->reply_map[raw_smp_processor_id()];
- praid_context = &io_request->RaidContext;
-
- if (instance->adapter_type == VENTURA_SERIES) {
+ if (instance->adapter_type >= VENTURA_SERIES) {
/* FP for Optimal raid level 1.
* All large RAID-1 writes (> 32 KiB, both WT and WB modes)
* are built by the driver as LD I/Os.
@@ -2661,8 +2855,9 @@
(instance->host->can_queue)) {
fp_possible = false;
atomic_dec(&instance->fw_outstanding);
- } else if ((scsi_buff_len > MR_LARGE_IO_MIN_SIZE) ||
- (atomic_dec_if_positive(&mrdev_priv->r1_ldio_hint) > 0)) {
+ } else if (fusion->pcie_bw_limitation &&
+ ((scsi_buff_len > MR_LARGE_IO_MIN_SIZE) ||
+ (atomic_dec_if_positive(&mrdev_priv->r1_ldio_hint) > 0))) {
fp_possible = false;
atomic_dec(&instance->fw_outstanding);
if (scsi_buff_len > MR_LARGE_IO_MIN_SIZE)
@@ -2681,17 +2876,17 @@
/* In ventura if stream detected for a read and it is
* read ahead capable make this IO as LDIO
*/
- if (is_stream_detected(&io_request->RaidContext.raid_context_g35))
+ if (is_stream_detected(rctx_g35))
fp_possible = false;
}
/* If raid is NULL, set CPU affinity to default CPU0 */
if (raid)
- megasas_set_raidflag_cpu_affinity(praid_context,
+ megasas_set_raidflag_cpu_affinity(fusion, &io_request->RaidContext,
raid, fp_possible, io_info.isRead,
scsi_buff_len);
else
- praid_context->raid_context_g35.routing_flags |=
+ rctx_g35->routing_flags |=
(MR_RAID_CTX_CPUSEL_0 << MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT);
}
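
The reply-queue selection added in the hunk above steers I/O for busy devices onto the first low_latency_index_start queues in batches and leaves everything else on the submitting CPU's mapped queue. The standalone sketch below mimics that arithmetic; BATCH_COUNT, the queue count, and the busy threshold are stand-in values, not taken from this patch.

#include <stdio.h>

#define BATCH_COUNT        16  /* stand-in for MR_HIGH_IOPS_BATCH_COUNT */
#define LOW_LATENCY_QUEUES  8  /* stand-in for low_latency_index_start  */

static unsigned long long high_iops_counter;

static unsigned int pick_reply_queue(int device_busy, int busy_threshold,
				     unsigned int cpu_queue)
{
	if (device_busy > busy_threshold)
		return (unsigned int)((++high_iops_counter / BATCH_COUNT) %
				      LOW_LATENCY_QUEUES);
	return cpu_queue;	/* instance->reply_map[cpu] in the driver */
}

int main(void)
{
	int i;

	for (i = 0; i < 40; i++)	/* busy device: batched round-robin */
		printf("%u ", pick_reply_queue(64, 8, 3));
	printf("\nidle device -> queue %u\n", pick_reply_queue(2, 8, 3));
	return 0;
}
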
@@ -2703,25 +2898,16 @@
(MPI2_REQ_DESCRIPT_FLAGS_FP_IO
<< MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
if (instance->adapter_type == INVADER_SERIES) {
- if (io_request->RaidContext.raid_context.reg_lock_flags ==
- REGION_TYPE_UNUSED)
- cmd->request_desc->SCSIIO.RequestFlags =
- (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
- MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
- io_request->RaidContext.raid_context.type
- = MPI2_TYPE_CUDA;
- io_request->RaidContext.raid_context.nseg = 0x1;
+ rctx->type = MPI2_TYPE_CUDA;
+ rctx->nseg = 0x1;
io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
- io_request->RaidContext.raid_context.reg_lock_flags |=
+ rctx->reg_lock_flags |=
(MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
MR_RL_FLAGS_SEQ_NUM_ENABLE);
- } else if (instance->adapter_type == VENTURA_SERIES) {
- io_request->RaidContext.raid_context_g35.nseg_type |=
- (1 << RAID_CONTEXT_NSEG_SHIFT);
- io_request->RaidContext.raid_context_g35.nseg_type |=
- (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT);
- io_request->RaidContext.raid_context_g35.routing_flags |=
- (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
+ } else if (instance->adapter_type >= VENTURA_SERIES) {
+ rctx_g35->nseg_type |= (1 << RAID_CONTEXT_NSEG_SHIFT);
+ rctx_g35->nseg_type |= (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT);
+ rctx_g35->routing_flags |= (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
io_request->IoFlags |=
cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
}
@@ -2734,17 +2920,15 @@
&io_info, local_map_ptr);
scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG;
cmd->pd_r1_lb = io_info.pd_after_lb;
- if (instance->adapter_type == VENTURA_SERIES)
- io_request->RaidContext.raid_context_g35.span_arm
- = io_info.span_arm;
+ if (instance->adapter_type >= VENTURA_SERIES)
+ rctx_g35->span_arm = io_info.span_arm;
else
- io_request->RaidContext.raid_context.span_arm
- = io_info.span_arm;
+ rctx->span_arm = io_info.span_arm;
} else
scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
- if (instance->adapter_type == VENTURA_SERIES)
+ if (instance->adapter_type >= VENTURA_SERIES)
cmd->r1_alt_dev_handle = io_info.r1_alt_dev_handle;
else
cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
@@ -2762,31 +2946,26 @@
/* populate the LUN field */
memcpy(io_request->LUN, raidLUN, 8);
} else {
- io_request->RaidContext.raid_context.timeout_value =
+ rctx->timeout_value =
cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec);
cmd->request_desc->SCSIIO.RequestFlags =
(MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
<< MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
if (instance->adapter_type == INVADER_SERIES) {
if (io_info.do_fp_rlbypass ||
- (io_request->RaidContext.raid_context.reg_lock_flags
- == REGION_TYPE_UNUSED))
+ (rctx->reg_lock_flags == REGION_TYPE_UNUSED))
cmd->request_desc->SCSIIO.RequestFlags =
(MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
- io_request->RaidContext.raid_context.type
- = MPI2_TYPE_CUDA;
- io_request->RaidContext.raid_context.reg_lock_flags |=
+ rctx->type = MPI2_TYPE_CUDA;
+ rctx->reg_lock_flags |=
(MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
- MR_RL_FLAGS_SEQ_NUM_ENABLE);
- io_request->RaidContext.raid_context.nseg = 0x1;
- } else if (instance->adapter_type == VENTURA_SERIES) {
- io_request->RaidContext.raid_context_g35.routing_flags |=
- (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
- io_request->RaidContext.raid_context_g35.nseg_type |=
- (1 << RAID_CONTEXT_NSEG_SHIFT);
- io_request->RaidContext.raid_context_g35.nseg_type |=
- (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT);
+ MR_RL_FLAGS_SEQ_NUM_ENABLE);
+ rctx->nseg = 0x1;
+ } else if (instance->adapter_type >= VENTURA_SERIES) {
+ rctx_g35->routing_flags |= (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
+ rctx_g35->nseg_type |= (1 << RAID_CONTEXT_NSEG_SHIFT);
+ rctx_g35->nseg_type |= (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT);
}
io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
io_request->DevHandle = cpu_to_le16(device_id);
@@ -2832,7 +3011,7 @@
device_id < instance->fw_supported_vd_count)) {
ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
- if (ld >= instance->fw_supported_vd_count)
+ if (ld >= instance->fw_supported_vd_count - 1)
fp_possible = 0;
else {
raid = MR_LdRaidGet(ld, local_map_ptr);
@@ -2855,7 +3034,7 @@
/* set RAID context values */
pRAID_Context->config_seq_num = raid->seqNum;
- if (instance->adapter_type != VENTURA_SERIES)
+ if (instance->adapter_type < VENTURA_SERIES)
pRAID_Context->reg_lock_flags = REGION_TYPE_SHARED_READ;
pRAID_Context->timeout_value =
cpu_to_le16(raid->fpIoTimeoutForLd);
@@ -2926,50 +3105,71 @@
<< MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
/* If FW supports PD sequence number */
- if (instance->use_seqnum_jbod_fp &&
- instance->pd_list[pd_index].driveType == TYPE_DISK) {
- /* TgtId must be incremented by 255 as jbod seq number is index
- * below raid map
- */
- /* More than 256 PD/JBOD support for Ventura */
- if (instance->support_morethan256jbod)
- pRAID_Context->virtual_disk_tgt_id =
- pd_sync->seq[pd_index].pd_target_id;
- else
- pRAID_Context->virtual_disk_tgt_id =
- cpu_to_le16(device_id + (MAX_PHYSICAL_DEVICES - 1));
- pRAID_Context->config_seq_num = pd_sync->seq[pd_index].seqNum;
- io_request->DevHandle = pd_sync->seq[pd_index].devHandle;
- if (instance->adapter_type == VENTURA_SERIES) {
- io_request->RaidContext.raid_context_g35.routing_flags |=
- (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
- io_request->RaidContext.raid_context_g35.nseg_type |=
- (1 << RAID_CONTEXT_NSEG_SHIFT);
- io_request->RaidContext.raid_context_g35.nseg_type |=
- (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT);
+ if (instance->support_seqnum_jbod_fp) {
+ if (instance->use_seqnum_jbod_fp &&
+ instance->pd_list[pd_index].driveType == TYPE_DISK) {
+
+ /* More than 256 PD/JBOD support for Ventura */
+ if (instance->support_morethan256jbod)
+ pRAID_Context->virtual_disk_tgt_id =
+ pd_sync->seq[pd_index].pd_target_id;
+ else
+ pRAID_Context->virtual_disk_tgt_id =
+ cpu_to_le16(device_id +
+ (MAX_PHYSICAL_DEVICES - 1));
+ pRAID_Context->config_seq_num =
+ pd_sync->seq[pd_index].seqNum;
+ io_request->DevHandle =
+ pd_sync->seq[pd_index].devHandle;
+ if (instance->adapter_type >= VENTURA_SERIES) {
+ io_request->RaidContext.raid_context_g35.routing_flags |=
+ (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
+ io_request->RaidContext.raid_context_g35.nseg_type |=
+ (1 << RAID_CONTEXT_NSEG_SHIFT);
+ io_request->RaidContext.raid_context_g35.nseg_type |=
+ (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT);
+ } else {
+ pRAID_Context->type = MPI2_TYPE_CUDA;
+ pRAID_Context->nseg = 0x1;
+ pRAID_Context->reg_lock_flags |=
+ (MR_RL_FLAGS_SEQ_NUM_ENABLE |
+ MR_RL_FLAGS_GRANT_DESTINATION_CUDA);
+ }
} else {
- pRAID_Context->type = MPI2_TYPE_CUDA;
- pRAID_Context->nseg = 0x1;
- pRAID_Context->reg_lock_flags |=
- (MR_RL_FLAGS_SEQ_NUM_ENABLE|MR_RL_FLAGS_GRANT_DESTINATION_CUDA);
+ pRAID_Context->virtual_disk_tgt_id =
+ cpu_to_le16(device_id +
+ (MAX_PHYSICAL_DEVICES - 1));
+ pRAID_Context->config_seq_num = 0;
+ io_request->DevHandle = cpu_to_le16(0xFFFF);
}
- } else if (fusion->fast_path_io) {
- pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id);
- pRAID_Context->config_seq_num = 0;
- local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
- io_request->DevHandle =
- local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
} else {
- /* Want to send all IO via FW path */
pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id);
pRAID_Context->config_seq_num = 0;
- io_request->DevHandle = cpu_to_le16(0xFFFF);
+
+ if (fusion->fast_path_io) {
+ local_map_ptr =
+ fusion->ld_drv_map[(instance->map_id & 1)];
+ io_request->DevHandle =
+ local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
+ } else {
+ io_request->DevHandle = cpu_to_le16(0xFFFF);
+ }
}
cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
- cmd->request_desc->SCSIIO.MSIxIndex =
- instance->reply_map[raw_smp_processor_id()];
+ if ((instance->perf_mode == MR_BALANCED_PERF_MODE) &&
+ atomic_read(&scmd->device->device_busy) > MR_DEVICE_HIGH_IOPS_DEPTH)
+ cmd->request_desc->SCSIIO.MSIxIndex =
+ mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) /
+ MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start);
+ else if (instance->msix_load_balance)
+ cmd->request_desc->SCSIIO.MSIxIndex =
+ (mega_mod64(atomic64_add_return(1, &instance->total_io_count),
+ instance->msix_vectors));
+ else
+ cmd->request_desc->SCSIIO.MSIxIndex =
+ instance->reply_map[raw_smp_processor_id()];
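As an aside, the balanced-performance branch above amounts to spreading deep queues round-robin over the first low_latency_index_start reply queues, advancing one queue per batch of submissions. A minimal stand-alone model (not part of this patch; the constant values are assumptions, and the driver uses mega_mod64() rather than '%' to stay 32-bit safe) is:

	/* Stand-alone illustration of the high-IOPS MSI-x pick above. */
	#include <stdio.h>
	#include <stdint.h>

	#define HIGH_IOPS_BATCH_COUNT   16   /* assumed MR_HIGH_IOPS_BATCH_COUNT  */
	#define LOW_LATENCY_INDEX_START 8    /* assumed low_latency_index_start   */

	static unsigned int pick_high_iops_msix(uint64_t *high_iops_outstanding)
	{
		/* every HIGH_IOPS_BATCH_COUNT submissions move to the next of the
		 * first LOW_LATENCY_INDEX_START reply queues, round robin */
		return (unsigned int)((++(*high_iops_outstanding) / HIGH_IOPS_BATCH_COUNT) %
				      LOW_LATENCY_INDEX_START);
	}

	int main(void)
	{
		uint64_t outstanding = 0;

		for (int i = 0; i < 64; i++)
			printf("io %2d -> msix %u\n", i, pick_high_iops_msix(&outstanding));
		return 0;
	}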
if (!fp_possible) {
/* system pd firmware path */
@@ -3009,7 +3209,7 @@
* Invokes helper functions to prepare request frames
* and sets flags appropriate for IO/Non-IO cmd
*/
-int
+static int
megasas_build_io_fusion(struct megasas_instance *instance,
struct scsi_cmnd *scp,
struct megasas_cmd_fusion *cmd)
@@ -3073,7 +3273,7 @@
return 1;
}
- if (instance->adapter_type == VENTURA_SERIES) {
+ if (instance->adapter_type >= VENTURA_SERIES) {
set_num_sge(&io_request->RaidContext.raid_context_g35, sge_count);
cpu_to_le16s(&io_request->RaidContext.raid_context_g35.routing_flags);
cpu_to_le16s(&io_request->RaidContext.raid_context_g35.nseg_type);
@@ -3088,9 +3288,9 @@
io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
- if (scp->sc_data_direction == PCI_DMA_TODEVICE)
+ if (scp->sc_data_direction == DMA_TO_DEVICE)
io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_WRITE);
- else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+ else if (scp->sc_data_direction == DMA_FROM_DEVICE)
io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_READ);
io_request->SGLOffset0 =
@@ -3123,9 +3323,9 @@
/* megasas_prepare_secondRaid1_IO
* It prepares the second RAID 1 IO
*/
-void megasas_prepare_secondRaid1_IO(struct megasas_instance *instance,
- struct megasas_cmd_fusion *cmd,
- struct megasas_cmd_fusion *r1_cmd)
+static void megasas_prepare_secondRaid1_IO(struct megasas_instance *instance,
+ struct megasas_cmd_fusion *cmd,
+ struct megasas_cmd_fusion *r1_cmd)
{
union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc, *req_desc2 = NULL;
struct fusion_context *fusion;
@@ -3149,9 +3349,9 @@
r1_cmd->request_desc->SCSIIO.DevHandle = cmd->r1_alt_dev_handle;
r1_cmd->io_request->DevHandle = cmd->r1_alt_dev_handle;
r1_cmd->r1_alt_dev_handle = cmd->io_request->DevHandle;
- cmd->io_request->RaidContext.raid_context_g35.smid.peer_smid =
+ cmd->io_request->RaidContext.raid_context_g35.flow_specific.peer_smid =
cpu_to_le16(r1_cmd->index);
- r1_cmd->io_request->RaidContext.raid_context_g35.smid.peer_smid =
+ r1_cmd->io_request->RaidContext.raid_context_g35.flow_specific.peer_smid =
cpu_to_le16(cmd->index);
/*MSIxIndex of both commands request descriptors should be same*/
r1_cmd->request_desc->SCSIIO.MSIxIndex =
@@ -3174,9 +3374,6 @@
struct megasas_cmd_fusion *cmd, *r1_cmd = NULL;
union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
u32 index;
- struct fusion_context *fusion;
-
- fusion = instance->ctrl_context;
if ((megasas_cmd_type(scmd) == READ_WRITE_LDIO) &&
instance->ldio_threshold &&
@@ -3272,7 +3469,7 @@
rctx_g35 = &cmd->io_request->RaidContext.raid_context_g35;
fusion = instance->ctrl_context;
- peer_smid = le16_to_cpu(rctx_g35->smid.peer_smid);
+ peer_smid = le16_to_cpu(rctx_g35->flow_specific.peer_smid);
r1_cmd = fusion->cmd_list[peer_smid - 1];
scmd_local = cmd->scmd;
@@ -3311,8 +3508,9 @@
* @instance: Adapter soft state
* Completes all commands that is in reply descriptor queue
*/
-int
-complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
+static int
+complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex,
+ struct megasas_irq_context *irq_context)
{
union MPI2_REPLY_DESCRIPTORS_UNION *desc;
struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
@@ -3385,7 +3583,7 @@
atomic_dec(&lbinfo->scsi_pending_cmds[cmd_fusion->pd_r1_lb]);
cmd_fusion->scmd->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
}
- //Fall thru and complete IO
+ /* Fall through - and complete IO */
case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */
atomic_dec(&instance->fw_outstanding);
if (cmd_fusion->r1_alt_dev_handle == MR_DEVHANDLE_INVALID) {
@@ -3445,7 +3643,7 @@
* number of reply counts and still there are more replies in reply queue
* pending to be completed
*/
- if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
+ if (threshold_reply_count >= instance->threshold_reply_count) {
if (instance->msix_combined)
writel(((MSIxIndex & 0x7) << 24) |
fusion->last_reply_idx[MSIxIndex],
@@ -3455,39 +3653,102 @@
fusion->last_reply_idx[MSIxIndex],
instance->reply_post_host_index_addr[0]);
threshold_reply_count = 0;
+ if (irq_context) {
+ if (!irq_context->irq_poll_scheduled) {
+ irq_context->irq_poll_scheduled = true;
+ irq_context->irq_line_enable = true;
+ irq_poll_sched(&irq_context->irqpoll);
+ }
+ return num_completed;
+ }
}
}
- if (!num_completed)
- return IRQ_NONE;
+ if (num_completed) {
+ wmb();
+ if (instance->msix_combined)
+ writel(((MSIxIndex & 0x7) << 24) |
+ fusion->last_reply_idx[MSIxIndex],
+ instance->reply_post_host_index_addr[MSIxIndex/8]);
+ else
+ writel((MSIxIndex << 24) |
+ fusion->last_reply_idx[MSIxIndex],
+ instance->reply_post_host_index_addr[0]);
+ megasas_check_and_restore_queue_depth(instance);
+ }
+ return num_completed;
+}
- wmb();
- if (instance->msix_combined)
- writel(((MSIxIndex & 0x7) << 24) |
- fusion->last_reply_idx[MSIxIndex],
- instance->reply_post_host_index_addr[MSIxIndex/8]);
- else
- writel((MSIxIndex << 24) |
- fusion->last_reply_idx[MSIxIndex],
- instance->reply_post_host_index_addr[0]);
- megasas_check_and_restore_queue_depth(instance);
- return IRQ_HANDLED;
+/**
+ * megasas_enable_irq_poll() - enable irqpoll
+ */
+static void megasas_enable_irq_poll(struct megasas_instance *instance)
+{
+ u32 count, i;
+ struct megasas_irq_context *irq_ctx;
+
+ count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+
+ for (i = 0; i < count; i++) {
+ irq_ctx = &instance->irq_context[i];
+ irq_poll_enable(&irq_ctx->irqpoll);
+ }
}
/**
* megasas_sync_irqs - Synchronizes all IRQs owned by adapter
* @instance: Adapter soft state
*/
-void megasas_sync_irqs(unsigned long instance_addr)
+static void megasas_sync_irqs(unsigned long instance_addr)
{
u32 count, i;
struct megasas_instance *instance =
(struct megasas_instance *)instance_addr;
+ struct megasas_irq_context *irq_ctx;
count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
- for (i = 0; i < count; i++)
+ for (i = 0; i < count; i++) {
synchronize_irq(pci_irq_vector(instance->pdev, i));
+ irq_ctx = &instance->irq_context[i];
+ irq_poll_disable(&irq_ctx->irqpoll);
+ if (irq_ctx->irq_poll_scheduled) {
+ irq_ctx->irq_poll_scheduled = false;
+ enable_irq(irq_ctx->os_irq);
+ }
+ }
+}
+
+/**
+ * megasas_irqpoll() - process a queue for completed reply descriptors
+ * @irqpoll: IRQ poll structure associated with queue to poll.
+ * @budget: Threshold of reply descriptors to process per poll.
+ *
+ * Return: The number of entries processed.
+ */
+
+int megasas_irqpoll(struct irq_poll *irqpoll, int budget)
+{
+ struct megasas_irq_context *irq_ctx;
+ struct megasas_instance *instance;
+ int num_entries;
+
+ irq_ctx = container_of(irqpoll, struct megasas_irq_context, irqpoll);
+ instance = irq_ctx->instance;
+
+ if (irq_ctx->irq_line_enable) {
+ disable_irq(irq_ctx->os_irq);
+ irq_ctx->irq_line_enable = false;
+ }
+
+ num_entries = complete_cmd_fusion(instance, irq_ctx->MSIxIndex, irq_ctx);
+ if (num_entries < budget) {
+ irq_poll_complete(irqpoll);
+ irq_ctx->irq_poll_scheduled = false;
+ enable_irq(irq_ctx->os_irq);
+ }
+
+ return num_entries;
}
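megasas_irqpoll() above is the irq_poll callback: complete_cmd_fusion() hands off to it once the reply threshold is crossed, the poller then runs with the IRQ line disabled, and it re-enables the line when a run completes under budget. For context, a sketch of how such a poller could be registered per MSI-x vector is shown here; it is not taken verbatim from this patch, and the helper name and the use of threshold_reply_count as the poll weight are assumptions.

	/* Sketch only: per-vector irq_poll registration (names and weight are
	 * assumptions; the real initialisation lives elsewhere in the driver). */
	#include <linux/irq_poll.h>
	#include <linux/pci.h>
	#include "megaraid_sas_fusion.h"
	#include "megaraid_sas.h"

	static void example_setup_irq_poll(struct megasas_instance *instance)
	{
		struct megasas_irq_context *irq_ctx;
		u32 count, i;

		count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;

		for (i = 0; i < count; i++) {
			irq_ctx = &instance->irq_context[i];
			irq_ctx->os_irq = pci_irq_vector(instance->pdev, i);
			irq_ctx->irq_poll_scheduled = false;
			/* poll budget: one reply-threshold batch per irq_poll run */
			irq_poll_init(&irq_ctx->irqpoll,
				      instance->threshold_reply_count,
				      megasas_irqpoll);
		}
	}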
/**
@@ -3496,77 +3757,54 @@
*
* Tasklet to complete cmds
*/
-void
+static void
megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
{
struct megasas_instance *instance =
(struct megasas_instance *)instance_addr;
- unsigned long flags;
u32 count, MSIxIndex;
count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
/* If we have already declared adapter dead, do not complete cmds */
- spin_lock_irqsave(&instance->hba_lock, flags);
- if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
- spin_unlock_irqrestore(&instance->hba_lock, flags);
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
return;
- }
- spin_unlock_irqrestore(&instance->hba_lock, flags);
for (MSIxIndex = 0 ; MSIxIndex < count; MSIxIndex++)
- complete_cmd_fusion(instance, MSIxIndex);
+ complete_cmd_fusion(instance, MSIxIndex, NULL);
}
/**
* megasas_isr_fusion - isr entry point
*/
-irqreturn_t megasas_isr_fusion(int irq, void *devp)
+static irqreturn_t megasas_isr_fusion(int irq, void *devp)
{
struct megasas_irq_context *irq_context = devp;
struct megasas_instance *instance = irq_context->instance;
- u32 mfiStatus, fw_state, dma_state;
+ u32 mfiStatus;
if (instance->mask_interrupts)
return IRQ_NONE;
+#if defined(ENABLE_IRQ_POLL)
+ if (irq_context->irq_poll_scheduled)
+ return IRQ_HANDLED;
+#endif
+
if (!instance->msix_vectors) {
- mfiStatus = instance->instancet->clear_intr(instance->reg_set);
+ mfiStatus = instance->instancet->clear_intr(instance);
if (!mfiStatus)
return IRQ_NONE;
}
/* If we are resetting, bail */
if (test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags)) {
- instance->instancet->clear_intr(instance->reg_set);
+ instance->instancet->clear_intr(instance);
return IRQ_HANDLED;
}
- if (!complete_cmd_fusion(instance, irq_context->MSIxIndex)) {
- instance->instancet->clear_intr(instance->reg_set);
- /* If we didn't complete any commands, check for FW fault */
- fw_state = instance->instancet->read_fw_status_reg(
- instance->reg_set) & MFI_STATE_MASK;
- dma_state = instance->instancet->read_fw_status_reg
- (instance->reg_set) & MFI_STATE_DMADONE;
- if (instance->crash_dump_drv_support &&
- instance->crash_dump_app_support) {
- /* Start collecting crash, if DMA bit is done */
- if ((fw_state == MFI_STATE_FAULT) && dma_state)
- schedule_work(&instance->crash_init);
- else if (fw_state == MFI_STATE_FAULT) {
- if (instance->unload == 0)
- schedule_work(&instance->work_init);
- }
- } else if (fw_state == MFI_STATE_FAULT) {
- dev_warn(&instance->pdev->dev, "Iop2SysDoorbellInt"
- "for scsi%d\n", instance->host->host_no);
- if (instance->unload == 0)
- schedule_work(&instance->work_init);
- }
- }
-
- return IRQ_HANDLED;
+ return complete_cmd_fusion(instance, irq_context->MSIxIndex, irq_context)
+ ? IRQ_HANDLED : IRQ_NONE;
}
/**
@@ -3575,7 +3813,7 @@
* mfi_cmd: megasas_cmd pointer
*
*/
-void
+static void
build_mpt_mfi_pass_thru(struct megasas_instance *instance,
struct megasas_cmd *mfi_cmd)
{
@@ -3633,7 +3871,7 @@
* @cmd: mfi cmd to build
*
*/
-union MEGASAS_REQUEST_DESCRIPTOR_UNION *
+static union MEGASAS_REQUEST_DESCRIPTOR_UNION *
build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc = NULL;
@@ -3659,7 +3897,7 @@
* @cmd: mfi cmd pointer
*
*/
-void
+static void
megasas_issue_dcmd_fusion(struct megasas_instance *instance,
struct megasas_cmd *cmd)
{
@@ -3692,9 +3930,9 @@
* @regs: MFI register set
*/
static u32
-megasas_read_fw_status_reg_fusion(struct megasas_register_set __iomem *regs)
+megasas_read_fw_status_reg_fusion(struct megasas_instance *instance)
{
- return readl(&(regs)->outbound_scratch_pad);
+ return megasas_readl(instance, &instance->reg_set->outbound_scratch_pad_0);
}
/**
@@ -3756,11 +3994,12 @@
writel(MPI2_WRSEQ_6TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
/* Check that the diag write enable (DRWE) bit is on */
- host_diag = readl(&instance->reg_set->fusion_host_diag);
+ host_diag = megasas_readl(instance, &instance->reg_set->fusion_host_diag);
retry = 0;
while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
msleep(100);
- host_diag = readl(&instance->reg_set->fusion_host_diag);
+ host_diag = megasas_readl(instance,
+ &instance->reg_set->fusion_host_diag);
if (retry++ == 100) {
dev_warn(&instance->pdev->dev,
"Host diag unlock failed from %s %d\n",
@@ -3777,11 +4016,12 @@
msleep(3000);
/* Make sure reset adapter bit is cleared */
- host_diag = readl(&instance->reg_set->fusion_host_diag);
+ host_diag = megasas_readl(instance, &instance->reg_set->fusion_host_diag);
retry = 0;
while (host_diag & HOST_DIAG_RESET_ADAPTER) {
msleep(100);
- host_diag = readl(&instance->reg_set->fusion_host_diag);
+ host_diag = megasas_readl(instance,
+ &instance->reg_set->fusion_host_diag);
if (retry++ == 1000) {
dev_warn(&instance->pdev->dev,
"Diag reset adapter never cleared %s %d\n",
@@ -3792,14 +4032,14 @@
if (host_diag & HOST_DIAG_RESET_ADAPTER)
return -1;
- abs_state = instance->instancet->read_fw_status_reg(instance->reg_set)
+ abs_state = instance->instancet->read_fw_status_reg(instance)
& MFI_STATE_MASK;
retry = 0;
while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
msleep(100);
abs_state = instance->instancet->
- read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
+ read_fw_status_reg(instance) & MFI_STATE_MASK;
}
if (abs_state <= MFI_STATE_FW_INIT) {
dev_warn(&instance->pdev->dev,
@@ -3822,21 +4062,68 @@
return 0;
}
+/**
+ * megasas_trigger_snap_dump - Trigger snap dump in FW
+ * @instance: Soft instance of adapter
+ */
+static inline void megasas_trigger_snap_dump(struct megasas_instance *instance)
+{
+ int j;
+ u32 fw_state, abs_state;
+
+ if (!instance->disableOnlineCtrlReset) {
+ dev_info(&instance->pdev->dev, "Trigger snap dump\n");
+ writel(MFI_ADP_TRIGGER_SNAP_DUMP,
+ &instance->reg_set->doorbell);
+ readl(&instance->reg_set->doorbell);
+ }
+
+ for (j = 0; j < instance->snapdump_wait_time; j++) {
+ abs_state = instance->instancet->read_fw_status_reg(instance);
+ fw_state = abs_state & MFI_STATE_MASK;
+ if (fw_state == MFI_STATE_FAULT) {
+ dev_printk(KERN_ERR, &instance->pdev->dev,
+ "FW in FAULT state Fault code:0x%x subcode:0x%x func:%s\n",
+ abs_state & MFI_STATE_FAULT_CODE,
+ abs_state & MFI_STATE_FAULT_SUBCODE, __func__);
+ return;
+ }
+ msleep(1000);
+ }
+}
+
/* This function waits for outstanding commands on fusion to complete */
-int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
- int reason, int *convert)
+static int
+megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
+ int reason, int *convert)
{
int i, outstanding, retval = 0, hb_seconds_missed = 0;
- u32 fw_state;
+ u32 fw_state, abs_state;
+ u32 waittime_for_io_completion;
- for (i = 0; i < resetwaittime; i++) {
+ waittime_for_io_completion =
+ min_t(u32, resetwaittime,
+ (resetwaittime - instance->snapdump_wait_time));
+
+ if (reason == MFI_IO_TIMEOUT_OCR) {
+ dev_info(&instance->pdev->dev,
+ "MFI command is timed out\n");
+ megasas_complete_cmd_dpc_fusion((unsigned long)instance);
+ if (instance->snapdump_wait_time)
+ megasas_trigger_snap_dump(instance);
+ retval = 1;
+ goto out;
+ }
+
+ for (i = 0; i < waittime_for_io_completion; i++) {
/* Check if firmware is in fault state */
- fw_state = instance->instancet->read_fw_status_reg(
- instance->reg_set) & MFI_STATE_MASK;
+ abs_state = instance->instancet->read_fw_status_reg(instance);
+ fw_state = abs_state & MFI_STATE_MASK;
if (fw_state == MFI_STATE_FAULT) {
- dev_warn(&instance->pdev->dev, "Found FW in FAULT state,"
- " will reset adapter scsi%d.\n",
- instance->host->host_no);
+ dev_printk(KERN_ERR, &instance->pdev->dev,
+ "FW in FAULT state Fault code:0x%x subcode:0x%x func:%s\n",
+ abs_state & MFI_STATE_FAULT_CODE,
+ abs_state & MFI_STATE_FAULT_SUBCODE, __func__);
megasas_complete_cmd_dpc_fusion((unsigned long)instance);
if (instance->requestorId && reason) {
dev_warn(&instance->pdev->dev, "SR-IOV Found FW in FAULT"
@@ -3850,13 +4137,6 @@
goto out;
}
- if (reason == MFI_IO_TIMEOUT_OCR) {
- dev_info(&instance->pdev->dev,
- "MFI IO is timed out, initiating OCR\n");
- megasas_complete_cmd_dpc_fusion((unsigned long)instance);
- retval = 1;
- goto out;
- }
/* If SR-IOV VF mode & heartbeat timeout, don't wait */
if (instance->requestorId && !reason) {
@@ -3901,6 +4181,12 @@
msleep(1000);
}
+ if (instance->snapdump_wait_time) {
+ megasas_trigger_snap_dump(instance);
+ retval = 1;
+ goto out;
+ }
+
if (atomic_read(&instance->fw_outstanding)) {
dev_err(&instance->pdev->dev, "pending commands remain after waiting, "
"will reset adapter scsi%d.\n",
@@ -3908,6 +4194,7 @@
*convert = 1;
retval = 1;
}
+
out:
return retval;
}
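The wait above is now split in two: outstanding I/O gets resetwaittime minus snapdump_wait_time seconds, and the remainder is reserved for megasas_trigger_snap_dump(). A small worked example of that arithmetic, assuming the usual 180 s resetwaittime module-parameter default and the 15 s MEGASAS_DEFAULT_SNAP_DUMP_WAIT_TIME defined in the header change later in this patch:

	/* Worked example of the I/O-wait / snap-dump split (values assumed). */
	#include <stdio.h>

	#define RESETWAITTIME        180  /* assumed module-parameter default      */
	#define SNAP_DUMP_WAIT_TIME   15  /* MEGASAS_DEFAULT_SNAP_DUMP_WAIT_TIME   */

	int main(void)
	{
		unsigned int io_wait = RESETWAITTIME - SNAP_DUMP_WAIT_TIME;

		/* mirrors min_t(u32, resetwaittime, resetwaittime - snapdump_wait_time) */
		if (RESETWAITTIME < io_wait)
			io_wait = RESETWAITTIME;

		printf("wait %u s for I/O, reserve %u s for snap dump\n",
		       io_wait, RESETWAITTIME - io_wait);
		return 0;
	}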
@@ -3932,7 +4219,7 @@
* megasas_refire_mgmt_cmd : Re-fire management commands
* @instance: Controller's soft instance
*/
-void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
+static void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
{
int j;
struct megasas_cmd_fusion *cmd_fusion;
@@ -3985,6 +4272,13 @@
}
break;
+ case MFI_CMD_TOOLBOX:
+ if (!instance->support_pci_lane_margining) {
+ cmd_mfi->frame->hdr.cmd_status = MFI_STAT_INVALID_CMD;
+ result = COMPLETE_CMD;
+ }
+
+ break;
default:
break;
}
@@ -4208,6 +4502,7 @@
instance->instancet->disable_intr(instance);
megasas_sync_irqs((unsigned long)instance);
instance->instancet->enable_intr(instance);
+ megasas_enable_irq_poll(instance);
if (scsi_lookup->scmd == NULL)
break;
}
@@ -4221,6 +4516,7 @@
megasas_sync_irqs((unsigned long)instance);
rc = megasas_track_scsiio(instance, id, channel);
instance->instancet->enable_intr(instance);
+ megasas_enable_irq_poll(instance);
break;
case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
@@ -4313,17 +4609,11 @@
{
struct megasas_instance *instance;
u16 smid, devhandle;
- struct fusion_context *fusion;
int ret;
struct MR_PRIV_DEVICE *mr_device_priv_data;
mr_device_priv_data = scmd->device->hostdata;
-
instance = (struct megasas_instance *)scmd->device->host->hostdata;
- fusion = instance->ctrl_context;
-
- scmd_printk(KERN_INFO, scmd, "task abort called for scmd(%p)\n", scmd);
- scsi_print_command(scmd);
if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL,"
@@ -4340,7 +4630,6 @@
goto out;
}
-
if (!mr_device_priv_data->is_tm_capable) {
ret = FAILED;
goto out;
@@ -4353,7 +4642,7 @@
if (!smid) {
ret = SUCCESS;
scmd_printk(KERN_NOTICE, scmd, "Command for which abort is"
- " issued is not found in oustanding commands\n");
+ " issued is not found in outstanding commands\n");
mutex_unlock(&instance->reset_mutex);
goto out;
}
@@ -4368,7 +4657,7 @@
goto out;
}
sdev_printk(KERN_INFO, scmd->device,
- "attempting task abort! scmd(%p) tm_dev_handle 0x%x\n",
+ "attempting task abort! scmd(0x%p) tm_dev_handle 0x%x\n",
scmd, devhandle);
mr_device_priv_data->tm_busy = 1;
@@ -4379,9 +4668,12 @@
mr_device_priv_data->tm_busy = 0;
mutex_unlock(&instance->reset_mutex);
-out:
- sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
+ scmd_printk(KERN_INFO, scmd, "task abort %s!! scmd(0x%p)\n",
((ret == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+out:
+ scsi_print_command(scmd);
+ if (megasas_dbg_lvl & TM_DEBUG)
+ megasas_dump_fusion_io(scmd);
return ret;
}
@@ -4399,15 +4691,10 @@
struct megasas_instance *instance;
int ret = FAILED;
u16 devhandle;
- struct fusion_context *fusion;
struct MR_PRIV_DEVICE *mr_device_priv_data;
mr_device_priv_data = scmd->device->hostdata;
instance = (struct megasas_instance *)scmd->device->host->hostdata;
- fusion = instance->ctrl_context;
-
- sdev_printk(KERN_INFO, scmd->device,
- "target reset called for scmd(%p)\n", scmd);
if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL,"
@@ -4417,14 +4704,13 @@
}
if (!mr_device_priv_data) {
- sdev_printk(KERN_INFO, scmd->device, "device been deleted! "
- "scmd(%p)\n", scmd);
+ sdev_printk(KERN_INFO, scmd->device,
+ "device been deleted! scmd: (0x%p)\n", scmd);
scmd->result = DID_NO_CONNECT << 16;
ret = SUCCESS;
goto out;
}
-
if (!mr_device_priv_data->is_tm_capable) {
ret = FAILED;
goto out;
@@ -4442,7 +4728,7 @@
}
sdev_printk(KERN_INFO, scmd->device,
- "attempting target reset! scmd(%p) tm_dev_handle 0x%x\n",
+ "attempting target reset! scmd(0x%p) tm_dev_handle: 0x%x\n",
scmd, devhandle);
mr_device_priv_data->tm_busy = 1;
ret = megasas_issue_tm(instance, devhandle,
@@ -4451,15 +4737,16 @@
mr_device_priv_data);
mr_device_priv_data->tm_busy = 0;
mutex_unlock(&instance->reset_mutex);
-out:
- scmd_printk(KERN_NOTICE, scmd, "megasas: target reset %s!!\n",
+ scmd_printk(KERN_NOTICE, scmd, "target reset %s!!\n",
(ret == SUCCESS) ? "SUCCESS" : "FAILED");
+out:
return ret;
}
/*SRIOV get other instance in cluster if any*/
-struct megasas_instance *megasas_get_peer_instance(struct megasas_instance *instance)
+static struct
+megasas_instance *megasas_get_peer_instance(struct megasas_instance *instance)
{
int i;
@@ -4499,12 +4786,14 @@
struct megasas_instance *instance;
struct megasas_cmd_fusion *cmd_fusion, *r1_cmd;
struct fusion_context *fusion;
- u32 abs_state, status_reg, reset_adapter;
+ u32 abs_state, status_reg, reset_adapter, fpio_count = 0;
u32 io_timeout_in_crash_mode = 0;
struct scsi_cmnd *scmd_local = NULL;
struct scsi_device *sdev;
int ret_target_prop = DCMD_FAILED;
bool is_target_prop = false;
+ bool do_adp_reset = true;
+ int max_reset_tries = MEGASAS_FUSION_MAX_RESET_TRIES;
instance = (struct megasas_instance *)shost->hostdata;
fusion = instance->ctrl_context;
@@ -4518,7 +4807,7 @@
mutex_unlock(&instance->reset_mutex);
return FAILED;
}
- status_reg = instance->instancet->read_fw_status_reg(instance->reg_set);
+ status_reg = instance->instancet->read_fw_status_reg(instance);
abs_state = status_reg & MFI_STATE_MASK;
/* IO timeout detected, forcibly put FW in FAULT state */
@@ -4527,7 +4816,7 @@
dev_info(&instance->pdev->dev, "IO/DCMD timeout is detected, "
"forcibly FAULT Firmware\n");
atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
- status_reg = readl(&instance->reg_set->doorbell);
+ status_reg = megasas_readl(instance, &instance->reg_set->doorbell);
writel(status_reg | MFI_STATE_FORCE_OCR,
&instance->reg_set->doorbell);
readl(&instance->reg_set->doorbell);
@@ -4571,26 +4860,30 @@
if (convert)
reason = 0;
- if (megasas_dbg_lvl & OCR_LOGS)
+ if (megasas_dbg_lvl & OCR_DEBUG)
dev_info(&instance->pdev->dev, "\nPending SCSI commands:\n");
/* Now return commands back to the OS */
for (i = 0 ; i < instance->max_scsi_cmds; i++) {
cmd_fusion = fusion->cmd_list[i];
/*check for extra commands issued by driver*/
- if (instance->adapter_type == VENTURA_SERIES) {
+ if (instance->adapter_type >= VENTURA_SERIES) {
r1_cmd = fusion->cmd_list[i + instance->max_fw_cmds];
megasas_return_cmd_fusion(instance, r1_cmd);
}
scmd_local = cmd_fusion->scmd;
if (cmd_fusion->scmd) {
- if (megasas_dbg_lvl & OCR_LOGS) {
+ if (megasas_dbg_lvl & OCR_DEBUG) {
sdev_printk(KERN_INFO,
cmd_fusion->scmd->device, "SMID: 0x%x\n",
cmd_fusion->index);
- scsi_print_command(cmd_fusion->scmd);
+ megasas_dump_fusion_io(cmd_fusion->scmd);
}
+ if (cmd_fusion->io_request->Function ==
+ MPI2_FUNCTION_SCSI_IO_REQUEST)
+ fpio_count++;
+
scmd_local->result =
megasas_check_mpio_paths(instance,
scmd_local);
@@ -4603,10 +4896,12 @@
}
}
+ dev_info(&instance->pdev->dev, "Outstanding fastpath IOs: %d\n",
+ fpio_count);
+
atomic_set(&instance->fw_outstanding, 0);
- status_reg = instance->instancet->read_fw_status_reg(
- instance->reg_set);
+ status_reg = instance->instancet->read_fw_status_reg(instance);
abs_state = status_reg & MFI_STATE_MASK;
reset_adapter = status_reg & MFI_RESET_ADAPTER;
if (instance->disableOnlineCtrlReset ||
@@ -4615,52 +4910,45 @@
dev_warn(&instance->pdev->dev, "Reset not supported"
", killing adapter scsi%d.\n",
instance->host->host_no);
- megaraid_sas_kill_hba(instance);
- instance->skip_heartbeat_timer_del = 1;
- retval = FAILED;
- goto out;
+ goto kill_hba;
}
/* Let SR-IOV VF & PF sync up if there was a HB failure */
if (instance->requestorId && !reason) {
msleep(MEGASAS_OCR_SETTLE_TIME_VF);
- goto transition_to_ready;
+ do_adp_reset = false;
+ max_reset_tries = MEGASAS_SRIOV_MAX_RESET_TRIES_VF;
}
/* Now try to reset the chip */
- for (i = 0; i < MEGASAS_FUSION_MAX_RESET_TRIES; i++) {
-
- if (instance->instancet->adp_reset
- (instance, instance->reg_set))
+ for (i = 0; i < max_reset_tries; i++) {
+ /*
+ * Do adp reset and wait for
+ * controller to transition to ready
+ */
+ if (megasas_adp_reset_wait_for_ready(instance,
+ do_adp_reset, 1) == FAILED)
continue;
-transition_to_ready:
+
/* Wait for FW to become ready */
if (megasas_transition_to_ready(instance, 1)) {
dev_warn(&instance->pdev->dev,
"Failed to transition controller to ready for "
"scsi%d.\n", instance->host->host_no);
- if (instance->requestorId && !reason)
- goto fail_kill_adapter;
- else
- continue;
+ continue;
}
megasas_reset_reply_desc(instance);
megasas_fusion_update_can_queue(instance, OCR_CONTEXT);
if (megasas_ioc_init_fusion(instance)) {
- if (instance->requestorId && !reason)
- goto fail_kill_adapter;
- else
- continue;
+ continue;
}
if (megasas_get_ctrl_info(instance)) {
dev_info(&instance->pdev->dev,
"Failed from %s %d\n",
__func__, __LINE__);
- megaraid_sas_kill_hba(instance);
- retval = FAILED;
- goto out;
+ goto kill_hba;
}
megasas_refire_mgmt_cmd(instance);
@@ -4677,7 +4965,7 @@
megasas_setup_jbod_map(instance);
/* reset stream detection array */
- if (instance->adapter_type == VENTURA_SERIES) {
+ if (instance->adapter_type >= VENTURA_SERIES) {
for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) {
memset(fusion->stream_detect_by_ld[j],
0, sizeof(struct LD_STREAM_DETECT));
@@ -4689,7 +4977,7 @@
clear_bit(MEGASAS_FUSION_IN_RESET,
&instance->reset_flags);
instance->instancet->enable_intr(instance);
-
+ megasas_enable_irq_poll(instance);
shost_for_each_device(sdev, shost) {
if ((instance->tgt_prop) &&
(instance->nvme_page_size))
@@ -4701,9 +4989,9 @@
atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
- dev_info(&instance->pdev->dev, "Interrupts are enabled and"
- " controller is OPERATIONAL for scsi:%d\n",
- instance->host->host_no);
+ dev_info(&instance->pdev->dev,
+ "Adapter is OPERATIONAL for scsi:%d\n",
+ instance->host->host_no);
/* Restart SR-IOV heartbeat */
if (instance->requestorId) {
@@ -4721,6 +5009,13 @@
megasas_set_crash_dump_params(instance,
MR_CRASH_BUF_TURN_OFF);
+ if (instance->snapdump_wait_time) {
+ megasas_get_snapdump_properties(instance);
+ dev_info(&instance->pdev->dev,
+ "Snap dump wait time\t: %d\n",
+ instance->snapdump_wait_time);
+ }
+
retval = SUCCESS;
/* Adapter reset completed successfully */
@@ -4730,13 +5025,10 @@
goto out;
}
-fail_kill_adapter:
/* Reset failed, kill the adapter */
dev_warn(&instance->pdev->dev, "Reset failed, killing "
"adapter scsi%d.\n", instance->host->host_no);
- megaraid_sas_kill_hba(instance);
- instance->skip_heartbeat_timer_del = 1;
- retval = FAILED;
+ goto kill_hba;
} else {
/* For VF: Restart HB timer if we didn't OCR */
if (instance->requestorId) {
@@ -4744,24 +5036,30 @@
}
clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
instance->instancet->enable_intr(instance);
+ megasas_enable_irq_poll(instance);
atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
+ goto out;
}
+kill_hba:
+ megaraid_sas_kill_hba(instance);
+ megasas_enable_irq_poll(instance);
+ instance->skip_heartbeat_timer_del = 1;
+ retval = FAILED;
out:
clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
mutex_unlock(&instance->reset_mutex);
return retval;
}
-/* Fusion Crash dump collection work queue */
-void megasas_fusion_crash_dump_wq(struct work_struct *work)
+/* Fusion Crash dump collection */
+static void megasas_fusion_crash_dump(struct megasas_instance *instance)
{
- struct megasas_instance *instance =
- container_of(work, struct megasas_instance, crash_init);
u32 status_reg;
u8 partial_copy = 0;
+ int wait = 0;
- status_reg = instance->instancet->read_fw_status_reg(instance->reg_set);
+ status_reg = instance->instancet->read_fw_status_reg(instance);
/*
* Allocate host crash buffers to copy data from 1 MB DMA crash buffer
@@ -4777,8 +5075,8 @@
"crash dump and initiating OCR\n");
status_reg |= MFI_STATE_CRASH_DUMP_DONE;
writel(status_reg,
- &instance->reg_set->outbound_scratch_pad);
- readl(&instance->reg_set->outbound_scratch_pad);
+ &instance->reg_set->outbound_scratch_pad_0);
+ readl(&instance->reg_set->outbound_scratch_pad_0);
return;
}
megasas_alloc_host_crash_buffer(instance);
@@ -4786,21 +5084,41 @@
"allocated: %d\n", instance->drv_buf_alloc);
}
- /*
- * Driver has allocated max buffers, which can be allocated
- * and FW has more crash dump data, then driver will
- * ignore the data.
- */
- if (instance->drv_buf_index >= (instance->drv_buf_alloc)) {
- dev_info(&instance->pdev->dev, "Driver is done copying "
- "the buffer: %d\n", instance->drv_buf_alloc);
- status_reg |= MFI_STATE_CRASH_DUMP_DONE;
- partial_copy = 1;
- } else {
- memcpy(instance->crash_buf[instance->drv_buf_index],
- instance->crash_dump_buf, CRASH_DMA_BUF_SIZE);
- instance->drv_buf_index++;
- status_reg &= ~MFI_STATE_DMADONE;
+ while (!(status_reg & MFI_STATE_CRASH_DUMP_DONE) &&
+ (wait < MEGASAS_WATCHDOG_WAIT_COUNT)) {
+ if (!(status_reg & MFI_STATE_DMADONE)) {
+ /*
+ * Next crash dump buffer is not yet DMA'd by FW
+ * Check after 10ms. Wait for 1 second for FW to
+ * post the next buffer. If not bail out.
+ */
+ wait++;
+ msleep(MEGASAS_WAIT_FOR_NEXT_DMA_MSECS);
+ status_reg = instance->instancet->read_fw_status_reg(
+ instance);
+ continue;
+ }
+
+ wait = 0;
+ if (instance->drv_buf_index >= instance->drv_buf_alloc) {
+ dev_info(&instance->pdev->dev,
+ "Driver is done copying the buffer: %d\n",
+ instance->drv_buf_alloc);
+ status_reg |= MFI_STATE_CRASH_DUMP_DONE;
+ partial_copy = 1;
+ break;
+ } else {
+ memcpy(instance->crash_buf[instance->drv_buf_index],
+ instance->crash_dump_buf, CRASH_DMA_BUF_SIZE);
+ instance->drv_buf_index++;
+ status_reg &= ~MFI_STATE_DMADONE;
+ }
+
+ writel(status_reg, &instance->reg_set->outbound_scratch_pad_0);
+ readl(&instance->reg_set->outbound_scratch_pad_0);
+
+ msleep(MEGASAS_WAIT_FOR_NEXT_DMA_MSECS);
+ status_reg = instance->instancet->read_fw_status_reg(instance);
}
if (status_reg & MFI_STATE_CRASH_DUMP_DONE) {
@@ -4809,13 +5127,10 @@
instance->fw_crash_buffer_size = instance->drv_buf_index;
instance->fw_crash_state = AVAILABLE;
instance->drv_buf_index = 0;
- writel(status_reg, &instance->reg_set->outbound_scratch_pad);
- readl(&instance->reg_set->outbound_scratch_pad);
+ writel(status_reg, &instance->reg_set->outbound_scratch_pad_0);
+ readl(&instance->reg_set->outbound_scratch_pad_0);
if (!partial_copy)
megasas_reset_fusion(instance->host, 0);
- } else {
- writel(status_reg, &instance->reg_set->outbound_scratch_pad);
- readl(&instance->reg_set->outbound_scratch_pad);
}
}
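The rewritten crash-dump loop above polls the scratch-pad register instead of re-scheduling a work item per chunk: firmware raises DMADONE for each 1 MB chunk, the driver copies it into the next host buffer and clears DMADONE, and the loop ends when firmware raises CRASH_DUMP_DONE or the host buffers run out. A stand-alone model of that handshake (chunk and buffer counts are invented for illustration):

	/* User-space model of the DMADONE / CRASH_DUMP_DONE handshake. */
	#include <stdio.h>
	#include <stdbool.h>

	#define TOTAL_CHUNKS 5   /* chunks the firmware wants to dump (assumed) */
	#define HOST_BUFFERS 8   /* drv_buf_alloc equivalent (assumed)          */

	int main(void)
	{
		int posted = 0, copied = 0;
		bool dma_done = false, dump_done = false;

		while (!dump_done) {
			if (!dma_done) {			/* firmware side */
				if (posted == TOTAL_CHUNKS) {
					dump_done = true;	/* CRASH_DUMP_DONE */
					continue;
				}
				posted++;
				dma_done = true;		/* DMADONE */
			} else if (copied < HOST_BUFFERS) {	/* driver side */
				copied++;			/* copy one 1 MB chunk */
				dma_done = false;		/* ack: clear DMADONE */
			} else {
				dump_done = true;		/* host buffers exhausted */
			}
		}
		printf("copied %d of %d chunks\n", copied, TOTAL_CHUNKS);
		return 0;
	}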
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index 8e5ebee..c013c80 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -1,34 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Linux MegaRAID driver for SAS based RAID controllers
*
* Copyright (c) 2009-2013 LSI Corporation
- * Copyright (c) 2013-2014 Avago Technologies
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ * Copyright (c) 2013-2016 Avago Technologies
+ * Copyright (c) 2016-2018 Broadcom Inc.
*
* FILE: megaraid_sas_fusion.h
*
- * Authors: Avago Technologies
+ * Authors: Broadcom Inc.
* Manoj Jose
* Sumant Patro
- * Kashyap Desai <kashyap.desai@avagotech.com>
- * Sumit Saxena <sumit.saxena@avagotech.com>
+ * Kashyap Desai <kashyap.desai@broadcom.com>
+ * Sumit Saxena <sumit.saxena@broadcom.com>
*
- * Send feedback to: megaraidlinux.pdl@avagotech.com
- *
- * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
- * San Jose, California 95131
+ * Send feedback to: megaraidlinux.pdl@broadcom.com
*/
#ifndef _MEGARAID_SAS_FUSION_H_
@@ -89,7 +75,8 @@
MR_RAID_FLAGS_IO_SUB_TYPE_RMW_P = 3,
MR_RAID_FLAGS_IO_SUB_TYPE_RMW_Q = 4,
MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS = 6,
- MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT = 7
+ MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT = 7,
+ MR_RAID_FLAGS_IO_SUB_TYPE_R56_DIV_OFFLOAD = 8
};
/*
@@ -102,7 +89,6 @@
#define MEGASAS_FP_CMD_LEN 16
#define MEGASAS_FUSION_IN_RESET 0
-#define THRESHOLD_REPLY_COUNT 50
#define RAID_1_PEER_CMDS 2
#define JBOD_MAPS_COUNT 2
#define MEGASAS_REDUCE_QD_COUNT 64
@@ -154,12 +140,15 @@
u16 timeout_value; /* 0x02 -0x03 */
u16 routing_flags; // 0x04 -0x05 routing flags
u16 virtual_disk_tgt_id; /* 0x06 -0x07 */
- u64 reg_lock_row_lba; /* 0x08 - 0x0F */
+ __le64 reg_lock_row_lba; /* 0x08 - 0x0F */
u32 reg_lock_length; /* 0x10 - 0x13 */
- union {
- u16 next_lmid; /* 0x14 - 0x15 */
- u16 peer_smid; /* used for the raid 1/10 fp writes */
- } smid;
+ union { // flow specific
+ u16 rmw_op_index; /* 0x14 - 0x15, R5/6 RMW: rmw operation index*/
+ u16 peer_smid; /* 0x14 - 0x15, R1 Write: peer smid*/
+ u16 r56_arm_map; /* 0x14 - 0x15, Unused [15], LogArm[14:10], P-Arm[9:5], Q-Arm[4:0] */
+
+ } flow_specific;
+
u8 ex_status; /* 0x16 : OUT */
u8 status; /* 0x17 status */
u8 raid_flags; /* 0x18 resvd[7:6], ioSubType[5:4],
@@ -250,6 +239,13 @@
#define RAID_CTX_SPANARM_SPAN_SHIFT (5)
#define RAID_CTX_SPANARM_SPAN_MASK (0xE0)
+/* LogArm[14:10], P-Arm[9:5], Q-Arm[4:0] */
+#define RAID_CTX_R56_Q_ARM_MASK (0x1F)
+#define RAID_CTX_R56_P_ARM_SHIFT (5)
+#define RAID_CTX_R56_P_ARM_MASK (0x3E0)
+#define RAID_CTX_R56_LOG_ARM_SHIFT (10)
+#define RAID_CTX_R56_LOG_ARM_MASK (0x7C00)
+
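For reference, the new r56_arm_map field can be unpacked with the masks just defined; a small stand-alone decoder (the sample value is invented for illustration) looks like this:

	/* Decode LogArm[14:10], P-Arm[9:5], Q-Arm[4:0] from r56_arm_map. */
	#include <stdio.h>

	#define RAID_CTX_R56_Q_ARM_MASK    (0x1F)
	#define RAID_CTX_R56_P_ARM_SHIFT   (5)
	#define RAID_CTX_R56_P_ARM_MASK    (0x3E0)
	#define RAID_CTX_R56_LOG_ARM_SHIFT (10)
	#define RAID_CTX_R56_LOG_ARM_MASK  (0x7C00)

	int main(void)
	{
		unsigned int r56_arm_map = (3 << RAID_CTX_R56_LOG_ARM_SHIFT) |
					   (2 << RAID_CTX_R56_P_ARM_SHIFT) | 1;

		printf("log arm %u, P arm %u, Q arm %u\n",
		       (r56_arm_map & RAID_CTX_R56_LOG_ARM_MASK) >> RAID_CTX_R56_LOG_ARM_SHIFT,
		       (r56_arm_map & RAID_CTX_R56_P_ARM_MASK) >> RAID_CTX_R56_P_ARM_SHIFT,
		       r56_arm_map & RAID_CTX_R56_Q_ARM_MASK);
		return 0;
	}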
/* number of bits per index in U32 TrackStream */
#define BITS_PER_INDEX_STREAM 4
#define INVALID_STREAM_NUM 16
@@ -725,6 +721,8 @@
#define MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC 0x010e8485 /* SR-IOV HB alloc*/
#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111 0x03200200
#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS 0x03150200
+#define MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES 0x01200100
+#define MR_DCMD_CTRL_DEVICE_LIST_GET 0x01190600
struct MR_DEV_HANDLE_INFO {
__le16 curDevHdl;
@@ -952,6 +950,7 @@
u8 pd_after_lb;
u16 r1_alt_dev_handle; /* raid 1/10 only */
bool ra_capable;
+ u8 data_arms;
};
struct MR_LD_TARGET_SYNC {
@@ -1063,6 +1062,9 @@
#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP (0x08)
#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_SGL (0x10)
+#define MEGASAS_DEFAULT_SNAP_DUMP_WAIT_TIME 15
+#define MEGASAS_MAX_SNAP_DUMP_WAIT_TIME 60
+
struct megasas_register_set;
struct megasas_instance;
@@ -1333,7 +1335,8 @@
dma_addr_t ioc_init_request_phys;
struct MPI2_IOC_INIT_REQUEST *ioc_init_request;
struct megasas_cmd *ioc_init_cmd;
-
+ bool pcie_bw_limitation;
+ bool r56_div_offload;
};
union desc_value {
@@ -1350,6 +1353,19 @@
RETURN_CMD = 3,
};
+struct MR_SNAPDUMP_PROPERTIES {
+ u8 offload_num;
+ u8 max_num_supported;
+ u8 cur_num_supported;
+ u8 trigger_min_num_sec_before_ocr;
+ u8 reserved[12];
+};
+
+struct megasas_debugfs_buffer {
+ void *buf;
+ u32 len;
+};
+
void megasas_free_cmds_fusion(struct megasas_instance *instance);
int megasas_ioc_init_fusion(struct megasas_instance *instance);
u8 megasas_get_map_info(struct megasas_instance *instance);