Update Linux to v5.4.2

For drivers/scsi/pmcraid.c this update picks up the upstream conversion
from the legacy pci_*_consistent()/pci_{map,unmap}_sg() DMA interfaces
to the generic DMA API (dma_alloc_coherent, dma_free_coherent,
dma_map_sg, dma_unmap_sg, dma_set_mask, dma_set_coherent_mask),
replaces the GPL boilerplate with an SPDX license identifier, maps the
midlayer sense buffer with dma_map_single() for request sense instead
of allocating a separate coherent buffer and copying it back, walks
scatterlists with sg_next() rather than by array index, and drops the
obsolete VERIFY_READ/VERIFY_WRITE access flags and the use_clustering
host template field, along with small cleanups such as bool flags and a
comment typo fix.
Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
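
Reviewer aid, not part of the patch: a minimal sketch of the coherent
allocation conversion the hunks below apply throughout the driver. The
names example_alloc, example_free, pdev, buf, buf_dma and len are
hypothetical placeholders; GFP_KERNEL is the allocation context the
patch uses at every converted call site.

    #include <linux/pci.h>
    #include <linux/dma-mapping.h>

    static void *example_alloc(struct pci_dev *pdev, size_t len,
                               dma_addr_t *buf_dma)
    {
            /* was: pci_alloc_consistent(pdev, len, buf_dma) */
            return dma_alloc_coherent(&pdev->dev, len, buf_dma, GFP_KERNEL);
    }

    static void example_free(struct pci_dev *pdev, size_t len, void *buf,
                             dma_addr_t buf_dma)
    {
            /* was: pci_free_consistent(pdev, len, buf, buf_dma) */
            dma_free_coherent(&pdev->dev, len, buf, buf_dma);
    }
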
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 4e86994..398d2af 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* pmcraid.c -- driver for PMC Sierra MaxRAID controller adapters
*
@@ -5,22 +6,6 @@
* PMC-Sierra Inc
*
* Copyright (C) 2008, 2009 PMC Sierra Inc
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307,
- * USA
- *
*/
#include <linux/fs.h>
#include <linux/init.h>
@@ -846,16 +831,9 @@
cmd->ioa_cb->ioarcb.cdb[0], ioasc);
}
- /* if we had allocated sense buffers for request sense, copy the sense
- * release the buffers
- */
- if (cmd->sense_buffer != NULL) {
- memcpy(scsi_cmd->sense_buffer,
- cmd->sense_buffer,
- SCSI_SENSE_BUFFERSIZE);
- pci_free_consistent(pinstance->pdev,
- SCSI_SENSE_BUFFERSIZE,
- cmd->sense_buffer, cmd->sense_buffer_dma);
+ if (cmd->sense_buffer) {
+ dma_unmap_single(&pinstance->pdev->dev, cmd->sense_buffer_dma,
+ SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
cmd->sense_buffer = NULL;
cmd->sense_buffer_dma = 0;
}
@@ -2444,13 +2422,12 @@
{
struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
+ struct device *dev = &cmd->drv_inst->pdev->dev;
- /* allocate DMAable memory for sense buffers */
- cmd->sense_buffer = pci_alloc_consistent(cmd->drv_inst->pdev,
- SCSI_SENSE_BUFFERSIZE,
- &cmd->sense_buffer_dma);
-
- if (cmd->sense_buffer == NULL) {
+ cmd->sense_buffer = cmd->scsi_cmd->sense_buffer;
+ cmd->sense_buffer_dma = dma_map_single(dev, cmd->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, cmd->sense_buffer_dma)) {
pmcraid_err
("couldn't allocate sense buffer for request sense\n");
pmcraid_erp_done(cmd);
@@ -2491,17 +2468,15 @@
/**
* pmcraid_cancel_all - cancel all outstanding IOARCBs as part of error recovery
* @cmd: command that failed
- * @sense: true if request_sense is required after cancel all
+ * @need_sense: true if request_sense is required after cancel all
*
* This function sends a cancel all to a device to clear the queue.
*/
-static void pmcraid_cancel_all(struct pmcraid_cmd *cmd, u32 sense)
+static void pmcraid_cancel_all(struct pmcraid_cmd *cmd, bool need_sense)
{
struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
struct pmcraid_resource_entry *res = scsi_cmd->device->hostdata;
- void (*cmd_done) (struct pmcraid_cmd *) = sense ? pmcraid_erp_done
- : pmcraid_request_sense;
memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN);
ioarcb->request_flags0 = SYNC_OVERRIDE;
@@ -2519,7 +2494,8 @@
/* writing to IOARRIN must be protected by host_lock, as mid-layer
* schedule queuecommand while we are doing this
*/
- pmcraid_send_cmd(cmd, cmd_done,
+ pmcraid_send_cmd(cmd, need_sense ?
+ pmcraid_erp_done : pmcraid_request_sense,
PMCRAID_REQUEST_SENSE_TIMEOUT,
pmcraid_timeout_handler);
}
@@ -2612,7 +2588,7 @@
struct pmcraid_ioasa *ioasa = &cmd->ioa_cb->ioasa;
u32 ioasc = le32_to_cpu(ioasa->ioasc);
u32 masked_ioasc = ioasc & PMCRAID_IOASC_SENSE_MASK;
- u32 sense_copied = 0;
+ bool sense_copied = false;
if (!res) {
pmcraid_info("resource pointer is NULL\n");
@@ -2684,7 +2660,7 @@
memcpy(scsi_cmd->sense_buffer,
ioasa->sense_data,
data_size);
- sense_copied = 1;
+ sense_copied = true;
}
if (RES_IS_GSCSI(res->cfg_entry))
@@ -3279,7 +3255,7 @@
int direction
)
{
- struct scatterlist *scatterlist;
+ struct scatterlist *sg;
void *kaddr;
int bsize_elem;
int i;
@@ -3288,10 +3264,10 @@
/* Determine the actual number of bytes per element */
bsize_elem = PAGE_SIZE * (1 << sglist->order);
- scatterlist = sglist->scatterlist;
+ sg = sglist->scatterlist;
- for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
- struct page *page = sg_page(&scatterlist[i]);
+ for (i = 0; i < (len / bsize_elem); i++, sg = sg_next(sg), buffer += bsize_elem) {
+ struct page *page = sg_page(sg);
kaddr = kmap(page);
if (direction == DMA_TO_DEVICE)
@@ -3306,11 +3282,11 @@
return -EFAULT;
}
- scatterlist[i].length = bsize_elem;
+ sg->length = bsize_elem;
}
if (len % bsize_elem) {
- struct page *page = sg_page(&scatterlist[i]);
+ struct page *page = sg_page(sg);
kaddr = kmap(page);
@@ -3321,7 +3297,7 @@
kunmap(page);
- scatterlist[i].length = len % bsize_elem;
+ sg->length = len % bsize_elem;
}
if (rc) {
@@ -3523,7 +3499,7 @@
return -ENOMEM;
}
- sglist->num_dma_sg = pci_map_sg(cmd->drv_inst->pdev,
+ sglist->num_dma_sg = dma_map_sg(&cmd->drv_inst->pdev->dev,
sglist->scatterlist,
sglist->num_sg, direction);
@@ -3572,7 +3548,7 @@
struct pmcraid_sglist *sglist = cmd->sglist;
if (buflen > 0) {
- pci_unmap_sg(cmd->drv_inst->pdev,
+ dma_unmap_sg(&cmd->drv_inst->pdev->dev,
sglist->scatterlist,
sglist->num_sg,
direction);
@@ -3609,7 +3585,7 @@
u32 ioasc;
int request_size;
int buffer_size;
- u8 access, direction;
+ u8 direction;
int rc = 0;
/* If IOA reset is in progress, wait 10 secs for reset to complete */
@@ -3658,10 +3634,8 @@
request_size = le32_to_cpu(buffer->ioarcb.data_transfer_length);
if (buffer->ioarcb.request_flags0 & TRANSFER_DIR_WRITE) {
- access = VERIFY_READ;
direction = DMA_TO_DEVICE;
} else {
- access = VERIFY_WRITE;
direction = DMA_FROM_DEVICE;
}
@@ -4158,7 +4132,6 @@
.max_sectors = PMCRAID_IOA_MAX_SECTORS,
.no_write_same = 1,
.cmd_per_lun = PMCRAID_MAX_CMD_PER_LUN,
- .use_clustering = ENABLE_CLUSTERING,
.shost_attrs = pmcraid_host_attrs,
.proc_name = PMCRAID_DRIVER_NAME,
};
@@ -4708,9 +4681,9 @@
pmcraid_release_host_rrqs(struct pmcraid_instance *pinstance, int maxindex)
{
int i;
- for (i = 0; i < maxindex; i++) {
- pci_free_consistent(pinstance->pdev,
+ for (i = 0; i < maxindex; i++) {
+ dma_free_coherent(&pinstance->pdev->dev,
HRRQ_ENTRY_SIZE * PMCRAID_MAX_CMD,
pinstance->hrrq_start[i],
pinstance->hrrq_start_bus_addr[i]);
@@ -4737,11 +4710,9 @@
for (i = 0; i < pinstance->num_hrrq; i++) {
pinstance->hrrq_start[i] =
- pci_alloc_consistent(
- pinstance->pdev,
- buffer_size,
- &(pinstance->hrrq_start_bus_addr[i]));
-
+ dma_alloc_coherent(&pinstance->pdev->dev, buffer_size,
+ &pinstance->hrrq_start_bus_addr[i],
+ GFP_KERNEL);
if (!pinstance->hrrq_start[i]) {
pmcraid_err("pci_alloc failed for hrrq vector : %d\n",
i);
@@ -4770,7 +4741,7 @@
static void pmcraid_release_hcams(struct pmcraid_instance *pinstance)
{
if (pinstance->ccn.msg != NULL) {
- pci_free_consistent(pinstance->pdev,
+ dma_free_coherent(&pinstance->pdev->dev,
PMCRAID_AEN_HDR_SIZE +
sizeof(struct pmcraid_hcam_ccn_ext),
pinstance->ccn.msg,
@@ -4782,7 +4753,7 @@
}
if (pinstance->ldn.msg != NULL) {
- pci_free_consistent(pinstance->pdev,
+ dma_free_coherent(&pinstance->pdev->dev,
PMCRAID_AEN_HDR_SIZE +
sizeof(struct pmcraid_hcam_ldn),
pinstance->ldn.msg,
@@ -4803,17 +4774,15 @@
*/
static int pmcraid_allocate_hcams(struct pmcraid_instance *pinstance)
{
- pinstance->ccn.msg = pci_alloc_consistent(
- pinstance->pdev,
+ pinstance->ccn.msg = dma_alloc_coherent(&pinstance->pdev->dev,
PMCRAID_AEN_HDR_SIZE +
sizeof(struct pmcraid_hcam_ccn_ext),
- &(pinstance->ccn.baddr));
+ &pinstance->ccn.baddr, GFP_KERNEL);
- pinstance->ldn.msg = pci_alloc_consistent(
- pinstance->pdev,
+ pinstance->ldn.msg = dma_alloc_coherent(&pinstance->pdev->dev,
PMCRAID_AEN_HDR_SIZE +
sizeof(struct pmcraid_hcam_ldn),
- &(pinstance->ldn.baddr));
+ &pinstance->ldn.baddr, GFP_KERNEL);
if (pinstance->ldn.msg == NULL || pinstance->ccn.msg == NULL) {
pmcraid_release_hcams(pinstance);
@@ -4841,7 +4810,7 @@
{
if (pinstance->cfg_table != NULL &&
pinstance->cfg_table_bus_addr != 0) {
- pci_free_consistent(pinstance->pdev,
+ dma_free_coherent(&pinstance->pdev->dev,
sizeof(struct pmcraid_config_table),
pinstance->cfg_table,
pinstance->cfg_table_bus_addr);
@@ -4886,10 +4855,10 @@
list_add_tail(&pinstance->res_entries[i].queue,
&pinstance->free_res_q);
- pinstance->cfg_table =
- pci_alloc_consistent(pinstance->pdev,
+ pinstance->cfg_table = dma_alloc_coherent(&pinstance->pdev->dev,
sizeof(struct pmcraid_config_table),
- &pinstance->cfg_table_bus_addr);
+ &pinstance->cfg_table_bus_addr,
+ GFP_KERNEL);
if (NULL == pinstance->cfg_table) {
pmcraid_err("couldn't alloc DMA memory for config table\n");
@@ -4954,7 +4923,7 @@
pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
if (pinstance->inq_data != NULL) {
- pci_free_consistent(pinstance->pdev,
+ dma_free_coherent(&pinstance->pdev->dev,
sizeof(struct pmcraid_inquiry_data),
pinstance->inq_data,
pinstance->inq_data_baddr);
@@ -4964,7 +4933,7 @@
}
if (pinstance->timestamp_data != NULL) {
- pci_free_consistent(pinstance->pdev,
+ dma_free_coherent(&pinstance->pdev->dev,
sizeof(struct pmcraid_timestamp_data),
pinstance->timestamp_data,
pinstance->timestamp_data_baddr);
@@ -4981,8 +4950,8 @@
* This routine pre-allocates memory based on the type of block as below:
* cmdblocks(PMCRAID_MAX_CMD): kernel memory using kernel's slab_allocator,
* IOARCBs(PMCRAID_MAX_CMD) : DMAable memory, using pci pool allocator
- * config-table entries : DMAable memory using pci_alloc_consistent
- * HostRRQs : DMAable memory, using pci_alloc_consistent
+ * config-table entries : DMAable memory using dma_alloc_coherent
+ * HostRRQs : DMAable memory, using dma_alloc_coherent
*
* Return Value
* 0 in case all of the blocks are allocated, -ENOMEM otherwise.
@@ -5019,11 +4988,9 @@
}
/* allocate DMAable memory for page D0 INQUIRY buffer */
- pinstance->inq_data = pci_alloc_consistent(
- pinstance->pdev,
+ pinstance->inq_data = dma_alloc_coherent(&pinstance->pdev->dev,
sizeof(struct pmcraid_inquiry_data),
- &pinstance->inq_data_baddr);
-
+ &pinstance->inq_data_baddr, GFP_KERNEL);
if (pinstance->inq_data == NULL) {
pmcraid_err("couldn't allocate DMA memory for INQUIRY\n");
pmcraid_release_buffers(pinstance);
@@ -5031,11 +4998,10 @@
}
/* allocate DMAable memory for set timestamp data buffer */
- pinstance->timestamp_data = pci_alloc_consistent(
- pinstance->pdev,
+ pinstance->timestamp_data = dma_alloc_coherent(&pinstance->pdev->dev,
sizeof(struct pmcraid_timestamp_data),
- &pinstance->timestamp_data_baddr);
-
+ &pinstance->timestamp_data_baddr,
+ GFP_KERNEL);
if (pinstance->timestamp_data == NULL) {
pmcraid_err("couldn't allocate DMA memory for \
set time_stamp \n");
@@ -5324,12 +5290,12 @@
pci_set_master(pdev);
- if ((sizeof(dma_addr_t) == 4) ||
- pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (sizeof(dma_addr_t) == 4 ||
+ dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
+ rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (rc == 0)
- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (rc != 0) {
dev_err(&pdev->dev, "resume: Failed to set PCI DMA mask\n");
@@ -5733,19 +5699,19 @@
/* Firmware requires the system bus address of IOARCB to be within
* 32-bit addressable range though it has 64-bit IOARRIN register.
* However, firmware supports 64-bit streaming DMA buffers, whereas
- * coherent buffers are to be 32-bit. Since pci_alloc_consistent always
+ * coherent buffers are to be 32-bit. Since dma_alloc_coherent always
* returns memory within 4GB (if not, change this logic), coherent
* buffers are within firmware acceptable address ranges.
*/
- if ((sizeof(dma_addr_t) == 4) ||
- pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (sizeof(dma_addr_t) == 4 ||
+ dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
+ rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
/* firmware expects 32-bit DMA addresses for IOARRIN register; set 32
- * bit mask for pci_alloc_consistent to return addresses within 4GB
+ * bit mask for dma_alloc_coherent to return addresses within 4GB
*/
if (rc == 0)
- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (rc != 0) {
dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
@@ -5875,7 +5841,7 @@
}
/*
- * PCI driver structure of pcmraid driver
+ * PCI driver structure of pmcraid driver
*/
static struct pci_driver pmcraid_driver = {
.name = PMCRAID_DRIVER_NAME,