Update Linux to v5.10.109
Sourced from [1]
[1] https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.109.tar.xz
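
Notable lpfc (drivers/scsi/lpfc/lpfc_init.c) changes in this drop:

- Error-path lpfc_printf_log()/lpfc_printf_vlog() calls are
  reclassified from LOG_INIT/LOG_SLI/LOG_MBOX and friends to
  LOG_TRACE_EVENT, the class used by the new internal trace log
  buffer (see the dbg_log_* counters initialised below).
- The VPD dump copy loop converts varDmp.word_cnt from 32-bit words
  to bytes before clamping against DMP_VPD_SIZE, fixing a word/byte
  unit mismatch; the separate LUN queue depth clamp is dropped and
  cfg_hba_queue_depth is clamped directly to max_xri.
- A new delayed work, lpfc_idle_stat_delay_work(), samples per-CPU
  idle time and switches each IO CQ between LPFC_QUEUE_WORK and
  LPFC_IRQ_POLL; lpfc_hb_eq_delay_work() is reworked around a
  per-CPU ena_delay[] array.
- struct lpfc_nvmet_rcv_ctx becomes struct lpfc_async_xchg_ctx, the
  LPFC_NVMET_* flags become LPFC_NVME_*, and the txrdy_payload_pool
  is removed.
- SCSI host templates are seeded per HBA (phba->port_template and
  phba->vport_template) instead of patching the global templates.
- lpfc_map_topology() consumes the persistent topology bits (ptv,
  tf, pt) returned by READ_CONFIG; a 100G async link speed case and
  the FA-WWN misconfiguration and EEPROM failure SLI events are
  handled; the async event work queue gets its own asynce_list_lock.

For reference, the corrected VPD clamp amounts to the following (a
minimal, standalone C sketch, not driver code: the DMP_VPD_SIZE value
and the copy_vpd_chunk() helper are illustrative, and memcpy() stands
in for the driver's lpfc_sli_pcimem_bcopy()):

  #include <stdint.h>
  #include <string.h>

  #define DMP_VPD_SIZE 1024   /* assumed buffer size for the sketch */

  /* word_cnt counts 32-bit words, so convert it to bytes before
   * comparing it with the byte offset; the old code mixed word and
   * byte units in this clamp.
   */
  static size_t copy_vpd_chunk(uint8_t *dst, size_t offset,
                               const uint8_t *src, uint32_t word_cnt)
  {
          size_t len = (size_t)word_cnt * sizeof(uint32_t);

          if (offset + len > DMP_VPD_SIZE)
                  len = DMP_VPD_SIZE - offset;   /* clamp to space left */

          memcpy(dst + offset, src, len);
          return offset + len;                   /* next byte offset */
  }
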
Change-Id: I19bca9fc6762d4e63bcf3e4cba88bbe560d9c76c
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index b5cee2a..1149bfc 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -40,6 +40,7 @@
#include <linux/irq.h>
#include <linux/bitops.h>
#include <linux/crash_dump.h>
+#include <linux/cpu.h>
#include <linux/cpuhotplug.h>
#include <scsi/scsi.h>
@@ -49,8 +50,6 @@
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_fs.h>
-#include <linux/nvme-fc-driver.h>
-
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
@@ -60,7 +59,6 @@
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
-#include "lpfc_nvmet.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
@@ -155,7 +153,7 @@
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
if (rc != MBX_SUCCESS) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0324 Config Port initialization "
"error, mbxCmd x%x READ_NVPARM, "
"mbxStatus x%x\n",
@@ -179,7 +177,7 @@
lpfc_read_rev(phba, pmb);
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
if (rc != MBX_SUCCESS) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0439 Adapter failed to init, mbxCmd x%x "
"READ_REV, mbxStatus x%x\n",
mb->mbxCommand, mb->mbxStatus);
@@ -194,7 +192,7 @@
*/
if (mb->un.varRdRev.rr == 0) {
vp->rev.rBit = 0;
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0440 Adapter failed to init, READ_REV has "
"missing revision information.\n");
mempool_free(pmb, phba->mbox_mem_pool);
@@ -255,13 +253,15 @@
*/
if (mb->un.varDmp.word_cnt == 0)
break;
- if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
- mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
+
+ i = mb->un.varDmp.word_cnt * sizeof(uint32_t);
+ if (offset + i > DMP_VPD_SIZE)
+ i = DMP_VPD_SIZE - offset;
lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
- lpfc_vpd_data + offset,
- mb->un.varDmp.word_cnt);
- offset += mb->un.varDmp.word_cnt;
- } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
+ lpfc_vpd_data + offset, i);
+ offset += i;
+ } while (offset < DMP_VPD_SIZE);
+
lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
kfree(lpfc_vpd_data);
@@ -444,7 +444,7 @@
pmb->vport = vport;
if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0448 Adapter failed init, mbxCmd x%x "
"READ_SPARM mbxStatus x%x\n",
mb->mbxCommand, mb->mbxStatus);
@@ -498,7 +498,7 @@
lpfc_read_config(phba, pmb);
pmb->vport = vport;
if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0453 Adapter failed to init, mbxCmd x%x "
"READ_CONFIG, mbxStatus x%x\n",
mb->mbxCommand, mb->mbxStatus);
@@ -511,21 +511,12 @@
lpfc_sli_read_link_ste(phba);
/* Reset the DFT_HBA_Q_DEPTH to the max xri */
- i = (mb->un.varRdConfig.max_xri + 1);
- if (phba->cfg_hba_queue_depth > i) {
+ if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"3359 HBA queue depth changed from %d to %d\n",
- phba->cfg_hba_queue_depth, i);
- phba->cfg_hba_queue_depth = i;
- }
-
- /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
- i = (mb->un.varRdConfig.max_xri >> 3);
- if (phba->pport->cfg_lun_queue_depth > i) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
- "3360 LUN queue depth changed from %d to %d\n",
- phba->pport->cfg_lun_queue_depth, i);
- phba->pport->cfg_lun_queue_depth = i;
+ phba->cfg_hba_queue_depth,
+ mb->un.varRdConfig.max_xri);
+ phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri;
}
phba->lmt = mb->un.varRdConfig.lmt;
@@ -556,7 +547,7 @@
}
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
if (rc != MBX_SUCCESS) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0352 Config MSI mailbox command "
"failed, mbxCmd x%x, mbxStatus x%x\n",
pmb->u.mb.mbxCommand,
@@ -607,17 +598,15 @@
jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
if (phba->hba_flag & LINK_DISABLED) {
- lpfc_printf_log(phba,
- KERN_ERR, LOG_INIT,
- "2598 Adapter Link is disabled.\n");
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2598 Adapter Link is disabled.\n");
lpfc_down_link(phba, pmb);
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
- lpfc_printf_log(phba,
- KERN_ERR, LOG_INIT,
- "2599 Adapter failed to issue DOWN_LINK"
- " mbox command rc 0x%x\n", rc);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2599 Adapter failed to issue DOWN_LINK"
+ " mbox command rc 0x%x\n", rc);
mempool_free(pmb, phba->mbox_mem_pool);
return -EIO;
@@ -641,9 +630,7 @@
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0456 Adapter failed to issue "
"ASYNCEVT_ENABLE mbox status x%x\n",
rc);
@@ -663,7 +650,8 @@
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "0435 Adapter failed "
"to get Option ROM version status x%x\n", rc);
mempool_free(pmb, phba->mbox_mem_pool);
}
@@ -741,10 +729,10 @@
((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
!(phba->lmt & LMT_64Gb))) {
/* Reset link speed to auto */
- lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
- "1302 Invalid speed for this board:%d "
- "Reset link speed to auto.\n",
- phba->cfg_link_speed);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "1302 Invalid speed for this board:%d "
+ "Reset link speed to auto.\n",
+ phba->cfg_link_speed);
phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
}
lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
@@ -753,10 +741,10 @@
lpfc_set_loopback_flag(phba);
rc = lpfc_sli_issue_mbox(phba, pmb, flag);
if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0498 Adapter failed to init, mbxCmd x%x "
- "INIT_LINK, mbxStatus x%x\n",
- mb->mbxCommand, mb->mbxStatus);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "0498 Adapter failed to init, mbxCmd x%x "
+ "INIT_LINK, mbxStatus x%x\n",
+ mb->mbxCommand, mb->mbxStatus);
if (phba->sli_rev <= LPFC_SLI_REV3) {
/* Clear all interrupt enable conditions */
writel(0, phba->HCregaddr);
@@ -802,17 +790,15 @@
return -ENOMEM;
}
- lpfc_printf_log(phba,
- KERN_ERR, LOG_INIT,
- "0491 Adapter Link is disabled.\n");
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "0491 Adapter Link is disabled.\n");
lpfc_down_link(phba, pmb);
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
rc = lpfc_sli_issue_mbox(phba, pmb, flag);
if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
- lpfc_printf_log(phba,
- KERN_ERR, LOG_INIT,
- "2522 Adapter failed to issue DOWN_LINK"
- " mbox command rc 0x%x\n", rc);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2522 Adapter failed to issue DOWN_LINK"
+ " mbox command rc 0x%x\n", rc);
mempool_free(pmb, phba->mbox_mem_pool);
return -EIO;
@@ -1007,7 +993,6 @@
/**
* lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
- int i;
* @phba: pointer to lpfc HBA data structure.
*
* This routine will do uninitialization after the HBA is reset when bring
@@ -1040,7 +1025,7 @@
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
struct lpfc_io_buf *psb, *psb_next;
- struct lpfc_nvmet_rcv_ctx *ctxp, *ctxp_next;
+ struct lpfc_async_xchg_ctx *ctxp, *ctxp_next;
struct lpfc_sli4_hdw_queue *qp;
LIST_HEAD(aborts);
LIST_HEAD(nvme_aborts);
@@ -1107,7 +1092,7 @@
&nvmet_aborts);
spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
- ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP);
+ ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP);
lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
}
}
@@ -1135,7 +1120,7 @@
/**
* lpfc_hb_timeout - The HBA-timer timeout handler
- * @ptr: unsigned long holds the pointer to lpfc hba data structure.
+ * @t: timer context used to obtain the pointer to lpfc hba data structure.
*
* This is the HBA-timer timeout handler registered to the lpfc driver. When
* this timer fires, a HBA timeout event shall be posted to the lpfc driver
@@ -1169,7 +1154,7 @@
/**
* lpfc_rrq_timeout - The RRQ-timer timeout handler
- * @ptr: unsigned long holds the pointer to lpfc hba data structure.
+ * @t: timer context used to obtain the pointer to lpfc hba data structure.
*
* This is the RRQ-timer timeout handler registered to the lpfc driver. When
* this timer fires, a RRQ timeout event shall be posted to the lpfc driver
@@ -1233,6 +1218,75 @@
return;
}
+/*
+ * lpfc_idle_stat_delay_work - idle_stat tracking
+ *
+ * This routine tracks per-cq idle_stat and determines polling decisions.
+ *
+ * Return codes:
+ * None
+ **/
+static void
+lpfc_idle_stat_delay_work(struct work_struct *work)
+{
+ struct lpfc_hba *phba = container_of(to_delayed_work(work),
+ struct lpfc_hba,
+ idle_stat_delay_work);
+ struct lpfc_queue *cq;
+ struct lpfc_sli4_hdw_queue *hdwq;
+ struct lpfc_idle_stat *idle_stat;
+ u32 i, idle_percent;
+ u64 wall, wall_idle, diff_wall, diff_idle, busy_time;
+
+ if (phba->pport->load_flag & FC_UNLOADING)
+ return;
+
+ if (phba->link_state == LPFC_HBA_ERROR ||
+ phba->pport->fc_flag & FC_OFFLINE_MODE)
+ goto requeue;
+
+ for_each_present_cpu(i) {
+ hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
+ cq = hdwq->io_cq;
+
+ /* Skip if we've already handled this cq's primary CPU */
+ if (cq->chann != i)
+ continue;
+
+ idle_stat = &phba->sli4_hba.idle_stat[i];
+
+ /* get_cpu_idle_time returns values as running counters. Thus,
+ * to know the amount for this period, the prior counter values
+ * need to be subtracted from the current counter values.
+ * From there, the idle time stat can be calculated as a
+ * percentage of 100 - the sum of the other consumption times.
+ */
+ wall_idle = get_cpu_idle_time(i, &wall, 1);
+ diff_idle = wall_idle - idle_stat->prev_idle;
+ diff_wall = wall - idle_stat->prev_wall;
+
+ if (diff_wall <= diff_idle)
+ busy_time = 0;
+ else
+ busy_time = diff_wall - diff_idle;
+
+ idle_percent = div64_u64(100 * busy_time, diff_wall);
+ idle_percent = 100 - idle_percent;
+
+ if (idle_percent < 15)
+ cq->poll_mode = LPFC_QUEUE_WORK;
+ else
+ cq->poll_mode = LPFC_IRQ_POLL;
+
+ idle_stat->prev_idle = wall_idle;
+ idle_stat->prev_wall = wall;
+ }
+
+requeue:
+ schedule_delayed_work(&phba->idle_stat_delay_work,
+ msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
+}
+
static void
lpfc_hb_eq_delay_work(struct work_struct *work)
{
@@ -1240,10 +1294,9 @@
struct lpfc_hba, eq_delay_work);
struct lpfc_eq_intr_info *eqi, *eqi_new;
struct lpfc_queue *eq, *eq_next;
- unsigned char *eqcnt = NULL;
+ unsigned char *ena_delay = NULL;
uint32_t usdelay;
int i;
- bool update = false;
if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
return;
@@ -1252,44 +1305,36 @@
phba->pport->fc_flag & FC_OFFLINE_MODE)
goto requeue;
- eqcnt = kcalloc(num_possible_cpus(), sizeof(unsigned char),
- GFP_KERNEL);
- if (!eqcnt)
+ ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay),
+ GFP_KERNEL);
+ if (!ena_delay)
goto requeue;
- if (phba->cfg_irq_chann > 1) {
- /* Loop thru all IRQ vectors */
- for (i = 0; i < phba->cfg_irq_chann; i++) {
- /* Get the EQ corresponding to the IRQ vector */
- eq = phba->sli4_hba.hba_eq_hdl[i].eq;
- if (!eq)
- continue;
- if (eq->q_mode) {
- update = true;
- break;
- }
- if (eqcnt[eq->last_cpu] < 2)
- eqcnt[eq->last_cpu]++;
+ for (i = 0; i < phba->cfg_irq_chann; i++) {
+ /* Get the EQ corresponding to the IRQ vector */
+ eq = phba->sli4_hba.hba_eq_hdl[i].eq;
+ if (!eq)
+ continue;
+ if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) {
+ eq->q_flag &= ~HBA_EQ_DELAY_CHK;
+ ena_delay[eq->last_cpu] = 1;
}
- } else
- update = true;
+ }
for_each_present_cpu(i) {
eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
- if (!update && eqcnt[i] < 2) {
- eqi->icnt = 0;
- continue;
+ if (ena_delay[i]) {
+ usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP;
+ if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
+ usdelay = LPFC_MAX_AUTO_EQ_DELAY;
+ } else {
+ usdelay = 0;
}
- usdelay = (eqi->icnt / LPFC_IMAX_THRESHOLD) *
- LPFC_EQ_DELAY_STEP;
- if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
- usdelay = LPFC_MAX_AUTO_EQ_DELAY;
-
eqi->icnt = 0;
list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
- if (eq->last_cpu != i) {
+ if (unlikely(eq->last_cpu != i)) {
eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
eq->last_cpu);
list_move_tail(&eq->cpu_list, &eqi_new->list);
@@ -1301,7 +1346,7 @@
}
}
- kfree(eqcnt);
+ kfree(ena_delay);
requeue:
queue_delayed_work(phba->wq, &phba->eq_delay_work,
@@ -1558,11 +1603,11 @@
return;
}
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0479 Deferred Adapter Hardware Error "
- "Data: x%x x%x x%x\n",
- phba->work_hs,
- phba->work_status[0], phba->work_status[1]);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "0479 Deferred Adapter Hardware Error "
+ "Data: x%x x%x x%x\n",
+ phba->work_hs, phba->work_status[0],
+ phba->work_status[1]);
spin_lock_irq(&phba->hbalock);
psli->sli_flag &= ~LPFC_SLI_ACTIVE;
@@ -1713,7 +1758,7 @@
temp_event_data.event_code = LPFC_CRIT_TEMP;
temp_event_data.data = (uint32_t)temperature;
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0406 Adapter maximum temperature exceeded "
"(%ld), taking this port offline "
"Data: x%x x%x x%x\n",
@@ -1737,7 +1782,7 @@
* failure is a value other than FFER6. Do not call the offline
* twice. This is the adapter hardware error path.
*/
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0457 Adapter Hardware Error "
"Data: x%x x%x x%x\n",
phba->work_hs,
@@ -1758,7 +1803,7 @@
* lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
* @phba: pointer to lpfc hba data structure.
* @mbx_action: flag for mailbox shutdown action.
- *
+ * @en_rn_msg: send reset/port recovery message.
* This routine is invoked to perform an SLI4 port PCI function reset in
* response to port status register polling attention. It waits for port
* status register (ERR, RDY, RN) bits before proceeding with function reset.
@@ -1785,7 +1830,7 @@
/* need reset: attempt for port recovery */
if (en_rn_msg)
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2887 Reset Needed: Attempting Port "
"Recovery...\n");
lpfc_offline_prep(phba, mbx_action);
@@ -1795,14 +1840,14 @@
lpfc_sli4_disable_intr(phba);
rc = lpfc_sli_brdrestart(phba);
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6309 Failed to restart board\n");
return rc;
}
/* request and enable interrupt */
intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
if (intr_mode == LPFC_INTR_ERROR) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3175 Failed to enable interrupt\n");
return -EIO;
}
@@ -1841,7 +1886,7 @@
* we cannot communicate with the pci card anyway.
*/
if (pci_channel_offline(phba->pcidev)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3166 pci channel is offline\n");
lpfc_sli4_offline_eratt(phba);
return;
@@ -1864,7 +1909,7 @@
lpfc_sli4_offline_eratt(phba);
return;
}
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"7623 Checking UE recoverable");
for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
@@ -1881,7 +1926,7 @@
msleep(1000);
}
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"4827 smphr_port_status x%x : Waited %dSec",
smphr_port_status, i);
@@ -1899,14 +1944,14 @@
LPFC_MBX_NO_WAIT, en_rn_msg);
if (rc == 0)
return;
- lpfc_printf_log(phba,
- KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_TRACE_EVENT,
"4215 Failed to recover UE");
break;
}
}
}
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"7624 Firmware not ready: Failing UE recovery,"
" waited %dSec", i);
phba->link_state = LPFC_HBA_ERROR;
@@ -1919,7 +1964,7 @@
&portstat_reg.word0);
/* consider PCI bus read error as pci_channel_offline */
if (pci_rd_rc1 == -EIO) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3151 PCI bus read access failure: x%x\n",
readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
lpfc_sli4_offline_eratt(phba);
@@ -1928,10 +1973,10 @@
reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2889 Port Overtemperature event, "
- "taking port offline Data: x%x x%x\n",
- reg_err1, reg_err2);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2889 Port Overtemperature event, "
+ "taking port offline Data: x%x x%x\n",
+ reg_err1, reg_err2);
phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
@@ -1953,17 +1998,17 @@
}
if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"3143 Port Down: Firmware Update "
"Detected\n");
en_rn_msg = false;
} else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3144 Port Down: Debug Dump\n");
else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3145 Port Down: Provisioning\n");
/* If resets are disabled then leave the HBA alone and return */
@@ -1982,7 +2027,7 @@
break;
}
/* fall through for not able to recover */
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3152 Unrecoverable error\n");
phba->link_state = LPFC_HBA_ERROR;
break;
@@ -2100,8 +2145,8 @@
lpfc_linkdown(phba);
phba->link_state = LPFC_HBA_ERROR;
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
- "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
return;
}
@@ -2850,12 +2895,13 @@
*/
while (!list_empty(&vport->fc_nodes)) {
if (i++ > 3000) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_TRACE_EVENT,
"0233 Nodelist not empty\n");
list_for_each_entry_safe(ndlp, next_ndlp,
&vport->fc_nodes, nlp_listp) {
lpfc_printf_vlog(ndlp->vport, KERN_ERR,
- LOG_NODE,
+ LOG_TRACE_EVENT,
"0282 did:x%x ndlp:x%px "
"usgmap:x%x refcnt:%d\n",
ndlp->nlp_DID, (void *)ndlp,
@@ -2942,6 +2988,7 @@
if (phba->pport)
lpfc_stop_vport_timers(phba->pport);
cancel_delayed_work_sync(&phba->eq_delay_work);
+ cancel_delayed_work_sync(&phba->idle_stat_delay_work);
del_timer_sync(&phba->sli.mbox_tmo);
del_timer_sync(&phba->fabric_block_timer);
del_timer_sync(&phba->eratt_poll);
@@ -2962,7 +3009,7 @@
lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
break;
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0297 Invalid device group (x%x)\n",
phba->pci_dev_grp);
break;
@@ -2973,6 +3020,7 @@
/**
* lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
* @phba: pointer to lpfc hba data structure.
+ * @mbx_action: flag for mailbox no wait action.
*
* This routine marks a HBA's management interface as blocked. Once the HBA's
* management interface is marked as blocked, all the user space access to
@@ -3009,10 +3057,10 @@
/* Check active mailbox complete status every 2ms */
msleep(2);
if (time_after(jiffies, timeout)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "2813 Mgmt IO is Blocked %x "
- "- mbox cmd %x still active\n",
- phba->sli.sli_flag, actcmd);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2813 Mgmt IO is Blocked %x "
+ "- mbox cmd %x still active\n",
+ phba->sli.sli_flag, actcmd);
break;
}
}
@@ -3058,11 +3106,12 @@
continue;
}
ndlp->nlp_rpi = rpi;
- lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "0009 rpi:%x DID:%x "
- "flg:%x map:%x x%px\n", ndlp->nlp_rpi,
- ndlp->nlp_DID, ndlp->nlp_flag,
- ndlp->nlp_usg_map, ndlp);
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
+ "0009 Assign RPI x%x to ndlp x%px "
+ "DID:x%06x flg:x%x map:x%x\n",
+ ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
+ ndlp->nlp_flag, ndlp->nlp_usg_map);
}
}
lpfc_destroy_vport_work_array(phba, vports);
@@ -3356,7 +3405,7 @@
!phba->nvmet_support) {
error = lpfc_nvme_create_localport(phba->pport);
if (error)
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6132 NVME restore reg failed "
"on nvmei error x%x\n", error);
}
@@ -3422,6 +3471,7 @@
/**
* lpfc_offline_prep - Prepare a HBA to be brought offline
* @phba: pointer to lpfc hba data structure.
+ * @mbx_action: flag for mailbox shutdown action.
*
* This routine is invoked to prepare a HBA to be brought offline. It performs
* unregistration login to all the nodes on all vports and flushes the mailbox
@@ -3460,10 +3510,15 @@
list_for_each_entry_safe(ndlp, next_ndlp,
&vports[i]->fc_nodes,
nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
+ if ((!NLP_CHK_NODE_ACT(ndlp)) ||
+ ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
+ /* Driver must assume RPI is invalid for
+ * any unused or inactive node.
+ */
+ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
continue;
- if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
- continue;
+ }
+
if (ndlp->nlp_type & NLP_FABRIC) {
lpfc_disc_state_machine(vports[i], ndlp,
NULL, NLP_EVT_DEVICE_RECOVERY);
@@ -3479,16 +3534,16 @@
* comes back online.
*/
if (phba->sli_rev == LPFC_SLI_REV4) {
- lpfc_printf_vlog(ndlp->vport,
- KERN_INFO, LOG_NODE,
- "0011 lpfc_offline: "
- "ndlp:x%px did %x "
- "usgmap:x%x rpi:%x\n",
- ndlp, ndlp->nlp_DID,
- ndlp->nlp_usg_map,
- ndlp->nlp_rpi);
-
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
+ "0011 Free RPI x%x on "
+ "ndlp:x%px did x%x "
+ "usgmap:x%x\n",
+ ndlp->nlp_rpi, ndlp,
+ ndlp->nlp_DID,
+ ndlp->nlp_usg_map);
lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
+ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
}
lpfc_unreg_rpi(vports[i], ndlp);
}
@@ -3691,7 +3746,8 @@
sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
GFP_KERNEL);
if (sglq_entry == NULL) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_TRACE_EVENT,
"2562 Failure to allocate an "
"ELS sgl entry:%d\n", i);
rc = -ENOMEM;
@@ -3702,7 +3758,8 @@
&sglq_entry->phys);
if (sglq_entry->virt == NULL) {
kfree(sglq_entry);
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_TRACE_EVENT,
"2563 Failure to allocate an "
"ELS mbuf:%d\n", i);
rc = -ENOMEM;
@@ -3757,7 +3814,8 @@
&phba->sli4_hba.lpfc_els_sgl_list, list) {
lxri = lpfc_sli4_next_xritag(phba);
if (lxri == NO_XRI) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_TRACE_EVENT,
"2400 Failed to allocate xri for "
"ELS sgl\n");
rc = -ENOMEM;
@@ -3812,7 +3870,8 @@
sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
GFP_KERNEL);
if (sglq_entry == NULL) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_TRACE_EVENT,
"6303 Failure to allocate an "
"NVMET sgl entry:%d\n", i);
rc = -ENOMEM;
@@ -3823,7 +3882,8 @@
&sglq_entry->phys);
if (sglq_entry->virt == NULL) {
kfree(sglq_entry);
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_TRACE_EVENT,
"6304 Failure to allocate an "
"NVMET buf:%d\n", i);
rc = -ENOMEM;
@@ -3879,7 +3939,8 @@
&phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
lxri = lpfc_sli4_next_xritag(phba);
if (lxri == NO_XRI) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_TRACE_EVENT,
"6307 Failed to allocate xri for "
"NVMET sgl\n");
rc = -ENOMEM;
@@ -4053,7 +4114,8 @@
&io_sgl_list, list) {
lxri = lpfc_sli4_next_xritag(phba);
if (lxri == NO_XRI) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_TRACE_EVENT,
"6075 Failed to allocate xri for "
"nvme buffer\n");
rc = -ENOMEM;
@@ -4072,8 +4134,8 @@
/**
* lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec
- * @vport: The virtual port for which this call being executed.
- * @num_to_allocate: The requested number of buffers to allocate.
+ * @phba: Pointer to lpfc hba data structure.
+ * @num_to_alloc: The requested number of buffers to allocate.
*
* This routine allocates nvme buffers for device with SLI-4 interface spec,
* the nvme buffer contains all the necessary information needed to initiate
@@ -4123,7 +4185,8 @@
if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
(((unsigned long)(lpfc_ncmd->data) &
(unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_TRACE_EVENT,
"3369 Memory alignment err: "
"addr=%lx\n",
(unsigned long)lpfc_ncmd->data);
@@ -4152,7 +4215,7 @@
dma_pool_free(phba->lpfc_sg_dma_buf_pool,
lpfc_ncmd->data, lpfc_ncmd->dma_handle);
kfree(lpfc_ncmd);
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6121 Failed to allocate IOTAG for"
" XRI:0x%x\n", lxri);
lpfc_sli4_free_xri(phba, lxri);
@@ -4203,7 +4266,7 @@
lpfc_read_nv(phba, mboxq);
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
if (rc != MBX_SUCCESS) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6019 Mailbox failed , mbxCmd x%x "
"READ_NV, mbxStatus x%x\n",
bf_get(lpfc_mqe_command, &mboxq->u.mqe),
@@ -4242,6 +4305,7 @@
{
struct lpfc_vport *vport;
struct Scsi_Host *shost = NULL;
+ struct scsi_host_template *template;
int error = 0;
int i;
uint64_t wwn;
@@ -4262,7 +4326,8 @@
for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
if (wwn == lpfc_no_hba_reset[i]) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_TRACE_EVENT,
"6020 Setting use_no_reset port=%llx\n",
wwn);
use_no_reset_hba = true;
@@ -4270,22 +4335,50 @@
}
}
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
- if (dev != &phba->pcidev->dev) {
- shost = scsi_host_alloc(&lpfc_vport_template,
- sizeof(struct lpfc_vport));
+ /* Seed template for SCSI host registration */
+ if (dev == &phba->pcidev->dev) {
+ template = &phba->port_template;
+
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
+ /* Seed physical port template */
+ memcpy(template, &lpfc_template, sizeof(*template));
+
+ if (use_no_reset_hba) {
+ /* template is for a no reset SCSI Host */
+ template->max_sectors = 0xffff;
+ template->eh_host_reset_handler = NULL;
+ }
+
+ /* Template for all vports this physical port creates */
+ memcpy(&phba->vport_template, &lpfc_template,
+ sizeof(*template));
+ phba->vport_template.max_sectors = 0xffff;
+ phba->vport_template.shost_attrs = lpfc_vport_attrs;
+ phba->vport_template.eh_bus_reset_handler = NULL;
+ phba->vport_template.eh_host_reset_handler = NULL;
+ phba->vport_template.vendor_id = 0;
+
+ /* Initialize the host templates with updated value */
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ template->sg_tablesize = phba->cfg_scsi_seg_cnt;
+ phba->vport_template.sg_tablesize =
+ phba->cfg_scsi_seg_cnt;
+ } else {
+ template->sg_tablesize = phba->cfg_sg_seg_cnt;
+ phba->vport_template.sg_tablesize =
+ phba->cfg_sg_seg_cnt;
+ }
+
} else {
- if (!use_no_reset_hba)
- shost = scsi_host_alloc(&lpfc_template,
- sizeof(struct lpfc_vport));
- else
- shost = scsi_host_alloc(&lpfc_template_no_hr,
- sizeof(struct lpfc_vport));
+ /* NVMET is for physical port only */
+ memcpy(template, &lpfc_template_nvme,
+ sizeof(*template));
}
- } else if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- shost = scsi_host_alloc(&lpfc_template_nvme,
- sizeof(struct lpfc_vport));
+ } else {
+ template = &phba->vport_template;
}
+
+ shost = scsi_host_alloc(template, sizeof(struct lpfc_vport));
if (!shost)
goto out;
@@ -4340,6 +4433,12 @@
vport->port_type = LPFC_PHYSICAL_PORT;
}
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
+ "9081 CreatePort TMPLATE type %x TBLsize %d "
+ "SEGcnt %d/%d\n",
+ vport->port_type, shost->sg_tablesize,
+ phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt);
+
/* Initialize all internally managed lists. */
INIT_LIST_HEAD(&vport->fc_nodes);
INIT_LIST_HEAD(&vport->rcv_buffer_list);
@@ -4478,6 +4577,13 @@
struct lpfc_hba *phba = vport->phba;
fc_host_supported_speeds(shost) = 0;
+ /*
+ * Avoid reporting supported link speed for FCoE as it can't be
+ * controlled via FCoE.
+ */
+ if (phba->hba_flag & HBA_FCOE_MODE)
+ return;
+
if (phba->lmt & LMT_128Gb)
fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
if (phba->lmt & LMT_64Gb)
@@ -4625,7 +4731,7 @@
/**
* lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
- * @ptr: Map to lpfc_hba data structure pointer.
+ * @t: Timer context used to obtain the pointer to lpfc hba data structure.
*
* This routine is invoked when waiting for FCF table rediscover has been
* timed out. If new FCF record(s) has (have) been discovered during the
@@ -4673,7 +4779,7 @@
case LPFC_ASYNC_LINK_FAULT_LR_LRR:
break;
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0398 Unknown link fault code: x%x\n",
bf_get(lpfc_acqe_link_fault, acqe_link));
break;
@@ -4709,7 +4815,7 @@
att_type = LPFC_ATT_LINK_UP;
break;
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0399 Invalid link attention type: x%x\n",
bf_get(lpfc_acqe_link_status, acqe_link));
att_type = LPFC_ATT_RESERVED;
@@ -4811,6 +4917,9 @@
case LPFC_ASYNC_LINK_SPEED_40GBPS:
port_speed = 40000;
break;
+ case LPFC_ASYNC_LINK_SPEED_100GBPS:
+ port_speed = 100000;
+ break;
default:
port_speed = 0;
}
@@ -4881,19 +4990,19 @@
phba->fcoe_eventtag = acqe_link->event_tag;
pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmb) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0395 The mboxq allocation failed\n");
return;
}
mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (!mp) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0396 The lpfc_dmabuf allocation failed\n");
goto out_free_pmb;
}
mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
if (!mp->virt) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0397 The mbuf allocation failed\n");
goto out_free_dmabuf;
}
@@ -4992,7 +5101,6 @@
* lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read
* topology.
* @phba: pointer to lpfc hba data structure.
- * @evt_code: asynchronous event code.
* @speed_code: asynchronous event link speed code.
*
* This routine is to parse the giving SLI4 async event link speed code into
@@ -5094,7 +5202,7 @@
phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0;
}
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2910 Async FC Trunking Event - Speed:%d\n"
"\tLogical speed:%d "
"port0: %s port1: %s port2: %s port3: %s\n",
@@ -5104,7 +5212,7 @@
trunk_link_status(2), trunk_link_status(3));
if (port_fault)
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3202 trunk error:0x%x (%s) seen on port0:%s "
/*
* SLI-4: We have only 0xA error codes
@@ -5138,7 +5246,7 @@
if (bf_get(lpfc_trailer_type, acqe_fc) !=
LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2895 Non FC link Event detected.(%d)\n",
bf_get(lpfc_trailer_type, acqe_fc));
return;
@@ -5186,19 +5294,19 @@
phba->sli4_hba.link_state.fault);
pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmb) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2897 The mboxq allocation failed\n");
return;
}
mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (!mp) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2898 The lpfc_dmabuf allocation failed\n");
goto out_free_pmb;
}
mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
if (!mp->virt) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2899 The mbuf allocation failed\n");
goto out_free_dmabuf;
}
@@ -5270,7 +5378,7 @@
/**
* lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
* @phba: pointer to lpfc hba data structure.
- * @acqe_fc: pointer to the async SLI completion queue entry.
+ * @acqe_sli: pointer to the async SLI completion queue entry.
*
* This routine is to handle the SLI4 asynchronous SLI events.
**/
@@ -5291,10 +5399,10 @@
evt_type = bf_get(lpfc_trailer_type, acqe_sli);
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "2901 Async SLI event - Event Data1:x%08x Event Data2:"
- "x%08x SLI Event Type:%d\n",
+ "2901 Async SLI event - Type:%d, Event Data: x%08x "
+ "x%08x x%08x x%08x\n", evt_type,
acqe_sli->event_data1, acqe_sli->event_data2,
- evt_type);
+ acqe_sli->reserved, acqe_sli->trailer);
port_name = phba->Port[0];
if (port_name == 0x00)
@@ -5365,7 +5473,7 @@
&misconfigured->theEvent);
break;
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3296 "
"LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
"event: Invalid link %d",
@@ -5417,7 +5525,8 @@
rc = lpfc_sli4_read_config(phba);
if (rc) {
phba->lmt = 0;
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_TRACE_EVENT,
"3194 Unable to retrieve supported "
"speeds, rc = 0x%x\n", rc);
}
@@ -5441,11 +5550,26 @@
"Event Data1:x%08x Event Data2: x%08x\n",
acqe_sli->event_data1, acqe_sli->event_data2);
break;
+ case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN:
+ /* Misconfigured WWN. Reports that the SLI Port is configured
+ * to use FA-WWN, but the attached device doesn't support it.
+ * No driver action is required.
+ * Event Data1 - N.A, Event Data2 - N.A
+ */
+ lpfc_log_msg(phba, KERN_WARNING, LOG_SLI,
+ "2699 Misconfigured FA-WWN - Attached device does "
+ "not support FA-WWN\n");
+ break;
+ case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE:
+ /* EEPROM failure. No driver action is required */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "2518 EEPROM failure - "
+ "Event Data1: x%08x Event Data2: x%08x\n",
+ acqe_sli->event_data1, acqe_sli->event_data2);
+ break;
default:
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "3193 Async SLI event - Event Data1:x%08x Event Data2:"
- "x%08x SLI Event Type:%d\n",
- acqe_sli->event_data1, acqe_sli->event_data2,
+ "3193 Unrecognized SLI event, type: 0x%x",
evt_type);
break;
}
@@ -5510,7 +5634,7 @@
/**
* lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
- * @vport: pointer to lpfc hba data structure.
+ * @phba: pointer to lpfc hba data structure.
*
* This routine is to perform Clear Virtual Link (CVL) on all vports in
* response to a FCF dead event.
@@ -5531,7 +5655,7 @@
/**
* lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
* @phba: pointer to lpfc hba data structure.
- * @acqe_link: pointer to the async fcoe completion queue entry.
+ * @acqe_fip: pointer to the async fcoe completion queue entry.
*
* This routine is to handle the SLI4 asynchronous fcoe event.
**/
@@ -5554,8 +5678,7 @@
case LPFC_FIP_EVENT_TYPE_NEW_FCF:
case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
- lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
- LOG_DISCOVERY,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2546 New FCF event, evt_tag:x%x, "
"index:x%x\n",
acqe_fip->event_tag,
@@ -5608,23 +5731,24 @@
rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
LPFC_FCOE_FCF_GET_FIRST);
if (rc)
- lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2547 Issue FCF scan read FCF mailbox "
"command failed (x%x)\n", rc);
break;
case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "2548 FCF Table full count 0x%x tag 0x%x\n",
- bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
- acqe_fip->event_tag);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2548 FCF Table full count 0x%x tag 0x%x\n",
+ bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
+ acqe_fip->event_tag);
break;
case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
- lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
- "2549 FCF (x%x) disconnected from network, "
- "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2549 FCF (x%x) disconnected from network, "
+ "tag:x%x\n", acqe_fip->index,
+ acqe_fip->event_tag);
/*
* If we are in the middle of FCF failover process, clear
* the corresponding FCF bit in the roundrobin bitmap.
@@ -5661,7 +5785,7 @@
rc = lpfc_sli4_redisc_fcf_table(phba);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
- LOG_DISCOVERY,
+ LOG_TRACE_EVENT,
"2772 Issue FCF rediscover mailbox "
"command failed, fail through to FCF "
"dead event\n");
@@ -5685,7 +5809,8 @@
break;
case LPFC_FIP_EVENT_TYPE_CVL:
phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
- lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_TRACE_EVENT,
"2718 Clear Virtual Link Received for VPI 0x%x"
" tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
@@ -5752,7 +5877,7 @@
rc = lpfc_sli4_redisc_fcf_table(phba);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
- LOG_DISCOVERY,
+ LOG_TRACE_EVENT,
"2774 Issue FCF rediscover "
"mailbox command failed, "
"through to CVL event\n");
@@ -5773,9 +5898,9 @@
}
break;
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0288 Unknown FCoE event type 0x%x event tag "
- "0x%x\n", event_type, acqe_fip->event_tag);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "0288 Unknown FCoE event type 0x%x event tag "
+ "0x%x\n", event_type, acqe_fip->event_tag);
break;
}
}
@@ -5783,7 +5908,7 @@
/**
* lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
* @phba: pointer to lpfc hba data structure.
- * @acqe_link: pointer to the async dcbx completion queue entry.
+ * @acqe_dcbx: pointer to the async dcbx completion queue entry.
*
* This routine is to handle the SLI4 asynchronous dcbx event.
**/
@@ -5792,7 +5917,7 @@
struct lpfc_acqe_dcbx *acqe_dcbx)
{
phba->fc_eventTag = acqe_dcbx->event_tag;
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0290 The SLI4 DCBX asynchronous event is not "
"handled yet\n");
}
@@ -5800,7 +5925,7 @@
/**
* lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
* @phba: pointer to lpfc hba data structure.
- * @acqe_link: pointer to the async grp5 completion queue entry.
+ * @acqe_grp5: pointer to the async grp5 completion queue entry.
*
* This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
* is an asynchronous notified of a logical link speed change. The Port
@@ -5833,18 +5958,21 @@
void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
{
struct lpfc_cq_event *cq_event;
+ unsigned long iflags;
/* First, declare the async event has been handled */
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflags);
phba->hba_flag &= ~ASYNC_EVENT;
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+
/* Now, handle all the async events */
+ spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
- /* Get the first event from the head of the event queue */
- spin_lock_irq(&phba->hbalock);
list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
cq_event, struct lpfc_cq_event, list);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock,
+ iflags);
+
/* Process the asynchronous event */
switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
case LPFC_TRAILER_CODE_LINK:
@@ -5869,15 +5997,19 @@
lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
break;
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "1804 Invalid asynchrous event code: "
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_TRACE_EVENT,
+ "1804 Invalid asynchronous event code: "
"x%x\n", bf_get(lpfc_trailer_code,
&cq_event->cqe.mcqe_cmpl));
break;
}
+
/* Free the completion event processed to the free pool */
lpfc_sli4_cq_event_release(phba, cq_event);
+ spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
}
+ spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
}
/**
@@ -5905,7 +6037,7 @@
"2777 Start post-quiescent FCF table scan\n");
rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
if (rc)
- lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2747 Issue FCF scan read FCF mailbox "
"command failed 0x%x\n", rc);
}
@@ -5976,7 +6108,7 @@
"0480 Enabled MSI-X interrupt mode.\n");
break;
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0482 Illegal interrupt mode.\n");
break;
}
@@ -6024,7 +6156,7 @@
out_disable_device:
pci_disable_device(pdev);
out_error:
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1401 Failed to enable pci device\n");
return -ENODEV;
}
@@ -6125,7 +6257,7 @@
max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
if (nr_vfn > max_nr_vfn) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3057 Requested vfs (%d) greater than "
"supported vfs (%d)", nr_vfn, max_nr_vfn);
return -EINVAL;
@@ -6164,6 +6296,9 @@
* Driver resources common to all SLI revisions
*/
atomic_set(&phba->fast_event_count, 0);
+ atomic_set(&phba->dbg_log_idx, 0);
+ atomic_set(&phba->dbg_log_cnt, 0);
+ atomic_set(&phba->dbg_log_dmping, 0);
spin_lock_init(&phba->hbalock);
/* Initialize ndlp management spinlock */
@@ -6217,6 +6352,9 @@
INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work);
+ INIT_DELAYED_WORK(&phba->idle_stat_delay_work,
+ lpfc_idle_stat_delay_work);
+
return 0;
}
@@ -6274,11 +6412,6 @@
* used to create the sg_dma_buf_pool must be dynamically calculated.
*/
- /* Initialize the host templates the configured values. */
- lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
- lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
- lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
-
if (phba->sli_rev == LPFC_SLI_REV4)
entry_sz = sizeof(struct sli4_sge);
else
@@ -6319,7 +6452,7 @@
}
lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
- "9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
+ "9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
phba->cfg_total_seg_cnt);
@@ -6422,7 +6555,7 @@
u32 if_fam;
phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
- phba->sli4_hba.num_possible_cpu = num_possible_cpus();
+ phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1;
phba->sli4_hba.curr_disp_cpu = 0;
/* Get all the module params for configuring this host */
@@ -6501,6 +6634,8 @@
/* This abort list used by worker thread */
spin_lock_init(&phba->sli4_hba.sgl_list_lock);
spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
+ spin_lock_init(&phba->sli4_hba.asynce_list_lock);
+ spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock);
/*
* Initialize driver internal slow-path work queues
@@ -6512,8 +6647,6 @@
INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
/* Asynchronous event CQ Event work queue list */
INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
- /* Fast-path XRI aborted CQ Event work queue list */
- INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
/* Slow-path XRI aborted CQ Event work queue list */
INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
/* Receive queue CQ Event work queue list */
@@ -6591,7 +6724,8 @@
lpfc_read_nv(phba, mboxq);
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
if (rc != MBX_SUCCESS) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_TRACE_EVENT,
"6016 Mailbox failed , mbxCmd x%x "
"READ_NV, mbxStatus x%x\n",
bf_get(lpfc_mqe_command, &mboxq->u.mqe),
@@ -6620,17 +6754,26 @@
phba->nvmet_support = 1; /* a match */
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_TRACE_EVENT,
"6017 NVME Target %016llx\n",
wwn);
#else
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_TRACE_EVENT,
"6021 Can't enable NVME Target."
" NVME_TARGET_FC infrastructure"
" is not in kernel\n");
#endif
/* Not supported for NVMET */
phba->cfg_xri_rebalancing = 0;
+ if (phba->irq_chann_mode == NHT_MODE) {
+ phba->cfg_irq_chann =
+ phba->sli4_hba.num_present_cpu;
+ phba->cfg_hdw_queue =
+ phba->sli4_hba.num_present_cpu;
+ phba->irq_chann_mode = NORMAL_MODE;
+ }
break;
}
}
@@ -6651,9 +6794,9 @@
&phba->sli4_hba.sli_intf);
if (phba->sli4_hba.extents_in_use &&
phba->sli4_hba.rpi_hdrs_in_use) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2999 Unsupported SLI4 Parameters "
- "Extents and RPI headers enabled.\n");
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2999 Unsupported SLI4 Parameters "
+ "Extents and RPI headers enabled.\n");
if (if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
if_fam == LPFC_SLI_INTF_FAMILY_BE2) {
mempool_free(mboxq, phba->mbox_mem_pool);
@@ -6760,11 +6903,6 @@
phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
}
- /* Initialize the host templates with the updated values. */
- lpfc_vport_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
- lpfc_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
- lpfc_template_no_hr.sg_tablesize = phba->cfg_scsi_seg_cnt;
-
lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
"9087 sg_seg_cnt:%d dmabuf_size:%d "
"total:%d scsi:%d nvme:%d\n",
@@ -6818,13 +6956,13 @@
/* Allocate and initialize active sgl array */
rc = lpfc_init_active_sgl_array(phba);
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1430 Failed to initialize sgl list.\n");
goto out_destroy_cq_event_pool;
}
rc = lpfc_sli4_init_rpi_hdrs(phba);
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1432 Failed to initialize rpi headers.\n");
goto out_free_active_sgl;
}
@@ -6834,7 +6972,7 @@
phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long),
GFP_KERNEL);
if (!phba->fcf.fcf_rr_bmask) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2759 Failed allocate memory for FCF round "
"robin failover bmask\n");
rc = -ENOMEM;
@@ -6845,7 +6983,7 @@
sizeof(struct lpfc_hba_eq_hdl),
GFP_KERNEL);
if (!phba->sli4_hba.hba_eq_hdl) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2572 Failed allocate memory for "
"fast-path per-EQ handle array\n");
rc = -ENOMEM;
@@ -6856,7 +6994,7 @@
sizeof(struct lpfc_vector_map_info),
GFP_KERNEL);
if (!phba->sli4_hba.cpu_map) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3327 Failed allocate memory for msi-x "
"interrupt vector mapping\n");
rc = -ENOMEM;
@@ -6865,11 +7003,32 @@
phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info);
if (!phba->sli4_hba.eq_info) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3321 Failed allocation for per_cpu stats\n");
rc = -ENOMEM;
goto out_free_hba_cpu_map;
}
+
+ phba->sli4_hba.idle_stat = kcalloc(phba->sli4_hba.num_possible_cpu,
+ sizeof(*phba->sli4_hba.idle_stat),
+ GFP_KERNEL);
+ if (!phba->sli4_hba.idle_stat) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "3390 Failed allocation for idle_stat\n");
+ rc = -ENOMEM;
+ goto out_free_hba_eq_info;
+ }
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat);
+ if (!phba->sli4_hba.c_stat) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "3332 Failed allocating per cpu hdwq stats\n");
+ rc = -ENOMEM;
+ goto out_free_hba_idle_stat;
+ }
+#endif
+
/*
* Enable sr-iov virtual functions if supported and configured
* through the module parameter.
@@ -6889,6 +7048,12 @@
return 0;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+out_free_hba_idle_stat:
+ kfree(phba->sli4_hba.idle_stat);
+#endif
+out_free_hba_eq_info:
+ free_percpu(phba->sli4_hba.eq_info);
out_free_hba_cpu_map:
kfree(phba->sli4_hba.cpu_map);
out_free_hba_eq_hdl:
@@ -6927,12 +7092,17 @@
struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
free_percpu(phba->sli4_hba.eq_info);
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ free_percpu(phba->sli4_hba.c_stat);
+#endif
+ kfree(phba->sli4_hba.idle_stat);
/* Free memory allocated for msi-x interrupt vector to CPU mapping */
kfree(phba->sli4_hba.cpu_map);
phba->sli4_hba.num_possible_cpu = 0;
phba->sli4_hba.num_present_cpu = 0;
phba->sli4_hba.curr_disp_cpu = 0;
+ cpumask_clear(&phba->sli4_hba.irq_aff_mask);
/* Free memory allocated for fast-path work queue handles */
kfree(phba->sli4_hba.hba_eq_hdl);
@@ -7000,7 +7170,7 @@
phba->lpfc_stop_port = lpfc_stop_port_s4;
break;
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1431 Invalid HBA PCI-device group: 0x%x\n",
dev_grp);
return -ENODEV;
@@ -7084,6 +7254,7 @@
/**
* lpfc_init_iocb_list - Allocate and initialize iocb list.
* @phba: pointer to lpfc hba data structure.
+ * @iocb_count: number of requested iocbs
*
* This routine is invoked to allocate and initizlize the driver's IOCB
* list and set up the IOCB tag array accordingly.
@@ -7106,7 +7277,7 @@
if (iocbq_entry == NULL) {
printk(KERN_ERR "%s: only allocated %d iocbs of "
"expected %d count. Unloading driver.\n",
- __func__, i, LPFC_IOCB_LIST_CNT);
+ __func__, i, iocb_count);
goto out_free_iocbq;
}
@@ -7295,7 +7466,7 @@
rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
if (!rpi_hdr) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0391 Error during rpi post operation\n");
lpfc_sli4_remove_rpis(phba);
rc = -ENODEV;
@@ -7525,18 +7696,10 @@
if (phba->nvmet_support) {
/* Only 1 vport (pport) will support NVME target */
- if (phba->txrdy_payload_pool == NULL) {
- phba->txrdy_payload_pool = dma_pool_create(
- "txrdy_pool", &phba->pcidev->dev,
- TXRDY_PAYLOAD_LEN, 16, 0);
- if (phba->txrdy_payload_pool) {
- phba->targetport = NULL;
- phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
- lpfc_printf_log(phba, KERN_INFO,
- LOG_INIT | LOG_NVME_DISC,
- "6076 NVME Target Found\n");
- }
- }
+ phba->targetport = NULL;
+ phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC,
+ "6076 NVME Target Found\n");
}
lpfc_debugfs_initialize(vport);
@@ -7615,7 +7778,7 @@
if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
if ((old_mask != phba->cfg_prot_mask) ||
(old_guard != phba->cfg_prot_guard))
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1475 Registering BlockGuard with the "
"SCSI layer: mask %d guard %d\n",
phba->cfg_prot_mask,
@@ -7624,7 +7787,7 @@
scsi_host_set_prot(shost, phba->cfg_prot_mask);
scsi_host_set_guard(shost, phba->cfg_prot_guard);
} else
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1479 Not Registering BlockGuard with the SCSI "
"layer, Bad protection parameters: %d %d\n",
old_mask, old_guard);
@@ -7855,7 +8018,7 @@
* other register reads as the data may not be valid. Just exit.
*/
if (port_error) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1408 Port Failed POST - portsmphr=0x%x, "
"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
"scr2=x%x, hscratch=x%x, pstatus=x%x\n",
@@ -7904,7 +8067,8 @@
readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
(~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_TRACE_EVENT,
"1422 Unrecoverable Error "
"Detected during POST "
"uerr_lo_reg=0x%x, "
@@ -7931,7 +8095,7 @@
phba->work_status[1] =
readl(phba->sli4_hba.u.if_type2.
ERR2regaddr);
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2888 Unrecoverable port error "
"following POST: port status reg "
"0x%x, port_smphr reg 0x%x, "
@@ -8044,6 +8208,7 @@
/**
* lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
* @phba: pointer to lpfc hba data structure.
+ * @if_type: sli if type to operate on.
*
* This routine is invoked to set up SLI4 BAR1 register memory map.
**/
@@ -8215,6 +8380,85 @@
memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
}
+static const char * const lpfc_topo_to_str[] = {
+ "Loop then P2P",
+ "Loopback",
+ "P2P Only",
+ "Unsupported",
+ "Loop Only",
+ "Unsupported",
+ "P2P then Loop",
+};
+
+#define LINK_FLAGS_DEF 0x0
+#define LINK_FLAGS_P2P 0x1
+#define LINK_FLAGS_LOOP 0x2
+/**
+ * lpfc_map_topology - Map the topology read from READ_CONFIG
+ * @phba: pointer to lpfc hba data structure.
+ * @rd_config: pointer to read config data
+ *
+ * This routine is invoked to map the topology values as read
+ * from the read config mailbox command. If the persistent
+ * topology feature is supported, the firmware will provide the
+ * saved topology information to be used in INIT_LINK
+ **/
+static void
+lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
+{
+ u8 ptv, tf, pt;
+
+ ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config);
+ tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config);
+ pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "2027 Read Config Data : ptv:0x%x, tf:0x%x pt:0x%x",
+ ptv, tf, pt);
+ if (!ptv) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "2019 FW does not support persistent topology "
+ "Using driver parameter defined value [%s]",
+ lpfc_topo_to_str[phba->cfg_topology]);
+ return;
+ }
+ /* FW supports persistent topology - override module parameter value */
+ phba->hba_flag |= HBA_PERSISTENT_TOPO;
+ switch (phba->pcidev->device) {
+ case PCI_DEVICE_ID_LANCER_G7_FC:
+ case PCI_DEVICE_ID_LANCER_G6_FC:
+ if (!tf) {
+ phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
+ ? FLAGS_TOPOLOGY_MODE_LOOP
+ : FLAGS_TOPOLOGY_MODE_PT_PT);
+ } else {
+ phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
+ }
+ break;
+ default: /* G5 */
+ if (tf) {
+ /* If topology failover set - pt is '0' or '1' */
+ phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP :
+ FLAGS_TOPOLOGY_MODE_LOOP_PT);
+ } else {
+ phba->cfg_topology = ((pt == LINK_FLAGS_P2P)
+ ? FLAGS_TOPOLOGY_MODE_PT_PT
+ : FLAGS_TOPOLOGY_MODE_LOOP);
+ }
+ break;
+ }
+ if (phba->hba_flag & HBA_PERSISTENT_TOPO) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "2020 Using persistent topology value [%s]",
+ lpfc_topo_to_str[phba->cfg_topology]);
+ } else {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "2021 Invalid topology values from FW "
+ "Using driver parameter defined value [%s]",
+ lpfc_topo_to_str[phba->cfg_topology]);
+ }
+}
+
/**
* lpfc_sli4_read_config - Get the config parameters.
* @phba: pointer to lpfc hba data structure.
@@ -8245,7 +8489,7 @@
pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmb) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2011 Unable to allocate memory for issuing "
"SLI_CONFIG_SPECIAL mailbox command\n");
return -ENOMEM;
@@ -8255,11 +8499,11 @@
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
if (rc != MBX_SUCCESS) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "2012 Mailbox failed , mbxCmd x%x "
- "READ_CONFIG, mbxStatus x%x\n",
- bf_get(lpfc_mqe_command, &pmb->u.mqe),
- bf_get(lpfc_mqe_status, &pmb->u.mqe));
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2012 Mailbox failed , mbxCmd x%x "
+ "READ_CONFIG, mbxStatus x%x\n",
+ bf_get(lpfc_mqe_command, &pmb->u.mqe),
+ bf_get(lpfc_mqe_status, &pmb->u.mqe));
rc = -EIO;
} else {
rd_config = &pmb->u.mqe.un.rd_config;
@@ -8326,13 +8570,14 @@
phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
phba->max_vports = phba->max_vpi;
+ lpfc_map_topology(phba, rd_config);
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"2003 cfg params Extents? %d "
"XRI(B:%d M:%d), "
"VPI(B:%d M:%d) "
"VFI(B:%d M:%d) "
"RPI(B:%d M:%d) "
- "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d\n",
+ "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d lmt:x%x\n",
phba->sli4_hba.extents_in_use,
phba->sli4_hba.max_cfg_param.xri_base,
phba->sli4_hba.max_cfg_param.max_xri,
@@ -8346,7 +8591,8 @@
phba->sli4_hba.max_cfg_param.max_eq,
phba->sli4_hba.max_cfg_param.max_cq,
phba->sli4_hba.max_cfg_param.max_wq,
- phba->sli4_hba.max_cfg_param.max_rq);
+ phba->sli4_hba.max_cfg_param.max_rq,
+ phba->lmt);
/*
* Calculate queue resources based on how
@@ -8368,8 +8614,9 @@
/* Check to see if there is enough for NVME */
if ((phba->cfg_irq_chann > qmin) ||
(phba->cfg_hdw_queue > qmin)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "2005 Reducing Queues: "
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2005 Reducing Queues - "
+ "FW resource limitation: "
"WQ %d CQ %d EQ %d: min %d: "
"IRQ %d HDWQ %d\n",
phba->sli4_hba.max_cfg_param.max_wq,
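The check above trips when the requested IRQ or hardware-queue count exceeds the smallest firmware-reported resource pool. A minimal sketch of the clamp; the min-of-the-three-maxima shape follows the log message, but the reserved slow-path queue count and the sample numbers are assumptions for the example:

#include <stdio.h>

static int min3(int a, int b, int c)
{
	int m = a < b ? a : b;
	return m < c ? m : c;
}

int main(void)
{
	/* Sample values as if reported by READ_CONFIG. */
	int max_wq = 128, max_cq = 160, max_eq = 32;
	int reserved = 4;		/* assumed slow-path queue count */
	int qmin = min3(max_wq, max_cq, max_eq) - reserved;

	int irq_chann = 64, hdw_queue = 64;	/* driver request */

	if (irq_chann > qmin || hdw_queue > qmin) {
		printf("Reducing Queues - FW resource limitation: min %d\n",
		       qmin);
		if (irq_chann > qmin)
			irq_chann = qmin;
		if (hdw_queue > qmin)
			hdw_queue = qmin;
	}
	printf("IRQ %d HDWQ %d\n", irq_chann, hdw_queue);
	return 0;
}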
@@ -8434,7 +8681,8 @@
LPFC_USER_LINK_SPEED_AUTO;
break;
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_TRACE_EVENT,
"0047 Unrecognized link "
"speed : %d\n",
forced_link_speed);
@@ -8471,7 +8719,7 @@
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (rc2 || shdr_status || shdr_add_status) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3026 Mailbox failed , mbxCmd x%x "
"GET_FUNCTION_CONFIG, mbxStatus x%x\n",
bf_get(lpfc_mqe_command, &pmb->u.mqe),
@@ -8508,7 +8756,7 @@
"vf_number:%d\n", phba->sli4_hba.iov.pf_number,
phba->sli4_hba.iov.vf_number);
else
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3028 GET_FUNCTION_CONFIG: failed to find "
"Resource Descriptor:x%x\n",
LPFC_RSRC_DESC_TYPE_FCFCOE);
@@ -8545,7 +8793,7 @@
mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
GFP_KERNEL);
if (!mboxq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0492 Unable to allocate memory for "
"issuing SLI_CONFIG_SPECIAL mailbox "
"command\n");
@@ -8560,7 +8808,7 @@
memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
if (rc != MBX_SUCCESS) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0493 SLI_CONFIG_SPECIAL mailbox "
"failed with status x%x\n",
rc);
@@ -8599,8 +8847,8 @@
*/
if (phba->nvmet_support) {
- if (phba->cfg_irq_chann < phba->cfg_nvmet_mrq)
- phba->cfg_nvmet_mrq = phba->cfg_irq_chann;
+ if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq)
+ phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
}
@@ -8640,8 +8888,9 @@
phba->sli4_hba.cq_esize,
phba->sli4_hba.cq_ecount, cpu);
if (!qdesc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0499 Failed allocate fast-path IO CQ (%d)\n", idx);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "0499 Failed allocate fast-path IO CQ (%d)\n",
+ idx);
return 1;
}
qdesc->qe_valid = 1;
@@ -8663,7 +8912,7 @@
phba->sli4_hba.wq_ecount, cpu);
if (!qdesc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0503 Failed allocate fast-path IO WQ (%d)\n",
idx);
return 1;
@@ -8719,7 +8968,7 @@
phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue),
GFP_KERNEL);
if (!phba->sli4_hba.hdwq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6427 Failed allocate memory for "
"fast-path Hardware Queue array\n");
goto out_error;
@@ -8751,7 +9000,7 @@
sizeof(struct lpfc_queue *),
GFP_KERNEL);
if (!phba->sli4_hba.nvmet_cqset) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3121 Fail allocate memory for "
"fast-path CQ set array\n");
goto out_error;
@@ -8761,7 +9010,7 @@
sizeof(struct lpfc_queue *),
GFP_KERNEL);
if (!phba->sli4_hba.nvmet_mrq_hdr) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3122 Fail allocate memory for "
"fast-path RQ set hdr array\n");
goto out_error;
@@ -8771,7 +9020,7 @@
sizeof(struct lpfc_queue *),
GFP_KERNEL);
if (!phba->sli4_hba.nvmet_mrq_data) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3124 Fail allocate memory for "
"fast-path RQ set data array\n");
goto out_error;
@@ -8799,7 +9048,7 @@
phba->sli4_hba.eq_esize,
phba->sli4_hba.eq_ecount, cpu);
if (!qdesc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0497 Failed allocate EQ (%d)\n",
cpup->hdwq);
goto out_error;
@@ -8853,7 +9102,7 @@
phba->sli4_hba.cq_ecount,
cpu);
if (!qdesc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3142 Failed allocate NVME "
"CQ Set (%d)\n", idx);
goto out_error;
@@ -8875,7 +9124,7 @@
phba->sli4_hba.cq_esize,
phba->sli4_hba.cq_ecount, cpu);
if (!qdesc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0500 Failed allocate slow-path mailbox CQ\n");
goto out_error;
}
@@ -8887,7 +9136,7 @@
phba->sli4_hba.cq_esize,
phba->sli4_hba.cq_ecount, cpu);
if (!qdesc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0501 Failed allocate slow-path ELS CQ\n");
goto out_error;
}
@@ -8906,7 +9155,7 @@
phba->sli4_hba.mq_esize,
phba->sli4_hba.mq_ecount, cpu);
if (!qdesc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0505 Failed allocate slow-path MQ\n");
goto out_error;
}
@@ -8922,7 +9171,7 @@
phba->sli4_hba.wq_esize,
phba->sli4_hba.wq_ecount, cpu);
if (!qdesc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0504 Failed allocate slow-path ELS WQ\n");
goto out_error;
}
@@ -8936,7 +9185,7 @@
phba->sli4_hba.cq_esize,
phba->sli4_hba.cq_ecount, cpu);
if (!qdesc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6079 Failed allocate NVME LS CQ\n");
goto out_error;
}
@@ -8949,7 +9198,7 @@
phba->sli4_hba.wq_esize,
phba->sli4_hba.wq_ecount, cpu);
if (!qdesc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6080 Failed allocate NVME LS WQ\n");
goto out_error;
}
@@ -8967,7 +9216,7 @@
phba->sli4_hba.rq_esize,
phba->sli4_hba.rq_ecount, cpu);
if (!qdesc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0506 Failed allocate receive HRQ\n");
goto out_error;
}
@@ -8978,7 +9227,7 @@
phba->sli4_hba.rq_esize,
phba->sli4_hba.rq_ecount, cpu);
if (!qdesc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0507 Failed allocate receive DRQ\n");
goto out_error;
}
@@ -8996,7 +9245,7 @@
LPFC_NVMET_RQE_DEF_COUNT,
cpu);
if (!qdesc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3146 Failed allocate "
"receive HRQ\n");
goto out_error;
@@ -9009,7 +9258,7 @@
GFP_KERNEL,
cpu_to_node(cpu));
if (qdesc->rqbp == NULL) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6131 Failed allocate "
"Header RQBP\n");
goto out_error;
@@ -9025,7 +9274,7 @@
LPFC_NVMET_RQE_DEF_COUNT,
cpu);
if (!qdesc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3156 Failed allocate "
"receive DRQ\n");
goto out_error;
@@ -9216,7 +9465,7 @@
int rc;
if (!eq || !cq || !wq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6085 Fast-path %s (%d) not allocated\n",
((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
return -ENOMEM;
@@ -9226,9 +9475,9 @@
rc = lpfc_cq_create(phba, cq, eq,
(qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype);
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "6086 Failed setup of CQ (%d), rc = 0x%x\n",
- qidx, (uint32_t)rc);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "6086 Failed setup of CQ (%d), rc = 0x%x\n",
+ qidx, (uint32_t)rc);
return rc;
}
@@ -9244,7 +9493,7 @@
/* create the wq */
rc = lpfc_wq_create(phba, wq, cq, qtype);
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"4618 Fail setup fastpath WQ (%d), rc = 0x%x\n",
qidx, (uint32_t)rc);
/* no need to tear down cq - caller will do so */
@@ -9262,9 +9511,9 @@
} else {
rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0539 Failed setup of slow-path MQ: "
- "rc = 0x%x\n", rc);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "0539 Failed setup of slow-path MQ: "
+ "rc = 0x%x\n", rc);
/* no need to tear down cq - caller will do so */
return rc;
}
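For context, this helper builds the dependency chain CQ-then-WQ (or MQ for the mailbox path) under an existing EQ, and a WQ failure intentionally leaves the CQ standing for the caller's teardown. A toy model of that create-in-order, unwind-in-caller pattern — all names here are illustrative, not the driver's API:

#include <stdio.h>

/* Toy "create" steps; 'fail' selects which stage errors out. */
static int create_cq(int fail) { return fail == 1 ? -1 : 0; }
static int create_wq(int fail) { return fail == 2 ? -1 : 0; }

/* Same shape as the hunk: CQ first, then WQ; on a WQ failure the CQ is
 * deliberately left in place, because the caller tears everything down. */
static int create_wq_cq(int fail)
{
	if (create_cq(fail)) {
		fprintf(stderr, "failed setup of CQ\n");
		return -1;		/* nothing to unwind yet */
	}
	if (create_wq(fail)) {
		fprintf(stderr, "failed setup of WQ\n");
		/* no need to tear down cq - caller will do so */
		return -1;
	}
	return 0;
}

int main(void)
{
	for (int fail = 0; fail <= 2; fail++)
		printf("fail=%d -> rc=%d\n", fail, create_wq_cq(fail));
	return 0;
}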
@@ -9337,7 +9586,7 @@
/* Check for dual-ULP support */
mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mboxq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3249 Unable to allocate memory for "
"QUERY_FW_CFG mailbox command\n");
return -ENOMEM;
@@ -9355,7 +9604,7 @@
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (shdr_status || shdr_add_status || rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3250 QUERY_FW_CFG mailbox failed with status "
"x%x add_status x%x, mbx status x%x\n",
shdr_status, shdr_add_status, rc);
@@ -9384,7 +9633,7 @@
/* Set up HBA event queue */
if (!qp) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3147 Fast-path EQs not allocated\n");
rc = -ENOMEM;
goto out_error;
@@ -9408,7 +9657,7 @@
rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq,
phba->cfg_fcp_imax);
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0523 Failed setup of fast-path"
" EQ (%d), rc = 0x%x\n",
cpup->eq, (uint32_t)rc);
@@ -9440,7 +9689,7 @@
qidx,
LPFC_IO);
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0535 Failed to setup fastpath "
"IO WQ/CQ (%d), rc = 0x%x\n",
qidx, (uint32_t)rc);
@@ -9455,7 +9704,7 @@
/* Set up slow-path MBOX CQ/MQ */
if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0528 %s not allocated\n",
phba->sli4_hba.mbx_cq ?
"Mailbox WQ" : "Mailbox CQ");
@@ -9468,14 +9717,14 @@
phba->sli4_hba.mbx_wq,
NULL, 0, LPFC_MBOX);
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
(uint32_t)rc);
goto out_destroy;
}
if (phba->nvmet_support) {
if (!phba->sli4_hba.nvmet_cqset) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3165 Fast-path NVME CQ Set "
"array not allocated\n");
rc = -ENOMEM;
@@ -9487,7 +9736,7 @@
qp,
LPFC_WCQ, LPFC_NVMET);
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3164 Failed setup of NVME CQ "
"Set, rc = 0x%x\n",
(uint32_t)rc);
@@ -9499,7 +9748,7 @@
qp[0].hba_eq,
LPFC_WCQ, LPFC_NVMET);
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6089 Failed setup NVMET CQ: "
"rc = 0x%x\n", (uint32_t)rc);
goto out_destroy;
@@ -9516,7 +9765,7 @@
/* Set up slow-path ELS WQ/CQ */
if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0530 ELS %s not allocated\n",
phba->sli4_hba.els_cq ? "WQ" : "CQ");
rc = -ENOMEM;
@@ -9527,7 +9776,7 @@
phba->sli4_hba.els_wq,
NULL, 0, LPFC_ELS);
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
(uint32_t)rc);
goto out_destroy;
@@ -9540,7 +9789,7 @@
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
/* Set up NVME LS Complete Queue */
if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6091 LS %s not allocated\n",
phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
rc = -ENOMEM;
@@ -9551,7 +9800,7 @@
phba->sli4_hba.nvmels_wq,
NULL, 0, LPFC_NVME_LS);
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0526 Failed setup of NVVME LS WQ/CQ: "
"rc = 0x%x\n", (uint32_t)rc);
goto out_destroy;
@@ -9571,7 +9820,7 @@
if ((!phba->sli4_hba.nvmet_cqset) ||
(!phba->sli4_hba.nvmet_mrq_hdr) ||
(!phba->sli4_hba.nvmet_mrq_data)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6130 MRQ CQ Queues not "
"allocated\n");
rc = -ENOMEM;
@@ -9584,7 +9833,7 @@
phba->sli4_hba.nvmet_cqset,
LPFC_NVMET);
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6098 Failed setup of NVMET "
"MRQ: rc = 0x%x\n",
(uint32_t)rc);
@@ -9598,7 +9847,7 @@
phba->sli4_hba.nvmet_cqset[0],
LPFC_NVMET);
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6057 Failed setup of NVMET "
"Receive Queue: rc = 0x%x\n",
(uint32_t)rc);
@@ -9617,7 +9866,7 @@
}
if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0540 Receive Queue not allocated\n");
rc = -ENOMEM;
goto out_destroy;
@@ -9626,7 +9875,7 @@
rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
phba->sli4_hba.els_cq, LPFC_USOL);
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0541 Failed setup of Receive Queue: "
"rc = 0x%x\n", (uint32_t)rc);
goto out_destroy;
@@ -9654,7 +9903,7 @@
phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1),
sizeof(struct lpfc_queue *), GFP_KERNEL);
if (!phba->sli4_hba.cq_lookup) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0549 Failed setup of CQ Lookup table: "
"size 0x%x\n", phba->sli4_hba.cq_max);
rc = -ENOMEM;
@@ -9901,26 +10150,28 @@
static void
lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
{
- LIST_HEAD(cqelist);
- struct lpfc_cq_event *cqe;
+ LIST_HEAD(cq_event_list);
+ struct lpfc_cq_event *cq_event;
unsigned long iflags;
/* Retrieve all the pending WCQEs from pending WCQE lists */
- spin_lock_irqsave(&phba->hbalock, iflags);
- /* Pending FCP XRI abort events */
- list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
- &cqelist);
- /* Pending ELS XRI abort events */
- list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
- &cqelist);
- /* Pending asynnc events */
- list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
- &cqelist);
- spin_unlock_irqrestore(&phba->hbalock, iflags);
- while (!list_empty(&cqelist)) {
- list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
- lpfc_sli4_cq_event_release(phba, cqe);
+ /* Pending ELS XRI abort events */
+ spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
+ list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
+ &cq_event_list);
+ spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
+
+ /* Pending async events */
+ spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
+ list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
+ &cq_event_list);
+ spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
+
+ while (!list_empty(&cq_event_list)) {
+ list_remove_head(&cq_event_list, cq_event,
+ struct lpfc_cq_event, list);
+ lpfc_sli4_cq_event_release(phba, cq_event);
}
}
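The rewrite above swaps one long hbalock critical section for per-list locks, each held just long enough to splice its pending queue onto a private local list; the entries are then released with no lock held. A user-space sketch of the splice-then-drain idiom, with a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; int id; };

static struct node *pending;			/* shared pending list */
static pthread_mutex_t pending_lock = PTHREAD_MUTEX_INITIALIZER;

static void queue_event(int id)
{
	struct node *n = malloc(sizeof(*n));

	if (!n)
		return;
	n->id = id;
	pthread_mutex_lock(&pending_lock);
	n->next = pending;			/* push under the lock */
	pending = n;
	pthread_mutex_unlock(&pending_lock);
}

static void release_all(void)
{
	struct node *local;

	/* Splice: detach the whole list while holding the lock ... */
	pthread_mutex_lock(&pending_lock);
	local = pending;
	pending = NULL;
	pthread_mutex_unlock(&pending_lock);

	/* ... then free every entry with the lock dropped. */
	while (local) {
		struct node *n = local;

		local = n->next;
		printf("releasing event %d\n", n->id);
		free(n);
	}
}

int main(void)
{
	queue_event(1); queue_event(2); queue_event(3);
	release_all();
	return 0;
}

Keeping the free() calls outside the lock is the point of the pattern: lock hold time stays bounded no matter how long the pending list has grown.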
@@ -9954,7 +10205,7 @@
mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
GFP_KERNEL);
if (!mboxq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0494 Unable to allocate memory for "
"issuing SLI_FUNCTION_RESET mailbox "
"command\n");
@@ -9973,7 +10224,7 @@
&shdr->response);
mempool_free(mboxq, phba->mbox_mem_pool);
if (shdr_status || shdr_add_status || rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0495 SLI_FUNCTION_RESET mailbox "
"failed with status x%x add_status x%x,"
" mbx status x%x\n",
@@ -10005,7 +10256,7 @@
phba->sli4_hba.u.if_type2.ERR1regaddr);
phba->work_status[1] = readl(
phba->sli4_hba.u.if_type2.ERR2regaddr);
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2890 Port not ready, port status reg "
"0x%x error 1=0x%x, error 2=0x%x\n",
reg_data.word0,
@@ -10047,7 +10298,7 @@
out:
/* Catch the not-ready port failure after a port reset. */
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3317 HBA not functional: IP Reset Failed "
"try: echo fw_reset > board_mode\n");
rc = -ENODEV;
@@ -10097,7 +10348,7 @@
/* There is no SLI3 failback for SLI4 devices. */
if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
LPFC_SLI_INTF_VALID) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2894 SLI_INTF reg contents invalid "
"sli_intf reg 0x%x\n",
phba->sli4_hba.sli_intf.word0);
@@ -10300,6 +10551,8 @@
case LPFC_SLI_INTF_IF_TYPE_6:
iounmap(phba->sli4_hba.drbl_regs_memmap_p);
iounmap(phba->sli4_hba.conf_regs_memmap_p);
+ if (phba->sli4_hba.dpp_regs_memmap_p)
+ iounmap(phba->sli4_hba.dpp_regs_memmap_p);
break;
case LPFC_SLI_INTF_IF_TYPE_1:
default:
@@ -10370,7 +10623,7 @@
if (!pmb) {
rc = -ENOMEM;
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0474 Unable to allocate memory for issuing "
"MBOX_CONFIG_MSI command\n");
goto mem_fail_out;
@@ -10453,6 +10706,7 @@
/**
* lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
* @phba: pointer to lpfc hba data structure.
+ * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
*
* This routine is invoked to enable device interrupt and associate driver's
* interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
@@ -10561,7 +10815,6 @@
*/
if ((match == LPFC_FIND_BY_EQ) &&
(cpup->flag & LPFC_CPU_FIRST_IRQ) &&
- (cpup->irq != LPFC_VECTOR_MAP_EMPTY) &&
(cpup->eq == id))
return cpu;
@@ -10599,6 +10852,75 @@
}
#endif
+/**
+ * lpfc_assign_eq_map_info - Assigns eq for vector_map structure
+ * @phba: pointer to lpfc hba data structure.
+ * @eqidx: index for eq and irq vector
+ * @flag: flags to set for vector_map structure
+ * @cpu: cpu used to index vector_map structure
+ *
+ * The routine assigns eq info into vector_map structure
+ */
+static inline void
+lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag,
+ unsigned int cpu)
+{
+ struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu];
+ struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx);
+
+ cpup->eq = eqidx;
+ cpup->flag |= flag;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n",
+ cpu, eqhdl->irq, cpup->eq, cpup->flag);
+}
+
+/**
+ * lpfc_cpu_map_array_init - Initialize cpu_map structure
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * The routine initializes the cpu_map array structure
+ */
+static void
+lpfc_cpu_map_array_init(struct lpfc_hba *phba)
+{
+ struct lpfc_vector_map_info *cpup;
+ struct lpfc_eq_intr_info *eqi;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+ cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
+ cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
+ cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
+ cpup->eq = LPFC_VECTOR_MAP_EMPTY;
+ cpup->flag = 0;
+ eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu);
+ INIT_LIST_HEAD(&eqi->list);
+ eqi->icnt = 0;
+ }
+}
+
+/**
+ * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * The routine initializes the hba_eq_hdl array structure
+ */
+static void
+lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba)
+{
+ struct lpfc_hba_eq_hdl *eqhdl;
+ int i;
+
+ for (i = 0; i < phba->cfg_irq_chann; i++) {
+ eqhdl = lpfc_get_eq_hdl(i);
+ eqhdl->irq = LPFC_VECTOR_MAP_EMPTY;
+ eqhdl->phba = phba;
+ }
+}
+
/**
* lpfc_cpu_affinity_check - Check vector CPU affinity mappings
* @phba: pointer to lpfc hba data structure.
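The two init helpers above reset every cpu_map slot to the LPFC_VECTOR_MAP_EMPTY sentinel so later passes can tell real assignments from untouched entries — including CPUs that are possible but not present until a hot-add. A condensed model of that sentinel-plus-round-robin flow; the sizes and the sentinel value are stand-ins:

#include <stdio.h>

#define MAP_EMPTY 0xffff		/* LPFC_VECTOR_MAP_EMPTY analogue */
#define NPOSSIBLE 8			/* possible CPUs (some not present) */

struct map { unsigned short eq, hdwq; };

int main(void)
{
	struct map cpu_map[NPOSSIBLE];
	int hdw_queues = 3, idx = 0;

	/* Pass 1: everything starts as "unassigned". */
	for (int cpu = 0; cpu < NPOSSIBLE; cpu++)
		cpu_map[cpu].eq = cpu_map[cpu].hdwq = MAP_EMPTY;

	/* Pass 2: pretend CPUs 0..3 are present and got real
	 * EQ/hdwq assignments from the affinity pass. */
	for (int cpu = 0; cpu < 4; cpu++) {
		cpu_map[cpu].eq = cpu;
		cpu_map[cpu].hdwq = cpu % hdw_queues;
	}

	/* Pass 3: any slot still EMPTY (a hot-addable CPU) gets a
	 * round-robin hdwq, like the not-present-cpu loop below. */
	for (int cpu = 0; cpu < NPOSSIBLE; cpu++) {
		if (cpu_map[cpu].hdwq != MAP_EMPTY)
			continue;
		cpu_map[cpu].hdwq = idx++ % hdw_queues;
	}

	for (int cpu = 0; cpu < NPOSSIBLE; cpu++)
		printf("cpu %d: eq %u hdwq %u\n", cpu,
		       (unsigned)cpu_map[cpu].eq,
		       (unsigned)cpu_map[cpu].hdwq);
	return 0;
}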
@@ -10617,21 +10939,12 @@
int max_core_id, min_core_id;
struct lpfc_vector_map_info *cpup;
struct lpfc_vector_map_info *new_cpup;
- const struct cpumask *maskp;
#ifdef CONFIG_X86
struct cpuinfo_x86 *cpuinfo;
#endif
-
- /* Init cpu_map array */
- for_each_possible_cpu(cpu) {
- cpup = &phba->sli4_hba.cpu_map[cpu];
- cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
- cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
- cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
- cpup->eq = LPFC_VECTOR_MAP_EMPTY;
- cpup->irq = LPFC_VECTOR_MAP_EMPTY;
- cpup->flag = 0;
- }
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ struct lpfc_hdwq_stat *c_stat;
+#endif
max_phys_id = 0;
min_phys_id = LPFC_VECTOR_MAP_EMPTY;
@@ -10668,65 +10981,6 @@
min_core_id = cpup->core_id;
}
- for_each_possible_cpu(i) {
- struct lpfc_eq_intr_info *eqi =
- per_cpu_ptr(phba->sli4_hba.eq_info, i);
-
- INIT_LIST_HEAD(&eqi->list);
- eqi->icnt = 0;
- }
-
- /* This loop sets up all CPUs that are affinitized with a
- * irq vector assigned to the driver. All affinitized CPUs
- * will get a link to that vectors IRQ and EQ.
- *
- * NULL affinity mask handling:
- * If irq count is greater than one, log an error message.
- * If the null mask is received for the first irq, find the
- * first present cpu, and assign the eq index to ensure at
- * least one EQ is assigned.
- */
- for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
- /* Get a CPU mask for all CPUs affinitized to this vector */
- maskp = pci_irq_get_affinity(phba->pcidev, idx);
- if (!maskp) {
- if (phba->cfg_irq_chann > 1)
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3329 No affinity mask found "
- "for vector %d (%d)\n",
- idx, phba->cfg_irq_chann);
- if (!idx) {
- cpu = cpumask_first(cpu_present_mask);
- cpup = &phba->sli4_hba.cpu_map[cpu];
- cpup->eq = idx;
- cpup->irq = pci_irq_vector(phba->pcidev, idx);
- cpup->flag |= LPFC_CPU_FIRST_IRQ;
- }
- break;
- }
-
- i = 0;
- /* Loop through all CPUs associated with vector idx */
- for_each_cpu_and(cpu, maskp, cpu_present_mask) {
- /* Set the EQ index and IRQ for that vector */
- cpup = &phba->sli4_hba.cpu_map[cpu];
- cpup->eq = idx;
- cpup->irq = pci_irq_vector(phba->pcidev, idx);
-
- /* If this is the first CPU thats assigned to this
- * vector, set LPFC_CPU_FIRST_IRQ.
- */
- if (!i)
- cpup->flag |= LPFC_CPU_FIRST_IRQ;
- i++;
-
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "3336 Set Affinity: CPU %d "
- "irq %d eq %d flag x%x\n",
- cpu, cpup->irq, cpup->eq, cpup->flag);
- }
- }
-
/* After looking at each irq vector assigned to this pcidev, it's
* possible to see that not ALL CPUs have been accounted for.
* Next we will set any unassigned (unaffinitized) cpu map
@@ -10752,7 +11006,7 @@
for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
- (new_cpup->irq != LPFC_VECTOR_MAP_EMPTY) &&
+ (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) &&
(new_cpup->phys_id == cpup->phys_id))
goto found_same;
new_cpu = cpumask_next(
@@ -10765,7 +11019,6 @@
found_same:
/* We found a matching phys_id, so copy the IRQ info */
cpup->eq = new_cpup->eq;
- cpup->irq = new_cpup->irq;
/* Bump start_cpu to the next slot to minimize the
* chance of having multiple unassigned CPU entries
@@ -10777,9 +11030,10 @@
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"3337 Set Affinity: CPU %d "
- "irq %d from id %d same "
+ "eq %d from peer cpu %d same "
"phys_id (%d)\n",
- cpu, cpup->irq, new_cpu, cpup->phys_id);
+ cpu, cpup->eq, new_cpu,
+ cpup->phys_id);
}
}
@@ -10803,7 +11057,7 @@
for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
- (new_cpup->irq != LPFC_VECTOR_MAP_EMPTY))
+ (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY))
goto found_any;
new_cpu = cpumask_next(
new_cpu, cpu_present_mask);
@@ -10813,13 +11067,12 @@
/* We should never leave an entry unassigned */
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3339 Set Affinity: CPU %d "
- "irq %d UNASSIGNED\n",
- cpup->hdwq, cpup->irq);
+ "eq %d UNASSIGNED\n",
+ cpup->hdwq, cpup->eq);
continue;
found_any:
/* We found an available entry, copy the IRQ info */
cpup->eq = new_cpup->eq;
- cpup->irq = new_cpup->irq;
/* Bump start_cpu to the next slot to minimize the
* chance of having multiple unassigned CPU entries
@@ -10831,8 +11084,8 @@
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"3338 Set Affinity: CPU %d "
- "irq %d from id %d (%d/%d)\n",
- cpu, cpup->irq, new_cpu,
+ "eq %d from peer cpu %d (%d/%d)\n",
+ cpu, cpup->eq, new_cpu,
new_cpup->phys_id, new_cpup->core_id);
}
}
@@ -10851,13 +11104,13 @@
/* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */
cpup->hdwq = idx;
idx++;
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"3333 Set Affinity: CPU %d (phys %d core %d): "
- "hdwq %d eq %d irq %d flg x%x\n",
+ "hdwq %d eq %d flg x%x\n",
cpu, cpup->phys_id, cpup->core_id,
- cpup->hdwq, cpup->eq, cpup->irq, cpup->flag);
+ cpup->hdwq, cpup->eq, cpup->flag);
}
- /* Finally we need to associate a hdwq with each cpu_map entry
+ /* Associate a hdwq with each cpu_map entry
* This will be 1 to 1 - hdwq to cpu, unless there are fewer
* hardware queues than CPUs. In that case we will just round-robin
* the available hardware queues as they get assigned to CPUs.
@@ -10929,11 +11182,35 @@
start_cpu = first_cpu;
cpup->hdwq = new_cpup->hdwq;
logit:
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"3335 Set Affinity: CPU %d (phys %d core %d): "
- "hdwq %d eq %d irq %d flg x%x\n",
+ "hdwq %d eq %d flg x%x\n",
cpu, cpup->phys_id, cpup->core_id,
- cpup->hdwq, cpup->eq, cpup->irq, cpup->flag);
+ cpup->hdwq, cpup->eq, cpup->flag);
+ }
+
+ /*
+ * Initialize the cpu_map slots for not-present cpus in case
+ * a cpu is hot-added. Perform a simple hdwq round robin assignment.
+ */
+ idx = 0;
+ for_each_possible_cpu(cpu) {
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu);
+ c_stat->hdwq_no = cpup->hdwq;
+#endif
+ if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY)
+ continue;
+
+ cpup->hdwq = idx++ % phba->cfg_hdw_queue;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ c_stat->hdwq_no = cpup->hdwq;
+#endif
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3340 Set Affinity: not present "
+ "CPU %d hdwq %d\n",
+ cpu, cpup->hdwq);
}
/* The cpu_map array will be used later during initialization
@@ -10947,19 +11224,21 @@
*
* @phba: pointer to lpfc hba data structure.
* @cpu: cpu going offline
- * @eqlist:
+ * @eqlist: eq list to append to
*/
-static void
+static int
lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
struct list_head *eqlist)
{
- struct lpfc_vector_map_info *map;
const struct cpumask *maskp;
struct lpfc_queue *eq;
- unsigned int i;
- cpumask_t tmp;
+ struct cpumask *tmp;
u16 idx;
+ tmp = kzalloc(cpumask_size(), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
maskp = pci_irq_get_affinity(phba->pcidev, idx);
if (!maskp)
@@ -10969,7 +11248,7 @@
* then we don't need to poll the eq attached
* to it.
*/
- if (!cpumask_and(&tmp, maskp, cpumask_of(cpu)))
+ if (!cpumask_and(tmp, maskp, cpumask_of(cpu)))
continue;
/* get the cpus that are online and are affini-
* tized to this irq vector. If the count is
@@ -10977,8 +11256,8 @@
* down this vector. Since this cpu has not
* gone offline yet, we need >1.
*/
- cpumask_and(&tmp, maskp, cpu_online_mask);
- if (cpumask_weight(&tmp) > 1)
+ cpumask_and(tmp, maskp, cpu_online_mask);
+ if (cpumask_weight(tmp) > 1)
continue;
/* Now that we have an irq to shutdown, get the eq
@@ -10986,16 +11265,11 @@
* the software can share an eq, but eventually
* only one eq will be mapped to this vector
*/
- for_each_possible_cpu(i) {
- map = &phba->sli4_hba.cpu_map[i];
- if (!(map->irq == pci_irq_vector(phba->pcidev, idx)))
- continue;
- eq = phba->sli4_hba.hdwq[map->hdwq].hba_eq;
- list_add(&eq->_poll_list, eqlist);
- /* 1 is good enough. others will be a copy of this */
- break;
- }
+ eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
+ list_add(&eq->_poll_list, eqlist);
}
+ kfree(tmp);
+ return 0;
}
static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
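Worth noting in lpfc_cpuhp_get_eq() above: the on-stack cpumask_t becomes a heap allocation, since with a large CONFIG_NR_CPUS a cpumask can run to a kilobyte while kernel stacks stay small, and the new -ENOMEM return lets the caller bail out. A toy model of the two mask tests the loop performs per vector — the mask type and helpers are invented for the example:

#include <stdio.h>
#include <stdlib.h>

typedef struct { unsigned long bits; } mask_t;	/* toy cpumask */

static int test_cpu(const mask_t *m, int cpu) { return (m->bits >> cpu) & 1; }
static int weight_and(const mask_t *a, const mask_t *b)
{
	return __builtin_popcountl(a->bits & b->bits);
}

int main(void)
{
	/* Heap allocation mirrors kzalloc(cpumask_size(), GFP_KERNEL). */
	mask_t *online = calloc(1, sizeof(*online));
	mask_t *affinity = calloc(1, sizeof(*affinity));
	int cpu = 3;			/* the CPU going offline */

	if (!online || !affinity)
		return 1;		/* the -ENOMEM path */
	online->bits = 0x0f;		/* CPUs 0-3 online */
	affinity->bits = 0x08;		/* vector affinitized to CPU 3 only */

	/* Is the dying CPU in this vector's affinity mask at all? */
	if (test_cpu(affinity, cpu) &&
	    /* ... and is it the last *online* CPU servicing the vector?
	     * The kernel loop skips the vector when the weight is > 1. */
	    weight_and(affinity, online) <= 1)
		printf("vector loses its last CPU: poll its EQ\n");

	free(online);
	free(affinity);
	return 0;
}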
@@ -11028,11 +11302,9 @@
rcu_read_lock();
- if (!list_empty(&phba->poll_list)) {
- timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
+ if (!list_empty(&phba->poll_list))
mod_timer(&phba->cpuhp_poll_timer,
jiffies + msecs_to_jiffies(LPFC_POLL_HB));
- }
rcu_read_unlock();
@@ -11056,6 +11328,99 @@
return false;
}
+/**
+ * lpfc_irq_set_aff - set IRQ affinity
+ * @eqhdl: EQ handle
+ * @cpu: cpu to set affinity
+ *
+ **/
+static inline void
+lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu)
+{
+ cpumask_clear(&eqhdl->aff_mask);
+ cpumask_set_cpu(cpu, &eqhdl->aff_mask);
+ irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
+ irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask);
+}
+
+/**
+ * lpfc_irq_clear_aff - clear IRQ affinity
+ * @eqhdl: EQ handle
+ *
+ **/
+static inline void
+lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl)
+{
+ cpumask_clear(&eqhdl->aff_mask);
+ irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
+}
+
+/**
+ * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event
+ * @phba: pointer to HBA context object.
+ * @cpu: cpu going offline/online
+ * @offline: true if the cpu is going offline, false if it is coming online.
+ *
+ * If cpu is going offline, we'll try our best effort to find the next
+ * online cpu on the phba's original_mask and migrate all offlining IRQ
+ * affinities.
+ *
+ * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu.
+ *
+ * Note: Call only if NUMA or NHT mode is enabled, otherwise rely on
+ * PCI_IRQ_AFFINITY to auto-manage IRQ affinity.
+ *
+ **/
+static void
+lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
+{
+ struct lpfc_vector_map_info *cpup;
+ struct cpumask *aff_mask;
+ unsigned int cpu_select, cpu_next, idx;
+ const struct cpumask *orig_mask;
+
+ if (phba->irq_chann_mode == NORMAL_MODE)
+ return;
+
+ orig_mask = &phba->sli4_hba.irq_aff_mask;
+
+ if (!cpumask_test_cpu(cpu, orig_mask))
+ return;
+
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+
+ if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
+ return;
+
+ if (offline) {
+ /* Find next online CPU on original mask */
+ cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true);
+ cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);
+
+ /* Found a valid CPU */
+ if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) {
+ /* Go through each eqhdl and ensure offlining
+ * cpu aff_mask is migrated
+ */
+ for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
+ aff_mask = lpfc_get_aff_mask(idx);
+
+ /* Migrate affinity */
+ if (cpumask_test_cpu(cpu, aff_mask))
+ lpfc_irq_set_aff(lpfc_get_eq_hdl(idx),
+ cpu_select);
+ }
+ } else {
+ /* Rely on irqbalance if no online CPUs left on NUMA */
+ for (idx = 0; idx < phba->cfg_irq_chann; idx++)
+ lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx));
+ }
+ } else {
+ /* Migrate affinity back to this CPU */
+ lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu);
+ }
+}
+
static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
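lpfc_irq_rebalance() above needs the next online CPU after the dying one, wrapping once around the adapter-local mask; cpumask_next_wrap()/lpfc_next_online_cpu() provide that in the driver. A standalone model of the wrap-around search, with helper names local to the example:

#include <stdio.h>
#include <stdbool.h>

#define NCPUS 8

/* Find the next online CPU strictly after 'start', wrapping once around
 * the mask; returns NCPUS (the nr_cpu_ids analogue) if none is found. */
static int next_online_wrap(const bool *in_mask, const bool *online,
			    int start)
{
	for (int i = 1; i <= NCPUS; i++) {
		int cpu = (start + i) % NCPUS;

		if (in_mask[cpu] && online[cpu])
			return cpu;
	}
	return NCPUS;
}

int main(void)
{
	bool mask[NCPUS]   = {1, 1, 1, 1, 0, 0, 0, 0};	/* NUMA-local CPUs */
	bool online[NCPUS] = {1, 0, 0, 1, 1, 1, 1, 1};
	int dying = 3;

	online[dying] = false;			/* CPU 3 goes offline */
	int target = next_online_wrap(mask, online, dying);

	if (target < NCPUS && target != dying)
		printf("migrate IRQ affinity from CPU %d to CPU %d\n",
		       dying, target);
	else
		printf("no online CPU left in mask: clear affinity hints\n");
	return 0;
}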
@@ -11071,7 +11436,11 @@
if (__lpfc_cpuhp_checks(phba, &retval))
return retval;
- lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
+ lpfc_irq_rebalance(phba, cpu, true);
+
+ retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
+ if (retval)
+ return retval;
/* start polling on these eq's */
list_for_each_entry_safe(eq, next, &eqlist, _poll_list) {
@@ -11097,6 +11466,8 @@
if (__lpfc_cpuhp_checks(phba, &retval))
return retval;
+ lpfc_irq_rebalance(phba, cpu, false);
+
list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) {
n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
if (n == cpu)
@@ -11111,7 +11482,24 @@
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked to enable the MSI-X interrupt vectors to device
- * with SLI-4 interface spec.
+ * with SLI-4 interface spec. It also allocates MSI-X vectors and maps them
+ * to cpus on the system.
+ *
+ * When cfg_irq_numa is enabled, the adapter will only allocate vectors for
+ * the number of cpus on the same numa node as this adapter. The vectors are
+ * allocated without requesting OS affinity mapping. A vector will be
+ * allocated and assigned to each online and offline cpu. If the cpu is
+ * online, then affinity will be set to that cpu. If the cpu is offline, then
+ * affinity will be set to the nearest peer cpu within the numa node that is
+ * online. If there are no online cpus within the numa node, affinity is not
+ * assigned and the OS may do as it pleases. Note: cpu vector affinity mapping
+ * is consistent with the way cpu online/offline is handled when cfg_irq_numa is
+ * configured.
+ *
+ * If numa mode is not enabled and there is more than 1 vector allocated, then
+ * the driver relies on the managed irq interface where the OS assigns vector to
+ * cpu affinity. The driver will then use that affinity mapping to setup its
+ * cpu mapping table.
*
* Return codes
* 0 - successful
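Concretely, the policy described above reduces to: in NUMA/NHT mode, cap the vector count at the adapter-local CPU count and manage affinity by hand; otherwise request managed affinity from the PCI core. A sketch of that decision, with stand-in values for the PCI_IRQ_* flag bits:

#include <stdio.h>

#define IRQ_MSIX     0x1	/* stands in for PCI_IRQ_MSIX */
#define IRQ_AFFINITY 0x2	/* stands in for PCI_IRQ_AFFINITY */

int main(void)
{
	int cfg_irq_chann = 16;		/* module parameter request */
	int numa_mode = 1;		/* irq_chann_mode != NORMAL_MODE */
	int cpus_in_aff_mask = 8;	/* cpumask_weight(aff_mask) */

	unsigned int flags = IRQ_MSIX;
	int vectors = cfg_irq_chann;

	if (numa_mode) {
		/* Driver pins vectors itself: no managed affinity, and
		 * never more vectors than adapter-local CPUs. */
		if (cpus_in_aff_mask < vectors)
			vectors = cpus_in_aff_mask;
	} else {
		/* Let the PCI core spread vectors across all CPUs. */
		flags |= IRQ_AFFINITY;
	}
	printf("request %d MSI-X vectors, flags 0x%x\n", vectors, flags);
	return 0;
}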
@@ -11122,13 +11510,33 @@
{
int vectors, rc, index;
char *name;
+ const struct cpumask *aff_mask = NULL;
+ unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids;
+ struct lpfc_vector_map_info *cpup;
+ struct lpfc_hba_eq_hdl *eqhdl;
+ const struct cpumask *maskp;
+ unsigned int flags = PCI_IRQ_MSIX;
/* Set up MSI-X multi-message vectors */
vectors = phba->cfg_irq_chann;
- rc = pci_alloc_irq_vectors(phba->pcidev,
- 1,
- vectors, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
+ if (phba->irq_chann_mode != NORMAL_MODE)
+ aff_mask = &phba->sli4_hba.irq_aff_mask;
+
+ if (aff_mask) {
+ cpu_cnt = cpumask_weight(aff_mask);
+ vectors = min(phba->cfg_irq_chann, cpu_cnt);
+
+ /* cpu: iterates over aff_mask including offline or online
+ * cpu_select: iterates over online aff_mask to set affinity
+ */
+ cpu = cpumask_first(aff_mask);
+ cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
+ } else {
+ flags |= PCI_IRQ_AFFINITY;
+ }
+
+ rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags);
if (rc < 0) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0484 PCI enable MSI-X failed (%d)\n", rc);
@@ -11138,43 +11546,92 @@
/* Assign MSI-X vectors to interrupt handlers */
for (index = 0; index < vectors; index++) {
- name = phba->sli4_hba.hba_eq_hdl[index].handler_name;
+ eqhdl = lpfc_get_eq_hdl(index);
+ name = eqhdl->handler_name;
memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
LPFC_DRIVER_HANDLER_NAME"%d", index);
- phba->sli4_hba.hba_eq_hdl[index].idx = index;
- phba->sli4_hba.hba_eq_hdl[index].phba = phba;
+ eqhdl->idx = index;
rc = request_irq(pci_irq_vector(phba->pcidev, index),
&lpfc_sli4_hba_intr_handler, 0,
- name,
- &phba->sli4_hba.hba_eq_hdl[index]);
+ name, eqhdl);
if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0486 MSI-X fast-path (%d) "
"request_irq failed (%d)\n", index, rc);
goto cfg_fail_out;
}
+
+ eqhdl->irq = pci_irq_vector(phba->pcidev, index);
+
+ if (aff_mask) {
+ /* If found a neighboring online cpu, set affinity */
+ if (cpu_select < nr_cpu_ids)
+ lpfc_irq_set_aff(eqhdl, cpu_select);
+
+ /* Assign EQ to cpu_map */
+ lpfc_assign_eq_map_info(phba, index,
+ LPFC_CPU_FIRST_IRQ,
+ cpu);
+
+ /* Iterate to next offline or online cpu in aff_mask */
+ cpu = cpumask_next(cpu, aff_mask);
+
+ /* Find next online cpu in aff_mask to set affinity */
+ cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
+ } else if (vectors == 1) {
+ cpu = cpumask_first(cpu_present_mask);
+ lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ,
+ cpu);
+ } else {
+ maskp = pci_irq_get_affinity(phba->pcidev, index);
+
+ /* Loop through all CPUs associated with vector index */
+ for_each_cpu_and(cpu, maskp, cpu_present_mask) {
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+
+ /* If this is the first CPU that's assigned to
+ * this vector, set LPFC_CPU_FIRST_IRQ.
+ *
+ * On certain platforms it's possible that irq
+ * vectors are affinitized to all the CPUs.
+ * This can result in each cpu_map.eq being set
+ * to the last vector, overwriting all the
+ * previous cpu_map.eq entries. Ensure that
+ * each vector receives a place in cpu_map.
+ * A later call to lpfc_cpu_affinity_check will
+ * ensure we are nicely balanced out.
+ */
+ if (cpup->eq != LPFC_VECTOR_MAP_EMPTY)
+ continue;
+ lpfc_assign_eq_map_info(phba, index,
+ LPFC_CPU_FIRST_IRQ,
+ cpu);
+ break;
+ }
+ }
}
if (vectors != phba->cfg_irq_chann) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3238 Reducing IO channels to match number of "
"MSI-X vectors, requested %d got %d\n",
phba->cfg_irq_chann, vectors);
if (phba->cfg_irq_chann > vectors)
phba->cfg_irq_chann = vectors;
- if (phba->nvmet_support && (phba->cfg_nvmet_mrq > vectors))
- phba->cfg_nvmet_mrq = vectors;
}
return rc;
cfg_fail_out:
/* free the irq already requested */
- for (--index; index >= 0; index--)
- free_irq(pci_irq_vector(phba->pcidev, index),
- &phba->sli4_hba.hba_eq_hdl[index]);
+ for (--index; index >= 0; index--) {
+ eqhdl = lpfc_get_eq_hdl(index);
+ lpfc_irq_clear_aff(eqhdl);
+ irq_set_affinity_hint(eqhdl->irq, NULL);
+ free_irq(eqhdl->irq, eqhdl);
+ }
/* Unconfigure MSI-X capability structure */
pci_free_irq_vectors(phba->pcidev);
@@ -11201,6 +11658,8 @@
lpfc_sli4_enable_msi(struct lpfc_hba *phba)
{
int rc, index;
+ unsigned int cpu;
+ struct lpfc_hba_eq_hdl *eqhdl;
rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1,
PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
@@ -11222,9 +11681,15 @@
return rc;
}
+ eqhdl = lpfc_get_eq_hdl(0);
+ eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
+
+ cpu = cpumask_first(cpu_present_mask);
+ lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu);
+
for (index = 0; index < phba->cfg_irq_chann; index++) {
- phba->sli4_hba.hba_eq_hdl[index].idx = index;
- phba->sli4_hba.hba_eq_hdl[index].phba = phba;
+ eqhdl = lpfc_get_eq_hdl(index);
+ eqhdl->idx = index;
}
return 0;
@@ -11233,6 +11698,7 @@
/**
* lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
* @phba: pointer to lpfc hba data structure.
+ * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
*
* This routine is invoked to enable device interrupt and associate driver's
* interrupt handler(s) to interrupt vector(s) to device with SLI-4
@@ -11282,15 +11748,21 @@
IRQF_SHARED, LPFC_DRIVER_NAME, phba);
if (!retval) {
struct lpfc_hba_eq_hdl *eqhdl;
+ unsigned int cpu;
/* Indicate initialization to INTx mode */
phba->intr_type = INTx;
intr_mode = 0;
+ eqhdl = lpfc_get_eq_hdl(0);
+ eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
+
+ cpu = cpumask_first(cpu_present_mask);
+ lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ,
+ cpu);
for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
- eqhdl = &phba->sli4_hba.hba_eq_hdl[idx];
+ eqhdl = lpfc_get_eq_hdl(idx);
eqhdl->idx = idx;
- eqhdl->phba = phba;
}
}
}
@@ -11312,14 +11784,14 @@
/* Disable the currently initialized interrupt mode */
if (phba->intr_type == MSIX) {
int index;
+ struct lpfc_hba_eq_hdl *eqhdl;
/* Free up MSI-X multi-message vectors */
for (index = 0; index < phba->cfg_irq_chann; index++) {
- irq_set_affinity_hint(
- pci_irq_vector(phba->pcidev, index),
- NULL);
- free_irq(pci_irq_vector(phba->pcidev, index),
- &phba->sli4_hba.hba_eq_hdl[index]);
+ eqhdl = lpfc_get_eq_hdl(index);
+ lpfc_irq_clear_aff(eqhdl);
+ irq_set_affinity_hint(eqhdl->irq, NULL);
+ free_irq(eqhdl->irq, eqhdl);
}
} else {
free_irq(phba->pcidev->irq, phba);
@@ -11416,17 +11888,17 @@
while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) {
if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
if (!nvmet_xri_cmpl)
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6424 NVMET XRI exchange busy "
"wait time: %d seconds.\n",
wait_time/1000);
if (!io_xri_cmpl)
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6100 IO XRI exchange busy "
"wait time: %d seconds.\n",
wait_time/1000);
if (!els_xri_cmpl)
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2878 ELS XRI exchange busy "
"wait time: %d seconds.\n",
wait_time/1000);
@@ -11615,6 +12087,7 @@
sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
+ sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters);
sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
mbx_sli4_parameters);
sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
@@ -11666,13 +12139,10 @@
}
/* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
- * accommodate 512K and 1M IOs in a single nvme buf and supply
- * enough NVME LS iocb buffers for larger connectivity counts.
+ * accommodate 512K and 1M IOs in a single nvme buf.
*/
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
- phba->cfg_iocb_cnt = 5;
- }
/* Only embed PBDE for if_type 6, PBDE support requires xib be set */
if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
@@ -11852,14 +12322,14 @@
/* Configure and enable interrupt */
intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
if (intr_mode == LPFC_INTR_ERROR) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0431 Failed to enable interrupt.\n");
error = -ENODEV;
goto out_free_sysfs_attr;
}
/* SLI-3 HBA setup */
if (lpfc_sli_hba_setup(phba)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1477 Failed to set up hba\n");
error = -ENODEV;
goto out_remove_device;
@@ -12117,7 +12587,7 @@
/* Configure and enable interrupt */
intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
if (intr_mode == LPFC_INTR_ERROR) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0430 PM resume Failed to enable interrupt\n");
return -EIO;
} else
@@ -12143,7 +12613,7 @@
static void
lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
{
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2723 PCI channel I/O abort preparing for recovery\n");
/*
@@ -12164,7 +12634,7 @@
static void
lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
{
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2710 PCI channel disable preparing for reset\n");
/* Block any management I/Os to the device */
@@ -12195,7 +12665,7 @@
static void
lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2711 PCI channel permanent disable for failure\n");
/* Block all SCSI devices' I/Os on the host */
lpfc_scsi_dev_block(phba);
@@ -12246,7 +12716,7 @@
return PCI_ERS_RESULT_DISCONNECT;
default:
/* Unknown state, prepare and request slot reset */
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0472 Unknown PCI error state: x%x\n", state);
lpfc_sli_prep_dev_for_reset(phba);
return PCI_ERS_RESULT_NEED_RESET;
@@ -12304,7 +12774,7 @@
/* Configure and enable interrupt */
intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
if (intr_mode == LPFC_INTR_ERROR) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0427 Cannot re-enable interrupt after "
"slot reset.\n");
return PCI_ERS_RESULT_DISCONNECT;
@@ -12389,35 +12859,56 @@
}
-static void
+static int
lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
const struct firmware *fw)
{
- if ((offset == ADD_STATUS_FW_NOT_SUPPORTED) ||
- (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
- magic_number != MAGIC_NUMER_G6) ||
- (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC &&
- magic_number != MAGIC_NUMER_G7))
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3030 This firmware version is not supported on "
- "this HBA model. Device:%x Magic:%x Type:%x "
- "ID:%x Size %d %zd\n",
- phba->pcidev->device, magic_number, ftype, fid,
- fsize, fw->size);
- else
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3022 FW Download failed. Device:%x Magic:%x Type:%x "
- "ID:%x Size %d %zd\n",
- phba->pcidev->device, magic_number, ftype, fid,
- fsize, fw->size);
-}
+ int rc;
+ /* Three cases: (1) FW was not supported on the detected adapter.
+ * (2) FW update has been locked out administratively.
+ * (3) Some other error during FW update.
+ * In each case, an unmaskable message is written to the console
+ * for admin diagnosis.
+ */
+ if (offset == ADD_STATUS_FW_NOT_SUPPORTED ||
+ (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
+ magic_number != MAGIC_NUMBER_G6) ||
+ (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC &&
+ magic_number != MAGIC_NUMBER_G7)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "3030 This firmware version is not supported on"
+ " this HBA model. Device:%x Magic:%x Type:%x "
+ "ID:%x Size %d %zd\n",
+ phba->pcidev->device, magic_number, ftype, fid,
+ fsize, fw->size);
+ rc = -EINVAL;
+ } else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "3021 Firmware downloads have been prohibited "
+ "by a system configuration setting on "
+ "Device:%x Magic:%x Type:%x ID:%x Size %d "
+ "%zd\n",
+ phba->pcidev->device, magic_number, ftype, fid,
+ fsize, fw->size);
+ rc = -EACCES;
+ } else {
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "3022 FW Download failed. Add Status x%x "
+ "Device:%x Magic:%x Type:%x ID:%x Size %d "
+ "%zd\n",
+ offset, phba->pcidev->device, magic_number,
+ ftype, fid, fsize, fw->size);
+ rc = -EIO;
+ }
+ return rc;
+}
/**
* lpfc_write_firmware - attempt to write a firmware image to the port
* @fw: pointer to firmware image returned from request_firmware.
- * @phba: pointer to lpfc hba data structure.
+ * @context: opaque pointer to the lpfc hba data structure (phba).
*
**/
static void
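The rewritten logger now doubles as a classifier: an unsupported image maps to -EINVAL, an administratively locked-out download to -EACCES, and anything else to -EIO, so lpfc_write_firmware() can propagate a meaningful status. A condensed model of that classification — the status codes here are placeholders, not the real ADD_STATUS_* values:

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <stdbool.h>

/* Placeholder codes standing in for the ADD_STATUS_* values. */
#define FW_NOT_SUPPORTED	0x1
#define FW_HW_DISABLED		0x2

static int classify_fw_error(uint32_t add_status, bool magic_mismatch)
{
	if (add_status == FW_NOT_SUPPORTED || magic_mismatch)
		return -EINVAL;	/* wrong image for this HBA model */
	if (add_status == FW_HW_DISABLED)
		return -EACCES;	/* downloads prohibited by config */
	return -EIO;		/* any other download failure */
}

int main(void)
{
	printf("unsupported: %d\n", classify_fw_error(FW_NOT_SUPPORTED, false));
	printf("bad magic:   %d\n", classify_fw_error(0, true));
	printf("locked out:  %d\n", classify_fw_error(FW_HW_DISABLED, false));
	printf("other:       %d\n", classify_fw_error(0x42, false));
	return 0;
}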
@@ -12447,7 +12938,7 @@
INIT_LIST_HEAD(&dma_buffer_list);
lpfc_decode_firmware_rev(phba, fwrev, 1);
if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3023 Updating Firmware, Current Version:%s "
"New Version:%s\n",
fwrev, image->revision);
@@ -12486,14 +12977,18 @@
rc = lpfc_wr_object(phba, &dma_buffer_list,
(fw->size - offset), &offset);
if (rc) {
- lpfc_log_write_firmware_error(phba, offset,
- magic_number, ftype, fid, fsize, fw);
+ rc = lpfc_log_write_firmware_error(phba, offset,
+ magic_number,
+ ftype,
+ fid,
+ fsize,
+ fw);
goto release_out;
}
}
rc = offset;
} else
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3029 Skipped Firmware update, Current "
"Version:%s New Version:%s\n",
fwrev, image->revision);
@@ -12507,14 +13002,18 @@
}
release_firmware(fw);
out:
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3024 Firmware update done: %d.\n", rc);
- return;
+ if (rc < 0)
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "3062 Firmware update error, status %d.\n", rc);
+ else
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "3024 Firmware update success: size %d.\n", rc);
}
/**
* lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
* @phba: pointer to lpfc hba data structure.
+ * @fw_upgrade: which firmware to update.
*
* This routine is called to perform Linux generic firmware upgrade on device
* that supports such feature.
@@ -12581,6 +13080,8 @@
if (!phba)
return -ENOMEM;
+ INIT_LIST_HEAD(&phba->poll_list);
+
/* Perform generic PCI device enabling operation */
error = lpfc_enable_pci_dev(phba);
if (error)
@@ -12628,10 +13129,16 @@
phba->pport = NULL;
lpfc_stop_port(phba);
+ /* Init cpu_map array */
+ lpfc_cpu_map_array_init(phba);
+
+ /* Init hba_eq_hdl array */
+ lpfc_hba_eq_hdl_array_init(phba);
+
/* Configure and enable interrupt */
intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
if (intr_mode == LPFC_INTR_ERROR) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0426 Failed to enable interrupt.\n");
error = -ENODEV;
goto out_unset_driver_resource;
@@ -12666,7 +13173,7 @@
/* Set up SLI-4 HBA */
if (lpfc_sli4_hba_setup(phba)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1421 Failed to set up hba\n");
error = -ENODEV;
goto out_free_sysfs_attr;
@@ -12691,7 +13198,7 @@
*/
error = lpfc_nvme_create_localport(vport);
if (error) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6004 NVME registration "
"failed, error x%x\n",
error);
@@ -12709,7 +13216,7 @@
/* Enable RAS FW log support */
lpfc_sli4_ras_setup(phba);
- INIT_LIST_HEAD(&phba->poll_list);
+ timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
return 0;
@@ -12924,7 +13431,7 @@
/* Configure and enable interrupt */
intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
if (intr_mode == LPFC_INTR_ERROR) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0294 PM resume Failed to enable interrupt\n");
return -EIO;
} else
@@ -12950,7 +13457,7 @@
static void
lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
{
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2828 PCI channel I/O abort preparing for recovery\n");
/*
* There may be errored I/Os through HBA, abort all I/Os on txcmplq
@@ -12970,7 +13477,7 @@
static void
lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
{
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2826 PCI channel disable preparing for reset\n");
/* Block any management I/Os to the device */
@@ -13002,7 +13509,7 @@
static void
lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2827 PCI channel permanent disable for failure\n");
/* Block all SCSI devices' I/Os on the host */
@@ -13052,7 +13559,7 @@
return PCI_ERS_RESULT_DISCONNECT;
default:
/* Unknown state, prepare and request slot reset */
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2825 Unknown PCI error state: x%x\n", state);
lpfc_sli4_prep_dev_for_reset(phba);
return PCI_ERS_RESULT_NEED_RESET;
@@ -13110,7 +13617,7 @@
/* Configure and enable interrupt */
intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
if (intr_mode == LPFC_INTR_ERROR) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2824 Cannot re-enable interrupt after "
"slot reset.\n");
return PCI_ERS_RESULT_DISCONNECT;
@@ -13215,7 +13722,7 @@
lpfc_pci_remove_one_s4(pdev);
break;
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1424 Invalid PCI device group: 0x%x\n",
phba->pci_dev_grp);
break;
@@ -13252,7 +13759,7 @@
rc = lpfc_pci_suspend_one_s4(pdev, msg);
break;
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1425 Invalid PCI device group: 0x%x\n",
phba->pci_dev_grp);
break;
@@ -13288,7 +13795,7 @@
rc = lpfc_pci_resume_one_s4(pdev);
break;
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1426 Invalid PCI device group: 0x%x\n",
phba->pci_dev_grp);
break;
@@ -13326,7 +13833,7 @@
rc = lpfc_io_error_detected_s4(pdev, state);
break;
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1427 Invalid PCI device group: 0x%x\n",
phba->pci_dev_grp);
break;
@@ -13363,7 +13870,7 @@
rc = lpfc_io_slot_reset_s4(pdev);
break;
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1428 Invalid PCI device group: 0x%x\n",
phba->pci_dev_grp);
break;
@@ -13395,7 +13902,7 @@
lpfc_io_resume_s4(pdev);
break;
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"1429 Invalid PCI device group: 0x%x\n",
phba->pci_dev_grp);
break;
@@ -13424,8 +13931,7 @@
phba->cfg_fof = 1;
} else {
phba->cfg_fof = 0;
- if (phba->device_data_mem_pool)
- mempool_destroy(phba->device_data_mem_pool);
+ mempool_destroy(phba->device_data_mem_pool);
phba->device_data_mem_pool = NULL;
}
@@ -13504,25 +14010,26 @@
{
int error = 0;
- printk(LPFC_MODULE_DESC "\n");
- printk(LPFC_COPYRIGHT "\n");
+ pr_info(LPFC_MODULE_DESC "\n");
+ pr_info(LPFC_COPYRIGHT "\n");
error = misc_register(&lpfc_mgmt_dev);
if (error)
printk(KERN_ERR "Could not register lpfcmgmt device, "
"misc_register returned with status %d", error);
+ error = -ENOMEM;
lpfc_transport_functions.vport_create = lpfc_vport_create;
lpfc_transport_functions.vport_delete = lpfc_vport_delete;
lpfc_transport_template =
fc_attach_transport(&lpfc_transport_functions);
if (lpfc_transport_template == NULL)
- return -ENOMEM;
+ goto unregister;
lpfc_vport_transport_template =
fc_attach_transport(&lpfc_vport_transport_functions);
if (lpfc_vport_transport_template == NULL) {
fc_release_transport(lpfc_transport_template);
- return -ENOMEM;
+ goto unregister;
}
lpfc_nvme_cmd_template();
lpfc_nvmet_cmd_template();
@@ -13548,10 +14055,91 @@
cpuhp_failure:
fc_release_transport(lpfc_transport_template);
fc_release_transport(lpfc_vport_transport_template);
+unregister:
+ misc_deregister(&lpfc_mgmt_dev);
return error;
}
+void lpfc_dmp_dbg(struct lpfc_hba *phba)
+{
+ unsigned int start_idx;
+ unsigned int dbg_cnt;
+ unsigned int temp_idx;
+ int i;
+ int j = 0;
+ unsigned long rem_nsec;
+
+ if (phba->cfg_log_verbose)
+ return;
+
+ if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0)
+ return;
+
+ start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ;
+ dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt);
+ temp_idx = start_idx;
+ if (dbg_cnt >= DBG_LOG_SZ) {
+ dbg_cnt = DBG_LOG_SZ;
+ temp_idx -= 1;
+ } else {
+ if ((start_idx + dbg_cnt) > (DBG_LOG_SZ - 1)) {
+ temp_idx = (start_idx + dbg_cnt) % DBG_LOG_SZ;
+ } else {
+ if (start_idx < dbg_cnt)
+ start_idx = DBG_LOG_SZ - (dbg_cnt - start_idx);
+ else
+ start_idx -= dbg_cnt;
+ }
+ }
+ dev_info(&phba->pcidev->dev, "start %d end %d cnt %d\n",
+ start_idx, temp_idx, dbg_cnt);
+
+ for (i = 0; i < dbg_cnt; i++) {
+ if ((start_idx + i) < DBG_LOG_SZ)
+ temp_idx = (start_idx + i) % DBG_LOG_SZ;
+ else
+ temp_idx = j++;
+ rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC);
+ dev_info(&phba->pcidev->dev, "%d: [%5lu.%06lu] %s",
+ temp_idx,
+ (unsigned long)phba->dbg_log[temp_idx].t_ns,
+ rem_nsec / 1000,
+ phba->dbg_log[temp_idx].log);
+ }
+ atomic_set(&phba->dbg_log_cnt, 0);
+ atomic_set(&phba->dbg_log_dmping, 0);
+}
+
+__printf(2, 3)
+void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...)
+{
+ unsigned int idx;
+ va_list args;
+ int dbg_dmping = atomic_read(&phba->dbg_log_dmping);
+ struct va_format vaf;
+
+
+ va_start(args, fmt);
+ if (unlikely(dbg_dmping)) {
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ dev_info(&phba->pcidev->dev, "%pV", &vaf);
+ va_end(args);
+ return;
+ }
+ idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) %
+ DBG_LOG_SZ;
+
+ atomic_inc(&phba->dbg_log_cnt);
+
+ vscnprintf(phba->dbg_log[idx].log,
+ sizeof(phba->dbg_log[idx].log), fmt, args);
+ va_end(args);
+
+ phba->dbg_log[idx].t_ns = local_clock();
+}
+
/**
* lpfc_exit - lpfc module removal routine
*
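Taken together, lpfc_dbg_print() and lpfc_dmp_dbg() above form an in-memory message ring: producers fetch-and-add dbg_log_idx to claim a slot and vscnprintf() into it, and the dump walks the last dbg_log_cnt entries oldest-first. A self-contained user-space model of the same scheme — single-threaded, so plain counters stand in for the atomics, and the gcc format attribute plays the role of __printf():

#include <stdarg.h>
#include <stdio.h>

#define DBG_SZ 4			/* stands in for DBG_LOG_SZ */

static struct { char log[64]; } dbg[DBG_SZ];
static unsigned int dbg_idx;		/* total writes (dbg_log_idx) */
static unsigned int dbg_cnt;		/* writes since last dump (dbg_log_cnt) */

/* format(printf, 1, 2) is the userspace spelling of __printf(1, 2):
 * the compiler checks callers' format strings against their arguments. */
__attribute__((format(printf, 1, 2)))
static void dbg_print(const char *fmt, ...)
{
	unsigned int idx = dbg_idx++ % DBG_SZ;	/* atomic_fetch_add analogue */
	va_list args;

	va_start(args, fmt);
	vsnprintf(dbg[idx].log, sizeof(dbg[idx].log), fmt, args);
	va_end(args);
	dbg_cnt++;
}

static void dbg_dump(void)
{
	unsigned int cnt = dbg_cnt;

	if (cnt > DBG_SZ)		/* ring wrapped: only DBG_SZ survive */
		cnt = DBG_SZ;
	/* The oldest surviving entry sits cnt slots behind the write index. */
	for (unsigned int i = 0; i < cnt; i++) {
		unsigned int slot = (dbg_idx - cnt + i) % DBG_SZ;

		printf("%u: %s", slot, dbg[slot].log);
	}
	dbg_cnt = 0;			/* atomic_set(&dbg_log_cnt, 0) */
}

int main(void)
{
	for (int i = 0; i < 6; i++)	/* six writes wrap the 4-slot ring */
		dbg_print("event %d\n", i);
	dbg_dump();			/* prints events 2..5, oldest first */
	return 0;
}

The driver's extra wrinkle, not modeled here, is that a dump in progress (dbg_log_dmping) makes concurrent dbg_print() callers fall through to dev_info() instead of writing the ring.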