/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/bitops.h>
#include <linux/crash_dump.h>
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
#include "lpfc_ids.h"

static enum cpuhp_state lpfc_cpuhp_state;
/* Used when mapping IRQ vectors in a driver centric manner */
static uint32_t lpfc_present_cpu;

static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_add(struct lpfc_hba *phba);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
#define LPFC_NVMET_BUF_POST 254

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

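			/* One-time setup: byte-swap the ASCII key into
			 * big-endian words before it is copied into the
			 * READ_NVPARAMS mailbox region below.
			 */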
			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	/*
	 * Clear all option bits except LPFC_SLI3_BG_ENABLED,
	 * which was already set in lpfc_get_cfgparam()
	 */
	phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}


	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
			sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;

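		/* Clamp the copy length so the accumulated VPD never
		 * overruns the DMP_VPD_SIZE buffer allocated above.
		 */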
		i = mb->un.varDmp.word_cnt * sizeof(uint32_t);
		if (offset + i > DMP_VPD_SIZE)
			i = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset, i);
		offset += i;
	} while (offset < DMP_VPD_SIZE);

	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's mailbox command that
 * configures asynchronous event notification on the device. If the mailbox
 * command returns successfully, the internal async event support flag is
 * set to 1; otherwise, it is set to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command used to get
 * wake up parameters. When this command completes, the response contains the
 * Option ROM version of the HBA. This function translates the version number
 * into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
			 prg->ver, prg->rev, prg->lev);
	else
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
			 prg->ver, prg->rev, prg->lev,
			 dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 *	cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 *
 *
 * Return codes
 *	None.
 **/
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
	u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0];

	/* If the soft name exists then update it using the service params */
	if (vport->phba->cfg_soft_wwnn)
		u64_to_wwn(vport->phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (vport->phba->cfg_soft_wwpn)
		u64_to_wwn(vport->phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);

	/*
	 * If the name is empty or there exists a soft name
	 * then copy the service params name, otherwise use the fc name
	 */
	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
		       sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
		       sizeof(struct lpfc_name));

	/*
	 * If the port name has changed, then set the Param changes flag
	 * to unreg the login
	 */
	if (vport->fc_portname.u.wwn[0] != 0 &&
	    memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
		   sizeof(struct lpfc_name)))
		vport->vport_flag |= FAWWPN_PARAM_CHG;

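	/* Adopt the service-param port name when none is set yet, when a
	 * soft WWPN is configured, or when a fabric-assigned WWPN applies.
	 */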
	if (vport->fc_portname.u.wwn[0] == 0 ||
	    vport->phba->cfg_soft_wwpn ||
	    (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) ||
	    vport->vport_flag & FAWWPN_SET) {
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
		       sizeof(struct lpfc_name));
		vport->vport_flag &= ~FAWWPN_SET;
		if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR)
			vport->vport_flag |= FAWWPN_SET;
	}
	else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
		       sizeof(struct lpfc_name));
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * overheated anymore.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID. */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *)pmb->ctx_buf;

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->ctx_buf = NULL;
	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
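		/* Expand each nibble of the 6-byte IEEE address into a
		 * character: 0-9 map to '0'-'9', 10-15 to 'a'-'f'.
		 */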
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3359 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth,
				mb->un.varRdConfig.max_xri);
		phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri;
	}

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * timeout));
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2599 Adapter failed to issue DOWN_LINK"
					" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		mempool_free(pmb, phba->mbox_mem_pool);
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
		if (rc)
			return rc;
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
static int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}

/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
			       uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

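	/* Reject any user link speed that this adapter's link-mask (lmt)
	 * does not advertise; the speed is reset to auto-negotiate below.
	 */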
	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
	     !(phba->lmt & LMT_1Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
	     !(phba->lmt & LMT_2Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
	     !(phba->lmt & LMT_4Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
	     !(phba->lmt & LMT_8Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
	     !(phba->lmt & LMT_10Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
	     !(phba->lmt & LMT_16Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
	     !(phba->lmt & LMT_32Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
	     !(phba->lmt & LMT_64Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1302 Invalid speed for this board:%d "
				"Reset link speed to auto.\n",
				phba->cfg_link_speed);
		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}
	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0498 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
static int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2522 Adapter failed to issue DOWN_LINK"
				" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
 * rspiocb which got deferred
 *
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup completed slow path events after HBA is reset
 * when bringing down the SLI Layer.
 *
 *
 * Return codes
 *   void.
 **/
static void
lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *rspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irq(&phba->hbalock);

	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Get the response iocb from the head of work queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);

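		/* Return the event to its source pool: completed WQEs
		 * release their iocbq, receive events free the DMA buffer.
		 */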
		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			rspiocbq = container_of(cq_event, struct lpfc_iocbq,
						cq_event);
			lpfc_sli_release_iocbq(phba, rspiocbq);
			break;
		case CQE_CODE_RECEIVE:
		case CQE_CODE_RECEIVE_V1:
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
		}
	}
}

/**
 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup posted ELS buffers after the HBA is reset
 * when bringing down the SLI Layer.
 *
 *
 * Return codes
 *   void.
 **/
static void
lpfc_hba_free_post_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(buflist);
	int count;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->sli3_ring[LPFC_ELS_RING];
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&pring->postbufq, &buflist);
		spin_unlock_irq(&phba->hbalock);

		count = 0;
		list_for_each_entry_safe(mp, next_mp, &buflist, list) {
			list_del(&mp->list);
			count++;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}

		spin_lock_irq(&phba->hbalock);
		pring->postbufq_cnt -= count;
		spin_unlock_irq(&phba->hbalock);
	}
}

/**
 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup the txcmplq after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   void
 **/
static void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	LIST_HEAD(completions);
	int i;
	struct lpfc_iocbq *piocb, *next_iocb;

	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			spin_lock_irq(&phba->hbalock);
			/* At this point in time the HBA is either reset or
			 * DOA. Nothing should be on txcmplq as it will
			 * NEVER complete.
			 */
			list_splice_init(&pring->txcmplq, &completions);
			pring->txcmplq_cnt = 0;
			spin_unlock_irq(&phba->hbalock);

			lpfc_sli_abort_iocb_ring(phba, pring);
		}
		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions,
				      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
		return;
	}
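	/* SLI4: flush the txcmplq of every hardware work queue. */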
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		spin_lock_irq(&pring->ring_lock);
		list_for_each_entry_safe(piocb, next_iocb,
					 &pring->txcmplq, list)
			piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);
	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *psb, *psb_next;
	struct lpfc_async_xchg_ctx *ctxp, *ctxp_next;
	struct lpfc_sli4_hdw_queue *qp;
	LIST_HEAD(aborts);
	LIST_HEAD(nvme_aborts);
	LIST_HEAD(nvmet_aborts);
	struct lpfc_sglq *sglq_entry = NULL;
	int cnt, idx;


	lpfc_sli_hbqbuf_free_all(phba);
	lpfc_hba_clean_txcmplq(phba);

	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_els_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);  /* required for lpfc_els_sgl_list and */
					/* scsi_buf_list */
	/* sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			 &phba->sli4_hba.lpfc_els_sgl_list);


	spin_unlock(&phba->sli4_hba.sgl_list_lock);

	/* abts_xxxx_buf_list_lock required because worker thread uses this
	 * list.
	 */
	cnt = 0;
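	/* Move each queue's aborted I/O buffers back onto its put list
	 * and clear the per-queue abort counters.
	 */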
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];

		spin_lock(&qp->abts_io_buf_list_lock);
		list_splice_init(&qp->lpfc_abts_io_buf_list,
				 &aborts);

		list_for_each_entry_safe(psb, psb_next, &aborts, list) {
			psb->pCmd = NULL;
			psb->status = IOSTAT_SUCCESS;
			cnt++;
		}
		spin_lock(&qp->io_buf_list_put_lock);
		list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
		qp->put_io_bufs += qp->abts_scsi_io_bufs;
		qp->put_io_bufs += qp->abts_nvme_io_bufs;
		qp->abts_scsi_io_bufs = 0;
		qp->abts_nvme_io_bufs = 0;
		spin_unlock(&qp->io_buf_list_put_lock);
		spin_unlock(&qp->abts_io_buf_list_lock);
	}
	spin_unlock_irq(&phba->hbalock);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 &nvmet_aborts);
		spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
			ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP);
			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
		}
	}

	lpfc_sli4_free_sp_events(phba);
	return cnt;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = from_timer(phba, t, hb_tmofunc);

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodical operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = from_timer(phba, t, rrq_tmr);
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	if (!(phba->pport->load_flag & FC_UNLOADING))
		phba->hba_flag |= HBA_RRQ_ACTIVE;
	else
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!(phba->pport->load_flag & FC_UNLOADING))
		lpfc_worker_wake_up(phba);
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up heart-beat
 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
 * heart-beat outstanding state. Once the mailbox command comes back and
 * no error conditions detected, the heart-beat mailbox command timer is
 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expired with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hb_outstanding = 0;
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
		!(phba->link_state == LPFC_HBA_ERROR) &&
		!(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			  jiffies +
			  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	return;
}

/*
 * lpfc_idle_stat_delay_work - idle_stat tracking
 *
 * This routine tracks per-cq idle_stat and determines polling decisions.
 *
 * Return codes:
 *   None
 **/
static void
lpfc_idle_stat_delay_work(struct work_struct *work)
{
	struct lpfc_hba *phba = container_of(to_delayed_work(work),
					     struct lpfc_hba,
					     idle_stat_delay_work);
	struct lpfc_queue *cq;
	struct lpfc_sli4_hdw_queue *hdwq;
	struct lpfc_idle_stat *idle_stat;
	u32 i, idle_percent;
	u64 wall, wall_idle, diff_wall, diff_idle, busy_time;

	if (phba->pport->load_flag & FC_UNLOADING)
		return;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    phba->pport->fc_flag & FC_OFFLINE_MODE)
		goto requeue;

	for_each_present_cpu(i) {
		hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
		cq = hdwq->io_cq;

		/* Skip if we've already handled this cq's primary CPU */
		if (cq->chann != i)
			continue;

		idle_stat = &phba->sli4_hba.idle_stat[i];

		/* get_cpu_idle_time returns values as running counters. Thus,
		 * to know the amount for this period, the prior counter values
		 * need to be subtracted from the current counter values.
		 * From there, the idle time stat can be calculated as a
		 * percentage of 100 - the sum of the other consumption times.
		 */
		wall_idle = get_cpu_idle_time(i, &wall, 1);
		diff_idle = wall_idle - idle_stat->prev_idle;
		diff_wall = wall - idle_stat->prev_wall;

		if (diff_wall <= diff_idle)
			busy_time = 0;
		else
			busy_time = diff_wall - diff_idle;

		idle_percent = div64_u64(100 * busy_time, diff_wall);
		idle_percent = 100 - idle_percent;

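		/* A mostly-busy CPU (under 15% idle) defers completions to
		 * the workqueue; an idle one polls from the IRQ path.
		 */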
		if (idle_percent < 15)
			cq->poll_mode = LPFC_QUEUE_WORK;
		else
			cq->poll_mode = LPFC_IRQ_POLL;

		idle_stat->prev_idle = wall_idle;
		idle_stat->prev_wall = wall;
	}

requeue:
	schedule_delayed_work(&phba->idle_stat_delay_work,
			      msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
}

static void
lpfc_hb_eq_delay_work(struct work_struct *work)
{
	struct lpfc_hba *phba = container_of(to_delayed_work(work),
					     struct lpfc_hba, eq_delay_work);
	struct lpfc_eq_intr_info *eqi, *eqi_new;
	struct lpfc_queue *eq, *eq_next;
	unsigned char *ena_delay = NULL;
	uint32_t usdelay;
	int i;

	if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
		return;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    phba->pport->fc_flag & FC_OFFLINE_MODE)
		goto requeue;

	ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay),
			    GFP_KERNEL);
	if (!ena_delay)
		goto requeue;

	for (i = 0; i < phba->cfg_irq_chann; i++) {
		/* Get the EQ corresponding to the IRQ vector */
		eq = phba->sli4_hba.hba_eq_hdl[i].eq;
		if (!eq)
			continue;
		if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) {
			eq->q_flag &= ~HBA_EQ_DELAY_CHK;
			ena_delay[eq->last_cpu] = 1;
		}
	}

	for_each_present_cpu(i) {
		eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
		if (ena_delay[i]) {
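			/* Scale the accumulated interrupt count (in units of
			 * 1024 interrupts) into a coalescing delay, capped at
			 * the maximum below.
			 */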
			usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP;
			if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
				usdelay = LPFC_MAX_AUTO_EQ_DELAY;
		} else {
			usdelay = 0;
		}

		eqi->icnt = 0;

		list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
			if (unlikely(eq->last_cpu != i)) {
				eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
						      eq->last_cpu);
				list_move_tail(&eq->cpu_list, &eqi_new->list);
				continue;
			}
			if (usdelay != eq->q_mode)
				lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
							 usdelay);
		}
	}

	kfree(ena_delay);

requeue:
	queue_delayed_work(phba->wq, &phba->eq_delay_work,
			   msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
}

/**
 * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution
 * @phba: pointer to lpfc hba data structure.
 *
 * For each heartbeat, this routine applies some heuristics to adjust the
 * XRI distribution. The goal is to fully utilize free XRIs.
1362 **/
1363static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
1364{
1365 u32 i;
1366 u32 hwq_count;
1367
1368 hwq_count = phba->cfg_hdw_queue;
1369 for (i = 0; i < hwq_count; i++) {
1370 /* Adjust XRIs in private pool */
1371 lpfc_adjust_pvt_pool_count(phba, i);
1372
1373 /* Adjust high watermark */
1374 lpfc_adjust_high_watermark(phba, i);
1375
1376#ifdef LPFC_MXP_STAT
1377 /* Snapshot pbl, pvt and busy count */
1378 lpfc_snapshot_mxp(phba, i);
1379#endif
1380 }
1381}
1382
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001383/**
1384 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
1385 * @phba: pointer to lpfc hba data structure.
1386 *
1387 * This is the actual HBA-timer timeout handler to be invoked by the worker
1388 * thread whenever the HBA timer fired and HBA-timeout event posted. This
1389 * handler performs any periodic operations needed for the device. If such
1390 * periodic event has already been attended to either in the interrupt handler
1391 * or by processing slow-ring or fast-ring events within the HBA-timer
1392 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
1393 * the timer for the next timeout period. If lpfc heart-beat mailbox command
1394 * is configured and there is no heart-beat mailbox command outstanding, a
1395 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
1396 * has been a heart-beat mailbox command outstanding, the HBA shall be put
1397 * to offline.
1398 **/
1399void
1400lpfc_hb_timeout_handler(struct lpfc_hba *phba)
1401{
1402 struct lpfc_vport **vports;
1403 LPFC_MBOXQ_t *pmboxq;
1404 struct lpfc_dmabuf *buf_ptr;
1405 int retval, i;
1406 struct lpfc_sli *psli = &phba->sli;
1407 LIST_HEAD(completions);
David Brazdil0f672f62019-12-10 10:32:29 +00001408
1409 if (phba->cfg_xri_rebalancing) {
1410 /* Multi-XRI pools handler */
1411 lpfc_hb_mxp_handler(phba);
1412 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001413
1414 vports = lpfc_create_vport_work_array(phba);
1415 if (vports != NULL)
1416 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
1417 lpfc_rcv_seq_check_edtov(vports[i]);
Olivier Deprez0e641232021-09-23 10:07:05 +02001418 lpfc_fdmi_change_check(vports[i]);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001419 }
1420 lpfc_destroy_vport_work_array(phba, vports);
1421
1422 if ((phba->link_state == LPFC_HBA_ERROR) ||
1423 (phba->pport->load_flag & FC_UNLOADING) ||
1424 (phba->pport->fc_flag & FC_OFFLINE_MODE))
1425 return;
1426
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001427 spin_lock_irq(&phba->pport->work_port_lock);
1428
1429 if (time_after(phba->last_completion_time +
1430 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
1431 jiffies)) {
1432 spin_unlock_irq(&phba->pport->work_port_lock);
1433 if (!phba->hb_outstanding)
1434 mod_timer(&phba->hb_tmofunc,
1435 jiffies +
1436 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
1437 else
1438 mod_timer(&phba->hb_tmofunc,
1439 jiffies +
1440 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
1441 return;
1442 }
1443 spin_unlock_irq(&phba->pport->work_port_lock);
1444
1445 if (phba->elsbuf_cnt &&
1446 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
1447 spin_lock_irq(&phba->hbalock);
1448 list_splice_init(&phba->elsbuf, &completions);
1449 phba->elsbuf_cnt = 0;
1450 phba->elsbuf_prev_cnt = 0;
1451 spin_unlock_irq(&phba->hbalock);
1452
1453 while (!list_empty(&completions)) {
1454 list_remove_head(&completions, buf_ptr,
1455 struct lpfc_dmabuf, list);
1456 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
1457 kfree(buf_ptr);
1458 }
1459 }
1460 phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
1461
1462 /* If there is no heart beat outstanding, issue a heartbeat command */
1463 if (phba->cfg_enable_hba_heartbeat) {
1464 if (!phba->hb_outstanding) {
1465 if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
1466 (list_empty(&psli->mboxq))) {
1467 pmboxq = mempool_alloc(phba->mbox_mem_pool,
1468 GFP_KERNEL);
1469 if (!pmboxq) {
1470 mod_timer(&phba->hb_tmofunc,
1471 jiffies +
1472 msecs_to_jiffies(1000 *
1473 LPFC_HB_MBOX_INTERVAL));
1474 return;
1475 }
1476
1477 lpfc_heart_beat(phba, pmboxq);
1478 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
1479 pmboxq->vport = phba->pport;
1480 retval = lpfc_sli_issue_mbox(phba, pmboxq,
1481 MBX_NOWAIT);
1482
1483 if (retval != MBX_BUSY &&
1484 retval != MBX_SUCCESS) {
1485 mempool_free(pmboxq,
1486 phba->mbox_mem_pool);
1487 mod_timer(&phba->hb_tmofunc,
1488 jiffies +
1489 msecs_to_jiffies(1000 *
1490 LPFC_HB_MBOX_INTERVAL));
1491 return;
1492 }
1493 phba->skipped_hb = 0;
1494 phba->hb_outstanding = 1;
1495 } else if (time_before_eq(phba->last_completion_time,
1496 phba->skipped_hb)) {
1497 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1498 "2857 Last completion time not "
1499 " updated in %d ms\n",
1500 jiffies_to_msecs(jiffies
1501 - phba->last_completion_time));
1502 } else
1503 phba->skipped_hb = jiffies;
1504
1505 mod_timer(&phba->hb_tmofunc,
1506 jiffies +
1507 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
1508 return;
1509 } else {
1510 /*
1511 * If heart beat timeout called with hb_outstanding set
1512 * we need to give the hb mailbox cmd a chance to
1513 * complete or TMO.
1514 */
1515 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1516 "0459 Adapter heartbeat still out"
1517 "standing:last compl time was %d ms.\n",
1518 jiffies_to_msecs(jiffies
1519 - phba->last_completion_time));
1520 mod_timer(&phba->hb_tmofunc,
1521 jiffies +
1522 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
1523 }
1524 } else {
1525 mod_timer(&phba->hb_tmofunc,
1526 jiffies +
1527 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
1528 }
1529}
1530
1531/**
1532 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
1533 * @phba: pointer to lpfc hba data structure.
1534 *
1535 * This routine is called to bring the HBA offline when HBA hardware error
1536 * other than Port Error 6 has been detected.
1537 **/
1538static void
1539lpfc_offline_eratt(struct lpfc_hba *phba)
1540{
1541 struct lpfc_sli *psli = &phba->sli;
1542
1543 spin_lock_irq(&phba->hbalock);
1544 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1545 spin_unlock_irq(&phba->hbalock);
1546 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1547
1548 lpfc_offline(phba);
1549 lpfc_reset_barrier(phba);
1550 spin_lock_irq(&phba->hbalock);
1551 lpfc_sli_brdreset(phba);
1552 spin_unlock_irq(&phba->hbalock);
1553 lpfc_hba_down_post(phba);
1554 lpfc_sli_brdready(phba, HS_MBRDY);
1555 lpfc_unblock_mgmt_io(phba);
1556 phba->link_state = LPFC_HBA_ERROR;
1557 return;
1558}
1559
1560/**
1561 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
1562 * @phba: pointer to lpfc hba data structure.
1563 *
1564 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
1565 * other than Port Error 6 has been detected.
1566 **/
1567void
1568lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
1569{
1570 spin_lock_irq(&phba->hbalock);
1571 phba->link_state = LPFC_HBA_ERROR;
1572 spin_unlock_irq(&phba->hbalock);
1573
1574 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
David Brazdil0f672f62019-12-10 10:32:29 +00001575 lpfc_sli_flush_io_rings(phba);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001576 lpfc_offline(phba);
1577 lpfc_hba_down_post(phba);
1578 lpfc_unblock_mgmt_io(phba);
1579}
1580
1581/**
1582 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
1583 * @phba: pointer to lpfc hba data structure.
1584 *
1585 * This routine is invoked to handle the deferred HBA hardware error
 1586 * conditions. This type of error is indicated by the HBA setting ER1
1587 * and another ER bit in the host status register. The driver will
1588 * wait until the ER1 bit clears before handling the error condition.
1589 **/
1590static void
1591lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
1592{
1593 uint32_t old_host_status = phba->work_hs;
1594 struct lpfc_sli *psli = &phba->sli;
1595
1596 /* If the pci channel is offline, ignore possible errors,
1597 * since we cannot communicate with the pci card anyway.
1598 */
1599 if (pci_channel_offline(phba->pcidev)) {
1600 spin_lock_irq(&phba->hbalock);
1601 phba->hba_flag &= ~DEFER_ERATT;
1602 spin_unlock_irq(&phba->hbalock);
1603 return;
1604 }
1605
Olivier Deprez157378f2022-04-04 15:47:50 +02001606 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1607 "0479 Deferred Adapter Hardware Error "
1608 "Data: x%x x%x x%x\n",
1609 phba->work_hs, phba->work_status[0],
1610 phba->work_status[1]);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001611
1612 spin_lock_irq(&phba->hbalock);
1613 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1614 spin_unlock_irq(&phba->hbalock);
1615
1616
1617 /*
 1618	 * Firmware stops when it triggers the error attention, which could
 1619	 * cause it to drop I/Os. Error out the iocbs (I/Os) on the txcmplq
 1620	 * and let the SCSI layer retry them after re-establishing the link.
1621 */
1622 lpfc_sli_abort_fcp_rings(phba);
1623
1624 /*
1625 * There was a firmware error. Take the hba offline and then
1626 * attempt to restart it.
1627 */
1628 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
1629 lpfc_offline(phba);
1630
 1631	/* Wait for the ER1 bit to clear. */
1632 while (phba->work_hs & HS_FFER1) {
1633 msleep(100);
1634 if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
 1635			phba->work_hs = UNPLUG_ERR;
1636 break;
1637 }
1638 /* If driver is unloading let the worker thread continue */
1639 if (phba->pport->load_flag & FC_UNLOADING) {
1640 phba->work_hs = 0;
1641 break;
1642 }
1643 }
1644
1645 /*
 1646	 * This is to protect against a race condition in which the
 1647	 * first write to the host attention register clears the
 1648	 * host status register.
1649 */
1650 if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
1651 phba->work_hs = old_host_status & ~HS_FFER1;
1652
1653 spin_lock_irq(&phba->hbalock);
1654 phba->hba_flag &= ~DEFER_ERATT;
1655 spin_unlock_irq(&phba->hbalock);
1656 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
1657 phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
1658}
1659
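/*
 * Illustrative sketch, not driver code: the ER1 wait above follows a
 * poll-with-escape shape -- sleep briefly between reads, treat a failed
 * PCI read as an unplug, and leave the driver-unload check to the
 * caller. The helper name is hypothetical.
 */
#if 0
static uint32_t example_wait_bit_clear(struct lpfc_hba *phba, uint32_t bit)
{
	uint32_t val;

	do {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &val))
			return UNPLUG_ERR;	/* PCI read failed */
	} while (val & bit);
	return val;
}
#endif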
1660static void
1661lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1662{
1663 struct lpfc_board_event_header board_event;
1664 struct Scsi_Host *shost;
1665
1666 board_event.event_type = FC_REG_BOARD_EVENT;
1667 board_event.subcategory = LPFC_EVENT_PORTINTERR;
1668 shost = lpfc_shost_from_vport(phba->pport);
1669 fc_host_post_vendor_event(shost, fc_get_event_number(),
1670 sizeof(board_event),
1671 (char *) &board_event,
1672 LPFC_NL_VENDOR_ID);
1673}
1674
1675/**
1676 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
1677 * @phba: pointer to lpfc hba data structure.
1678 *
1679 * This routine is invoked to handle the following HBA hardware error
1680 * conditions:
1681 * 1 - HBA error attention interrupt
1682 * 2 - DMA ring index out of range
1683 * 3 - Mailbox command came back as unknown
1684 **/
1685static void
1686lpfc_handle_eratt_s3(struct lpfc_hba *phba)
1687{
1688 struct lpfc_vport *vport = phba->pport;
1689 struct lpfc_sli *psli = &phba->sli;
1690 uint32_t event_data;
1691 unsigned long temperature;
1692 struct temp_event temp_event_data;
1693 struct Scsi_Host *shost;
1694
1695 /* If the pci channel is offline, ignore possible errors,
1696 * since we cannot communicate with the pci card anyway.
1697 */
1698 if (pci_channel_offline(phba->pcidev)) {
1699 spin_lock_irq(&phba->hbalock);
1700 phba->hba_flag &= ~DEFER_ERATT;
1701 spin_unlock_irq(&phba->hbalock);
1702 return;
1703 }
1704
1705 /* If resets are disabled then leave the HBA alone and return */
1706 if (!phba->cfg_enable_hba_reset)
1707 return;
1708
1709 /* Send an internal error event to mgmt application */
1710 lpfc_board_errevt_to_mgmt(phba);
1711
1712 if (phba->hba_flag & DEFER_ERATT)
1713 lpfc_handle_deferred_eratt(phba);
1714
1715 if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
1716 if (phba->work_hs & HS_FFER6)
1717 /* Re-establishing Link */
1718 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1719 "1301 Re-establishing Link "
1720 "Data: x%x x%x x%x\n",
1721 phba->work_hs, phba->work_status[0],
1722 phba->work_status[1]);
1723 if (phba->work_hs & HS_FFER8)
1724 /* Device Zeroization */
1725 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1726 "2861 Host Authentication device "
1727 "zeroization Data:x%x x%x x%x\n",
1728 phba->work_hs, phba->work_status[0],
1729 phba->work_status[1]);
1730
1731 spin_lock_irq(&phba->hbalock);
1732 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1733 spin_unlock_irq(&phba->hbalock);
1734
1735 /*
 1736		 * Firmware stops when it triggers the error attention with
 1737		 * HS_FFER6, which could cause it to drop I/Os. Error out the
 1738		 * iocbs (I/Os) on the txcmplq and let the SCSI layer retry
 1739		 * them after re-establishing the link.
1740 */
1741 lpfc_sli_abort_fcp_rings(phba);
1742
1743 /*
1744 * There was a firmware error. Take the hba offline and then
1745 * attempt to restart it.
1746 */
1747 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1748 lpfc_offline(phba);
1749 lpfc_sli_brdrestart(phba);
1750 if (lpfc_online(phba) == 0) { /* Initialize the HBA */
1751 lpfc_unblock_mgmt_io(phba);
1752 return;
1753 }
1754 lpfc_unblock_mgmt_io(phba);
1755 } else if (phba->work_hs & HS_CRIT_TEMP) {
1756 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
1757 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1758 temp_event_data.event_code = LPFC_CRIT_TEMP;
1759 temp_event_data.data = (uint32_t)temperature;
1760
Olivier Deprez157378f2022-04-04 15:47:50 +02001761 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001762 "0406 Adapter maximum temperature exceeded "
1763 "(%ld), taking this port offline "
1764 "Data: x%x x%x x%x\n",
1765 temperature, phba->work_hs,
1766 phba->work_status[0], phba->work_status[1]);
1767
1768 shost = lpfc_shost_from_vport(phba->pport);
1769 fc_host_post_vendor_event(shost, fc_get_event_number(),
1770 sizeof(temp_event_data),
1771 (char *) &temp_event_data,
1772 SCSI_NL_VID_TYPE_PCI
1773 | PCI_VENDOR_ID_EMULEX);
1774
1775 spin_lock_irq(&phba->hbalock);
1776 phba->over_temp_state = HBA_OVER_TEMP;
1777 spin_unlock_irq(&phba->hbalock);
1778 lpfc_offline_eratt(phba);
1779
1780 } else {
1781 /* The if clause above forces this code path when the status
 1782		 * failure is a value other than FFER6. Do not take the port
 1783		 * offline twice. This is the adapter hardware error path.
1784 */
Olivier Deprez157378f2022-04-04 15:47:50 +02001785 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001786 "0457 Adapter Hardware Error "
1787 "Data: x%x x%x x%x\n",
1788 phba->work_hs,
1789 phba->work_status[0], phba->work_status[1]);
1790
1791 event_data = FC_REG_DUMP_EVENT;
1792 shost = lpfc_shost_from_vport(vport);
1793 fc_host_post_vendor_event(shost, fc_get_event_number(),
1794 sizeof(event_data), (char *) &event_data,
1795 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1796
1797 lpfc_offline_eratt(phba);
1798 }
1799 return;
1800}
1801
1802/**
1803 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
1804 * @phba: pointer to lpfc hba data structure.
1805 * @mbx_action: flag for mailbox shutdown action.
Olivier Deprez157378f2022-04-04 15:47:50 +02001806 * @en_rn_msg: send reset/port recovery message.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001807 * This routine is invoked to perform an SLI4 port PCI function reset in
1808 * response to port status register polling attention. It waits for port
1809 * status register (ERR, RDY, RN) bits before proceeding with function reset.
1810 * During this process, interrupt vectors are freed and later requested
1811 * for handling possible port resource change.
1812 **/
1813static int
1814lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
1815 bool en_rn_msg)
1816{
1817 int rc;
1818 uint32_t intr_mode;
1819
1820 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
1821 LPFC_SLI_INTF_IF_TYPE_2) {
1822 /*
 1823		 * On an error status condition, the driver needs to wait for
 1824		 * the port to become ready before performing the reset.
1825 */
1826 rc = lpfc_sli4_pdev_status_reg_wait(phba);
1827 if (rc)
1828 return rc;
1829 }
1830
1831 /* need reset: attempt for port recovery */
1832 if (en_rn_msg)
Olivier Deprez157378f2022-04-04 15:47:50 +02001833 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001834 "2887 Reset Needed: Attempting Port "
1835 "Recovery...\n");
1836 lpfc_offline_prep(phba, mbx_action);
David Brazdil0f672f62019-12-10 10:32:29 +00001837 lpfc_sli_flush_io_rings(phba);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001838 lpfc_offline(phba);
1839 /* release interrupt for possible resource change */
1840 lpfc_sli4_disable_intr(phba);
David Brazdil0f672f62019-12-10 10:32:29 +00001841 rc = lpfc_sli_brdrestart(phba);
1842 if (rc) {
Olivier Deprez157378f2022-04-04 15:47:50 +02001843 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
David Brazdil0f672f62019-12-10 10:32:29 +00001844 "6309 Failed to restart board\n");
1845 return rc;
1846 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001847 /* request and enable interrupt */
1848 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
1849 if (intr_mode == LPFC_INTR_ERROR) {
Olivier Deprez157378f2022-04-04 15:47:50 +02001850 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001851 "3175 Failed to enable interrupt\n");
1852 return -EIO;
1853 }
1854 phba->intr_mode = intr_mode;
1855 rc = lpfc_online(phba);
1856 if (rc == 0)
1857 lpfc_unblock_mgmt_io(phba);
1858
1859 return rc;
1860}
1861
1862/**
1863 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1864 * @phba: pointer to lpfc hba data structure.
1865 *
1866 * This routine is invoked to handle the SLI4 HBA hardware error attention
1867 * conditions.
1868 **/
1869static void
1870lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1871{
1872 struct lpfc_vport *vport = phba->pport;
1873 uint32_t event_data;
1874 struct Scsi_Host *shost;
1875 uint32_t if_type;
1876 struct lpfc_register portstat_reg = {0};
1877 uint32_t reg_err1, reg_err2;
1878 uint32_t uerrlo_reg, uemasklo_reg;
1879 uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
1880 bool en_rn_msg = true;
1881 struct temp_event temp_event_data;
1882 struct lpfc_register portsmphr_reg;
1883 int rc, i;
1884
1885 /* If the pci channel is offline, ignore possible errors, since
1886 * we cannot communicate with the pci card anyway.
1887 */
David Brazdil0f672f62019-12-10 10:32:29 +00001888 if (pci_channel_offline(phba->pcidev)) {
Olivier Deprez157378f2022-04-04 15:47:50 +02001889 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
David Brazdil0f672f62019-12-10 10:32:29 +00001890 "3166 pci channel is offline\n");
1891 lpfc_sli4_offline_eratt(phba);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001892 return;
David Brazdil0f672f62019-12-10 10:32:29 +00001893 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001894
1895 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
1896 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
1897 switch (if_type) {
1898 case LPFC_SLI_INTF_IF_TYPE_0:
1899 pci_rd_rc1 = lpfc_readl(
1900 phba->sli4_hba.u.if_type0.UERRLOregaddr,
1901 &uerrlo_reg);
1902 pci_rd_rc2 = lpfc_readl(
1903 phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
1904 &uemasklo_reg);
1905 /* consider PCI bus read error as pci_channel_offline */
1906 if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
1907 return;
1908 if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
1909 lpfc_sli4_offline_eratt(phba);
1910 return;
1911 }
Olivier Deprez157378f2022-04-04 15:47:50 +02001912 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001913 "7623 Checking UE recoverable");
1914
1915 for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
1916 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
1917 &portsmphr_reg.word0))
1918 continue;
1919
1920 smphr_port_status = bf_get(lpfc_port_smphr_port_status,
1921 &portsmphr_reg);
1922 if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
1923 LPFC_PORT_SEM_UE_RECOVERABLE)
1924 break;
 1925			/* Sleep for 1 second before checking the semaphore */
1926 msleep(1000);
1927 }
1928
Olivier Deprez157378f2022-04-04 15:47:50 +02001929 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001930 "4827 smphr_port_status x%x : Waited %dSec",
1931 smphr_port_status, i);
1932
1933 /* Recoverable UE, reset the HBA device */
1934 if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
1935 LPFC_PORT_SEM_UE_RECOVERABLE) {
1936 for (i = 0; i < 20; i++) {
1937 msleep(1000);
1938 if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
1939 &portsmphr_reg.word0) &&
1940 (LPFC_POST_STAGE_PORT_READY ==
1941 bf_get(lpfc_port_smphr_port_status,
1942 &portsmphr_reg))) {
1943 rc = lpfc_sli4_port_sta_fn_reset(phba,
1944 LPFC_MBX_NO_WAIT, en_rn_msg);
1945 if (rc == 0)
1946 return;
Olivier Deprez157378f2022-04-04 15:47:50 +02001947 lpfc_printf_log(phba, KERN_ERR,
1948 LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001949 "4215 Failed to recover UE");
1950 break;
1951 }
1952 }
1953 }
Olivier Deprez157378f2022-04-04 15:47:50 +02001954 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001955 "7624 Firmware not ready: Failing UE recovery,"
1956 " waited %dSec", i);
David Brazdil0f672f62019-12-10 10:32:29 +00001957 phba->link_state = LPFC_HBA_ERROR;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001958 break;
1959
1960 case LPFC_SLI_INTF_IF_TYPE_2:
1961 case LPFC_SLI_INTF_IF_TYPE_6:
1962 pci_rd_rc1 = lpfc_readl(
1963 phba->sli4_hba.u.if_type2.STATUSregaddr,
1964 &portstat_reg.word0);
1965 /* consider PCI bus read error as pci_channel_offline */
1966 if (pci_rd_rc1 == -EIO) {
Olivier Deprez157378f2022-04-04 15:47:50 +02001967 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001968 "3151 PCI bus read access failure: x%x\n",
1969 readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
David Brazdil0f672f62019-12-10 10:32:29 +00001970 lpfc_sli4_offline_eratt(phba);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001971 return;
1972 }
1973 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
1974 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
1975 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
Olivier Deprez157378f2022-04-04 15:47:50 +02001976 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1977 "2889 Port Overtemperature event, "
1978 "taking port offline Data: x%x x%x\n",
1979 reg_err1, reg_err2);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001980
1981 phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
1982 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1983 temp_event_data.event_code = LPFC_CRIT_TEMP;
1984 temp_event_data.data = 0xFFFFFFFF;
1985
1986 shost = lpfc_shost_from_vport(phba->pport);
1987 fc_host_post_vendor_event(shost, fc_get_event_number(),
1988 sizeof(temp_event_data),
1989 (char *)&temp_event_data,
1990 SCSI_NL_VID_TYPE_PCI
1991 | PCI_VENDOR_ID_EMULEX);
1992
1993 spin_lock_irq(&phba->hbalock);
1994 phba->over_temp_state = HBA_OVER_TEMP;
1995 spin_unlock_irq(&phba->hbalock);
1996 lpfc_sli4_offline_eratt(phba);
1997 return;
1998 }
1999 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2000 reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
Olivier Deprez157378f2022-04-04 15:47:50 +02002001 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002002 "3143 Port Down: Firmware Update "
2003 "Detected\n");
2004 en_rn_msg = false;
2005 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2006 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
Olivier Deprez157378f2022-04-04 15:47:50 +02002007 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002008 "3144 Port Down: Debug Dump\n");
2009 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2010 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
Olivier Deprez157378f2022-04-04 15:47:50 +02002011 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002012 "3145 Port Down: Provisioning\n");
2013
2014 /* If resets are disabled then leave the HBA alone and return */
2015 if (!phba->cfg_enable_hba_reset)
2016 return;
2017
2018 /* Check port status register for function reset */
2019 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
2020 en_rn_msg);
2021 if (rc == 0) {
2022 /* don't report event on forced debug dump */
2023 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2024 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
2025 return;
2026 else
2027 break;
2028 }
2029 /* fall through for not able to recover */
Olivier Deprez157378f2022-04-04 15:47:50 +02002030 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
David Brazdil0f672f62019-12-10 10:32:29 +00002031 "3152 Unrecoverable error\n");
2032 phba->link_state = LPFC_HBA_ERROR;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002033 break;
2034 case LPFC_SLI_INTF_IF_TYPE_1:
2035 default:
2036 break;
2037 }
2038 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2039 "3123 Report dump event to upper layer\n");
2040 /* Send an internal error event to mgmt application */
2041 lpfc_board_errevt_to_mgmt(phba);
2042
2043 event_data = FC_REG_DUMP_EVENT;
2044 shost = lpfc_shost_from_vport(vport);
2045 fc_host_post_vendor_event(shost, fc_get_event_number(),
2046 sizeof(event_data), (char *) &event_data,
2047 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
2048}
2049
2050/**
2051 * lpfc_handle_eratt - Wrapper func for handling hba error attention
2052 * @phba: pointer to lpfc HBA data structure.
2053 *
2054 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
2055 * routine from the API jump table function pointer from the lpfc_hba struct.
2056 *
2057 * Return codes
2058 * 0 - success.
2059 * Any other value - error.
2060 **/
2061void
2062lpfc_handle_eratt(struct lpfc_hba *phba)
2063{
2064 (*phba->lpfc_handle_eratt)(phba);
2065}
2066
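/*
 * Illustrative sketch, not driver code: lpfc_handle_eratt() dispatches
 * through a per-HBA function pointer that is bound when the driver's
 * API jump table is set up elsewhere in this file, keyed off the PCI
 * device group; roughly:
 */
#if 0
	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:		/* SLI3 LightPulse device */
		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
		break;
	case LPFC_PCI_DEV_OC:		/* SLI4 OneConnect device */
		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
		break;
	}
#endif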
2067/**
2068 * lpfc_handle_latt - The HBA link event handler
2069 * @phba: pointer to lpfc hba data structure.
2070 *
2071 * This routine is invoked from the worker thread to handle a HBA host
2072 * attention link event. SLI3 only.
2073 **/
2074void
2075lpfc_handle_latt(struct lpfc_hba *phba)
2076{
2077 struct lpfc_vport *vport = phba->pport;
2078 struct lpfc_sli *psli = &phba->sli;
2079 LPFC_MBOXQ_t *pmb;
2080 volatile uint32_t control;
2081 struct lpfc_dmabuf *mp;
2082 int rc = 0;
2083
2084 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2085 if (!pmb) {
2086 rc = 1;
2087 goto lpfc_handle_latt_err_exit;
2088 }
2089
2090 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2091 if (!mp) {
2092 rc = 2;
2093 goto lpfc_handle_latt_free_pmb;
2094 }
2095
2096 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
2097 if (!mp->virt) {
2098 rc = 3;
2099 goto lpfc_handle_latt_free_mp;
2100 }
2101
2102 /* Cleanup any outstanding ELS commands */
2103 lpfc_els_flush_all_cmd(phba);
2104
2105 psli->slistat.link_event++;
2106 lpfc_read_topology(phba, pmb, mp);
2107 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
2108 pmb->vport = vport;
2109 /* Block ELS IOCBs until we have processed this mbox command */
2110 phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
 2111	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2112 if (rc == MBX_NOT_FINISHED) {
2113 rc = 4;
2114 goto lpfc_handle_latt_free_mbuf;
2115 }
2116
2117 /* Clear Link Attention in HA REG */
2118 spin_lock_irq(&phba->hbalock);
2119 writel(HA_LATT, phba->HAregaddr);
2120 readl(phba->HAregaddr); /* flush */
2121 spin_unlock_irq(&phba->hbalock);
2122
2123 return;
2124
2125lpfc_handle_latt_free_mbuf:
2126 phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
2127 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2128lpfc_handle_latt_free_mp:
2129 kfree(mp);
2130lpfc_handle_latt_free_pmb:
2131 mempool_free(pmb, phba->mbox_mem_pool);
2132lpfc_handle_latt_err_exit:
2133 /* Enable Link attention interrupts */
2134 spin_lock_irq(&phba->hbalock);
2135 psli->sli_flag |= LPFC_PROCESS_LA;
2136 control = readl(phba->HCregaddr);
2137 control |= HC_LAINT_ENA;
2138 writel(control, phba->HCregaddr);
2139 readl(phba->HCregaddr); /* flush */
2140
2141 /* Clear Link Attention in HA REG */
2142 writel(HA_LATT, phba->HAregaddr);
2143 readl(phba->HAregaddr); /* flush */
2144 spin_unlock_irq(&phba->hbalock);
2145 lpfc_linkdown(phba);
2146 phba->link_state = LPFC_HBA_ERROR;
2147
Olivier Deprez157378f2022-04-04 15:47:50 +02002148 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2149 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002150
2151 return;
2152}
2153
2154/**
2155 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
2156 * @phba: pointer to lpfc hba data structure.
2157 * @vpd: pointer to the vital product data.
2158 * @len: length of the vital product data in bytes.
2159 *
2160 * This routine parses the Vital Product Data (VPD). The VPD is treated as
2161 * an array of characters. In this routine, the ModelName, ProgramType, and
 2162 * ModelDesc fields (among others) of the phba data structure will be populated.
2163 *
2164 * Return codes
2165 * 0 - pointer to the VPD passed in is NULL
2166 * 1 - success
2167 **/
2168int
2169lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
2170{
2171 uint8_t lenlo, lenhi;
2172 int Length;
2173 int i, j;
2174 int finished = 0;
2175 int index = 0;
2176
2177 if (!vpd)
2178 return 0;
2179
2180 /* Vital Product */
2181 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2182 "0455 Vital Product Data: x%x x%x x%x x%x\n",
2183 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
2184 (uint32_t) vpd[3]);
2185 while (!finished && (index < (len - 4))) {
2186 switch (vpd[index]) {
2187 case 0x82:
2188 case 0x91:
2189 index += 1;
2190 lenlo = vpd[index];
2191 index += 1;
2192 lenhi = vpd[index];
2193 index += 1;
2194 i = ((((unsigned short)lenhi) << 8) + lenlo);
2195 index += i;
2196 break;
2197 case 0x90:
2198 index += 1;
2199 lenlo = vpd[index];
2200 index += 1;
2201 lenhi = vpd[index];
2202 index += 1;
2203 Length = ((((unsigned short)lenhi) << 8) + lenlo);
2204 if (Length > len - index)
2205 Length = len - index;
2206 while (Length > 0) {
2207 /* Look for Serial Number */
2208 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
2209 index += 2;
2210 i = vpd[index];
2211 index += 1;
2212 j = 0;
2213 Length -= (3+i);
 2214				while (i--) {
2215 phba->SerialNumber[j++] = vpd[index++];
2216 if (j == 31)
2217 break;
2218 }
2219 phba->SerialNumber[j] = 0;
2220 continue;
2221 }
2222 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
2223 phba->vpd_flag |= VPD_MODEL_DESC;
2224 index += 2;
2225 i = vpd[index];
2226 index += 1;
2227 j = 0;
2228 Length -= (3+i);
 2229				while (i--) {
2230 phba->ModelDesc[j++] = vpd[index++];
2231 if (j == 255)
2232 break;
2233 }
2234 phba->ModelDesc[j] = 0;
2235 continue;
2236 }
2237 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
2238 phba->vpd_flag |= VPD_MODEL_NAME;
2239 index += 2;
2240 i = vpd[index];
2241 index += 1;
2242 j = 0;
2243 Length -= (3+i);
 2244				while (i--) {
2245 phba->ModelName[j++] = vpd[index++];
2246 if (j == 79)
2247 break;
2248 }
2249 phba->ModelName[j] = 0;
2250 continue;
2251 }
2252 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
2253 phba->vpd_flag |= VPD_PROGRAM_TYPE;
2254 index += 2;
2255 i = vpd[index];
2256 index += 1;
2257 j = 0;
2258 Length -= (3+i);
 2259				while (i--) {
2260 phba->ProgramType[j++] = vpd[index++];
2261 if (j == 255)
2262 break;
2263 }
2264 phba->ProgramType[j] = 0;
2265 continue;
2266 }
2267 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
2268 phba->vpd_flag |= VPD_PORT;
2269 index += 2;
2270 i = vpd[index];
2271 index += 1;
2272 j = 0;
2273 Length -= (3+i);
 2274				while (i--) {
2275 if ((phba->sli_rev == LPFC_SLI_REV4) &&
2276 (phba->sli4_hba.pport_name_sta ==
2277 LPFC_SLI4_PPNAME_GET)) {
2278 j++;
2279 index++;
2280 } else
2281 phba->Port[j++] = vpd[index++];
2282 if (j == 19)
2283 break;
2284 }
2285 if ((phba->sli_rev != LPFC_SLI_REV4) ||
2286 (phba->sli4_hba.pport_name_sta ==
2287 LPFC_SLI4_PPNAME_NON))
2288 phba->Port[j] = 0;
2289 continue;
2290 }
2291 else {
2292 index += 2;
2293 i = vpd[index];
2294 index += 1;
2295 index += i;
2296 Length -= (3 + i);
2297 }
2298 }
2299 finished = 0;
2300 break;
2301 case 0x78:
2302 finished = 1;
2303 break;
2304 default:
 2305			index++;
2306 break;
2307 }
2308 }
2309
 2310	return 1;
2311}
2312
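/*
 * Illustrative sketch, not driver code: the shape of the data
 * lpfc_parse_vpd() walks -- large-resource tags (0x82, 0x90) carrying a
 * little-endian 16-bit length, keyword entries of the form
 * <2-byte keyword><1-byte length><data> inside the 0x90 (VPD-R) tag,
 * and a 0x78 end tag. The bytes below are made up, not captured from
 * hardware.
 */
#if 0
static const uint8_t example_vpd[] = {
	0x82, 0x05, 0x00, 'L', 'P', 'e', '1', '2',	/* 0x82: product name */
	0x90, 0x0b, 0x00,				/* 0x90: VPD-R data */
	'S', 'N', 0x03, 'A', 'B', 'C',			/* SN: serial number */
	'V', '2', 0x02, 'L', 'P',			/* V2: model name */
	0x78,						/* 0x78: end tag */
};
/* lpfc_parse_vpd(phba, (uint8_t *)example_vpd, sizeof(example_vpd)); */
#endif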
2313/**
2314 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
2315 * @phba: pointer to lpfc hba data structure.
2316 * @mdp: pointer to the data structure to hold the derived model name.
2317 * @descp: pointer to the data structure to hold the derived description.
2318 *
2319 * This routine retrieves HBA's description based on its registered PCI device
2320 * ID. The @descp passed into this function points to an array of 256 chars. It
2321 * shall be returned with the model name, maximum speed, and the host bus type.
2322 * The @mdp passed into this function points to an array of 80 chars. When the
2323 * function returns, the @mdp will be filled with the model name.
2324 **/
2325static void
2326lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2327{
2328 lpfc_vpd_t *vp;
2329 uint16_t dev_id = phba->pcidev->device;
2330 int max_speed;
2331 int GE = 0;
2332 int oneConnect = 0; /* default is not a oneConnect */
2333 struct {
2334 char *name;
2335 char *bus;
2336 char *function;
2337 } m = {"<Unknown>", "", ""};
2338
2339 if (mdp && mdp[0] != '\0'
2340 && descp && descp[0] != '\0')
2341 return;
2342
2343 if (phba->lmt & LMT_64Gb)
2344 max_speed = 64;
2345 else if (phba->lmt & LMT_32Gb)
2346 max_speed = 32;
2347 else if (phba->lmt & LMT_16Gb)
2348 max_speed = 16;
2349 else if (phba->lmt & LMT_10Gb)
2350 max_speed = 10;
2351 else if (phba->lmt & LMT_8Gb)
2352 max_speed = 8;
2353 else if (phba->lmt & LMT_4Gb)
2354 max_speed = 4;
2355 else if (phba->lmt & LMT_2Gb)
2356 max_speed = 2;
2357 else if (phba->lmt & LMT_1Gb)
2358 max_speed = 1;
2359 else
2360 max_speed = 0;
2361
2362 vp = &phba->vpd;
2363
2364 switch (dev_id) {
2365 case PCI_DEVICE_ID_FIREFLY:
2366 m = (typeof(m)){"LP6000", "PCI",
2367 "Obsolete, Unsupported Fibre Channel Adapter"};
2368 break;
2369 case PCI_DEVICE_ID_SUPERFLY:
2370 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
2371 m = (typeof(m)){"LP7000", "PCI", ""};
2372 else
2373 m = (typeof(m)){"LP7000E", "PCI", ""};
2374 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2375 break;
2376 case PCI_DEVICE_ID_DRAGONFLY:
2377 m = (typeof(m)){"LP8000", "PCI",
2378 "Obsolete, Unsupported Fibre Channel Adapter"};
2379 break;
2380 case PCI_DEVICE_ID_CENTAUR:
2381 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
2382 m = (typeof(m)){"LP9002", "PCI", ""};
2383 else
2384 m = (typeof(m)){"LP9000", "PCI", ""};
2385 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2386 break;
2387 case PCI_DEVICE_ID_RFLY:
2388 m = (typeof(m)){"LP952", "PCI",
2389 "Obsolete, Unsupported Fibre Channel Adapter"};
2390 break;
2391 case PCI_DEVICE_ID_PEGASUS:
2392 m = (typeof(m)){"LP9802", "PCI-X",
2393 "Obsolete, Unsupported Fibre Channel Adapter"};
2394 break;
2395 case PCI_DEVICE_ID_THOR:
2396 m = (typeof(m)){"LP10000", "PCI-X",
2397 "Obsolete, Unsupported Fibre Channel Adapter"};
2398 break;
2399 case PCI_DEVICE_ID_VIPER:
2400 m = (typeof(m)){"LPX1000", "PCI-X",
2401 "Obsolete, Unsupported Fibre Channel Adapter"};
2402 break;
2403 case PCI_DEVICE_ID_PFLY:
2404 m = (typeof(m)){"LP982", "PCI-X",
2405 "Obsolete, Unsupported Fibre Channel Adapter"};
2406 break;
2407 case PCI_DEVICE_ID_TFLY:
2408 m = (typeof(m)){"LP1050", "PCI-X",
2409 "Obsolete, Unsupported Fibre Channel Adapter"};
2410 break;
2411 case PCI_DEVICE_ID_HELIOS:
2412 m = (typeof(m)){"LP11000", "PCI-X2",
2413 "Obsolete, Unsupported Fibre Channel Adapter"};
2414 break;
2415 case PCI_DEVICE_ID_HELIOS_SCSP:
2416 m = (typeof(m)){"LP11000-SP", "PCI-X2",
2417 "Obsolete, Unsupported Fibre Channel Adapter"};
2418 break;
2419 case PCI_DEVICE_ID_HELIOS_DCSP:
2420 m = (typeof(m)){"LP11002-SP", "PCI-X2",
2421 "Obsolete, Unsupported Fibre Channel Adapter"};
2422 break;
2423 case PCI_DEVICE_ID_NEPTUNE:
2424 m = (typeof(m)){"LPe1000", "PCIe",
2425 "Obsolete, Unsupported Fibre Channel Adapter"};
2426 break;
2427 case PCI_DEVICE_ID_NEPTUNE_SCSP:
2428 m = (typeof(m)){"LPe1000-SP", "PCIe",
2429 "Obsolete, Unsupported Fibre Channel Adapter"};
2430 break;
2431 case PCI_DEVICE_ID_NEPTUNE_DCSP:
2432 m = (typeof(m)){"LPe1002-SP", "PCIe",
2433 "Obsolete, Unsupported Fibre Channel Adapter"};
2434 break;
2435 case PCI_DEVICE_ID_BMID:
2436 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
2437 break;
2438 case PCI_DEVICE_ID_BSMB:
2439 m = (typeof(m)){"LP111", "PCI-X2",
2440 "Obsolete, Unsupported Fibre Channel Adapter"};
2441 break;
2442 case PCI_DEVICE_ID_ZEPHYR:
2443 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2444 break;
2445 case PCI_DEVICE_ID_ZEPHYR_SCSP:
2446 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2447 break;
2448 case PCI_DEVICE_ID_ZEPHYR_DCSP:
2449 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
2450 GE = 1;
2451 break;
2452 case PCI_DEVICE_ID_ZMID:
2453 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
2454 break;
2455 case PCI_DEVICE_ID_ZSMB:
2456 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
2457 break;
2458 case PCI_DEVICE_ID_LP101:
2459 m = (typeof(m)){"LP101", "PCI-X",
2460 "Obsolete, Unsupported Fibre Channel Adapter"};
2461 break;
2462 case PCI_DEVICE_ID_LP10000S:
2463 m = (typeof(m)){"LP10000-S", "PCI",
2464 "Obsolete, Unsupported Fibre Channel Adapter"};
2465 break;
2466 case PCI_DEVICE_ID_LP11000S:
2467 m = (typeof(m)){"LP11000-S", "PCI-X2",
2468 "Obsolete, Unsupported Fibre Channel Adapter"};
2469 break;
2470 case PCI_DEVICE_ID_LPE11000S:
2471 m = (typeof(m)){"LPe11000-S", "PCIe",
2472 "Obsolete, Unsupported Fibre Channel Adapter"};
2473 break;
2474 case PCI_DEVICE_ID_SAT:
2475 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
2476 break;
2477 case PCI_DEVICE_ID_SAT_MID:
2478 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
2479 break;
2480 case PCI_DEVICE_ID_SAT_SMB:
2481 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
2482 break;
2483 case PCI_DEVICE_ID_SAT_DCSP:
2484 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
2485 break;
2486 case PCI_DEVICE_ID_SAT_SCSP:
2487 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
2488 break;
2489 case PCI_DEVICE_ID_SAT_S:
2490 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
2491 break;
2492 case PCI_DEVICE_ID_HORNET:
2493 m = (typeof(m)){"LP21000", "PCIe",
2494 "Obsolete, Unsupported FCoE Adapter"};
2495 GE = 1;
2496 break;
2497 case PCI_DEVICE_ID_PROTEUS_VF:
2498 m = (typeof(m)){"LPev12000", "PCIe IOV",
2499 "Obsolete, Unsupported Fibre Channel Adapter"};
2500 break;
2501 case PCI_DEVICE_ID_PROTEUS_PF:
2502 m = (typeof(m)){"LPev12000", "PCIe IOV",
2503 "Obsolete, Unsupported Fibre Channel Adapter"};
2504 break;
2505 case PCI_DEVICE_ID_PROTEUS_S:
2506 m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
2507 "Obsolete, Unsupported Fibre Channel Adapter"};
2508 break;
2509 case PCI_DEVICE_ID_TIGERSHARK:
2510 oneConnect = 1;
2511 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
2512 break;
2513 case PCI_DEVICE_ID_TOMCAT:
2514 oneConnect = 1;
2515 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
2516 break;
2517 case PCI_DEVICE_ID_FALCON:
2518 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
2519 "EmulexSecure Fibre"};
2520 break;
2521 case PCI_DEVICE_ID_BALIUS:
2522 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
2523 "Obsolete, Unsupported Fibre Channel Adapter"};
2524 break;
2525 case PCI_DEVICE_ID_LANCER_FC:
2526 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
2527 break;
2528 case PCI_DEVICE_ID_LANCER_FC_VF:
2529 m = (typeof(m)){"LPe16000", "PCIe",
2530 "Obsolete, Unsupported Fibre Channel Adapter"};
2531 break;
2532 case PCI_DEVICE_ID_LANCER_FCOE:
2533 oneConnect = 1;
2534 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
2535 break;
2536 case PCI_DEVICE_ID_LANCER_FCOE_VF:
2537 oneConnect = 1;
2538 m = (typeof(m)){"OCe15100", "PCIe",
2539 "Obsolete, Unsupported FCoE"};
2540 break;
2541 case PCI_DEVICE_ID_LANCER_G6_FC:
2542 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
2543 break;
2544 case PCI_DEVICE_ID_LANCER_G7_FC:
2545 m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
2546 break;
2547 case PCI_DEVICE_ID_SKYHAWK:
2548 case PCI_DEVICE_ID_SKYHAWK_VF:
2549 oneConnect = 1;
2550 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
2551 break;
2552 default:
2553 m = (typeof(m)){"Unknown", "", ""};
2554 break;
2555 }
2556
2557 if (mdp && mdp[0] == '\0')
 2558		snprintf(mdp, 79, "%s", m.name);
2559 /*
 2560	 * OneConnect HBAs require special processing; they are all initiators
 2561	 * and we put the port number on the end.
2562 */
2563 if (descp && descp[0] == '\0') {
2564 if (oneConnect)
2565 snprintf(descp, 255,
2566 "Emulex OneConnect %s, %s Initiator %s",
2567 m.name, m.function,
2568 phba->Port);
2569 else if (max_speed == 0)
2570 snprintf(descp, 255,
2571 "Emulex %s %s %s",
2572 m.name, m.bus, m.function);
2573 else
2574 snprintf(descp, 255,
2575 "Emulex %s %d%s %s %s",
2576 m.name, max_speed, (GE) ? "GE" : "Gb",
2577 m.bus, m.function);
2578 }
2579}
2580
2581/**
2582 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
2583 * @phba: pointer to lpfc hba data structure.
2584 * @pring: pointer to a IOCB ring.
2585 * @cnt: the number of IOCBs to be posted to the IOCB ring.
2586 *
2587 * This routine posts a given number of IOCBs with the associated DMA buffer
2588 * descriptors specified by the cnt argument to the given IOCB ring.
2589 *
2590 * Return codes
2591 * The number of IOCBs NOT able to be posted to the IOCB ring.
2592 **/
2593int
2594lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
2595{
2596 IOCB_t *icmd;
2597 struct lpfc_iocbq *iocb;
2598 struct lpfc_dmabuf *mp1, *mp2;
2599
2600 cnt += pring->missbufcnt;
2601
2602 /* While there are buffers to post */
2603 while (cnt > 0) {
2604 /* Allocate buffer for command iocb */
2605 iocb = lpfc_sli_get_iocbq(phba);
2606 if (iocb == NULL) {
2607 pring->missbufcnt = cnt;
2608 return cnt;
2609 }
2610 icmd = &iocb->iocb;
2611
2612 /* 2 buffers can be posted per command */
2613 /* Allocate buffer to post */
2614 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2615 if (mp1)
2616 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
2617 if (!mp1 || !mp1->virt) {
2618 kfree(mp1);
2619 lpfc_sli_release_iocbq(phba, iocb);
2620 pring->missbufcnt = cnt;
2621 return cnt;
2622 }
2623
2624 INIT_LIST_HEAD(&mp1->list);
2625 /* Allocate buffer to post */
2626 if (cnt > 1) {
2627 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2628 if (mp2)
2629 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2630 &mp2->phys);
2631 if (!mp2 || !mp2->virt) {
2632 kfree(mp2);
2633 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2634 kfree(mp1);
2635 lpfc_sli_release_iocbq(phba, iocb);
2636 pring->missbufcnt = cnt;
2637 return cnt;
2638 }
2639
2640 INIT_LIST_HEAD(&mp2->list);
2641 } else {
2642 mp2 = NULL;
2643 }
2644
2645 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
2646 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
2647 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
2648 icmd->ulpBdeCount = 1;
2649 cnt--;
2650 if (mp2) {
2651 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
2652 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
2653 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
2654 cnt--;
2655 icmd->ulpBdeCount = 2;
2656 }
2657
2658 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
2659 icmd->ulpLe = 1;
2660
2661 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2662 IOCB_ERROR) {
2663 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2664 kfree(mp1);
2665 cnt++;
2666 if (mp2) {
2667 lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2668 kfree(mp2);
2669 cnt++;
2670 }
2671 lpfc_sli_release_iocbq(phba, iocb);
2672 pring->missbufcnt = cnt;
2673 return cnt;
2674 }
2675 lpfc_sli_ringpostbuf_put(phba, pring, mp1);
2676 if (mp2)
2677 lpfc_sli_ringpostbuf_put(phba, pring, mp2);
2678 }
2679 pring->missbufcnt = 0;
2680 return 0;
2681}
2682
2683/**
2684 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
2685 * @phba: pointer to lpfc hba data structure.
2686 *
2687 * This routine posts initial receive IOCB buffers to the ELS ring. The
2688 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
2689 * set to 64 IOCBs. SLI3 only.
2690 *
2691 * Return codes
2692 * 0 - success (currently always success)
2693 **/
2694static int
2695lpfc_post_rcv_buf(struct lpfc_hba *phba)
2696{
2697 struct lpfc_sli *psli = &phba->sli;
2698
2699 /* Ring 0, ELS / CT buffers */
2700 lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2701 /* Ring 2 - FCP no buffers needed */
2702
2703 return 0;
2704}
2705
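/* S(N, V): rotate the 32-bit value V left by N bits -- the SHA-1 ROTL primitive */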
2706#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
2707
2708/**
2709 * lpfc_sha_init - Set up initial array of hash table entries
2710 * @HashResultPointer: pointer to an array as hash table.
2711 *
 2712 * This routine sets the array of hash table entries to the standard
 2713 * SHA-1 initial hash values (H0-H4) used by the LC HBAs.
2714 **/
2715static void
2716lpfc_sha_init(uint32_t * HashResultPointer)
2717{
2718 HashResultPointer[0] = 0x67452301;
2719 HashResultPointer[1] = 0xEFCDAB89;
2720 HashResultPointer[2] = 0x98BADCFE;
2721 HashResultPointer[3] = 0x10325476;
2722 HashResultPointer[4] = 0xC3D2E1F0;
2723}
2724
2725/**
2726 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2727 * @HashResultPointer: pointer to an initial/result hash table.
2728 * @HashWorkingPointer: pointer to an working hash table.
2729 *
 2730 * This routine iterates the initial hash table pointed to by
 2731 * @HashResultPointer with the values from the working hash table pointed
 2732 * to by @HashWorkingPointer. The results are put back into the initial
 2733 * hash table and returned through @HashResultPointer as the result hash table.
2734 **/
2735static void
2736lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2737{
2738 int t;
2739 uint32_t TEMP;
2740 uint32_t A, B, C, D, E;
2741 t = 16;
2742 do {
2743 HashWorkingPointer[t] =
2744 S(1,
 2745		    HashWorkingPointer[t - 3] ^
 2746		    HashWorkingPointer[t - 8] ^
 2747		    HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2748 } while (++t <= 79);
2749 t = 0;
2750 A = HashResultPointer[0];
2751 B = HashResultPointer[1];
2752 C = HashResultPointer[2];
2753 D = HashResultPointer[3];
2754 E = HashResultPointer[4];
2755
2756 do {
2757 if (t < 20) {
2758 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2759 } else if (t < 40) {
2760 TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2761 } else if (t < 60) {
2762 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2763 } else {
2764 TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2765 }
2766 TEMP += S(5, A) + E + HashWorkingPointer[t];
2767 E = D;
2768 D = C;
2769 C = S(30, B);
2770 B = A;
2771 A = TEMP;
2772 } while (++t <= 79);
2773
2774 HashResultPointer[0] += A;
2775 HashResultPointer[1] += B;
2776 HashResultPointer[2] += C;
2777 HashResultPointer[3] += D;
2778 HashResultPointer[4] += E;
2779
2780}
2781
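/*
 * Note: the loop above is the standard SHA-1 compression function from
 * FIPS 180-1 -- the first do/while expands the 16-word block into an
 * 80-word message schedule, and the four 20-round groups apply the
 * usual Ch, Parity, and Maj functions with the round constants
 * 0x5A827999, 0x6ED9EBA1, 0x8F1BBCDC, and 0xCA62C1D6.
 */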
2782/**
2783 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2784 * @RandomChallenge: pointer to the entry of host challenge random number array.
2785 * @HashWorking: pointer to the entry of the working hash array.
2786 *
 2787 * This routine calculates the working hash array referred to by @HashWorking
 2788 * from the challenge random numbers associated with the host, referred to by
2789 * @RandomChallenge. The result is put into the entry of the working hash
2790 * array and returned by reference through @HashWorking.
2791 **/
2792static void
2793lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2794{
2795 *HashWorking = (*RandomChallenge ^ *HashWorking);
2796}
2797
2798/**
2799 * lpfc_hba_init - Perform special handling for LC HBA initialization
2800 * @phba: pointer to lpfc hba data structure.
2801 * @hbainit: pointer to an array of unsigned 32-bit integers.
2802 *
2803 * This routine performs the special handling for LC HBA initialization.
2804 **/
2805void
2806lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2807{
2808 int t;
2809 uint32_t *HashWorking;
2810 uint32_t *pwwnn = (uint32_t *) phba->wwnn;
2811
2812 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
2813 if (!HashWorking)
2814 return;
2815
2816 HashWorking[0] = HashWorking[78] = *pwwnn++;
2817 HashWorking[1] = HashWorking[79] = *pwwnn;
2818
2819 for (t = 0; t < 7; t++)
2820 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2821
2822 lpfc_sha_init(hbainit);
2823 lpfc_sha_iterate(hbainit, HashWorking);
2824 kfree(HashWorking);
2825}
2826
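/*
 * Illustrative sketch, not driver code: putting the three helpers above
 * together, the LC HBA challenge/response seeds the 80-word working
 * array from the WWNN, XORs in the adapter's random challenge, and
 * folds the result through the SHA-1 rounds. A caller supplies a
 * 5-word (160-bit) result buffer:
 */
#if 0
	uint32_t hbainit[5];

	lpfc_hba_init(phba, hbainit);
	/* hbainit[0..4] now holds the 160-bit response */
#endif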
2827/**
2828 * lpfc_cleanup - Performs vport cleanups before deleting a vport
2829 * @vport: pointer to a virtual N_Port data structure.
2830 *
2831 * This routine performs the necessary cleanups before deleting the @vport.
2832 * It invokes the discovery state machine to perform necessary state
2833 * transitions and to release the ndlps associated with the @vport. Note,
2834 * the physical port is treated as @vport 0.
2835 **/
2836void
2837lpfc_cleanup(struct lpfc_vport *vport)
2838{
2839 struct lpfc_hba *phba = vport->phba;
2840 struct lpfc_nodelist *ndlp, *next_ndlp;
2841 int i = 0;
2842
2843 if (phba->link_state > LPFC_LINK_DOWN)
2844 lpfc_port_link_failure(vport);
2845
2846 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2847 if (!NLP_CHK_NODE_ACT(ndlp)) {
2848 ndlp = lpfc_enable_node(vport, ndlp,
2849 NLP_STE_UNUSED_NODE);
2850 if (!ndlp)
2851 continue;
2852 spin_lock_irq(&phba->ndlp_lock);
2853 NLP_SET_FREE_REQ(ndlp);
2854 spin_unlock_irq(&phba->ndlp_lock);
2855 /* Trigger the release of the ndlp memory */
2856 lpfc_nlp_put(ndlp);
2857 continue;
2858 }
2859 spin_lock_irq(&phba->ndlp_lock);
2860 if (NLP_CHK_FREE_REQ(ndlp)) {
2861 /* The ndlp should not be in memory free mode already */
2862 spin_unlock_irq(&phba->ndlp_lock);
2863 continue;
2864 } else
2865 /* Indicate request for freeing ndlp memory */
2866 NLP_SET_FREE_REQ(ndlp);
2867 spin_unlock_irq(&phba->ndlp_lock);
2868
2869 if (vport->port_type != LPFC_PHYSICAL_PORT &&
2870 ndlp->nlp_DID == Fabric_DID) {
2871 /* Just free up ndlp with Fabric_DID for vports */
2872 lpfc_nlp_put(ndlp);
2873 continue;
2874 }
2875
 2876		/* Take care of nodes in the unused state before the state
 2877		 * machine takes action.
2878 */
2879 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
2880 lpfc_nlp_put(ndlp);
2881 continue;
2882 }
2883
2884 if (ndlp->nlp_type & NLP_FABRIC)
2885 lpfc_disc_state_machine(vport, ndlp, NULL,
2886 NLP_EVT_DEVICE_RECOVERY);
2887
2888 lpfc_disc_state_machine(vport, ndlp, NULL,
2889 NLP_EVT_DEVICE_RM);
2890 }
2891
 2892	/* At this point, ALL ndlps should be gone
 2893	 * because of the previous NLP_EVT_DEVICE_RM.
 2894	 * Let's wait for this to happen, if needed.
2895 */
2896 while (!list_empty(&vport->fc_nodes)) {
2897 if (i++ > 3000) {
Olivier Deprez157378f2022-04-04 15:47:50 +02002898 lpfc_printf_vlog(vport, KERN_ERR,
2899 LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002900 "0233 Nodelist not empty\n");
2901 list_for_each_entry_safe(ndlp, next_ndlp,
2902 &vport->fc_nodes, nlp_listp) {
2903 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
Olivier Deprez157378f2022-04-04 15:47:50 +02002904 LOG_TRACE_EVENT,
David Brazdil0f672f62019-12-10 10:32:29 +00002905 "0282 did:x%x ndlp:x%px "
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002906 "usgmap:x%x refcnt:%d\n",
2907 ndlp->nlp_DID, (void *)ndlp,
2908 ndlp->nlp_usg_map,
2909 kref_read(&ndlp->kref));
2910 }
2911 break;
2912 }
2913
2914 /* Wait for any activity on ndlps to settle */
2915 msleep(10);
2916 }
2917 lpfc_cleanup_vports_rrqs(vport, NULL);
2918}
2919
2920/**
2921 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
2922 * @vport: pointer to a virtual N_Port data structure.
2923 *
2924 * This routine stops all the timers associated with a @vport. This function
2925 * is invoked before disabling or deleting a @vport. Note that the physical
2926 * port is treated as @vport 0.
2927 **/
2928void
2929lpfc_stop_vport_timers(struct lpfc_vport *vport)
2930{
2931 del_timer_sync(&vport->els_tmofunc);
2932 del_timer_sync(&vport->delayed_disc_tmo);
2933 lpfc_can_disctmo(vport);
2934 return;
2935}
2936
2937/**
2938 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2939 * @phba: pointer to lpfc hba data structure.
2940 *
2941 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
2942 * caller of this routine should already hold the host lock.
2943 **/
2944void
2945__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2946{
2947 /* Clear pending FCF rediscovery wait flag */
2948 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2949
2950 /* Now, try to stop the timer */
2951 del_timer(&phba->fcf.redisc_wait);
2952}
2953
2954/**
2955 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2956 * @phba: pointer to lpfc hba data structure.
2957 *
2958 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
2959 * checks whether the FCF rediscovery wait timer is pending with the host
2960 * lock held before proceeding with disabling the timer and clearing the
 2961 * wait timer pending flag.
2962 **/
2963void
2964lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2965{
2966 spin_lock_irq(&phba->hbalock);
2967 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2968 /* FCF rediscovery timer already fired or stopped */
2969 spin_unlock_irq(&phba->hbalock);
2970 return;
2971 }
2972 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2973 /* Clear failover in progress flags */
2974 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
2975 spin_unlock_irq(&phba->hbalock);
2976}
2977
2978/**
2979 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
2980 * @phba: pointer to lpfc hba data structure.
2981 *
 2982 * This routine stops all the timers associated with an HBA. This function is
 2983 * invoked before either putting an HBA offline or unloading the driver.
2984 **/
2985void
2986lpfc_stop_hba_timers(struct lpfc_hba *phba)
2987{
David Brazdil0f672f62019-12-10 10:32:29 +00002988 if (phba->pport)
2989 lpfc_stop_vport_timers(phba->pport);
2990 cancel_delayed_work_sync(&phba->eq_delay_work);
Olivier Deprez157378f2022-04-04 15:47:50 +02002991 cancel_delayed_work_sync(&phba->idle_stat_delay_work);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002992 del_timer_sync(&phba->sli.mbox_tmo);
2993 del_timer_sync(&phba->fabric_block_timer);
2994 del_timer_sync(&phba->eratt_poll);
2995 del_timer_sync(&phba->hb_tmofunc);
2996 if (phba->sli_rev == LPFC_SLI_REV4) {
2997 del_timer_sync(&phba->rrq_tmr);
2998 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
2999 }
3000 phba->hb_outstanding = 0;
3001
3002 switch (phba->pci_dev_grp) {
3003 case LPFC_PCI_DEV_LP:
3004 /* Stop any LightPulse device specific driver timers */
3005 del_timer_sync(&phba->fcp_poll_timer);
3006 break;
3007 case LPFC_PCI_DEV_OC:
David Brazdil0f672f62019-12-10 10:32:29 +00003008 /* Stop any OneConnect device specific driver timers */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003009 lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
3010 break;
3011 default:
Olivier Deprez157378f2022-04-04 15:47:50 +02003012 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003013 "0297 Invalid device group (x%x)\n",
3014 phba->pci_dev_grp);
3015 break;
3016 }
3017 return;
3018}
3019
3020/**
3021 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
3022 * @phba: pointer to lpfc hba data structure.
Olivier Deprez157378f2022-04-04 15:47:50 +02003023 * @mbx_action: flag for mailbox no wait action.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003024 *
 3025 * This routine marks an HBA's management interface as blocked. Once the HBA's
 3026 * management interface is marked as blocked, all user space access to
 3027 * the HBA, whether from the sysfs interface or the libdfc interface, will
 3028 * be blocked. The HBA is set to block the management interface when the
3029 * driver prepares the HBA interface for online or offline.
3030 **/
3031static void
3032lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
3033{
3034 unsigned long iflag;
3035 uint8_t actcmd = MBX_HEARTBEAT;
3036 unsigned long timeout;
3037
3038 spin_lock_irqsave(&phba->hbalock, iflag);
3039 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
3040 spin_unlock_irqrestore(&phba->hbalock, iflag);
3041 if (mbx_action == LPFC_MBX_NO_WAIT)
3042 return;
3043 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
3044 spin_lock_irqsave(&phba->hbalock, iflag);
3045 if (phba->sli.mbox_active) {
3046 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
3047 /* Determine how long we might wait for the active mailbox
3048 * command to be gracefully completed by firmware.
3049 */
3050 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
3051 phba->sli.mbox_active) * 1000) + jiffies;
3052 }
3053 spin_unlock_irqrestore(&phba->hbalock, iflag);
3054
 3055	/* Wait for the outstanding mailbox command to complete */
3056 while (phba->sli.mbox_active) {
3057 /* Check active mailbox complete status every 2ms */
3058 msleep(2);
3059 if (time_after(jiffies, timeout)) {
Olivier Deprez157378f2022-04-04 15:47:50 +02003060 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3061 "2813 Mgmt IO is Blocked %x "
3062 "- mbox cmd %x still active\n",
3063 phba->sli.sli_flag, actcmd);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003064 break;
3065 }
3066 }
3067}
3068
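/*
 * Illustrative sketch, not driver code: lpfc_block_mgmt_io() is paired
 * with lpfc_unblock_mgmt_io(). Callers such as the offline-prep path
 * block management access, transition the port, and unblock afterwards:
 */
#if 0
	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);  /* wait out active mbox */
	/* ... take the port offline or bring it back online ... */
	lpfc_unblock_mgmt_io(phba);
#endif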
3069/**
3070 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
3071 * @phba: pointer to lpfc hba data structure.
3072 *
3073 * Allocate RPIs for all active remote nodes. This is needed whenever
3074 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
 3075 * is to fix up the temporary RPI assignments.
3076 **/
3077void
3078lpfc_sli4_node_prep(struct lpfc_hba *phba)
3079{
3080 struct lpfc_nodelist *ndlp, *next_ndlp;
3081 struct lpfc_vport **vports;
3082 int i, rpi;
3083 unsigned long flags;
3084
3085 if (phba->sli_rev != LPFC_SLI_REV4)
3086 return;
3087
3088 vports = lpfc_create_vport_work_array(phba);
3089 if (vports == NULL)
3090 return;
3091
3092 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3093 if (vports[i]->load_flag & FC_UNLOADING)
3094 continue;
3095
3096 list_for_each_entry_safe(ndlp, next_ndlp,
3097 &vports[i]->fc_nodes,
3098 nlp_listp) {
3099 if (!NLP_CHK_NODE_ACT(ndlp))
3100 continue;
3101 rpi = lpfc_sli4_alloc_rpi(phba);
3102 if (rpi == LPFC_RPI_ALLOC_ERROR) {
3103 spin_lock_irqsave(&phba->ndlp_lock, flags);
3104 NLP_CLR_NODE_ACT(ndlp);
3105 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
3106 continue;
3107 }
3108 ndlp->nlp_rpi = rpi;
Olivier Deprez157378f2022-04-04 15:47:50 +02003109 lpfc_printf_vlog(ndlp->vport, KERN_INFO,
3110 LOG_NODE | LOG_DISCOVERY,
3111 "0009 Assign RPI x%x to ndlp x%px "
3112 "DID:x%06x flg:x%x map:x%x\n",
3113 ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
3114 ndlp->nlp_flag, ndlp->nlp_usg_map);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003115 }
3116 }
3117 lpfc_destroy_vport_work_array(phba, vports);
3118}
3119
3120/**
David Brazdil0f672f62019-12-10 10:32:29 +00003121 * lpfc_create_expedite_pool - create expedite pool
3122 * @phba: pointer to lpfc hba data structure.
3123 *
3124 * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0
 3125 * to the expedite pool and marks them as expedite.
3126 **/
3127static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
3128{
3129 struct lpfc_sli4_hdw_queue *qp;
3130 struct lpfc_io_buf *lpfc_ncmd;
3131 struct lpfc_io_buf *lpfc_ncmd_next;
3132 struct lpfc_epd_pool *epd_pool;
3133 unsigned long iflag;
3134
3135 epd_pool = &phba->epd_pool;
3136 qp = &phba->sli4_hba.hdwq[0];
3137
3138 spin_lock_init(&epd_pool->lock);
3139 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3140 spin_lock(&epd_pool->lock);
3141 INIT_LIST_HEAD(&epd_pool->list);
3142 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3143 &qp->lpfc_io_buf_list_put, list) {
3144 list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
3145 lpfc_ncmd->expedite = true;
3146 qp->put_io_bufs--;
3147 epd_pool->count++;
3148 if (epd_pool->count >= XRI_BATCH)
3149 break;
3150 }
3151 spin_unlock(&epd_pool->lock);
3152 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3153}
3154
3155/**
3156 * lpfc_destroy_expedite_pool - destroy expedite pool
3157 * @phba: pointer to lpfc hba data structure.
3158 *
 3159 * This routine returns XRIs from the expedite pool to lpfc_io_buf_list_put
 3160 * of HWQ 0 and clears the expedite mark.
3161 **/
3162static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
3163{
3164 struct lpfc_sli4_hdw_queue *qp;
3165 struct lpfc_io_buf *lpfc_ncmd;
3166 struct lpfc_io_buf *lpfc_ncmd_next;
3167 struct lpfc_epd_pool *epd_pool;
3168 unsigned long iflag;
3169
3170 epd_pool = &phba->epd_pool;
3171 qp = &phba->sli4_hba.hdwq[0];
3172
3173 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3174 spin_lock(&epd_pool->lock);
3175 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3176 &epd_pool->list, list) {
3177 list_move_tail(&lpfc_ncmd->list,
3178 &qp->lpfc_io_buf_list_put);
 3179		lpfc_ncmd->expedite = false;	/* clear the expedite mark */
3180 qp->put_io_bufs++;
3181 epd_pool->count--;
3182 }
3183 spin_unlock(&epd_pool->lock);
3184 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3185}
3186
3187/**
3188 * lpfc_create_multixri_pools - create multi-XRI pools
3189 * @phba: pointer to lpfc hba data structure.
3190 *
3191 * This routine initialize public, private per HWQ. Then, move XRIs from
3192 * lpfc_io_buf_list_put to public pool. High and low watermark are also
3193 * Initialized.
3194 **/
3195void lpfc_create_multixri_pools(struct lpfc_hba *phba)
3196{
3197 u32 i, j;
3198 u32 hwq_count;
3199 u32 count_per_hwq;
3200 struct lpfc_io_buf *lpfc_ncmd;
3201 struct lpfc_io_buf *lpfc_ncmd_next;
3202 unsigned long iflag;
3203 struct lpfc_sli4_hdw_queue *qp;
3204 struct lpfc_multixri_pool *multixri_pool;
3205 struct lpfc_pbl_pool *pbl_pool;
3206 struct lpfc_pvt_pool *pvt_pool;
3207
3208 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3209 "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n",
3210 phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu,
3211 phba->sli4_hba.io_xri_cnt);
3212
3213 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3214 lpfc_create_expedite_pool(phba);
3215
3216 hwq_count = phba->cfg_hdw_queue;
3217 count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count;
3218
3219 for (i = 0; i < hwq_count; i++) {
3220 multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL);
3221
3222 if (!multixri_pool) {
3223 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3224 "1238 Failed to allocate memory for "
3225 "multixri_pool\n");
3226
3227 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3228 lpfc_destroy_expedite_pool(phba);
3229
3230 j = 0;
3231 while (j < i) {
3232 qp = &phba->sli4_hba.hdwq[j];
3233 kfree(qp->p_multixri_pool);
3234 j++;
3235 }
3236 phba->cfg_xri_rebalancing = 0;
3237 return;
3238 }
3239
3240 qp = &phba->sli4_hba.hdwq[i];
3241 qp->p_multixri_pool = multixri_pool;
3242
3243 multixri_pool->xri_limit = count_per_hwq;
3244 multixri_pool->rrb_next_hwqid = i;
3245
3246 /* Deal with public free xri pool */
3247 pbl_pool = &multixri_pool->pbl_pool;
3248 spin_lock_init(&pbl_pool->lock);
3249 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3250 spin_lock(&pbl_pool->lock);
3251 INIT_LIST_HEAD(&pbl_pool->list);
3252 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3253 &qp->lpfc_io_buf_list_put, list) {
3254 list_move_tail(&lpfc_ncmd->list, &pbl_pool->list);
3255 qp->put_io_bufs--;
3256 pbl_pool->count++;
3257 }
3258 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3259 "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n",
3260 pbl_pool->count, i);
3261 spin_unlock(&pbl_pool->lock);
3262 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3263
3264 /* Deal with private free xri pool */
3265 pvt_pool = &multixri_pool->pvt_pool;
3266 pvt_pool->high_watermark = multixri_pool->xri_limit / 2;
3267 pvt_pool->low_watermark = XRI_BATCH;
3268 spin_lock_init(&pvt_pool->lock);
3269 spin_lock_irqsave(&pvt_pool->lock, iflag);
3270 INIT_LIST_HEAD(&pvt_pool->list);
3271 pvt_pool->count = 0;
3272 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
3273 }
3274}
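/*
 * A minimal, separately compiled sketch of the sizing math above: each
 * hardware queue receives io_xri_cnt / hwq_count XRIs as its xri_limit,
 * the private pool's high watermark is half of that limit, and its low
 * watermark is XRI_BATCH. The example totals and the XRI_BATCH value are
 * assumptions made for illustration only.
 */
#include <stdio.h>

#define XRI_BATCH 8			/* assumed value, illustration only */

int main(void)
{
	unsigned int io_xri_cnt = 2048;	/* example total XRI count */
	unsigned int hwq_count = 16;	/* example number of HWQs */
	unsigned int xri_limit = io_xri_cnt / hwq_count;

	printf("xri_limit=%u high_watermark=%u low_watermark=%u\n",
	       xri_limit, xri_limit / 2, (unsigned int)XRI_BATCH);
	return 0;
}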
3275
3276/**
3277 * lpfc_destroy_multixri_pools - destroy multi-XRI pools
3278 * @phba: pointer to lpfc hba data structure.
3279 *
3280 * This routine returns XRIs from public/private to lpfc_io_buf_list_put.
3281 **/
3282static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
3283{
3284 u32 i;
3285 u32 hwq_count;
3286 struct lpfc_io_buf *lpfc_ncmd;
3287 struct lpfc_io_buf *lpfc_ncmd_next;
3288 unsigned long iflag;
3289 struct lpfc_sli4_hdw_queue *qp;
3290 struct lpfc_multixri_pool *multixri_pool;
3291 struct lpfc_pbl_pool *pbl_pool;
3292 struct lpfc_pvt_pool *pvt_pool;
3293
3294 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3295 lpfc_destroy_expedite_pool(phba);
3296
3297 if (!(phba->pport->load_flag & FC_UNLOADING))
3298 lpfc_sli_flush_io_rings(phba);
3299
3300 hwq_count = phba->cfg_hdw_queue;
3301
3302 for (i = 0; i < hwq_count; i++) {
3303 qp = &phba->sli4_hba.hdwq[i];
3304 multixri_pool = qp->p_multixri_pool;
3305 if (!multixri_pool)
3306 continue;
3307
3308 qp->p_multixri_pool = NULL;
3309
3310 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3311
3312 /* Deal with public free xri pool */
3313 pbl_pool = &multixri_pool->pbl_pool;
3314 spin_lock(&pbl_pool->lock);
3315
3316 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3317 "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n",
3318 pbl_pool->count, i);
3319
3320 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3321 &pbl_pool->list, list) {
3322 list_move_tail(&lpfc_ncmd->list,
3323 &qp->lpfc_io_buf_list_put);
3324 qp->put_io_bufs++;
3325 pbl_pool->count--;
3326 }
3327
3328 INIT_LIST_HEAD(&pbl_pool->list);
3329 pbl_pool->count = 0;
3330
3331 spin_unlock(&pbl_pool->lock);
3332
3333 /* Deal with private free xri pool */
3334 pvt_pool = &multixri_pool->pvt_pool;
3335 spin_lock(&pvt_pool->lock);
3336
3337 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3338 "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n",
3339 pvt_pool->count, i);
3340
3341 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3342 &pvt_pool->list, list) {
3343 list_move_tail(&lpfc_ncmd->list,
3344 &qp->lpfc_io_buf_list_put);
3345 qp->put_io_bufs++;
3346 pvt_pool->count--;
3347 }
3348
3349 INIT_LIST_HEAD(&pvt_pool->list);
3350 pvt_pool->count = 0;
3351
3352 spin_unlock(&pvt_pool->lock);
3353 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3354
3355 kfree(multixri_pool);
3356 }
3357}
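/*
 * Both destroy routines above walk their pools with
 * list_for_each_entry_safe() because every node is moved to another list
 * mid-walk; the _safe variant caches the next node before the current one
 * is unlinked. A freestanding sketch of the same drain pattern, using a
 * hand-rolled singly linked list instead of the kernel's list_head:
 */
#include <stdio.h>
#include <stdlib.h>

struct node { int xri; struct node *next; };

int main(void)
{
	struct node *pool = NULL, *put_list = NULL, *n, *next;
	int i;

	for (i = 0; i < 3; i++) {	/* build a small pool: 2, 1, 0 */
		n = malloc(sizeof(*n));
		if (!n)
			return 1;
		n->xri = i;
		n->next = pool;
		pool = n;
	}
	for (n = pool; n; n = next) {	/* cache next, then move the node */
		next = n->next;
		n->next = put_list;
		put_list = n;
	}
	pool = NULL;			/* the pool is now empty */
	for (n = put_list; n; n = next) {
		next = n->next;
		printf("returned xri %d\n", n->xri);
		free(n);
	}
	return 0;
}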
3358
3359/**
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003360 * lpfc_online - Initialize and bring a HBA online
3361 * @phba: pointer to lpfc hba data structure.
3362 *
3363 * This routine initializes the HBA and brings it online. During this
3364 * process, the management interface is blocked to prevent user space access
3365 * to the HBA from interfering with the driver initialization.
3366 *
3367 * Return codes
3368 * 0 - successful
3369 * 1 - failed
3370 **/
3371int
3372lpfc_online(struct lpfc_hba *phba)
3373{
3374 struct lpfc_vport *vport;
3375 struct lpfc_vport **vports;
3376 int i, error = 0;
3377 bool vpis_cleared = false;
3378
3379 if (!phba)
3380 return 0;
3381 vport = phba->pport;
3382
3383 if (!(vport->fc_flag & FC_OFFLINE_MODE))
3384 return 0;
3385
3386 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3387 "0458 Bring Adapter online\n");
3388
3389 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
3390
3391 if (phba->sli_rev == LPFC_SLI_REV4) {
3392 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
3393 lpfc_unblock_mgmt_io(phba);
3394 return 1;
3395 }
3396 spin_lock_irq(&phba->hbalock);
3397 if (!phba->sli4_hba.max_cfg_param.vpi_used)
3398 vpis_cleared = true;
3399 spin_unlock_irq(&phba->hbalock);
3400
3401 /* Reestablish the local initiator port.
3402 * The offline process destroyed the previous lport.
3403 */
3404 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
3405 !phba->nvmet_support) {
3406 error = lpfc_nvme_create_localport(phba->pport);
3407 if (error)
Olivier Deprez157378f2022-04-04 15:47:50 +02003408 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003409 "6132 NVME restore reg failed "
3410 "on nvmei error x%x\n", error);
3411 }
3412 } else {
3413 lpfc_sli_queue_init(phba);
3414 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
3415 lpfc_unblock_mgmt_io(phba);
3416 return 1;
3417 }
3418 }
3419
3420 vports = lpfc_create_vport_work_array(phba);
3421 if (vports != NULL) {
3422 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3423 struct Scsi_Host *shost;
3424 shost = lpfc_shost_from_vport(vports[i]);
3425 spin_lock_irq(shost->host_lock);
3426 vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
3427 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
3428 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3429 if (phba->sli_rev == LPFC_SLI_REV4) {
3430 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
3431 if ((vpis_cleared) &&
3432 (vports[i]->port_type !=
3433 LPFC_PHYSICAL_PORT))
3434 vports[i]->vpi = 0;
3435 }
3436 spin_unlock_irq(shost->host_lock);
3437 }
3438 }
3439 lpfc_destroy_vport_work_array(phba, vports);
3440
David Brazdil0f672f62019-12-10 10:32:29 +00003441 if (phba->cfg_xri_rebalancing)
3442 lpfc_create_multixri_pools(phba);
3443
Olivier Deprez0e641232021-09-23 10:07:05 +02003444 lpfc_cpuhp_add(phba);
3445
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003446 lpfc_unblock_mgmt_io(phba);
3447 return 0;
3448}
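/*
 * lpfc_online() brackets hardware setup between blocking and unblocking the
 * management interface, taking care to unblock on every exit path. A
 * compilable skeleton of that bracket pattern, built from hypothetical
 * stand-in functions rather than the real driver entry points:
 */
#include <stdio.h>

static void block_mgmt_io(void)   { puts("mgmt blocked"); }
static void unblock_mgmt_io(void) { puts("mgmt unblocked"); }
static int hba_setup(void)        { return 0; /* 0 on success */ }

static int bring_online(void)
{
	block_mgmt_io();
	if (hba_setup()) {
		unblock_mgmt_io();	/* failure path still unblocks */
		return 1;
	}
	unblock_mgmt_io();		/* success path */
	return 0;
}

int main(void)
{
	return bring_online();
}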
3449
3450/**
3451 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
3452 * @phba: pointer to lpfc hba data structure.
3453 *
3454 * This routine marks an HBA's management interface as not blocked. Once
3455 * the HBA's management interface is marked as not blocked, all user space
3456 * access to the HBA, whether from the sysfs or the libdfc interface, is
3457 * allowed. The driver blocks the management interface while it prepares
3458 * the HBA for online or offline operation and then unblocks the management
3459 * interface afterwards.
3460 **/
3461void
3462lpfc_unblock_mgmt_io(struct lpfc_hba *phba)
3463{
3464 unsigned long iflag;
3465
3466 spin_lock_irqsave(&phba->hbalock, iflag);
3467 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
3468 spin_unlock_irqrestore(&phba->hbalock, iflag);
3469}
3470
3471/**
3472 * lpfc_offline_prep - Prepare a HBA to be brought offline
3473 * @phba: pointer to lpfc hba data structure.
Olivier Deprez157378f2022-04-04 15:47:50 +02003474 * @mbx_action: flag for mailbox shutdown action.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003475 *
3476 * This routine is invoked to prepare a HBA to be brought offline. It issues
3477 * an unreg_login to all the nodes on all vports and flushes the mailbox
3478 * queue to make the HBA ready to be brought offline.
3479 **/
3480void
3481lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
3482{
3483 struct lpfc_vport *vport = phba->pport;
3484 struct lpfc_nodelist *ndlp, *next_ndlp;
3485 struct lpfc_vport **vports;
3486 struct Scsi_Host *shost;
3487 int i;
3488
3489 if (vport->fc_flag & FC_OFFLINE_MODE)
3490 return;
3491
3492 lpfc_block_mgmt_io(phba, mbx_action);
3493
3494 lpfc_linkdown(phba);
3495
3496 /* Issue an unreg_login to all nodes on all vports */
3497 vports = lpfc_create_vport_work_array(phba);
3498 if (vports != NULL) {
3499 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3500 if (vports[i]->load_flag & FC_UNLOADING)
3501 continue;
3502 shost = lpfc_shost_from_vport(vports[i]);
3503 spin_lock_irq(shost->host_lock);
3504 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
3505 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3506 vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
3507 spin_unlock_irq(shost->host_lock);
3508
3509 shost = lpfc_shost_from_vport(vports[i]);
3510 list_for_each_entry_safe(ndlp, next_ndlp,
3511 &vports[i]->fc_nodes,
3512 nlp_listp) {
Olivier Deprez157378f2022-04-04 15:47:50 +02003513 if ((!NLP_CHK_NODE_ACT(ndlp)) ||
3514 ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
3515 /* Driver must assume RPI is invalid for
3516 * any unused or inactive node.
3517 */
3518 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003519 continue;
Olivier Deprez157378f2022-04-04 15:47:50 +02003520 }
3521
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003522 if (ndlp->nlp_type & NLP_FABRIC) {
3523 lpfc_disc_state_machine(vports[i], ndlp,
3524 NULL, NLP_EVT_DEVICE_RECOVERY);
3525 lpfc_disc_state_machine(vports[i], ndlp,
3526 NULL, NLP_EVT_DEVICE_RM);
3527 }
3528 spin_lock_irq(shost->host_lock);
3529 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
3530 spin_unlock_irq(shost->host_lock);
3531 /*
3532 * Whenever an SLI4 port goes offline, free the
3533 * RPI. Get a new RPI when the adapter port
3534 * comes back online.
3535 */
3536 if (phba->sli_rev == LPFC_SLI_REV4) {
Olivier Deprez157378f2022-04-04 15:47:50 +02003537 lpfc_printf_vlog(ndlp->vport, KERN_INFO,
3538 LOG_NODE | LOG_DISCOVERY,
3539 "0011 Free RPI x%x on "
3540 "ndlp:x%px did x%x "
3541 "usgmap:x%x\n",
3542 ndlp->nlp_rpi, ndlp,
3543 ndlp->nlp_DID,
3544 ndlp->nlp_usg_map);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003545 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
Olivier Deprez157378f2022-04-04 15:47:50 +02003546 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003547 }
3548 lpfc_unreg_rpi(vports[i], ndlp);
3549 }
3550 }
3551 }
3552 lpfc_destroy_vport_work_array(phba, vports);
3553
3554 lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
3555
3556 if (phba->wq)
3557 flush_workqueue(phba->wq);
3558}
3559
3560/**
3561 * lpfc_offline - Bring a HBA offline
3562 * @phba: pointer to lpfc hba data structure.
3563 *
3564 * This routine actually brings a HBA offline. It stops all the timers
3565 * associated with the HBA, brings down the SLI layer, and eventually
3566 * marks the HBA as in offline state for the upper layer protocol.
3567 **/
3568void
3569lpfc_offline(struct lpfc_hba *phba)
3570{
3571 struct Scsi_Host *shost;
3572 struct lpfc_vport **vports;
3573 int i;
3574
3575 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
3576 return;
3577
3578 /* stop port and all timers associated with this hba */
3579 lpfc_stop_port(phba);
3580
3581 /* Tear down the local and target port registrations. The
3582 * nvme transports need to cleanup.
3583 */
3584 lpfc_nvmet_destroy_targetport(phba);
3585 lpfc_nvme_destroy_localport(phba->pport);
3586
3587 vports = lpfc_create_vport_work_array(phba);
3588 if (vports != NULL)
3589 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3590 lpfc_stop_vport_timers(vports[i]);
3591 lpfc_destroy_vport_work_array(phba, vports);
3592 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3593 "0460 Bring Adapter offline\n");
3594	/* Bring down the SLI layer and clean up. The HBA is offline
3595	   now. */
3596 lpfc_sli_hba_down(phba);
3597 spin_lock_irq(&phba->hbalock);
3598 phba->work_ha = 0;
3599 spin_unlock_irq(&phba->hbalock);
3600 vports = lpfc_create_vport_work_array(phba);
3601 if (vports != NULL)
3602 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3603 shost = lpfc_shost_from_vport(vports[i]);
3604 spin_lock_irq(shost->host_lock);
3605 vports[i]->work_port_events = 0;
3606 vports[i]->fc_flag |= FC_OFFLINE_MODE;
3607 spin_unlock_irq(shost->host_lock);
3608 }
3609 lpfc_destroy_vport_work_array(phba, vports);
Olivier Deprez0e641232021-09-23 10:07:05 +02003610 __lpfc_cpuhp_remove(phba);
David Brazdil0f672f62019-12-10 10:32:29 +00003611
3612 if (phba->cfg_xri_rebalancing)
3613 lpfc_destroy_multixri_pools(phba);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003614}
3615
3616/**
3617 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
3618 * @phba: pointer to lpfc hba data structure.
3619 *
3620 * This routine frees all the SCSI buffers and IOCBs from the driver
3621 * list back to the kernel. It is called from lpfc_pci_remove_one to free
3622 * the internal resources before the device is removed from the system.
3623 **/
3624static void
3625lpfc_scsi_free(struct lpfc_hba *phba)
3626{
David Brazdil0f672f62019-12-10 10:32:29 +00003627 struct lpfc_io_buf *sb, *sb_next;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003628
3629 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
3630 return;
3631
3632 spin_lock_irq(&phba->hbalock);
3633
3634 /* Release all the lpfc_scsi_bufs maintained by this host. */
3635
3636 spin_lock(&phba->scsi_buf_list_put_lock);
3637 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
3638 list) {
3639 list_del(&sb->list);
3640 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3641 sb->dma_handle);
3642 kfree(sb);
3643 phba->total_scsi_bufs--;
3644 }
3645 spin_unlock(&phba->scsi_buf_list_put_lock);
3646
3647 spin_lock(&phba->scsi_buf_list_get_lock);
3648 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
3649 list) {
3650 list_del(&sb->list);
3651 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3652 sb->dma_handle);
3653 kfree(sb);
3654 phba->total_scsi_bufs--;
3655 }
3656 spin_unlock(&phba->scsi_buf_list_get_lock);
3657 spin_unlock_irq(&phba->hbalock);
3658}
David Brazdil0f672f62019-12-10 10:32:29 +00003659
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003660/**
David Brazdil0f672f62019-12-10 10:32:29 +00003661 * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003662 * @phba: pointer to lpfc hba data structure.
3663 *
David Brazdil0f672f62019-12-10 10:32:29 +00003664 * This routine frees all the IO buffers and IOCBs from the driver
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003665 * list back to the kernel. It is called from lpfc_pci_remove_one to free
3666 * the internal resources before the device is removed from the system.
3667 **/
David Brazdil0f672f62019-12-10 10:32:29 +00003668void
3669lpfc_io_free(struct lpfc_hba *phba)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003670{
David Brazdil0f672f62019-12-10 10:32:29 +00003671 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
3672 struct lpfc_sli4_hdw_queue *qp;
3673 int idx;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003674
David Brazdil0f672f62019-12-10 10:32:29 +00003675 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
3676 qp = &phba->sli4_hba.hdwq[idx];
3677 /* Release all the lpfc_nvme_bufs maintained by this host. */
3678 spin_lock(&qp->io_buf_list_put_lock);
3679 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3680 &qp->lpfc_io_buf_list_put,
3681 list) {
3682 list_del(&lpfc_ncmd->list);
3683 qp->put_io_bufs--;
3684 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3685 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
3686 if (phba->cfg_xpsgl && !phba->nvmet_support)
3687 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
3688 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
3689 kfree(lpfc_ncmd);
3690 qp->total_io_bufs--;
3691 }
3692 spin_unlock(&qp->io_buf_list_put_lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003693
David Brazdil0f672f62019-12-10 10:32:29 +00003694 spin_lock(&qp->io_buf_list_get_lock);
3695 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3696 &qp->lpfc_io_buf_list_get,
3697 list) {
3698 list_del(&lpfc_ncmd->list);
3699 qp->get_io_bufs--;
3700 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3701 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
3702 if (phba->cfg_xpsgl && !phba->nvmet_support)
3703 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
3704 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
3705 kfree(lpfc_ncmd);
3706 qp->total_io_bufs--;
3707 }
3708 spin_unlock(&qp->io_buf_list_get_lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003709 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003710}
David Brazdil0f672f62019-12-10 10:32:29 +00003711
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003712/**
3713 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
3714 * @phba: pointer to lpfc hba data structure.
3715 *
3716 * This routine first calculates the sizes of the current els and allocated
3717 * scsi sgl lists, and then goes through all sgls to update the physical
3718 * XRIs assigned due to a port function reset. During port initialization,
3719 * the current els and allocated scsi sgl list counts are 0.
3720 *
3721 * Return codes
3722 * 0 - successful (for now, it always returns 0)
3723 **/
3724int
3725lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
3726{
3727 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
3728 uint16_t i, lxri, xri_cnt, els_xri_cnt;
3729 LIST_HEAD(els_sgl_list);
3730 int rc;
3731
3732 /*
3733 * update on pci function's els xri-sgl list
3734 */
3735 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3736
3737 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
3738 /* els xri-sgl expanded */
3739 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
3740 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3741 "3157 ELS xri-sgl count increased from "
3742 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3743 els_xri_cnt);
3744 /* allocate the additional els sgls */
3745 for (i = 0; i < xri_cnt; i++) {
3746 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
3747 GFP_KERNEL);
3748 if (sglq_entry == NULL) {
Olivier Deprez157378f2022-04-04 15:47:50 +02003749 lpfc_printf_log(phba, KERN_ERR,
3750 LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003751 "2562 Failure to allocate an "
3752 "ELS sgl entry:%d\n", i);
3753 rc = -ENOMEM;
3754 goto out_free_mem;
3755 }
3756 sglq_entry->buff_type = GEN_BUFF_TYPE;
3757 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
3758 &sglq_entry->phys);
3759 if (sglq_entry->virt == NULL) {
3760 kfree(sglq_entry);
Olivier Deprez157378f2022-04-04 15:47:50 +02003761 lpfc_printf_log(phba, KERN_ERR,
3762 LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003763 "2563 Failure to allocate an "
3764 "ELS mbuf:%d\n", i);
3765 rc = -ENOMEM;
3766 goto out_free_mem;
3767 }
3768 sglq_entry->sgl = sglq_entry->virt;
3769 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
3770 sglq_entry->state = SGL_FREED;
3771 list_add_tail(&sglq_entry->list, &els_sgl_list);
3772 }
3773 spin_lock_irq(&phba->hbalock);
3774 spin_lock(&phba->sli4_hba.sgl_list_lock);
3775 list_splice_init(&els_sgl_list,
3776 &phba->sli4_hba.lpfc_els_sgl_list);
3777 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3778 spin_unlock_irq(&phba->hbalock);
3779 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
3780		/* els xri-sgl shrunk */
3781 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
3782 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3783 "3158 ELS xri-sgl count decreased from "
3784 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3785 els_xri_cnt);
3786 spin_lock_irq(&phba->hbalock);
3787 spin_lock(&phba->sli4_hba.sgl_list_lock);
3788 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
3789 &els_sgl_list);
3790 /* release extra els sgls from list */
3791 for (i = 0; i < xri_cnt; i++) {
3792 list_remove_head(&els_sgl_list,
3793 sglq_entry, struct lpfc_sglq, list);
3794 if (sglq_entry) {
3795 __lpfc_mbuf_free(phba, sglq_entry->virt,
3796 sglq_entry->phys);
3797 kfree(sglq_entry);
3798 }
3799 }
3800 list_splice_init(&els_sgl_list,
3801 &phba->sli4_hba.lpfc_els_sgl_list);
3802 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3803 spin_unlock_irq(&phba->hbalock);
3804 } else
3805 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3806 "3163 ELS xri-sgl count unchanged: %d\n",
3807 els_xri_cnt);
3808 phba->sli4_hba.els_xri_cnt = els_xri_cnt;
3809
3810 /* update xris to els sgls on the list */
3811 sglq_entry = NULL;
3812 sglq_entry_next = NULL;
3813 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
3814 &phba->sli4_hba.lpfc_els_sgl_list, list) {
3815 lxri = lpfc_sli4_next_xritag(phba);
3816 if (lxri == NO_XRI) {
Olivier Deprez157378f2022-04-04 15:47:50 +02003817 lpfc_printf_log(phba, KERN_ERR,
3818 LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003819 "2400 Failed to allocate xri for "
3820 "ELS sgl\n");
3821 rc = -ENOMEM;
3822 goto out_free_mem;
3823 }
3824 sglq_entry->sli4_lxritag = lxri;
3825 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3826 }
3827 return 0;
3828
3829out_free_mem:
3830 lpfc_free_els_sgl_list(phba);
3831 return rc;
3832}
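/*
 * The update routine above reconciles the allocated sgl count with the newly
 * required count by computing a delta and growing or shrinking accordingly.
 * A freestanding sketch of that three-way decision, with assumed example
 * counts:
 */
#include <stdio.h>

int main(void)
{
	unsigned int allocated = 256;	/* current els_xri_cnt (example) */
	unsigned int required = 320;	/* freshly computed count (example) */

	if (required > allocated)
		printf("grow by %u sgls\n", required - allocated);
	else if (required < allocated)
		printf("shrink by %u sgls\n", allocated - required);
	else
		printf("count unchanged: %u\n", allocated);
	return 0;
}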
3833
3834/**
3835 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
3836 * @phba: pointer to lpfc hba data structure.
3837 *
3838 * This routine first calculates the sizes of the current els and allocated
3839 * scsi sgl lists, and then goes through all sgls to update the physical
3840 * XRIs assigned due to a port function reset. During port initialization,
3841 * the current els and allocated scsi sgl list counts are 0.
3842 *
3843 * Return codes
3844 * 0 - successful (for now, it always returns 0)
3845 **/
3846int
3847lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
3848{
3849 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
3850 uint16_t i, lxri, xri_cnt, els_xri_cnt;
3851 uint16_t nvmet_xri_cnt;
3852 LIST_HEAD(nvmet_sgl_list);
3853 int rc;
3854
3855 /*
3856 * update on pci function's nvmet xri-sgl list
3857 */
3858 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3859
3860 /* For NVMET, ALL remaining XRIs are dedicated for IO processing */
3861 nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
3862 if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
3863		/* nvmet xri-sgl expanded */
3864 xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
3865 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3866 "6302 NVMET xri-sgl cnt grew from %d to %d\n",
3867 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
3868 /* allocate the additional nvmet sgls */
3869 for (i = 0; i < xri_cnt; i++) {
3870 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
3871 GFP_KERNEL);
3872 if (sglq_entry == NULL) {
Olivier Deprez157378f2022-04-04 15:47:50 +02003873 lpfc_printf_log(phba, KERN_ERR,
3874 LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003875 "6303 Failure to allocate an "
3876 "NVMET sgl entry:%d\n", i);
3877 rc = -ENOMEM;
3878 goto out_free_mem;
3879 }
3880 sglq_entry->buff_type = NVMET_BUFF_TYPE;
3881 sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
3882 &sglq_entry->phys);
3883 if (sglq_entry->virt == NULL) {
3884 kfree(sglq_entry);
Olivier Deprez157378f2022-04-04 15:47:50 +02003885 lpfc_printf_log(phba, KERN_ERR,
3886 LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003887 "6304 Failure to allocate an "
3888 "NVMET buf:%d\n", i);
3889 rc = -ENOMEM;
3890 goto out_free_mem;
3891 }
3892 sglq_entry->sgl = sglq_entry->virt;
3893 memset(sglq_entry->sgl, 0,
3894 phba->cfg_sg_dma_buf_size);
3895 sglq_entry->state = SGL_FREED;
3896 list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
3897 }
3898 spin_lock_irq(&phba->hbalock);
3899 spin_lock(&phba->sli4_hba.sgl_list_lock);
3900 list_splice_init(&nvmet_sgl_list,
3901 &phba->sli4_hba.lpfc_nvmet_sgl_list);
3902 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3903 spin_unlock_irq(&phba->hbalock);
3904 } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
3905 /* nvmet xri-sgl shrunk */
3906 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
3907 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3908 "6305 NVMET xri-sgl count decreased from "
3909 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
3910 nvmet_xri_cnt);
3911 spin_lock_irq(&phba->hbalock);
3912 spin_lock(&phba->sli4_hba.sgl_list_lock);
3913 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
3914 &nvmet_sgl_list);
3915 /* release extra nvmet sgls from list */
3916 for (i = 0; i < xri_cnt; i++) {
3917 list_remove_head(&nvmet_sgl_list,
3918 sglq_entry, struct lpfc_sglq, list);
3919 if (sglq_entry) {
3920 lpfc_nvmet_buf_free(phba, sglq_entry->virt,
3921 sglq_entry->phys);
3922 kfree(sglq_entry);
3923 }
3924 }
3925 list_splice_init(&nvmet_sgl_list,
3926 &phba->sli4_hba.lpfc_nvmet_sgl_list);
3927 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3928 spin_unlock_irq(&phba->hbalock);
3929 } else
3930 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3931 "6306 NVMET xri-sgl count unchanged: %d\n",
3932 nvmet_xri_cnt);
3933 phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;
3934
3935 /* update xris to nvmet sgls on the list */
3936 sglq_entry = NULL;
3937 sglq_entry_next = NULL;
3938 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
3939 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
3940 lxri = lpfc_sli4_next_xritag(phba);
3941 if (lxri == NO_XRI) {
Olivier Deprez157378f2022-04-04 15:47:50 +02003942 lpfc_printf_log(phba, KERN_ERR,
3943 LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003944 "6307 Failed to allocate xri for "
3945 "NVMET sgl\n");
3946 rc = -ENOMEM;
3947 goto out_free_mem;
3948 }
3949 sglq_entry->sli4_lxritag = lxri;
3950 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3951 }
3952 return 0;
3953
3954out_free_mem:
3955 lpfc_free_nvmet_sgl_list(phba);
3956 return rc;
3957}
3958
David Brazdil0f672f62019-12-10 10:32:29 +00003959int
3960lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf)
3961{
3962 LIST_HEAD(blist);
3963 struct lpfc_sli4_hdw_queue *qp;
3964 struct lpfc_io_buf *lpfc_cmd;
3965 struct lpfc_io_buf *iobufp, *prev_iobufp;
3966 int idx, cnt, xri, inserted;
3967
3968 cnt = 0;
3969 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
3970 qp = &phba->sli4_hba.hdwq[idx];
3971 spin_lock_irq(&qp->io_buf_list_get_lock);
3972 spin_lock(&qp->io_buf_list_put_lock);
3973
3974 /* Take everything off the get and put lists */
3975 list_splice_init(&qp->lpfc_io_buf_list_get, &blist);
3976 list_splice(&qp->lpfc_io_buf_list_put, &blist);
3977 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
3978 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
3979 cnt += qp->get_io_bufs + qp->put_io_bufs;
3980 qp->get_io_bufs = 0;
3981 qp->put_io_bufs = 0;
3982 qp->total_io_bufs = 0;
3983 spin_unlock(&qp->io_buf_list_put_lock);
3984 spin_unlock_irq(&qp->io_buf_list_get_lock);
3985 }
3986
3987 /*
3988 * Take IO buffers off blist and put on cbuf sorted by XRI.
3989 * This is because POST_SGL takes a sequential range of XRIs
3990 * to post to the firmware.
3991 */
3992 for (idx = 0; idx < cnt; idx++) {
3993 list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list);
3994 if (!lpfc_cmd)
3995 return cnt;
3996 if (idx == 0) {
3997 list_add_tail(&lpfc_cmd->list, cbuf);
3998 continue;
3999 }
4000 xri = lpfc_cmd->cur_iocbq.sli4_xritag;
4001 inserted = 0;
4002 prev_iobufp = NULL;
4003 list_for_each_entry(iobufp, cbuf, list) {
4004 if (xri < iobufp->cur_iocbq.sli4_xritag) {
4005 if (prev_iobufp)
4006 list_add(&lpfc_cmd->list,
4007 &prev_iobufp->list);
4008 else
4009 list_add(&lpfc_cmd->list, cbuf);
4010 inserted = 1;
4011 break;
4012 }
4013 prev_iobufp = iobufp;
4014 }
4015 if (!inserted)
4016 list_add_tail(&lpfc_cmd->list, cbuf);
4017 }
4018 return cnt;
4019}
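/*
 * lpfc_io_buf_flush() inserts each buffer into cbuf in ascending XRI order
 * so that POST_SGL can later post sequential XRI ranges. A freestanding
 * sketch of the same ordered insertion, applied to a plain array of XRI
 * values instead of the driver's buffer list:
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	int xris[] = { 7, 3, 9, 1 };	/* arrival order */
	int sorted[4];
	int cnt = 0, i, j;

	for (i = 0; i < 4; i++) {
		/* find the first slot holding a larger XRI, insert before it */
		for (j = 0; j < cnt && sorted[j] < xris[i]; j++)
			;
		memmove(&sorted[j + 1], &sorted[j], (cnt - j) * sizeof(int));
		sorted[j] = xris[i];
		cnt++;
	}
	for (i = 0; i < cnt; i++)
		printf("%d ", sorted[i]);	/* prints: 1 3 7 9 */
	printf("\n");
	return 0;
}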
4020
4021int
4022lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
4023{
4024 struct lpfc_sli4_hdw_queue *qp;
4025 struct lpfc_io_buf *lpfc_cmd;
4026 int idx, cnt;
4027
4028 qp = phba->sli4_hba.hdwq;
4029 cnt = 0;
4030 while (!list_empty(cbuf)) {
4031 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4032 list_remove_head(cbuf, lpfc_cmd,
4033 struct lpfc_io_buf, list);
4034 if (!lpfc_cmd)
4035 return cnt;
4036 cnt++;
4037 qp = &phba->sli4_hba.hdwq[idx];
4038 lpfc_cmd->hdwq_no = idx;
4039 lpfc_cmd->hdwq = qp;
4040 lpfc_cmd->cur_iocbq.wqe_cmpl = NULL;
4041 lpfc_cmd->cur_iocbq.iocb_cmpl = NULL;
4042 spin_lock(&qp->io_buf_list_put_lock);
4043 list_add_tail(&lpfc_cmd->list,
4044 &qp->lpfc_io_buf_list_put);
4045 qp->put_io_bufs++;
4046 qp->total_io_bufs++;
4047 spin_unlock(&qp->io_buf_list_put_lock);
4048 }
4049 }
4050 return cnt;
4051}
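/*
 * lpfc_io_buf_replenish() deals buffers back out round-robin, one per
 * hardware queue per pass, so the put lists end up evenly loaded. The same
 * dealing pattern over a plain array of counters, with example sizes:
 */
#include <stdio.h>

int main(void)
{
	unsigned int per_hwq[4] = { 0 };	/* four example queues */
	unsigned int nbufs = 10, idx, i = 0;

	while (i < nbufs)
		for (idx = 0; idx < 4 && i < nbufs; idx++, i++)
			per_hwq[idx]++;
	for (idx = 0; idx < 4; idx++)
		printf("hwq[%u]=%u\n", idx, per_hwq[idx]);	/* 3 3 2 2 */
	return 0;
}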
4052
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004053/**
David Brazdil0f672f62019-12-10 10:32:29 +00004054 * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004055 * @phba: pointer to lpfc hba data structure.
4056 *
4057 * This routine first calculates the sizes of the current els and allocated
4058 * scsi sgl lists, and then goes through all sgls to update the physical
4059 * XRIs assigned due to a port function reset. During port initialization,
4060 * the current els and allocated scsi sgl list counts are 0.
4061 *
4062 * Return codes
4063 * 0 - successful (for now, it always returns 0)
4064 **/
4065int
David Brazdil0f672f62019-12-10 10:32:29 +00004066lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004067{
David Brazdil0f672f62019-12-10 10:32:29 +00004068 struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
4069 uint16_t i, lxri, els_xri_cnt;
4070 uint16_t io_xri_cnt, io_xri_max;
4071 LIST_HEAD(io_sgl_list);
4072 int rc, cnt;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004073
4074 /*
David Brazdil0f672f62019-12-10 10:32:29 +00004075 * update on pci function's allocated nvme xri-sgl list
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004076 */
David Brazdil0f672f62019-12-10 10:32:29 +00004077
4078 /* maximum number of xris available for nvme buffers */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004079 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
David Brazdil0f672f62019-12-10 10:32:29 +00004080 io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4081 phba->sli4_hba.io_xri_max = io_xri_max;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004082
4083 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
David Brazdil0f672f62019-12-10 10:32:29 +00004084 "6074 Current allocated XRI sgl count:%d, "
4085 "maximum XRI count:%d\n",
4086 phba->sli4_hba.io_xri_cnt,
4087 phba->sli4_hba.io_xri_max);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004088
David Brazdil0f672f62019-12-10 10:32:29 +00004089 cnt = lpfc_io_buf_flush(phba, &io_sgl_list);
4090
4091 if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
4092 /* max nvme xri shrunk below the allocated nvme buffers */
4093 io_xri_cnt = phba->sli4_hba.io_xri_cnt -
4094 phba->sli4_hba.io_xri_max;
4095 /* release the extra allocated nvme buffers */
4096 for (i = 0; i < io_xri_cnt; i++) {
4097 list_remove_head(&io_sgl_list, lpfc_ncmd,
4098 struct lpfc_io_buf, list);
4099 if (lpfc_ncmd) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004100 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
David Brazdil0f672f62019-12-10 10:32:29 +00004101 lpfc_ncmd->data,
4102 lpfc_ncmd->dma_handle);
4103 kfree(lpfc_ncmd);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004104 }
4105 }
David Brazdil0f672f62019-12-10 10:32:29 +00004106 phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004107 }
4108
David Brazdil0f672f62019-12-10 10:32:29 +00004109 /* update xris associated to remaining allocated nvme buffers */
4110 lpfc_ncmd = NULL;
4111 lpfc_ncmd_next = NULL;
4112 phba->sli4_hba.io_xri_cnt = cnt;
4113 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4114 &io_sgl_list, list) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004115 lxri = lpfc_sli4_next_xritag(phba);
4116 if (lxri == NO_XRI) {
Olivier Deprez157378f2022-04-04 15:47:50 +02004117 lpfc_printf_log(phba, KERN_ERR,
4118 LOG_TRACE_EVENT,
David Brazdil0f672f62019-12-10 10:32:29 +00004119 "6075 Failed to allocate xri for "
4120 "nvme buffer\n");
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004121 rc = -ENOMEM;
4122 goto out_free_mem;
4123 }
David Brazdil0f672f62019-12-10 10:32:29 +00004124 lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
4125 lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004126 }
David Brazdil0f672f62019-12-10 10:32:29 +00004127 cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004128 return 0;
4129
4130out_free_mem:
David Brazdil0f672f62019-12-10 10:32:29 +00004131 lpfc_io_free(phba);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004132 return rc;
4133}
4134
David Brazdil0f672f62019-12-10 10:32:29 +00004135/**
4136 * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec
Olivier Deprez157378f2022-04-04 15:47:50 +02004137 * @phba: Pointer to lpfc hba data structure.
4138 * @num_to_alloc: The requested number of buffers to allocate.
David Brazdil0f672f62019-12-10 10:32:29 +00004139 *
4140 * This routine allocates nvme buffers for a device with the SLI-4 interface
4141 * spec; each nvme buffer contains all the information needed to initiate
4142 * an I/O. After allocating up to @num_to_alloc IO buffers and putting
4143 * them on a list, it posts them to the port by using SGL block post.
4144 *
4145 * Return codes:
4146 * int - number of IO buffers that were allocated and posted.
4147 * 0 = failure, less than num_to_alloc is a partial failure.
4148 **/
4149int
4150lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
4151{
4152 struct lpfc_io_buf *lpfc_ncmd;
4153 struct lpfc_iocbq *pwqeq;
4154 uint16_t iotag, lxri = 0;
4155 int bcnt, num_posted;
4156 LIST_HEAD(prep_nblist);
4157 LIST_HEAD(post_nblist);
4158 LIST_HEAD(nvme_nblist);
4159
4160 phba->sli4_hba.io_xri_cnt = 0;
4161 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
4162 lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL);
4163 if (!lpfc_ncmd)
4164 break;
4165 /*
4166 * Get memory from the pci pool to map the virt space to
4167 * pci bus space for an I/O. The DMA buffer includes the
4168 * number of SGE's necessary to support the sg_tablesize.
4169 */
4170 lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
4171 GFP_KERNEL,
4172 &lpfc_ncmd->dma_handle);
4173 if (!lpfc_ncmd->data) {
4174 kfree(lpfc_ncmd);
4175 break;
4176 }
4177
4178 if (phba->cfg_xpsgl && !phba->nvmet_support) {
4179 INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list);
4180 } else {
4181 /*
4182 * 4K Page alignment is CRITICAL to BlockGuard, double
4183 * check to be sure.
4184 */
4185 if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
4186 (((unsigned long)(lpfc_ncmd->data) &
4187 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
Olivier Deprez157378f2022-04-04 15:47:50 +02004188 lpfc_printf_log(phba, KERN_ERR,
4189 LOG_TRACE_EVENT,
David Brazdil0f672f62019-12-10 10:32:29 +00004190 "3369 Memory alignment err: "
4191 "addr=%lx\n",
4192 (unsigned long)lpfc_ncmd->data);
4193 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4194 lpfc_ncmd->data,
4195 lpfc_ncmd->dma_handle);
4196 kfree(lpfc_ncmd);
4197 break;
4198 }
4199 }
4200
4201 INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list);
4202
4203 lxri = lpfc_sli4_next_xritag(phba);
4204 if (lxri == NO_XRI) {
4205 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4206 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4207 kfree(lpfc_ncmd);
4208 break;
4209 }
4210 pwqeq = &lpfc_ncmd->cur_iocbq;
4211
4212 /* Allocate iotag for lpfc_ncmd->cur_iocbq. */
4213 iotag = lpfc_sli_next_iotag(phba, pwqeq);
4214 if (iotag == 0) {
4215 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4216 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4217 kfree(lpfc_ncmd);
Olivier Deprez157378f2022-04-04 15:47:50 +02004218 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
David Brazdil0f672f62019-12-10 10:32:29 +00004219 "6121 Failed to allocate IOTAG for"
4220 " XRI:0x%x\n", lxri);
4221 lpfc_sli4_free_xri(phba, lxri);
4222 break;
4223 }
4224 pwqeq->sli4_lxritag = lxri;
4225 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4226 pwqeq->context1 = lpfc_ncmd;
4227
4228 /* Initialize local short-hand pointers. */
4229 lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
4230 lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
4231 lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
4232 spin_lock_init(&lpfc_ncmd->buf_lock);
4233
4234 /* add the nvme buffer to a post list */
4235 list_add_tail(&lpfc_ncmd->list, &post_nblist);
4236 phba->sli4_hba.io_xri_cnt++;
4237 }
4238 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
4239 "6114 Allocate %d out of %d requested new NVME "
4240 "buffers\n", bcnt, num_to_alloc);
4241
4242 /* post the list of nvme buffer sgls to port if available */
4243 if (!list_empty(&post_nblist))
4244 num_posted = lpfc_sli4_post_io_sgl_list(
4245 phba, &post_nblist, bcnt);
4246 else
4247 num_posted = 0;
4248
4249 return num_posted;
4250}
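/*
 * The BlockGuard path above rejects any DMA buffer that is not 4K aligned
 * using the classic mask test (addr & (page_size - 1)) != 0, which is valid
 * because the page size is a power of two. A freestanding version of that
 * check:
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_4K 4096UL		/* mirrors the 4K requirement above */

static int misaligned(uintptr_t addr)
{
	return (addr & (PAGE_4K - 1)) != 0;
}

int main(void)
{
	printf("%d %d\n", misaligned(0x10000), misaligned(0x10004));
	/* prints: 0 1 */
	return 0;
}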
4251
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004252static uint64_t
4253lpfc_get_wwpn(struct lpfc_hba *phba)
4254{
4255 uint64_t wwn;
4256 int rc;
4257 LPFC_MBOXQ_t *mboxq;
4258 MAILBOX_t *mb;
4259
4260 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4261 GFP_KERNEL);
4262 if (!mboxq)
4263 return (uint64_t)-1;
4264
4265 /* First get WWN of HBA instance */
4266 lpfc_read_nv(phba, mboxq);
4267 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4268 if (rc != MBX_SUCCESS) {
Olivier Deprez157378f2022-04-04 15:47:50 +02004269 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004270 "6019 Mailbox failed , mbxCmd x%x "
4271 "READ_NV, mbxStatus x%x\n",
4272 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4273 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
4274 mempool_free(mboxq, phba->mbox_mem_pool);
4275 return (uint64_t) -1;
4276 }
4277 mb = &mboxq->u.mb;
4278 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
4279 /* wwn is WWPN of HBA instance */
4280 mempool_free(mboxq, phba->mbox_mem_pool);
4281 if (phba->sli_rev == LPFC_SLI_REV4)
4282 return be64_to_cpu(wwn);
4283 else
4284 return rol64(wwn, 32);
4285}
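/*
 * lpfc_get_wwpn() copies the 8-byte port name raw and then fixes the byte
 * order per SLI revision: a big-endian-to-host conversion on SLI4, a 32-bit
 * rotate on older revisions. A freestanding sketch of both transforms; note
 * that my_be64_to_cpu() below swaps unconditionally, whereas the kernel's
 * be64_to_cpu() is a no-op on big-endian hosts. The raw value is an example.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t my_be64_to_cpu(uint64_t v)
{
	uint64_t r = 0;
	int i;

	for (i = 0; i < 8; i++)		/* portable full byte reversal */
		r = (r << 8) | ((v >> (8 * i)) & 0xff);
	return r;
}

static uint64_t my_rol64(uint64_t v, unsigned int s)
{
	return (v << s) | (v >> (64 - s));
}

int main(void)
{
	uint64_t raw = 0x1000000000009A10ULL;	/* example raw register */

	printf("sli4: %016llx\n", (unsigned long long)my_be64_to_cpu(raw));
	printf("sli3: %016llx\n", (unsigned long long)my_rol64(raw, 32));
	return 0;
}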
4286
4287/**
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004288 * lpfc_create_port - Create an FC port
4289 * @phba: pointer to lpfc hba data structure.
4290 * @instance: a unique integer ID to this FC port.
4291 * @dev: pointer to the device data structure.
4292 *
4293 * This routine creates a FC port for the upper layer protocol. The FC port
4294 * can be created on top of either a physical port or a virtual port provided
4295 * by the HBA. This routine also allocates a SCSI host data structure (shost)
4296 * and associates it with the FC port before adding the shost into the SCSI
4297 * layer.
4298 *
4299 * Return codes
4300 * @vport - pointer to the virtual N_Port data structure.
4301 * NULL - port create failed.
4302 **/
4303struct lpfc_vport *
4304lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
4305{
4306 struct lpfc_vport *vport;
4307 struct Scsi_Host *shost = NULL;
Olivier Deprez157378f2022-04-04 15:47:50 +02004308 struct scsi_host_template *template;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004309 int error = 0;
4310 int i;
4311 uint64_t wwn;
4312 bool use_no_reset_hba = false;
4313 int rc;
4314
4315 if (lpfc_no_hba_reset_cnt) {
4316 if (phba->sli_rev < LPFC_SLI_REV4 &&
4317 dev == &phba->pcidev->dev) {
4318 /* Reset the port first */
4319 lpfc_sli_brdrestart(phba);
4320 rc = lpfc_sli_chipset_init(phba);
4321 if (rc)
4322 return NULL;
4323 }
4324 wwn = lpfc_get_wwpn(phba);
4325 }
4326
4327 for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
4328 if (wwn == lpfc_no_hba_reset[i]) {
Olivier Deprez157378f2022-04-04 15:47:50 +02004329 lpfc_printf_log(phba, KERN_ERR,
4330 LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004331 "6020 Setting use_no_reset port=%llx\n",
4332 wwn);
4333 use_no_reset_hba = true;
4334 break;
4335 }
4336 }
4337
Olivier Deprez157378f2022-04-04 15:47:50 +02004338 /* Seed template for SCSI host registration */
4339 if (dev == &phba->pcidev->dev) {
4340 template = &phba->port_template;
4341
4342 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
4343 /* Seed physical port template */
4344 memcpy(template, &lpfc_template, sizeof(*template));
4345
4346 if (use_no_reset_hba) {
4347 /* template is for a no reset SCSI Host */
4348 template->max_sectors = 0xffff;
4349 template->eh_host_reset_handler = NULL;
4350 }
4351
4352 /* Template for all vports this physical port creates */
4353 memcpy(&phba->vport_template, &lpfc_template,
4354 sizeof(*template));
4355 phba->vport_template.max_sectors = 0xffff;
4356 phba->vport_template.shost_attrs = lpfc_vport_attrs;
4357 phba->vport_template.eh_bus_reset_handler = NULL;
4358 phba->vport_template.eh_host_reset_handler = NULL;
4359 phba->vport_template.vendor_id = 0;
4360
4361 /* Initialize the host templates with updated value */
4362 if (phba->sli_rev == LPFC_SLI_REV4) {
4363 template->sg_tablesize = phba->cfg_scsi_seg_cnt;
4364 phba->vport_template.sg_tablesize =
4365 phba->cfg_scsi_seg_cnt;
4366 } else {
4367 template->sg_tablesize = phba->cfg_sg_seg_cnt;
4368 phba->vport_template.sg_tablesize =
4369 phba->cfg_sg_seg_cnt;
4370 }
4371
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004372 } else {
Olivier Deprez157378f2022-04-04 15:47:50 +02004373 /* NVMET is for physical port only */
4374 memcpy(template, &lpfc_template_nvme,
4375 sizeof(*template));
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004376 }
Olivier Deprez157378f2022-04-04 15:47:50 +02004377 } else {
4378 template = &phba->vport_template;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004379 }
Olivier Deprez157378f2022-04-04 15:47:50 +02004380
4381 shost = scsi_host_alloc(template, sizeof(struct lpfc_vport));
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004382 if (!shost)
4383 goto out;
4384
4385 vport = (struct lpfc_vport *) shost->hostdata;
4386 vport->phba = phba;
4387 vport->load_flag |= FC_LOADING;
4388 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4389 vport->fc_rscn_flush = 0;
4390 lpfc_get_vport_cfgparam(vport);
4391
David Brazdil0f672f62019-12-10 10:32:29 +00004392 /* Adjust value in vport */
4393 vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type;
4394
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004395 shost->unique_id = instance;
4396 shost->max_id = LPFC_MAX_TARGET;
4397 shost->max_lun = vport->cfg_max_luns;
4398 shost->this_id = -1;
4399 shost->max_cmd_len = 16;
David Brazdil0f672f62019-12-10 10:32:29 +00004400
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004401 if (phba->sli_rev == LPFC_SLI_REV4) {
David Brazdil0f672f62019-12-10 10:32:29 +00004402 if (!phba->cfg_fcp_mq_threshold ||
4403 phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
4404 phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;
4405
4406 shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
4407 phba->cfg_fcp_mq_threshold);
4408
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004409 shost->dma_boundary =
4410 phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
David Brazdil0f672f62019-12-10 10:32:29 +00004411
4412 if (phba->cfg_xpsgl && !phba->nvmet_support)
4413 shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE;
4414 else
4415 shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
4416 } else
4417 /* SLI-3 has a limited number of hardware queues (3),
4418 * thus there is only one for FCP processing.
4419 */
4420 shost->nr_hw_queues = 1;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004421
4422 /*
4423 * Set initial can_queue value since 0 is no longer supported and
4424 * scsi_add_host will fail. This will be adjusted later based on the
4425 * max xri value determined in hba setup.
4426 */
4427 shost->can_queue = phba->cfg_hba_queue_depth - 10;
4428 if (dev != &phba->pcidev->dev) {
4429 shost->transportt = lpfc_vport_transport_template;
4430 vport->port_type = LPFC_NPIV_PORT;
4431 } else {
4432 shost->transportt = lpfc_transport_template;
4433 vport->port_type = LPFC_PHYSICAL_PORT;
4434 }
4435
Olivier Deprez157378f2022-04-04 15:47:50 +02004436 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
4437 "9081 CreatePort TMPLATE type %x TBLsize %d "
4438 "SEGcnt %d/%d\n",
4439 vport->port_type, shost->sg_tablesize,
4440 phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt);
4441
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004442 /* Initialize all internally managed lists. */
4443 INIT_LIST_HEAD(&vport->fc_nodes);
4444 INIT_LIST_HEAD(&vport->rcv_buffer_list);
4445 spin_lock_init(&vport->work_port_lock);
4446
4447 timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0);
4448
4449 timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0);
4450
4451 timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);
4452
David Brazdil0f672f62019-12-10 10:32:29 +00004453 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4454 lpfc_setup_bg(phba, shost);
4455
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004456 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
4457 if (error)
4458 goto out_put_shost;
4459
David Brazdil0f672f62019-12-10 10:32:29 +00004460 spin_lock_irq(&phba->port_list_lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004461 list_add_tail(&vport->listentry, &phba->port_list);
David Brazdil0f672f62019-12-10 10:32:29 +00004462 spin_unlock_irq(&phba->port_list_lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004463 return vport;
4464
4465out_put_shost:
4466 scsi_host_put(shost);
4467out:
4468 return NULL;
4469}
4470
4471/**
4472 * destroy_port - destroy an FC port
4473 * @vport: pointer to an lpfc virtual N_Port data structure.
4474 *
4475 * This routine destroys a FC port from the upper layer protocol. All the
4476 * resources associated with the port are released.
4477 **/
4478void
4479destroy_port(struct lpfc_vport *vport)
4480{
4481 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4482 struct lpfc_hba *phba = vport->phba;
4483
4484 lpfc_debugfs_terminate(vport);
4485 fc_remove_host(shost);
4486 scsi_remove_host(shost);
4487
David Brazdil0f672f62019-12-10 10:32:29 +00004488 spin_lock_irq(&phba->port_list_lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004489 list_del_init(&vport->listentry);
David Brazdil0f672f62019-12-10 10:32:29 +00004490 spin_unlock_irq(&phba->port_list_lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004491
4492 lpfc_cleanup(vport);
4493 return;
4494}
4495
4496/**
4497 * lpfc_get_instance - Get a unique integer ID
4498 *
4499 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
4500 * uses the kernel idr facility to perform the task.
4501 *
4502 * Return codes:
4503 * instance - a unique integer ID allocated as the new instance.
4504 * -1 - lpfc get instance failed.
4505 **/
4506int
4507lpfc_get_instance(void)
4508{
4509 int ret;
4510
4511 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
4512 return ret < 0 ? -1 : ret;
4513}
4514
4515/**
4516 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
4517 * @shost: pointer to SCSI host data structure.
4518 * @time: elapsed time of the scan in jiffies.
4519 *
4520 * This routine is called by the SCSI layer with a SCSI host to determine
4521 * whether the host scan is finished.
4522 *
4523 * Note: there is no scan_start function as adapter initialization will have
4524 * asynchronously kicked off the link initialization.
4525 *
4526 * Return codes
4527 * 0 - SCSI host scan is not over yet.
4528 * 1 - SCSI host scan is over.
4529 **/
4530int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
4531{
4532 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4533 struct lpfc_hba *phba = vport->phba;
4534 int stat = 0;
4535
4536 spin_lock_irq(shost->host_lock);
4537
4538 if (vport->load_flag & FC_UNLOADING) {
4539 stat = 1;
4540 goto finished;
4541 }
4542 if (time >= msecs_to_jiffies(30 * 1000)) {
4543 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4544 "0461 Scanning longer than 30 "
4545 "seconds. Continuing initialization\n");
4546 stat = 1;
4547 goto finished;
4548 }
4549 if (time >= msecs_to_jiffies(15 * 1000) &&
4550 phba->link_state <= LPFC_LINK_DOWN) {
4551 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4552 "0465 Link down longer than 15 "
4553 "seconds. Continuing initialization\n");
4554 stat = 1;
4555 goto finished;
4556 }
4557
4558 if (vport->port_state != LPFC_VPORT_READY)
4559 goto finished;
4560 if (vport->num_disc_nodes || vport->fc_prli_sent)
4561 goto finished;
4562 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
4563 goto finished;
4564 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
4565 goto finished;
4566
4567 stat = 1;
4568
4569finished:
4570 spin_unlock_irq(shost->host_lock);
4571 return stat;
4572}
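/*
 * The scan-finished poll above layers several time-based escapes: give up
 * (and continue initialization) after 30 seconds no matter what, or after
 * 15 seconds if the link never came up. The same staging expressed over
 * plain milliseconds instead of jiffies:
 */
#include <stdio.h>

static int scan_done(unsigned int elapsed_ms, int link_up)
{
	if (elapsed_ms >= 30 * 1000)
		return 1;		/* hard 30s cap */
	if (elapsed_ms >= 15 * 1000 && !link_up)
		return 1;		/* link stayed down for 15s */
	return 0;			/* keep waiting */
}

int main(void)
{
	printf("%d %d %d\n",
	       scan_done(10000, 0),	/* 0: still waiting */
	       scan_done(16000, 0),	/* 1: link down too long */
	       scan_done(31000, 1));	/* 1: hard cap reached */
	return 0;
}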
4573
David Brazdil0f672f62019-12-10 10:32:29 +00004574static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
4575{
4576 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4577 struct lpfc_hba *phba = vport->phba;
4578
4579 fc_host_supported_speeds(shost) = 0;
Olivier Deprez157378f2022-04-04 15:47:50 +02004580 /*
4581 * Avoid reporting supported link speed for FCoE as it can't be
4582 * controlled via FCoE.
4583 */
4584 if (phba->hba_flag & HBA_FCOE_MODE)
4585 return;
4586
David Brazdil0f672f62019-12-10 10:32:29 +00004587 if (phba->lmt & LMT_128Gb)
4588 fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
4589 if (phba->lmt & LMT_64Gb)
4590 fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
4591 if (phba->lmt & LMT_32Gb)
4592 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
4593 if (phba->lmt & LMT_16Gb)
4594 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
4595 if (phba->lmt & LMT_10Gb)
4596 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
4597 if (phba->lmt & LMT_8Gb)
4598 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
4599 if (phba->lmt & LMT_4Gb)
4600 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
4601 if (phba->lmt & LMT_2Gb)
4602 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
4603 if (phba->lmt & LMT_1Gb)
4604 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
4605}
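/*
 * The routine above folds each link-speed capability (LMT) bit into the
 * transport layer's supported-speeds bitmask. The same accumulation with
 * hypothetical bit values; the real LMT_* and FC_PORTSPEED_* constants live
 * in the driver and FC transport headers:
 */
#include <stdio.h>

#define LMT_1Gb		0x1	/* hypothetical values, illustration only */
#define LMT_2Gb		0x2
#define LMT_4Gb		0x4
#define SPEED_1GBIT	0x1
#define SPEED_2GBIT	0x2
#define SPEED_4GBIT	0x4

int main(void)
{
	unsigned int lmt = LMT_1Gb | LMT_4Gb;	/* example adapter caps */
	unsigned int mask = 0;

	if (lmt & LMT_1Gb)
		mask |= SPEED_1GBIT;
	if (lmt & LMT_2Gb)
		mask |= SPEED_2GBIT;
	if (lmt & LMT_4Gb)
		mask |= SPEED_4GBIT;
	printf("supported speeds mask: 0x%x\n", mask);	/* prints 0x5 */
	return 0;
}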
4606
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004607/**
4608 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
4609 * @shost: pointer to SCSI host data structure.
4610 *
4611 * This routine initializes a given SCSI host attributes on a FC port. The
4612 * SCSI host can be either on top of a physical port or a virtual port.
4613 **/
4614void lpfc_host_attrib_init(struct Scsi_Host *shost)
4615{
4616 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4617 struct lpfc_hba *phba = vport->phba;
4618 /*
4619 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
4620 */
4621
4622 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
4623 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
4624 fc_host_supported_classes(shost) = FC_COS_CLASS3;
4625
4626 memset(fc_host_supported_fc4s(shost), 0,
4627 sizeof(fc_host_supported_fc4s(shost)));
4628 fc_host_supported_fc4s(shost)[2] = 1;
4629 fc_host_supported_fc4s(shost)[7] = 1;
4630
4631 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
4632 sizeof fc_host_symbolic_name(shost));
4633
David Brazdil0f672f62019-12-10 10:32:29 +00004634 lpfc_host_supported_speeds_set(shost);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004635
4636 fc_host_maxframe_size(shost) =
4637 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
4638 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
4639
4640 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
4641
4642 /* This value is also unchanging */
4643 memset(fc_host_active_fc4s(shost), 0,
4644 sizeof(fc_host_active_fc4s(shost)));
4645 fc_host_active_fc4s(shost)[2] = 1;
4646 fc_host_active_fc4s(shost)[7] = 1;
4647
4648 fc_host_max_npiv_vports(shost) = phba->max_vpi;
4649 spin_lock_irq(shost->host_lock);
4650 vport->load_flag &= ~FC_LOADING;
4651 spin_unlock_irq(shost->host_lock);
4652}
4653
4654/**
4655 * lpfc_stop_port_s3 - Stop SLI3 device port
4656 * @phba: pointer to lpfc hba data structure.
4657 *
4658 * This routine is invoked to stop an SLI3 device port, it stops the device
4659 * from generating interrupts and stops the device driver's timers for the
4660 * device.
4661 **/
4662static void
4663lpfc_stop_port_s3(struct lpfc_hba *phba)
4664{
4665 /* Clear all interrupt enable conditions */
4666 writel(0, phba->HCregaddr);
4667 readl(phba->HCregaddr); /* flush */
4668 /* Clear all pending interrupts */
4669 writel(0xffffffff, phba->HAregaddr);
4670 readl(phba->HAregaddr); /* flush */
4671
4672 /* Reset some HBA SLI setup states */
4673 lpfc_stop_hba_timers(phba);
4674 phba->pport->work_port_events = 0;
4675}
4676
4677/**
4678 * lpfc_stop_port_s4 - Stop SLI4 device port
4679 * @phba: pointer to lpfc hba data structure.
4680 *
4681 * This routine is invoked to stop an SLI4 device port, it stops the device
4682 * from generating interrupts and stops the device driver's timers for the
4683 * device.
4684 **/
4685static void
4686lpfc_stop_port_s4(struct lpfc_hba *phba)
4687{
4688 /* Reset some HBA SLI4 setup states */
4689 lpfc_stop_hba_timers(phba);
David Brazdil0f672f62019-12-10 10:32:29 +00004690 if (phba->pport)
4691 phba->pport->work_port_events = 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004692 phba->sli4_hba.intr_enable = 0;
4693}
4694
4695/**
4696 * lpfc_stop_port - Wrapper function for stopping hba port
4697 * @phba: Pointer to HBA context object.
4698 *
4699 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
4700 * the API jump table function pointer from the lpfc_hba struct.
4701 **/
4702void
4703lpfc_stop_port(struct lpfc_hba *phba)
4704{
4705 phba->lpfc_stop_port(phba);
4706
4707 if (phba->wq)
4708 flush_workqueue(phba->wq);
4709}
4710
4711/**
4712 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
4713 * @phba: Pointer to hba for which this call is being executed.
4714 *
4715 * This routine starts the timer waiting for the FCF rediscovery to complete.
4716 **/
4717void
4718lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
4719{
4720 unsigned long fcf_redisc_wait_tmo =
4721 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
4722 /* Start fcf rediscovery wait period timer */
4723 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
4724 spin_lock_irq(&phba->hbalock);
4725 /* Allow action to new fcf asynchronous event */
4726 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
4727 /* Mark the FCF rediscovery pending state */
4728 phba->fcf.fcf_flag |= FCF_REDISC_PEND;
4729 spin_unlock_irq(&phba->hbalock);
4730}
4731
4732/**
4733 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
Olivier Deprez157378f2022-04-04 15:47:50 +02004734 * @t: Timer context used to obtain the pointer to lpfc hba data structure.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004735 *
4736 * This routine is invoked when the wait for FCF table rediscovery has
4737 * timed out. If new FCF record(s) have been discovered during the wait
4738 * period, a new FCF event is added to the FCOE async event list, and the
4739 * worker thread is then woken up for processing in the worker thread
4740 * context.
4741 **/
4742static void
4743lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
4744{
4745 struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);
4746
4747 /* Don't send FCF rediscovery event if timer cancelled */
4748 spin_lock_irq(&phba->hbalock);
4749 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
4750 spin_unlock_irq(&phba->hbalock);
4751 return;
4752 }
4753 /* Clear FCF rediscovery timer pending flag */
4754 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
4755 /* FCF rediscovery event to worker thread */
4756 phba->fcf.fcf_flag |= FCF_REDISC_EVT;
4757 spin_unlock_irq(&phba->hbalock);
4758 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
4759 "2776 FCF rediscover quiescent timer expired\n");
4760 /* wake up worker thread */
4761 lpfc_worker_wake_up(phba);
4762}
4763
4764/**
4765 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
4766 * @phba: pointer to lpfc hba data structure.
4767 * @acqe_link: pointer to the async link completion queue entry.
4768 *
4769 * This routine is to parse the SLI4 link-attention link fault code.
4770 **/
4771static void
4772lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
4773 struct lpfc_acqe_link *acqe_link)
4774{
4775 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
4776 case LPFC_ASYNC_LINK_FAULT_NONE:
4777 case LPFC_ASYNC_LINK_FAULT_LOCAL:
4778 case LPFC_ASYNC_LINK_FAULT_REMOTE:
4779 case LPFC_ASYNC_LINK_FAULT_LR_LRR:
4780 break;
4781 default:
Olivier Deprez157378f2022-04-04 15:47:50 +02004782 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004783 "0398 Unknown link fault code: x%x\n",
4784 bf_get(lpfc_acqe_link_fault, acqe_link));
4785 break;
4786 }
4787}

/**
 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link attention type and translate it
 * into the base driver's link attention type coding.
 *
 * Return: Link attention type in terms of base driver's coding.
 **/
static uint8_t
lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
			  struct lpfc_acqe_link *acqe_link)
{
	uint8_t att_type;

	switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
	case LPFC_ASYNC_LINK_STATUS_DOWN:
	case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
		att_type = LPFC_ATT_LINK_DOWN;
		break;
	case LPFC_ASYNC_LINK_STATUS_UP:
		/* Ignore physical link up events - wait for logical link up */
		att_type = LPFC_ATT_RESERVED;
		break;
	case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
		att_type = LPFC_ATT_LINK_UP;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0399 Invalid link attention type: x%x\n",
				bf_get(lpfc_acqe_link_status, acqe_link));
		att_type = LPFC_ATT_RESERVED;
		break;
	}
	return att_type;
}

/**
 * lpfc_sli_port_speed_get - Get the link speed of the port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to get an FC port's current link speed in Mbps.
 *
 * Return: link speed in terms of Mbps.
 **/
uint32_t
lpfc_sli_port_speed_get(struct lpfc_hba *phba)
{
	uint32_t link_speed;

	if (!lpfc_is_link_up(phba))
		return 0;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		switch (phba->fc_linkspeed) {
		case LPFC_LINK_SPEED_1GHZ:
			link_speed = 1000;
			break;
		case LPFC_LINK_SPEED_2GHZ:
			link_speed = 2000;
			break;
		case LPFC_LINK_SPEED_4GHZ:
			link_speed = 4000;
			break;
		case LPFC_LINK_SPEED_8GHZ:
			link_speed = 8000;
			break;
		case LPFC_LINK_SPEED_10GHZ:
			link_speed = 10000;
			break;
		case LPFC_LINK_SPEED_16GHZ:
			link_speed = 16000;
			break;
		default:
			link_speed = 0;
		}
	} else {
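		/* SLI4 reports the negotiated logical link speed separately
		 * from the physical speed; prefer the logical speed when the
		 * port provides one (e.g. trunked or virtual links).
		 */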
		if (phba->sli4_hba.link_state.logical_speed)
			link_speed =
			      phba->sli4_hba.link_state.logical_speed;
		else
			link_speed = phba->sli4_hba.link_state.speed;
	}
	return link_speed;
}

/**
 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
 * @phba: pointer to lpfc hba data structure.
 * @evt_code: asynchronous event code.
 * @speed_code: asynchronous event link speed code.
 *
 * This routine is to parse the given SLI4 async event link speed code into
 * a link speed value in Mbps.
 *
 * Return: link speed in terms of Mbps.
 **/
static uint32_t
lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
			   uint8_t speed_code)
{
	uint32_t port_speed;

	switch (evt_code) {
	case LPFC_TRAILER_CODE_LINK:
		switch (speed_code) {
		case LPFC_ASYNC_LINK_SPEED_ZERO:
			port_speed = 0;
			break;
		case LPFC_ASYNC_LINK_SPEED_10MBPS:
			port_speed = 10;
			break;
		case LPFC_ASYNC_LINK_SPEED_100MBPS:
			port_speed = 100;
			break;
		case LPFC_ASYNC_LINK_SPEED_1GBPS:
			port_speed = 1000;
			break;
		case LPFC_ASYNC_LINK_SPEED_10GBPS:
			port_speed = 10000;
			break;
		case LPFC_ASYNC_LINK_SPEED_20GBPS:
			port_speed = 20000;
			break;
		case LPFC_ASYNC_LINK_SPEED_25GBPS:
			port_speed = 25000;
			break;
		case LPFC_ASYNC_LINK_SPEED_40GBPS:
			port_speed = 40000;
			break;
		case LPFC_ASYNC_LINK_SPEED_100GBPS:
			port_speed = 100000;
			break;
		default:
			port_speed = 0;
		}
		break;
	case LPFC_TRAILER_CODE_FC:
		switch (speed_code) {
		case LPFC_FC_LA_SPEED_UNKNOWN:
			port_speed = 0;
			break;
		case LPFC_FC_LA_SPEED_1G:
			port_speed = 1000;
			break;
		case LPFC_FC_LA_SPEED_2G:
			port_speed = 2000;
			break;
		case LPFC_FC_LA_SPEED_4G:
			port_speed = 4000;
			break;
		case LPFC_FC_LA_SPEED_8G:
			port_speed = 8000;
			break;
		case LPFC_FC_LA_SPEED_10G:
			port_speed = 10000;
			break;
		case LPFC_FC_LA_SPEED_16G:
			port_speed = 16000;
			break;
		case LPFC_FC_LA_SPEED_32G:
			port_speed = 32000;
			break;
		case LPFC_FC_LA_SPEED_64G:
			port_speed = 64000;
			break;
		case LPFC_FC_LA_SPEED_128G:
			port_speed = 128000;
			break;
		default:
			port_speed = 0;
		}
		break;
	default:
		port_speed = 0;
	}
	return port_speed;
}

/**
 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FCoE link event.
 **/
static void
lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_link *acqe_link)
{
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_mbx_read_top *la;
	uint8_t att_type;
	int rc;

	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
	if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
		return;
	phba->fcoe_eventtag = acqe_link->event_tag;
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0395 The mboxq allocation failed\n");
		return;
	}
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0396 The lpfc_dmabuf allocation failed\n");
		goto out_free_pmb;
	}
	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0397 The mbuf allocation failed\n");
		goto out_free_dmabuf;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Block ELS IOCBs until we have processed this link event */
	phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;

	/* Update link event statistics */
	phba->sli.slistat.link_event++;

	/* Create lpfc_handle_latt mailbox command from link ACQE */
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = phba->pport;

	/* Keep the link status for extra SLI4 state machine reference */
	phba->sli4_hba.link_state.speed =
			lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
				bf_get(lpfc_acqe_link_speed, acqe_link));
	phba->sli4_hba.link_state.duplex =
				bf_get(lpfc_acqe_link_duplex, acqe_link);
	phba->sli4_hba.link_state.status =
				bf_get(lpfc_acqe_link_status, acqe_link);
	phba->sli4_hba.link_state.type =
				bf_get(lpfc_acqe_link_type, acqe_link);
	phba->sli4_hba.link_state.number =
				bf_get(lpfc_acqe_link_number, acqe_link);
	phba->sli4_hba.link_state.fault =
				bf_get(lpfc_acqe_link_fault, acqe_link);
	phba->sli4_hba.link_state.logical_speed =
			bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2900 Async FC/FCoE Link event - Speed:%dGBit "
			"duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
			"Logical speed:%dMbps Fault:%d\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.duplex,
			phba->sli4_hba.link_state.status,
			phba->sli4_hba.link_state.type,
			phba->sli4_hba.link_state.number,
			phba->sli4_hba.link_state.logical_speed,
			phba->sli4_hba.link_state.fault);
	/*
	 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
	 * topology info. Note: Optional for non FC-AL ports.
	 */
	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED)
			goto out_free_dmabuf;
		return;
	}
	/*
	 * For FCoE Mode: fill in all the topology information we need and call
	 * the READ_TOPOLOGY completion routine to continue without actually
	 * sending the READ_TOPOLOGY mailbox command to the port.
	 */
	/* Initialize completion status */
	mb = &pmb->u.mb;
	mb->mbxStatus = MBX_SUCCESS;

	/* Parse port fault information field */
	lpfc_sli4_parse_latt_fault(phba, acqe_link);

	/* Parse and translate link attention fields */
	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
	la->eventTag = acqe_link->event_tag;
	bf_set(lpfc_mbx_read_top_att_type, la, att_type);
	bf_set(lpfc_mbx_read_top_link_spd, la,
	       (bf_get(lpfc_acqe_link_speed, acqe_link)));

	/* Fake the following irrelevant fields */
	bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
	bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
	bf_set(lpfc_mbx_read_top_il, la, 0);
	bf_set(lpfc_mbx_read_top_pb, la, 0);
	bf_set(lpfc_mbx_read_top_fa, la, 0);
	bf_set(lpfc_mbx_read_top_mm, la, 0);

	/* Invoke the lpfc_handle_latt mailbox command callback function */
	lpfc_mbx_cmpl_read_topology(phba, pmb);

	return;

out_free_dmabuf:
	kfree(mp);
out_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
}

/**
 * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read
 * topology.
 * @phba: pointer to lpfc hba data structure.
 * @speed_code: asynchronous event link speed code.
 *
 * This routine is to parse the given SLI4 async event link speed code into
 * the corresponding Read Topology link speed encoding.
 *
 * Return: link speed in terms of Read topology.
 **/
static uint8_t
lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
{
	uint8_t port_speed;

	switch (speed_code) {
	case LPFC_FC_LA_SPEED_1G:
		port_speed = LPFC_LINK_SPEED_1GHZ;
		break;
	case LPFC_FC_LA_SPEED_2G:
		port_speed = LPFC_LINK_SPEED_2GHZ;
		break;
	case LPFC_FC_LA_SPEED_4G:
		port_speed = LPFC_LINK_SPEED_4GHZ;
		break;
	case LPFC_FC_LA_SPEED_8G:
		port_speed = LPFC_LINK_SPEED_8GHZ;
		break;
	case LPFC_FC_LA_SPEED_16G:
		port_speed = LPFC_LINK_SPEED_16GHZ;
		break;
	case LPFC_FC_LA_SPEED_32G:
		port_speed = LPFC_LINK_SPEED_32GHZ;
		break;
	case LPFC_FC_LA_SPEED_64G:
		port_speed = LPFC_LINK_SPEED_64GHZ;
		break;
	case LPFC_FC_LA_SPEED_128G:
		port_speed = LPFC_LINK_SPEED_128GHZ;
		break;
	case LPFC_FC_LA_SPEED_256G:
		port_speed = LPFC_LINK_SPEED_256GHZ;
		break;
	default:
		port_speed = 0;
		break;
	}

	return port_speed;
}

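/*
 * Helpers for the trunk logging below: report a port's link state or fault
 * status only when that port is part of the trunk configuration, else "NA".
 */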
#define trunk_link_status(__idx)\
	bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
	       ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
		"Link up" : "Link down") : "NA"
/* Did port __idx report an error */
#define trunk_port_fault(__idx)\
	bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
	       (port_fault & (1 << __idx) ? "YES" : "NO") : "NA"

static void
lpfc_update_trunk_link_status(struct lpfc_hba *phba,
			      struct lpfc_acqe_fc_la *acqe_fc)
{
	uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc);
	uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc);

	phba->sli4_hba.link_state.speed =
		lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
					bf_get(lpfc_acqe_fc_la_speed, acqe_fc));

	phba->sli4_hba.link_state.logical_speed =
				bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
	/* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */
	phba->fc_linkspeed =
		 lpfc_async_link_speed_to_read_top(
					phba,
					bf_get(lpfc_acqe_fc_la_speed, acqe_fc));

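	/* For each port configured in the trunk, record its link state and
	 * latch the reported fault code when that port's bit is set in the
	 * fault mask.
	 */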
	if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) {
		phba->trunk_link.link0.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0;
	}
	if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) {
		phba->trunk_link.link1.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0;
	}
	if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) {
		phba->trunk_link.link2.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0;
	}
	if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) {
		phba->trunk_link.link3.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2910 Async FC Trunking Event - Speed:%d\n"
			"\tLogical speed:%d "
			"port0: %s port1: %s port2: %s port3: %s\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.logical_speed,
			trunk_link_status(0), trunk_link_status(1),
			trunk_link_status(2), trunk_link_status(3));

	if (port_fault)
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3202 trunk error:0x%x (%s) seen on port0:%s "
				/*
				 * SLI-4: We have only 0xA error codes
				 * defined as of now. Print an appropriate
				 * message in case the driver needs to be
				 * updated.
				 */
				"port1:%s port2:%s port3:%s\n", err, err > 0xA ?
				"UNDEFINED. update driver." : trunk_errmsg[err],
				trunk_port_fault(0), trunk_port_fault(1),
				trunk_port_fault(2), trunk_port_fault(3));
}

/**
 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fc: pointer to the async fc completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FC event. It will simply log
 * that the event was received and then issue a read_topology mailbox command so
 * that the rest of the driver will treat it the same as SLI3.
 **/
static void
lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
{
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_mbx_read_top *la;
	int rc;

	if (bf_get(lpfc_trailer_type, acqe_fc) !=
	    LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2895 Non FC link Event detected.(%d)\n",
				bf_get(lpfc_trailer_type, acqe_fc));
		return;
	}

	if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
	    LPFC_FC_LA_TYPE_TRUNKING_EVENT) {
		lpfc_update_trunk_link_status(phba, acqe_fc);
		return;
	}

	/* Keep the link status for extra SLI4 state machine reference */
	phba->sli4_hba.link_state.speed =
			lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
				bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
	phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
	phba->sli4_hba.link_state.topology =
				bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
	phba->sli4_hba.link_state.status =
				bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
	phba->sli4_hba.link_state.type =
				bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
	phba->sli4_hba.link_state.number =
				bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
	phba->sli4_hba.link_state.fault =
				bf_get(lpfc_acqe_link_fault, acqe_fc);

	if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
	    LPFC_FC_LA_TYPE_LINK_DOWN)
		phba->sli4_hba.link_state.logical_speed = 0;
	else if (!phba->sli4_hba.conf_trunk)
		phba->sli4_hba.link_state.logical_speed =
				bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2896 Async FC event - Speed:%dGBaud Topology:x%x "
			"LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
			"%dMbps Fault:%d\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.topology,
			phba->sli4_hba.link_state.status,
			phba->sli4_hba.link_state.type,
			phba->sli4_hba.link_state.number,
			phba->sli4_hba.link_state.logical_speed,
			phba->sli4_hba.link_state.fault);
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2897 The mboxq allocation failed\n");
		return;
	}
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2898 The lpfc_dmabuf allocation failed\n");
		goto out_free_pmb;
	}
	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2899 The mbuf allocation failed\n");
		goto out_free_dmabuf;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Block ELS IOCBs until we have processed this link event */
	phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;

	/* Update link event statistics */
	phba->sli.slistat.link_event++;

	/* Create lpfc_handle_latt mailbox command from link ACQE */
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = phba->pport;

	if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
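		/* The link is not cleanly up: clear any stale MDS diagnostic
		 * flags before reclassifying the attention type below.
		 */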
		phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);

		switch (phba->sli4_hba.link_state.status) {
		case LPFC_FC_LA_TYPE_MDS_LINK_DOWN:
			phba->link_flag |= LS_MDS_LINK_DOWN;
			break;
		case LPFC_FC_LA_TYPE_MDS_LOOPBACK:
			phba->link_flag |= LS_MDS_LOOPBACK;
			break;
		default:
			break;
		}

		/* Initialize completion status */
		mb = &pmb->u.mb;
		mb->mbxStatus = MBX_SUCCESS;

		/* Parse port fault information field */
		lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc);

		/* Parse and translate link attention fields */
		la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
		la->eventTag = acqe_fc->event_tag;

		if (phba->sli4_hba.link_state.status ==
		    LPFC_FC_LA_TYPE_UNEXP_WWPN) {
			bf_set(lpfc_mbx_read_top_att_type, la,
			       LPFC_FC_LA_TYPE_UNEXP_WWPN);
		} else {
			bf_set(lpfc_mbx_read_top_att_type, la,
			       LPFC_FC_LA_TYPE_LINK_DOWN);
		}
		/* Invoke the mailbox command callback function */
		lpfc_mbx_cmpl_read_topology(phba, pmb);

		return;
	}

	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		goto out_free_dmabuf;
	return;

out_free_dmabuf:
	kfree(mp);
out_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
}

/**
 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_sli: pointer to the async SLI completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous SLI events.
 **/
static void
lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
{
	char port_name;
	char message[128];
	uint8_t status;
	uint8_t evt_type;
	uint8_t operational = 0;
	struct temp_event temp_event_data;
	struct lpfc_acqe_misconfigured_event *misconfigured;
	struct Scsi_Host *shost;
	struct lpfc_vport **vports;
	int rc, i;

	evt_type = bf_get(lpfc_trailer_type, acqe_sli);

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2901 Async SLI event - Type:%d, Event Data: x%08x "
			"x%08x x%08x x%08x\n", evt_type,
			acqe_sli->event_data1, acqe_sli->event_data2,
			acqe_sli->reserved, acqe_sli->trailer);

	port_name = phba->Port[0];
	if (port_name == 0x00)
		port_name = '?'; /* port name is empty */

	switch (evt_type) {
	case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
		temp_event_data.data = (uint32_t)acqe_sli->event_data1;

		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3190 Over Temperature:%d Celsius- Port Name %c\n",
				acqe_sli->event_data1, port_name);

		phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *)&temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);
		break;
	case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_NORMAL_TEMP;
		temp_event_data.data = (uint32_t)acqe_sli->event_data1;

		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3191 Normal Temperature:%d Celsius - Port Name %c\n",
				acqe_sli->event_data1, port_name);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *)&temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);
		break;
	case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
		misconfigured = (struct lpfc_acqe_misconfigured_event *)
					&acqe_sli->event_data1;

		/* fetch the status for this port */
		switch (phba->sli4_hba.lnk_info.lnk_no) {
		case LPFC_LINK_NUMBER_0:
			status = bf_get(lpfc_sli_misconfigured_port0_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port0_op,
					&misconfigured->theEvent);
			break;
		case LPFC_LINK_NUMBER_1:
			status = bf_get(lpfc_sli_misconfigured_port1_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port1_op,
					&misconfigured->theEvent);
			break;
		case LPFC_LINK_NUMBER_2:
			status = bf_get(lpfc_sli_misconfigured_port2_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port2_op,
					&misconfigured->theEvent);
			break;
		case LPFC_LINK_NUMBER_3:
			status = bf_get(lpfc_sli_misconfigured_port3_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port3_op,
					&misconfigured->theEvent);
			break;
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3296 "
					"LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
					"event: Invalid link %d",
					phba->sli4_hba.lnk_info.lnk_no);
			return;
		}

		/* Skip if optic state unchanged */
		if (phba->sli4_hba.lnk_info.optic_state == status)
			return;

		switch (status) {
		case LPFC_SLI_EVENT_STATUS_VALID:
			sprintf(message, "Physical Link is functional");
			break;
		case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
			sprintf(message, "Optics faulted/incorrectly "
				"installed/not installed - Reseat optics, "
				"if issue not resolved, replace.");
			break;
		case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
			sprintf(message,
				"Optics of two types installed - Remove one "
				"optic or install matching pair of optics.");
			break;
		case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
			sprintf(message, "Incompatible optics - Replace with "
				"compatible optics for card to function.");
			break;
		case LPFC_SLI_EVENT_STATUS_UNQUALIFIED:
			sprintf(message, "Unqualified optics - Replace with "
				"Avago optics for Warranty and Technical "
				"Support - Link is%s operational",
				(operational) ? " not" : "");
			break;
		case LPFC_SLI_EVENT_STATUS_UNCERTIFIED:
			sprintf(message, "Uncertified optics - Replace with "
				"Avago-certified optics to enable link "
				"operation - Link is%s operational",
				(operational) ? " not" : "");
			break;
		default:
			/* firmware is reporting a status we don't know about */
			sprintf(message, "Unknown event status x%02x", status);
			break;
		}

		/* Issue READ_CONFIG mbox command to refresh supported speeds */
		rc = lpfc_sli4_read_config(phba);
		if (rc) {
			phba->lmt = 0;
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3194 Unable to retrieve supported "
					"speeds, rc = 0x%x\n", rc);
		}
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL) {
			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
					i++) {
				shost = lpfc_shost_from_vport(vports[i]);
				lpfc_host_supported_speeds_set(shost);
			}
		}
		lpfc_destroy_vport_work_array(phba, vports);

		phba->sli4_hba.lnk_info.optic_state = status;
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3176 Port Name %c %s\n", port_name, message);
		break;
	case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3192 Remote DPort Test Initiated - "
				"Event Data1:x%08x Event Data2: x%08x\n",
				acqe_sli->event_data1, acqe_sli->event_data2);
		break;
	case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN:
		/* Misconfigured WWN. Reports that the SLI Port is configured
		 * to use FA-WWN, but the attached device doesn't support it.
		 * No driver action is required.
		 * Event Data1 - N.A, Event Data2 - N.A
		 */
		lpfc_log_msg(phba, KERN_WARNING, LOG_SLI,
			     "2699 Misconfigured FA-WWN - Attached device does "
			     "not support FA-WWN\n");
		break;
	case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE:
		/* EEPROM failure. No driver action is required */
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			     "2518 EEPROM failure - "
			     "Event Data1: x%08x Event Data2: x%08x\n",
			     acqe_sli->event_data1, acqe_sli->event_data2);
		break;
	default:
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3193 Unrecognized SLI event, type: 0x%x",
				evt_type);
		break;
	}
}

/**
 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
 * @vport: pointer to vport data structure.
 *
 * This routine is to perform Clear Virtual Link (CVL) on a vport in
 * response to a CVL event.
 *
 * Return the pointer to the ndlp with the vport if successful, otherwise
 * return NULL.
 **/
static struct lpfc_nodelist *
lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	struct lpfc_hba *phba;

	if (!vport)
		return NULL;
	phba = vport->phba;
	if (!phba)
		return NULL;
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = lpfc_nlp_init(vport, Fabric_DID);
		if (!ndlp)
			return NULL;
		/* Set the node type */
		ndlp->nlp_type |= NLP_FABRIC;
		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* re-setup ndlp without removing from node list */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp)
			return NULL;
	}
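	/* Ignore the CVL if the physical port has not yet progressed to
	 * FLOGI, unless it is in a failed state.
	 */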
	if ((phba->pport->port_state < LPFC_FLOGI) &&
		(phba->pport->port_state != LPFC_VPORT_FAILED))
		return NULL;
	/* If virtual link is not yet instantiated ignore CVL */
	if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
		&& (vport->port_state != LPFC_VPORT_FAILED))
		return NULL;
	shost = lpfc_shost_from_vport(vport);
	if (!shost)
		return NULL;
	lpfc_linkdown_port(vport);
	lpfc_cleanup_pending_mbox(vport);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_VPORT_CVL_RCVD;
	spin_unlock_irq(shost->host_lock);

	return ndlp;
}

/**
 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to perform Clear Virtual Link (CVL) on all vports in
 * response to a FCF dead event.
 **/
static void
lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_sli4_perform_vport_cvl(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fip: pointer to the async fcoe completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous fcoe event.
 **/
static void
lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
			struct lpfc_acqe_fip *acqe_fip)
{
	uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
	int rc;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	int active_vlink_present;
	struct lpfc_vport **vports;
	int i;

	phba->fc_eventTag = acqe_fip->event_tag;
	phba->fcoe_eventtag = acqe_fip->event_tag;
	switch (event_type) {
	case LPFC_FIP_EVENT_TYPE_NEW_FCF:
	case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
		if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2546 New FCF event, evt_tag:x%x, "
					"index:x%x\n",
					acqe_fip->event_tag,
					acqe_fip->index);
		else
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
					LOG_DISCOVERY,
					"2788 FCF param modified event, "
					"evt_tag:x%x, index:x%x\n",
					acqe_fip->event_tag,
					acqe_fip->index);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			/*
			 * During period of FCF discovery, read the FCF
			 * table record indexed by the event to update
			 * FCF roundrobin failover eligible FCF bmask.
			 */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
					LOG_DISCOVERY,
					"2779 Read FCF (x%x) for updating "
					"roundrobin FCF failover bmask\n",
					acqe_fip->index);
			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
		}

		/* If the FCF discovery is in progress, do nothing. */
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}
		/* If fast FCF failover rescan event is pending, do nothing */
		if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}

		/* If the FCF has been in discovered state, do nothing. */
		if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}
		spin_unlock_irq(&phba->hbalock);

		/* Otherwise, scan the entire FCF table and re-discover SAN */
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2770 Start FCF table scan per async FCF "
				"event, evt_tag:x%x, index:x%x\n",
				acqe_fip->event_tag, acqe_fip->index);
		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						     LPFC_FCOE_FCF_GET_FIRST);
		if (rc)
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2547 Issue FCF scan read FCF mailbox "
					"command failed (x%x)\n", rc);
		break;

	case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2548 FCF Table full count 0x%x tag 0x%x\n",
				bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
				acqe_fip->event_tag);
		break;

	case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2549 FCF (x%x) disconnected from network, "
				"tag:x%x\n", acqe_fip->index,
				acqe_fip->event_tag);
		/*
		 * If we are in the middle of FCF failover process, clear
		 * the corresponding FCF bit in the roundrobin bitmap.
		 */
		spin_lock_irq(&phba->hbalock);
		if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
		    (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
			spin_unlock_irq(&phba->hbalock);
			/* Update FLOGI FCF failover eligible FCF bmask */
			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
			break;
		}
		spin_unlock_irq(&phba->hbalock);

		/* If the event is not for currently used fcf do nothing */
		if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
			break;

		/*
		 * Otherwise, request the port to rediscover the entire FCF
		 * table for a fast recovery from case that the current FCF
		 * is no longer valid as we are not in the middle of FCF
		 * failover process already.
		 */
		spin_lock_irq(&phba->hbalock);
		/* Mark the fast failover process in progress */
		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
		spin_unlock_irq(&phba->hbalock);

		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2771 Start FCF fast failover process due to "
				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
				"\n", acqe_fip->event_tag, acqe_fip->index);
		rc = lpfc_sli4_redisc_fcf_table(phba);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
					LOG_TRACE_EVENT,
					"2772 Issue FCF rediscover mailbox "
					"command failed, fail through to FCF "
					"dead event\n");
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * Last resort will fail over by treating this
			 * as a link down to FCF registration.
			 */
			lpfc_sli4_fcf_dead_failthrough(phba);
		} else {
			/* Reset FCF roundrobin bmask for new discovery */
			lpfc_sli4_clear_fcf_rr_bmask(phba);
			/*
			 * Handling fast FCF failover to a DEAD FCF event is
			 * considered equivalent to receiving CVL on all vports.
			 */
			lpfc_sli4_perform_all_vport_cvl(phba);
		}
		break;
	case LPFC_FIP_EVENT_TYPE_CVL:
		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2718 Clear Virtual Link Received for VPI 0x%x"
				" tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);

		vport = lpfc_find_vport_by_vpid(phba,
						acqe_fip->index);
		ndlp = lpfc_sli4_perform_vport_cvl(vport);
		if (!ndlp)
			break;
		active_vlink_present = 0;

		vports = lpfc_create_vport_work_array(phba);
		if (vports) {
			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
					i++) {
				if ((!(vports[i]->fc_flag &
					FC_VPORT_CVL_RCVD)) &&
					(vports[i]->port_state > LPFC_FDISC)) {
					active_vlink_present = 1;
					break;
				}
			}
			lpfc_destroy_vport_work_array(phba, vports);
		}

		/*
		 * Don't re-instantiate if vport is marked for deletion.
		 * If we are here first then vport_delete is going to wait
		 * for discovery to complete.
		 */
		if (!(vport->load_flag & FC_UNLOADING) &&
					active_vlink_present) {
			/*
			 * If there are other active VLinks present,
			 * re-instantiate the Vlink using FDISC.
			 */
			mod_timer(&ndlp->nlp_delayfunc,
				  jiffies + msecs_to_jiffies(1000));
			shost = lpfc_shost_from_vport(vport);
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_DELAY_TMO;
			spin_unlock_irq(shost->host_lock);
			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
			vport->port_state = LPFC_FDISC;
		} else {
			/*
			 * Otherwise, we request port to rediscover
			 * the entire FCF table for a fast recovery
			 * from possible case that the current FCF
			 * is no longer valid if we are not already
			 * in the FCF failover process.
			 */
			spin_lock_irq(&phba->hbalock);
			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
				spin_unlock_irq(&phba->hbalock);
				break;
			}
			/* Mark the fast failover process in progress */
			phba->fcf.fcf_flag |= FCF_ACVL_DISC;
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
					LOG_DISCOVERY,
					"2773 Start FCF failover per CVL, "
					"evt_tag:x%x\n", acqe_fip->event_tag);
			rc = lpfc_sli4_redisc_fcf_table(phba);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
						LOG_TRACE_EVENT,
						"2774 Issue FCF rediscover "
						"mailbox command failed, "
						"fail through to CVL event\n");
				spin_lock_irq(&phba->hbalock);
				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
				spin_unlock_irq(&phba->hbalock);
				/*
				 * Last resort will be re-try on the
				 * currently registered FCF entry.
				 */
				lpfc_retry_pport_discovery(phba);
			} else
				/*
				 * Reset FCF roundrobin bmask for new
				 * discovery.
				 */
				lpfc_sli4_clear_fcf_rr_bmask(phba);
		}
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0288 Unknown FCoE event type 0x%x event tag "
				"0x%x\n", event_type, acqe_fip->event_tag);
		break;
	}
}

/**
 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous dcbx event.
 **/
static void
lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_dcbx *acqe_dcbx)
{
	phba->fc_eventTag = acqe_dcbx->event_tag;
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0290 The SLI4 DCBX asynchronous event is not "
			"handled yet\n");
}

/**
 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_grp5: pointer to the async grp5 completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
 * is an asynchronous notification of a logical link speed change. The Port
 * reports the logical link speed in units of 10Mbps.
 **/
static void
lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_grp5 *acqe_grp5)
{
	uint16_t prev_ll_spd;

	phba->fc_eventTag = acqe_grp5->event_tag;
	phba->fcoe_eventtag = acqe_grp5->event_tag;
	prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
	phba->sli4_hba.link_state.logical_speed =
		(bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2789 GRP5 Async Event: Updating logical link speed "
			"from %dMbps to %dMbps\n", prev_ll_spd,
			phba->sli4_hba.link_state.logical_speed);
}

/**
 * lpfc_sli4_async_event_proc - Process all pending asynchronous events
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 asynchronous events.
 **/
void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	/* First, declare the async event has been handled */
	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~ASYNC_EVENT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	/* Now, handle all the async events */
	spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock,
				       iflags);
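		/* The per-event handlers below may block (e.g. allocate
		 * memory or issue mailbox commands), so the event list lock
		 * is dropped while each event is processed and re-taken
		 * before the next iteration.
		 */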

		/* Process the asynchronous event */
		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
		case LPFC_TRAILER_CODE_LINK:
			lpfc_sli4_async_link_evt(phba,
						 &cq_event->cqe.acqe_link);
			break;
		case LPFC_TRAILER_CODE_FCOE:
			lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
			break;
		case LPFC_TRAILER_CODE_DCBX:
			lpfc_sli4_async_dcbx_evt(phba,
						 &cq_event->cqe.acqe_dcbx);
			break;
		case LPFC_TRAILER_CODE_GRP5:
			lpfc_sli4_async_grp5_evt(phba,
						 &cq_event->cqe.acqe_grp5);
			break;
		case LPFC_TRAILER_CODE_FC:
			lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
			break;
		case LPFC_TRAILER_CODE_SLI:
			lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
			break;
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"1804 Invalid asynchronous event code: "
					"x%x\n", bf_get(lpfc_trailer_code,
					&cq_event->cqe.mcqe_cmpl));
			break;
		}

		/* Free the completion event processed to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
		spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
	}
	spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
}

/**
 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process FCF table
 * rediscovery pending completion event.
 **/
void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
{
	int rc;

	spin_lock_irq(&phba->hbalock);
	/* Clear FCF rediscovery timeout event */
	phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
	/* Clear driver fast failover FCF record flag */
	phba->fcf.failover_rec.flag = 0;
	/* Set state for FCF fast failover */
	phba->fcf.fcf_flag |= FCF_REDISC_FOV;
	spin_unlock_irq(&phba->hbalock);

	/* Scan FCF table from the first entry to re-discover SAN */
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
			"2777 Start post-quiescent FCF table scan\n");
	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
	if (rc)
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2747 Issue FCF scan read FCF mailbox "
				"command failed 0x%x\n", rc);
}

/**
 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
 * @phba: pointer to lpfc hba data structure.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine is invoked to set up the per HBA PCI-Device group function
 * API jump table entries.
 *
 * Return: 0 if success, otherwise -ENODEV
 **/
int
lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
	int rc;

	/* Set up lpfc PCI-device group */
	phba->pci_dev_grp = dev_grp;

	/* The LPFC_PCI_DEV_OC uses SLI4 */
	if (dev_grp == LPFC_PCI_DEV_OC)
		phba->sli_rev = LPFC_SLI_REV4;

	/* Set up device INIT API function jump table */
	rc = lpfc_init_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up SCSI API function jump table */
	rc = lpfc_scsi_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up SLI API function jump table */
	rc = lpfc_sli_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up MBOX API function jump table */
	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;

	return 0;
}

/**
 * lpfc_log_intr_mode - Log the active interrupt mode
 * @phba: pointer to lpfc hba data structure.
 * @intr_mode: active interrupt mode adopted.
 *
 * This routine is invoked to log the currently active interrupt mode
 * of the device.
 **/
static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
{
	switch (intr_mode) {
	case 0:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0470 Enable INTx interrupt mode.\n");
		break;
	case 1:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0481 Enabled MSI interrupt mode.\n");
		break;
	case 2:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0480 Enabled MSI-X interrupt mode.\n");
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0482 Illegal interrupt mode.\n");
		break;
	}
	return;
}

/**
 * lpfc_enable_pci_dev - Enable a generic PCI device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the PCI device that is common to all
 * PCI devices.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_enable_pci_dev(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		goto out_error;
	else
		pdev = phba->pcidev;
	/* Enable PCI device */
	if (pci_enable_device_mem(pdev))
		goto out_error;
	/* Request PCI resource for the device */
	if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
		goto out_disable_device;
	/* Set up device as PCI master and save state for EEH */
	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	pci_save_state(pdev);

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	if (pci_is_pcie(pdev))
		pdev->needs_freset = 1;

	return 0;

out_disable_device:
	pci_disable_device(pdev);
out_error:
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"1401 Failed to enable pci device\n");
	return -ENODEV;
}

/**
 * lpfc_disable_pci_dev - Disable a generic PCI device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable the PCI device that is common to all
 * PCI devices.
 **/
static void
lpfc_disable_pci_dev(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return;
	else
		pdev = phba->pcidev;
	/* Release PCI resource and disable PCI device */
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);

	return;
}

/**
 * lpfc_reset_hba - Reset a hba
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to reset a hba device. It brings the HBA
 * offline, performs a board restart, and then brings the board back
 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
 * any outstanding mailbox commands.
 **/
void
lpfc_reset_hba(struct lpfc_hba *phba)
{
	/* If resets are disabled then set error state and return. */
	if (!phba->cfg_enable_hba_reset) {
		phba->link_state = LPFC_HBA_ERROR;
		return;
	}
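	/* When the SLI layer is active, let outstanding mailbox commands
	 * complete during offline preparation; otherwise do not wait.
	 */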
	if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
		lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	else
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);
	lpfc_unblock_mgmt_io(phba);
}

/**
 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
 * @phba: pointer to lpfc hba data structure.
 *
 * This function reads the PCI SR-IOV extended capability of the physical
 * function to obtain the total number of virtual functions the device
 * supports.
 *
 * Return: the number of virtual functions supported, or 0 if the device
 * does not support SR-IOV.
 **/
uint16_t
lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
{
	struct pci_dev *pdev = phba->pcidev;
	uint16_t nr_virtfn;
	int pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
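	/* No SR-IOV extended capability: the function supports no VFs */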
	if (pos == 0)
		return 0;

	pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
	return nr_virtfn;
}

/**
 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
 * @phba: pointer to lpfc hba data structure.
 * @nr_vfn: number of virtual functions to be enabled.
 *
 * This function enables the PCI SR-IOV virtual functions to a physical
 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
 * enable the number of virtual functions to the physical function. As
 * not all devices support SR-IOV, the return code from the pci_enable_sriov()
 * API call is not considered an error condition for most devices.
 **/
int
lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
{
	struct pci_dev *pdev = phba->pcidev;
	uint16_t max_nr_vfn;
	int rc;

	max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
	if (nr_vfn > max_nr_vfn) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3057 Requested vfs (%d) greater than "
				"supported vfs (%d)", nr_vfn, max_nr_vfn);
		return -EINVAL;
	}

	rc = pci_enable_sriov(pdev, nr_vfn);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2806 Failed to enable sriov on this device "
				"with vfn number nr_vf:%d, rc:%d\n",
				nr_vfn, rc);
	} else
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2807 Successful enable sriov on this device "
				"with vfn number nr_vf:%d\n", nr_vfn);
	return rc;
}

/**
 * lpfc_setup_driver_resource_phase1 - Phase1 setup of driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources before the
 * device specific resource setup to support the HBA device it attached to.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/*
	 * Driver resources common to all SLI revisions
	 */
	atomic_set(&phba->fast_event_count, 0);
	atomic_set(&phba->dbg_log_idx, 0);
	atomic_set(&phba->dbg_log_cnt, 0);
	atomic_set(&phba->dbg_log_dmping, 0);
	spin_lock_init(&phba->hbalock);

	/* Initialize ndlp management spinlock */
	spin_lock_init(&phba->ndlp_lock);

	/* Initialize port_list spinlock */
	spin_lock_init(&phba->port_list_lock);
	INIT_LIST_HEAD(&phba->port_list);

	INIT_LIST_HEAD(&phba->work_list);
	init_waitqueue_head(&phba->wait_4_mlo_m_q);

	/* Initialize the wait queue head for the kernel thread */
	init_waitqueue_head(&phba->work_waitq);

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"1403 Protocols supported %s %s %s\n",
			((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
				"SCSI" : " "),
			((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
				"NVME" : " "),
			(phba->nvmet_support ? "NVMET" : " "));

	/* Initialize the IO buffer list used by driver for SLI3 SCSI */
	spin_lock_init(&phba->scsi_buf_list_get_lock);
	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
	spin_lock_init(&phba->scsi_buf_list_put_lock);
	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);

	/* Initialize the fabric iocb list */
	INIT_LIST_HEAD(&phba->fabric_iocb_list);

	/* Initialize list to save ELS buffers */
	INIT_LIST_HEAD(&phba->elsbuf);

	/* Initialize FCF connection rec list */
	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);

	/* Initialize OAS configuration list */
	spin_lock_init(&phba->devicelock);
	INIT_LIST_HEAD(&phba->luns);

	/* MBOX heartbeat timer */
	timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0);
	/* Fabric block timer */
	timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0);
	/* EA polling mode timer */
	timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0);
	/* Heartbeat timer */
	timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);

	INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work);

	INIT_DELAYED_WORK(&phba->idle_stat_delay_work,
			  lpfc_idle_stat_delay_work);

	return 0;
}

/**
 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources specific to
 * support the SLI-3 HBA device it is attached to.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
{
	int rc, entry_sz;

	/*
	 * Initialize timers used by driver
	 */

	/* FCP polling mode timer */
	timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0);

	/* Host attention work mask setup */
	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));

	/* Get all the module params for configuring this host */
	lpfc_get_cfgparam(phba);

	/* Set up phase-1 common device driver resources */
	rc = lpfc_setup_driver_resource_phase1(phba);
	if (rc)
		return -ENODEV;

	if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
		phba->menlo_flag |= HBA_MENLO_SUPPORT;
		/* check for menlo minimum sg count */
		if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
			phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
	}

	if (!phba->sli.sli3_ring)
		phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING,
					      sizeof(struct lpfc_sli_ring),
					      GFP_KERNEL);
	if (!phba->sli.sli3_ring)
		return -ENOMEM;

	/*
	 * Since lpfc_sg_seg_cnt is a module parameter, the sg_dma_buf_size
	 * used to create the sg_dma_buf_pool must be dynamically calculated.
	 */

	if (phba->sli_rev == LPFC_SLI_REV4)
		entry_sz = sizeof(struct sli4_sge);
	else
		entry_sz = sizeof(struct ulp_bde64);

	/* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
	if (phba->cfg_enable_bg) {
		/*
		 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
		 * the FCP rsp, and a BDE for each. Since we have no control
		 * over how many protection data segments the SCSI Layer
		 * will hand us (i.e., there could be one for every block
		 * in the IO), we just allocate enough BDEs to accommodate
		 * our max amount and we need to limit lpfc_sg_seg_cnt to
		 * minimize the risk of running out.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) +
			(LPFC_MAX_SG_SEG_CNT * entry_sz);

		if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
			phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;

		/* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
		phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
	} else {
		/*
		 * The scsi_buf for a regular I/O will hold the FCP cmnd,
		 * the FCP rsp, a BDE for each, and a BDE for up to
		 * cfg_sg_seg_cnt data segments.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) +
			((phba->cfg_sg_seg_cnt + 2) * entry_sz);

		/* Total BDEs in BPL for scsi_sg_list */
		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
	}
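
	/*
	 * Worked example (values illustrative, not taken from the headers):
	 * with cfg_sg_seg_cnt = 64 on SLI-3 and a 12-byte ulp_bde64 entry,
	 * sg_dma_buf_size = sizeof(fcp_cmnd) + sizeof(fcp_rsp) +
	 * (64 + 2) * 12 bytes, i.e. the two reserved BDEs are paid for in
	 * the buffer size as well as in cfg_total_seg_cnt.
	 */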

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
			"9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
			phba->cfg_total_seg_cnt);

	phba->max_vpi = LPFC_MAX_VPI;
	/* This will be set to correct value after config_port mbox */
	phba->max_vports = 0;

	/*
	 * Initialize the SLI Layer to run with lpfc HBAs.
	 */
	lpfc_sli_setup(phba);
	lpfc_sli_queue_init(phba);

	/* Allocate device driver memory */
	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
		return -ENOMEM;

	phba->lpfc_sg_dma_buf_pool =
		dma_pool_create("lpfc_sg_dma_buf_pool",
				&phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
				BPL_ALIGN_SZ, 0);
	if (!phba->lpfc_sg_dma_buf_pool)
		goto fail_free_mem;

	phba->lpfc_cmd_rsp_buf_pool =
		dma_pool_create("lpfc_cmd_rsp_buf_pool",
				&phba->pcidev->dev,
				sizeof(struct fcp_cmnd) +
				sizeof(struct fcp_rsp),
				BPL_ALIGN_SZ, 0);
	if (!phba->lpfc_cmd_rsp_buf_pool)
		goto fail_free_dma_buf_pool;

	/*
	 * Enable sr-iov virtual functions if supported and configured
	 * through the module parameter.
	 */
	if (phba->cfg_sriov_nr_virtfn > 0) {
		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
						    phba->cfg_sriov_nr_virtfn);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"2808 Requested number of SR-IOV "
					"virtual functions (%d) is not "
					"supported\n",
					phba->cfg_sriov_nr_virtfn);
			phba->cfg_sriov_nr_virtfn = 0;
		}
	}

	return 0;

fail_free_dma_buf_pool:
	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
	phba->lpfc_sg_dma_buf_pool = NULL;
fail_free_mem:
	lpfc_mem_free(phba);
	return -ENOMEM;
}

/**
 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up
 * specific for supporting the SLI-3 HBA device it is attached to.
 **/
static void
lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
{
	/* Free device driver memory allocated */
	lpfc_mem_free_all(phba);
}

/**
 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources specific to
 * support the SLI-4 HBA device it is attached to.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	MAILBOX_t *mb;
	int rc, i, max_buf_size;
	int longs;
	int extra;
	uint64_t wwn;
	u32 if_type;
	u32 if_fam;

	phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
	phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1;
	phba->sli4_hba.curr_disp_cpu = 0;
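
	/*
	 * Note: cpumask_last(cpu_possible_mask) + 1 is used rather than
	 * num_possible_cpus() so that per-CPU arrays indexed by CPU id are
	 * sized up to the highest possible CPU number, which matters when
	 * the possible mask is sparse (e.g. ids 0,1,4,5 need a size of 6).
	 */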

	/* Get all the module params for configuring this host */
	lpfc_get_cfgparam(phba);

	/* Set up phase-1 common device driver resources */
	rc = lpfc_setup_driver_resource_phase1(phba);
	if (rc)
		return -ENODEV;

	/* Before proceeding, wait for POST done and device ready */
	rc = lpfc_sli4_post_status_check(phba);
	if (rc)
		return -ENODEV;

	/* Allocate all driver workqueues here */

	/* The lpfc_wq workqueue for deferred irq use */
	phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);

	/*
	 * Initialize timers used by driver
	 */
	timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0);

	/* FCF rediscover timer */
	timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);

	/*
	 * Control structure for handling external multi-buffer mailbox
	 * command pass-through.
	 */
	memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
		sizeof(struct lpfc_mbox_ext_buf_ctx));
	INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);

	phba->max_vpi = LPFC_MAX_VPI;

	/* This will be set to correct value after the read_config mbox */
	phba->max_vports = 0;

	/* Program the default value of vlan_id and fc_map */
	phba->valid_vlan = 0;
	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;

	/*
	 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands,
	 * we will associate a new ring with each EQ/CQ/WQ tuple.
	 * The WQ create will allocate the ring.
	 */

	/* Initialize buffer queue management fields */
	INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;

	/*
	 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
	 */
	/* Initialize the Abort buffer list used by driver */
	spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		/* Initialize the Abort nvme buffer list used by driver */
		spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
		spin_lock_init(&phba->sli4_hba.t_active_list_lock);
		INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list);
	}

	/* This abort list used by worker thread */
	spin_lock_init(&phba->sli4_hba.sgl_list_lock);
	spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
	spin_lock_init(&phba->sli4_hba.asynce_list_lock);
	spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock);

	/*
	 * Initialize driver internal slow-path work queues
	 */

	/* Driver internal slow-path CQ Event pool */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
	/* Response IOCB work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
	/* Asynchronous event CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
	/* Slow-path XRI aborted CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
	/* Receive queue CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);

	/* Initialize extent block lists. */
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
	INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);

	/* Initialize mboxq lists. If the early init routines fail
	 * these lists need to be correctly initialized.
	 */
	INIT_LIST_HEAD(&phba->sli.mboxq);
	INIT_LIST_HEAD(&phba->sli.mboxq_cmpl);

	/* initialize optic_state to 0xFF */
	phba->sli4_hba.lnk_info.optic_state = 0xff;

	/* Allocate device driver memory */
	rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
	if (rc)
		return -ENOMEM;

	/* IF Type 2 ports get initialized now. */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
	    LPFC_SLI_INTF_IF_TYPE_2) {
		rc = lpfc_pci_function_reset(phba);
		if (unlikely(rc)) {
			rc = -ENODEV;
			goto out_free_mem;
		}
		phba->temp_sensor_support = 1;
	}

	/* Create the bootstrap mailbox command */
	rc = lpfc_create_bootstrap_mbox(phba);
	if (unlikely(rc))
		goto out_free_mem;

	/* Set up the host's endian order with the device. */
	rc = lpfc_setup_endian_order(phba);
	if (unlikely(rc))
		goto out_free_bsmbx;

	/* Set up the hba's configuration parameters. */
	rc = lpfc_sli4_read_config(phba);
	if (unlikely(rc))
		goto out_free_bsmbx;

	rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
	if (unlikely(rc))
		goto out_free_bsmbx;

	/* IF Type 0 ports get initialized now. */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_0) {
		rc = lpfc_pci_function_reset(phba);
		if (unlikely(rc))
			goto out_free_bsmbx;
	}

	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
					       GFP_KERNEL);
	if (!mboxq) {
		rc = -ENOMEM;
		goto out_free_bsmbx;
	}

	/* Check for NVMET being configured */
	phba->nvmet_support = 0;
	if (lpfc_enable_nvmet_cnt) {

		/* First get WWN of HBA instance */
		lpfc_read_nv(phba, mboxq);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR,
					LOG_TRACE_EVENT,
					"6016 Mailbox failed, mbxCmd x%x "
					"READ_NV, mbxStatus x%x\n",
					bf_get(lpfc_mqe_command, &mboxq->u.mqe),
					bf_get(lpfc_mqe_status, &mboxq->u.mqe));
			mempool_free(mboxq, phba->mbox_mem_pool);
			rc = -EIO;
			goto out_free_bsmbx;
		}
		mb = &mboxq->u.mb;
		memcpy(&wwn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(uint64_t));
		wwn = cpu_to_be64(wwn);
		phba->sli4_hba.wwnn.u.name = wwn;
		memcpy(&wwn, (char *)mb->un.varRDnvp.portname,
		       sizeof(uint64_t));
		/* wwn is WWPN of HBA instance */
		wwn = cpu_to_be64(wwn);
		phba->sli4_hba.wwpn.u.name = wwn;

		/* Check to see if it matches any module parameter */
		for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
			if (wwn == lpfc_enable_nvmet[i]) {
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
				if (lpfc_nvmet_mem_alloc(phba))
					break;

				phba->nvmet_support = 1; /* a match */

				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"6017 NVME Target %016llx\n",
						wwn);
#else
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"6021 Can't enable NVME Target."
						" NVME_TARGET_FC infrastructure"
						" is not in kernel\n");
#endif
				/* Not supported for NVMET */
				phba->cfg_xri_rebalancing = 0;
				if (phba->irq_chann_mode == NHT_MODE) {
					phba->cfg_irq_chann =
						phba->sli4_hba.num_present_cpu;
					phba->cfg_hdw_queue =
						phba->sli4_hba.num_present_cpu;
					phba->irq_chann_mode = NORMAL_MODE;
				}
				break;
			}
		}
	}

	lpfc_nvme_mod_param_dep(phba);

	/*
	 * Get sli4 parameters that override parameters from Port capabilities.
	 * If this call fails, it isn't critical unless the SLI4 parameters
	 * come back in conflict.
	 */
	rc = lpfc_get_sli4_parameters(phba, mboxq);
	if (rc) {
		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		if_fam = bf_get(lpfc_sli_intf_sli_family,
				&phba->sli4_hba.sli_intf);
		if (phba->sli4_hba.extents_in_use &&
		    phba->sli4_hba.rpi_hdrs_in_use) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2999 Unsupported SLI4 Parameters "
					"Extents and RPI headers enabled.\n");
			if (if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
			    if_fam == LPFC_SLI_INTF_FAMILY_BE2) {
				mempool_free(mboxq, phba->mbox_mem_pool);
				rc = -EIO;
				goto out_free_bsmbx;
			}
		}
		if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
		      if_fam == LPFC_SLI_INTF_FAMILY_BE2)) {
			mempool_free(mboxq, phba->mbox_mem_pool);
			rc = -EIO;
			goto out_free_bsmbx;
		}
	}

	/*
	 * 1 for cmd, 1 for rsp, NVME adds an extra one
	 * for boundary conditions in its max_sgl_segment template.
	 */
	extra = 2;
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		extra++;

	/*
	 * It doesn't matter what family our adapter is in, we are
	 * limited to 2 Pages, 512 SGEs, for our SGL.
	 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
	 */
	max_buf_size = (2 * SLI4_PAGE_SIZE);

	/*
	 * Since lpfc_sg_seg_cnt is a module param, the sg_dma_buf_size
	 * used to create the sg_dma_buf_pool must be calculated.
	 */
	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
		/* Both cfg_enable_bg and cfg_external_dif code paths */

		/*
		 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
		 * the FCP rsp, and an SGE. Since we have no control
		 * over how many protection segments the SCSI Layer
		 * will hand us (i.e., there could be one for every block
		 * in the IO), just allocate enough SGEs to accommodate
		 * our max amount and we need to limit lpfc_sg_seg_cnt
		 * to minimize the risk of running out.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
				sizeof(struct fcp_rsp) + max_buf_size;

		/* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
		phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;

		/*
		 * If supporting DIF, reduce the seg count for scsi to
		 * allow room for the DIF sges.
		 */
		if (phba->cfg_enable_bg &&
		    phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
			phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
		else
			phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;

	} else {
		/*
		 * The scsi_buf for a regular I/O holds the FCP cmnd,
		 * the FCP rsp, an SGE for each, and an SGE for up to
		 * cfg_sg_seg_cnt data segments.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
				sizeof(struct fcp_rsp) +
				((phba->cfg_sg_seg_cnt + extra) *
				sizeof(struct sli4_sge));

		/* Total SGEs for scsi_sg_list */
		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
		phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;

		/*
		 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
		 * need to post 1 page for the SGL.
		 */
	}
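
	/*
	 * Worked example (values illustrative): with a 4 KB SLI4_PAGE_SIZE,
	 * max_buf_size = 8192 bytes, and assuming a 16-byte struct sli4_sge
	 * that is 512 SGEs, matching the 2-page/512-SGE limit noted above.
	 * In the non-DIF path with cfg_sg_seg_cnt = 254 and NVME enabled
	 * (extra = 3), the pool buffer holds (254 + 3) * 16 bytes of SGEs
	 * plus the FCP cmnd and rsp.
	 */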

	if (phba->cfg_xpsgl && !phba->nvmet_support)
		phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE;
	else if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
		phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
	else
		phba->cfg_sg_dma_buf_size =
			SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);

	phba->border_sge_num = phba->cfg_sg_dma_buf_size /
			       sizeof(struct sli4_sge);
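
	/*
	 * border_sge_num is how many SGEs fit in one DMA buffer, i.e. the
	 * boundary at which a chained (multi-buffer) SGL must link to the
	 * next buffer. For example (values illustrative): a 4096-byte
	 * buffer with 16-byte SGEs gives a border of 256 entries.
	 */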

	/* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
			lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
					"6300 Reducing NVME sg segment "
					"cnt to %d\n",
					LPFC_MAX_NVME_SEG_CNT);
			phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
		} else
			phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
			"9087 sg_seg_cnt:%d dmabuf_size:%d "
			"total:%d scsi:%d nvme:%d\n",
			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
			phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt,
			phba->cfg_nvme_seg_cnt);

	if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
		i = phba->cfg_sg_dma_buf_size;
	else
		i = SLI4_PAGE_SIZE;

	phba->lpfc_sg_dma_buf_pool =
			dma_pool_create("lpfc_sg_dma_buf_pool",
					&phba->pcidev->dev,
					phba->cfg_sg_dma_buf_size,
					i, 0);
	if (!phba->lpfc_sg_dma_buf_pool)
		goto out_free_bsmbx;

	phba->lpfc_cmd_rsp_buf_pool =
			dma_pool_create("lpfc_cmd_rsp_buf_pool",
					&phba->pcidev->dev,
					sizeof(struct fcp_cmnd) +
					sizeof(struct fcp_rsp),
					i, 0);
	if (!phba->lpfc_cmd_rsp_buf_pool)
		goto out_free_sg_dma_buf;

	mempool_free(mboxq, phba->mbox_mem_pool);

	/* Verify OAS is supported */
	lpfc_sli4_oas_verify(phba);

	/* Verify RAS support on adapter */
	lpfc_sli4_ras_init(phba);

	/* Verify all the SLI4 queues */
	rc = lpfc_sli4_queue_verify(phba);
	if (rc)
		goto out_free_cmd_rsp_buf;

	/* Create driver internal CQE event pool */
	rc = lpfc_sli4_cq_event_pool_create(phba);
	if (rc)
		goto out_free_cmd_rsp_buf;

	/* Initialize sgl lists per host */
	lpfc_init_sgl_list(phba);

	/* Allocate and initialize active sgl array */
	rc = lpfc_init_active_sgl_array(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1430 Failed to initialize sgl list.\n");
		goto out_destroy_cq_event_pool;
	}
	rc = lpfc_sli4_init_rpi_hdrs(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1432 Failed to initialize rpi headers.\n");
		goto out_free_active_sgl;
	}

	/* Allocate eligible FCF bmask memory for FCF roundrobin failover */
	longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
	phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long),
					 GFP_KERNEL);
	if (!phba->fcf.fcf_rr_bmask) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2759 Failed allocate memory for FCF round "
				"robin failover bmask\n");
		rc = -ENOMEM;
		goto out_remove_rpi_hdrs;
	}

	phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann,
					    sizeof(struct lpfc_hba_eq_hdl),
					    GFP_KERNEL);
	if (!phba->sli4_hba.hba_eq_hdl) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2572 Failed allocate memory for "
				"fast-path per-EQ handle array\n");
		rc = -ENOMEM;
		goto out_free_fcf_rr_bmask;
	}

	phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu,
					 sizeof(struct lpfc_vector_map_info),
					 GFP_KERNEL);
	if (!phba->sli4_hba.cpu_map) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3327 Failed allocate memory for msi-x "
				"interrupt vector mapping\n");
		rc = -ENOMEM;
		goto out_free_hba_eq_hdl;
	}

	phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info);
	if (!phba->sli4_hba.eq_info) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3321 Failed allocation for per_cpu stats\n");
		rc = -ENOMEM;
		goto out_free_hba_cpu_map;
	}

	phba->sli4_hba.idle_stat = kcalloc(phba->sli4_hba.num_possible_cpu,
					   sizeof(*phba->sli4_hba.idle_stat),
					   GFP_KERNEL);
	if (!phba->sli4_hba.idle_stat) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3390 Failed allocation for idle_stat\n");
		rc = -ENOMEM;
		goto out_free_hba_eq_info;
	}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat);
	if (!phba->sli4_hba.c_stat) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3332 Failed allocating per cpu hdwq stats\n");
		rc = -ENOMEM;
		goto out_free_hba_idle_stat;
	}
#endif

	/*
	 * Enable sr-iov virtual functions if supported and configured
	 * through the module parameter.
	 */
	if (phba->cfg_sriov_nr_virtfn > 0) {
		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
						    phba->cfg_sriov_nr_virtfn);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"3020 Requested number of SR-IOV "
					"virtual functions (%d) is not "
					"supported\n",
					phba->cfg_sriov_nr_virtfn);
			phba->cfg_sriov_nr_virtfn = 0;
		}
	}

	return 0;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
out_free_hba_idle_stat:
	kfree(phba->sli4_hba.idle_stat);
#endif
out_free_hba_eq_info:
	free_percpu(phba->sli4_hba.eq_info);
out_free_hba_cpu_map:
	kfree(phba->sli4_hba.cpu_map);
out_free_hba_eq_hdl:
	kfree(phba->sli4_hba.hba_eq_hdl);
out_free_fcf_rr_bmask:
	kfree(phba->fcf.fcf_rr_bmask);
out_remove_rpi_hdrs:
	lpfc_sli4_remove_rpi_hdrs(phba);
out_free_active_sgl:
	lpfc_free_active_sgl(phba);
out_destroy_cq_event_pool:
	lpfc_sli4_cq_event_pool_destroy(phba);
out_free_cmd_rsp_buf:
	dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
	phba->lpfc_cmd_rsp_buf_pool = NULL;
out_free_sg_dma_buf:
	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
	phba->lpfc_sg_dma_buf_pool = NULL;
out_free_bsmbx:
	lpfc_destroy_bootstrap_mbox(phba);
out_free_mem:
	lpfc_mem_free(phba);
	return rc;
}

/**
 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up
 * specific for supporting the SLI-4 HBA device it is attached to.
 **/
static void
lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
{
	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;

	free_percpu(phba->sli4_hba.eq_info);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	free_percpu(phba->sli4_hba.c_stat);
#endif
	kfree(phba->sli4_hba.idle_stat);

	/* Free memory allocated for msi-x interrupt vector to CPU mapping */
	kfree(phba->sli4_hba.cpu_map);
	phba->sli4_hba.num_possible_cpu = 0;
	phba->sli4_hba.num_present_cpu = 0;
	phba->sli4_hba.curr_disp_cpu = 0;
	cpumask_clear(&phba->sli4_hba.irq_aff_mask);

	/* Free memory allocated for fast-path work queue handles */
	kfree(phba->sli4_hba.hba_eq_hdl);

	/* Free the allocated rpi headers. */
	lpfc_sli4_remove_rpi_hdrs(phba);
	lpfc_sli4_remove_rpis(phba);

	/* Free eligible FCF index bmask */
	kfree(phba->fcf.fcf_rr_bmask);

	/* Free the ELS sgl list */
	lpfc_free_active_sgl(phba);
	lpfc_free_els_sgl_list(phba);
	lpfc_free_nvmet_sgl_list(phba);

	/* Free the completion queue EQ event pool */
	lpfc_sli4_cq_event_release_all(phba);
	lpfc_sli4_cq_event_pool_destroy(phba);

	/* Release resource identifiers. */
	lpfc_sli4_dealloc_resource_identifiers(phba);

	/* Free the bsmbx region. */
	lpfc_destroy_bootstrap_mbox(phba);

	/* Free the SLI Layer memory with SLI4 HBAs */
	lpfc_mem_free_all(phba);

	/* Free the current connect table */
	list_for_each_entry_safe(conn_entry, next_conn_entry,
				 &phba->fcf_conn_rec_list, list) {
		list_del_init(&conn_entry->list);
		kfree(conn_entry);
	}
}

/**
 * lpfc_init_api_table_setup - Set up init api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the device INIT interface API function jump table
 * in @phba struct.
 *
 * Returns: 0 - success, -ENODEV - failure.
 **/
int
lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
	phba->lpfc_hba_init_link = lpfc_hba_init_link;
	phba->lpfc_hba_down_link = lpfc_hba_down_link;
	phba->lpfc_selective_reset = lpfc_selective_reset;
	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
		phba->lpfc_stop_port = lpfc_stop_port_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
		phba->lpfc_stop_port = lpfc_stop_port_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1431 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
	}
	return 0;
}

/**
 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources after the
 * device specific resource setup to support the HBA device it is attached to.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
{
	int error;

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		return error;
	}

	return 0;
}

/**
 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up after
 * the device specific resource setup for supporting the HBA device it is
 * attached to.
 **/
static void
lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
{
	if (phba->wq) {
		flush_workqueue(phba->wq);
		destroy_workqueue(phba->wq);
		phba->wq = NULL;
	}

	/* Stop kernel worker thread */
	if (phba->worker_thread)
		kthread_stop(phba->worker_thread);
}

/**
 * lpfc_free_iocb_list - Free iocb list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's IOCB list and memory.
 **/
void
lpfc_free_iocb_list(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocbq_entry, iocbq_next,
				 &phba->lpfc_iocb_list, list) {
		list_del(&iocbq_entry->list);
		kfree(iocbq_entry);
		phba->total_iocbq_bufs--;
	}
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_init_iocb_list - Allocate and initialize iocb list.
 * @phba: pointer to lpfc hba data structure.
 * @iocb_count: number of requested iocbs
 *
 * This routine is invoked to allocate and initialize the driver's IOCB
 * list and set up the IOCB tag array accordingly.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
int
lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
{
	struct lpfc_iocbq *iocbq_entry = NULL;
	uint16_t iotag;
	int i;

	/* Initialize and populate the iocb list per host. */
	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
	for (i = 0; i < iocb_count; i++) {
		iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
		if (iocbq_entry == NULL) {
			printk(KERN_ERR "%s: only allocated %d iocbs of "
				"expected %d count. Unloading driver.\n",
				__func__, i, iocb_count);
			goto out_free_iocbq;
		}

		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
		if (iotag == 0) {
			kfree(iocbq_entry);
			printk(KERN_ERR "%s: failed to allocate IOTAG. "
				"Unloading driver.\n", __func__);
			goto out_free_iocbq;
		}
		iocbq_entry->sli4_lxritag = NO_XRI;
		iocbq_entry->sli4_xritag = NO_XRI;

		spin_lock_irq(&phba->hbalock);
		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
		phba->total_iocbq_bufs++;
		spin_unlock_irq(&phba->hbalock);
	}

	return 0;

out_free_iocbq:
	lpfc_free_iocb_list(phba);

	return -ENOMEM;
}
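
/*
 * Note: lpfc_sli_next_iotag() both assigns a unique iotag to the iocbq and
 * grows the driver's iotag lookup table as needed, so a completion can be
 * mapped from its iotag back to the originating iocbq without a list walk.
 * The exact table-growth policy lives in lpfc_sli.c.
 */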

/**
 * lpfc_free_sgl_list - Free a given sgl list.
 * @phba: pointer to lpfc hba data structure.
 * @sglq_list: pointer to the head of sgl list.
 *
 * This routine is invoked to free a given sgl list and memory.
 **/
void
lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;

	list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
		list_del(&sglq_entry->list);
		lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
		kfree(sglq_entry);
	}
}

/**
 * lpfc_free_els_sgl_list - Free els sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's els sgl list and memory.
 **/
static void
lpfc_free_els_sgl_list(struct lpfc_hba *phba)
{
	LIST_HEAD(sglq_list);

	/* Retrieve all els sgls from driver list */
	spin_lock_irq(&phba->hbalock);
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	spin_unlock_irq(&phba->hbalock);

	/* Now free the sgl list */
	lpfc_free_sgl_list(phba, &sglq_list);
}

/**
 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's nvmet sgl list and memory.
 **/
static void
lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
	LIST_HEAD(sglq_list);

	/* Retrieve all nvmet sgls from driver list */
	spin_lock_irq(&phba->hbalock);
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	spin_unlock_irq(&phba->hbalock);

	/* Now free the sgl list */
	list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) {
		list_del(&sglq_entry->list);
		lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
		kfree(sglq_entry);
	}

	/* Update the nvmet_xri_cnt to reflect no current sgls.
	 * The next initialization cycle sets the count and allocates
	 * the sgls over again.
	 */
	phba->sli4_hba.nvmet_xri_cnt = 0;
}

/**
 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate the driver's active sgl memory.
 * This array will hold the sglq_entry's for active IOs.
 **/
static int
lpfc_init_active_sgl_array(struct lpfc_hba *phba)
{
	int size;

	size = sizeof(struct lpfc_sglq *);
	size *= phba->sli4_hba.max_cfg_param.max_xri;

	phba->sli4_hba.lpfc_sglq_active_list =
		kzalloc(size, GFP_KERNEL);
	if (!phba->sli4_hba.lpfc_sglq_active_list)
		return -ENOMEM;
	return 0;
}
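
/*
 * Sizing note (values illustrative): the active list is a flat array of
 * struct lpfc_sglq pointers indexed by XRI, so with, say, max_xri = 1024
 * and 8-byte pointers it costs 8 KB and gives O(1) XRI-to-sglq lookup.
 */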

/**
 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to walk through the array of active sglq entries
 * and free all of the resources.
 * This is just a place holder for now.
 **/
static void
lpfc_free_active_sgl(struct lpfc_hba *phba)
{
	kfree(phba->sli4_hba.lpfc_sglq_active_list);
}

/**
 * lpfc_init_sgl_list - Allocate and initialize sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate and initialize the driver's sgl
 * list and set up the sgl xritag tag array accordingly.
 **/
static void
lpfc_init_sgl_list(struct lpfc_hba *phba)
{
	/* Initialize and populate the sglq list per host/VF. */
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);

	/* els xri-sgl book keeping */
	phba->sli4_hba.els_xri_cnt = 0;

	/* nvme xri-buffer book keeping */
	phba->sli4_hba.io_xri_cnt = 0;
}

/**
 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to post rpi header templates to the
 * port for those SLI4 ports that do not support extents. This routine
 * posts a PAGE_SIZE memory region to the port to hold up to
 * PAGE_SIZE / 64 rpi context headers. This is an initialization routine
 * and should be called only when interrupts are disabled.
 *
 * Return codes
 *	0 - successful
 *	-ERROR - otherwise.
 **/
int
lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
{
	int rc = 0;
	struct lpfc_rpi_hdr *rpi_hdr;

	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		return rc;
	if (phba->sli4_hba.extents_in_use)
		return -EIO;

	rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
	if (!rpi_hdr) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0391 Error during rpi post operation\n");
		lpfc_sli4_remove_rpis(phba);
		rc = -ENODEV;
	}

	return rc;
}

/**
 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate a single 4KB memory region to
 * support rpis and stores them in the phba. This single region
 * provides support for up to 64 rpis. The region is used globally
 * by the device.
 *
 * Returns:
 *	A valid rpi hdr on success.
 *	A NULL pointer on any failure.
 **/
struct lpfc_rpi_hdr *
lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
{
	uint16_t rpi_limit, curr_rpi_range;
	struct lpfc_dmabuf *dmabuf;
	struct lpfc_rpi_hdr *rpi_hdr;

	/*
	 * If the SLI4 port supports extents, posting the rpi header isn't
	 * required. Set the expected maximum count and let the actual value
	 * get set when extents are fully allocated.
	 */
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		return NULL;
	if (phba->sli4_hba.extents_in_use)
		return NULL;

	/* The limit on the logical index is just the max_rpi count. */
	rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi;

	spin_lock_irq(&phba->hbalock);
	/*
	 * Establish the starting RPI in this header block. The starting
	 * rpi is normalized to a zero base because the physical rpi is
	 * port based.
	 */
	curr_rpi_range = phba->sli4_hba.next_rpi;
	spin_unlock_irq(&phba->hbalock);

	/* Reached full RPI range */
	if (curr_rpi_range == rpi_limit)
		return NULL;

	/*
	 * First allocate the protocol header region for the port. The
	 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
	 */
	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return NULL;

	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
					  LPFC_HDR_TEMPLATE_SIZE,
					  &dmabuf->phys, GFP_KERNEL);
	if (!dmabuf->virt) {
		rpi_hdr = NULL;
		goto err_free_dmabuf;
	}

	if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
		rpi_hdr = NULL;
		goto err_free_coherent;
	}

	/* Save the rpi header data for cleanup later. */
	rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
	if (!rpi_hdr)
		goto err_free_coherent;

	rpi_hdr->dmabuf = dmabuf;
	rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
	rpi_hdr->page_count = 1;

	spin_lock_irq(&phba->hbalock);

	/* The rpi_hdr stores the logical index only. */
	rpi_hdr->start_rpi = curr_rpi_range;
	rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT;
	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);

	spin_unlock_irq(&phba->hbalock);
	return rpi_hdr;

 err_free_coherent:
	dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
			  dmabuf->virt, dmabuf->phys);
 err_free_dmabuf:
	kfree(dmabuf);
	return NULL;
}
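
/*
 * Sizing note (assuming the 4 KB template region and 64-byte rpi contexts
 * described above): one header region covers LPFC_RPI_HDR_COUNT rpis,
 * which is why next_rpi advances by LPFC_RPI_HDR_COUNT for each region
 * added to lpfc_rpi_hdr_list.
 */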

/**
 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to remove all memory resources allocated
 * to support rpis for SLI4 ports not supporting extents. This routine
 * presumes the caller has released all rpis consumed by fabric or port
 * logins and is prepared to have the header pages removed.
 **/
void
lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
{
	struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;

	if (!phba->sli4_hba.rpi_hdrs_in_use)
		goto exit;

	list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
				 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
		list_del(&rpi_hdr->list);
		dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
				  rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
		kfree(rpi_hdr->dmabuf);
		kfree(rpi_hdr);
	}
 exit:
	/* There are no rpis available to the port now. */
	phba->sli4_hba.next_rpi = 0;
}

/**
 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
 * @pdev: pointer to pci device data structure.
 *
 * This routine is invoked to allocate the driver hba data structure for an
 * HBA device. If the allocation is successful, the phba reference to the
 * PCI device data structure is set.
 *
 * Return codes
 *	pointer to @phba - successful
 *	NULL - error
 **/
static struct lpfc_hba *
lpfc_hba_alloc(struct pci_dev *pdev)
{
	struct lpfc_hba *phba;

	/* Allocate memory for HBA structure */
	phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
	if (!phba) {
		dev_err(&pdev->dev, "failed to allocate hba struct\n");
		return NULL;
	}

	/* Set reference to PCI device in HBA structure */
	phba->pcidev = pdev;

	/* Assign an unused board number */
	phba->brd_no = lpfc_get_instance();
	if (phba->brd_no < 0) {
		kfree(phba);
		return NULL;
	}
	phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;

	spin_lock_init(&phba->ct_ev_lock);
	INIT_LIST_HEAD(&phba->ct_ev_waiters);

	return phba;
}

/**
 * lpfc_hba_free - Free driver hba data structure with a device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver hba data structure with an
 * HBA device.
 **/
static void
lpfc_hba_free(struct lpfc_hba *phba)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		kfree(phba->sli4_hba.hdwq);

	/* Release the driver assigned board number */
	idr_remove(&lpfc_hba_index, phba->brd_no);

	/* Free memory allocated with sli3 rings */
	kfree(phba->sli.sli3_ring);
	phba->sli.sli3_ring = NULL;

	kfree(phba);
}

/**
 * lpfc_create_shost - Create hba physical port with associated scsi host.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to create HBA physical port and associate a SCSI
 * host with it.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_create_shost(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct Scsi_Host *shost;

	/* Initialize HBA FC structure */
	phba->fc_edtov = FF_DEF_EDTOV;
	phba->fc_ratov = FF_DEF_RATOV;
	phba->fc_altov = FF_DEF_ALTOV;
	phba->fc_arbtov = FF_DEF_ARBTOV;

	atomic_set(&phba->sdev_cnt, 0);
	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
	if (!vport)
		return -ENODEV;

	shost = lpfc_shost_from_vport(vport);
	phba->pport = vport;

	if (phba->nvmet_support) {
		/* Only 1 vport (pport) will support NVME target */
		phba->targetport = NULL;
		phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC,
				"6076 NVME Target Found\n");
	}

	lpfc_debugfs_initialize(vport);
	/* Put reference to SCSI host to driver's device private data */
	pci_set_drvdata(phba->pcidev, shost);

	/*
	 * At this point we are fully registered with PSA. In addition,
	 * any initial discovery should be completed.
	 */
	vport->load_flag |= FC_ALLOW_FDMI;
	if (phba->cfg_enable_SmartSAN ||
	    (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {

		/* Setup appropriate attribute masks */
		vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
		if (phba->cfg_enable_SmartSAN)
			vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
		else
			vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
	}
	return 0;
}

/**
 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to destroy HBA physical port and the associated
 * SCSI host.
 **/
static void
lpfc_destroy_shost(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;

	/* Destroy physical port that is associated with the SCSI host */
	destroy_port(vport);
}

/**
 * lpfc_setup_bg - Setup Block guard structures and debug areas.
 * @phba: pointer to lpfc hba data structure.
 * @shost: the shost to be used to detect Block guard settings.
 *
 * This routine sets up the local Block guard protocol settings for @shost.
 * This routine also allocates memory for debugging bg buffers.
 **/
static void
lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
{
	uint32_t old_mask;
	uint32_t old_guard;

	if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"1478 Registering BlockGuard with the "
				"SCSI layer\n");

		old_mask = phba->cfg_prot_mask;
		old_guard = phba->cfg_prot_guard;

		/* Only allow supported values */
		phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
					SHOST_DIX_TYPE0_PROTECTION |
					SHOST_DIX_TYPE1_PROTECTION);
		phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
					 SHOST_DIX_GUARD_CRC);

		/* DIF Type 1 protection for profiles AST1/C1 is end to end */
		if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
			phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;

		if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
			if ((old_mask != phba->cfg_prot_mask) ||
			    (old_guard != phba->cfg_prot_guard))
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"1475 Registering BlockGuard with the "
					"SCSI layer: mask %d guard %d\n",
					phba->cfg_prot_mask,
					phba->cfg_prot_guard);

			scsi_host_set_prot(shost, phba->cfg_prot_mask);
			scsi_host_set_guard(shost, phba->cfg_prot_guard);
		} else
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1479 Not Registering BlockGuard with the SCSI "
				"layer, Bad protection parameters: %d %d\n",
				old_mask, old_guard);
	}
}

/**
 * lpfc_post_init_setup - Perform necessary device post initialization setup.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to perform all the necessary post initialization
 * setup for the device.
 **/
static void
lpfc_post_init_setup(struct lpfc_hba *phba)
{
	struct Scsi_Host *shost;
	struct lpfc_adapter_event_header adapter_event;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/*
	 * hba setup may have changed the hba_queue_depth so we need to
	 * adjust the value of can_queue.
	 */
	shost = pci_get_drvdata(phba->pcidev);
	shost->can_queue = phba->cfg_hba_queue_depth - 10;

	lpfc_host_attrib_init(shost);

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		spin_lock_irq(shost->host_lock);
		lpfc_poll_start_timer(phba);
		spin_unlock_irq(shost->host_lock);
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0428 Perform SCSI scan\n");
	/* Send board arrival event to upper layer */
	adapter_event.event_type = FC_REG_ADAPTER_EVENT;
	adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(adapter_event),
				  (char *) &adapter_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the PCI device memory space for a device
 * with SLI-3 interface spec.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
{
	struct pci_dev *pdev = phba->pcidev;
	unsigned long bar0map_len, bar2map_len;
	int i, hbq_count;
	void *ptr;
	int error;

	if (!pdev)
		return -ENODEV;

	/* Set the device DMA mask size */
	error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (error)
		error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (error)
		return error;
	error = -ENODEV;

	/* Get the bus address of Bar0 and Bar2 and the number of bytes
	 * required by each mapping.
	 */
	phba->pci_bar0_map = pci_resource_start(pdev, 0);
	bar0map_len = pci_resource_len(pdev, 0);

	phba->pci_bar2_map = pci_resource_start(pdev, 2);
	bar2map_len = pci_resource_len(pdev, 2);

	/* Map HBA SLIM to a kernel virtual address. */
	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
	if (!phba->slim_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for SLIM memory.\n");
		goto out;
	}

	/* Map HBA Control Registers to a kernel virtual address. */
	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
	if (!phba->ctrl_regs_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for HBA control registers.\n");
		goto out_iounmap_slim;
	}

	/* Allocate memory for SLI-2 structures */
	phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
					       &phba->slim2p.phys, GFP_KERNEL);
	if (!phba->slim2p.virt)
		goto out_iounmap;

	phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
	phba->mbox_ext = (phba->slim2p.virt +
		offsetof(struct lpfc_sli2_slim, mbx_ext_words));
	phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
	phba->IOCBs = (phba->slim2p.virt +
		       offsetof(struct lpfc_sli2_slim, IOCBs));

	phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
						 lpfc_sli_hbq_size(),
						 &phba->hbqslimp.phys,
						 GFP_KERNEL);
	if (!phba->hbqslimp.virt)
		goto out_free_slim;

	hbq_count = lpfc_sli_hbq_count();
	ptr = phba->hbqslimp.virt;
	for (i = 0; i < hbq_count; ++i) {
		phba->hbqs[i].hbq_virt = ptr;
		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
		ptr += (lpfc_hbq_defs[i]->entry_count *
			sizeof(struct lpfc_hbq_entry));
	}
	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;

	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());

	phba->MBslimaddr = phba->slim_memmap_p;
	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;

	return 0;

out_free_slim:
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);
out_iounmap:
	iounmap(phba->ctrl_regs_memmap_p);
out_iounmap_slim:
	iounmap(phba->slim_memmap_p);
out:
	return error;
}

/**
 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the PCI device memory space for a device
 * with SLI-3 interface spec.
 **/
static void
lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return;

	pdev = phba->pcidev;

	/* Free coherent DMA memory allocated */
	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
			  phba->hbqslimp.virt, phba->hbqslimp.phys);
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);

	/* I/O memory unmap */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);
}

/**
 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
 * done and check status.
 *
 * Return 0 if successful, otherwise -ENODEV.
 **/
int
lpfc_sli4_post_status_check(struct lpfc_hba *phba)
{
	struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
	struct lpfc_register reg_data;
	int i, port_error = 0;
	uint32_t if_type;

	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
	memset(&reg_data, 0, sizeof(reg_data));
	if (!phba->sli4_hba.PSMPHRregaddr)
		return -ENODEV;

	/* Wait up to 30 seconds for the SLI Port POST done and ready */
	for (i = 0; i < 3000; i++) {
		if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
			       &portsmphr_reg.word0) ||
		    (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
			/* Port has a fatal POST error, break out */
			port_error = -ENODEV;
			break;
		}
		if (LPFC_POST_STAGE_PORT_READY ==
		    bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
			break;
		msleep(10);
	}
8015
8016 /*
8017 * If there was a port error during POST, then don't proceed with
8018 * other register reads as the data may not be valid. Just exit.
8019 */
8020 if (port_error) {
Olivier Deprez157378f2022-04-04 15:47:50 +02008021 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00008022 "1408 Port Failed POST - portsmphr=0x%x, "
8023 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
8024 "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
8025 portsmphr_reg.word0,
8026 bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
8027 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
8028 bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
8029 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
8030 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
8031 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
8032 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
8033 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
8034 } else {
8035 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8036 "2534 Device Info: SLIFamily=0x%x, "
8037 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
8038 "SLIHint_2=0x%x, FT=0x%x\n",
8039 bf_get(lpfc_sli_intf_sli_family,
8040 &phba->sli4_hba.sli_intf),
8041 bf_get(lpfc_sli_intf_slirev,
8042 &phba->sli4_hba.sli_intf),
8043 bf_get(lpfc_sli_intf_if_type,
8044 &phba->sli4_hba.sli_intf),
8045 bf_get(lpfc_sli_intf_sli_hint1,
8046 &phba->sli4_hba.sli_intf),
8047 bf_get(lpfc_sli_intf_sli_hint2,
8048 &phba->sli4_hba.sli_intf),
8049 bf_get(lpfc_sli_intf_func_type,
8050 &phba->sli4_hba.sli_intf));
8051 /*
8052 * Check for other Port errors during the initialization
8053 * process. Fail the load if the port did not come up
8054 * correctly.
8055 */
8056 if_type = bf_get(lpfc_sli_intf_if_type,
8057 &phba->sli4_hba.sli_intf);
8058 switch (if_type) {
8059 case LPFC_SLI_INTF_IF_TYPE_0:
8060 phba->sli4_hba.ue_mask_lo =
8061 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
8062 phba->sli4_hba.ue_mask_hi =
8063 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
8064 uerrlo_reg.word0 =
8065 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
8066 uerrhi_reg.word0 =
8067 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
8068 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
8069 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
Olivier Deprez157378f2022-04-04 15:47:50 +02008070 lpfc_printf_log(phba, KERN_ERR,
8071 LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00008072 "1422 Unrecoverable Error "
8073 "Detected during POST "
8074 "uerr_lo_reg=0x%x, "
8075 "uerr_hi_reg=0x%x, "
8076 "ue_mask_lo_reg=0x%x, "
8077 "ue_mask_hi_reg=0x%x\n",
8078 uerrlo_reg.word0,
8079 uerrhi_reg.word0,
8080 phba->sli4_hba.ue_mask_lo,
8081 phba->sli4_hba.ue_mask_hi);
8082 port_error = -ENODEV;
8083 }
8084 break;
8085 case LPFC_SLI_INTF_IF_TYPE_2:
8086 case LPFC_SLI_INTF_IF_TYPE_6:
8087 /* Final checks. The port status should be clean. */
8088 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
8089 &reg_data.word0) ||
8090 (bf_get(lpfc_sliport_status_err, &reg_data) &&
8091 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
8092 phba->work_status[0] =
8093 readl(phba->sli4_hba.u.if_type2.
8094 ERR1regaddr);
8095 phba->work_status[1] =
8096 readl(phba->sli4_hba.u.if_type2.
8097 ERR2regaddr);
Olivier Deprez157378f2022-04-04 15:47:50 +02008098 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00008099 "2888 Unrecoverable port error "
8100 "following POST: port status reg "
8101 "0x%x, port_smphr reg 0x%x, "
8102 "error 1=0x%x, error 2=0x%x\n",
8103 reg_data.word0,
8104 portsmphr_reg.word0,
8105 phba->work_status[0],
8106 phba->work_status[1]);
8107 port_error = -ENODEV;
8108 }
8109 break;
8110 case LPFC_SLI_INTF_IF_TYPE_1:
8111 default:
8112 break;
8113 }
8114 }
8115 return port_error;
8116}
8117
/**
 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @if_type: The SLI4 interface type getting configured.
 *
 * This routine is invoked to set up SLI4 BAR0 PCI config space register
 * memory map.
 **/
static void
lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
{
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		phba->sli4_hba.u.if_type0.UERRLOregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
		phba->sli4_hba.u.if_type0.UERRHIregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
		phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
		phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
		phba->sli4_hba.SLIINTFregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		phba->sli4_hba.u.if_type2.EQDregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_EQ_DELAY_OFFSET;
		phba->sli4_hba.u.if_type2.ERR1regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER1_OFFSET;
		phba->sli4_hba.u.if_type2.ERR2regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER2_OFFSET;
		phba->sli4_hba.u.if_type2.CTRLregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_CTL_OFFSET;
		phba->sli4_hba.u.if_type2.STATUSregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_STA_OFFSET;
		phba->sli4_hba.SLIINTFregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
		phba->sli4_hba.PSMPHRregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_SEM_OFFSET;
		phba->sli4_hba.RQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_ULP0_RQ_DOORBELL;
		phba->sli4_hba.WQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_ULP0_WQ_DOORBELL;
		phba->sli4_hba.CQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
		phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
		phba->sli4_hba.MQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
		phba->sli4_hba.BMBXregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
		break;
	case LPFC_SLI_INTF_IF_TYPE_6:
		phba->sli4_hba.u.if_type2.EQDregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_EQ_DELAY_OFFSET;
		phba->sli4_hba.u.if_type2.ERR1regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER1_OFFSET;
		phba->sli4_hba.u.if_type2.ERR2regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER2_OFFSET;
		phba->sli4_hba.u.if_type2.CTRLregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_CTL_OFFSET;
		phba->sli4_hba.u.if_type2.STATUSregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_STA_OFFSET;
		phba->sli4_hba.PSMPHRregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_SEM_OFFSET;
		phba->sli4_hba.BMBXregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		dev_printk(KERN_ERR, &phba->pcidev->dev,
			   "FATAL - unsupported SLI4 interface type - %d\n",
			   if_type);
		break;
	}
}

/**
 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @if_type: SLI4 interface type to operate on.
 *
 * This routine is invoked to set up SLI4 BAR1 register memory map.
 **/
static void
lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
{
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		phba->sli4_hba.PSMPHRregaddr =
			phba->sli4_hba.ctrl_regs_memmap_p +
			LPFC_SLIPORT_IF0_SMPHR;
		phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
			LPFC_HST_ISR0;
		phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
			LPFC_HST_IMR0;
		phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
			LPFC_HST_ISCR0;
		break;
	case LPFC_SLI_INTF_IF_TYPE_6:
		phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_RQ_DOORBELL;
		phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_WQ_DOORBELL;
		phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_CQ_DOORBELL;
		phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_EQ_DOORBELL;
		phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_MQ_DOORBELL;
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		dev_err(&phba->pcidev->dev,
			"FATAL - unsupported SLI4 interface type - %d\n",
			if_type);
		break;
	}
}

/**
 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @vf: virtual function number
 *
 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
 * based on the given virtual function number, @vf.
 *
 * Return 0 if successful, otherwise -ENODEV.
 **/
static int
lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
{
	if (vf > LPFC_VIR_FUNC_MAX)
		return -ENODEV;

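	/*
	 * Illustrative layout note: each virtual function owns one page of
	 * doorbell registers in BAR2, so every doorbell for VF n sits at a
	 * fixed stride of n * LPFC_VFR_PAGE_SIZE from the start of the
	 * mapped region (e.g. assuming a 4KB VF page, VF 2's RQ doorbell
	 * would land at 2 * 4096 + LPFC_ULP0_RQ_DOORBELL).
	 */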
	phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE +
					LPFC_ULP0_RQ_DOORBELL);
	phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE +
					LPFC_ULP0_WQ_DOORBELL);
	phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE +
					LPFC_EQCQ_DOORBELL);
	phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
	phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
	phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
	return 0;
}

/**
 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to create the bootstrap mailbox
 * region consistent with the SLI-4 interface spec.  This
 * routine allocates all memory necessary to communicate
 * mailbox commands to the port and sets up all alignment
 * needs.  No locks are expected to be held when calling
 * this routine.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - could not allocate memory.
 **/
static int
lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
{
	uint32_t bmbx_size;
	struct lpfc_dmabuf *dmabuf;
	struct dma_address *dma_address;
	uint32_t pa_addr;
	uint64_t phys_addr;

	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return -ENOMEM;

	/*
	 * The bootstrap mailbox region is comprised of 2 parts
	 * plus an alignment restriction of 16 bytes.
	 */
	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
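	/*
	 * Over-allocating by (alignment - 1) bytes guarantees that a
	 * 16-byte-aligned window of the required size exists somewhere
	 * inside the buffer; PTR_ALIGN()/ALIGN() below select that window.
	 */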
	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
					  &dmabuf->phys, GFP_KERNEL);
	if (!dmabuf->virt) {
		kfree(dmabuf);
		return -ENOMEM;
	}

	/*
	 * Initialize the bootstrap mailbox pointers now so that the register
	 * operations are simple later.  The mailbox DMA address is required
	 * to be 16-byte aligned.  Also align the virtual memory, as each
	 * mailbox is copied into the bmbx mailbox region before issuing the
	 * command to the port.
	 */
	phba->sli4_hba.bmbx.dmabuf = dmabuf;
	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;

	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
					      LPFC_ALIGN_16_BYTE);
	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
					  LPFC_ALIGN_16_BYTE);

	/*
	 * Set the high and low physical addresses now.  The SLI4 alignment
	 * requirement is 16 bytes and the mailbox is posted to the port
	 * as two 30-bit addresses.  The other data is a bit marking whether
	 * the 30-bit address is the high or low address.
	 * Upcast bmbx aphys to 64bits so shift instruction compiles
	 * clean on 32 bit machines.
	 */
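	/*
	 * Worked example (illustrative only): for an aligned aphys of
	 * 0x4_2345_6780, bits [63:34] (here 0x1) are posted via addr_hi
	 * and bits [33:4] (here 0x2345678) via addr_lo; each 30-bit value
	 * is shifted left by 2 so the low bits can carry the hi/lo marker.
	 */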
	dma_address = &phba->sli4_hba.bmbx.dma_address;
	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_HI);

	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_LO);
	return 0;
}

/**
 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to tear down the bootstrap mailbox
 * region and release all host resources.  This routine requires
 * the caller to ensure all mailbox commands are recovered, no
 * additional mailbox commands are sent, and interrupts are disabled
 * before calling this routine.
 *
 **/
static void
lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
{
	dma_free_coherent(&phba->pcidev->dev,
			  phba->sli4_hba.bmbx.bmbx_size,
			  phba->sli4_hba.bmbx.dmabuf->virt,
			  phba->sli4_hba.bmbx.dmabuf->phys);

	kfree(phba->sli4_hba.bmbx.dmabuf);
	memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
}

static const char * const lpfc_topo_to_str[] = {
	"Loop then P2P",
	"Loopback",
	"P2P Only",
	"Unsupported",
	"Loop Only",
	"Unsupported",
	"P2P then Loop",
};

#define	LINK_FLAGS_DEF	0x0
#define	LINK_FLAGS_P2P	0x1
#define	LINK_FLAGS_LOOP	0x2
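/*
 * Persistent-topology decode used below (assuming the usual READ_CONFIG
 * layout): ptv marks the pt/tf fields as valid at all, tf requests
 * topology failover (loop <-> P2P), and pt selects the (initial)
 * topology using the LINK_FLAGS_* encodings above.
 */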
/**
 * lpfc_map_topology - Map the topology read from READ_CONFIG
 * @phba: pointer to lpfc hba data structure.
 * @rd_config: pointer to read config data
 *
 * This routine is invoked to map the topology values as read
 * from the read config mailbox command. If the persistent
 * topology feature is supported, the firmware will provide the
 * saved topology information to be used in INIT_LINK
 **/
static void
lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
{
	u8 ptv, tf, pt;

	ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config);
	tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config);
	pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config);

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2027 Read Config Data: ptv:0x%x, tf:0x%x, pt:0x%x",
			ptv, tf, pt);
	if (!ptv) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2019 FW does not support persistent topology; "
				"using driver parameter defined value [%s]",
				lpfc_topo_to_str[phba->cfg_topology]);
		return;
	}
	/* FW supports persistent topology - override module parameter value */
	phba->hba_flag |= HBA_PERSISTENT_TOPO;
	switch (phba->pcidev->device) {
	case PCI_DEVICE_ID_LANCER_G7_FC:
	case PCI_DEVICE_ID_LANCER_G6_FC:
		if (!tf) {
			phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
					? FLAGS_TOPOLOGY_MODE_LOOP
					: FLAGS_TOPOLOGY_MODE_PT_PT);
		} else {
			phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
		}
		break;
	default:	/* G5 */
		if (tf) {
			/* If topology failover set - pt is '0' or '1' */
			phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP :
					      FLAGS_TOPOLOGY_MODE_LOOP_PT);
		} else {
			phba->cfg_topology = ((pt == LINK_FLAGS_P2P)
					? FLAGS_TOPOLOGY_MODE_PT_PT
					: FLAGS_TOPOLOGY_MODE_LOOP);
		}
		break;
	}
	if (phba->hba_flag & HBA_PERSISTENT_TOPO) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2020 Using persistent topology value [%s]",
				lpfc_topo_to_str[phba->cfg_topology]);
	} else {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2021 Invalid topology values from FW; "
				"using driver parameter defined value [%s]",
				lpfc_topo_to_str[phba->cfg_topology]);
	}
}

/**
 * lpfc_sli4_read_config - Get the config parameters.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to read the configuration parameters from the HBA.
 * The configuration parameters are used to set the base and maximum values
 * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the
 * resource allocation for the port.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_sli4_read_config(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	struct lpfc_mbx_read_config *rd_config;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status;
	struct lpfc_mbx_get_func_cfg *get_func_cfg;
	struct lpfc_rsrc_desc_fcfcoe *desc;
	char *pdesc_0;
	uint16_t forced_link_speed;
	uint32_t if_type, qmin;
	int length, i, rc = 0, rc2;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2011 Unable to allocate memory for issuing "
				"SLI_CONFIG_SPECIAL mailbox command\n");
		return -ENOMEM;
	}

	lpfc_read_config(phba, pmb);

	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2012 Mailbox failed, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				bf_get(lpfc_mqe_command, &pmb->u.mqe),
				bf_get(lpfc_mqe_status, &pmb->u.mqe));
		rc = -EIO;
	} else {
		rd_config = &pmb->u.mqe.un.rd_config;
		if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
			phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
			phba->sli4_hba.lnk_info.lnk_tp =
				bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
			phba->sli4_hba.lnk_info.lnk_no =
				bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"3081 lnk_type:%d, lnk_numb:%d\n",
					phba->sli4_hba.lnk_info.lnk_tp,
					phba->sli4_hba.lnk_info.lnk_no);
		} else
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"3082 Mailbox (x%x) returned ldv:x0\n",
					bf_get(lpfc_mqe_command, &pmb->u.mqe));
		if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) {
			phba->bbcredit_support = 1;
			phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
		}

		phba->sli4_hba.conf_trunk =
			bf_get(lpfc_mbx_rd_conf_trunk, rd_config);
		phba->sli4_hba.extents_in_use =
			bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
		phba->sli4_hba.max_cfg_param.max_xri =
			bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
		/* Reduce resource usage in kdump environment */
		if (is_kdump_kernel() &&
		    phba->sli4_hba.max_cfg_param.max_xri > 512)
			phba->sli4_hba.max_cfg_param.max_xri = 512;
		phba->sli4_hba.max_cfg_param.xri_base =
			bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_vpi =
			bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
		/* Limit the max we support */
		if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS)
			phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS;
		phba->sli4_hba.max_cfg_param.vpi_base =
			bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_rpi =
			bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
		phba->sli4_hba.max_cfg_param.rpi_base =
			bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_vfi =
			bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
		phba->sli4_hba.max_cfg_param.vfi_base =
			bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_fcfi =
			bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_eq =
			bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_rq =
			bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_wq =
			bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_cq =
			bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
		phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
		phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
				(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
		phba->max_vports = phba->max_vpi;
		lpfc_map_topology(phba, rd_config);
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2003 cfg params Extents? %d "
				"XRI(B:%d M:%d), "
				"VPI(B:%d M:%d) "
				"VFI(B:%d M:%d) "
				"RPI(B:%d M:%d) "
				"FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d lmt:x%x\n",
				phba->sli4_hba.extents_in_use,
				phba->sli4_hba.max_cfg_param.xri_base,
				phba->sli4_hba.max_cfg_param.max_xri,
				phba->sli4_hba.max_cfg_param.vpi_base,
				phba->sli4_hba.max_cfg_param.max_vpi,
				phba->sli4_hba.max_cfg_param.vfi_base,
				phba->sli4_hba.max_cfg_param.max_vfi,
				phba->sli4_hba.max_cfg_param.rpi_base,
				phba->sli4_hba.max_cfg_param.max_rpi,
				phba->sli4_hba.max_cfg_param.max_fcfi,
				phba->sli4_hba.max_cfg_param.max_eq,
				phba->sli4_hba.max_cfg_param.max_cq,
				phba->sli4_hba.max_cfg_param.max_wq,
				phba->sli4_hba.max_cfg_param.max_rq,
				phba->lmt);

		/*
		 * Calculate queue resources based on how
		 * many WQ/CQ/EQs are available.
		 */
		qmin = phba->sli4_hba.max_cfg_param.max_wq;
		if (phba->sli4_hba.max_cfg_param.max_cq < qmin)
			qmin = phba->sli4_hba.max_cfg_param.max_cq;
		if (phba->sli4_hba.max_cfg_param.max_eq < qmin)
			qmin = phba->sli4_hba.max_cfg_param.max_eq;
		/*
		 * What's left after this can go toward NVME / FCP.
		 * The minus 4 accounts for ELS, NVME LS, MBOX
		 * plus one extra.  When configured for
		 * NVMET, FCP io channel WQs are not created.
		 */
		qmin -= 4;
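		/*
		 * Illustrative sizing: if the firmware reported, say,
		 * WQ=128, CQ=128 and EQ=64, then qmin = 64 - 4 = 60 and
		 * both cfg_irq_chann and cfg_hdw_queue are clamped to 60
		 * by the check below.
		 */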

		/* Check to see if there is enough for NVME */
		if ((phba->cfg_irq_chann > qmin) ||
		    (phba->cfg_hdw_queue > qmin)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2005 Reducing Queues - "
					"FW resource limitation: "
					"WQ %d CQ %d EQ %d: min %d: "
					"IRQ %d HDWQ %d\n",
					phba->sli4_hba.max_cfg_param.max_wq,
					phba->sli4_hba.max_cfg_param.max_cq,
					phba->sli4_hba.max_cfg_param.max_eq,
					qmin, phba->cfg_irq_chann,
					phba->cfg_hdw_queue);

			if (phba->cfg_irq_chann > qmin)
				phba->cfg_irq_chann = qmin;
			if (phba->cfg_hdw_queue > qmin)
				phba->cfg_hdw_queue = qmin;
		}
	}

	if (rc)
		goto read_cfg_out;

	/* Update link speed if forced link speed is supported */
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
		forced_link_speed =
			bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
		if (forced_link_speed) {
			phba->hba_flag |= HBA_FORCED_LINK_SPEED;

			switch (forced_link_speed) {
			case LINK_SPEED_1G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_1G;
				break;
			case LINK_SPEED_2G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_2G;
				break;
			case LINK_SPEED_4G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_4G;
				break;
			case LINK_SPEED_8G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_8G;
				break;
			case LINK_SPEED_10G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_10G;
				break;
			case LINK_SPEED_16G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_16G;
				break;
			case LINK_SPEED_32G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_32G;
				break;
			case LINK_SPEED_64G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_64G;
				break;
			case 0xffff:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_AUTO;
				break;
			default:
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"0047 Unrecognized link "
						"speed: %d\n",
						forced_link_speed);
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_AUTO;
			}
		}
	}

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	length = phba->sli4_hba.max_cfg_param.max_xri -
		lpfc_sli4_get_els_iocb_cnt(phba);
	if (phba->cfg_hba_queue_depth > length) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3361 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth, length);
		phba->cfg_hba_queue_depth = length;
	}

	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
	    LPFC_SLI_INTF_IF_TYPE_2)
		goto read_cfg_out;

	/* get the pf# and vf# for SLI4 if_type 2 port */
	length = (sizeof(struct lpfc_mbx_get_func_cfg) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
			 length, LPFC_SLI4_MBX_EMBED);

	rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
		&pmb->u.mqe.un.sli4_config.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc2 || shdr_status || shdr_add_status) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3026 Mailbox failed, mbxCmd x%x "
				"GET_FUNCTION_CONFIG, mbxStatus x%x\n",
				bf_get(lpfc_mqe_command, &pmb->u.mqe),
				bf_get(lpfc_mqe_status, &pmb->u.mqe));
		goto read_cfg_out;
	}

	/* Search for the fc_fcoe resource descriptor */
	get_func_cfg = &pmb->u.mqe.un.get_func_cfg;

	pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
	desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
	length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
	if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
		length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
	else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
		goto read_cfg_out;

	for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
		desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
		if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
		    bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
			phba->sli4_hba.iov.pf_number =
				bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
			phba->sli4_hba.iov.vf_number =
				bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
			break;
		}
	}

	if (i < LPFC_RSRC_DESC_MAX_NUM)
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3027 GET_FUNCTION_CONFIG: pf_number:%d, "
				"vf_number:%d\n", phba->sli4_hba.iov.pf_number,
				phba->sli4_hba.iov.vf_number);
	else
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3028 GET_FUNCTION_CONFIG: failed to find "
				"Resource Descriptor:x%x\n",
				LPFC_RSRC_DESC_TYPE_FCFCOE);

read_cfg_out:
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;
}

/**
 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to setup the port-side endian order when
 * the port if_type is 0.  This routine has no function for other
 * if_types.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
static int
lpfc_setup_endian_order(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	uint32_t if_type, rc = 0;
	uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
				      HOST_ENDIAN_HIGH_WORD1};

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						       GFP_KERNEL);
		if (!mboxq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0492 Unable to allocate memory for "
					"issuing SLI_CONFIG_SPECIAL mailbox "
					"command\n");
			return -ENOMEM;
		}

		/*
		 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
		 * two words to contain special data values and no other data.
		 */
		memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
		memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0493 SLI_CONFIG_SPECIAL mailbox "
					"failed with status x%x\n",
					rc);
			rc = -EIO;
		}
		mempool_free(mboxq, phba->mbox_mem_pool);
		break;
	case LPFC_SLI_INTF_IF_TYPE_6:
	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
	return rc;
}

/**
 * lpfc_sli4_queue_verify - Verify and update EQ counts
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to check the user settable queue counts for EQs.
 * After this routine is called the counts will be set to valid values that
 * adhere to the constraints of the system's interrupt vectors and the port's
 * queue resources.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 **/
static int
lpfc_sli4_queue_verify(struct lpfc_hba *phba)
{
	/*
	 * Sanity check for configured queue parameters against the run-time
	 * device parameters
	 */

	if (phba->nvmet_support) {
		if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq)
			phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
		if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
			phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n",
			phba->cfg_hdw_queue, phba->cfg_irq_chann,
			phba->cfg_nvmet_mrq);

	/* Get EQ depth from module parameter, fake the default for now */
	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;

	/* Get CQ depth from module parameter, fake the default for now */
	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
	return 0;
}

static int
lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx)
{
	struct lpfc_queue *qdesc;
	u32 wqesize;
	int cpu;

	cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ);
	/* Create Fast Path IO CQs */
	if (phba->enab_exp_wqcq_pages)
		/* Increase the CQ size when WQEs contain an embedded cdb */
		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
					      phba->sli4_hba.cq_esize,
					      LPFC_CQE_EXP_COUNT, cpu);
	else
		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
					      phba->sli4_hba.cq_esize,
					      phba->sli4_hba.cq_ecount, cpu);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0499 Failed allocate fast-path IO CQ (%d)\n",
				idx);
		return 1;
	}
	qdesc->qe_valid = 1;
	qdesc->hdwq = idx;
	qdesc->chann = cpu;
	phba->sli4_hba.hdwq[idx].io_cq = qdesc;

	/* Create Fast Path IO WQs */
	if (phba->enab_exp_wqcq_pages) {
		/* Increase the WQ size when WQEs contain an embedded cdb */
		wqesize = (phba->fcp_embed_io) ?
			LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
					      wqesize,
					      LPFC_WQE_EXP_COUNT, cpu);
	} else
		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
					      phba->sli4_hba.wq_esize,
					      phba->sli4_hba.wq_ecount, cpu);

	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0503 Failed allocate fast-path IO WQ (%d)\n",
				idx);
		return 1;
	}
	qdesc->hdwq = idx;
	qdesc->chann = cpu;
	phba->sli4_hba.hdwq[idx].io_wq = qdesc;
	list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
	return 0;
}

/**
 * lpfc_sli4_queue_create - Create all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
 * operation. For each SLI4 queue type, the parameters such as queue entry
 * count (queue depth) shall be taken from the module parameter. For now,
 * we just use some constant number as placeholder.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_sli4_queue_create(struct lpfc_hba *phba)
{
	struct lpfc_queue *qdesc;
	int idx, cpu, eqcpu;
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_vector_map_info *cpup;
	struct lpfc_vector_map_info *eqcpup;
	struct lpfc_eq_intr_info *eqi;

	/*
	 * Create HBA Record arrays.
	 * Both NVME and FCP will share the same vectors / EQs.
	 */
	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
	phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
	phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
	phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
	phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
	phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;

	if (!phba->sli4_hba.hdwq) {
		phba->sli4_hba.hdwq = kcalloc(
			phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue),
			GFP_KERNEL);
		if (!phba->sli4_hba.hdwq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6427 Failed allocate memory for "
					"fast-path Hardware Queue array\n");
			goto out_error;
		}
		/* Prepare hardware queues to take IO buffers */
		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
			qp = &phba->sli4_hba.hdwq[idx];
			spin_lock_init(&qp->io_buf_list_get_lock);
			spin_lock_init(&qp->io_buf_list_put_lock);
			INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
			INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
			qp->get_io_bufs = 0;
			qp->put_io_bufs = 0;
			qp->total_io_bufs = 0;
			spin_lock_init(&qp->abts_io_buf_list_lock);
			INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list);
			qp->abts_scsi_io_bufs = 0;
			qp->abts_nvme_io_bufs = 0;
			INIT_LIST_HEAD(&qp->sgl_list);
			INIT_LIST_HEAD(&qp->cmd_rsp_buf_list);
			spin_lock_init(&qp->hdwq_lock);
		}
	}

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		if (phba->nvmet_support) {
			phba->sli4_hba.nvmet_cqset = kcalloc(
					phba->cfg_nvmet_mrq,
					sizeof(struct lpfc_queue *),
					GFP_KERNEL);
			if (!phba->sli4_hba.nvmet_cqset) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3121 Fail allocate memory for "
					"fast-path CQ set array\n");
				goto out_error;
			}
			phba->sli4_hba.nvmet_mrq_hdr = kcalloc(
					phba->cfg_nvmet_mrq,
					sizeof(struct lpfc_queue *),
					GFP_KERNEL);
			if (!phba->sli4_hba.nvmet_mrq_hdr) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3122 Fail allocate memory for "
					"fast-path RQ set hdr array\n");
				goto out_error;
			}
			phba->sli4_hba.nvmet_mrq_data = kcalloc(
					phba->cfg_nvmet_mrq,
					sizeof(struct lpfc_queue *),
					GFP_KERNEL);
			if (!phba->sli4_hba.nvmet_mrq_data) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3124 Fail allocate memory for "
					"fast-path RQ set data array\n");
				goto out_error;
			}
		}
	}

	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);

	/* Create HBA Event Queues (EQs) */
	for_each_present_cpu(cpu) {
		/*
		 * We only want to create one EQ per IRQ vector, even though
		 * multiple CPUs might be using that vector, so only the
		 * CPUs marked LPFC_CPU_FIRST_IRQ are selected.
		 */
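		/*
		 * Resulting layout, sketched for an assumed 16 CPUs sharing
		 * 8 IRQ vectors: only the 8 "first" CPUs allocate an EQ
		 * here; the follow-up loop below points every remaining
		 * hardware queue at the EQ of the CPU whose vector it
		 * shares.
		 */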
		cpup = &phba->sli4_hba.cpu_map[cpu];
		if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
			continue;

		/* Get a ptr to the Hardware Queue associated with this CPU */
		qp = &phba->sli4_hba.hdwq[cpup->hdwq];

		/* Allocate an EQ */
		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
					      phba->sli4_hba.eq_esize,
					      phba->sli4_hba.eq_ecount, cpu);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0497 Failed allocate EQ (%d)\n",
					cpup->hdwq);
			goto out_error;
		}
		qdesc->qe_valid = 1;
		qdesc->hdwq = cpup->hdwq;
		qdesc->chann = cpu; /* First CPU this EQ is affinitized to */
		qdesc->last_cpu = qdesc->chann;

		/* Save the allocated EQ in the Hardware Queue */
		qp->hba_eq = qdesc;

		eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu);
		list_add(&qdesc->cpu_list, &eqi->list);
	}

	/* Now populate the remaining Hardware Queues, which share an IRQ
	 * vector, with the associated EQ ptr.
	 */
	for_each_present_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];

		/* Check for EQ already allocated in previous loop */
		if (cpup->flag & LPFC_CPU_FIRST_IRQ)
			continue;

		/* Check for multiple CPUs per hdwq */
		qp = &phba->sli4_hba.hdwq[cpup->hdwq];
		if (qp->hba_eq)
			continue;

		/* We need to share an EQ for this hdwq */
		eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ);
		eqcpup = &phba->sli4_hba.cpu_map[eqcpu];
		qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq;
	}

	/* Allocate IO Path SLI4 CQ/WQs */
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		if (lpfc_alloc_io_wq_cq(phba, idx))
			goto out_error;
	}

	if (phba->nvmet_support) {
		for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
			cpu = lpfc_find_cpu_handle(phba, idx,
						   LPFC_FIND_BY_HDWQ);
			qdesc = lpfc_sli4_queue_alloc(phba,
						      LPFC_DEFAULT_PAGE_SIZE,
						      phba->sli4_hba.cq_esize,
						      phba->sli4_hba.cq_ecount,
						      cpu);
			if (!qdesc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"3142 Failed allocate NVME "
						"CQ Set (%d)\n", idx);
				goto out_error;
			}
			qdesc->qe_valid = 1;
			qdesc->hdwq = idx;
			qdesc->chann = cpu;
			phba->sli4_hba.nvmet_cqset[idx] = qdesc;
		}
	}

	/*
	 * Create Slow Path Completion Queues (CQs)
	 */

	cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ);
	/* Create slow-path Mailbox Command Complete Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				      phba->sli4_hba.cq_esize,
				      phba->sli4_hba.cq_ecount, cpu);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0500 Failed allocate slow-path mailbox CQ\n");
		goto out_error;
	}
	qdesc->qe_valid = 1;
	phba->sli4_hba.mbx_cq = qdesc;

	/* Create slow-path ELS Complete Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				      phba->sli4_hba.cq_esize,
				      phba->sli4_hba.cq_ecount, cpu);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0501 Failed allocate slow-path ELS CQ\n");
		goto out_error;
	}
	qdesc->qe_valid = 1;
	qdesc->chann = cpu;
	phba->sli4_hba.els_cq = qdesc;


	/*
	 * Create Slow Path Work Queues (WQs)
	 */

	/* Create Mailbox Command Queue */

	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				      phba->sli4_hba.mq_esize,
				      phba->sli4_hba.mq_ecount, cpu);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0505 Failed allocate slow-path MQ\n");
		goto out_error;
	}
	qdesc->chann = cpu;
	phba->sli4_hba.mbx_wq = qdesc;

	/*
	 * Create ELS Work Queues
	 */

	/* Create slow-path ELS Work Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				      phba->sli4_hba.wq_esize,
				      phba->sli4_hba.wq_ecount, cpu);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0504 Failed allocate slow-path ELS WQ\n");
		goto out_error;
	}
	qdesc->chann = cpu;
	phba->sli4_hba.els_wq = qdesc;
	list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		/* Create NVME LS Complete Queue */
		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
					      phba->sli4_hba.cq_esize,
					      phba->sli4_hba.cq_ecount, cpu);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6079 Failed allocate NVME LS CQ\n");
			goto out_error;
		}
		qdesc->chann = cpu;
		qdesc->qe_valid = 1;
		phba->sli4_hba.nvmels_cq = qdesc;

		/* Create NVME LS Work Queue */
		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
					      phba->sli4_hba.wq_esize,
					      phba->sli4_hba.wq_ecount, cpu);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6080 Failed allocate NVME LS WQ\n");
			goto out_error;
		}
		qdesc->chann = cpu;
		phba->sli4_hba.nvmels_wq = qdesc;
		list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
	}

	/*
	 * Create Receive Queue (RQ)
	 */

	/* Create Receive Queue for header */
	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				      phba->sli4_hba.rq_esize,
				      phba->sli4_hba.rq_ecount, cpu);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0506 Failed allocate receive HRQ\n");
		goto out_error;
	}
	phba->sli4_hba.hdr_rq = qdesc;

	/* Create Receive Queue for data */
	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				      phba->sli4_hba.rq_esize,
				      phba->sli4_hba.rq_ecount, cpu);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0507 Failed allocate receive DRQ\n");
		goto out_error;
	}
	phba->sli4_hba.dat_rq = qdesc;

	if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
	    phba->nvmet_support) {
		for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
			cpu = lpfc_find_cpu_handle(phba, idx,
						   LPFC_FIND_BY_HDWQ);
			/* Create NVMET Receive Queue for header */
			qdesc = lpfc_sli4_queue_alloc(phba,
						      LPFC_DEFAULT_PAGE_SIZE,
						      phba->sli4_hba.rq_esize,
						      LPFC_NVMET_RQE_DEF_COUNT,
						      cpu);
			if (!qdesc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"3146 Failed allocate "
						"receive HRQ\n");
				goto out_error;
			}
			qdesc->hdwq = idx;
			phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;

			/* Only needed for header of RQ pair */
			qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp),
						   GFP_KERNEL,
						   cpu_to_node(cpu));
			if (qdesc->rqbp == NULL) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"6131 Failed allocate "
						"Header RQBP\n");
				goto out_error;
			}

			/* Put list in known state in case driver load fails. */
			INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list);

			/* Create NVMET Receive Queue for data */
			qdesc = lpfc_sli4_queue_alloc(phba,
						      LPFC_DEFAULT_PAGE_SIZE,
						      phba->sli4_hba.rq_esize,
						      LPFC_NVMET_RQE_DEF_COUNT,
						      cpu);
			if (!qdesc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"3156 Failed allocate "
						"receive DRQ\n");
				goto out_error;
			}
			qdesc->hdwq = idx;
			phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
		}
	}

	/* Clear NVME stats */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
			memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0,
			       sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat));
		}
	}

	/* Clear SCSI stats */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
			memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0,
			       sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat));
		}
	}

	return 0;

out_error:
	lpfc_sli4_queue_destroy(phba);
	return -ENOMEM;
}

static inline void
__lpfc_sli4_release_queue(struct lpfc_queue **qp)
{
	if (*qp != NULL) {
		lpfc_sli4_queue_free(*qp);
		*qp = NULL;
	}
}

static inline void
lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
{
	int idx;

	if (*qs == NULL)
		return;

	for (idx = 0; idx < max; idx++)
		__lpfc_sli4_release_queue(&(*qs)[idx]);

	kfree(*qs);
	*qs = NULL;
}

static inline void
lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hdw_queue *hdwq;
	struct lpfc_queue *eq;
	uint32_t idx;

	hdwq = phba->sli4_hba.hdwq;

	/* Loop thru all Hardware Queues */
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		/* Free the CQ/WQ corresponding to the Hardware Queue */
		lpfc_sli4_queue_free(hdwq[idx].io_cq);
		lpfc_sli4_queue_free(hdwq[idx].io_wq);
		hdwq[idx].hba_eq = NULL;
		hdwq[idx].io_cq = NULL;
		hdwq[idx].io_wq = NULL;
		if (phba->cfg_xpsgl && !phba->nvmet_support)
			lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]);
		lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]);
	}
	/* Loop thru all IRQ vectors */
	for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
		/* Free the EQ corresponding to the IRQ vector */
		eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
		lpfc_sli4_queue_free(eq);
		phba->sli4_hba.hba_eq_hdl[idx].eq = NULL;
	}
}

/**
 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release all the SLI4 queues created for the
 * FCoE HBA operation.
 **/
void
lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
{
	/*
	 * Set FREE_INIT before beginning to free the queues.
	 * Then wait until any in-flight users of the queues have
	 * acknowledged, by clearing FREE_WAIT, that the queues can
	 * be released.
	 */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT;
	while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) {
		spin_unlock_irq(&phba->hbalock);
		msleep(20);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

	lpfc_sli4_cleanup_poll_list(phba);

	/* Release HBA eqs */
	if (phba->sli4_hba.hdwq)
		lpfc_sli4_release_hdwq(phba);

	if (phba->nvmet_support) {
		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
					 phba->cfg_nvmet_mrq);

		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
					 phba->cfg_nvmet_mrq);
		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
					 phba->cfg_nvmet_mrq);
	}

	/* Release mailbox command work queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);

	/* Release ELS work queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);

	/* Release NVME LS work queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);
9417
9418 /* Release unsolicited receive queue */
9419 __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
9420 __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);
9421
9422 /* Release ELS complete queue */
9423 __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);
9424
9425 /* Release NVME LS complete queue */
9426 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);
9427
9428 /* Release mailbox command complete queue */
9429 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);
9430
9431 /* Everything on this list has been freed */
9432 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
David Brazdil0f672f62019-12-10 10:32:29 +00009433
9434 /* Done with freeing the queues */
9435 spin_lock_irq(&phba->hbalock);
9436 phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT;
9437 spin_unlock_irq(&phba->hbalock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009438}
9439
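/**
 * lpfc_free_rq_buffer - Free all buffers posted to a receive queue
 * @phba: pointer to lpfc hba data structure.
 * @rq: pointer to the receive queue whose posted buffers are freed.
 *
 * Walks @rq's rqb_buffer_list, hands each buffer back through the
 * queue's rqb_free_buffer callback and decrements the posted buffer
 * count.
 *
 * Return: always 1.
 **/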
9440int
9441lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
9442{
9443 struct lpfc_rqb *rqbp;
9444 struct lpfc_dmabuf *h_buf;
9445 struct rqb_dmabuf *rqb_buffer;
9446
9447 rqbp = rq->rqbp;
9448 while (!list_empty(&rqbp->rqb_buffer_list)) {
9449 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
9450 struct lpfc_dmabuf, list);
9451
9452 rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf);
9453 (rqbp->rqb_free_buffer)(phba, rqb_buffer);
9454 rqbp->buffer_count--;
9455 }
9456 return 1;
9457}
9458
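/**
 * lpfc_create_wq_cq - Create a CQ and its associated WQ (or MQ)
 * @phba: pointer to lpfc hba data structure.
 * @eq: parent event queue for the new completion queue.
 * @cq: completion queue to create.
 * @wq: work queue to create (a mailbox queue when @qtype is LPFC_MBOX).
 * @cq_map: if non-NULL, updated with the new CQ's queue id for fast lookup.
 * @qidx: queue index, used in log messages.
 * @qtype: queue type, e.g. LPFC_MBOX, LPFC_ELS, LPFC_NVME_LS or LPFC_IO.
 *
 * Creates @cq on @eq first, then creates @wq on @cq; for LPFC_MBOX the
 * second step creates a mailbox queue instead and @cq_map is ignored.
 *
 * Return: 0 on success, -ENOMEM if any of the queues was never
 * allocated, or the error code of the failing queue-create step.
 **/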
9459static int
9460lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
9461 struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map,
9462 int qidx, uint32_t qtype)
9463{
9464 struct lpfc_sli_ring *pring;
9465 int rc;
9466
9467 if (!eq || !cq || !wq) {
Olivier Deprez157378f2022-04-04 15:47:50 +02009468 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009469 "6085 Fast-path %s (%d) not allocated\n",
9470 ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
9471 return -ENOMEM;
9472 }
9473
9474 /* create the Cq first */
9475 rc = lpfc_cq_create(phba, cq, eq,
9476 (qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype);
9477 if (rc) {
Olivier Deprez157378f2022-04-04 15:47:50 +02009478 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9479 "6086 Failed setup of CQ (%d), rc = 0x%x\n",
9480 qidx, (uint32_t)rc);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009481 return rc;
9482 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009483
9484 if (qtype != LPFC_MBOX) {
David Brazdil0f672f62019-12-10 10:32:29 +00009485 /* Setup cq_map for fast lookup */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009486 if (cq_map)
9487 *cq_map = cq->queue_id;
9488
9489 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9490 "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n",
9491 qidx, cq->queue_id, qidx, eq->queue_id);
9492
9493 /* create the wq */
9494 rc = lpfc_wq_create(phba, wq, cq, qtype);
9495 if (rc) {
Olivier Deprez157378f2022-04-04 15:47:50 +02009496 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
David Brazdil0f672f62019-12-10 10:32:29 +00009497 "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n",
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009498 qidx, (uint32_t)rc);
9499 /* no need to tear down cq - caller will do so */
9500 return rc;
9501 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009502
9503 /* Bind this CQ/WQ to the NVME ring */
9504 pring = wq->pring;
9505 pring->sli.sli4.wqp = (void *)wq;
9506 cq->pring = pring;
9507
9508 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9509 "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n",
9510 qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id);
9511 } else {
9512 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
9513 if (rc) {
Olivier Deprez157378f2022-04-04 15:47:50 +02009514 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9515 "0539 Failed setup of slow-path MQ: "
9516 "rc = 0x%x\n", rc);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009517 /* no need to tear down cq - caller will do so */
9518 return rc;
9519 }
9520
9521 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9522 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
9523 phba->sli4_hba.mbx_wq->queue_id,
9524 phba->sli4_hba.mbx_cq->queue_id);
9525 }
9526
9527 return 0;
9528}
9529
9530/**
David Brazdil0f672f62019-12-10 10:32:29 +00009531 * lpfc_setup_cq_lookup - Setup the CQ lookup table
9532 * @phba: pointer to lpfc hba data structure.
9533 *
9534 * This routine will populate the cq_lookup table by all
9535 * available CQ queue_id's.
9536 **/
9537static void
9538lpfc_setup_cq_lookup(struct lpfc_hba *phba)
9539{
9540 struct lpfc_queue *eq, *childq;
9541 int qidx;
9542
9543 memset(phba->sli4_hba.cq_lookup, 0,
9544 (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1)));
9545 /* Loop thru all IRQ vectors */
9546 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
9547 /* Get the EQ corresponding to the IRQ vector */
9548 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
9549 if (!eq)
9550 continue;
9551 /* Loop through all CQs associated with that EQ */
9552 list_for_each_entry(childq, &eq->child_list, list) {
9553 if (childq->queue_id > phba->sli4_hba.cq_max)
9554 continue;
9555 if (childq->subtype == LPFC_IO)
9556 phba->sli4_hba.cq_lookup[childq->queue_id] =
9557 childq;
9558 }
9559 }
9560}
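
/*
 * With cq_lookup populated, a completion handler can resolve a CQ
 * directly from the queue id carried in an EQE instead of walking the
 * EQ's child list. A sketch of that fast path (illustration only, not
 * code lifted from the interrupt handler):
 *
 *	struct lpfc_queue *cq = NULL;
 *
 *	if (cqid <= phba->sli4_hba.cq_max)
 *		cq = phba->sli4_hba.cq_lookup[cqid];
 *	if (!cq)
 *		(fall back to searching eq->child_list)
 */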
9561
9562/**
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009563 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
9564 * @phba: pointer to lpfc hba data structure.
9565 *
9566 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
9567 * operation.
9568 *
9569 * Return codes
9570 * 0 - successful
9571 * -ENOMEM - No available memory
9572 * -ENXIO - The mailbox failed to complete successfully.
9573 **/
9574int
9575lpfc_sli4_queue_setup(struct lpfc_hba *phba)
9576{
9577 uint32_t shdr_status, shdr_add_status;
9578 union lpfc_sli4_cfg_shdr *shdr;
David Brazdil0f672f62019-12-10 10:32:29 +00009579 struct lpfc_vector_map_info *cpup;
9580 struct lpfc_sli4_hdw_queue *qp;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009581 LPFC_MBOXQ_t *mboxq;
David Brazdil0f672f62019-12-10 10:32:29 +00009582 int qidx, cpu;
9583 uint32_t length, usdelay;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009584 int rc = -ENOMEM;
9585
9586 /* Check for dual-ULP support */
9587 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9588 if (!mboxq) {
Olivier Deprez157378f2022-04-04 15:47:50 +02009589 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009590 "3249 Unable to allocate memory for "
9591 "QUERY_FW_CFG mailbox command\n");
9592 return -ENOMEM;
9593 }
9594 length = (sizeof(struct lpfc_mbx_query_fw_config) -
9595 sizeof(struct lpfc_sli4_cfg_mhdr));
9596 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
9597 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
9598 length, LPFC_SLI4_MBX_EMBED);
9599
9600 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
9601
9602 shdr = (union lpfc_sli4_cfg_shdr *)
9603 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
9604 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9605 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9606 if (shdr_status || shdr_add_status || rc) {
Olivier Deprez157378f2022-04-04 15:47:50 +02009607 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009608 "3250 QUERY_FW_CFG mailbox failed with status "
9609 "x%x add_status x%x, mbx status x%x\n",
9610 shdr_status, shdr_add_status, rc);
Olivier Deprez0e641232021-09-23 10:07:05 +02009611 mempool_free(mboxq, phba->mbox_mem_pool);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009612 rc = -ENXIO;
9613 goto out_error;
9614 }
9615
9616 phba->sli4_hba.fw_func_mode =
9617 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
9618 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
9619 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
9620 phba->sli4_hba.physical_port =
9621 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
9622 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9623 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
9624 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
9625 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
9626
Olivier Deprez0e641232021-09-23 10:07:05 +02009627 mempool_free(mboxq, phba->mbox_mem_pool);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009628
9629 /*
9630 * Set up HBA Event Queues (EQs)
9631 */
David Brazdil0f672f62019-12-10 10:32:29 +00009632 qp = phba->sli4_hba.hdwq;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009633
9634 /* Set up HBA event queue */
David Brazdil0f672f62019-12-10 10:32:29 +00009635 if (!qp) {
Olivier Deprez157378f2022-04-04 15:47:50 +02009636 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009637 "3147 Fast-path EQs not allocated\n");
9638 rc = -ENOMEM;
9639 goto out_error;
9640 }
David Brazdil0f672f62019-12-10 10:32:29 +00009641
9642 /* Loop thru all IRQ vectors */
9643 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
9644 /* Create HBA Event Queues (EQs) in order */
9645 for_each_present_cpu(cpu) {
9646 cpup = &phba->sli4_hba.cpu_map[cpu];
9647
9648 /* Look for the CPU that's using that vector with
9649 * LPFC_CPU_FIRST_IRQ set.
9650 */
9651 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
9652 continue;
9653 if (qidx != cpup->eq)
9654 continue;
9655
9656 /* Create an EQ for that vector */
9657 rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq,
9658 phba->cfg_fcp_imax);
9659 if (rc) {
Olivier Deprez157378f2022-04-04 15:47:50 +02009660 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
David Brazdil0f672f62019-12-10 10:32:29 +00009661 "0523 Failed setup of fast-path"
9662 " EQ (%d), rc = 0x%x\n",
9663 cpup->eq, (uint32_t)rc);
9664 goto out_destroy;
9665 }
9666
9667 /* Save the EQ for that vector in the hba_eq_hdl */
9668 phba->sli4_hba.hba_eq_hdl[cpup->eq].eq =
9669 qp[cpup->hdwq].hba_eq;
9670
9671 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9672 "2584 HBA EQ setup: queue[%d]-id=%d\n",
9673 cpup->eq,
9674 qp[cpup->hdwq].hba_eq->queue_id);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009675 }
David Brazdil0f672f62019-12-10 10:32:29 +00009676 }
9677
9678 /* Loop thru all Hardware Queues */
9679 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
9680 cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ);
9681 cpup = &phba->sli4_hba.cpu_map[cpu];
9682
9683 /* Create the CQ/WQ corresponding to the Hardware Queue */
9684 rc = lpfc_create_wq_cq(phba,
9685 phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
9686 qp[qidx].io_cq,
9687 qp[qidx].io_wq,
9688 &phba->sli4_hba.hdwq[qidx].io_cq_map,
9689 qidx,
9690 LPFC_IO);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009691 if (rc) {
Olivier Deprez157378f2022-04-04 15:47:50 +02009692 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009693 "0535 Failed to setup fastpath "
David Brazdil0f672f62019-12-10 10:32:29 +00009694 "IO WQ/CQ (%d), rc = 0x%x\n",
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009695 qidx, (uint32_t)rc);
David Brazdil0f672f62019-12-10 10:32:29 +00009696 goto out_destroy;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009697 }
9698 }
9699
9700 /*
9701 * Set up Slow Path Complete Queues (CQs)
9702 */
9703
9704 /* Set up slow-path MBOX CQ/MQ */
9705
9706 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
Olivier Deprez157378f2022-04-04 15:47:50 +02009707 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009708 "0528 %s not allocated\n",
9709 phba->sli4_hba.mbx_cq ?
9710 "Mailbox WQ" : "Mailbox CQ");
9711 rc = -ENOMEM;
9712 goto out_destroy;
9713 }
9714
David Brazdil0f672f62019-12-10 10:32:29 +00009715 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009716 phba->sli4_hba.mbx_cq,
9717 phba->sli4_hba.mbx_wq,
9718 NULL, 0, LPFC_MBOX);
9719 if (rc) {
Olivier Deprez157378f2022-04-04 15:47:50 +02009720 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009721 "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
9722 (uint32_t)rc);
9723 goto out_destroy;
9724 }
9725 if (phba->nvmet_support) {
9726 if (!phba->sli4_hba.nvmet_cqset) {
Olivier Deprez157378f2022-04-04 15:47:50 +02009727 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009728 "3165 Fast-path NVME CQ Set "
9729 "array not allocated\n");
9730 rc = -ENOMEM;
9731 goto out_destroy;
9732 }
9733 if (phba->cfg_nvmet_mrq > 1) {
9734 rc = lpfc_cq_create_set(phba,
9735 phba->sli4_hba.nvmet_cqset,
David Brazdil0f672f62019-12-10 10:32:29 +00009736 qp,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009737 LPFC_WCQ, LPFC_NVMET);
9738 if (rc) {
Olivier Deprez157378f2022-04-04 15:47:50 +02009739 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009740 "3164 Failed setup of NVME CQ "
9741 "Set, rc = 0x%x\n",
9742 (uint32_t)rc);
9743 goto out_destroy;
9744 }
9745 } else {
9746 /* Set up NVMET Receive Complete Queue */
9747 rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
David Brazdil0f672f62019-12-10 10:32:29 +00009748 qp[0].hba_eq,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009749 LPFC_WCQ, LPFC_NVMET);
9750 if (rc) {
Olivier Deprez157378f2022-04-04 15:47:50 +02009751 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009752 "6089 Failed setup NVMET CQ: "
9753 "rc = 0x%x\n", (uint32_t)rc);
9754 goto out_destroy;
9755 }
9756 phba->sli4_hba.nvmet_cqset[0]->chann = 0;
9757
9758 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9759 "6090 NVMET CQ setup: cq-id=%d, "
9760 "parent eq-id=%d\n",
9761 phba->sli4_hba.nvmet_cqset[0]->queue_id,
David Brazdil0f672f62019-12-10 10:32:29 +00009762 qp[0].hba_eq->queue_id);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009763 }
9764 }
9765
9766 /* Set up slow-path ELS WQ/CQ */
9767 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
Olivier Deprez157378f2022-04-04 15:47:50 +02009768 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009769 "0530 ELS %s not allocated\n",
9770 phba->sli4_hba.els_cq ? "WQ" : "CQ");
9771 rc = -ENOMEM;
9772 goto out_destroy;
9773 }
David Brazdil0f672f62019-12-10 10:32:29 +00009774 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
9775 phba->sli4_hba.els_cq,
9776 phba->sli4_hba.els_wq,
9777 NULL, 0, LPFC_ELS);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009778 if (rc) {
Olivier Deprez157378f2022-04-04 15:47:50 +02009779 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
David Brazdil0f672f62019-12-10 10:32:29 +00009780 "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
9781 (uint32_t)rc);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009782 goto out_destroy;
9783 }
9784 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9785 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
9786 phba->sli4_hba.els_wq->queue_id,
9787 phba->sli4_hba.els_cq->queue_id);
9788
David Brazdil0f672f62019-12-10 10:32:29 +00009789 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009790 /* Set up NVME LS Complete Queue */
9791 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
Olivier Deprez157378f2022-04-04 15:47:50 +02009792 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009793 "6091 LS %s not allocated\n",
9794 phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
9795 rc = -ENOMEM;
9796 goto out_destroy;
9797 }
David Brazdil0f672f62019-12-10 10:32:29 +00009798 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
9799 phba->sli4_hba.nvmels_cq,
9800 phba->sli4_hba.nvmels_wq,
9801 NULL, 0, LPFC_NVME_LS);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009802 if (rc) {
Olivier Deprez157378f2022-04-04 15:47:50 +02009803 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
David Brazdil0f672f62019-12-10 10:32:29 +00009804 "0526 Failed setup of NVME LS WQ/CQ: "
9805 "rc = 0x%x\n", (uint32_t)rc);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009806 goto out_destroy;
9807 }
9808
9809 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9810 "6096 ELS WQ setup: wq-id=%d, "
9811 "parent cq-id=%d\n",
9812 phba->sli4_hba.nvmels_wq->queue_id,
9813 phba->sli4_hba.nvmels_cq->queue_id);
9814 }
9815
9816 /*
9817 * Create NVMET Receive Queue (RQ)
9818 */
9819 if (phba->nvmet_support) {
9820 if ((!phba->sli4_hba.nvmet_cqset) ||
9821 (!phba->sli4_hba.nvmet_mrq_hdr) ||
9822 (!phba->sli4_hba.nvmet_mrq_data)) {
Olivier Deprez157378f2022-04-04 15:47:50 +02009823 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009824 "6130 MRQ CQ Queues not "
9825 "allocated\n");
9826 rc = -ENOMEM;
9827 goto out_destroy;
9828 }
9829 if (phba->cfg_nvmet_mrq > 1) {
9830 rc = lpfc_mrq_create(phba,
9831 phba->sli4_hba.nvmet_mrq_hdr,
9832 phba->sli4_hba.nvmet_mrq_data,
9833 phba->sli4_hba.nvmet_cqset,
9834 LPFC_NVMET);
9835 if (rc) {
Olivier Deprez157378f2022-04-04 15:47:50 +02009836 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009837 "6098 Failed setup of NVMET "
9838 "MRQ: rc = 0x%x\n",
9839 (uint32_t)rc);
9840 goto out_destroy;
9841 }
9842
9843 } else {
9844 rc = lpfc_rq_create(phba,
9845 phba->sli4_hba.nvmet_mrq_hdr[0],
9846 phba->sli4_hba.nvmet_mrq_data[0],
9847 phba->sli4_hba.nvmet_cqset[0],
9848 LPFC_NVMET);
9849 if (rc) {
Olivier Deprez157378f2022-04-04 15:47:50 +02009850 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009851 "6057 Failed setup of NVMET "
9852 "Receive Queue: rc = 0x%x\n",
9853 (uint32_t)rc);
9854 goto out_destroy;
9855 }
9856
9857 lpfc_printf_log(
9858 phba, KERN_INFO, LOG_INIT,
9859 "6099 NVMET RQ setup: hdr-rq-id=%d, "
9860 "dat-rq-id=%d parent cq-id=%d\n",
9861 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
9862 phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
9863 phba->sli4_hba.nvmet_cqset[0]->queue_id);
9864
9865 }
9866 }
9867
9868 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
Olivier Deprez157378f2022-04-04 15:47:50 +02009869 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009870 "0540 Receive Queue not allocated\n");
9871 rc = -ENOMEM;
9872 goto out_destroy;
9873 }
9874
9875 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
9876 phba->sli4_hba.els_cq, LPFC_USOL);
9877 if (rc) {
Olivier Deprez157378f2022-04-04 15:47:50 +02009878 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009879 "0541 Failed setup of Receive Queue: "
9880 "rc = 0x%x\n", (uint32_t)rc);
9881 goto out_destroy;
9882 }
9883
9884 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9885 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
9886 "parent cq-id=%d\n",
9887 phba->sli4_hba.hdr_rq->queue_id,
9888 phba->sli4_hba.dat_rq->queue_id,
9889 phba->sli4_hba.els_cq->queue_id);
9890
David Brazdil0f672f62019-12-10 10:32:29 +00009891 if (phba->cfg_fcp_imax)
9892 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
9893 else
9894 usdelay = 0;
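	/*
	 * cfg_fcp_imax is expressed in interrupts per second; assuming
	 * LPFC_SEC_TO_USEC is one second in microseconds, an imax of
	 * 50000 yields usdelay = 1000000 / 50000 = 20us of coalescing
	 * per EQ, while an imax of 0 disables the delay entirely.
	 */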
9895
9896 for (qidx = 0; qidx < phba->cfg_irq_chann;
9897 qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
9898 lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
9899 usdelay);
9900
9901 if (phba->sli4_hba.cq_max) {
9902 kfree(phba->sli4_hba.cq_lookup);
9903 phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1),
9904 sizeof(struct lpfc_queue *), GFP_KERNEL);
9905 if (!phba->sli4_hba.cq_lookup) {
Olivier Deprez157378f2022-04-04 15:47:50 +02009906 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
David Brazdil0f672f62019-12-10 10:32:29 +00009907 "0549 Failed setup of CQ Lookup table: "
9908 "size 0x%x\n", phba->sli4_hba.cq_max);
9909 rc = -ENOMEM;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009910 goto out_destroy;
9911 }
David Brazdil0f672f62019-12-10 10:32:29 +00009912 lpfc_setup_cq_lookup(phba);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009913 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009914 return 0;
9915
9916out_destroy:
9917 lpfc_sli4_queue_unset(phba);
9918out_error:
9919 return rc;
9920}
9921
9922/**
9923 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
9924 * @phba: pointer to lpfc hba data structure.
9925 *
9926 * This routine is invoked to unset (destroy) all the SLI4 queues used by the
9927 * FCoE HBA operation. The routine is void; failures of the individual
9928 * queue-destroy mailbox commands are not propagated to the caller.
9933 **/
9934void
9935lpfc_sli4_queue_unset(struct lpfc_hba *phba)
9936{
David Brazdil0f672f62019-12-10 10:32:29 +00009937 struct lpfc_sli4_hdw_queue *qp;
9938 struct lpfc_queue *eq;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009939 int qidx;
9940
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009941 /* Unset mailbox command work queue */
9942 if (phba->sli4_hba.mbx_wq)
9943 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
9944
9945 /* Unset NVME LS work queue */
9946 if (phba->sli4_hba.nvmels_wq)
9947 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);
9948
9949 /* Unset ELS work queue */
9950 if (phba->sli4_hba.els_wq)
9951 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
9952
9953 /* Unset unsolicited receive queue */
9954 if (phba->sli4_hba.hdr_rq)
9955 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
9956 phba->sli4_hba.dat_rq);
9957
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009958 /* Unset mailbox command complete queue */
9959 if (phba->sli4_hba.mbx_cq)
9960 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
9961
9962 /* Unset ELS complete queue */
9963 if (phba->sli4_hba.els_cq)
9964 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
9965
9966 /* Unset NVME LS complete queue */
9967 if (phba->sli4_hba.nvmels_cq)
9968 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);
9969
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009970 if (phba->nvmet_support) {
9971 /* Unset NVMET MRQ queue */
9972 if (phba->sli4_hba.nvmet_mrq_hdr) {
9973 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
9974 lpfc_rq_destroy(
9975 phba,
9976 phba->sli4_hba.nvmet_mrq_hdr[qidx],
9977 phba->sli4_hba.nvmet_mrq_data[qidx]);
9978 }
9979
9980 /* Unset NVMET CQ Set complete queue */
9981 if (phba->sli4_hba.nvmet_cqset) {
9982 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
9983 lpfc_cq_destroy(
9984 phba, phba->sli4_hba.nvmet_cqset[qidx]);
9985 }
9986 }
9987
David Brazdil0f672f62019-12-10 10:32:29 +00009988 /* Unset fast-path SLI4 queues */
9989 if (phba->sli4_hba.hdwq) {
9990 /* Loop thru all Hardware Queues */
9991 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
9992 /* Destroy the CQ/WQ corresponding to Hardware Queue */
9993 qp = &phba->sli4_hba.hdwq[qidx];
9994 lpfc_wq_destroy(phba, qp->io_wq);
9995 lpfc_cq_destroy(phba, qp->io_cq);
9996 }
9997 /* Loop thru all IRQ vectors */
9998 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
9999 /* Destroy the EQ corresponding to the IRQ vector */
10000 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
10001 lpfc_eq_destroy(phba, eq);
10002 }
10003 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000010004
David Brazdil0f672f62019-12-10 10:32:29 +000010005 kfree(phba->sli4_hba.cq_lookup);
10006 phba->sli4_hba.cq_lookup = NULL;
10007 phba->sli4_hba.cq_max = 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000010008}
10009
10010/**
10011 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
10012 * @phba: pointer to lpfc hba data structure.
10013 *
10014 * This routine is invoked to allocate and set up a pool of completion queue
10015 * events. The body of the completion queue event is a completion queue entry
10016 * (CQE). For now, this pool is used by the interrupt service routine to queue
10017 * the following HBA completion queue events for the worker thread to process:
10018 * - Mailbox asynchronous events
10019 * - Receive queue completion unsolicited events
10020 * Later, this can be used for all the slow-path events.
10021 *
10022 * Return codes
10023 * 0 - successful
10024 * -ENOMEM - No available memory
10025 **/
10026static int
10027lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
10028{
10029 struct lpfc_cq_event *cq_event;
10030 int i;
10031
10032 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
10033 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
10034 if (!cq_event)
10035 goto out_pool_create_fail;
10036 list_add_tail(&cq_event->list,
10037 &phba->sli4_hba.sp_cqe_event_pool);
10038 }
10039 return 0;
10040
10041out_pool_create_fail:
10042 lpfc_sli4_cq_event_pool_destroy(phba);
10043 return -ENOMEM;
10044}
10045
10046/**
10047 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
10048 * @phba: pointer to lpfc hba data structure.
10049 *
10050 * This routine is invoked to free the pool of completion queue events at
10051 * driver unload time. Note that it is the responsibility of the driver
10052 * cleanup routine to free all the outstanding completion-queue events
10053 * allocated from this pool back into the pool before invoking this routine
10054 * to destroy the pool.
10055 **/
10056static void
10057lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
10058{
10059 struct lpfc_cq_event *cq_event, *next_cq_event;
10060
10061 list_for_each_entry_safe(cq_event, next_cq_event,
10062 &phba->sli4_hba.sp_cqe_event_pool, list) {
10063 list_del(&cq_event->list);
10064 kfree(cq_event);
10065 }
10066}
10067
10068/**
10069 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
10070 * @phba: pointer to lpfc hba data structure.
10071 *
10072 * This routine is the lock free version of the API invoked to allocate a
10073 * completion-queue event from the free pool.
10074 *
10075 * Return: Pointer to the newly allocated completion-queue event if successful
10076 * NULL otherwise.
10077 **/
10078struct lpfc_cq_event *
10079__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
10080{
10081 struct lpfc_cq_event *cq_event = NULL;
10082
10083 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
10084 struct lpfc_cq_event, list);
10085 return cq_event;
10086}
10087
10088/**
10089 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
10090 * @phba: pointer to lpfc hba data structure.
10091 *
10092 * This routine is the lock version of the API invoked to allocate a
10093 * completion-queue event from the free pool.
10094 *
10095 * Return: Pointer to the newly allocated completion-queue event if successful
10096 * NULL otherwise.
10097 **/
10098struct lpfc_cq_event *
10099lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
10100{
10101 struct lpfc_cq_event *cq_event;
10102 unsigned long iflags;
10103
10104 spin_lock_irqsave(&phba->hbalock, iflags);
10105 cq_event = __lpfc_sli4_cq_event_alloc(phba);
10106 spin_unlock_irqrestore(&phba->hbalock, iflags);
10107 return cq_event;
10108}
10109
10110/**
10111 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
10112 * @phba: pointer to lpfc hba data structure.
10113 * @cq_event: pointer to the completion queue event to be freed.
10114 *
10115 * This routine is the lock free version of the API invoked to release a
10116 * completion-queue event back into the free pool.
10117 **/
10118void
10119__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
10120 struct lpfc_cq_event *cq_event)
10121{
10122 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
10123}
10124
10125/**
10126 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
10127 * @phba: pointer to lpfc hba data structure.
10128 * @cq_event: pointer to the completion queue event to be freed.
10129 *
10130 * This routine is the lock version of the API invoked to release a
10131 * completion-queue event back into the free pool.
10132 **/
10133void
10134lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
10135 struct lpfc_cq_event *cq_event)
10136{
10137 unsigned long iflags;
10138 spin_lock_irqsave(&phba->hbalock, iflags);
10139 __lpfc_sli4_cq_event_release(phba, cq_event);
10140 spin_unlock_irqrestore(&phba->hbalock, iflags);
10141}
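
/*
 * Typical pairing of the pool APIs above (a sketch, not any one call
 * site in this driver): the interrupt path borrows an event and queues
 * it for the worker thread, which returns it to the pool once the
 * event has been processed:
 *
 *	struct lpfc_cq_event *cq_event = lpfc_sli4_cq_event_alloc(phba);
 *
 *	if (cq_event) {
 *		(copy the CQE into cq_event and queue it for the worker)
 *		...
 *		lpfc_sli4_cq_event_release(phba, cq_event);
 *	}
 */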
10142
10143/**
10144 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
10145 * @phba: pointer to lpfc hba data structure.
10146 *
10147 * This routine frees all the pending completion-queue events back
10148 * into the free pool for device reset.
10149 **/
10150static void
10151lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
10152{
Olivier Deprez157378f2022-04-04 15:47:50 +020010153 LIST_HEAD(cq_event_list);
10154 struct lpfc_cq_event *cq_event;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000010155 unsigned long iflags;
10156
10157 /* Retrieve all the pending WCQEs from pending WCQE lists */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000010158
Olivier Deprez157378f2022-04-04 15:47:50 +020010159 /* Pending ELS XRI abort events */
10160 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
10161 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
10162 &cq_event_list);
10163 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
10164
10165 /* Pending async events */
10166 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
10167 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
10168 &cq_event_list);
10169 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
10170
10171 while (!list_empty(&cq_event_list)) {
10172 list_remove_head(&cq_event_list, cq_event,
10173 struct lpfc_cq_event, list);
10174 lpfc_sli4_cq_event_release(phba, cq_event);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000010175 }
10176}
10177
10178/**
10179 * lpfc_pci_function_reset - Reset pci function.
10180 * @phba: pointer to lpfc hba data structure.
10181 *
10182 * This routine is invoked to request a PCI function reset. It destroys
10183 * all resources assigned to the PCI function that originates this request.
10184 *
10185 * Return codes
10186 * 0 - successful
10187 * -ENOMEM - No available memory
10188 * -ENXIO - The function-reset mailbox failed to complete successfully.
10189 * -ENODEV - The port never reported ready.
10189 **/
10190int
10191lpfc_pci_function_reset(struct lpfc_hba *phba)
10192{
10193 LPFC_MBOXQ_t *mboxq;
10194 uint32_t rc = 0, if_type;
10195 uint32_t shdr_status, shdr_add_status;
10196 uint32_t rdy_chk;
10197 uint32_t port_reset = 0;
10198 union lpfc_sli4_cfg_shdr *shdr;
10199 struct lpfc_register reg_data;
10200 uint16_t devid;
10201
10202 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10203 switch (if_type) {
10204 case LPFC_SLI_INTF_IF_TYPE_0:
10205 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
10206 GFP_KERNEL);
10207 if (!mboxq) {
Olivier Deprez157378f2022-04-04 15:47:50 +020010208 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000010209 "0494 Unable to allocate memory for "
10210 "issuing SLI_FUNCTION_RESET mailbox "
10211 "command\n");
10212 return -ENOMEM;
10213 }
10214
10215 /* Setup PCI function reset mailbox-ioctl command */
10216 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
10217 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
10218 LPFC_SLI4_MBX_EMBED);
10219 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
10220 shdr = (union lpfc_sli4_cfg_shdr *)
10221 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
10222 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10223 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
10224 &shdr->response);
Olivier Deprez0e641232021-09-23 10:07:05 +020010225 mempool_free(mboxq, phba->mbox_mem_pool);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000010226 if (shdr_status || shdr_add_status || rc) {
Olivier Deprez157378f2022-04-04 15:47:50 +020010227 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000010228 "0495 SLI_FUNCTION_RESET mailbox "
10229 "failed with status x%x add_status x%x,"
10230 " mbx status x%x\n",
10231 shdr_status, shdr_add_status, rc);
10232 rc = -ENXIO;
10233 }
10234 break;
10235 case LPFC_SLI_INTF_IF_TYPE_2:
10236 case LPFC_SLI_INTF_IF_TYPE_6:
10237wait:
10238 /*
10239 * Poll the Port Status Register and wait for RDY for
10240 * up to 30 seconds. If the port doesn't respond, treat
10241 * it as an error.
10242 */
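		/* 1500 polls x 20 ms sleep = 30 s worst-case wait */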
10243 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
10244 if (lpfc_readl(phba->sli4_hba.u.if_type2.
10245 STATUSregaddr, &reg_data.word0)) {
10246 rc = -ENODEV;
10247 goto out;
10248 }
10249 if (bf_get(lpfc_sliport_status_rdy, &reg_data))
10250 break;
10251 msleep(20);
10252 }
10253
10254 if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
10255 phba->work_status[0] = readl(
10256 phba->sli4_hba.u.if_type2.ERR1regaddr);
10257 phba->work_status[1] = readl(
10258 phba->sli4_hba.u.if_type2.ERR2regaddr);
Olivier Deprez157378f2022-04-04 15:47:50 +020010259 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000010260 "2890 Port not ready, port status reg "
10261 "0x%x error 1=0x%x, error 2=0x%x\n",
10262 reg_data.word0,
10263 phba->work_status[0],
10264 phba->work_status[1]);
10265 rc = -ENODEV;
10266 goto out;
10267 }
10268
10269 if (!port_reset) {
10270 /*
10271 * Reset the port now
10272 */
10273 reg_data.word0 = 0;
10274 bf_set(lpfc_sliport_ctrl_end, &reg_data,
10275 LPFC_SLIPORT_LITTLE_ENDIAN);
10276 bf_set(lpfc_sliport_ctrl_ip, &reg_data,
10277 LPFC_SLIPORT_INIT_PORT);
10278 writel(reg_data.word0, phba->sli4_hba.u.if_type2.
10279 CTRLregaddr);
10280 /* flush */
10281 pci_read_config_word(phba->pcidev,
10282 PCI_DEVICE_ID, &devid);
10283
10284 port_reset = 1;
10285 msleep(20);
10286 goto wait;
10287 } else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
10288 rc = -ENODEV;
10289 goto out;
10290 }
10291 break;
10292
10293 case LPFC_SLI_INTF_IF_TYPE_1:
10294 default:
10295 break;
10296 }
10297
10298out:
10299 /* Catch the not-ready port failure after a port reset. */
10300 if (rc) {
Olivier Deprez157378f2022-04-04 15:47:50 +020010301 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000010302 "3317 HBA not functional: IP Reset Failed "
10303 "try: echo fw_reset > board_mode\n");
10304 rc = -ENODEV;
10305 }
10306
10307 return rc;
10308}
10309
10310/**
10311 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
10312 * @phba: pointer to lpfc hba data structure.
10313 *
10314 * This routine is invoked to set up the PCI device memory space for device
10315 * with SLI-4 interface spec.
10316 *
10317 * Return codes
10318 * 0 - successful
10319 * other values - error
10320 **/
10321static int
10322lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
10323{
David Brazdil0f672f62019-12-10 10:32:29 +000010324 struct pci_dev *pdev = phba->pcidev;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000010325 unsigned long bar0map_len, bar1map_len, bar2map_len;
David Brazdil0f672f62019-12-10 10:32:29 +000010326 int error;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000010327 uint32_t if_type;
10328
David Brazdil0f672f62019-12-10 10:32:29 +000010329 if (!pdev)
10330 return -ENODEV;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000010331
10332 /* Set the device DMA mask size */
David Brazdil0f672f62019-12-10 10:32:29 +000010333 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10334 if (error)
10335 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10336 if (error)
10337 return error;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000010338
10339 /*
10340 * The BARs and register set definitions and offset locations are
10341 * dependent on the if_type.
10342 */
10343 if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
10344 &phba->sli4_hba.sli_intf.word0)) {
David Brazdil0f672f62019-12-10 10:32:29 +000010345 return -ENODEV;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000010346 }
10347
10348 /* There is no SLI3 failback for SLI4 devices. */
10349 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
10350 LPFC_SLI_INTF_VALID) {
Olivier Deprez157378f2022-04-04 15:47:50 +020010351 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000010352 "2894 SLI_INTF reg contents invalid "
10353 "sli_intf reg 0x%x\n",
10354 phba->sli4_hba.sli_intf.word0);
David Brazdil0f672f62019-12-10 10:32:29 +000010355 return -ENODEV;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000010356 }
10357
10358 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10359 /*
10360 * Get the bus address of SLI4 device Bar regions and the
10361 * number of bytes required by each mapping. The mapping of the
10362 * particular PCI BARs regions is dependent on the type of
10363 * SLI4 device.
10364 */
10365 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
10366 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
10367 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
10368
10369 /*
10370 * Map SLI4 PCI Config Space Register base to a kernel virtual
10371 * addr
10372 */
10373 phba->sli4_hba.conf_regs_memmap_p =
10374 ioremap(phba->pci_bar0_map, bar0map_len);
10375 if (!phba->sli4_hba.conf_regs_memmap_p) {
10376 dev_printk(KERN_ERR, &pdev->dev,
10377 "ioremap failed for SLI4 PCI config "
10378 "registers.\n");
David Brazdil0f672f62019-12-10 10:32:29 +000010379 return -ENODEV;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000010380 }
10381 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
10382 /* Set up BAR0 PCI config space register memory map */
10383 lpfc_sli4_bar0_register_memmap(phba, if_type);
10384 } else {
10385 phba->pci_bar0_map = pci_resource_start(pdev, 1);
10386 bar0map_len = pci_resource_len(pdev, 1);
10387 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
10388 dev_printk(KERN_ERR, &pdev->dev,
10389 "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
David Brazdil0f672f62019-12-10 10:32:29 +000010390 return -ENODEV;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000010391 }
10392 phba->sli4_hba.conf_regs_memmap_p =
10393 ioremap(phba->pci_bar0_map, bar0map_len);
10394 if (!phba->sli4_hba.conf_regs_memmap_p) {
10395 dev_printk(KERN_ERR, &pdev->dev,
10396 "ioremap failed for SLI4 PCI config "
10397 "registers.\n");
David Brazdil0f672f62019-12-10 10:32:29 +000010398 return -ENODEV;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000010399 }
10400 lpfc_sli4_bar0_register_memmap(phba, if_type);
10401 }
10402
10403 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
10404 if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
10405 /*
10406 * Map SLI4 if type 0 HBA Control Register base to a
10407 * kernel virtual address and setup the registers.
10408 */
10409 phba->pci_bar1_map = pci_resource_start(pdev,
10410 PCI_64BIT_BAR2);
10411 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
10412 phba->sli4_hba.ctrl_regs_memmap_p =
10413 ioremap(phba->pci_bar1_map,
10414 bar1map_len);
10415 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
10416 dev_err(&pdev->dev,
10417 "ioremap failed for SLI4 HBA "
10418 "control registers.\n");
10419 error = -ENOMEM;
10420 goto out_iounmap_conf;
10421 }
10422 phba->pci_bar2_memmap_p =
10423 phba->sli4_hba.ctrl_regs_memmap_p;
10424 lpfc_sli4_bar1_register_memmap(phba, if_type);
10425 } else {
10426 error = -ENOMEM;
10427 goto out_iounmap_conf;
10428 }
10429 }
10430
10431 if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) &&
10432 (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
10433 /*
10434 * Map SLI4 if type 6 HBA Doorbell Register base to a kernel
10435 * virtual address and setup the registers.
10436 */
10437 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
10438 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
10439 phba->sli4_hba.drbl_regs_memmap_p =
10440 ioremap(phba->pci_bar1_map, bar1map_len);
10441 if (!phba->sli4_hba.drbl_regs_memmap_p) {
10442 dev_err(&pdev->dev,
10443 "ioremap failed for SLI4 HBA doorbell registers.\n");
David Brazdil0f672f62019-12-10 10:32:29 +000010444 error = -ENOMEM;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000010445 goto out_iounmap_conf;
10446 }
10447 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
10448 lpfc_sli4_bar1_register_memmap(phba, if_type);
10449 }
10450
10451 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
10452 if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
10453 /*
10454 * Map SLI4 if type 0 HBA Doorbell Register base to
10455 * a kernel virtual address and setup the registers.
10456 */
10457 phba->pci_bar2_map = pci_resource_start(pdev,
10458 PCI_64BIT_BAR4);
10459 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
10460 phba->sli4_hba.drbl_regs_memmap_p =
10461 ioremap(phba->pci_bar2_map,
10462 bar2map_len);
10463 if (!phba->sli4_hba.drbl_regs_memmap_p) {
10464 dev_err(&pdev->dev,
10465 "ioremap failed for SLI4 HBA"
10466 " doorbell registers.\n");
10467 error = -ENOMEM;
10468 goto out_iounmap_ctrl;
10469 }
10470 phba->pci_bar4_memmap_p =
10471 phba->sli4_hba.drbl_regs_memmap_p;
10472 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
10473 if (error)
10474 goto out_iounmap_all;
10475 } else {
10476 error = -ENOMEM;
10477 goto out_iounmap_all;
10478 }
10479 }
10480
10481 if (if_type == LPFC_SLI_INTF_IF_TYPE_6 &&
10482 pci_resource_start(pdev, PCI_64BIT_BAR4)) {
10483 /*
10484 * Map SLI4 if type 6 HBA DPP Register base to a kernel
10485 * virtual address and setup the registers.
10486 */
10487 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
10488 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
10489 phba->sli4_hba.dpp_regs_memmap_p =
10490 ioremap(phba->pci_bar2_map, bar2map_len);
10491 if (!phba->sli4_hba.dpp_regs_memmap_p) {
10492 dev_err(&pdev->dev,
10493 "ioremap failed for SLI4 HBA dpp registers.\n");
David Brazdil0f672f62019-12-10 10:32:29 +000010494 error = -ENOMEM;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000010495 goto out_iounmap_ctrl;
10496 }
10497 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
10498 }
10499
10500 /* Set up the EQ/CQ register handeling functions now */
10501 switch (if_type) {
10502 case LPFC_SLI_INTF_IF_TYPE_0:
10503 case LPFC_SLI_INTF_IF_TYPE_2:
10504 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
David Brazdil0f672f62019-12-10 10:32:29 +000010505 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db;
10506 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000010507 break;
10508 case LPFC_SLI_INTF_IF_TYPE_6:
10509 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
David Brazdil0f672f62019-12-10 10:32:29 +000010510 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db;
10511 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000010512 break;
10513 default:
10514 break;
10515 }
10516
10517 return 0;
10518
10519out_iounmap_all:
10520 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10521out_iounmap_ctrl:
10522 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
10523out_iounmap_conf:
10524 iounmap(phba->sli4_hba.conf_regs_memmap_p);
David Brazdil0f672f62019-12-10 10:32:29 +000010525
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000010526 return error;
10527}
10528
10529/**
10530 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
10531 * @phba: pointer to lpfc hba data structure.
10532 *
10533 * This routine is invoked to unset the PCI device memory space for device
10534 * with SLI-4 interface spec.
10535 **/
10536static void
10537lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
10538{
10539 uint32_t if_type;
10540 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10541
10542 switch (if_type) {
10543 case LPFC_SLI_INTF_IF_TYPE_0:
10544 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10545 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
10546 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10547 break;
10548 case LPFC_SLI_INTF_IF_TYPE_2:
10549 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10550 break;
10551 case LPFC_SLI_INTF_IF_TYPE_6:
10552 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10553 iounmap(phba->sli4_hba.conf_regs_memmap_p);
Olivier Deprez157378f2022-04-04 15:47:50 +020010554 if (phba->sli4_hba.dpp_regs_memmap_p)
10555 iounmap(phba->sli4_hba.dpp_regs_memmap_p);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000010556 break;
10557 case LPFC_SLI_INTF_IF_TYPE_1:
10558 default:
10559 dev_printk(KERN_ERR, &phba->pcidev->dev,
10560 "FATAL - unsupported SLI4 interface type - %d\n",
10561 if_type);
10562 break;
10563 }
10564}
10565
10566/**
10567 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
10568 * @phba: pointer to lpfc hba data structure.
10569 *
10570 * This routine is invoked to enable the MSI-X interrupt vectors to device
10571 * with SLI-3 interface specs.
10572 *
10573 * Return codes
10574 * 0 - successful
10575 * other values - error
10576 **/
10577static int
10578lpfc_sli_enable_msix(struct lpfc_hba *phba)
10579{
10580 int rc;
10581 LPFC_MBOXQ_t *pmb;
10582
10583 /* Set up MSI-X multi-message vectors */
10584 rc = pci_alloc_irq_vectors(phba->pcidev,
10585 LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX);
10586 if (rc < 0) {
10587 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10588 "0420 PCI enable MSI-X failed (%d)\n", rc);
10589 goto vec_fail_out;
10590 }
10591
10592 /*
10593 * Assign MSI-X vectors to interrupt handlers
10594 */
10595
10596 /* vector-0 is associated to slow-path handler */
10597 rc = request_irq(pci_irq_vector(phba->pcidev, 0),
10598 &lpfc_sli_sp_intr_handler, 0,
10599 LPFC_SP_DRIVER_HANDLER_NAME, phba);
10600 if (rc) {
10601 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10602 "0421 MSI-X slow-path request_irq failed "
10603 "(%d)\n", rc);
10604 goto msi_fail_out;
10605 }
10606
10607 /* vector-1 is associated to fast-path handler */
10608 rc = request_irq(pci_irq_vector(phba->pcidev, 1),
10609 &lpfc_sli_fp_intr_handler, 0,
10610 LPFC_FP_DRIVER_HANDLER_NAME, phba);
10611
10612 if (rc) {
10613 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10614 "0429 MSI-X fast-path request_irq failed "
10615 "(%d)\n", rc);
10616 goto irq_fail_out;
10617 }
10618
10619 /*
10620 * Configure HBA MSI-X attention conditions to messages
10621 */
10622 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10623
10624 if (!pmb) {
10625 rc = -ENOMEM;
Olivier Deprez157378f2022-04-04 15:47:50 +020010626 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000010627 "0474 Unable to allocate memory for issuing "
10628 "MBOX_CONFIG_MSI command\n");
10629 goto mem_fail_out;
10630 }
10631 rc = lpfc_config_msi(phba, pmb);
10632 if (rc)
10633 goto mbx_fail_out;
10634 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
10635 if (rc != MBX_SUCCESS) {
10636 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
10637 "0351 Config MSI mailbox command failed, "
10638 "mbxCmd x%x, mbxStatus x%x\n",
10639 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
10640 goto mbx_fail_out;
10641 }
10642
10643 /* Free memory allocated for mailbox command */
10644 mempool_free(pmb, phba->mbox_mem_pool);
10645 return rc;
10646
10647mbx_fail_out:
10648 /* Free memory allocated for mailbox command */
10649 mempool_free(pmb, phba->mbox_mem_pool);
10650
10651mem_fail_out:
10652 /* free the irq already requested */
10653 free_irq(pci_irq_vector(phba->pcidev, 1), phba);
10654
10655irq_fail_out:
10656 /* free the irq already requested */
10657 free_irq(pci_irq_vector(phba->pcidev, 0), phba);
10658
10659msi_fail_out:
10660 /* Unconfigure MSI-X capability structure */
10661 pci_free_irq_vectors(phba->pcidev);
10662
10663vec_fail_out:
10664 return rc;
10665}
10666
10667/**
10668 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
10669 * @phba: pointer to lpfc hba data structure.
10670 *
10671 * This routine is invoked to enable the MSI interrupt mode on a device with
10672 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
10673 * enable the MSI vector. The device driver is responsible for calling
10674 * request_irq() to register the MSI vector with an interrupt handler, which
10675 * is done in this function.
10676 *
10677 * Return codes
10678 * 0 - successful
10679 * other values - error
10680 */
10681static int
10682lpfc_sli_enable_msi(struct lpfc_hba *phba)
10683{
10684 int rc;
10685
10686 rc = pci_enable_msi(phba->pcidev);
10687 if (!rc)
10688 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10689 "0462 PCI enable MSI mode success.\n");
10690 else {
10691 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10692 "0471 PCI enable MSI mode failed (%d)\n", rc);
10693 return rc;
10694 }
10695
10696 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
10697 0, LPFC_DRIVER_NAME, phba);
10698 if (rc) {
10699 pci_disable_msi(phba->pcidev);
10700 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10701 "0478 MSI request_irq failed (%d)\n", rc);
10702 }
10703 return rc;
10704}
10705
10706/**
10707 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
10708 * @phba: pointer to lpfc hba data structure.
Olivier Deprez157378f2022-04-04 15:47:50 +020010709 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000010710 *
10711 * This routine is invoked to enable device interrupt and associate driver's
10712 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
10713 * spec. Depending on the interrupt mode configured in the driver, the driver
10714 * will try to fall back from the configured interrupt mode to an interrupt
10715 * mode which is supported by the platform, kernel, and device in the order
10716 * of:
10717 * MSI-X -> MSI -> IRQ.
10718 *
10719 * Return codes
10720 * 0 - successful
10721 * other values - error
10722 **/
10723static uint32_t
10724lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
10725{
10726 uint32_t intr_mode = LPFC_INTR_ERROR;
10727 int retval;
10728
10729 if (cfg_mode == 2) {
10730 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
10731 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
10732 if (!retval) {
10733 /* Now, try to enable MSI-X interrupt mode */
10734 retval = lpfc_sli_enable_msix(phba);
10735 if (!retval) {
10736 /* Indicate initialization to MSI-X mode */
10737 phba->intr_type = MSIX;
10738 intr_mode = 2;
10739 }
10740 }
10741 }
10742
10743 /* Fall back to MSI if MSI-X initialization failed */
10744 if (cfg_mode >= 1 && phba->intr_type == NONE) {
10745 retval = lpfc_sli_enable_msi(phba);
10746 if (!retval) {
10747 /* Indicate initialization to MSI mode */
10748 phba->intr_type = MSI;
10749 intr_mode = 1;
10750 }
10751 }
10752
10753 /* Fall back to INTx if both MSI-X/MSI initialization failed */
10754 if (phba->intr_type == NONE) {
10755 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
10756 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
10757 if (!retval) {
10758 /* Indicate initialization to INTx mode */
10759 phba->intr_type = INTx;
10760 intr_mode = 0;
10761 }
10762 }
10763 return intr_mode;
10764}
10765
10766/**
10767 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
10768 * @phba: pointer to lpfc hba data structure.
10769 *
10770 * This routine is invoked to disable device interrupt and disassociate the
10771 * driver's interrupt handler(s) from interrupt vector(s) to device with
10772 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
10773 * release the interrupt vector(s) for the message signaled interrupt.
10774 **/
10775static void
10776lpfc_sli_disable_intr(struct lpfc_hba *phba)
10777{
10778 int nr_irqs, i;
10779
10780 if (phba->intr_type == MSIX)
10781 nr_irqs = LPFC_MSIX_VECTORS;
10782 else
10783 nr_irqs = 1;
10784
10785 for (i = 0; i < nr_irqs; i++)
10786 free_irq(pci_irq_vector(phba->pcidev, i), phba);
10787 pci_free_irq_vectors(phba->pcidev);
10788
10789 /* Reset interrupt management states */
10790 phba->intr_type = NONE;
10791 phba->sli.slistat.sli_intr = 0;
10792}
10793
10794/**
David Brazdil0f672f62019-12-10 10:32:29 +000010795 * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue
10796 * @phba: pointer to lpfc hba data structure.
10797 * @id: EQ vector index or Hardware Queue index
10798 * @match: LPFC_FIND_BY_EQ = match by EQ
10799 * LPFC_FIND_BY_HDWQ = match by Hardware Queue
10800 * Return the CPU that matches the selection criteria
10801 */
10802static uint16_t
10803lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
10804{
10805 struct lpfc_vector_map_info *cpup;
10806 int cpu;
10807
10808 /* Loop through all CPUs */
10809 for_each_present_cpu(cpu) {
10810 cpup = &phba->sli4_hba.cpu_map[cpu];
10811
10812 /* If we are matching by EQ, there may be multiple CPUs
10813 * using the same vector, so select the one with
10814 * LPFC_CPU_FIRST_IRQ set.
10815 */
10816 if ((match == LPFC_FIND_BY_EQ) &&
10817 (cpup->flag & LPFC_CPU_FIRST_IRQ) &&
David Brazdil0f672f62019-12-10 10:32:29 +000010818 (cpup->eq == id))
10819 return cpu;
10820
10821 /* If matching by HDWQ, select the first CPU that matches */
10822 if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id))
10823 return cpu;
10824 }
10825 return 0;
10826}
10827
10828#ifdef CONFIG_X86
10829/**
10830 * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded
10831 * @phba: pointer to lpfc hba data structure.
10832 * @cpu: CPU map index
10833 * @phys_id: CPU package physical id
10834 * @core_id: CPU core id
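 *
 * Return: 1 if another present CPU shares @phys_id and @core_id with
 * @cpu (i.e. @cpu is a hyper-threaded sibling), otherwise 0.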
10835 */
10836static int
10837lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
10838 uint16_t phys_id, uint16_t core_id)
10839{
10840 struct lpfc_vector_map_info *cpup;
10841 int idx;
10842
10843 for_each_present_cpu(idx) {
10844 cpup = &phba->sli4_hba.cpu_map[idx];
10845 /* Does the cpup match the one we are looking for */
10846 if ((cpup->phys_id == phys_id) &&
10847 (cpup->core_id == core_id) &&
10848 (cpu != idx))
10849 return 1;
10850 }
10851 return 0;
10852}
10853#endif
10854
Olivier Deprez157378f2022-04-04 15:47:50 +020010855/**
10856 * lpfc_assign_eq_map_info - Assigns eq for vector_map structure
10857 * @phba: pointer to lpfc hba data structure.
10858 * @eqidx: index for eq and irq vector
10859 * @flag: flags to set for vector_map structure
10860 * @cpu: cpu used to index vector_map structure
10861 *
10862 * The routine assigns eq info into vector_map structure
10863 */
10864static inline void
10865lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag,
10866 unsigned int cpu)
10867{
10868 struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu];
10869 struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx);
10870
10871 cpup->eq = eqidx;
10872 cpup->flag |= flag;
10873
10874 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10875 "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n",
10876 cpu, eqhdl->irq, cpup->eq, cpup->flag);
10877}
10878
10879/**
10880 * lpfc_cpu_map_array_init - Initialize cpu_map structure
10881 * @phba: pointer to lpfc hba data structure.
10882 *
10883 * The routine initializes the cpu_map array structure
10884 */
10885static void
10886lpfc_cpu_map_array_init(struct lpfc_hba *phba)
10887{
10888 struct lpfc_vector_map_info *cpup;
10889 struct lpfc_eq_intr_info *eqi;
10890 int cpu;
10891
10892 for_each_possible_cpu(cpu) {
10893 cpup = &phba->sli4_hba.cpu_map[cpu];
10894 cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
10895 cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
10896 cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
10897 cpup->eq = LPFC_VECTOR_MAP_EMPTY;
10898 cpup->flag = 0;
10899 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu);
10900 INIT_LIST_HEAD(&eqi->list);
10901 eqi->icnt = 0;
10902 }
10903}
10904
10905/**
10906 * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure
10907 * @phba: pointer to lpfc hba data structure.
10908 *
10909 * The routine initializes the hba_eq_hdl array structure
10910 */
10911static void
10912lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba)
10913{
10914 struct lpfc_hba_eq_hdl *eqhdl;
10915 int i;
10916
10917 for (i = 0; i < phba->cfg_irq_chann; i++) {
10918 eqhdl = lpfc_get_eq_hdl(i);
10919 eqhdl->irq = LPFC_VECTOR_MAP_EMPTY;
10920 eqhdl->phba = phba;
10921 }
10922}

/**
 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings
 * @phba: pointer to lpfc hba data structure.
 * @vectors: number of msix vectors allocated.
 *
 * The routine will figure out the CPU affinity assignment for every
 * MSI-X vector allocated for the HBA.
 * In addition, the CPU to IO channel mapping will be calculated
 * and the phba->sli4_hba.cpu_map array will reflect this.
 */
static void
lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
{
	int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;
	int max_phys_id, min_phys_id;
	int max_core_id, min_core_id;
	struct lpfc_vector_map_info *cpup;
	struct lpfc_vector_map_info *new_cpup;
#ifdef CONFIG_X86
	struct cpuinfo_x86 *cpuinfo;
#endif
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	struct lpfc_hdwq_stat *c_stat;
#endif

	max_phys_id = 0;
	min_phys_id = LPFC_VECTOR_MAP_EMPTY;
	max_core_id = 0;
	min_core_id = LPFC_VECTOR_MAP_EMPTY;

	/* Update CPU map with physical id and core id of each CPU */
	for_each_present_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];
#ifdef CONFIG_X86
		cpuinfo = &cpu_data(cpu);
		cpup->phys_id = cpuinfo->phys_proc_id;
		cpup->core_id = cpuinfo->cpu_core_id;
		if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
			cpup->flag |= LPFC_CPU_MAP_HYPER;
#else
		/* No distinction between CPUs for other platforms */
		cpup->phys_id = 0;
		cpup->core_id = cpu;
#endif

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"3328 CPU %d physid %d coreid %d flag x%x\n",
				cpu, cpup->phys_id, cpup->core_id, cpup->flag);

		if (cpup->phys_id > max_phys_id)
			max_phys_id = cpup->phys_id;
		if (cpup->phys_id < min_phys_id)
			min_phys_id = cpup->phys_id;

		if (cpup->core_id > max_core_id)
			max_core_id = cpup->core_id;
		if (cpup->core_id < min_core_id)
			min_core_id = cpup->core_id;
	}

	/* After looking at each irq vector assigned to this pcidev, it's
	 * possible to see that not ALL CPUs have been accounted for.
	 * Next we will set any unassigned (unaffinitized) cpu map
	 * entries to an IRQ on the same phys_id.
	 */
	first_cpu = cpumask_first(cpu_present_mask);
	start_cpu = first_cpu;

	for_each_present_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];

		/* Is this CPU entry unassigned */
		if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
			/* Mark CPU as IRQ not assigned by the kernel */
			cpup->flag |= LPFC_CPU_MAP_UNASSIGN;

			/* If so, find a new_cpup that's on the SAME
			 * phys_id as cpup. start_cpu will start where we
			 * left off so all unassigned entries don't get
			 * assigned the IRQ of the first entry.
			 */
			new_cpu = start_cpu;
			for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
				new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
				if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
				    (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) &&
				    (new_cpup->phys_id == cpup->phys_id))
					goto found_same;
				new_cpu = cpumask_next(
					new_cpu, cpu_present_mask);
				if (new_cpu == nr_cpumask_bits)
					new_cpu = first_cpu;
			}
			/* At this point, we leave the CPU as unassigned */
			continue;
found_same:
			/* We found a matching phys_id, so copy the IRQ info */
			cpup->eq = new_cpup->eq;

			/* Bump start_cpu to the next slot to minimize the
			 * chance of having multiple unassigned CPU entries
			 * selecting the same IRQ.
			 */
			start_cpu = cpumask_next(new_cpu, cpu_present_mask);
			if (start_cpu == nr_cpumask_bits)
				start_cpu = first_cpu;

			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3337 Set Affinity: CPU %d "
					"eq %d from peer cpu %d same "
					"phys_id (%d)\n",
					cpu, cpup->eq, new_cpu,
					cpup->phys_id);
		}
	}

	/* Set any unassigned cpu map entries to an IRQ on any phys_id */
	start_cpu = first_cpu;

	for_each_present_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];

		/* Is this entry unassigned */
		if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
			/* Mark it as IRQ not assigned by the kernel */
			cpup->flag |= LPFC_CPU_MAP_UNASSIGN;

			/* If so, find a new_cpup that's on ANY phys_id
			 * as the cpup. start_cpu will start where we
			 * left off so all unassigned entries don't get
			 * assigned the IRQ of the first entry.
			 */
			new_cpu = start_cpu;
			for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
				new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
				if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
				    (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY))
					goto found_any;
				new_cpu = cpumask_next(
					new_cpu, cpu_present_mask);
				if (new_cpu == nr_cpumask_bits)
					new_cpu = first_cpu;
			}
			/* We should never leave an entry unassigned */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3339 Set Affinity: CPU %d "
					"eq %d UNASSIGNED\n",
					cpu, cpup->eq);
			continue;
found_any:
			/* We found an available entry, copy the IRQ info */
			cpup->eq = new_cpup->eq;

			/* Bump start_cpu to the next slot to minimize the
			 * chance of having multiple unassigned CPU entries
			 * selecting the same IRQ.
			 */
			start_cpu = cpumask_next(new_cpu, cpu_present_mask);
			if (start_cpu == nr_cpumask_bits)
				start_cpu = first_cpu;

			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3338 Set Affinity: CPU %d "
					"eq %d from peer cpu %d (%d/%d)\n",
					cpu, cpup->eq, new_cpu,
					new_cpup->phys_id, new_cpup->core_id);
		}
	}

	/* Assign hdwq indices that are unique across all cpus in the map
	 * that are also FIRST_CPUs.
	 */
	idx = 0;
	for_each_present_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];

		/* Only FIRST IRQs get a hdwq index assignment. */
		if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
			continue;

		/* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */
		cpup->hdwq = idx;
		idx++;
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"3333 Set Affinity: CPU %d (phys %d core %d): "
				"hdwq %d eq %d flg x%x\n",
				cpu, cpup->phys_id, cpup->core_id,
				cpup->hdwq, cpup->eq, cpup->flag);
	}
	/* Associate a hdwq with each cpu_map entry.
	 * This will be 1 to 1 - hdwq to cpu, unless there are fewer
	 * hardware queues than CPUs. For that case we will just round-robin
	 * the available hardware queues as they get assigned to CPUs.
	 * The next_idx is the idx from the FIRST_CPU loop above to account
	 * for irq_chann < hdwq. The idx is used for round-robin assignments
	 * and needs to start at 0.
	 */
	next_idx = idx;
	start_cpu = 0;
	idx = 0;
	for_each_present_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];

		/* FIRST cpus are already mapped. */
		if (cpup->flag & LPFC_CPU_FIRST_IRQ)
			continue;

		/* If the cfg_irq_chann < cfg_hdw_queue, set the hdwq
		 * of the unassigned cpus to the next idx so that all
		 * hdw queues are fully utilized.
		 */
		if (next_idx < phba->cfg_hdw_queue) {
			cpup->hdwq = next_idx;
			next_idx++;
			continue;
		}

		/* Not a First CPU and all hdw_queues are used. Reuse a
		 * Hardware Queue for another CPU, so be smart about it
		 * and pick one that has its IRQ/EQ mapped to the same phys_id
		 * (CPU package) and core_id.
		 */
		new_cpu = start_cpu;
		for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
			new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
			if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
			    new_cpup->phys_id == cpup->phys_id &&
			    new_cpup->core_id == cpup->core_id) {
				goto found_hdwq;
			}
			new_cpu = cpumask_next(new_cpu, cpu_present_mask);
			if (new_cpu == nr_cpumask_bits)
				new_cpu = first_cpu;
		}

		/* If we can't match both phys_id and core_id,
		 * settle for just a phys_id match.
		 */
		new_cpu = start_cpu;
		for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
			new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
			if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
			    new_cpup->phys_id == cpup->phys_id)
				goto found_hdwq;

			new_cpu = cpumask_next(new_cpu, cpu_present_mask);
			if (new_cpu == nr_cpumask_bits)
				new_cpu = first_cpu;
		}

		/* Otherwise just round robin on cfg_hdw_queue */
		cpup->hdwq = idx % phba->cfg_hdw_queue;
		idx++;
		goto logit;
 found_hdwq:
		/* We found an available entry, copy the IRQ info */
		start_cpu = cpumask_next(new_cpu, cpu_present_mask);
		if (start_cpu == nr_cpumask_bits)
			start_cpu = first_cpu;
		cpup->hdwq = new_cpup->hdwq;
 logit:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"3335 Set Affinity: CPU %d (phys %d core %d): "
				"hdwq %d eq %d flg x%x\n",
				cpu, cpup->phys_id, cpup->core_id,
				cpup->hdwq, cpup->eq, cpup->flag);
	}

	/*
	 * Initialize the cpu_map slots for not-present cpus in case
	 * a cpu is hot-added. Perform a simple hdwq round robin assignment.
	 */
	idx = 0;
	for_each_possible_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu);
		c_stat->hdwq_no = cpup->hdwq;
#endif
		if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY)
			continue;

		cpup->hdwq = idx++ % phba->cfg_hdw_queue;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		c_stat->hdwq_no = cpup->hdwq;
#endif
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"3340 Set Affinity: not present "
				"CPU %d hdwq %d\n",
				cpu, cpup->hdwq);
	}

	/* The cpu_map array will be used later during initialization
	 * when EQ / CQ / WQs are allocated and configured.
	 */
	return;
}
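
/*
 * Illustrative sketch, not part of the driver. The search loops in
 * lpfc_cpu_affinity_check() above all rely on the same wrap-around walk of
 * cpu_present_mask: advance with cpumask_next() and wrap back to the first
 * present CPU once the mask is exhausted. Isolated, assuming only the
 * standard cpumask API, the idiom is:
 */
static inline unsigned int lpfc_sketch_next_present_cpu(unsigned int cpu)
{
	unsigned int next = cpumask_next(cpu, cpu_present_mask);

	/* ran off the end of the mask: wrap to the first present CPU */
	if (next >= nr_cpumask_bits)
		next = cpumask_first(cpu_present_mask);
	return next;
}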

/**
 * lpfc_cpuhp_get_eq
 *
 * @phba: pointer to lpfc hba data structure.
 * @cpu: cpu going offline
 * @eqlist: eq list to append to
 */
static int
lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
		  struct list_head *eqlist)
{
	const struct cpumask *maskp;
	struct lpfc_queue *eq;
	struct cpumask *tmp;
	u16 idx;

	tmp = kzalloc(cpumask_size(), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
		maskp = pci_irq_get_affinity(phba->pcidev, idx);
		if (!maskp)
			continue;
		/*
		 * If the irq is not affinitized to the cpu going offline,
		 * then we don't need to poll the eq attached to it.
		 */
		if (!cpumask_and(tmp, maskp, cpumask_of(cpu)))
			continue;
		/* Get the cpus that are online and are affinitized to
		 * this irq vector. If the count is more than 1 then
		 * cpuhp is not going to shut down this vector. Since
		 * this cpu has not gone offline yet, we need >1.
		 */
		cpumask_and(tmp, maskp, cpu_online_mask);
		if (cpumask_weight(tmp) > 1)
			continue;

		/* Now that we have an irq to shut down, get the eq
		 * mapped to this irq. Note: multiple hdwq's in
		 * the software can share an eq, but eventually
		 * only one eq will be mapped to this vector.
		 */
		eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
		list_add(&eq->_poll_list, eqlist);
	}
	kfree(tmp);
	return 0;
}
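
/*
 * Illustrative sketch, not part of the driver. The weight test in
 * lpfc_cpuhp_get_eq() above asks "is @cpu the last online CPU this vector
 * is affinitized to?". Condensed, assuming @maskp came from
 * pci_irq_get_affinity() and @tmp is caller-provided scratch storage:
 */
static inline bool lpfc_sketch_last_online_cpu(const struct cpumask *maskp,
					       unsigned int cpu,
					       struct cpumask *tmp)
{
	/* the vector is not bound to this CPU at all */
	if (!cpumask_test_cpu(cpu, maskp))
		return false;
	/* exactly one online CPU left in the affinity set means it's @cpu */
	cpumask_and(tmp, maskp, cpu_online_mask);
	return cpumask_weight(tmp) == 1;
}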

static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
{
	if (phba->sli_rev != LPFC_SLI_REV4)
		return;

	cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
					    &phba->cpuhp);
	/*
	 * Unregistering the instance doesn't stop the polling
	 * timer. Wait for the poll timer to retire.
	 */
	synchronize_rcu();
	del_timer_sync(&phba->cpuhp_poll_timer);
}

static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
{
	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
		return;

	__lpfc_cpuhp_remove(phba);
}

static void lpfc_cpuhp_add(struct lpfc_hba *phba)
{
	if (phba->sli_rev != LPFC_SLI_REV4)
		return;

	rcu_read_lock();

	if (!list_empty(&phba->poll_list))
		mod_timer(&phba->cpuhp_poll_timer,
			  jiffies + msecs_to_jiffies(LPFC_POLL_HB));

	rcu_read_unlock();

	cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state,
					 &phba->cpuhp);
}

static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval)
{
	if (phba->pport->load_flag & FC_UNLOADING) {
		*retval = -EAGAIN;
		return true;
	}

	if (phba->sli_rev != LPFC_SLI_REV4) {
		*retval = 0;
		return true;
	}

	/* proceed with the hotplug */
	return false;
}

/**
 * lpfc_irq_set_aff - set IRQ affinity
 * @eqhdl: EQ handle
 * @cpu: cpu to set affinity
 *
 **/
static inline void
lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu)
{
	cpumask_clear(&eqhdl->aff_mask);
	cpumask_set_cpu(cpu, &eqhdl->aff_mask);
	irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
	irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask);
}

/**
 * lpfc_irq_clear_aff - clear IRQ affinity
 * @eqhdl: EQ handle
 *
 **/
static inline void
lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl)
{
	cpumask_clear(&eqhdl->aff_mask);
	irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
}

/**
 * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event
 * @phba: pointer to HBA context object.
 * @cpu: cpu going offline/online
 * @offline: true, cpu is going offline. false, cpu is coming online.
 *
 * If cpu is going offline, we'll make a best effort to find the next
 * online cpu on the phba's original_mask and migrate all offlining IRQ
 * affinities.
 *
 * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu.
 *
 * Note: Call only if NUMA or NHT mode is enabled, otherwise rely on
 * PCI_IRQ_AFFINITY to auto-manage IRQ affinity.
 *
 **/
static void
lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
{
	struct lpfc_vector_map_info *cpup;
	struct cpumask *aff_mask;
	unsigned int cpu_select, cpu_next, idx;
	const struct cpumask *orig_mask;

	if (phba->irq_chann_mode == NORMAL_MODE)
		return;

	orig_mask = &phba->sli4_hba.irq_aff_mask;

	if (!cpumask_test_cpu(cpu, orig_mask))
		return;

	cpup = &phba->sli4_hba.cpu_map[cpu];

	if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
		return;

	if (offline) {
		/* Find next online CPU on original mask */
		cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true);
		cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);

		/* Found a valid CPU */
		if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) {
			/* Go through each eqhdl and ensure offlining
			 * cpu aff_mask is migrated
			 */
			for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
				aff_mask = lpfc_get_aff_mask(idx);

				/* Migrate affinity */
				if (cpumask_test_cpu(cpu, aff_mask))
					lpfc_irq_set_aff(lpfc_get_eq_hdl(idx),
							 cpu_select);
			}
		} else {
			/* Rely on irqbalance if no online CPUs left on NUMA */
			for (idx = 0; idx < phba->cfg_irq_chann; idx++)
				lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx));
		}
	} else {
		/* Migrate affinity back to this CPU */
		lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu);
	}
}
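
/*
 * Illustrative sketch, not part of the driver. lpfc_next_online_cpu() is
 * an lpfc helper defined elsewhere; a helper of that shape, walking @mask
 * from @start with wrap-around and returning nr_cpu_ids when nothing in
 * the mask is online, could look like:
 */
static inline unsigned int
lpfc_sketch_next_online_cpu(const struct cpumask *mask, unsigned int start)
{
	unsigned int cpu;

	for_each_cpu_wrap(cpu, mask, start)
		if (cpu_online(cpu))
			return cpu;
	return nr_cpu_ids;	/* no online CPU found in @mask */
}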

static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
	struct lpfc_queue *eq, *next;
	LIST_HEAD(eqlist);
	int retval;

	if (!phba) {
		WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
		return 0;
	}

	if (__lpfc_cpuhp_checks(phba, &retval))
		return retval;

	lpfc_irq_rebalance(phba, cpu, true);

	retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
	if (retval)
		return retval;

	/* start polling on these eq's */
	list_for_each_entry_safe(eq, next, &eqlist, _poll_list) {
		list_del_init(&eq->_poll_list);
		lpfc_sli4_start_polling(eq);
	}

	return 0;
}

static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
	struct lpfc_queue *eq, *next;
	unsigned int n;
	int retval;

	if (!phba) {
		WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
		return 0;
	}

	if (__lpfc_cpuhp_checks(phba, &retval))
		return retval;

	lpfc_irq_rebalance(phba, cpu, false);

	list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) {
		n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
		if (n == cpu)
			lpfc_sli4_stop_polling(eq);
	}

	return 0;
}
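
/*
 * Illustrative sketch, not part of the driver. The lpfc_cpu_online() /
 * lpfc_cpu_offline() callbacks above are written for the cpuhp
 * multi-instance framework; the state registration itself happens
 * elsewhere (at module init, per the lpfc_cpuhp_state variable). The
 * usual registration shape, with a hypothetical state name, is:
 */
static inline int lpfc_sketch_register_cpuhp(void)
{
	int rc;

	/* dynamic AP state; each HBA later adds its own hlist instance */
	rc = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				     "lpfc/sketch:online",
				     lpfc_cpu_online, lpfc_cpu_offline);
	return rc < 0 ? rc : 0;
}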

/**
 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI-X interrupt vectors to device
 * with SLI-4 interface spec. It also allocates MSI-X vectors and maps them
 * to cpus on the system.
 *
 * When cfg_irq_numa is enabled, the adapter will only allocate vectors for
 * the number of cpus on the same numa node as this adapter. The vectors are
 * allocated without requesting OS affinity mapping. A vector will be
 * allocated and assigned to each online and offline cpu. If the cpu is
 * online, then affinity will be set to that cpu. If the cpu is offline, then
 * affinity will be set to the nearest peer cpu within the numa node that is
 * online. If there are no online cpus within the numa node, affinity is not
 * assigned and the OS may do as it pleases. Note: cpu vector affinity mapping
 * is consistent with the way cpu online/offline is handled when cfg_irq_numa
 * is configured.
 *
 * If numa mode is not enabled and there is more than 1 vector allocated, then
 * the driver relies on the managed irq interface where the OS assigns
 * vector-to-cpu affinity. The driver will then use that affinity mapping to
 * set up its cpu mapping table.
 *
 * Return codes
 * 0 - successful
 * other values - error
 **/
static int
lpfc_sli4_enable_msix(struct lpfc_hba *phba)
{
	int vectors, rc, index;
	char *name;
	const struct cpumask *aff_mask = NULL;
	unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids;
	struct lpfc_vector_map_info *cpup;
	struct lpfc_hba_eq_hdl *eqhdl;
	const struct cpumask *maskp;
	unsigned int flags = PCI_IRQ_MSIX;

	/* Set up MSI-X multi-message vectors */
	vectors = phba->cfg_irq_chann;

	if (phba->irq_chann_mode != NORMAL_MODE)
		aff_mask = &phba->sli4_hba.irq_aff_mask;

	if (aff_mask) {
		cpu_cnt = cpumask_weight(aff_mask);
		vectors = min(phba->cfg_irq_chann, cpu_cnt);

		/* cpu: iterates over aff_mask including offline or online
		 * cpu_select: iterates over online aff_mask to set affinity
		 */
		cpu = cpumask_first(aff_mask);
		cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
	} else {
		flags |= PCI_IRQ_AFFINITY;
	}

	rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags);
	if (rc < 0) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0484 PCI enable MSI-X failed (%d)\n", rc);
		goto vec_fail_out;
	}
	vectors = rc;

	/* Assign MSI-X vectors to interrupt handlers */
	for (index = 0; index < vectors; index++) {
		eqhdl = lpfc_get_eq_hdl(index);
		name = eqhdl->handler_name;
		memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
		snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
			 LPFC_DRIVER_HANDLER_NAME"%d", index);

		eqhdl->idx = index;
		rc = request_irq(pci_irq_vector(phba->pcidev, index),
				 &lpfc_sli4_hba_intr_handler, 0,
				 name, eqhdl);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0486 MSI-X fast-path (%d) "
					"request_irq failed (%d)\n", index, rc);
			goto cfg_fail_out;
		}

		eqhdl->irq = pci_irq_vector(phba->pcidev, index);

		if (aff_mask) {
			/* If found a neighboring online cpu, set affinity */
			if (cpu_select < nr_cpu_ids)
				lpfc_irq_set_aff(eqhdl, cpu_select);

			/* Assign EQ to cpu_map */
			lpfc_assign_eq_map_info(phba, index,
						LPFC_CPU_FIRST_IRQ,
						cpu);

			/* Iterate to next offline or online cpu in aff_mask */
			cpu = cpumask_next(cpu, aff_mask);

			/* Find next online cpu in aff_mask to set affinity */
			cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
		} else if (vectors == 1) {
			cpu = cpumask_first(cpu_present_mask);
			lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ,
						cpu);
		} else {
			maskp = pci_irq_get_affinity(phba->pcidev, index);

			/* Loop through all CPUs associated with vector index */
			for_each_cpu_and(cpu, maskp, cpu_present_mask) {
				cpup = &phba->sli4_hba.cpu_map[cpu];

				/* If this is the first CPU that's assigned to
				 * this vector, set LPFC_CPU_FIRST_IRQ.
				 *
				 * With certain platforms it's possible that
				 * irq vectors are affinitized to all the
				 * cpus. This can result in each cpu_map.eq
				 * being set to the last vector, resulting in
				 * overwrite of all the previous cpu_map.eq.
				 * Ensure that each vector receives a place in
				 * cpu_map. A later call to
				 * lpfc_cpu_affinity_check will ensure we are
				 * nicely balanced out.
				 */
				if (cpup->eq != LPFC_VECTOR_MAP_EMPTY)
					continue;
				lpfc_assign_eq_map_info(phba, index,
							LPFC_CPU_FIRST_IRQ,
							cpu);
				break;
			}
		}
	}

	if (vectors != phba->cfg_irq_chann) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3238 Reducing IO channels to match number of "
				"MSI-X vectors, requested %d got %d\n",
				phba->cfg_irq_chann, vectors);
		if (phba->cfg_irq_chann > vectors)
			phba->cfg_irq_chann = vectors;
	}

	return rc;

cfg_fail_out:
	/* free the irq already requested */
	for (--index; index >= 0; index--) {
		eqhdl = lpfc_get_eq_hdl(index);
		lpfc_irq_clear_aff(eqhdl);
		irq_set_affinity_hint(eqhdl->irq, NULL);
		free_irq(eqhdl->irq, eqhdl);
	}

	/* Unconfigure MSI-X capability structure */
	pci_free_irq_vectors(phba->pcidev);

vec_fail_out:
	return rc;
}
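
/*
 * Illustrative sketch, not part of the driver. pci_alloc_irq_vectors()
 * takes a [min, max] range and returns the number of vectors actually
 * granted or a negative errno, which is why lpfc_sli4_enable_msix()
 * re-checks "vectors" against cfg_irq_chann after the call. The minimal
 * usage shape, with a hypothetical @want count:
 */
static inline int lpfc_sketch_alloc_vectors(struct pci_dev *pdev, int want)
{
	int got = pci_alloc_irq_vectors(pdev, 1, want, PCI_IRQ_MSIX);

	if (got < 0)
		return got;	/* not even one vector was available */
	/* the caller must be prepared to run with got < want */
	return got;
}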

/**
 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI interrupt mode to device with
 * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is
 * called to enable the MSI vector. The device driver is responsible for
 * calling request_irq() to register the MSI vector with an interrupt
 * handler, which is done in this function.
 *
 * Return codes
 * 0 - successful
 * other values - error
 **/
static int
lpfc_sli4_enable_msi(struct lpfc_hba *phba)
{
	int rc, index;
	unsigned int cpu;
	struct lpfc_hba_eq_hdl *eqhdl;

	rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1,
				   PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
	if (rc > 0)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0487 PCI enable MSI mode success.\n");
	else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0488 PCI enable MSI mode failed (%d)\n", rc);
		return rc ? rc : -1;
	}

	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
			 0, LPFC_DRIVER_NAME, phba);
	if (rc) {
		pci_free_irq_vectors(phba->pcidev);
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0490 MSI request_irq failed (%d)\n", rc);
		return rc;
	}

	eqhdl = lpfc_get_eq_hdl(0);
	eqhdl->irq = pci_irq_vector(phba->pcidev, 0);

	cpu = cpumask_first(cpu_present_mask);
	lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu);

	for (index = 0; index < phba->cfg_irq_chann; index++) {
		eqhdl = lpfc_get_eq_hdl(index);
		eqhdl->idx = index;
	}

	return 0;
}

/**
 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
 *
 * This routine is invoked to enable device interrupt and associate driver's
 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
 * interface spec. Depending on the interrupt mode configured to the driver,
 * the driver will try to fall back from the configured interrupt mode to an
 * interrupt mode which is supported by the platform, kernel, and device in
 * the order of:
 * MSI-X -> MSI -> IRQ.
 *
 * Return codes
 * 0 - successful
 * other values - error
 **/
static uint32_t
lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
{
	uint32_t intr_mode = LPFC_INTR_ERROR;
	int retval, idx;

	if (cfg_mode == 2) {
		/* Preparation before conf_msi mbox cmd */
		retval = 0;
		if (!retval) {
			/* Now, try to enable MSI-X interrupt mode */
			retval = lpfc_sli4_enable_msix(phba);
			if (!retval) {
				/* Indicate initialization to MSI-X mode */
				phba->intr_type = MSIX;
				intr_mode = 2;
			}
		}
	}

	/* Fallback to MSI if MSI-X initialization failed */
	if (cfg_mode >= 1 && phba->intr_type == NONE) {
		retval = lpfc_sli4_enable_msi(phba);
		if (!retval) {
			/* Indicate initialization to MSI mode */
			phba->intr_type = MSI;
			intr_mode = 1;
		}
	}

	/* Fallback to INTx if both MSI-X/MSI initialization failed */
	if (phba->intr_type == NONE) {
		retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
		if (!retval) {
			struct lpfc_hba_eq_hdl *eqhdl;
			unsigned int cpu;

			/* Indicate initialization to INTx mode */
			phba->intr_type = INTx;
			intr_mode = 0;

			eqhdl = lpfc_get_eq_hdl(0);
			eqhdl->irq = pci_irq_vector(phba->pcidev, 0);

			cpu = cpumask_first(cpu_present_mask);
			lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ,
						cpu);
			for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
				eqhdl = lpfc_get_eq_hdl(idx);
				eqhdl->idx = idx;
			}
		}
	}
	return intr_mode;
}

/**
 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable device interrupt and disassociate
 * the driver's interrupt handler(s) from interrupt vector(s) to device
 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
 * will release the interrupt vector(s) for the message signaled interrupt.
 **/
static void
lpfc_sli4_disable_intr(struct lpfc_hba *phba)
{
	/* Disable the currently initialized interrupt mode */
	if (phba->intr_type == MSIX) {
		int index;
		struct lpfc_hba_eq_hdl *eqhdl;

		/* Free up MSI-X multi-message vectors */
		for (index = 0; index < phba->cfg_irq_chann; index++) {
			eqhdl = lpfc_get_eq_hdl(index);
			lpfc_irq_clear_aff(eqhdl);
			irq_set_affinity_hint(eqhdl->irq, NULL);
			free_irq(eqhdl->irq, eqhdl);
		}
	} else {
		free_irq(phba->pcidev->irq, phba);
	}

	pci_free_irq_vectors(phba->pcidev);

	/* Reset interrupt management states */
	phba->intr_type = NONE;
	phba->sli.slistat.sli_intr = 0;
}

/**
 * lpfc_unset_hba - Unset SLI3 hba device initialization
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the HBA device initialization steps to
 * a device with SLI-3 interface spec.
 **/
static void
lpfc_unset_hba(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	spin_lock_irq(shost->host_lock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(shost->host_lock);

	kfree(phba->vpi_bmask);
	kfree(phba->vpi_ids);

	lpfc_stop_hba_timers(phba);

	phba->pport->work_port_events = 0;

	lpfc_sli_hba_down(phba);

	lpfc_sli_brdrestart(phba);

	lpfc_sli_disable_intr(phba);

	return;
}

/**
 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI4 code path to wait for completion
 * of device's XRIs exchange busy. It will check the XRI exchange busy
 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after
 * that, it will check the XRI exchange busy on outstanding FCP and ELS
 * I/Os every 30 seconds, log error message, and wait forever. Only when
 * all XRI exchange busy complete, the driver unload shall proceed with
 * invoking the function reset ioctl mailbox command to the CNA and
 * the rest of the driver unload resource release.
 **/
static void
lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hdw_queue *qp;
	int idx, ccnt;
	int wait_time = 0;
	int io_xri_cmpl = 1;
	int nvmet_xri_cmpl = 1;
	int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);

	/* Driver just aborted IOs during the hba_unset process. Pause
	 * here to give the HBA time to complete the IO and get entries
	 * into the abts lists.
	 */
	msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);

	/* Wait for NVME pending IO to flush back to transport. */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		lpfc_nvme_wait_for_io_drain(phba);

	ccnt = 0;
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];
		io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list);
		if (!io_xri_cmpl) /* if list is NOT empty */
			ccnt++;
	}
	if (ccnt)
		io_xri_cmpl = 0;

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		nvmet_xri_cmpl =
			list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
	}

	while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) {
		if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
			if (!nvmet_xri_cmpl)
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"6424 NVMET XRI exchange busy "
						"wait time: %d seconds.\n",
						wait_time/1000);
			if (!io_xri_cmpl)
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"6100 IO XRI exchange busy "
						"wait time: %d seconds.\n",
						wait_time/1000);
			if (!els_xri_cmpl)
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"2878 ELS XRI exchange busy "
						"wait time: %d seconds.\n",
						wait_time/1000);
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
		} else {
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
		}

		ccnt = 0;
		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
			qp = &phba->sli4_hba.hdwq[idx];
			io_xri_cmpl = list_empty(
				&qp->lpfc_abts_io_buf_list);
			if (!io_xri_cmpl) /* if list is NOT empty */
				ccnt++;
		}
		if (ccnt)
			io_xri_cmpl = 0;

		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			nvmet_xri_cmpl = list_empty(
				&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
		}
		els_xri_cmpl =
			list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);

	}
}
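
/*
 * Illustrative sketch, not part of the driver. The wait loop above polls
 * quickly (every LPFC_XRI_EXCH_BUSY_WAIT_T1 ms) until the
 * LPFC_XRI_EXCH_BUSY_WAIT_TMO window expires, then drops to a slow,
 * logging poll (LPFC_XRI_EXCH_BUSY_WAIT_T2) that runs until completion.
 * The generic two-speed shape, with hypothetical parameter names:
 */
static inline void lpfc_sketch_two_speed_wait(bool (*done)(void *), void *arg,
					      int fast_ms, int window_ms,
					      int slow_ms)
{
	int waited = 0;

	while (!done(arg)) {
		if (waited > window_ms) {
			msleep(slow_ms);	/* slow poll; caller may log */
			waited += slow_ms;
		} else {
			msleep(fast_ms);	/* fast poll early on */
			waited += fast_ms;
		}
	}
}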

/**
 * lpfc_sli4_hba_unset - Unset the fcoe hba
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI4 code path to reset the HBA's FCoE
 * function. The caller is not required to hold any lock. This routine
 * issues PCI function reset mailbox command to reset the FCoE function.
 * At the end of the function, it calls lpfc_hba_down_post function to
 * free any pending commands.
 **/
static void
lpfc_sli4_hba_unset(struct lpfc_hba *phba)
{
	int wait_cnt = 0;
	LPFC_MBOXQ_t *mboxq;
	struct pci_dev *pdev = phba->pcidev;

	lpfc_stop_hba_timers(phba);
	if (phba->pport)
		phba->sli4_hba.intr_enable = 0;

	/*
	 * Gracefully wait out the potential current outstanding asynchronous
	 * mailbox command.
	 */

	/* First, block any pending async mailbox command from being posted */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);
	/* Now, trying to wait it out if we can */
	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		msleep(10);
		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
			break;
	}
	/* Forcefully release the outstanding mailbox command if timed out */
	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_lock_irq(&phba->hbalock);
		mboxq = phba->sli.mbox_active;
		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
		__lpfc_mbox_cmpl_put(phba, mboxq);
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		phba->sli.mbox_active = NULL;
		spin_unlock_irq(&phba->hbalock);
	}

	/* Abort all iocbs associated with the hba */
	lpfc_sli_hba_iocb_abort(phba);

	/* Wait for completion of device XRI exchange busy */
	lpfc_sli4_xri_exchange_busy_wait(phba);

	/* per-phba callback de-registration for hotplug event */
	if (phba->pport)
		lpfc_cpuhp_remove(phba);

	/* Disable PCI subsystem interrupt */
	lpfc_sli4_disable_intr(phba);

	/* Disable SR-IOV if enabled */
	if (phba->cfg_sriov_nr_virtfn)
		pci_disable_sriov(pdev);

	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);

	/* Disable FW logging to host memory */
	lpfc_ras_stop_fwlog(phba);

	/* Unset the queues shared with the hardware then release all
	 * allocated resources.
	 */
	lpfc_sli4_queue_unset(phba);
	lpfc_sli4_queue_destroy(phba);

	/* Reset SLI4 HBA FCoE function */
	lpfc_pci_function_reset(phba);

	/* Free RAS DMA memory */
	if (phba->ras_fwlog.ras_enabled)
		lpfc_sli4_ras_dma_free(phba);

	/* Stop the SLI4 device port */
	if (phba->pport)
		phba->pport->work_port_events = 0;
}

/**
 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
 *
 * This function is called in the SLI4 code path to read the port's
 * sli4 capabilities.
 *
 * This function may be called from any context that can block-wait
 * for the completion. The expectation is that this routine is called
 * typically from probe_one or from the online routine.
 **/
int
lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc;
	struct lpfc_mqe *mqe = &mboxq->u.mqe;
	struct lpfc_pc_sli4_params *sli4_params;
	uint32_t mbox_tmo;
	int length;
	bool exp_wqcq_pages = true;
	struct lpfc_sli4_parameters *mbx_sli4_parameters;

	/*
	 * By default, the driver assumes the SLI4 port requires RPI
	 * header postings. The SLI4_PARAM response will correct this
	 * assumption.
	 */
	phba->sli4_hba.rpi_hdrs_in_use = 1;

	/* Read the port's SLI4 Config Parameters */
	length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
			 length, LPFC_SLI4_MBX_EMBED);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	}
	if (unlikely(rc))
		return rc;
	sli4_params = &phba->sli4_hba.pc_sli4_params;
	mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
	sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
	sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
	sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
	sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
					     mbx_sli4_parameters);
	sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
					     mbx_sli4_parameters);
	if (bf_get(cfg_phwq, mbx_sli4_parameters))
		phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
	else
		phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
	sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
	sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope,
					   mbx_sli4_parameters);
	sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
	sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
	sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
	sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
	sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
	sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters);
	sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
	sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
	sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
	sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters);
	sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
					    mbx_sli4_parameters);
	sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
	sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
					   mbx_sli4_parameters);
	phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
	phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);

	/* Check for Extended Pre-Registered SGL support */
	phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters);

	/* Check for firmware nvme support */
	rc = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
	      bf_get(cfg_xib, mbx_sli4_parameters));

	if (rc) {
		/* Save this to indicate the Firmware supports NVME */
		sli4_params->nvme = 1;

		/* Firmware NVME support, check driver FC4 NVME support */
		if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
					"6133 Disabling NVME support: "
					"FC4 type not supported: x%x\n",
					phba->cfg_enable_fc4_type);
			goto fcponly;
		}
	} else {
		/* No firmware NVME support, check driver FC4 NVME support */
		sli4_params->nvme = 0;
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
					"6101 Disabling NVME support: Not "
					"supported by firmware (%d %d) x%x\n",
					bf_get(cfg_nvme, mbx_sli4_parameters),
					bf_get(cfg_xib, mbx_sli4_parameters),
					phba->cfg_enable_fc4_type);
fcponly:
			phba->nvme_support = 0;
			phba->nvmet_support = 0;
			phba->cfg_nvmet_mrq = 0;
			phba->cfg_nvme_seg_cnt = 0;

			/* If no FC4 type support, move to just SCSI support */
			if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
				return -ENODEV;
			phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
		}
	}

	/* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
	 * accommodate 512K and 1M IOs in a single nvme buf.
	 */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;

	/* Only embed PBDE for if_type 6, PBDE support requires xib be set */
	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
	    LPFC_SLI_INTF_IF_TYPE_6) || (!bf_get(cfg_xib, mbx_sli4_parameters)))
		phba->cfg_enable_pbde = 0;

	/*
	 * To support Suppress Response feature we must satisfy 3 conditions.
	 * lpfc_suppress_rsp module parameter must be set (default).
	 * In SLI4-Parameters Descriptor:
	 * Extended Inline Buffers (XIB) must be supported.
	 * Suppress Response IU Not Supported (SRIUNS) must NOT be supported
	 * (double negative).
	 */
	if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) &&
	    !(bf_get(cfg_nosr, mbx_sli4_parameters)))
		phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
	else
		phba->cfg_suppress_rsp = 0;

	if (bf_get(cfg_eqdr, mbx_sli4_parameters))
		phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;

	/* Make sure that sge_supp_len can be handled by the driver */
	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;

	/*
	 * Check whether the adapter supports an embedded copy of the
	 * FCP CMD IU within the WQE for FCP_Ixxx commands. In order
	 * to use this option, 128-byte WQEs must be used.
	 */
	if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
		phba->fcp_embed_io = 1;
	else
		phba->fcp_embed_io = 0;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
			"6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n",
			bf_get(cfg_xib, mbx_sli4_parameters),
			phba->cfg_enable_pbde,
			phba->fcp_embed_io, phba->nvme_support,
			phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);

	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_2) &&
	    (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
		 LPFC_SLI_INTF_FAMILY_LNCR_A0))
		exp_wqcq_pages = false;

	if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
	    (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
	    exp_wqcq_pages &&
	    (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
		phba->enab_exp_wqcq_pages = 1;
	else
		phba->enab_exp_wqcq_pages = 0;
	/*
	 * Check if the SLI port supports MDS Diagnostics
	 */
	if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
		phba->mds_diags_support = 1;
	else
		phba->mds_diags_support = 0;

	/*
	 * Check if the SLI port supports NSLER
	 */
	if (bf_get(cfg_nsler, mbx_sli4_parameters))
		phba->nsler = 1;
	else
		phba->nsler = 0;

	return 0;
}
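
/*
 * Illustrative sketch, not part of the driver. The bf_get() accessors used
 * throughout lpfc_get_sli4_parameters() extract a named bitfield from a
 * mailbox word; the generic shape, with hypothetical @shift/@mask values,
 * is simply:
 */
static inline u32 lpfc_sketch_get_field(u32 word, u32 shift, u32 mask)
{
	/* shift the field down, then mask off the neighboring bits */
	return (word >> shift) & mask;
}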

/**
 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be called to attach a device with SLI-3 interface spec
 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
 * presented on the PCI bus, the kernel PCI subsystem looks at the PCI
 * device-specific information and checks whether the driver can support
 * this kind of device. If the match is successful, the driver core
 * invokes this routine. If this routine determines it can claim the HBA, it
 * does all the initialization that it needs to do to handle the HBA properly.
 *
 * Return code
 * 0 - driver can claim the device
 * negative value - driver can not claim the device
 **/
static int
lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba *phba;
	struct lpfc_vport *vport = NULL;
	struct Scsi_Host *shost = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error)
		goto out_free_phba;

	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-3 specific device PCI memory space */
	error = lpfc_sli_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1402 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up SLI-3 specific device driver resources */
	error = lpfc_sli_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1404 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s3;
	}

	/* Initialize and populate the iocb list per host */

	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1405 Failed to initialize iocb list.\n");
		goto out_unset_driver_resource_s3;
	}

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1406 Failed to set up driver resource.\n");
		goto out_free_iocb_list;
	}

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1407 Failed to create scsi host.\n");
		goto out_unset_driver_resource;
	}

	/* Configure sysfs attributes */
	vport = phba->pport;
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1476 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
	/* Now, trying to enable interrupt and bring up the device */
	cfg_mode = phba->cfg_use_msi;
	while (true) {
		/* Put device to a known state before enabling interrupt */
		lpfc_stop_port(phba);
		/* Configure and enable interrupt */
		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
		if (intr_mode == LPFC_INTR_ERROR) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0431 Failed to enable interrupt.\n");
			error = -ENODEV;
			goto out_free_sysfs_attr;
		}
		/* SLI-3 HBA setup */
		if (lpfc_sli_hba_setup(phba)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"1477 Failed to set up hba\n");
			error = -ENODEV;
			goto out_remove_device;
		}

		/* Wait 50ms for the interrupts of previous mailbox commands */
		msleep(50);
		/* Check active interrupts on message signaled interrupts */
		if (intr_mode == 0 ||
		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
			/* Log the current active interrupt mode */
			phba->intr_mode = intr_mode;
			lpfc_log_intr_mode(phba, intr_mode);
			break;
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0447 Configure interrupt mode (%d) "
					"failed active interrupt test.\n",
					intr_mode);
			/* Disable the current interrupt mode */
			lpfc_sli_disable_intr(phba);
			/* Try next level of interrupt mode */
			cfg_mode = --intr_mode;
		}
	}

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);

	return 0;

out_remove_device:
	lpfc_unset_hba(phba);
out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_free_iocb_list:
	lpfc_free_iocb_list(phba);
out_unset_driver_resource_s3:
	lpfc_sli_driver_resource_unset(phba);
out_unset_pci_mem_s3:
	lpfc_sli_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
	if (shost)
		scsi_host_put(shost);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}
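
/*
 * Illustrative sketch, not part of the driver. The probe routine above
 * follows the standard kernel goto-unwind idiom: each failure label
 * releases exactly what was acquired before the jump, in reverse order.
 * Reduced to two steps, using helpers defined earlier in this file:
 */
static inline int lpfc_sketch_probe_unwind(struct lpfc_hba *phba)
{
	int error;

	error = lpfc_enable_pci_dev(phba);
	if (error)
		goto out;
	error = lpfc_sli_pci_mem_setup(phba);
	if (error)
		goto out_disable_pci;
	return 0;

out_disable_pci:
	lpfc_disable_pci_dev(phba);	/* undo lpfc_enable_pci_dev() only */
out:
	return error;
}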

/**
 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
 * @pdev: pointer to PCI device
 *
 * This routine is to be called to detach a device with SLI-3 interface
 * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec
 * is removed from the PCI bus, it performs all the necessary cleanup for the
 * HBA device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba *phba = vport->phba;
	int i;

	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			fc_vport_terminate(vports[i]->fc_vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host and then SCSI host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	lpfc_cleanup(vport);

	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA.
	 */

	/* HBA interrupt will be disabled after this call */
	lpfc_sli_hba_down(phba);
	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);
	/* Final cleanup of txcmplq and reset the HBA */
	lpfc_sli_brdrestart(phba);

	kfree(phba->vpi_bmask);
	kfree(phba->vpi_ids);

	lpfc_stop_hba_timers(phba);
	spin_lock_irq(&phba->port_list_lock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->port_list_lock);

	lpfc_debugfs_terminate(vport);

	/* Disable SR-IOV if enabled */
	if (phba->cfg_sriov_nr_virtfn)
		pci_disable_sriov(pdev);

	/* Disable interrupt */
	lpfc_sli_disable_intr(phba);

	scsi_host_put(shost);

	/*
	 * Call scsi_free before mem_free since scsi bufs are released to their
	 * corresponding pools here.
	 */
	lpfc_scsi_free(phba);
	lpfc_free_iocb_list(phba);

	lpfc_mem_free_all(phba);

	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
			  phba->hbqslimp.virt, phba->hbqslimp.phys);

	/* Free resources associated with SLI2 interface */
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);

	/* unmap adapter SLIM and Control Registers */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);

	lpfc_hba_free(phba);

	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}
12487
12488/**
12489 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
12490 * @pdev: pointer to PCI device
12491 * @msg: power management message
12492 *
12493 * This routine is to be called from the kernel's PCI subsystem to support
12494 * system Power Management (PM) to device with SLI-3 interface spec. When
12495 * PM invokes this method, it quiesces the device by stopping the driver's
12496 * worker thread for the device, turning off device's interrupt and DMA,
12497 * and bring the device offline. Note that as the driver implements the
12498 * minimum PM requirements to a power-aware driver's PM support for the
12499 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
12500 * to the suspend() method call will be treated as SUSPEND and the driver will
12501 * fully reinitialize its device during resume() method call, the driver will
12502 * set device to PCI_D3hot state in PCI config space instead of setting it
12503 * according to the @msg provided by the PM.
12504 *
12505 * Return code
12506 * 0 - driver suspended the device
12507 * Error otherwise
12508 **/
12509static int
12510lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
12511{
12512 struct Scsi_Host *shost = pci_get_drvdata(pdev);
12513 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12514
12515 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12516 "0473 PCI device Power Management suspend.\n");
12517
12518 /* Bring down the device */
12519 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
12520 lpfc_offline(phba);
12521 kthread_stop(phba->worker_thread);
12522
12523 /* Disable interrupt from device */
12524 lpfc_sli_disable_intr(phba);
12525
12526 /* Save device state to PCI config space */
12527 pci_save_state(pdev);
12528 pci_set_power_state(pdev, PCI_D3hot);
12529
12530 return 0;
12531}
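
/*
 * Illustrative sketch (not part of the driver): legacy PCI power management
 * callbacks such as the suspend/resume pair above are wired into the
 * driver's struct pci_driver. The shape, with hypothetical names, is:
 *
 *	static struct pci_driver example_pci_driver = {
 *		.name		= "example",
 *		.probe		= example_pci_probe_one,
 *		.remove		= example_pci_remove_one,
 *		.suspend	= example_pci_suspend_one,	// pm_message_t API
 *		.resume		= example_pci_resume_one,
 *	};
 *
 * The PCI core then invokes .suspend on system sleep and .resume on wakeup;
 * newer drivers express the same pairing through dev_pm_ops instead.
 */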

/**
 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) on a device with SLI-3 interface spec. When
 * PM invokes this method, it restores the device's PCI config space state,
 * fully reinitializes the device, and brings it online. Note that the driver
 * implements only the minimum PM requirements of a power-aware driver: all
 * possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() method
 * are treated as SUSPEND, and the driver fully reinitializes its device
 * during the resume() method call; therefore the device is set to PCI_D0
 * directly in PCI config space before restoring the state.
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
static int
lpfc_pci_resume_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0452 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * the device's saved_state flag, we need to save the restored state
	 * again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0434 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0430 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}

/**
 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot recovery.
 * It aborts all the outstanding SCSI I/Os to the pci device.
 **/
static void
lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2723 PCI channel I/O abort preparing for recovery\n");

	/*
	 * There may be errored I/Os through the HBA; abort all I/Os on the
	 * txcmplq and let the SCSI mid-layer retry them to recover.
	 */
	lpfc_sli_abort_fcp_rings(phba);
}

/**
 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
 **/
static void
lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2710 PCI channel disable preparing for reset\n");

	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
	lpfc_sli_flush_io_rings(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Disable interrupt and pci device */
	lpfc_sli_disable_intr(phba);
	pci_disable_device(phba->pcidev);
}

/**
 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for the PCI slot being
 * permanently disabled. It blocks the SCSI transport layer traffic and
 * flushes the FCP pending I/Os.
 **/
static void
lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2711 PCI channel permanent disable for failure\n");
	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding SCSI I/Os */
	lpfc_sli_flush_io_rings(phba);
}

/**
 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for I/O error handling to
 * device with SLI-3 interface spec. This function is called by the PCI
 * subsystem after a PCI bus error affecting this device has been detected.
 * When this function is invoked, it will need to stop all the I/Os and
 * interrupt(s) to the device. Once that is done, it will return
 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
 * as desired.
 *
 * Return codes
 * 	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
		lpfc_sli_prep_dev_for_recover(phba);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		lpfc_sli_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent failure, prepare for device down */
		lpfc_sli_prep_dev_for_perm_failure(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		/* Unknown state, prepare and request slot reset */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0472 Unknown PCI error state: x%x\n", state);
		lpfc_sli_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	}
}

/**
 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to
 * device with SLI-3 interface spec. This is called after PCI bus has been
 * reset to restart the PCI card from scratch, as if from a cold-boot.
 * During the PCI subsystem error recovery, after the driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method
 * to recover the device. This function will initialize the HBA device and
 * enable the interrupt, but it will just put the HBA in offline state
 * without passing any I/O traffic.
 *
 * Return codes
 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 */
static pci_ers_result_t
lpfc_io_slot_reset_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
		       "PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * the device's saved_state flag, we need to save the restored state
	 * again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0427 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Take device offline, it will perform cleanup */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-3 interface spec. It is called when kernel error recovery tells
 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
 * error recovery. After this call, traffic can start to flow from this device
 * again.
 */
static void
lpfc_io_resume_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/* Bring device online, it will be no-op for non-fatal error resume */
	lpfc_online(phba);
}

/**
 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
 * @phba: pointer to lpfc hba data structure.
 *
 * Returns the number of ELS/CT IOCBs to reserve.
 **/
int
lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
{
	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (max_xri <= 100)
			return 10;
		else if (max_xri <= 256)
			return 25;
		else if (max_xri <= 512)
			return 50;
		else if (max_xri <= 1024)
			return 100;
		else if (max_xri <= 1536)
			return 150;
		else if (max_xri <= 2048)
			return 200;
		else
			return 250;
	} else
		return 0;
}
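
/*
 * Worked example for the tiering above (illustrative): a port reporting
 * max_cfg_param.max_xri == 768 falls into the "<= 1024" bucket, so
 *
 *	reserve = lpfc_sli4_get_els_iocb_cnt(phba);	// 100 when max_xri == 768
 *
 * while max_xri == 4096 falls through every bucket and hits the 250-IOCB
 * cap. Non-SLI-4 ports always get 0: SLI-3 does not reserve XRIs this way.
 */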

/**
 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve
 * @phba: pointer to lpfc hba data structure.
 *
 * Returns the number of ELS/CT + NVMET IOCBs to reserve.
 **/
int
lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
{
	int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);

	if (phba->nvmet_support)
		max_xri += LPFC_NVMET_BUF_POST;
	return max_xri;
}


static int
lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
	uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
	const struct firmware *fw)
{
	int rc;

	/* Three cases: (1) FW was not supported on the detected adapter.
	 * (2) FW update has been locked out administratively.
	 * (3) Some other error during FW update.
	 * In each case, an unmaskable message is written to the console
	 * for admin diagnosis.
	 */
	if (offset == ADD_STATUS_FW_NOT_SUPPORTED ||
	    (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
	     magic_number != MAGIC_NUMBER_G6) ||
	    (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC &&
	     magic_number != MAGIC_NUMBER_G7)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3030 This firmware version is not supported on"
				" this HBA model. Device:%x Magic:%x Type:%x "
				"ID:%x Size %d %zd\n",
				phba->pcidev->device, magic_number, ftype, fid,
				fsize, fw->size);
		rc = -EINVAL;
	} else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3021 Firmware downloads have been prohibited "
				"by a system configuration setting on "
				"Device:%x Magic:%x Type:%x ID:%x Size %d "
				"%zd\n",
				phba->pcidev->device, magic_number, ftype, fid,
				fsize, fw->size);
		rc = -EACCES;
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3022 FW Download failed. Add Status x%x "
				"Device:%x Magic:%x Type:%x ID:%x Size %d "
				"%zd\n",
				offset, phba->pcidev->device, magic_number,
				ftype, fid, fsize, fw->size);
		rc = -EIO;
	}
	return rc;
}
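
/*
 * Illustrative sketch (not part of the driver): the first branch above is a
 * per-generation magic-number gate. Factored into a hypothetical predicate,
 * it reads:
 *
 *	static bool fw_image_matches_asic(uint32_t device, uint32_t magic)
 *	{
 *		if (device == PCI_DEVICE_ID_LANCER_G6_FC)
 *			return magic == MAGIC_NUMBER_G6;
 *		if (device == PCI_DEVICE_ID_LANCER_G7_FC)
 *			return magic == MAGIC_NUMBER_G7;
 *		return true;	// other ASICs are not gated on magic here
 *	}
 *
 * The distinct return codes let callers separate "wrong image" (-EINVAL)
 * from "administratively locked out" (-EACCES) from "download failed" (-EIO).
 */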

/**
 * lpfc_write_firmware - attempt to write a firmware image to the port
 * @fw: pointer to firmware image returned from request_firmware.
 * @context: pointer to the lpfc hba data structure, passed through as the
 * opaque context cookie of request_firmware_nowait.
 *
 **/
static void
lpfc_write_firmware(const struct firmware *fw, void *context)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)context;
	char fwrev[FW_REV_STR_SIZE];
	struct lpfc_grp_hdr *image;
	struct list_head dma_buffer_list;
	int i, rc = 0;
	struct lpfc_dmabuf *dmabuf, *next;
	uint32_t offset = 0, temp_offset = 0;
	uint32_t magic_number, ftype, fid, fsize;

	/* It can be null in no-wait mode, sanity check */
	if (!fw) {
		rc = -ENXIO;
		goto out;
	}
	image = (struct lpfc_grp_hdr *)fw->data;

	magic_number = be32_to_cpu(image->magic_number);
	ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
	fid = bf_get_be32(lpfc_grp_hdr_id, image);
	fsize = be32_to_cpu(image->size);

	INIT_LIST_HEAD(&dma_buffer_list);
	lpfc_decode_firmware_rev(phba, fwrev, 1);
	if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3023 Updating Firmware, Current Version:%s "
				"New Version:%s\n",
				fwrev, image->revision);
		for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
			dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
					 GFP_KERNEL);
			if (!dmabuf) {
				rc = -ENOMEM;
				goto release_out;
			}
			dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
							  SLI4_PAGE_SIZE,
							  &dmabuf->phys,
							  GFP_KERNEL);
			if (!dmabuf->virt) {
				kfree(dmabuf);
				rc = -ENOMEM;
				goto release_out;
			}
			list_add_tail(&dmabuf->list, &dma_buffer_list);
		}
		while (offset < fw->size) {
			temp_offset = offset;
			list_for_each_entry(dmabuf, &dma_buffer_list, list) {
				if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
					memcpy(dmabuf->virt,
					       fw->data + temp_offset,
					       fw->size - temp_offset);
					temp_offset = fw->size;
					break;
				}
				memcpy(dmabuf->virt, fw->data + temp_offset,
				       SLI4_PAGE_SIZE);
				temp_offset += SLI4_PAGE_SIZE;
			}
			rc = lpfc_wr_object(phba, &dma_buffer_list,
					    (fw->size - offset), &offset);
			if (rc) {
				rc = lpfc_log_write_firmware_error(phba, offset,
								   magic_number,
								   ftype,
								   fid,
								   fsize,
								   fw);
				goto release_out;
			}
		}
		rc = offset;
	} else
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3029 Skipped Firmware update, Current "
				"Version:%s New Version:%s\n",
				fwrev, image->revision);

release_out:
	list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
		list_del(&dmabuf->list);
		dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}
	release_firmware(fw);
out:
	if (rc < 0)
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3062 Firmware update error, status %d.\n", rc);
	else
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3024 Firmware update success: size %d.\n", rc);
}
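
/*
 * Illustrative sketch (not part of the driver): the download loop above
 * copies the image into a fixed ring of SLI4_PAGE_SIZE DMA buffers and lets
 * lpfc_wr_object() advance "offset" by however much the port accepted, so a
 * partial write is retried from exactly where it stopped. Stripped of the
 * DMA bookkeeping, with hypothetical page[]/write_object() helpers:
 *
 *	size_t off = 0;
 *	while (off < img_len) {
 *		size_t pos = off;
 *		for (int i = 0; i < npages && pos < img_len; i++) {
 *			size_t n = (img_len - pos < page_len) ?
 *				   img_len - pos : page_len;
 *			memcpy(page[i], img + pos, n);
 *			pos += n;
 *		}
 *		if (write_object(pages, img_len - off, &off) < 0)
 *			return -EIO;	// off was advanced by the port
 *	}
 */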

/**
 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
 * @phba: pointer to lpfc hba data structure.
 * @fw_upgrade: upgrade method, INT_FW_UPGRADE (asynchronous, via
 * request_firmware_nowait) or RUN_FW_UPGRADE (synchronous).
 *
 * This routine is called to perform Linux generic firmware upgrade on a
 * device that supports such a feature.
 **/
int
lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
{
	uint8_t file_name[ELX_MODEL_NAME_SIZE];
	int ret;
	const struct firmware *fw;

	/* Only supported on SLI4 interface type 2 for now */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
	    LPFC_SLI_INTF_IF_TYPE_2)
		return -EPERM;

	snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);

	if (fw_upgrade == INT_FW_UPGRADE) {
		ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
					      file_name, &phba->pcidev->dev,
					      GFP_KERNEL, (void *)phba,
					      lpfc_write_firmware);
	} else if (fw_upgrade == RUN_FW_UPGRADE) {
		ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
		if (!ret)
			lpfc_write_firmware(fw, (void *)phba);
	} else {
		ret = -EINVAL;
	}

	return ret;
}
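
/*
 * Usage note (illustrative): the SLI-4 probe path below requests the
 * asynchronous variant, while a management-triggered update would typically
 * run synchronously and propagate the result:
 *
 *	rc = lpfc_sli4_request_firmware_update(phba, RUN_FW_UPGRADE);
 *	if (rc == -EPERM)
 *		;	// SLI-4 interface type < 2: feature not supported
 *
 * With INT_FW_UPGRADE, request_firmware_nowait() returns immediately and
 * lpfc_write_firmware() does all validation and logging from its callback.
 */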

/**
 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is called from the kernel's PCI subsystem to probe a device
 * with SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
 * information of the device and driver to see whether the driver states that
 * it can support this kind of device. If the match is successful, the driver
 * core invokes this routine. If this routine determines it can claim the HBA,
 * it does all the initialization that it needs to do to handle the HBA
 * properly.
 *
 * Return code
 * 	0 - driver can claim the device
 * 	negative value - driver can not claim the device
 **/
static int
lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba *phba;
	struct lpfc_vport *vport = NULL;
	struct Scsi_Host *shost = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	INIT_LIST_HEAD(&phba->poll_list);

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error)
		goto out_free_phba;

	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-4 specific device PCI memory space */
	error = lpfc_sli4_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1410 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up SLI-4 Specific device driver resources */
	error = lpfc_sli4_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1412 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s4;
	}

	INIT_LIST_HEAD(&phba->active_rrq_list);
	INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1414 Failed to set up driver resource.\n");
		goto out_unset_driver_resource_s4;
	}

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/* Now, trying to enable interrupt and bring up the device */
	cfg_mode = phba->cfg_use_msi;

	/* Put device to a known state before enabling interrupt */
	phba->pport = NULL;
	lpfc_stop_port(phba);

	/* Init cpu_map array */
	lpfc_cpu_map_array_init(phba);

	/* Init hba_eq_hdl array */
	lpfc_hba_eq_hdl_array_init(phba);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0426 Failed to enable interrupt.\n");
		error = -ENODEV;
		goto out_unset_driver_resource;
	}
	/* Default to single EQ for non-MSI-X */
	if (phba->intr_type != MSIX) {
		phba->cfg_irq_chann = 1;
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			if (phba->nvmet_support)
				phba->cfg_nvmet_mrq = 1;
		}
	}
	lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1415 Failed to create scsi host.\n");
		goto out_disable_intr;
	}
	vport = phba->pport;
	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */

	/* Configure sysfs attributes */
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1416 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	/* Set up SLI-4 HBA */
	if (lpfc_sli4_hba_setup(phba)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1421 Failed to set up hba\n");
		error = -ENODEV;
		goto out_free_sysfs_attr;
	}

	/* Log the current active interrupt mode */
	phba->intr_mode = intr_mode;
	lpfc_log_intr_mode(phba, intr_mode);

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	/* NVME support in FW earlier in the driver load corrects the
	 * FC4 type, making a check for nvme_support unnecessary.
	 */
	if (phba->nvmet_support == 0) {
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			/* Create NVME binding with nvme_fc_transport. This
			 * ensures the vport is initialized. If the localport
			 * create fails, it should not unload the driver to
			 * support field issues.
			 */
			error = lpfc_nvme_create_localport(vport);
			if (error) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"6004 NVME registration "
						"failed, error x%x\n",
						error);
			}
		}
	}

	/* check for firmware upgrade or downgrade */
	if (phba->cfg_request_firmware_upgrade)
		lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);

	/* Enable RAS FW log support */
	lpfc_sli4_ras_setup(phba);

	timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
	cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);

	return 0;

out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_disable_intr:
	lpfc_sli4_disable_intr(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_unset_driver_resource_s4:
	lpfc_sli4_driver_resource_unset(phba);
out_unset_pci_mem_s4:
	lpfc_sli4_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
	if (shost)
		scsi_host_put(shost);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}

/**
 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem to remove a device
 * with SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 * removed from PCI bus, it performs all the necessary cleanup for the HBA
 * device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba *phba = vport->phba;
	int i;

	/* Mark the device unloading flag */
	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	/* Free the HBA sysfs attributes */
	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			fc_vport_terminate(vports[i]->fc_vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host and then SCSI host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	/* Perform ndlp cleanup on the physical port. The nvme and nvmet
	 * localports are destroyed afterwards to clean up all transport
	 * memory.
	 */
	lpfc_cleanup(vport);
	lpfc_nvmet_destroy_targetport(phba);
	lpfc_nvme_destroy_localport(vport);

	/* De-allocate multi-XRI pools */
	if (phba->cfg_xri_rebalancing)
		lpfc_destroy_multixri_pools(phba);

	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA FCoE function.
	 */
	lpfc_debugfs_terminate(vport);

	lpfc_stop_hba_timers(phba);
	spin_lock_irq(&phba->port_list_lock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->port_list_lock);

	/* Perform scsi free before driver resource_unset since scsi
	 * buffers are released to their corresponding pools here.
	 */
	lpfc_io_free(phba);
	lpfc_free_iocb_list(phba);
	lpfc_sli4_hba_unset(phba);

	lpfc_unset_driver_resource_phase2(phba);
	lpfc_sli4_driver_resource_unset(phba);

	/* Unmap adapter Control and Doorbell registers */
	lpfc_sli4_pci_mem_unset(phba);

	/* Release PCI resources and disable device's PCI function */
	scsi_host_put(shost);
	lpfc_disable_pci_dev(phba);

	/* Finally, free the driver's device data structure */
	lpfc_hba_free(phba);

	return;
}

/**
 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) on a device with SLI-4 interface spec. When PM
 * invokes this method, it quiesces the device by stopping the driver's
 * worker thread for the device, turning off the device's interrupt and DMA,
 * and bringing the device offline. Note that the driver implements only the
 * minimum PM requirements of a power-aware driver: all possible PM messages
 * (SUSPEND, HIBERNATE, FREEZE) to the suspend() method are treated as
 * SUSPEND, and the driver fully reinitializes its device during the resume()
 * method call; therefore the driver sets the device to PCI_D3hot state in
 * PCI config space instead of setting it according to the @msg provided by
 * the PM.
 *
 * Return code
 * 	0 - driver suspended the device
 * 	Error otherwise
 **/
static int
lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2843 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);

	/* Save device state to PCI config space */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

/**
 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) on a device with SLI-4 interface spec. When PM
 * invokes this method, it restores the device's PCI config space state,
 * fully reinitializes the device, and brings it online. Note that the driver
 * implements only the minimum PM requirements of a power-aware driver: all
 * possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() method
 * are treated as SUSPEND, and the driver fully reinitializes its device
 * during the resume() method call; therefore the device is set to PCI_D0
 * directly in PCI config space before restoring the state.
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
static int
lpfc_pci_resume_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0292 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * the device's saved_state flag, we need to save the restored state
	 * again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0293 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0294 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}

/**
 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot recovery.
 * It aborts all the outstanding SCSI I/Os to the pci device.
 **/
static void
lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2828 PCI channel I/O abort preparing for recovery\n");
	/*
	 * There may be errored I/Os through the HBA; abort all I/Os on the
	 * txcmplq and let the SCSI mid-layer retry them to recover.
	 */
	lpfc_sli_abort_fcp_rings(phba);
}

/**
 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
 **/
static void
lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2826 PCI channel disable preparing for reset\n");

	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* Flush all driver's outstanding I/Os as we are to reset */
	lpfc_sli_flush_io_rings(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Disable interrupt and pci device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);
	pci_disable_device(phba->pcidev);
}

/**
 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for the PCI slot being
 * permanently disabled. It blocks the SCSI transport layer traffic and
 * flushes the FCP pending I/Os.
 **/
static void
lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2827 PCI channel permanent disable for failure\n");

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding I/Os */
	lpfc_sli_flush_io_rings(phba);
}

/**
 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. This function is called by the PCI subsystem
 * after a PCI bus error affecting this device has been detected. When this
 * function is invoked, it will need to stop all the I/Os and interrupt(s)
 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
 * for the PCI subsystem to perform proper recovery as desired.
 *
 * Return codes
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
		lpfc_sli4_prep_dev_for_recover(phba);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		lpfc_sli4_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent failure, prepare for device down */
		lpfc_sli4_prep_dev_for_perm_failure(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		/* Unknown state, prepare and request slot reset */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2825 Unknown PCI error state: x%x\n", state);
		lpfc_sli4_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	}
}

/**
 * lpfc_io_slot_reset_s4 - Method for restarting PCI SLI-4 device from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. It is called after PCI bus has been reset to
 * restart the PCI card from scratch, as if from a cold-boot. During the
 * PCI subsystem error recovery, after the driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method to
 * recover the device. This function will initialize the HBA device and
 * enable the interrupt, but it will just put the HBA in offline state
 * without passing any I/O traffic.
 *
 * Return codes
 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 */
static pci_ers_result_t
lpfc_io_slot_reset_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
		       "PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * the device's saved_state flag, we need to save the restored state
	 * again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2824 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. It is called when kernel error recovery tells
 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
 * error recovery. After this call, traffic can start to flow from this device
 * again.
 **/
static void
lpfc_io_resume_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/*
	 * In case of slot reset, the function reset is performed through a
	 * mailbox command, which needs DMA to be enabled, so this operation
	 * has to be moved to the io resume phase. Taking the device offline
	 * will perform the necessary cleanup.
	 */
	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
		/* Perform device reset */
		lpfc_offline_prep(phba, LPFC_MBX_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		/* Bring the device back online */
		lpfc_online(phba);
	}
}

/**
 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
 * at PCI device-specific information of the device and driver to see whether
 * the driver states that it can support this kind of device. If the match is
 * successful, the driver core invokes this routine. This routine dispatches
 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
 * do all the initialization that it needs to do to handle the HBA device
 * properly.
 *
 * Return code
 * 	0 - driver can claim the device
 * 	negative value - driver can not claim the device
 **/
static int
lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	int rc;
	struct lpfc_sli_intf intf;

	if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
		return -ENODEV;

	if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
	    (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
		rc = lpfc_pci_probe_one_s4(pdev, pid);
	else
		rc = lpfc_pci_probe_one_s3(pdev, pid);

	return rc;
}

/**
 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
 * remove routine, which will perform all the necessary cleanup for the
 * device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_pci_remove_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_pci_remove_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1424 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}

/**
 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
 * suspend the device.
 *
 * Return code
 * 	0 - driver suspended the device
 * 	Error otherwise
 **/
static int
lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_suspend_one_s3(pdev, msg);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_suspend_one_s4(pdev, msg);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1425 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
 * resume the device.
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
static int
lpfc_pci_resume_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_resume_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_resume_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1426 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called by the PCI subsystem after a PCI bus error affecting
 * this device has been detected. When this routine is invoked, it dispatches
 * the action to the proper SLI-3 or SLI-4 device error detected handling
 * routine, which will perform the proper error detected operation.
 *
 * Return codes
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_error_detected_s3(pdev, state);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_error_detected_s4(pdev, state);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1427 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called after PCI bus has been reset to restart the PCI card
 * from scratch, as if from a cold-boot. When this routine is invoked, it
 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
 * routine, which will perform the proper device reset.
 *
 * Return codes
 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_slot_reset_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_slot_reset_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1428 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
13880
/**
 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
 * @pdev: pointer to PCI device
 *
 * This routine is registered to the PCI subsystem for error handling. It
 * is called when kernel error recovery tells the lpfc driver that it is
 * OK to resume normal PCI operation after PCI bus error recovery. When
 * this routine is invoked, it dispatches the action to the proper SLI-3
 * or SLI-4 device io_resume routine, which will resume the device operation.
 **/
static void
lpfc_io_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_io_resume_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_io_resume_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1429 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
}

/**
 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine checks whether OAS is supported by this adapter. If it is,
 * the Flash Optimized Fabric (cfg_fof) flag is set. Otherwise, the flag is
 * cleared and the memory pool created for OAS device data is destroyed.
 **/
static void
lpfc_sli4_oas_verify(struct lpfc_hba *phba)
{
	if (!phba->cfg_EnableXLane)
		return;

	if (phba->sli4_hba.pc_sli4_params.oas_supported) {
		phba->cfg_fof = 1;
	} else {
		phba->cfg_fof = 0;
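		/* mempool_destroy() is a no-op when passed a NULL pointer */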
		mempool_destroy(phba->device_data_mem_pool);
		phba->device_data_mem_pool = NULL;
	}
}

/**
 * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine checks whether the adapter supports RAS firmware logging and,
 * if so, enables it only when logging is configured for this PCI function
 * and a log buffer size has been requested.
 **/
void
lpfc_sli4_ras_init(struct lpfc_hba *phba)
{
	switch (phba->pcidev->device) {
	case PCI_DEVICE_ID_LANCER_G6_FC:
	case PCI_DEVICE_ID_LANCER_G7_FC:
		phba->ras_fwlog.ras_hwsupport = true;
		if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
		    phba->cfg_ras_fwlog_buffsize)
			phba->ras_fwlog.ras_enabled = true;
		else
			phba->ras_fwlog.ras_enabled = false;
		break;
	default:
		phba->ras_fwlog.ras_hwsupport = false;
	}
}

MODULE_DEVICE_TABLE(pci, lpfc_id_table);

static const struct pci_error_handlers lpfc_err_handler = {
	.error_detected = lpfc_io_error_detected,
	.slot_reset = lpfc_io_slot_reset,
	.resume = lpfc_io_resume,
};

static struct pci_driver lpfc_driver = {
	.name		= LPFC_DRIVER_NAME,
	.id_table	= lpfc_id_table,
	.probe		= lpfc_pci_probe_one,
	.remove		= lpfc_pci_remove_one,
	.shutdown	= lpfc_pci_remove_one,
	.suspend	= lpfc_pci_suspend_one,
	.resume		= lpfc_pci_resume_one,
	.err_handler    = &lpfc_err_handler,
};

static const struct file_operations lpfc_mgmt_fop = {
	.owner = THIS_MODULE,
};

static struct miscdevice lpfc_mgmt_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "lpfcmgmt",
	.fops = &lpfc_mgmt_fop,
};

/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is to be invoked when the lpfc module is loaded into the
 * kernel. The special kernel macro module_init() is used to indicate the
 * role of this routine to the kernel as the lpfc module entry point.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - FC attach transport failed
 *	all others - failed
 */
static int __init
lpfc_init(void)
{
	int error = 0;

	pr_info(LPFC_MODULE_DESC "\n");
	pr_info(LPFC_COPYRIGHT "\n");

	error = misc_register(&lpfc_mgmt_dev);
	if (error)
		printk(KERN_ERR "Could not register lpfcmgmt device, "
			"misc_register returned with status %d\n", error);

	error = -ENOMEM;
	lpfc_transport_functions.vport_create = lpfc_vport_create;
	lpfc_transport_functions.vport_delete = lpfc_vport_delete;
	lpfc_transport_template =
				fc_attach_transport(&lpfc_transport_functions);
	if (lpfc_transport_template == NULL)
		goto unregister;
	lpfc_vport_transport_template =
		fc_attach_transport(&lpfc_vport_transport_functions);
	if (lpfc_vport_transport_template == NULL) {
		fc_release_transport(lpfc_transport_template);
		goto unregister;
	}
	lpfc_nvme_cmd_template();
	lpfc_nvmet_cmd_template();

	/* Initialize in case vector mapping is needed */
	lpfc_present_cpu = num_present_cpus();

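	/*
	 * CPUHP_AP_ONLINE_DYN requests a dynamically allocated hotplug
	 * state from the cpuhp core; on success the positive state value
	 * is returned and saved so that per-HBA instances can be added to
	 * it and lpfc_exit() can remove it again.
	 */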
	error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
					"lpfc/sli4:online",
					lpfc_cpu_online, lpfc_cpu_offline);
	if (error < 0)
		goto cpuhp_failure;
	lpfc_cpuhp_state = error;

	error = pci_register_driver(&lpfc_driver);
	if (error)
		goto unwind;

	return error;

unwind:
	cpuhp_remove_multi_state(lpfc_cpuhp_state);
cpuhp_failure:
	fc_release_transport(lpfc_transport_template);
	fc_release_transport(lpfc_vport_transport_template);
unregister:
	misc_deregister(&lpfc_mgmt_dev);

	return error;
}

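/**
 * lpfc_dmp_dbg - dump the driver's internal debug message ring to the log
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine flushes the messages buffered by lpfc_dbg_print() to the
 * kernel log, oldest first, and then resets the ring. It returns without
 * dumping when verbose logging is enabled or when another context is
 * already dumping the ring.
 **/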
void lpfc_dmp_dbg(struct lpfc_hba *phba)
{
	unsigned int start_idx;
	unsigned int dbg_cnt;
	unsigned int temp_idx;
	int i;
	int j = 0;
	unsigned long rem_nsec;

	if (phba->cfg_log_verbose)
		return;

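	/* Allow only one dumper at a time; lpfc_dbg_print() checks this flag */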
	if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0)
		return;

	start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ;
	dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt);
	temp_idx = start_idx;
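	/* Derive the window of valid entries, allowing for ring wrap-around */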
	if (dbg_cnt >= DBG_LOG_SZ) {
		dbg_cnt = DBG_LOG_SZ;
		temp_idx -= 1;
	} else {
		if ((start_idx + dbg_cnt) > (DBG_LOG_SZ - 1)) {
			temp_idx = (start_idx + dbg_cnt) % DBG_LOG_SZ;
		} else {
			if (start_idx < dbg_cnt)
				start_idx = DBG_LOG_SZ - (dbg_cnt - start_idx);
			else
				start_idx -= dbg_cnt;
		}
	}
	dev_info(&phba->pcidev->dev, "start %d end %d cnt %d\n",
		 start_idx, temp_idx, dbg_cnt);

	for (i = 0; i < dbg_cnt; i++) {
		if ((start_idx + i) < DBG_LOG_SZ)
			temp_idx = (start_idx + i) % DBG_LOG_SZ;
		else
			temp_idx = j++;
		rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC);
		dev_info(&phba->pcidev->dev, "%d: [%5lu.%06lu] %s",
			 temp_idx,
			 (unsigned long)phba->dbg_log[temp_idx].t_ns,
			 rem_nsec / 1000,
			 phba->dbg_log[temp_idx].log);
	}
	atomic_set(&phba->dbg_log_cnt, 0);
	atomic_set(&phba->dbg_log_dmping, 0);
}

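/**
 * lpfc_dbg_print - buffer a debug message in the driver's debug ring
 * @phba: pointer to lpfc hba data structure.
 * @fmt: printf style format string, followed by its arguments.
 *
 * This routine normally formats the message into the next slot of the
 * phba->dbg_log[] ring along with a local_clock() timestamp. While the
 * ring is being dumped by lpfc_dmp_dbg(), new messages bypass the ring
 * and are printed to the kernel log immediately instead.
 **/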
__printf(2, 3)
void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...)
{
	unsigned int idx;
	va_list args;
	int dbg_dmping = atomic_read(&phba->dbg_log_dmping);
	struct va_format vaf;

	va_start(args, fmt);
	if (unlikely(dbg_dmping)) {
		vaf.fmt = fmt;
		vaf.va = &args;
		dev_info(&phba->pcidev->dev, "%pV", &vaf);
		va_end(args);
		return;
	}
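	/* Claim the next ring slot; the producer index grows without bound */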
	idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) %
		DBG_LOG_SZ;

	atomic_inc(&phba->dbg_log_cnt);

	vscnprintf(phba->dbg_log[idx].log,
		   sizeof(phba->dbg_log[idx].log), fmt, args);
	va_end(args);

	phba->dbg_log[idx].t_ns = local_clock();
}

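/*
 * Example of a hypothetical caller (the message text and arguments are
 * made up for illustration):
 *
 *	lpfc_dbg_print(phba, "0000 wq %d put index %d\n", qidx, host_index);
 *
 * The formatted message is timestamped and retained in phba->dbg_log[];
 * an error path can later call lpfc_dmp_dbg(phba) to flush the recent
 * history to the kernel log.
 */
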
/**
 * lpfc_exit - lpfc module removal routine
 *
 * This routine is invoked when the lpfc module is removed from the kernel.
 * The special kernel macro module_exit() is used to indicate the role of
 * this routine to the kernel as the lpfc module exit point.
 */
static void __exit
lpfc_exit(void)
{
	misc_deregister(&lpfc_mgmt_dev);
	pci_unregister_driver(&lpfc_driver);
	cpuhp_remove_multi_state(lpfc_cpuhp_state);
	fc_release_transport(lpfc_transport_template);
	fc_release_transport(lpfc_vport_transport_template);
	idr_destroy(&lpfc_hba_index);
}

module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Broadcom");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);