v4.19.13 snapshot.
diff --git a/drivers/scsi/bfa/Makefile b/drivers/scsi/bfa/Makefile
new file mode 100644
index 0000000..442fc3d
--- /dev/null
+++ b/drivers/scsi/bfa/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_SCSI_BFA_FC) := bfa.o
+
+bfa-y := bfad.o bfad_im.o bfad_attr.o bfad_debugfs.o bfad_bsg.o
+bfa-y += bfa_ioc.o bfa_ioc_cb.o bfa_ioc_ct.o bfa_hw_cb.o bfa_hw_ct.o
+bfa-y += bfa_fcs.o bfa_fcs_lport.o bfa_fcs_rport.o bfa_fcs_fcpim.o bfa_fcbuild.o
+bfa-y += bfa_port.o bfa_fcpim.o bfa_core.o bfa_svc.o
diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
new file mode 100644
index 0000000..0e119d8
--- /dev/null
+++ b/drivers/scsi/bfa/bfa.h
@@ -0,0 +1,464 @@
+/*
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014- QLogic Corporation.
+ * All rights reserved
+ * www.qlogic.com
+ *
+ * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+#ifndef __BFA_H__
+#define __BFA_H__
+
+#include "bfad_drv.h"
+#include "bfa_cs.h"
+#include "bfa_plog.h"
+#include "bfa_defs_svc.h"
+#include "bfi.h"
+#include "bfa_ioc.h"
+
+struct bfa_s;
+
+typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m);
+typedef void (*bfa_cb_cbfn_status_t) (void *cbarg, bfa_status_t status);
+
+/*
+ * Interrupt message handlers
+ */
+void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m);
+
+/*
+ * Request and response queue related defines
+ */
+#define BFA_REQQ_NELEMS_MIN	(4)
+#define BFA_RSPQ_NELEMS_MIN	(4)
+
+#define bfa_reqq_pi(__bfa, __reqq)	((__bfa)->iocfc.req_cq_pi[__reqq])
+#define bfa_reqq_ci(__bfa, __reqq)					\
+	(*(u32 *)((__bfa)->iocfc.req_cq_shadow_ci[__reqq].kva))
+
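+/*
+ * A request queue is full when advancing the producer index would make
+ * it equal the consumer index; one slot is always left unused to
+ * distinguish full from empty.
+ */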
+#define bfa_reqq_full(__bfa, __reqq)				\
+	(((bfa_reqq_pi(__bfa, __reqq) + 1) &			\
+	  ((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1)) ==	\
+	 bfa_reqq_ci(__bfa, __reqq))
+
+#define bfa_reqq_next(__bfa, __reqq)					\
+	(bfa_reqq_full(__bfa, __reqq) ? NULL :				\
+	 ((void *)((struct bfi_msg_s *)((__bfa)->iocfc.req_cq_ba[__reqq].kva) \
+		   + bfa_reqq_pi((__bfa), (__reqq)))))
+
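+/*
+ * Post a request: stamp the target queue id into the message header,
+ * advance the producer index with wraparound and ring the doorbell.
+ * mmiowb() orders the doorbell write before a later spinlock release.
+ */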
+#define bfa_reqq_produce(__bfa, __reqq, __mh)  do {			\
+		(__mh).mtag.h2i.qid     = (__bfa)->iocfc.hw_qid[__reqq];\
+		(__bfa)->iocfc.req_cq_pi[__reqq]++;			\
+		(__bfa)->iocfc.req_cq_pi[__reqq] &=			\
+			((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1); \
+		writel((__bfa)->iocfc.req_cq_pi[__reqq],		\
+			(__bfa)->iocfc.bfa_regs.cpe_q_pi[__reqq]);	\
+		mmiowb();      \
+	} while (0)
+
+#define bfa_rspq_pi(__bfa, __rspq)					\
+	(*(u32 *)((__bfa)->iocfc.rsp_cq_shadow_pi[__rspq].kva))
+
+#define bfa_rspq_ci(__bfa, __rspq)	((__bfa)->iocfc.rsp_cq_ci[__rspq])
+#define bfa_rspq_elem(__bfa, __rspq, __ci)				\
+	(&((struct bfi_msg_s *)((__bfa)->iocfc.rsp_cq_ba[__rspq].kva))[__ci])
+
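+/* Queue depths are powers of two, so wraparound is a mask, not a modulo. */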
+#define CQ_INCR(__index, __size) do {			\
+	(__index)++;					\
+	(__index) &= ((__size) - 1);			\
+} while (0)
+
+/*
+ * Circular queue usage assignments
+ */
+enum {
+	BFA_REQQ_IOC	= 0,	/*  all low-priority IOC msgs	*/
+	BFA_REQQ_FCXP	= 0,	/*  all FCXP messages		*/
+	BFA_REQQ_LPS	= 0,	/*  all lport service msgs	*/
+	BFA_REQQ_PORT	= 0,	/*  all port messages		*/
+	BFA_REQQ_FLASH	= 0,	/*  for flash module		*/
+	BFA_REQQ_DIAG	= 0,	/*  for diag module		*/
+	BFA_REQQ_RPORT	= 0,	/*  all rport messages		*/
+	BFA_REQQ_SBOOT	= 0,	/*  all san boot messages	*/
+	BFA_REQQ_QOS_LO	= 1,	/*  all low priority IO	*/
+	BFA_REQQ_QOS_MD	= 2,	/*  all medium priority IO	*/
+	BFA_REQQ_QOS_HI	= 3,	/*  all high priority IO	*/
+};
+
+static inline void
+bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg),
+	       void *cbarg)
+{
+	wqe->qresume = qresume;
+	wqe->cbarg = cbarg;
+}
+
+#define bfa_reqq(__bfa, __reqq)	(&(__bfa)->reqq_waitq[__reqq])
+
+/*
+ * static inline void
+ * bfa_reqq_wait(struct bfa_s *bfa, int reqq, struct bfa_reqq_wait_s *wqe)
+ */
+#define bfa_reqq_wait(__bfa, __reqq, __wqe) do {			\
+									\
+		struct list_head *waitq = bfa_reqq(__bfa, __reqq);      \
+									\
+		WARN_ON(((__reqq) >= BFI_IOC_MAX_CQS));			\
+		WARN_ON(!((__wqe)->qresume && (__wqe)->cbarg));		\
+									\
+		list_add_tail(&(__wqe)->qe, waitq);      \
+	} while (0)
+
+#define bfa_reqq_wcancel(__wqe)	list_del(&(__wqe)->qe)
+
+#define bfa_cb_queue(__bfa, __hcb_qe, __cbfn, __cbarg) do {	\
+		(__hcb_qe)->cbfn  = (__cbfn);      \
+		(__hcb_qe)->cbarg = (__cbarg);      \
+		(__hcb_qe)->pre_rmv = BFA_FALSE;		\
+		list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q);      \
+	} while (0)
+
+#define bfa_cb_dequeue(__hcb_qe)	list_del(&(__hcb_qe)->qe)
+
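+/*
+ * Queue a callback only if it is not already queued; bfa_cb_queue_done()
+ * re-arms the entry for the next use.
+ */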
+#define bfa_cb_queue_once(__bfa, __hcb_qe, __cbfn, __cbarg) do {	\
+		(__hcb_qe)->cbfn  = (__cbfn);      \
+		(__hcb_qe)->cbarg = (__cbarg);      \
+		if (!(__hcb_qe)->once) {      \
+			list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q);      \
+			(__hcb_qe)->once = BFA_TRUE;			\
+		}							\
+	} while (0)
+
+#define bfa_cb_queue_status(__bfa, __hcb_qe, __status) do {		\
+		(__hcb_qe)->fw_status = (__status);			\
+		list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q);	\
+} while (0)
+
+#define bfa_cb_queue_done(__hcb_qe) do {	\
+		(__hcb_qe)->once = BFA_FALSE;	\
+	} while (0)
+
+
+/*
+ * PCI devices supported by the current BFA
+ */
+struct bfa_pciid_s {
+	u16	device_id;
+	u16	vendor_id;
+};
+
+extern char     bfa_version[];
+
+struct bfa_iocfc_regs_s {
+	void __iomem	*intr_status;
+	void __iomem	*intr_mask;
+	void __iomem	*cpe_q_pi[BFI_IOC_MAX_CQS];
+	void __iomem	*cpe_q_ci[BFI_IOC_MAX_CQS];
+	void __iomem	*cpe_q_ctrl[BFI_IOC_MAX_CQS];
+	void __iomem	*rme_q_ci[BFI_IOC_MAX_CQS];
+	void __iomem	*rme_q_pi[BFI_IOC_MAX_CQS];
+	void __iomem	*rme_q_ctrl[BFI_IOC_MAX_CQS];
+};
+
+/*
+ * MSIX vector handlers
+ */
+#define BFA_MSIX_MAX_VECTORS	22
+typedef void (*bfa_msix_handler_t)(struct bfa_s *bfa, int vec);
+struct bfa_msix_s {
+	int	nvecs;
+	bfa_msix_handler_t handler[BFA_MSIX_MAX_VECTORS];
+};
+
+/*
+ * Chip specific interfaces
+ */
+struct bfa_hwif_s {
+	void (*hw_reginit)(struct bfa_s *bfa);
+	void (*hw_reqq_ack)(struct bfa_s *bfa, int reqq);
+	void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq, u32 ci);
+	void (*hw_msix_init)(struct bfa_s *bfa, int nvecs);
+	void (*hw_msix_ctrl_install)(struct bfa_s *bfa);
+	void (*hw_msix_queue_install)(struct bfa_s *bfa);
+	void (*hw_msix_uninstall)(struct bfa_s *bfa);
+	void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
+	void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
+				u32 *nvecs, u32 *maxvec);
+	void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start,
+				       u32 *end);
+	int	cpe_vec_q0;
+	int	rme_vec_q0;
+};
+typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
+
+struct bfa_faa_cbfn_s {
+	bfa_cb_iocfc_t	faa_cbfn;
+	void		*faa_cbarg;
+};
+
+#define BFA_FAA_ENABLED		1
+#define BFA_FAA_DISABLED	2
+
+/*
+ *	FAA attributes
+ */
+struct bfa_faa_attr_s {
+	wwn_t	faa;
+	u8	faa_state;
+	u8	pwwn_source;
+	u8	rsvd[6];
+};
+
+struct bfa_faa_args_s {
+	struct bfa_faa_attr_s	*faa_attr;
+	struct bfa_faa_cbfn_s	faa_cb;
+	u8			faa_state;
+	bfa_boolean_t		busy;
+};
+
+struct bfa_iocfc_s {
+	bfa_fsm_t		fsm;
+	struct bfa_s		*bfa;
+	struct bfa_iocfc_cfg_s	cfg;
+	u32		req_cq_pi[BFI_IOC_MAX_CQS];
+	u32		rsp_cq_ci[BFI_IOC_MAX_CQS];
+	u8		hw_qid[BFI_IOC_MAX_CQS];
+	struct bfa_cb_qe_s	init_hcb_qe;
+	struct bfa_cb_qe_s	stop_hcb_qe;
+	struct bfa_cb_qe_s	dis_hcb_qe;
+	struct bfa_cb_qe_s	en_hcb_qe;
+	struct bfa_cb_qe_s	stats_hcb_qe;
+	bfa_boolean_t		submod_enabled;
+	bfa_boolean_t		cb_reqd;	/* Driver call back reqd */
+	bfa_status_t		op_status;	/* Status of bfa iocfc op */
+
+	struct bfa_dma_s	cfg_info;
+	struct bfi_iocfc_cfg_s *cfginfo;
+	struct bfa_dma_s	cfgrsp_dma;
+	struct bfi_iocfc_cfgrsp_s *cfgrsp;
+	struct bfa_dma_s	req_cq_ba[BFI_IOC_MAX_CQS];
+	struct bfa_dma_s	req_cq_shadow_ci[BFI_IOC_MAX_CQS];
+	struct bfa_dma_s	rsp_cq_ba[BFI_IOC_MAX_CQS];
+	struct bfa_dma_s	rsp_cq_shadow_pi[BFI_IOC_MAX_CQS];
+	struct bfa_iocfc_regs_s	bfa_regs;	/*  BFA device registers */
+	struct bfa_hwif_s	hwif;
+	bfa_cb_iocfc_t		updateq_cbfn; /*  bios callback function */
+	void			*updateq_cbarg;	/*  bios callback arg */
+	u32	intr_mask;
+	struct bfa_faa_args_s	faa_args;
+	struct bfa_mem_dma_s	ioc_dma;
+	struct bfa_mem_dma_s	iocfc_dma;
+	struct bfa_mem_dma_s	reqq_dma[BFI_IOC_MAX_CQS];
+	struct bfa_mem_dma_s	rspq_dma[BFI_IOC_MAX_CQS];
+	struct bfa_mem_kva_s	kva_seg;
+};
+
+#define BFA_MEM_IOC_DMA(_bfa)		(&((_bfa)->iocfc.ioc_dma))
+#define BFA_MEM_IOCFC_DMA(_bfa)		(&((_bfa)->iocfc.iocfc_dma))
+#define BFA_MEM_REQQ_DMA(_bfa, _qno)	(&((_bfa)->iocfc.reqq_dma[(_qno)]))
+#define BFA_MEM_RSPQ_DMA(_bfa, _qno)	(&((_bfa)->iocfc.rspq_dma[(_qno)]))
+#define BFA_MEM_IOCFC_KVA(_bfa)		(&((_bfa)->iocfc.kva_seg))
+
+#define bfa_fn_lpu(__bfa)	\
+	bfi_fn_lpu(bfa_ioc_pcifn(&(__bfa)->ioc), bfa_ioc_portid(&(__bfa)->ioc))
+#define bfa_msix_init(__bfa, __nvecs)					\
+	((__bfa)->iocfc.hwif.hw_msix_init(__bfa, __nvecs))
+#define bfa_msix_ctrl_install(__bfa)					\
+	((__bfa)->iocfc.hwif.hw_msix_ctrl_install(__bfa))
+#define bfa_msix_queue_install(__bfa)					\
+	((__bfa)->iocfc.hwif.hw_msix_queue_install(__bfa))
+#define bfa_msix_uninstall(__bfa)					\
+	((__bfa)->iocfc.hwif.hw_msix_uninstall(__bfa))
+#define bfa_isr_rspq_ack(__bfa, __queue, __ci)				\
+	((__bfa)->iocfc.hwif.hw_rspq_ack(__bfa, __queue, __ci))
+#define bfa_isr_reqq_ack(__bfa, __queue) do {				\
+	if ((__bfa)->iocfc.hwif.hw_reqq_ack)				\
+		(__bfa)->iocfc.hwif.hw_reqq_ack(__bfa, __queue);	\
+} while (0)
+#define bfa_isr_mode_set(__bfa, __msix) do {				\
+	if ((__bfa)->iocfc.hwif.hw_isr_mode_set)			\
+		(__bfa)->iocfc.hwif.hw_isr_mode_set(__bfa, __msix);	\
+} while (0)
+#define bfa_msix_getvecs(__bfa, __vecmap, __nvecs, __maxvec)		\
+	((__bfa)->iocfc.hwif.hw_msix_getvecs(__bfa, __vecmap,		\
+					__nvecs, __maxvec))
+#define bfa_msix_get_rme_range(__bfa, __start, __end)			\
+	((__bfa)->iocfc.hwif.hw_msix_get_rme_range(__bfa, __start, __end))
+#define bfa_msix(__bfa, __vec)						\
+	((__bfa)->msix.handler[__vec](__bfa, __vec))
+
+/*
+ * FC specific IOC functions.
+ */
+void bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg,
+			struct bfa_meminfo_s *meminfo,
+			struct bfa_s *bfa);
+void bfa_iocfc_attach(struct bfa_s *bfa, void *bfad,
+		      struct bfa_iocfc_cfg_s *cfg,
+		      struct bfa_pcidev_s *pcidev);
+void bfa_iocfc_init(struct bfa_s *bfa);
+void bfa_iocfc_start(struct bfa_s *bfa);
+void bfa_iocfc_stop(struct bfa_s *bfa);
+void bfa_iocfc_isr(void *bfa, struct bfi_mbmsg_s *msg);
+void bfa_iocfc_set_snsbase(struct bfa_s *bfa, int seg_no, u64 snsbase_pa);
+bfa_boolean_t bfa_iocfc_is_operational(struct bfa_s *bfa);
+void bfa_iocfc_reset_queues(struct bfa_s *bfa);
+
+void bfa_msix_all(struct bfa_s *bfa, int vec);
+void bfa_msix_reqq(struct bfa_s *bfa, int vec);
+void bfa_msix_rspq(struct bfa_s *bfa, int vec);
+void bfa_msix_lpu_err(struct bfa_s *bfa, int vec);
+
+void bfa_hwcb_reginit(struct bfa_s *bfa);
+void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci);
+void bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs);
+void bfa_hwcb_msix_ctrl_install(struct bfa_s *bfa);
+void bfa_hwcb_msix_queue_install(struct bfa_s *bfa);
+void bfa_hwcb_msix_uninstall(struct bfa_s *bfa);
+void bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
+void bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs,
+			   u32 *maxvec);
+void bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start,
+				 u32 *end);
+void bfa_hwct_reginit(struct bfa_s *bfa);
+void bfa_hwct2_reginit(struct bfa_s *bfa);
+void bfa_hwct_reqq_ack(struct bfa_s *bfa, int rspq);
+void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci);
+void bfa_hwct2_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci);
+void bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs);
+void bfa_hwct_msix_ctrl_install(struct bfa_s *bfa);
+void bfa_hwct_msix_queue_install(struct bfa_s *bfa);
+void bfa_hwct_msix_uninstall(struct bfa_s *bfa);
+void bfa_hwct_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
+void bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs,
+			   u32 *maxvec);
+void bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 *start,
+				 u32 *end);
+void bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns);
+int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa,
+				struct bfi_pbc_vport_s *pbc_vport);
+
+
+/*
+ *----------------------------------------------------------------------
+ *		BFA public interfaces
+ *----------------------------------------------------------------------
+ */
+#define bfa_stats(_mod, _stats)	((_mod)->stats._stats++)
+#define bfa_ioc_get_stats(__bfa, __ioc_stats)		\
+	bfa_ioc_fetch_stats(&(__bfa)->ioc, __ioc_stats)
+#define bfa_ioc_clear_stats(__bfa)		\
+	bfa_ioc_clr_stats(&(__bfa)->ioc)
+#define bfa_get_nports(__bfa)			\
+	bfa_ioc_get_nports(&(__bfa)->ioc)
+#define bfa_get_adapter_manufacturer(__bfa, __manufacturer)		\
+	bfa_ioc_get_adapter_manufacturer(&(__bfa)->ioc, __manufacturer)
+#define bfa_get_adapter_model(__bfa, __model)			\
+	bfa_ioc_get_adapter_model(&(__bfa)->ioc, __model)
+#define bfa_get_adapter_serial_num(__bfa, __serial_num)			\
+	bfa_ioc_get_adapter_serial_num(&(__bfa)->ioc, __serial_num)
+#define bfa_get_adapter_fw_ver(__bfa, __fw_ver)			\
+	bfa_ioc_get_adapter_fw_ver(&(__bfa)->ioc, __fw_ver)
+#define bfa_get_adapter_optrom_ver(__bfa, __optrom_ver)			\
+	bfa_ioc_get_adapter_optrom_ver(&(__bfa)->ioc, __optrom_ver)
+#define bfa_get_pci_chip_rev(__bfa, __chip_rev)			\
+	bfa_ioc_get_pci_chip_rev(&(__bfa)->ioc, __chip_rev)
+#define bfa_get_ioc_state(__bfa)		\
+	bfa_ioc_get_state(&(__bfa)->ioc)
+#define bfa_get_type(__bfa)			\
+	bfa_ioc_get_type(&(__bfa)->ioc)
+#define bfa_get_mac(__bfa)			\
+	bfa_ioc_get_mac(&(__bfa)->ioc)
+#define bfa_get_mfg_mac(__bfa)			\
+	bfa_ioc_get_mfg_mac(&(__bfa)->ioc)
+#define bfa_get_fw_clock_res(__bfa)		\
+	((__bfa)->iocfc.cfgrsp->fwcfg.fw_tick_res)
+
+/*
+ * lun mask macros return NULL when min cfg is enabled, since no memory
+ * is allocated for the lunmask in that mode.
+ */
+#define bfa_get_lun_mask(__bfa)					\
+	((&(__bfa)->modules.dconf_mod)->min_cfg) ? NULL :	\
+	 (&(BFA_DCONF_MOD(__bfa)->dconf->lun_mask))
+
+#define bfa_get_lun_mask_list(_bfa)				\
+	((&(_bfa)->modules.dconf_mod)->min_cfg) ? NULL :	\
+	 (bfa_get_lun_mask(_bfa)->lun_list)
+
+#define bfa_get_lun_mask_status(_bfa)				\
+	(((&(_bfa)->modules.dconf_mod)->min_cfg)		\
+	 ? BFA_LUNMASK_MINCFG : ((bfa_get_lun_mask(_bfa))->status))
+
+void bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids);
+void bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg);
+void bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg);
+void bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg,
+			struct bfa_meminfo_s *meminfo,
+			struct bfa_s *bfa);
+void bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+		struct bfa_meminfo_s *meminfo,
+		struct bfa_pcidev_s *pcidev);
+void bfa_detach(struct bfa_s *bfa);
+void bfa_cb_init(void *bfad, bfa_status_t status);
+void bfa_cb_updateq(void *bfad, bfa_status_t status);
+
+bfa_boolean_t bfa_intx(struct bfa_s *bfa);
+void bfa_isr_enable(struct bfa_s *bfa);
+void bfa_isr_disable(struct bfa_s *bfa);
+
+void bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q);
+void bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q);
+void bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q);
+
+typedef void (*bfa_cb_ioc_t) (void *cbarg, enum bfa_status status);
+void bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr);
+
+
+bfa_status_t bfa_iocfc_israttr_set(struct bfa_s *bfa,
+				   struct bfa_iocfc_intr_attr_s *attr);
+
+void bfa_iocfc_enable(struct bfa_s *bfa);
+void bfa_iocfc_disable(struct bfa_s *bfa);
+#define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout)		\
+	bfa_timer_begin(&(_bfa)->timer_mod, _timer, _timercb, _arg, _timeout)
+
+struct bfa_cb_pending_q_s {
+	struct bfa_cb_qe_s	hcb_qe;
+	void			*data;  /* Driver buffer */
+};
+
+/* Common macros to operate on pending stats/attr apis */
+#define bfa_pending_q_init(__qe, __cbfn, __cbarg, __data) do {	\
+	bfa_q_qe_init(&((__qe)->hcb_qe.qe));			\
+	(__qe)->hcb_qe.cbfn = (__cbfn);				\
+	(__qe)->hcb_qe.cbarg = (__cbarg);			\
+	(__qe)->hcb_qe.pre_rmv = BFA_TRUE;			\
+	(__qe)->data = (__data);				\
+} while (0)
+
+#endif /* __BFA_H__ */
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
new file mode 100644
index 0000000..10a63be
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -0,0 +1,2031 @@
+/*
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014- QLogic Corporation.
+ * All rights reserved
+ * www.qlogic.com
+ *
+ * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include "bfad_drv.h"
+#include "bfa_modules.h"
+#include "bfi_reg.h"
+
+BFA_TRC_FILE(HAL, CORE);
+
+/*
+ * Message handlers for various modules.
+ */
+static bfa_isr_func_t  bfa_isrs[BFI_MC_MAX] = {
+	bfa_isr_unhandled,	/* NONE */
+	bfa_isr_unhandled,	/* BFI_MC_IOC */
+	bfa_fcdiag_intr,	/* BFI_MC_DIAG */
+	bfa_isr_unhandled,	/* BFI_MC_FLASH */
+	bfa_isr_unhandled,	/* BFI_MC_CEE */
+	bfa_fcport_isr,		/* BFI_MC_FCPORT */
+	bfa_isr_unhandled,	/* BFI_MC_IOCFC */
+	bfa_isr_unhandled,	/* BFI_MC_LL */
+	bfa_uf_isr,		/* BFI_MC_UF */
+	bfa_fcxp_isr,		/* BFI_MC_FCXP */
+	bfa_lps_isr,		/* BFI_MC_LPS */
+	bfa_rport_isr,		/* BFI_MC_RPORT */
+	bfa_itn_isr,		/* BFI_MC_ITN */
+	bfa_isr_unhandled,	/* BFI_MC_IOIM_READ */
+	bfa_isr_unhandled,	/* BFI_MC_IOIM_WRITE */
+	bfa_isr_unhandled,	/* BFI_MC_IOIM_IO */
+	bfa_ioim_isr,		/* BFI_MC_IOIM */
+	bfa_ioim_good_comp_isr,	/* BFI_MC_IOIM_IOCOM */
+	bfa_tskim_isr,		/* BFI_MC_TSKIM */
+	bfa_isr_unhandled,	/* BFI_MC_SBOOT */
+	bfa_isr_unhandled,	/* BFI_MC_IPFC */
+	bfa_isr_unhandled,	/* BFI_MC_PORT */
+	bfa_isr_unhandled,	/* --------- */
+	bfa_isr_unhandled,	/* --------- */
+	bfa_isr_unhandled,	/* --------- */
+	bfa_isr_unhandled,	/* --------- */
+	bfa_isr_unhandled,	/* --------- */
+	bfa_isr_unhandled,	/* --------- */
+	bfa_isr_unhandled,	/* --------- */
+	bfa_isr_unhandled,	/* --------- */
+	bfa_isr_unhandled,	/* --------- */
+	bfa_isr_unhandled,	/* --------- */
+};
+/*
+ * Message handlers for mailbox command classes
+ */
+static bfa_ioc_mbox_mcfunc_t  bfa_mbox_isrs[BFI_MC_MAX] = {
+	NULL,
+	NULL,		/* BFI_MC_IOC   */
+	NULL,		/* BFI_MC_DIAG  */
+	NULL,		/* BFI_MC_FLASH */
+	NULL,		/* BFI_MC_CEE   */
+	NULL,		/* BFI_MC_PORT  */
+	bfa_iocfc_isr,	/* BFI_MC_IOCFC */
+	NULL,
+};
+
+
+
+void
+__bfa_trc(struct bfa_trc_mod_s *trcm, int fileno, int line, u64 data)
+{
+	int		tail = trcm->tail;
+	struct bfa_trc_s	*trc = &trcm->trc[tail];
+
+	if (trcm->stopped)
+		return;
+
+	trc->fileno = (u16) fileno;
+	trc->line = (u16) line;
+	trc->data.u64 = data;
+	trc->timestamp = BFA_TRC_TS(trcm);
+
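+	/* Advance the tail; when the ring is full, overwrite the oldest entry */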
+	trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1);
+	if (trcm->tail == trcm->head)
+		trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1);
+}
+
+static void
+bfa_com_port_attach(struct bfa_s *bfa)
+{
+	struct bfa_port_s	*port = &bfa->modules.port;
+	struct bfa_mem_dma_s	*port_dma = BFA_MEM_PORT_DMA(bfa);
+
+	bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod);
+	bfa_port_mem_claim(port, port_dma->kva_curp, port_dma->dma_curp);
+}
+
+/*
+ * ablk module attach
+ */
+static void
+bfa_com_ablk_attach(struct bfa_s *bfa)
+{
+	struct bfa_ablk_s	*ablk = &bfa->modules.ablk;
+	struct bfa_mem_dma_s	*ablk_dma = BFA_MEM_ABLK_DMA(bfa);
+
+	bfa_ablk_attach(ablk, &bfa->ioc);
+	bfa_ablk_memclaim(ablk, ablk_dma->kva_curp, ablk_dma->dma_curp);
+}
+
+static void
+bfa_com_cee_attach(struct bfa_s *bfa)
+{
+	struct bfa_cee_s	*cee = &bfa->modules.cee;
+	struct bfa_mem_dma_s	*cee_dma = BFA_MEM_CEE_DMA(bfa);
+
+	cee->trcmod = bfa->trcmod;
+	bfa_cee_attach(cee, &bfa->ioc, bfa);
+	bfa_cee_mem_claim(cee, cee_dma->kva_curp, cee_dma->dma_curp);
+}
+
+static void
+bfa_com_sfp_attach(struct bfa_s *bfa)
+{
+	struct bfa_sfp_s	*sfp = BFA_SFP_MOD(bfa);
+	struct bfa_mem_dma_s	*sfp_dma = BFA_MEM_SFP_DMA(bfa);
+
+	bfa_sfp_attach(sfp, &bfa->ioc, bfa, bfa->trcmod);
+	bfa_sfp_memclaim(sfp, sfp_dma->kva_curp, sfp_dma->dma_curp);
+}
+
+static void
+bfa_com_flash_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
+{
+	struct bfa_flash_s	*flash = BFA_FLASH(bfa);
+	struct bfa_mem_dma_s	*flash_dma = BFA_MEM_FLASH_DMA(bfa);
+
+	bfa_flash_attach(flash, &bfa->ioc, bfa, bfa->trcmod, mincfg);
+	bfa_flash_memclaim(flash, flash_dma->kva_curp,
+			   flash_dma->dma_curp, mincfg);
+}
+
+static void
+bfa_com_diag_attach(struct bfa_s *bfa)
+{
+	struct bfa_diag_s	*diag = BFA_DIAG_MOD(bfa);
+	struct bfa_mem_dma_s	*diag_dma = BFA_MEM_DIAG_DMA(bfa);
+
+	bfa_diag_attach(diag, &bfa->ioc, bfa, bfa_fcport_beacon, bfa->trcmod);
+	bfa_diag_memclaim(diag, diag_dma->kva_curp, diag_dma->dma_curp);
+}
+
+static void
+bfa_com_phy_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
+{
+	struct bfa_phy_s	*phy = BFA_PHY(bfa);
+	struct bfa_mem_dma_s	*phy_dma = BFA_MEM_PHY_DMA(bfa);
+
+	bfa_phy_attach(phy, &bfa->ioc, bfa, bfa->trcmod, mincfg);
+	bfa_phy_memclaim(phy, phy_dma->kva_curp, phy_dma->dma_curp, mincfg);
+}
+
+static void
+bfa_com_fru_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
+{
+	struct bfa_fru_s	*fru = BFA_FRU(bfa);
+	struct bfa_mem_dma_s	*fru_dma = BFA_MEM_FRU_DMA(bfa);
+
+	bfa_fru_attach(fru, &bfa->ioc, bfa, bfa->trcmod, mincfg);
+	bfa_fru_memclaim(fru, fru_dma->kva_curp, fru_dma->dma_curp, mincfg);
+}
+
+/*
+ * BFA IOC FC related definitions
+ */
+
+/*
+ * IOC local definitions
+ */
+#define BFA_IOCFC_TOV		5000	/* msecs */
+
+enum {
+	BFA_IOCFC_ACT_NONE	= 0,
+	BFA_IOCFC_ACT_INIT	= 1,
+	BFA_IOCFC_ACT_STOP	= 2,
+	BFA_IOCFC_ACT_DISABLE	= 3,
+	BFA_IOCFC_ACT_ENABLE	= 4,
+};
+
+#define DEF_CFG_NUM_FABRICS		1
+#define DEF_CFG_NUM_LPORTS		256
+#define DEF_CFG_NUM_CQS			4
+#define DEF_CFG_NUM_IOIM_REQS		(BFA_IOIM_MAX)
+#define DEF_CFG_NUM_TSKIM_REQS		128
+#define DEF_CFG_NUM_FCXP_REQS		64
+#define DEF_CFG_NUM_UF_BUFS		64
+#define DEF_CFG_NUM_RPORTS		1024
+#define DEF_CFG_NUM_ITNIMS		(DEF_CFG_NUM_RPORTS)
+#define DEF_CFG_NUM_TINS		256
+
+#define DEF_CFG_NUM_SGPGS		2048
+#define DEF_CFG_NUM_REQQ_ELEMS		256
+#define DEF_CFG_NUM_RSPQ_ELEMS		64
+#define DEF_CFG_NUM_SBOOT_TGTS		16
+#define DEF_CFG_NUM_SBOOT_LUNS		16
+
+/*
+ * IOCFC state machine definitions/declarations
+ */
+bfa_fsm_state_decl(bfa_iocfc, stopped, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, initing, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, dconf_read, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, init_cfg_wait,
+		   struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, init_cfg_done,
+		   struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, operational,
+		   struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, dconf_write,
+		   struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, stopping, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, enabling, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, cfg_wait, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, disabling, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, disabled, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, failed, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, init_failed,
+		   struct bfa_iocfc_s, enum iocfc_event);
+
+/*
+ * forward declaration for IOC FC functions
+ */
+static void bfa_iocfc_start_submod(struct bfa_s *bfa);
+static void bfa_iocfc_disable_submod(struct bfa_s *bfa);
+static void bfa_iocfc_send_cfg(void *bfa_arg);
+static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
+static void bfa_iocfc_disable_cbfn(void *bfa_arg);
+static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
+static void bfa_iocfc_reset_cbfn(void *bfa_arg);
+static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
+static void bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete);
+static void bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl);
+static void bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl);
+static void bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl);
+
+static void
+bfa_iocfc_sm_stopped_entry(struct bfa_iocfc_s *iocfc)
+{
+}
+
+static void
+bfa_iocfc_sm_stopped(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+	bfa_trc(iocfc->bfa, event);
+
+	switch (event) {
+	case IOCFC_E_INIT:
+	case IOCFC_E_ENABLE:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_initing);
+		break;
+	default:
+		bfa_sm_fault(iocfc->bfa, event);
+		break;
+	}
+}
+
+static void
+bfa_iocfc_sm_initing_entry(struct bfa_iocfc_s *iocfc)
+{
+	bfa_ioc_enable(&iocfc->bfa->ioc);
+}
+
+static void
+bfa_iocfc_sm_initing(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+	bfa_trc(iocfc->bfa, event);
+
+	switch (event) {
+	case IOCFC_E_IOC_ENABLED:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read);
+		break;
+
+	case IOCFC_E_DISABLE:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
+		break;
+
+	case IOCFC_E_STOP:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
+		break;
+
+	case IOCFC_E_IOC_FAILED:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
+		break;
+	default:
+		bfa_sm_fault(iocfc->bfa, event);
+		break;
+	}
+}
+
+static void
+bfa_iocfc_sm_dconf_read_entry(struct bfa_iocfc_s *iocfc)
+{
+	bfa_dconf_modinit(iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_dconf_read(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+	bfa_trc(iocfc->bfa, event);
+
+	switch (event) {
+	case IOCFC_E_DCONF_DONE:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_wait);
+		break;
+
+	case IOCFC_E_DISABLE:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
+		break;
+
+	case IOCFC_E_STOP:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
+		break;
+
+	case IOCFC_E_IOC_FAILED:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
+		break;
+	default:
+		bfa_sm_fault(iocfc->bfa, event);
+		break;
+	}
+}
+
+static void
+bfa_iocfc_sm_init_cfg_wait_entry(struct bfa_iocfc_s *iocfc)
+{
+	bfa_iocfc_send_cfg(iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_init_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+	bfa_trc(iocfc->bfa, event);
+
+	switch (event) {
+	case IOCFC_E_CFG_DONE:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_done);
+		break;
+
+	case IOCFC_E_DISABLE:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
+		break;
+
+	case IOCFC_E_STOP:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
+		break;
+
+	case IOCFC_E_IOC_FAILED:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
+		break;
+	default:
+		bfa_sm_fault(iocfc->bfa, event);
+		break;
+	}
+}
+
+static void
+bfa_iocfc_sm_init_cfg_done_entry(struct bfa_iocfc_s *iocfc)
+{
+	iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
+	bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.init_hcb_qe,
+		     bfa_iocfc_init_cb, iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_init_cfg_done(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+	bfa_trc(iocfc->bfa, event);
+
+	switch (event) {
+	case IOCFC_E_START:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_operational);
+		break;
+	case IOCFC_E_STOP:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
+		break;
+	case IOCFC_E_DISABLE:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
+		break;
+	case IOCFC_E_IOC_FAILED:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
+		break;
+	default:
+		bfa_sm_fault(iocfc->bfa, event);
+		break;
+	}
+}
+
+static void
+bfa_iocfc_sm_operational_entry(struct bfa_iocfc_s *iocfc)
+{
+	bfa_fcport_init(iocfc->bfa);
+	bfa_iocfc_start_submod(iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_operational(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+	bfa_trc(iocfc->bfa, event);
+
+	switch (event) {
+	case IOCFC_E_STOP:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
+		break;
+	case IOCFC_E_DISABLE:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
+		break;
+	case IOCFC_E_IOC_FAILED:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
+		break;
+	default:
+		bfa_sm_fault(iocfc->bfa, event);
+		break;
+	}
+}
+
+static void
+bfa_iocfc_sm_dconf_write_entry(struct bfa_iocfc_s *iocfc)
+{
+	bfa_dconf_modexit(iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_dconf_write(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+	bfa_trc(iocfc->bfa, event);
+
+	switch (event) {
+	case IOCFC_E_DCONF_DONE:
+	case IOCFC_E_IOC_FAILED:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
+		break;
+	default:
+		bfa_sm_fault(iocfc->bfa, event);
+		break;
+	}
+}
+
+static void
+bfa_iocfc_sm_stopping_entry(struct bfa_iocfc_s *iocfc)
+{
+	bfa_ioc_disable(&iocfc->bfa->ioc);
+}
+
+static void
+bfa_iocfc_sm_stopping(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+	bfa_trc(iocfc->bfa, event);
+
+	switch (event) {
+	case IOCFC_E_IOC_DISABLED:
+		bfa_isr_disable(iocfc->bfa);
+		bfa_iocfc_disable_submod(iocfc->bfa);
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopped);
+		iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
+		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.stop_hcb_qe,
+			     bfa_iocfc_stop_cb, iocfc->bfa);
+		break;
+
+	case IOCFC_E_IOC_ENABLED:
+	case IOCFC_E_DCONF_DONE:
+	case IOCFC_E_CFG_DONE:
+		break;
+
+	default:
+		bfa_sm_fault(iocfc->bfa, event);
+		break;
+	}
+}
+
+static void
+bfa_iocfc_sm_enabling_entry(struct bfa_iocfc_s *iocfc)
+{
+	bfa_ioc_enable(&iocfc->bfa->ioc);
+}
+
+static void
+bfa_iocfc_sm_enabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+	bfa_trc(iocfc->bfa, event);
+
+	switch (event) {
+	case IOCFC_E_IOC_ENABLED:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait);
+		break;
+
+	case IOCFC_E_DISABLE:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
+		break;
+
+	case IOCFC_E_STOP:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
+		break;
+
+	case IOCFC_E_IOC_FAILED:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
+
+		if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
+			break;
+
+		iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
+		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
+			     bfa_iocfc_enable_cb, iocfc->bfa);
+		iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
+		break;
+	default:
+		bfa_sm_fault(iocfc->bfa, event);
+		break;
+	}
+}
+
+static void
+bfa_iocfc_sm_cfg_wait_entry(struct bfa_iocfc_s *iocfc)
+{
+	bfa_iocfc_send_cfg(iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+	bfa_trc(iocfc->bfa, event);
+
+	switch (event) {
+	case IOCFC_E_CFG_DONE:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_operational);
+		if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
+			break;
+
+		iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
+		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
+			     bfa_iocfc_enable_cb, iocfc->bfa);
+		iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
+		break;
+	case IOCFC_E_DISABLE:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
+		break;
+
+	case IOCFC_E_STOP:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
+		break;
+	case IOCFC_E_IOC_FAILED:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
+		if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
+			break;
+
+		iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
+		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
+			     bfa_iocfc_enable_cb, iocfc->bfa);
+		iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
+		break;
+	default:
+		bfa_sm_fault(iocfc->bfa, event);
+		break;
+	}
+}
+
+static void
+bfa_iocfc_sm_disabling_entry(struct bfa_iocfc_s *iocfc)
+{
+	bfa_ioc_disable(&iocfc->bfa->ioc);
+}
+
+static void
+bfa_iocfc_sm_disabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+	bfa_trc(iocfc->bfa, event);
+
+	switch (event) {
+	case IOCFC_E_IOC_DISABLED:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabled);
+		break;
+	case IOCFC_E_IOC_ENABLED:
+	case IOCFC_E_DCONF_DONE:
+	case IOCFC_E_CFG_DONE:
+		break;
+	default:
+		bfa_sm_fault(iocfc->bfa, event);
+		break;
+	}
+}
+
+static void
+bfa_iocfc_sm_disabled_entry(struct bfa_iocfc_s *iocfc)
+{
+	bfa_isr_disable(iocfc->bfa);
+	bfa_iocfc_disable_submod(iocfc->bfa);
+	iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
+	bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.dis_hcb_qe,
+		     bfa_iocfc_disable_cb, iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_disabled(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+	bfa_trc(iocfc->bfa, event);
+
+	switch (event) {
+	case IOCFC_E_STOP:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
+		break;
+	case IOCFC_E_ENABLE:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_enabling);
+		break;
+	default:
+		bfa_sm_fault(iocfc->bfa, event);
+		break;
+	}
+}
+
+static void
+bfa_iocfc_sm_failed_entry(struct bfa_iocfc_s *iocfc)
+{
+	bfa_isr_disable(iocfc->bfa);
+	bfa_iocfc_disable_submod(iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_failed(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+	bfa_trc(iocfc->bfa, event);
+
+	switch (event) {
+	case IOCFC_E_STOP:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
+		break;
+	case IOCFC_E_DISABLE:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
+		break;
+	case IOCFC_E_IOC_ENABLED:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait);
+		break;
+	case IOCFC_E_IOC_FAILED:
+		break;
+	default:
+		bfa_sm_fault(iocfc->bfa, event);
+		break;
+	}
+}
+
+static void
+bfa_iocfc_sm_init_failed_entry(struct bfa_iocfc_s *iocfc)
+{
+	bfa_isr_disable(iocfc->bfa);
+	iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
+	bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.init_hcb_qe,
+		     bfa_iocfc_init_cb, iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_init_failed(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+	bfa_trc(iocfc->bfa, event);
+
+	switch (event) {
+	case IOCFC_E_STOP:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
+		break;
+	case IOCFC_E_DISABLE:
+		bfa_ioc_disable(&iocfc->bfa->ioc);
+		break;
+	case IOCFC_E_IOC_ENABLED:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read);
+		break;
+	case IOCFC_E_IOC_DISABLED:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopped);
+		iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
+		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.dis_hcb_qe,
+			     bfa_iocfc_disable_cb, iocfc->bfa);
+		break;
+	case IOCFC_E_IOC_FAILED:
+		break;
+	default:
+		bfa_sm_fault(iocfc->bfa, event);
+		break;
+	}
+}
+
+/*
+ * BFA Interrupt handling functions
+ */
+static void
+bfa_reqq_resume(struct bfa_s *bfa, int qid)
+{
+	struct list_head *waitq, *qe, *qen;
+	struct bfa_reqq_wait_s *wqe;
+
+	waitq = bfa_reqq(bfa, qid);
+	list_for_each_safe(qe, qen, waitq) {
+		/*
+		 * Callback only as long as there is room in request queue
+		 */
+		if (bfa_reqq_full(bfa, qid))
+			break;
+
+		list_del(qe);
+		wqe = (struct bfa_reqq_wait_s *) qe;
+		wqe->qresume(wqe->cbarg);
+	}
+}
+
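+/*
+ * Drain one response queue: dispatch each message to its class handler,
+ * acknowledge the new consumer index and resume any requests waiting
+ * for queue space. Returns BFA_TRUE if any completions were processed.
+ */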
+bfa_boolean_t
+bfa_isr_rspq(struct bfa_s *bfa, int qid)
+{
+	struct bfi_msg_s *m;
+	u32	pi, ci;
+	struct list_head *waitq;
+	bfa_boolean_t ret;
+
+	ci = bfa_rspq_ci(bfa, qid);
+	pi = bfa_rspq_pi(bfa, qid);
+
+	ret = (ci != pi);
+
+	while (ci != pi) {
+		m = bfa_rspq_elem(bfa, qid, ci);
+		WARN_ON(m->mhdr.msg_class >= BFI_MC_MAX);
+
+		bfa_isrs[m->mhdr.msg_class] (bfa, m);
+		CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
+	}
+
+	/*
+	 * acknowledge RME completions and update CI
+	 */
+	bfa_isr_rspq_ack(bfa, qid, ci);
+
+	/*
+	 * Resume any pending requests in the corresponding reqq.
+	 */
+	waitq = bfa_reqq(bfa, qid);
+	if (!list_empty(waitq))
+		bfa_reqq_resume(bfa, qid);
+
+	return ret;
+}
+
+static inline void
+bfa_isr_reqq(struct bfa_s *bfa, int qid)
+{
+	struct list_head *waitq;
+
+	bfa_isr_reqq_ack(bfa, qid);
+
+	/*
+	 * Resume any pending requests in the corresponding reqq.
+	 */
+	waitq = bfa_reqq(bfa, qid);
+	if (!list_empty(waitq))
+		bfa_reqq_resume(bfa, qid);
+}
+
+void
+bfa_msix_all(struct bfa_s *bfa, int vec)
+{
+	u32	intr, qintr;
+	int	queue;
+
+	intr = readl(bfa->iocfc.bfa_regs.intr_status);
+	if (!intr)
+		return;
+
+	/*
+	 * RME completion queue interrupt
+	 */
+	qintr = intr & __HFN_INT_RME_MASK;
+	if (qintr && bfa->queue_process) {
+		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
+			bfa_isr_rspq(bfa, queue);
+	}
+
+	intr &= ~qintr;
+	if (!intr)
+		return;
+
+	/*
+	 * CPE completion queue interrupt
+	 */
+	qintr = intr & __HFN_INT_CPE_MASK;
+	if (qintr && bfa->queue_process) {
+		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
+			bfa_isr_reqq(bfa, queue);
+	}
+	intr &= ~qintr;
+	if (!intr)
+		return;
+
+	bfa_msix_lpu_err(bfa, intr);
+}
+
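+/*
+ * Legacy INTx handler; returns BFA_TRUE if the device had work pending.
+ */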
+bfa_boolean_t
+bfa_intx(struct bfa_s *bfa)
+{
+	u32 intr, qintr;
+	int queue;
+	bfa_boolean_t rspq_comp = BFA_FALSE;
+
+	intr = readl(bfa->iocfc.bfa_regs.intr_status);
+
+	qintr = intr & (__HFN_INT_RME_MASK | __HFN_INT_CPE_MASK);
+	if (qintr)
+		writel(qintr, bfa->iocfc.bfa_regs.intr_status);
+
+	/*
+	 * RME completion queues are processed unconditionally
+	 */
+	if (bfa->queue_process) {
+		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
+			if (bfa_isr_rspq(bfa, queue))
+				rspq_comp = BFA_TRUE;
+	}
+
+	if (!intr)
+		return (qintr | rspq_comp) ? BFA_TRUE : BFA_FALSE;
+
+	/*
+	 * CPE completion queue interrupt
+	 */
+	qintr = intr & __HFN_INT_CPE_MASK;
+	if (qintr && bfa->queue_process) {
+		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
+			bfa_isr_reqq(bfa, queue);
+	}
+	intr &= ~qintr;
+	if (!intr)
+		return BFA_TRUE;
+
+	if (bfa->intr_enabled)
+		bfa_msix_lpu_err(bfa, intr);
+
+	return BFA_TRUE;
+}
+
+void
+bfa_isr_enable(struct bfa_s *bfa)
+{
+	u32 umsk;
+	int port_id = bfa_ioc_portid(&bfa->ioc);
+
+	bfa_trc(bfa, bfa_ioc_pcifn(&bfa->ioc));
+	bfa_trc(bfa, port_id);
+
+	bfa_msix_ctrl_install(bfa);
+
+	if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
+		umsk = __HFN_INT_ERR_MASK_CT2;
+		umsk |= port_id == 0 ?
+			__HFN_INT_FN0_MASK_CT2 : __HFN_INT_FN1_MASK_CT2;
+	} else {
+		umsk = __HFN_INT_ERR_MASK;
+		umsk |= port_id == 0 ? __HFN_INT_FN0_MASK : __HFN_INT_FN1_MASK;
+	}
+
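+	/* Writing the status register acks pending bits before unmasking */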
+	writel(umsk, bfa->iocfc.bfa_regs.intr_status);
+	writel(~umsk, bfa->iocfc.bfa_regs.intr_mask);
+	bfa->iocfc.intr_mask = ~umsk;
+	bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
+
+	/*
+	 * Set the flag indicating successful enabling of interrupts
+	 */
+	bfa->intr_enabled = BFA_TRUE;
+}
+
+void
+bfa_isr_disable(struct bfa_s *bfa)
+{
+	bfa->intr_enabled = BFA_FALSE;
+	bfa_isr_mode_set(bfa, BFA_FALSE);
+	writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
+	bfa_msix_uninstall(bfa);
+}
+
+void
+bfa_msix_reqq(struct bfa_s *bfa, int vec)
+{
+	bfa_isr_reqq(bfa, vec - bfa->iocfc.hwif.cpe_vec_q0);
+}
+
+void
+bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
+{
+	bfa_trc(bfa, m->mhdr.msg_class);
+	bfa_trc(bfa, m->mhdr.msg_id);
+	bfa_trc(bfa, m->mhdr.mtag.i2htok);
+	WARN_ON(1);
+	bfa_trc_stop(bfa->trcmod);
+}
+
+void
+bfa_msix_rspq(struct bfa_s *bfa, int vec)
+{
+	bfa_isr_rspq(bfa, vec - bfa->iocfc.hwif.rme_vec_q0);
+}
+
+void
+bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
+{
+	u32 intr, curr_value;
+	bfa_boolean_t lpu_isr, halt_isr, pss_isr;
+
+	intr = readl(bfa->iocfc.bfa_regs.intr_status);
+
+	if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
+		halt_isr = intr & __HFN_INT_CPQ_HALT_CT2;
+		pss_isr  = intr & __HFN_INT_ERR_PSS_CT2;
+		lpu_isr  = intr & (__HFN_INT_MBOX_LPU0_CT2 |
+				   __HFN_INT_MBOX_LPU1_CT2);
+		intr    &= __HFN_INT_ERR_MASK_CT2;
+	} else {
+		halt_isr = bfa_asic_id_ct(bfa->ioc.pcidev.device_id) ?
+					  (intr & __HFN_INT_LL_HALT) : 0;
+		pss_isr  = intr & __HFN_INT_ERR_PSS;
+		lpu_isr  = intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1);
+		intr    &= __HFN_INT_ERR_MASK;
+	}
+
+	if (lpu_isr)
+		bfa_ioc_mbox_isr(&bfa->ioc);
+
+	if (intr) {
+		if (halt_isr) {
+			/*
+			 * If LL_HALT bit is set then FW Init Halt LL Port
+			 * Register needs to be cleared as well so Interrupt
+			 * Status Register will be cleared.
+			 */
+			curr_value = readl(bfa->ioc.ioc_regs.ll_halt);
+			curr_value &= ~__FW_INIT_HALT_P;
+			writel(curr_value, bfa->ioc.ioc_regs.ll_halt);
+		}
+
+		if (pss_isr) {
+			/*
+			 * The ERR_PSS bit needs to be cleared as well in case
+			 * interrupts are shared, so the driver's interrupt
+			 * handler is still called even though it is masked out.
+			 */
+			curr_value = readl(
+					bfa->ioc.ioc_regs.pss_err_status_reg);
+			writel(curr_value,
+				bfa->ioc.ioc_regs.pss_err_status_reg);
+		}
+
+		writel(intr, bfa->iocfc.bfa_regs.intr_status);
+		bfa_ioc_error_isr(&bfa->ioc);
+	}
+}
+
+/*
+ * BFA IOC FC related functions
+ */
+
+/*
+ *  BFA IOC private functions
+ */
+
+/*
+ * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
+ */
+static void
+bfa_iocfc_send_cfg(void *bfa_arg)
+{
+	struct bfa_s *bfa = bfa_arg;
+	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
+	struct bfi_iocfc_cfg_req_s cfg_req;
+	struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
+	struct bfa_iocfc_cfg_s	*cfg = &iocfc->cfg;
+	int		i;
+
+	WARN_ON(cfg->fwcfg.num_cqs > BFI_IOC_MAX_CQS);
+	bfa_trc(bfa, cfg->fwcfg.num_cqs);
+
+	bfa_iocfc_reset_queues(bfa);
+
+	/*
+	 * initialize IOC configuration info
+	 */
+	cfg_info->single_msix_vec = 0;
+	if (bfa->msix.nvecs == 1)
+		cfg_info->single_msix_vec = 1;
+	cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
+	cfg_info->num_cqs = cfg->fwcfg.num_cqs;
+	cfg_info->num_ioim_reqs = cpu_to_be16(bfa_fcpim_get_throttle_cfg(bfa,
+					       cfg->fwcfg.num_ioim_reqs));
+	cfg_info->num_fwtio_reqs = cpu_to_be16(cfg->fwcfg.num_fwtio_reqs);
+
+	bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
+	/*
+	 * dma map REQ and RSP circular queues and shadow pointers
+	 */
+	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
+		bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
+				    iocfc->req_cq_ba[i].pa);
+		bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
+				    iocfc->req_cq_shadow_ci[i].pa);
+		cfg_info->req_cq_elems[i] =
+			cpu_to_be16(cfg->drvcfg.num_reqq_elems);
+
+		bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
+				    iocfc->rsp_cq_ba[i].pa);
+		bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
+				    iocfc->rsp_cq_shadow_pi[i].pa);
+		cfg_info->rsp_cq_elems[i] =
+			cpu_to_be16(cfg->drvcfg.num_rspq_elems);
+	}
+
+	/*
+	 * Enable interrupt coalescing if this is the driver init path
+	 * and not the ioc disable/enable path.
+	 */
+	if (bfa_fsm_cmp_state(iocfc, bfa_iocfc_sm_init_cfg_wait))
+		cfg_info->intr_attr.coalesce = BFA_TRUE;
+
+	/*
+	 * dma map IOC configuration itself
+	 */
+	bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
+		    bfa_fn_lpu(bfa));
+	bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);
+
+	bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
+			  sizeof(struct bfi_iocfc_cfg_req_s));
+}
+
+static void
+bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+		   struct bfa_pcidev_s *pcidev)
+{
+	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;
+
+	bfa->bfad = bfad;
+	iocfc->bfa = bfa;
+	iocfc->cfg = *cfg;
+
+	/*
+	 * Initialize chip specific handlers.
+	 */
+	if (bfa_asic_id_ctc(bfa_ioc_devid(&bfa->ioc))) {
+		iocfc->hwif.hw_reginit = bfa_hwct_reginit;
+		iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
+		iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
+		iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
+		iocfc->hwif.hw_msix_ctrl_install = bfa_hwct_msix_ctrl_install;
+		iocfc->hwif.hw_msix_queue_install = bfa_hwct_msix_queue_install;
+		iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
+		iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
+		iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
+		iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range;
+		iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CT;
+		iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CT;
+	} else {
+		iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
+		iocfc->hwif.hw_reqq_ack = NULL;
+		iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
+		iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
+		iocfc->hwif.hw_msix_ctrl_install = bfa_hwcb_msix_ctrl_install;
+		iocfc->hwif.hw_msix_queue_install = bfa_hwcb_msix_queue_install;
+		iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
+		iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
+		iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
+		iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range;
+		iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CB +
+			bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
+		iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CB +
+			bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
+	}
+
+	if (bfa_asic_id_ct2(bfa_ioc_devid(&bfa->ioc))) {
+		iocfc->hwif.hw_reginit = bfa_hwct2_reginit;
+		iocfc->hwif.hw_isr_mode_set = NULL;
+		iocfc->hwif.hw_rspq_ack = bfa_hwct2_rspq_ack;
+	}
+
+	iocfc->hwif.hw_reginit(bfa);
+	bfa->msix.nvecs = 0;
+}
+
+static void
+bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg)
+{
+	u8	*dm_kva = NULL;
+	u64	dm_pa = 0;
+	int	i, per_reqq_sz, per_rspq_sz;
+	struct bfa_iocfc_s  *iocfc = &bfa->iocfc;
+	struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
+	struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
+	struct bfa_mem_dma_s *reqq_dma, *rspq_dma;
+
+	/* First allocate dma memory for IOC */
+	bfa_ioc_mem_claim(&bfa->ioc, bfa_mem_dma_virt(ioc_dma),
+			bfa_mem_dma_phys(ioc_dma));
+
+	/* Claim DMA-able memory for the request/response queues */
+	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
+				BFA_DMA_ALIGN_SZ);
+	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
+				BFA_DMA_ALIGN_SZ);
+
+	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
+		reqq_dma = BFA_MEM_REQQ_DMA(bfa, i);
+		iocfc->req_cq_ba[i].kva = bfa_mem_dma_virt(reqq_dma);
+		iocfc->req_cq_ba[i].pa = bfa_mem_dma_phys(reqq_dma);
+		memset(iocfc->req_cq_ba[i].kva, 0, per_reqq_sz);
+
+		rspq_dma = BFA_MEM_RSPQ_DMA(bfa, i);
+		iocfc->rsp_cq_ba[i].kva = bfa_mem_dma_virt(rspq_dma);
+		iocfc->rsp_cq_ba[i].pa = bfa_mem_dma_phys(rspq_dma);
+		memset(iocfc->rsp_cq_ba[i].kva, 0, per_rspq_sz);
+	}
+
+	/* Claim IOCFC dma memory - for shadow CI/PI */
+	dm_kva = bfa_mem_dma_virt(iocfc_dma);
+	dm_pa  = bfa_mem_dma_phys(iocfc_dma);
+
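+	/* Each shadow index gets a full cacheline; firmware updates it via DMA */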
+	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
+		iocfc->req_cq_shadow_ci[i].kva = dm_kva;
+		iocfc->req_cq_shadow_ci[i].pa = dm_pa;
+		dm_kva += BFA_CACHELINE_SZ;
+		dm_pa += BFA_CACHELINE_SZ;
+
+		iocfc->rsp_cq_shadow_pi[i].kva = dm_kva;
+		iocfc->rsp_cq_shadow_pi[i].pa = dm_pa;
+		dm_kva += BFA_CACHELINE_SZ;
+		dm_pa += BFA_CACHELINE_SZ;
+	}
+
+	/* Claim IOCFC dma memory - for the config info page */
+	bfa->iocfc.cfg_info.kva = dm_kva;
+	bfa->iocfc.cfg_info.pa = dm_pa;
+	bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
+	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
+	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
+
+	/* Claim IOCFC dma memory - for the config response */
+	bfa->iocfc.cfgrsp_dma.kva = dm_kva;
+	bfa->iocfc.cfgrsp_dma.pa = dm_pa;
+	bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;
+	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
+			BFA_CACHELINE_SZ);
+	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
+			BFA_CACHELINE_SZ);
+
+	/* Claim IOCFC kva memory */
+	bfa_ioc_debug_memclaim(&bfa->ioc, bfa_mem_kva_curp(iocfc));
+	bfa_mem_kva_curp(iocfc) += BFA_DBG_FWTRC_LEN;
+}
+
+/*
+ * Start BFA submodules.
+ */
+static void
+bfa_iocfc_start_submod(struct bfa_s *bfa)
+{
+	int		i;
+
+	bfa->queue_process = BFA_TRUE;
+	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
+		bfa_isr_rspq_ack(bfa, i, bfa_rspq_ci(bfa, i));
+
+	bfa_fcport_start(bfa);
+	bfa_uf_start(bfa);
+	/*
+	 * bfa_init() with flash read is complete. Now invalidate any stale
+	 * lun mask content, such as unit attention, rp tag and lp tag.
+	 */
+	bfa_ioim_lm_init(BFA_FCP_MOD(bfa)->bfa);
+
+	bfa->iocfc.submod_enabled = BFA_TRUE;
+}
+
+/*
+ * Disable BFA submodules.
+ */
+static void
+bfa_iocfc_disable_submod(struct bfa_s *bfa)
+{
+	if (bfa->iocfc.submod_enabled == BFA_FALSE)
+		return;
+
+	bfa_fcdiag_iocdisable(bfa);
+	bfa_fcport_iocdisable(bfa);
+	bfa_fcxp_iocdisable(bfa);
+	bfa_lps_iocdisable(bfa);
+	bfa_rport_iocdisable(bfa);
+	bfa_fcp_iocdisable(bfa);
+	bfa_dconf_iocdisable(bfa);
+
+	bfa->iocfc.submod_enabled = BFA_FALSE;
+}
+
+static void
+bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
+{
+	struct bfa_s	*bfa = bfa_arg;
+
+	if (complete)
+		bfa_cb_init(bfa->bfad, bfa->iocfc.op_status);
+}
+
+static void
+bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
+{
+	struct bfa_s  *bfa = bfa_arg;
+	struct bfad_s *bfad = bfa->bfad;
+
+	if (compl)
+		complete(&bfad->comp);
+}
+
+static void
+bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl)
+{
+	struct bfa_s	*bfa = bfa_arg;
+	struct bfad_s *bfad = bfa->bfad;
+
+	if (compl)
+		complete(&bfad->enable_comp);
+}
+
+static void
+bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
+{
+	struct bfa_s  *bfa = bfa_arg;
+	struct bfad_s *bfad = bfa->bfad;
+
+	if (compl)
+		complete(&bfad->disable_comp);
+}
+
+/*
+ * Configure queue registers from the firmware response.
+ */
+static void
+bfa_iocfc_qreg(struct bfa_s *bfa, struct bfi_iocfc_qreg_s *qreg)
+{
+	int     i;
+	struct bfa_iocfc_regs_s *r = &bfa->iocfc.bfa_regs;
+	void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
+
+	for (i = 0; i < BFI_IOC_MAX_CQS; i++) {
+		bfa->iocfc.hw_qid[i] = qreg->hw_qid[i];
+		r->cpe_q_ci[i] = kva + be32_to_cpu(qreg->cpe_q_ci_off[i]);
+		r->cpe_q_pi[i] = kva + be32_to_cpu(qreg->cpe_q_pi_off[i]);
+		r->cpe_q_ctrl[i] = kva + be32_to_cpu(qreg->cpe_qctl_off[i]);
+		r->rme_q_ci[i] = kva + be32_to_cpu(qreg->rme_q_ci_off[i]);
+		r->rme_q_pi[i] = kva + be32_to_cpu(qreg->rme_q_pi_off[i]);
+		r->rme_q_ctrl[i] = kva + be32_to_cpu(qreg->rme_qctl_off[i]);
+	}
+}
+
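+/*
+ * Resize driver resource pools to the counts the firmware actually
+ * provisioned.
+ */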
+static void
+bfa_iocfc_res_recfg(struct bfa_s *bfa, struct bfa_iocfc_fwcfg_s *fwcfg)
+{
+	struct bfa_iocfc_s	*iocfc   = &bfa->iocfc;
+	struct bfi_iocfc_cfg_s	*cfg_info = iocfc->cfginfo;
+
+	bfa_fcxp_res_recfg(bfa, fwcfg->num_fcxp_reqs);
+	bfa_uf_res_recfg(bfa, fwcfg->num_uf_bufs);
+	bfa_rport_res_recfg(bfa, fwcfg->num_rports);
+	bfa_fcp_res_recfg(bfa, cpu_to_be16(cfg_info->num_ioim_reqs),
+			  fwcfg->num_ioim_reqs);
+	bfa_tskim_res_recfg(bfa, fwcfg->num_tskim_reqs);
+}
+
+/*
+ * Update BFA configuration from firmware configuration.
+ */
+static void
+bfa_iocfc_cfgrsp(struct bfa_s *bfa)
+{
+	struct bfa_iocfc_s		*iocfc	 = &bfa->iocfc;
+	struct bfi_iocfc_cfgrsp_s	*cfgrsp	 = iocfc->cfgrsp;
+	struct bfa_iocfc_fwcfg_s	*fwcfg	 = &cfgrsp->fwcfg;
+
+	/* num_cqs needs no byte swapping */
+	fwcfg->num_ioim_reqs  = be16_to_cpu(fwcfg->num_ioim_reqs);
+	fwcfg->num_fwtio_reqs = be16_to_cpu(fwcfg->num_fwtio_reqs);
+	fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs);
+	fwcfg->num_fcxp_reqs  = be16_to_cpu(fwcfg->num_fcxp_reqs);
+	fwcfg->num_uf_bufs    = be16_to_cpu(fwcfg->num_uf_bufs);
+	fwcfg->num_rports     = be16_to_cpu(fwcfg->num_rports);
+
+	/*
+	 * configure queue register offsets as learnt from firmware
+	 */
+	bfa_iocfc_qreg(bfa, &cfgrsp->qreg);
+
+	/*
+	 * Re-configure resources as learnt from firmware
+	 */
+	bfa_iocfc_res_recfg(bfa, fwcfg);
+
+	/*
+	 * Install MSIX queue handlers
+	 */
+	bfa_msix_queue_install(bfa);
+
+	if (bfa->iocfc.cfgrsp->pbc_cfg.pbc_pwwn != 0) {
+		bfa->ioc.attr->pwwn = bfa->iocfc.cfgrsp->pbc_cfg.pbc_pwwn;
+		bfa->ioc.attr->nwwn = bfa->iocfc.cfgrsp->pbc_cfg.pbc_nwwn;
+		bfa_fsm_send_event(iocfc, IOCFC_E_CFG_DONE);
+	}
+}
+
+void
+bfa_iocfc_reset_queues(struct bfa_s *bfa)
+{
+	int		q;
+
+	for (q = 0; q < BFI_IOC_MAX_CQS; q++) {
+		bfa_reqq_ci(bfa, q) = 0;
+		bfa_reqq_pi(bfa, q) = 0;
+		bfa_rspq_ci(bfa, q) = 0;
+		bfa_rspq_pi(bfa, q) = 0;
+	}
+}
+
+/*
+ *	Process FAA pwwn msg from fw.
+ */
+static void
+bfa_iocfc_process_faa_addr(struct bfa_s *bfa, struct bfi_faa_addr_msg_s *msg)
+{
+	struct bfa_iocfc_s		*iocfc   = &bfa->iocfc;
+	struct bfi_iocfc_cfgrsp_s	*cfgrsp  = iocfc->cfgrsp;
+
+	cfgrsp->pbc_cfg.pbc_pwwn = msg->pwwn;
+	cfgrsp->pbc_cfg.pbc_nwwn = msg->nwwn;
+
+	bfa->ioc.attr->pwwn = msg->pwwn;
+	bfa->ioc.attr->nwwn = msg->nwwn;
+	bfa_fsm_send_event(iocfc, IOCFC_E_CFG_DONE);
+}
+
+/* Fabric Assigned Address specific functions */
+
+/*
+ *	Check whether IOC is ready before sending command down
+ */
+static bfa_status_t
+bfa_faa_validate_request(struct bfa_s *bfa)
+{
+	enum bfa_ioc_type_e	ioc_type = bfa_get_type(bfa);
+	u32	card_type = bfa->ioc.attr->card_type;
+
+	if (bfa_ioc_is_operational(&bfa->ioc)) {
+		if ((ioc_type != BFA_IOC_TYPE_FC) || bfa_mfg_is_mezz(card_type))
+			return BFA_STATUS_FEATURE_NOT_SUPPORTED;
+	} else {
+		return BFA_STATUS_IOC_NON_OP;
+	}
+
+	return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
+		bfa_cb_iocfc_t cbfn, void *cbarg)
+{
+	struct bfi_faa_query_s  faa_attr_req;
+	struct bfa_iocfc_s      *iocfc = &bfa->iocfc;
+	bfa_status_t            status;
+
+	status = bfa_faa_validate_request(bfa);
+	if (status != BFA_STATUS_OK)
+		return status;
+
+	if (iocfc->faa_args.busy == BFA_TRUE)
+		return BFA_STATUS_DEVBUSY;
+
+	iocfc->faa_args.faa_attr = attr;
+	iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
+	iocfc->faa_args.faa_cb.faa_cbarg = cbarg;
+
+	iocfc->faa_args.busy = BFA_TRUE;
+	memset(&faa_attr_req, 0, sizeof(struct bfi_faa_query_s));
+	bfi_h2i_set(faa_attr_req.mh, BFI_MC_IOCFC,
+		BFI_IOCFC_H2I_FAA_QUERY_REQ, bfa_fn_lpu(bfa));
+
+	bfa_ioc_mbox_send(&bfa->ioc, &faa_attr_req,
+		sizeof(struct bfi_faa_query_s));
+
+	return BFA_STATUS_OK;
+}
+
+/*
+ *	FAA query response
+ */
+static void
+bfa_faa_query_reply(struct bfa_iocfc_s *iocfc,
+		bfi_faa_query_rsp_t *rsp)
+{
+	void	*cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
+
+	if (iocfc->faa_args.faa_attr) {
+		iocfc->faa_args.faa_attr->faa = rsp->faa;
+		iocfc->faa_args.faa_attr->faa_state = rsp->faa_status;
+		iocfc->faa_args.faa_attr->pwwn_source = rsp->addr_source;
+	}
+
+	WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);
+
+	iocfc->faa_args.faa_cb.faa_cbfn(cbarg, BFA_STATUS_OK);
+	iocfc->faa_args.busy = BFA_FALSE;
+}
+
+/*
+ * IOC enable request is complete
+ */
+static void
+bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
+{
+	struct bfa_s	*bfa = bfa_arg;
+
+	if (status == BFA_STATUS_OK)
+		bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_ENABLED);
+	else
+		bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_FAILED);
+}
+
+/*
+ * IOC disable request is complete
+ */
+static void
+bfa_iocfc_disable_cbfn(void *bfa_arg)
+{
+	struct bfa_s	*bfa = bfa_arg;
+
+	bfa->queue_process = BFA_FALSE;
+	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_DISABLED);
+}
+
+/*
+ * Notify sub-modules of hardware failure.
+ */
+static void
+bfa_iocfc_hbfail_cbfn(void *bfa_arg)
+{
+	struct bfa_s	*bfa = bfa_arg;
+
+	bfa->queue_process = BFA_FALSE;
+	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_FAILED);
+}
+
+/*
+ * Actions on chip-reset completion.
+ */
+static void
+bfa_iocfc_reset_cbfn(void *bfa_arg)
+{
+	struct bfa_s	*bfa = bfa_arg;
+
+	bfa_iocfc_reset_queues(bfa);
+	bfa_isr_enable(bfa);
+}
+
+/*
+ * Query IOC memory requirement information.
+ */
+void
+bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
+		  struct bfa_s *bfa)
+{
+	int q, per_reqq_sz, per_rspq_sz;
+	struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
+	struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
+	struct bfa_mem_kva_s *iocfc_kva = BFA_MEM_IOCFC_KVA(bfa);
+	u32	dm_len = 0;
+
+	/* dma memory setup for IOC */
+	bfa_mem_dma_setup(meminfo, ioc_dma,
+		BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ));
+
+	/* dma memory setup for REQ/RSP queues */
+	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
+				BFA_DMA_ALIGN_SZ);
+	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
+				BFA_DMA_ALIGN_SZ);
+
+	for (q = 0; q < cfg->fwcfg.num_cqs; q++) {
+		bfa_mem_dma_setup(meminfo, BFA_MEM_REQQ_DMA(bfa, q),
+				per_reqq_sz);
+		bfa_mem_dma_setup(meminfo, BFA_MEM_RSPQ_DMA(bfa, q),
+				per_rspq_sz);
+	}
+
+	/* IOCFC dma memory - calculate Shadow CI/PI size */
+	for (q = 0; q < cfg->fwcfg.num_cqs; q++)
+		dm_len += (2 * BFA_CACHELINE_SZ);
+
+	/* IOCFC dma memory - calculate config info / rsp size */
+	dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
+	dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
+			BFA_CACHELINE_SZ);
+
+	/* dma memory setup for IOCFC */
+	bfa_mem_dma_setup(meminfo, iocfc_dma, dm_len);
+
+	/* kva memory setup for IOCFC */
+	bfa_mem_kva_setup(meminfo, iocfc_kva, BFA_DBG_FWTRC_LEN);
+}
+
+/*
+ * Attach and initialize the IOCFC module.
+ */
+void
+bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+		 struct bfa_pcidev_s *pcidev)
+{
+	int		i;
+	struct bfa_ioc_s *ioc = &bfa->ioc;
+
+	bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn;
+	bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn;
+	bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn;
+	bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn;
+
+	ioc->trcmod = bfa->trcmod;
+	bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod);
+
+	bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_PCIFN_CLASS_FC);
+	bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);
+
+	bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
+	bfa_iocfc_mem_claim(bfa, cfg);
+	INIT_LIST_HEAD(&bfa->timer_mod.timer_q);
+
+	INIT_LIST_HEAD(&bfa->comp_q);
+	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
+		INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
+
+	bfa->iocfc.cb_reqd = BFA_FALSE;
+	bfa->iocfc.op_status = BFA_STATUS_OK;
+	bfa->iocfc.submod_enabled = BFA_FALSE;
+
+	bfa_fsm_set_state(&bfa->iocfc, bfa_iocfc_sm_stopped);
+}
+
+/*
+ * Kick off IOCFC initialization by sending the INIT event to its
+ * state machine.
+ */
+void
+bfa_iocfc_init(struct bfa_s *bfa)
+{
+	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_INIT);
+}
+
+/*
+ * IOC start called from bfa_start(). Called to start IOC operations
+ * at driver instantiation for this instance.
+ */
+void
+bfa_iocfc_start(struct bfa_s *bfa)
+{
+	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_START);
+}
+
+/*
+ * IOC stop called from bfa_stop(). Called only when the driver is unloaded
+ * for this instance.
+ */
+void
+bfa_iocfc_stop(struct bfa_s *bfa)
+{
+	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_STOP);
+}
+
+void
+bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
+{
+	struct bfa_s		*bfa = bfaarg;
+	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;
+	union bfi_iocfc_i2h_msg_u	*msg;
+
+	msg = (union bfi_iocfc_i2h_msg_u *) m;
+	bfa_trc(bfa, msg->mh.msg_id);
+
+	switch (msg->mh.msg_id) {
+	case BFI_IOCFC_I2H_CFG_REPLY:
+		bfa_iocfc_cfgrsp(bfa);
+		break;
+	case BFI_IOCFC_I2H_UPDATEQ_RSP:
+		iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
+		break;
+	case BFI_IOCFC_I2H_ADDR_MSG:
+		bfa_iocfc_process_faa_addr(bfa,
+				(struct bfi_faa_addr_msg_s *)msg);
+		break;
+	case BFI_IOCFC_I2H_FAA_QUERY_RSP:
+		bfa_faa_query_reply(iocfc, (bfi_faa_query_rsp_t *)msg);
+		break;
+	default:
+		WARN_ON(1);
+	}
+}
+
+void
+bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
+{
+	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;
+
+	attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;
+
+	attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
+				be16_to_cpu(iocfc->cfginfo->intr_attr.delay) :
+				be16_to_cpu(iocfc->cfgrsp->intr_attr.delay);
+
+	attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
+			be16_to_cpu(iocfc->cfginfo->intr_attr.latency) :
+			be16_to_cpu(iocfc->cfgrsp->intr_attr.latency);
+
+	attr->config	= iocfc->cfg;
+}
+
+bfa_status_t
+bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
+{
+	struct bfa_iocfc_s		*iocfc = &bfa->iocfc;
+	struct bfi_iocfc_set_intr_req_s *m;
+
+	iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
+	iocfc->cfginfo->intr_attr.delay = cpu_to_be16(attr->delay);
+	iocfc->cfginfo->intr_attr.latency = cpu_to_be16(attr->latency);
+
+	if (!bfa_iocfc_is_operational(bfa))
+		return BFA_STATUS_OK;
+
+	m = bfa_reqq_next(bfa, BFA_REQQ_IOC);
+	if (!m)
+		return BFA_STATUS_DEVBUSY;
+
+	bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
+		    bfa_fn_lpu(bfa));
+	m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
+	m->delay    = iocfc->cfginfo->intr_attr.delay;
+	m->latency  = iocfc->cfginfo->intr_attr.latency;
+
+	bfa_trc(bfa, attr->delay);
+	bfa_trc(bfa, attr->latency);
+
+	bfa_reqq_produce(bfa, BFA_REQQ_IOC, m->mh);
+	return BFA_STATUS_OK;
+}
+
+void
+bfa_iocfc_set_snsbase(struct bfa_s *bfa, int seg_no, u64 snsbase_pa)
+{
+	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;
+
+	iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
+	bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase[seg_no], snsbase_pa);
+}
+
+/*
+ * Enable IOC after it is disabled.
+ */
+void
+bfa_iocfc_enable(struct bfa_s *bfa)
+{
+	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
+		     "IOC Enable");
+	bfa->iocfc.cb_reqd = BFA_TRUE;
+	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_ENABLE);
+}
+
+void
+bfa_iocfc_disable(struct bfa_s *bfa)
+{
+	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
+		     "IOC Disable");
+
+	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DISABLE);
+}
+
+bfa_boolean_t
+bfa_iocfc_is_operational(struct bfa_s *bfa)
+{
+	return bfa_ioc_is_operational(&bfa->ioc) &&
+		bfa_fsm_cmp_state(&bfa->iocfc, bfa_iocfc_sm_operational);
+}
+
+/*
+ * Return boot target port wwns -- read from boot information in flash.
+ */
+void
+bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns)
+{
+	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
+	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
+	int i;
+
+	if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) {
+		bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns);
+		*nwwns = cfgrsp->pbc_cfg.nbluns;
+		for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++)
+			wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn;
+
+		return;
+	}
+
+	*nwwns = cfgrsp->bootwwns.nwwns;
+	memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn));
+}
+
+int
+bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
+{
+	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
+	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
+
+	memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport));
+	return cfgrsp->pbc_cfg.nvports;
+}
+
+
+/*
+ * Use this function to query the memory requirement of the BFA library.
+ * It must be called before bfa_attach() to get the memory required by
+ * the BFA layer for a given driver configuration.
+ *
+ * This call will fail if the cap is out of range compared to the
+ * pre-defined values within the BFA library.
+ *
+ * @param[in] cfg -	pointer to bfa_ioc_cfg_t. Driver layer should indicate
+ *			its configuration in this structure.
+ *			The default values for struct bfa_iocfc_cfg_s can be
+ *			fetched using bfa_cfg_get_default() API.
+ *
+ *			If cap's boundary check fails, the library will use
+ *			the default bfa_cap_t values (and log a warning msg).
+ *
+ * @param[out] meminfo - pointer to bfa_meminfo_t. This content
+ *			indicates the memory type (see bfa_mem_type_t) and
+ *			amount of memory required.
+ *
+ *			Driver should allocate the memory, populate the
+ *			starting address for each block and provide the same
+ *			structure as input parameter to bfa_attach() call.
+ *
+ * @param[in] bfa -	pointer to the bfa structure, used while fetching the
+ *			dma, kva memory information of the bfa sub-modules.
+ *
+ * @return void
+ *
+ * Special Considerations: @note
+ */
+void
+bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
+		struct bfa_s *bfa)
+{
+	struct bfa_mem_dma_s *port_dma = BFA_MEM_PORT_DMA(bfa);
+	struct bfa_mem_dma_s *ablk_dma = BFA_MEM_ABLK_DMA(bfa);
+	struct bfa_mem_dma_s *cee_dma = BFA_MEM_CEE_DMA(bfa);
+	struct bfa_mem_dma_s *sfp_dma = BFA_MEM_SFP_DMA(bfa);
+	struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa);
+	struct bfa_mem_dma_s *diag_dma = BFA_MEM_DIAG_DMA(bfa);
+	struct bfa_mem_dma_s *phy_dma = BFA_MEM_PHY_DMA(bfa);
+	struct bfa_mem_dma_s *fru_dma = BFA_MEM_FRU_DMA(bfa);
+
+	WARN_ON((cfg == NULL) || (meminfo == NULL));
+
+	memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));
+
+	/* Initialize the DMA & KVA meminfo queues */
+	INIT_LIST_HEAD(&meminfo->dma_info.qe);
+	INIT_LIST_HEAD(&meminfo->kva_info.qe);
+
+	bfa_iocfc_meminfo(cfg, meminfo, bfa);
+	bfa_sgpg_meminfo(cfg, meminfo, bfa);
+	bfa_fcport_meminfo(cfg, meminfo, bfa);
+	bfa_fcxp_meminfo(cfg, meminfo, bfa);
+	bfa_lps_meminfo(cfg, meminfo, bfa);
+	bfa_uf_meminfo(cfg, meminfo, bfa);
+	bfa_rport_meminfo(cfg, meminfo, bfa);
+	bfa_fcp_meminfo(cfg, meminfo, bfa);
+	bfa_dconf_meminfo(cfg, meminfo, bfa);
+
+	/* dma info setup */
+	bfa_mem_dma_setup(meminfo, port_dma, bfa_port_meminfo());
+	bfa_mem_dma_setup(meminfo, ablk_dma, bfa_ablk_meminfo());
+	bfa_mem_dma_setup(meminfo, cee_dma, bfa_cee_meminfo());
+	bfa_mem_dma_setup(meminfo, sfp_dma, bfa_sfp_meminfo());
+	bfa_mem_dma_setup(meminfo, flash_dma,
+			  bfa_flash_meminfo(cfg->drvcfg.min_cfg));
+	bfa_mem_dma_setup(meminfo, diag_dma, bfa_diag_meminfo());
+	bfa_mem_dma_setup(meminfo, phy_dma,
+			  bfa_phy_meminfo(cfg->drvcfg.min_cfg));
+	bfa_mem_dma_setup(meminfo, fru_dma,
+			  bfa_fru_meminfo(cfg->drvcfg.min_cfg));
+}
+
+/*
+ * Use this function to attach the driver instance to the BFA library.
+ * It does not trigger any HW initialization; that is done later by the
+ * bfa_init() call.
+ *
+ * This call will fail if the cap is out of range compared to the
+ * pre-defined values within the BFA library.
+ *
+ * @param[out]	bfa	Pointer to bfa_t.
+ * @param[in]	bfad	Opaque handle back to the driver's IOC structure
+ * @param[in]	cfg	Pointer to bfa_ioc_cfg_t. Should be same structure
+ *			that was used in bfa_cfg_get_meminfo().
+ * @param[in]	meminfo	Pointer to bfa_meminfo_t. The driver should
+ *			use the bfa_cfg_get_meminfo() call to
+ *			find the memory blocks required, allocate the
+ *			required memory and provide the starting addresses.
+ * @param[in]	pcidev	pointer to struct bfa_pcidev_s
+ *
+ * @return
+ * void
+ *
+ * Special Considerations:
+ *
+ * @note
+ *
+ */
+void
+bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+	       struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
+{
+	struct bfa_mem_dma_s *dma_info, *dma_elem;
+	struct bfa_mem_kva_s *kva_info, *kva_elem;
+	struct list_head *dm_qe, *km_qe;
+
+	bfa->fcs = BFA_FALSE;
+
+	WARN_ON((cfg == NULL) || (meminfo == NULL));
+
+	/* Initialize memory pointers for iterative allocation */
+	dma_info = &meminfo->dma_info;
+	dma_info->kva_curp = dma_info->kva;
+	dma_info->dma_curp = dma_info->dma;
+
+	kva_info = &meminfo->kva_info;
+	kva_info->kva_curp = kva_info->kva;
+
+	list_for_each(dm_qe, &dma_info->qe) {
+		dma_elem = (struct bfa_mem_dma_s *) dm_qe;
+		dma_elem->kva_curp = dma_elem->kva;
+		dma_elem->dma_curp = dma_elem->dma;
+	}
+
+	list_for_each(km_qe, &kva_info->qe) {
+		kva_elem = (struct bfa_mem_kva_s *) km_qe;
+		kva_elem->kva_curp = kva_elem->kva;
+	}
+
+	bfa_iocfc_attach(bfa, bfad, cfg, pcidev);
+	bfa_fcdiag_attach(bfa, bfad, cfg, pcidev);
+	bfa_sgpg_attach(bfa, bfad, cfg, pcidev);
+	bfa_fcport_attach(bfa, bfad, cfg, pcidev);
+	bfa_fcxp_attach(bfa, bfad, cfg, pcidev);
+	bfa_lps_attach(bfa, bfad, cfg, pcidev);
+	bfa_uf_attach(bfa, bfad, cfg, pcidev);
+	bfa_rport_attach(bfa, bfad, cfg, pcidev);
+	bfa_fcp_attach(bfa, bfad, cfg, pcidev);
+	bfa_dconf_attach(bfa, bfad, cfg);
+	bfa_com_port_attach(bfa);
+	bfa_com_ablk_attach(bfa);
+	bfa_com_cee_attach(bfa);
+	bfa_com_sfp_attach(bfa);
+	bfa_com_flash_attach(bfa, cfg->drvcfg.min_cfg);
+	bfa_com_diag_attach(bfa);
+	bfa_com_phy_attach(bfa, cfg->drvcfg.min_cfg);
+	bfa_com_fru_attach(bfa, cfg->drvcfg.min_cfg);
+}
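+
+/*
+ * Typical driver-side call sequence for the two-phase memory protocol
+ * described above (a minimal sketch; error handling and the walk of the
+ * meminfo DMA/KVA queues that allocates and assigns each block are
+ * elided, and bfa/bfad/pcidev are assumed to be set up by the driver):
+ *
+ *	struct bfa_iocfc_cfg_s cfg;
+ *	struct bfa_meminfo_s meminfo;
+ *
+ *	bfa_cfg_get_default(&cfg);
+ *	bfa_cfg_get_meminfo(&cfg, &meminfo, bfa);
+ *	(allocate memory and populate the kva/dma base of every element
+ *	 queued on meminfo.dma_info.qe and meminfo.kva_info.qe)
+ *	bfa_attach(bfa, bfad, &cfg, &meminfo, pcidev);
+ */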
+
+/*
+ * Use this function to delete a BFA IOC. IOC should be stopped (by
+ * calling bfa_stop()) before this function call.
+ *
+ * @param[in] bfa - pointer to bfa_t.
+ *
+ * @return
+ * void
+ *
+ * Special Considerations:
+ *
+ * @note
+ */
+void
+bfa_detach(struct bfa_s *bfa)
+{
+	bfa_ioc_detach(&bfa->ioc);
+}
+
+void
+bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q)
+{
+	INIT_LIST_HEAD(comp_q);
+	list_splice_tail_init(&bfa->comp_q, comp_q);
+}
+
+void
+bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
+{
+	struct list_head		*qe;
+	struct list_head		*qen;
+	struct bfa_cb_qe_s	*hcb_qe;
+	bfa_cb_cbfn_status_t	cbfn;
+
+	list_for_each_safe(qe, qen, comp_q) {
+		hcb_qe = (struct bfa_cb_qe_s *) qe;
+		if (hcb_qe->pre_rmv) {
+			/* qe is invalid after return, dequeue before cbfn() */
+			list_del(qe);
+			cbfn = (bfa_cb_cbfn_status_t)(hcb_qe->cbfn);
+			cbfn(hcb_qe->cbarg, hcb_qe->fw_status);
+		} else
+			hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
+	}
+}
+
+void
+bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
+{
+	struct list_head		*qe;
+	struct bfa_cb_qe_s	*hcb_qe;
+
+	while (!list_empty(comp_q)) {
+		bfa_q_deq(comp_q, &qe);
+		hcb_qe = (struct bfa_cb_qe_s *) qe;
+		WARN_ON(hcb_qe->pre_rmv);
+		hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE);
+	}
+}
+
+/*
+ * Return the list of PCI vendor/device IDs supported by this
+ * BFA instance.
+ */
+void
+bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
+{
+	static struct bfa_pciid_s __pciids[] = {
+		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G2P},
+		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G1P},
+		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT},
+		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC},
+	};
+
+	*npciids = ARRAY_SIZE(__pciids);
+	*pciids = __pciids;
+}
+
+/*
+ * Use this function to query the default struct bfa_iocfc_cfg_s values
+ * (compiled into the BFA layer). The OS driver can then overwrite the
+ * entries that have been configured by the user.
+ *
+ * @param[in] cfg - pointer to bfa_ioc_cfg_t
+ *
+ * @return
+ *	void
+ *
+ * Special Considerations:
+ * @note
+ */
+void
+bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg)
+{
+	cfg->fwcfg.num_fabrics = DEF_CFG_NUM_FABRICS;
+	cfg->fwcfg.num_lports = DEF_CFG_NUM_LPORTS;
+	cfg->fwcfg.num_rports = DEF_CFG_NUM_RPORTS;
+	cfg->fwcfg.num_ioim_reqs = DEF_CFG_NUM_IOIM_REQS;
+	cfg->fwcfg.num_tskim_reqs = DEF_CFG_NUM_TSKIM_REQS;
+	cfg->fwcfg.num_fcxp_reqs = DEF_CFG_NUM_FCXP_REQS;
+	cfg->fwcfg.num_uf_bufs = DEF_CFG_NUM_UF_BUFS;
+	cfg->fwcfg.num_cqs = DEF_CFG_NUM_CQS;
+	cfg->fwcfg.num_fwtio_reqs = 0;
+
+	cfg->drvcfg.num_reqq_elems = DEF_CFG_NUM_REQQ_ELEMS;
+	cfg->drvcfg.num_rspq_elems = DEF_CFG_NUM_RSPQ_ELEMS;
+	cfg->drvcfg.num_sgpgs = DEF_CFG_NUM_SGPGS;
+	cfg->drvcfg.num_sboot_tgts = DEF_CFG_NUM_SBOOT_TGTS;
+	cfg->drvcfg.num_sboot_luns = DEF_CFG_NUM_SBOOT_LUNS;
+	cfg->drvcfg.path_tov = BFA_FCPIM_PATHTOV_DEF;
+	cfg->drvcfg.ioc_recover = BFA_FALSE;
+	cfg->drvcfg.delay_comp = BFA_FALSE;
+}
+
+void
+bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg)
+{
+	bfa_cfg_get_default(cfg);
+	cfg->fwcfg.num_ioim_reqs   = BFA_IOIM_MIN;
+	cfg->fwcfg.num_tskim_reqs  = BFA_TSKIM_MIN;
+	cfg->fwcfg.num_fcxp_reqs   = BFA_FCXP_MIN;
+	cfg->fwcfg.num_uf_bufs     = BFA_UF_MIN;
+	cfg->fwcfg.num_rports      = BFA_RPORT_MIN;
+	cfg->fwcfg.num_fwtio_reqs = 0;
+
+	cfg->drvcfg.num_sgpgs      = BFA_SGPG_MIN;
+	cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN;
+	cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN;
+	cfg->drvcfg.min_cfg	   = BFA_TRUE;
+}
diff --git a/drivers/scsi/bfa/bfa_cs.h b/drivers/scsi/bfa/bfa_cs.h
new file mode 100644
index 0000000..9685efc
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_cs.h
@@ -0,0 +1,334 @@
+/*
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014- QLogic Corporation.
+ * All rights reserved
+ * www.qlogic.com
+ *
+ * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+/*
+ *  bfa_cs.h BFA common services
+ */
+
+#ifndef __BFA_CS_H__
+#define __BFA_CS_H__
+
+#include "bfad_drv.h"
+
+/*
+ * BFA TRC
+ */
+
+#ifndef BFA_TRC_MAX
+#define BFA_TRC_MAX	(4 * 1024)
+#endif
+
+#define BFA_TRC_TS(_trcm)                               \
+	({                                              \
+		struct timespec64 ts;                   \
+							\
+		ktime_get_ts64(&ts);                    \
+		(ts.tv_sec * 1000000 + ts.tv_nsec / 1000);  \
+	})
+
+#ifndef BFA_TRC_TS
+#define BFA_TRC_TS(_trcm)	((_trcm)->ticks++)
+#endif
+
+struct bfa_trc_s {
+#ifdef __BIG_ENDIAN
+	u16	fileno;
+	u16	line;
+#else
+	u16	line;
+	u16	fileno;
+#endif
+	u32	timestamp;
+	union {
+		struct {
+			u32	rsvd;
+			u32	u32;
+		} u32;
+		u64	u64;
+	} data;
+};
+
+struct bfa_trc_mod_s {
+	u32	head;
+	u32	tail;
+	u32	ntrc;
+	u32	stopped;
+	u32	ticks;
+	u32	rsvd[3];
+	struct bfa_trc_s trc[BFA_TRC_MAX];
+};
+
+enum {
+	BFA_TRC_HAL  = 1,	/*  BFA modules */
+	BFA_TRC_FCS  = 2,	/*  BFA FCS modules */
+	BFA_TRC_LDRV = 3,	/*  Linux driver modules */
+	BFA_TRC_CNA  = 4,	/*  Common modules */
+};
+#define BFA_TRC_MOD_SH	10
+#define BFA_TRC_MOD(__mod)	((BFA_TRC_ ## __mod) << BFA_TRC_MOD_SH)
+
+/*
+ * Define a new tracing file (module). Module should match one defined above.
+ */
+#define BFA_TRC_FILE(__mod, __submod)					\
+	static int __trc_fileno = ((BFA_TRC_ ## __mod ## _ ## __submod) | \
+						 BFA_TRC_MOD(__mod))
+
+
+#define bfa_trc32(_trcp, _data)	\
+	__bfa_trc((_trcp)->trcmod, __trc_fileno, __LINE__, (u32)_data)
+#define bfa_trc(_trcp, _data)	\
+	__bfa_trc((_trcp)->trcmod, __trc_fileno, __LINE__, (u64)_data)
+
+static inline void
+bfa_trc_init(struct bfa_trc_mod_s *trcm)
+{
+	trcm->head = trcm->tail = trcm->stopped = 0;
+	trcm->ntrc = BFA_TRC_MAX;
+}
+
+static inline void
+bfa_trc_stop(struct bfa_trc_mod_s *trcm)
+{
+	trcm->stopped = 1;
+}
+
+void
+__bfa_trc(struct bfa_trc_mod_s *trcm, int fileno, int line, u64 data);
+
+void
+__bfa_trc32(struct bfa_trc_mod_s *trcm, int fileno, int line, u32 data);
+
+#define bfa_sm_fault(__mod, __event)	do {				\
+	bfa_trc(__mod, (((u32)0xDEAD << 16) | __event));		\
+	printk(KERN_ERR	"Assertion failure: %s:%d: %d\n",		\
+		__FILE__, __LINE__, (__event));				\
+} while (0)
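+
+/*
+ * Typically invoked from the default arm of a state handler's switch:
+ *
+ *	default:
+ *		bfa_sm_fault(ioc, event);
+ */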
+
+/* BFA queue definitions */
+#define bfa_q_first(_q) ((void *)(((struct list_head *) (_q))->next))
+#define bfa_q_next(_qe) (((struct list_head *) (_qe))->next)
+#define bfa_q_prev(_qe) (((struct list_head *) (_qe))->prev)
+
+/*
+ * bfa_q_qe_init - initialize a queue element
+ */
+#define bfa_q_qe_init(_qe) {				\
+	bfa_q_next(_qe) = (struct list_head *) NULL;	\
+	bfa_q_prev(_qe) = (struct list_head *) NULL;	\
+}
+
+/*
+ * bfa_q_deq - dequeue an element from the head of the queue
+ */
+#define bfa_q_deq(_q, _qe) do {						\
+	if (!list_empty(_q)) {						\
+		(*((struct list_head **) (_qe))) = bfa_q_next(_q);	\
+		bfa_q_prev(bfa_q_next(*((struct list_head **) _qe))) =	\
+				(struct list_head *) (_q);		\
+		bfa_q_next(_q) = bfa_q_next(*((struct list_head **) _qe));\
+	} else {							\
+		*((struct list_head **) (_qe)) = (struct list_head *) NULL;\
+	}								\
+} while (0)
+
+/*
+ * bfa_q_deq_tail - dequeue an element from the tail of the queue
+ */
+#define bfa_q_deq_tail(_q, _qe) {					\
+	if (!list_empty(_q)) {						\
+		*((struct list_head **) (_qe)) = bfa_q_prev(_q);	\
+		bfa_q_next(bfa_q_prev(*((struct list_head **) _qe))) =	\
+			(struct list_head *) (_q);			\
+		bfa_q_prev(_q) = bfa_q_prev(*(struct list_head **) _qe);\
+	} else {							\
+		*((struct list_head **) (_qe)) = (struct list_head *) NULL;\
+	}								\
+}
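+
+/*
+ * Example (illustrative): draining a queue with bfa_q_deq(), in the
+ * style of bfa_comp_free() in bfa_core.c:
+ *
+ *	struct list_head *qe;
+ *
+ *	while (!list_empty(q)) {
+ *		bfa_q_deq(q, &qe);
+ *		(process the dequeued element qe)
+ *	}
+ */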
+
+static inline int
+bfa_q_is_on_q_func(struct list_head *q, struct list_head *qe)
+{
+	struct list_head        *tqe;
+
+	tqe = bfa_q_next(q);
+	while (tqe != q) {
+		if (tqe == qe)
+			return 1;
+		tqe = bfa_q_next(tqe);
+		if (tqe == NULL)
+			break;
+	}
+	return 0;
+}
+
+#define bfa_q_is_on_q(_q, _qe)      \
+	bfa_q_is_on_q_func(_q, (struct list_head *)(_qe))
+
+/*
+ * BFA state machine interfaces
+ */
+
+typedef void (*bfa_sm_t)(void *sm, int event);
+
+/*
+ * oc - object class eg. bfa_ioc
+ * st - state, eg. reset
+ * otype - object type, eg. struct bfa_ioc_s
+ * etype - event type, eg. enum ioc_event
+ */
+#define bfa_sm_state_decl(oc, st, otype, etype)		\
+	static void oc ## _sm_ ## st(otype * fsm, etype event)
+
+#define bfa_sm_set_state(_sm, _state)	((_sm)->sm = (bfa_sm_t)(_state))
+#define bfa_sm_send_event(_sm, _event)	((_sm)->sm((_sm), (_event)))
+#define bfa_sm_get_state(_sm)		((_sm)->sm)
+#define bfa_sm_cmp_state(_sm, _state)	((_sm)->sm == (bfa_sm_t)(_state))
+
+/*
+ * For converting from state machine function to state encoding.
+ */
+struct bfa_sm_table_s {
+	bfa_sm_t	sm;	/*  state machine function	*/
+	int		state;	/*  state machine encoding	*/
+	char		*name;	/*  state name for display	*/
+};
+#define BFA_SM(_sm)	((bfa_sm_t)(_sm))
+
+/*
+ * State machine with entry actions.
+ */
+typedef void (*bfa_fsm_t)(void *fsm, int event);
+
+/*
+ * oc - object class eg. bfa_ioc
+ * st - state, eg. reset
+ * otype - object type, eg. struct bfa_ioc_s
+ * etype - event type, eg. enum ioc_event
+ */
+#define bfa_fsm_state_decl(oc, st, otype, etype)		\
+	static void oc ## _sm_ ## st(otype * fsm, etype event);      \
+	static void oc ## _sm_ ## st ## _entry(otype * fsm)
+
+#define bfa_fsm_set_state(_fsm, _state) do {	\
+	(_fsm)->fsm = (bfa_fsm_t)(_state);      \
+	_state ## _entry(_fsm);      \
+} while (0)
+
+#define bfa_fsm_send_event(_fsm, _event)	((_fsm)->fsm((_fsm), (_event)))
+#define bfa_fsm_get_state(_fsm)			((_fsm)->fsm)
+#define bfa_fsm_cmp_state(_fsm, _state)		\
+	((_fsm)->fsm == (bfa_fsm_t)(_state))
+
+static inline int
+bfa_sm_to_state(struct bfa_sm_table_s *smt, bfa_sm_t sm)
+{
+	int	i = 0;
+
+	while (smt[i].sm && smt[i].sm != sm)
+		i++;
+	return smt[i].state;
+}
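+
+/*
+ * Example (illustrative, with a hypothetical object class "bfa_foo"
+ * whose struct embeds a bfa_fsm_t member named "fsm"):
+ *
+ *	bfa_fsm_state_decl(bfa_foo, stopped, struct bfa_foo_s, enum foo_event);
+ *	bfa_fsm_state_decl(bfa_foo, started, struct bfa_foo_s, enum foo_event);
+ *
+ *	bfa_fsm_set_state(foo, bfa_foo_sm_stopped);  (runs the _entry action)
+ *	bfa_fsm_send_event(foo, FOO_E_START);        (dispatches to handler)
+ */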
+
+/*
+ * Generic wait counter.
+ */
+
+typedef void (*bfa_wc_resume_t) (void *cbarg);
+
+struct bfa_wc_s {
+	bfa_wc_resume_t wc_resume;
+	void		*wc_cbarg;
+	int		wc_count;
+};
+
+static inline void
+bfa_wc_up(struct bfa_wc_s *wc)
+{
+	wc->wc_count++;
+}
+
+static inline void
+bfa_wc_down(struct bfa_wc_s *wc)
+{
+	wc->wc_count--;
+	if (wc->wc_count == 0)
+		wc->wc_resume(wc->wc_cbarg);
+}
+
+/*
+ * Initialize a waiting counter.
+ */
+static inline void
+bfa_wc_init(struct bfa_wc_s *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg)
+{
+	wc->wc_resume = wc_resume;
+	wc->wc_cbarg = wc_cbarg;
+	wc->wc_count = 0;
+	bfa_wc_up(wc);
+}
+
+/*
+ * Wait for counter to reach zero
+ */
+static inline void
+bfa_wc_wait(struct bfa_wc_s *wc)
+{
+	bfa_wc_down(wc);
+}
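+
+/*
+ * Usage sketch (illustrative; start_sub_op() is a hypothetical
+ * sub-operation that calls bfa_wc_down() from its completion path).
+ * bfa_wc_init() counts the initiator itself, each bfa_wc_up() counts
+ * one outstanding sub-operation, and the resume callback fires only
+ * once the count drops to zero:
+ *
+ *	bfa_wc_init(&wc, all_done_cb, cbarg);
+ *	for (i = 0; i < n; i++) {
+ *		bfa_wc_up(&wc);
+ *		start_sub_op(i);
+ *	}
+ *	bfa_wc_wait(&wc);
+ */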
+
+static inline void
+wwn2str(char *wwn_str, u64 wwn)
+{
+	union {
+		u64 wwn;
+		u8 byte[8];
+	} w;
+
+	w.wwn = wwn;
+	sprintf(wwn_str, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x", w.byte[0],
+		w.byte[1], w.byte[2], w.byte[3], w.byte[4], w.byte[5],
+		w.byte[6], w.byte[7]);
+}
+
+static inline void
+fcid2str(char *fcid_str, u32 fcid)
+{
+	union {
+		u32 fcid;
+		u8 byte[4];
+	} f;
+
+	f.fcid = fcid;
+	sprintf(fcid_str, "%02x:%02x:%02x", f.byte[1], f.byte[2], f.byte[3]);
+}
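+
+/*
+ * Both helpers print the value's bytes in memory order, so they assume
+ * the wwn/fcid is stored in wire (big-endian) order.  For example, an
+ * fcid laid out in memory as 00 0a 0b 0c is rendered by fcid2str() as
+ * "0a:0b:0c"; byte[0], the unused high byte, is skipped.
+ */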
+
+#define bfa_swap_3b(_x)				\
+	((((_x) & 0xff) << 16) |		\
+	((_x) & 0x00ff00) |			\
+	(((_x) & 0xff0000) >> 16))
+
+#ifndef __BIG_ENDIAN
+#define bfa_hton3b(_x)  bfa_swap_3b(_x)
+#else
+#define bfa_hton3b(_x)  (_x)
+#endif
+
+#define bfa_ntoh3b(_x)  bfa_hton3b(_x)
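+
+/*
+ * bfa_swap_3b() reverses the three low-order bytes; on a little-endian
+ * host, for example, bfa_hton3b(0x0a0b0c) == 0x0c0b0a, while on
+ * big-endian hosts the value is already in wire order and is passed
+ * through unchanged.
+ */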
+
+#endif /* __BFA_CS_H__ */
diff --git a/drivers/scsi/bfa/bfa_defs.h b/drivers/scsi/bfa/bfa_defs.h
new file mode 100644
index 0000000..5dc3782
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_defs.h
@@ -0,0 +1,1288 @@
+/*
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014- QLogic Corporation.
+ * All rights reserved
+ * www.qlogic.com
+ *
+ * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __BFA_DEFS_H__
+#define __BFA_DEFS_H__
+
+#include "bfa_fc.h"
+#include "bfad_drv.h"
+
+#define BFA_MFG_SERIALNUM_SIZE                  11
+#define STRSZ(_n)                               (((_n) + 4) & ~3)
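+/* e.g. STRSZ(11) == 12: space for a NUL, rounded up to a multiple of 4 */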
+
+/*
+ * Manufacturing card type
+ */
+enum {
+	BFA_MFG_TYPE_CB_MAX  = 825,      /*  Crossbow card type max     */
+	BFA_MFG_TYPE_FC8P2   = 825,      /*  8G 2port FC card           */
+	BFA_MFG_TYPE_FC8P1   = 815,      /*  8G 1port FC card           */
+	BFA_MFG_TYPE_FC4P2   = 425,      /*  4G 2port FC card           */
+	BFA_MFG_TYPE_FC4P1   = 415,      /*  4G 1port FC card           */
+	BFA_MFG_TYPE_CNA10P2 = 1020,     /*  10G 2port CNA card */
+	BFA_MFG_TYPE_CNA10P1 = 1010,     /*  10G 1port CNA card */
+	BFA_MFG_TYPE_JAYHAWK = 804,      /*  Jayhawk mezz card          */
+	BFA_MFG_TYPE_WANCHESE = 1007,    /*  Wanchese mezz card */
+	BFA_MFG_TYPE_ASTRA    = 807,     /*  Astra mezz card            */
+	BFA_MFG_TYPE_LIGHTNING_P0 = 902, /*  Lightning mezz card - old  */
+	BFA_MFG_TYPE_LIGHTNING = 1741,   /*  Lightning mezz card        */
+	BFA_MFG_TYPE_PROWLER_F = 1560,	 /*  Prowler FC only cards	*/
+	BFA_MFG_TYPE_PROWLER_N = 1410,	 /*  Prowler NIC only cards	*/
+	BFA_MFG_TYPE_PROWLER_C = 1710,   /*  Prowler CNA only cards	*/
+	BFA_MFG_TYPE_PROWLER_D = 1860,   /*  Prowler Dual cards		*/
+	BFA_MFG_TYPE_CHINOOK   = 1867,   /*  Chinook cards		*/
+	BFA_MFG_TYPE_CHINOOK2   = 1869,	 /*!< Chinook2 cards		*/
+	BFA_MFG_TYPE_INVALID = 0,        /*  Invalid card type		*/
+};
+
+#pragma pack(1)
+
+/*
+ * Check if Mezz card
+ */
+#define bfa_mfg_is_mezz(type) (( \
+	(type) == BFA_MFG_TYPE_JAYHAWK || \
+	(type) == BFA_MFG_TYPE_WANCHESE || \
+	(type) == BFA_MFG_TYPE_ASTRA || \
+	(type) == BFA_MFG_TYPE_LIGHTNING_P0 || \
+	(type) == BFA_MFG_TYPE_LIGHTNING || \
+	(type) == BFA_MFG_TYPE_CHINOOK || \
+	(type) == BFA_MFG_TYPE_CHINOOK2))
+
+/*
+ * Check if the card uses the old wwn/mac handling
+ */
+#define bfa_mfg_is_old_wwn_mac_model(type) (( \
+	(type) == BFA_MFG_TYPE_FC8P2 || \
+	(type) == BFA_MFG_TYPE_FC8P1 || \
+	(type) == BFA_MFG_TYPE_FC4P2 || \
+	(type) == BFA_MFG_TYPE_FC4P1 || \
+	(type) == BFA_MFG_TYPE_CNA10P2 || \
+	(type) == BFA_MFG_TYPE_CNA10P1 || \
+	(type) == BFA_MFG_TYPE_JAYHAWK || \
+	(type) == BFA_MFG_TYPE_WANCHESE))
+
+#define bfa_mfg_increment_wwn_mac(m, i)                         \
+do {                                                            \
+	u32 t = ((u32)(m)[0] << 16) | ((u32)(m)[1] << 8) | \
+		(u32)(m)[2];  \
+	t += (i);      \
+	(m)[0] = (t >> 16) & 0xFF;                              \
+	(m)[1] = (t >> 8) & 0xFF;                               \
+	(m)[2] = t & 0xFF;                                      \
+} while (0)
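+
+/*
+ * The three bytes at (m) are treated as a single big-endian 24-bit
+ * value: incrementing m = {0x00, 0x00, 0xff} by 1 yields
+ * {0x00, 0x01, 0x00}, and overflow past 0xffffff wraps silently.
+ */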
+
+/*
+ * VPD data length
+ */
+#define BFA_MFG_VPD_LEN                 512
+
+/*
+ * VPD vendor tag
+ */
+enum {
+	BFA_MFG_VPD_UNKNOWN     = 0,     /*  vendor unknown             */
+	BFA_MFG_VPD_IBM         = 1,     /*  vendor IBM                 */
+	BFA_MFG_VPD_HP          = 2,     /*  vendor HP                  */
+	BFA_MFG_VPD_DELL        = 3,     /*  vendor DELL                */
+	BFA_MFG_VPD_PCI_IBM     = 0x08,  /*  PCI VPD IBM                */
+	BFA_MFG_VPD_PCI_HP      = 0x10,  /*  PCI VPD HP         */
+	BFA_MFG_VPD_PCI_DELL    = 0x20,  /*  PCI VPD DELL               */
+	BFA_MFG_VPD_PCI_BRCD    = 0xf8,  /*  PCI VPD Brocade            */
+};
+
+/*
+ * All numerical fields are in big-endian format.
+ */
+struct bfa_mfg_vpd_s {
+	u8              version;        /*  vpd data version */
+	u8              vpd_sig[3];     /*  characters 'V', 'P', 'D' */
+	u8              chksum;         /*  u8 checksum */
+	u8              vendor;         /*  vendor */
+	u8      len;            /*  vpd data length excluding header */
+	u8      rsv;
+	u8              data[BFA_MFG_VPD_LEN];  /*  vpd data */
+};
+
+#pragma pack()
+
+/*
+ * Status return values
+ */
+enum bfa_status {
+	BFA_STATUS_OK		= 0,	/*  Success */
+	BFA_STATUS_FAILED	= 1,	/*  Operation failed */
+	BFA_STATUS_EINVAL	= 2,	/*  Invalid params - check input
+					 *  parameters */
+	BFA_STATUS_ENOMEM	= 3,	/*  Out of resources */
+	BFA_STATUS_ETIMER	= 5,	/*  Timer expired - Retry, if persists,
+					 *  contact support */
+	BFA_STATUS_EPROTOCOL	= 6,	/*  Protocol error */
+	BFA_STATUS_BADFLASH	= 9,	/*  Flash is bad */
+	BFA_STATUS_SFP_UNSUPP	= 10,	/*  Unsupported SFP - Replace SFP */
+	BFA_STATUS_UNKNOWN_VFID = 11,	/*  VF_ID not found */
+	BFA_STATUS_DATACORRUPTED = 12,  /*  Diag returned data corrupted */
+	BFA_STATUS_DEVBUSY	= 13,	/*  Device busy - Retry operation */
+	BFA_STATUS_HDMA_FAILED  = 16,   /* Host dma failed - contact support */
+	BFA_STATUS_FLASH_BAD_LEN = 17,	/*  Flash bad length */
+	BFA_STATUS_UNKNOWN_LWWN = 18,	/*  LPORT PWWN not found */
+	BFA_STATUS_UNKNOWN_RWWN = 19,	/*  RPORT PWWN not found */
+	BFA_STATUS_VPORT_EXISTS = 21,	/*  VPORT already exists */
+	BFA_STATUS_VPORT_MAX	= 22,	/*  Reached max VPORT supported limit */
+	BFA_STATUS_UNSUPP_SPEED	= 23,	/*  Invalid speed - check speed setting */
+	BFA_STATUS_INVLD_DFSZ	= 24,	/*  Invalid Max data field size */
+	BFA_STATUS_CMD_NOTSUPP  = 26,   /*  Command/API not supported */
+	BFA_STATUS_FABRIC_RJT	= 29,	/*  Reject from attached fabric */
+	BFA_STATUS_UNKNOWN_VWWN = 30,	/*  VPORT PWWN not found */
+	BFA_STATUS_PORT_OFFLINE = 34,	/*  Port is not online */
+	BFA_STATUS_VPORT_WWN_BP	= 46,	/*  WWN is same as base port's WWN */
+	BFA_STATUS_PORT_NOT_DISABLED = 47, /* Port not disabled - disable port */
+	BFA_STATUS_NO_FCPIM_NEXUS = 52,	/* No FCP Nexus exists with the rport */
+	BFA_STATUS_IOC_FAILURE	= 56,	/* IOC failure - Retry, if persists
+					 * contact support */
+	BFA_STATUS_INVALID_WWN	= 57,	/*  Invalid WWN */
+	BFA_STATUS_ADAPTER_ENABLED = 60, /* Adapter is not disabled */
+	BFA_STATUS_IOC_NON_OP   = 61,	/* IOC is not operational */
+	BFA_STATUS_VERSION_FAIL = 70, /* Application/Driver version mismatch */
+	BFA_STATUS_DIAG_BUSY	= 71,	/*  diag busy */
+	BFA_STATUS_BEACON_ON    = 72,   /* Port Beacon already on */
+	BFA_STATUS_ENOFSAVE	= 78,	/*  No saved firmware trace */
+	BFA_STATUS_IOC_DISABLED = 82,   /* IOC is already disabled */
+	BFA_STATUS_ERROR_TRL_ENABLED  = 87,   /* TRL is enabled */
+	BFA_STATUS_ERROR_QOS_ENABLED  = 88,   /* QoS is enabled */
+	BFA_STATUS_NO_SFP_DEV = 89,	/* No SFP device - check or replace SFP */
+	BFA_STATUS_MEMTEST_FAILED = 90, /* Memory test failed - contact support */
+	BFA_STATUS_LEDTEST_OP = 109, /* LED test is operating */
+	BFA_STATUS_INVALID_MAC  = 134, /*  Invalid MAC address */
+	BFA_STATUS_CMD_NOTSUPP_CNA = 146, /* Command not supported for CNA */
+	BFA_STATUS_PBC		= 154, /*  Operation not allowed for pre-boot
+					*  configuration */
+	BFA_STATUS_BAD_FWCFG = 156,	/* Bad firmware configuration */
+	BFA_STATUS_INVALID_VENDOR = 158, /* Invalid switch vendor */
+	BFA_STATUS_SFP_NOT_READY = 159,	/* SFP info is not ready. Retry */
+	BFA_STATUS_TRUNK_ENABLED = 164, /* Trunk is already enabled on
+					 * this adapter */
+	BFA_STATUS_TRUNK_DISABLED  = 165, /* Trunking is disabled on
+					   * the adapter */
+	BFA_STATUS_IOPROFILE_OFF = 175, /* IO profile OFF */
+	BFA_STATUS_PHY_NOT_PRESENT = 183, /* PHY module not present */
+	BFA_STATUS_FEATURE_NOT_SUPPORTED = 192,	/* Feature not supported */
+	BFA_STATUS_ENTRY_EXISTS = 193,	/* Entry already exists */
+	BFA_STATUS_ENTRY_NOT_EXISTS = 194, /* Entry does not exist */
+	BFA_STATUS_NO_CHANGE = 195,	/* Feature already in that state */
+	BFA_STATUS_FAA_ENABLED = 197,	/* FAA is already enabled */
+	BFA_STATUS_FAA_DISABLED = 198,	/* FAA is already disabled */
+	BFA_STATUS_FAA_ACQUIRED = 199,	/* FAA is already acquired */
+	BFA_STATUS_FAA_ACQ_ADDR = 200,	/* Acquiring addr */
+	BFA_STATUS_BBCR_FC_ONLY = 201, /*!< BBCredit Recovery is supported
+					* for FC mode only */
+	BFA_STATUS_ERROR_TRUNK_ENABLED = 203,	/* Trunk enabled on adapter */
+	BFA_STATUS_MAX_ENTRY_REACHED = 212,	/* MAX entry reached */
+	BFA_STATUS_TOPOLOGY_LOOP = 230, /* Topology is set to Loop */
+	BFA_STATUS_LOOP_UNSUPP_MEZZ = 231, /* Loop topology is not supported
+					    * on mezz cards */
+	BFA_STATUS_INVALID_BW = 233,	/* Invalid bandwidth value */
+	BFA_STATUS_QOS_BW_INVALID = 234,   /* Invalid QOS bandwidth
+					    * configuration */
+	BFA_STATUS_DPORT_ENABLED = 235, /* D-port mode is already enabled */
+	BFA_STATUS_DPORT_DISABLED = 236, /* D-port mode is already disabled */
+	BFA_STATUS_CMD_NOTSUPP_MEZZ = 239, /* Cmd not supported for MEZZ card */
+	BFA_STATUS_FRU_NOT_PRESENT = 240, /* fru module not present */
+	BFA_STATUS_DPORT_NO_SFP = 243, /* SFP is not present.\n D-port will be
+					* enabled but it will be operational
+					* only after inserting a valid SFP. */
+	BFA_STATUS_DPORT_ERR = 245,	/* D-port mode is enabled */
+	BFA_STATUS_DPORT_ENOSYS = 254, /* Switch has no D_Port functionality */
+	BFA_STATUS_DPORT_CANT_PERF = 255, /* Switch port is not D_Port capable
+					* or D_Port is disabled */
+	BFA_STATUS_DPORT_LOGICALERR = 256, /* Switch D_Port fail */
+	BFA_STATUS_DPORT_SWBUSY = 257, /* Switch port busy */
+	BFA_STATUS_ERR_BBCR_SPEED_UNSUPPORT = 258, /*!< BB credit recovery is
+					* supported at max port speed alone */
+	BFA_STATUS_ERROR_BBCR_ENABLED  = 259, /*!< BB credit recovery
+					* is enabled */
+	BFA_STATUS_INVALID_BBSCN = 260, /*!< Invalid BBSCN value.
+					 * Valid range is [1-15] */
+	BFA_STATUS_DDPORT_ERR = 261, /* Dynamic D_Port mode is active.\n To
+					* exit dynamic mode, disable D_Port on
+					* the remote port */
+	BFA_STATUS_DPORT_SFPWRAP_ERR = 262, /* Clear e/o_wrap fail, check or
+						* replace SFP */
+	BFA_STATUS_BBCR_CFG_NO_CHANGE = 265, /*!< BBCR is operational.
+			* Disable BBCR and try this operation again. */
+	BFA_STATUS_DPORT_SW_NOTREADY = 268, /* Remote port is not ready to
+					* start dport test. Check remote
+					* port status. */
+	BFA_STATUS_DPORT_INV_SFP = 271, /* Invalid SFP for D-PORT mode. */
+	BFA_STATUS_DPORT_CMD_NOTSUPP    = 273, /* Dport is not supported by
+					* remote port */
+	BFA_STATUS_MAX_VAL		/* Unknown error code */
+};
+#define bfa_status_t enum bfa_status
+
+enum bfa_eproto_status {
+	BFA_EPROTO_BAD_ACCEPT = 0,
+	BFA_EPROTO_UNKNOWN_RSP = 1
+};
+#define bfa_eproto_status_t enum bfa_eproto_status
+
+enum bfa_boolean {
+	BFA_FALSE = 0,
+	BFA_TRUE  = 1
+};
+#define bfa_boolean_t enum bfa_boolean
+
+#define BFA_STRING_32	32
+#define BFA_VERSION_LEN 64
+
+/*
+ * ---------------------- adapter definitions ------------
+ */
+
+/*
+ * BFA adapter level attributes.
+ */
+enum {
+	BFA_ADAPTER_SERIAL_NUM_LEN = STRSZ(BFA_MFG_SERIALNUM_SIZE),
+					/* adapter serial num length */
+	BFA_ADAPTER_MODEL_NAME_LEN  = 16,  /*  model name length */
+	BFA_ADAPTER_MODEL_DESCR_LEN = 128, /*  model description length */
+	BFA_ADAPTER_MFG_NAME_LEN    = 8,   /*  manufacturer name length */
+	BFA_ADAPTER_SYM_NAME_LEN    = 64,  /*  adapter symbolic name length */
+	BFA_ADAPTER_OS_TYPE_LEN	    = 64,  /*  adapter os type length */
+	BFA_ADAPTER_UUID_LEN	    = 16,  /* adapter uuid length */
+};
+
+struct bfa_adapter_attr_s {
+	char		manufacturer[BFA_ADAPTER_MFG_NAME_LEN];
+	char		serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
+	u32	card_type;
+	char		model[BFA_ADAPTER_MODEL_NAME_LEN];
+	char		model_descr[BFA_ADAPTER_MODEL_DESCR_LEN];
+	wwn_t		pwwn;
+	char		node_symname[FC_SYMNAME_MAX];
+	char		hw_ver[BFA_VERSION_LEN];
+	char		fw_ver[BFA_VERSION_LEN];
+	char		optrom_ver[BFA_VERSION_LEN];
+	char		os_type[BFA_ADAPTER_OS_TYPE_LEN];
+	struct bfa_mfg_vpd_s	vpd;
+	struct mac_s	mac;
+
+	u8		nports;
+	u8		max_speed;
+	u8		prototype;
+	char	        asic_rev;
+
+	u8		pcie_gen;
+	u8		pcie_lanes_orig;
+	u8		pcie_lanes;
+	u8	        cna_capable;
+
+	u8		is_mezz;
+	u8		trunk_capable;
+	u8		mfg_day;	/* manufacturing day */
+	u8		mfg_month;	/* manufacturing month */
+	u16		mfg_year;	/* manufacturing year */
+	u16		rsvd;
+	u8		uuid[BFA_ADAPTER_UUID_LEN];
+};
+
+/*
+ * ---------------------- IOC definitions ------------
+ */
+
+enum {
+	BFA_IOC_DRIVER_LEN	= 16,
+	BFA_IOC_CHIP_REV_LEN	= 8,
+};
+
+/*
+ * Driver and firmware versions.
+ */
+struct bfa_ioc_driver_attr_s {
+	char		driver[BFA_IOC_DRIVER_LEN];	/*  driver name */
+	char		driver_ver[BFA_VERSION_LEN];	/*  driver version */
+	char		fw_ver[BFA_VERSION_LEN];	/*  firmware version */
+	char		bios_ver[BFA_VERSION_LEN];	/*  bios version */
+	char		efi_ver[BFA_VERSION_LEN];	/*  EFI version */
+	char		ob_ver[BFA_VERSION_LEN];	/*  openboot version */
+};
+
+/*
+ * IOC PCI device attributes
+ */
+struct bfa_ioc_pci_attr_s {
+	u16	vendor_id;	/*  PCI vendor ID */
+	u16	device_id;	/*  PCI device ID */
+	u16	ssid;		/*  subsystem ID */
+	u16	ssvid;		/*  subsystem vendor ID */
+	u32	pcifn;		/*  PCI device function */
+	u32	rsvd;		/* padding */
+	char		chip_rev[BFA_IOC_CHIP_REV_LEN];	 /*  chip revision */
+};
+
+/*
+ * IOC states
+ */
+enum bfa_ioc_state {
+	BFA_IOC_UNINIT		= 1,	/*  IOC is in uninit state */
+	BFA_IOC_RESET		= 2,	/*  IOC is in reset state */
+	BFA_IOC_SEMWAIT		= 3,	/*  Waiting for IOC h/w semaphore */
+	BFA_IOC_HWINIT		= 4,	/*  IOC h/w is being initialized */
+	BFA_IOC_GETATTR		= 5,	/*  IOC is being configured */
+	BFA_IOC_OPERATIONAL	= 6,	/*  IOC is operational */
+	BFA_IOC_INITFAIL	= 7,	/*  IOC hardware failure */
+	BFA_IOC_FAIL		= 8,	/*  IOC heart-beat failure */
+	BFA_IOC_DISABLING	= 9,	/*  IOC is being disabled */
+	BFA_IOC_DISABLED	= 10,	/*  IOC is disabled */
+	BFA_IOC_FWMISMATCH	= 11,	/*  IOC f/w different from drivers */
+	BFA_IOC_ENABLING	= 12,	/*  IOC is being enabled */
+	BFA_IOC_HWFAIL		= 13,	/*  PCI mapping doesn't exist */
+	BFA_IOC_ACQ_ADDR	= 14,	/*  Acquiring addr from fabric */
+};
+
+/*
+ * IOC firmware stats
+ */
+struct bfa_fw_ioc_stats_s {
+	u32	enable_reqs;
+	u32	disable_reqs;
+	u32	get_attr_reqs;
+	u32	dbg_sync;
+	u32	dbg_dump;
+	u32	unknown_reqs;
+};
+
+/*
+ * IOC driver stats
+ */
+struct bfa_ioc_drv_stats_s {
+	u32	ioc_isrs;
+	u32	ioc_enables;
+	u32	ioc_disables;
+	u32	ioc_hbfails;
+	u32	ioc_boots;
+	u32	stats_tmos;
+	u32	hb_count;
+	u32	disable_reqs;
+	u32	enable_reqs;
+	u32	disable_replies;
+	u32	enable_replies;
+	u32	rsvd;
+};
+
+/*
+ * IOC statistics
+ */
+struct bfa_ioc_stats_s {
+	struct bfa_ioc_drv_stats_s	drv_stats; /*  driver IOC stats */
+	struct bfa_fw_ioc_stats_s	fw_stats;  /*  firmware IOC stats */
+};
+
+enum bfa_ioc_type_e {
+	BFA_IOC_TYPE_FC		= 1,
+	BFA_IOC_TYPE_FCoE	= 2,
+	BFA_IOC_TYPE_LL		= 3,
+};
+
+/*
+ * IOC attributes returned in queries
+ */
+struct bfa_ioc_attr_s {
+	enum bfa_ioc_type_e		ioc_type;
+	enum bfa_ioc_state		state;		/*  IOC state      */
+	struct bfa_adapter_attr_s	adapter_attr;	/*  HBA attributes */
+	struct bfa_ioc_driver_attr_s	driver_attr;	/*  driver attr    */
+	struct bfa_ioc_pci_attr_s	pci_attr;
+	u8				port_id;	/*  port number    */
+	u8				port_mode;	/*  bfa_mode_s	*/
+	u8				cap_bm;		/*  capability	*/
+	u8				port_mode_cfg;	/*  bfa_mode_s	*/
+	u8				def_fn;		/* 1 if default fn */
+	u8				rsvd[3];	/*  64bit align	*/
+};
+
+/*
+ *			AEN related definitions
+ */
+enum bfa_aen_category {
+	BFA_AEN_CAT_ADAPTER	= 1,
+	BFA_AEN_CAT_PORT	= 2,
+	BFA_AEN_CAT_LPORT	= 3,
+	BFA_AEN_CAT_RPORT	= 4,
+	BFA_AEN_CAT_ITNIM	= 5,
+	BFA_AEN_CAT_AUDIT	= 8,
+	BFA_AEN_CAT_IOC		= 9,
+};
+
+/* BFA adapter level events */
+enum bfa_adapter_aen_event {
+	BFA_ADAPTER_AEN_ADD	= 1,	/* New Adapter found event */
+	BFA_ADAPTER_AEN_REMOVE	= 2,	/* Adapter removed event */
+};
+
+struct bfa_adapter_aen_data_s {
+	char	serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
+	u32	nports; /* Number of NPorts */
+	wwn_t	pwwn;   /* WWN of one of its physical port */
+};
+
+/* BFA physical port Level events */
+enum bfa_port_aen_event {
+	BFA_PORT_AEN_ONLINE	= 1,    /* Physical Port online event */
+	BFA_PORT_AEN_OFFLINE	= 2,    /* Physical Port offline event */
+	BFA_PORT_AEN_RLIR	= 3,    /* RLIR event, not supported */
+	BFA_PORT_AEN_SFP_INSERT	= 4,    /* SFP inserted event */
+	BFA_PORT_AEN_SFP_REMOVE	= 5,    /* SFP removed event */
+	BFA_PORT_AEN_SFP_POM	= 6,    /* SFP POM event */
+	BFA_PORT_AEN_ENABLE	= 7,    /* Physical Port enable event */
+	BFA_PORT_AEN_DISABLE	= 8,    /* Physical Port disable event */
+	BFA_PORT_AEN_AUTH_ON	= 9,    /* Physical Port auth success event */
+	BFA_PORT_AEN_AUTH_OFF	= 10,   /* Physical Port auth fail event */
+	BFA_PORT_AEN_DISCONNECT	= 11,   /* Physical Port disconnect event */
+	BFA_PORT_AEN_QOS_NEG	= 12,   /* Base Port QOS negotiation event */
+	BFA_PORT_AEN_FABRIC_NAME_CHANGE	= 13, /* Fabric Name/WWN change */
+	BFA_PORT_AEN_SFP_ACCESS_ERROR	= 14, /* SFP read error event */
+	BFA_PORT_AEN_SFP_UNSUPPORT	= 15, /* Unsupported SFP event */
+};
+
+enum bfa_port_aen_sfp_pom {
+	BFA_PORT_AEN_SFP_POM_GREEN = 1, /* Normal */
+	BFA_PORT_AEN_SFP_POM_AMBER = 2, /* Warning */
+	BFA_PORT_AEN_SFP_POM_RED   = 3, /* Critical */
+	BFA_PORT_AEN_SFP_POM_MAX   = BFA_PORT_AEN_SFP_POM_RED
+};
+
+struct bfa_port_aen_data_s {
+	wwn_t		pwwn;		/* WWN of the physical port */
+	wwn_t		fwwn;		/* WWN of the fabric port */
+	u32		phy_port_num;	/* For SFP related events */
+	u16		ioc_type;
+	u16		level;		/* Only transitions will be informed */
+	mac_t		mac;		/* MAC address of the ethernet port */
+	u16		rsvd;
+};
+
+/* BFA AEN logical port events */
+enum bfa_lport_aen_event {
+	BFA_LPORT_AEN_NEW	= 1,		/* LPort created event */
+	BFA_LPORT_AEN_DELETE	= 2,		/* LPort deleted event */
+	BFA_LPORT_AEN_ONLINE	= 3,		/* LPort online event */
+	BFA_LPORT_AEN_OFFLINE	= 4,		/* LPort offline event */
+	BFA_LPORT_AEN_DISCONNECT = 5,		/* LPort disconnect event */
+	BFA_LPORT_AEN_NEW_PROP	= 6,		/* VPort created event */
+	BFA_LPORT_AEN_DELETE_PROP = 7,		/* VPort deleted event */
+	BFA_LPORT_AEN_NEW_STANDARD = 8,		/* VPort created event */
+	BFA_LPORT_AEN_DELETE_STANDARD = 9,	/* VPort deleted event */
+	BFA_LPORT_AEN_NPIV_DUP_WWN = 10,	/* VPort with duplicate WWN */
+	BFA_LPORT_AEN_NPIV_FABRIC_MAX = 11,	/* Max NPIV in fabric/fport */
+	BFA_LPORT_AEN_NPIV_UNKNOWN = 12,	/* Unknown NPIV Error code */
+};
+
+struct bfa_lport_aen_data_s {
+	u16	vf_id;	/* vf_id of this logical port */
+	u16	roles;	/* Logical port mode,IM/TM/IP etc */
+	u32	rsvd;
+	wwn_t	ppwwn;	/* WWN of its physical port */
+	wwn_t	lpwwn;	/* WWN of this logical port */
+};
+
+/* BFA ITNIM events */
+enum bfa_itnim_aen_event {
+	BFA_ITNIM_AEN_ONLINE	 = 1,	/* Target online */
+	BFA_ITNIM_AEN_OFFLINE	 = 2,	/* Target offline */
+	BFA_ITNIM_AEN_DISCONNECT = 3,	/* Target disconnected */
+};
+
+struct bfa_itnim_aen_data_s {
+	u16		vf_id;		/* vf_id of the IT nexus */
+	u16		rsvd[3];
+	wwn_t		ppwwn;		/* WWN of its physical port */
+	wwn_t		lpwwn;		/* WWN of logical port */
+	wwn_t		rpwwn;		/* WWN of remote(target) port */
+};
+
+/* BFA audit events */
+enum bfa_audit_aen_event {
+	BFA_AUDIT_AEN_AUTH_ENABLE	= 1,
+	BFA_AUDIT_AEN_AUTH_DISABLE	= 2,
+	BFA_AUDIT_AEN_FLASH_ERASE	= 3,
+	BFA_AUDIT_AEN_FLASH_UPDATE	= 4,
+};
+
+struct bfa_audit_aen_data_s {
+	wwn_t	pwwn;
+	int	partition_inst;
+	int	partition_type;
+};
+
+/* BFA IOC level events */
+enum bfa_ioc_aen_event {
+	BFA_IOC_AEN_HBGOOD  = 1,	/* Heart Beat restore event	*/
+	BFA_IOC_AEN_HBFAIL  = 2,	/* Heart Beat failure event	*/
+	BFA_IOC_AEN_ENABLE  = 3,	/* IOC enabled event		*/
+	BFA_IOC_AEN_DISABLE = 4,	/* IOC disabled event		*/
+	BFA_IOC_AEN_FWMISMATCH  = 5,	/* IOC firmware mismatch	*/
+	BFA_IOC_AEN_FWCFG_ERROR = 6,	/* IOC firmware config error	*/
+	BFA_IOC_AEN_INVALID_VENDOR = 7,
+	BFA_IOC_AEN_INVALID_NWWN = 8,	/* Zero NWWN			*/
+	BFA_IOC_AEN_INVALID_PWWN = 9	/* Zero PWWN			*/
+};
+
+struct bfa_ioc_aen_data_s {
+	wwn_t	pwwn;
+	u16	ioc_type;
+	mac_t	mac;
+};
+
+/*
+ * ---------------------- mfg definitions ------------
+ */
+
+/*
+ * Checksum size
+ */
+#define BFA_MFG_CHKSUM_SIZE			16
+
+#define BFA_MFG_PARTNUM_SIZE			14
+#define BFA_MFG_SUPPLIER_ID_SIZE		10
+#define BFA_MFG_SUPPLIER_PARTNUM_SIZE		20
+#define BFA_MFG_SUPPLIER_SERIALNUM_SIZE		20
+#define BFA_MFG_SUPPLIER_REVISION_SIZE		4
+/*
+ * Initial capability definition
+ */
+#define BFA_MFG_IC_FC	0x01
+#define BFA_MFG_IC_ETH	0x02
+
+/*
+ * Adapter capability mask definition
+ */
+#define BFA_CM_HBA	0x01
+#define BFA_CM_CNA	0x02
+#define BFA_CM_NIC	0x04
+#define BFA_CM_FC16G	0x08
+#define BFA_CM_SRIOV	0x10
+#define BFA_CM_MEZZ	0x20
+
+#pragma pack(1)
+
+/*
+ * All numerical fields are in big-endian format.
+ */
+struct bfa_mfg_block_s {
+	u8	version;    /*!< manufacturing block version */
+	u8     mfg_sig[3]; /*!< characters 'M', 'F', 'G' */
+	u16    mfgsize;    /*!< mfg block size */
+	u16    u16_chksum; /*!< old u16 checksum */
+	char        brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
+	char        brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)];
+	u8     mfg_day;    /*!< manufacturing day */
+	u8     mfg_month;  /*!< manufacturing month */
+	u16    mfg_year;   /*!< manufacturing year */
+	wwn_t       mfg_wwn;    /*!< wwn base for this adapter */
+	u8     num_wwn;    /*!< number of wwns assigned */
+	u8     mfg_speeds; /*!< speeds allowed for this adapter */
+	u8     rsv[2];
+	char    supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)];
+	char    supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)];
+	char    supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)];
+	char    supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)];
+	mac_t       mfg_mac;    /*!< base mac address */
+	u8     num_mac;    /*!< number of mac addresses */
+	u8     rsv2;
+	u32    card_type;  /*!< card type          */
+	char        cap_nic;    /*!< capability nic     */
+	char        cap_cna;    /*!< capability cna     */
+	char        cap_hba;    /*!< capability hba     */
+	char        cap_fc16g;  /*!< capability fc 16g      */
+	char        cap_sriov;  /*!< capability sriov       */
+	char        cap_mezz;   /*!< capability mezz        */
+	u8     rsv3;
+	u8     mfg_nports; /*!< number of ports        */
+	char        media[8];   /*!< xfi/xaui           */
+	char        initial_mode[8]; /*!< initial mode: hba/cna/nic */
+	u8     rsv4[84];
+	u8     md5_chksum[BFA_MFG_CHKSUM_SIZE]; /*!< md5 checksum */
+};
+
+#pragma pack()
+
+/*
+ * ---------------------- pci definitions ------------
+ */
+
+/*
+ * PCI device and vendor ID information
+ */
+enum {
+	BFA_PCI_VENDOR_ID_BROCADE	= 0x1657,
+	BFA_PCI_DEVICE_ID_FC_8G2P	= 0x13,
+	BFA_PCI_DEVICE_ID_FC_8G1P	= 0x17,
+	BFA_PCI_DEVICE_ID_CT		= 0x14,
+	BFA_PCI_DEVICE_ID_CT_FC		= 0x21,
+	BFA_PCI_DEVICE_ID_CT2		= 0x22,
+	BFA_PCI_DEVICE_ID_CT2_QUAD	= 0x23,
+};
+
+#define bfa_asic_id_cb(__d)			\
+	((__d) == BFA_PCI_DEVICE_ID_FC_8G2P ||	\
+	 (__d) == BFA_PCI_DEVICE_ID_FC_8G1P)
+#define bfa_asic_id_ct(__d)			\
+	((__d) == BFA_PCI_DEVICE_ID_CT ||	\
+	 (__d) == BFA_PCI_DEVICE_ID_CT_FC)
+#define bfa_asic_id_ct2(__d)			\
+	((__d) == BFA_PCI_DEVICE_ID_CT2 ||	\
+	(__d) == BFA_PCI_DEVICE_ID_CT2_QUAD)
+#define bfa_asic_id_ctc(__d)	\
+	(bfa_asic_id_ct(__d) || bfa_asic_id_ct2(__d))
+
+/*
+ * PCI sub-system device and vendor ID information
+ */
+enum {
+	BFA_PCI_FCOE_SSDEVICE_ID	= 0x14,
+	BFA_PCI_CT2_SSID_FCoE		= 0x22,
+	BFA_PCI_CT2_SSID_ETH		= 0x23,
+	BFA_PCI_CT2_SSID_FC		= 0x24,
+};
+
+/*
+ * Maximum number of device address ranges mapped through different BAR(s)
+ */
+#define BFA_PCI_ACCESS_RANGES 1
+
+/*
+ *	Port speed settings. Each specific speed is a bit field. Use multiple
+ *	bits to specify speeds to be selected for auto-negotiation.
+ */
+enum bfa_port_speed {
+	BFA_PORT_SPEED_UNKNOWN = 0,
+	BFA_PORT_SPEED_1GBPS	= 1,
+	BFA_PORT_SPEED_2GBPS	= 2,
+	BFA_PORT_SPEED_4GBPS	= 4,
+	BFA_PORT_SPEED_8GBPS	= 8,
+	BFA_PORT_SPEED_10GBPS	= 10,
+	BFA_PORT_SPEED_16GBPS	= 16,
+	BFA_PORT_SPEED_AUTO	= 0xf,
+};
+#define bfa_port_speed_t enum bfa_port_speed
+
+enum {
+	BFA_BOOT_BOOTLUN_MAX = 4,       /*  maximum boot lun per IOC */
+	BFA_PREBOOT_BOOTLUN_MAX = 8,    /*  maximum preboot lun per IOC */
+};
+
+#define BOOT_CFG_REV1   1
+#define BOOT_CFG_VLAN   1
+
+/*
+ *      Boot option setting. Determines where the boot lun information
+ *      is obtained from.
+ */
+enum bfa_boot_bootopt {
+	BFA_BOOT_AUTO_DISCOVER  = 0, /*  Boot from blun provided by fabric */
+	BFA_BOOT_STORED_BLUN = 1, /*  Boot from bluns stored in flash */
+	BFA_BOOT_FIRST_LUN      = 2, /*  Boot from first discovered blun */
+	BFA_BOOT_PBC    = 3, /*  Boot from pbc configured blun  */
+};
+
+#pragma pack(1)
+/*
+ * Boot lun information.
+ */
+struct bfa_boot_bootlun_s {
+	wwn_t   pwwn;		/*  port wwn of target */
+	struct scsi_lun   lun;  /*  64-bit lun */
+};
+#pragma pack()
+
+/*
+ * SAN boot configuration
+ */
+struct bfa_boot_cfg_s {
+	u8		version;
+	u8		rsvd1;
+	u16		chksum;
+	u8		enable;		/* enable/disable SAN boot */
+	u8		speed;          /* boot speed settings */
+	u8		topology;       /* boot topology setting */
+	u8		bootopt;        /* bfa_boot_bootopt_t */
+	u32		nbluns;         /* number of boot luns */
+	u32		rsvd2;
+	struct bfa_boot_bootlun_s blun[BFA_BOOT_BOOTLUN_MAX];
+	struct bfa_boot_bootlun_s blun_disc[BFA_BOOT_BOOTLUN_MAX];
+};
+
+struct bfa_boot_pbc_s {
+	u8              enable;         /*  enable/disable SAN boot */
+	u8              speed;          /*  boot speed settings */
+	u8              topology;       /*  boot topology setting */
+	u8              rsvd1;
+	u32     nbluns;         /*  number of boot luns */
+	struct bfa_boot_bootlun_s pblun[BFA_PREBOOT_BOOTLUN_MAX];
+};
+
+struct bfa_ethboot_cfg_s {
+	u8		version;
+	u8		rsvd1;
+	u16		chksum;
+	u8		enable;	/* enable/disable Eth/PXE boot */
+	u8		rsvd2;
+	u16		vlan;
+};
+
+/*
+ * ASIC block configuration related structures
+ */
+#define BFA_ABLK_MAX_PORTS	2
+#define BFA_ABLK_MAX_PFS	16
+#define BFA_ABLK_MAX		2
+
+#pragma pack(1)
+enum bfa_mode_s {
+	BFA_MODE_HBA	= 1,
+	BFA_MODE_CNA	= 2,
+	BFA_MODE_NIC	= 3
+};
+
+struct bfa_adapter_cfg_mode_s {
+	u16	max_pf;
+	u16	max_vf;
+	enum bfa_mode_s	mode;
+};
+
+struct bfa_ablk_cfg_pf_s {
+	u16	pers;
+	u8	port_id;
+	u8	optrom;
+	u8	valid;
+	u8	sriov;
+	u8	max_vfs;
+	u8	rsvd[1];
+	u16	num_qpairs;
+	u16	num_vectors;
+	u16	bw_min;
+	u16	bw_max;
+};
+
+struct bfa_ablk_cfg_port_s {
+	u8	mode;
+	u8	type;
+	u8	max_pfs;
+	u8	rsvd[5];
+};
+
+struct bfa_ablk_cfg_inst_s {
+	u8	nports;
+	u8	max_pfs;
+	u8	rsvd[6];
+	struct bfa_ablk_cfg_pf_s	pf_cfg[BFA_ABLK_MAX_PFS];
+	struct bfa_ablk_cfg_port_s	port_cfg[BFA_ABLK_MAX_PORTS];
+};
+
+struct bfa_ablk_cfg_s {
+	struct bfa_ablk_cfg_inst_s	inst[BFA_ABLK_MAX];
+};
+
+
+/*
+ *	SFP module specific
+ */
+#define SFP_DIAGMON_SIZE	10 /* num bytes of diag monitor data */
+
+/* SFP state change notification event */
+#define BFA_SFP_SCN_REMOVED	0
+#define BFA_SFP_SCN_INSERTED	1
+#define BFA_SFP_SCN_POM		2
+#define BFA_SFP_SCN_FAILED	3
+#define BFA_SFP_SCN_UNSUPPORT	4
+#define BFA_SFP_SCN_VALID	5
+
+enum bfa_defs_sfp_media_e {
+	BFA_SFP_MEDIA_UNKNOWN	= 0x00,
+	BFA_SFP_MEDIA_CU	= 0x01,
+	BFA_SFP_MEDIA_LW	= 0x02,
+	BFA_SFP_MEDIA_SW	= 0x03,
+	BFA_SFP_MEDIA_EL	= 0x04,
+	BFA_SFP_MEDIA_UNSUPPORT	= 0x05,
+};
+
+/*
+ * values for xmtr_tech above
+ */
+enum {
+	SFP_XMTR_TECH_CU = (1 << 0),	/* copper FC-BaseT */
+	SFP_XMTR_TECH_CP = (1 << 1),	/* copper passive */
+	SFP_XMTR_TECH_CA = (1 << 2),	/* copper active */
+	SFP_XMTR_TECH_LL = (1 << 3),	/* longwave laser */
+	SFP_XMTR_TECH_SL = (1 << 4),	/* shortwave laser w/ OFC */
+	SFP_XMTR_TECH_SN = (1 << 5),	/* shortwave laser w/o OFC */
+	SFP_XMTR_TECH_EL_INTRA = (1 << 6), /* elec intra-enclosure */
+	SFP_XMTR_TECH_EL_INTER = (1 << 7), /* elec inter-enclosure */
+	SFP_XMTR_TECH_LC = (1 << 8),	/* longwave laser */
+	SFP_XMTR_TECH_SA = (1 << 9)
+};
+
+/*
+ * Serial ID: Data Fields -- Address A0h
+ * Basic ID field total 64 bytes
+ */
+struct sfp_srlid_base_s {
+	u8	id;		/* 00: Identifier */
+	u8	extid;		/* 01: Extended Identifier */
+	u8	connector;	/* 02: Connector */
+	u8	xcvr[8];	/* 03-10: Transceiver */
+	u8	encoding;	/* 11: Encoding */
+	u8	br_norm;	/* 12: BR, Nominal */
+	u8	rate_id;	/* 13: Rate Identifier */
+	u8	len_km;		/* 14: Length single mode km */
+	u8	len_100m;	/* 15: Length single mode 100m */
+	u8	len_om2;	/* 16: Length om2 fiber 10m */
+	u8	len_om1;	/* 17: Length om1 fiber 10m */
+	u8	len_cu;		/* 18: Length copper 1m */
+	u8	len_om3;	/* 19: Length om3 fiber 10m */
+	u8	vendor_name[16];/* 20-35 */
+	u8	unalloc1;
+	u8	vendor_oui[3];	/* 37-39 */
+	u8	vendor_pn[16];	/* 40-55 */
+	u8	vendor_rev[4];	/* 56-59 */
+	u8	wavelen[2];	/* 60-61 */
+	u8	unalloc2;
+	u8	cc_base;	/* 63: check code for base id field */
+};
+
+/*
+ * Serial ID: Data Fields -- Address A0h
+ * Extended id field total 32 bytes
+ */
+struct sfp_srlid_ext_s {
+	u8	options[2];
+	u8	br_max;
+	u8	br_min;
+	u8	vendor_sn[16];
+	u8	date_code[8];
+	u8	diag_mon_type;  /* 92: Diagnostic Monitoring type */
+	u8	en_options;
+	u8	sff_8472;
+	u8	cc_ext;
+};
+
+/*
+ * Diagnostic: Data Fields -- Address A2h
+ * Diagnostic and control/status base field total 96 bytes
+ */
+struct sfp_diag_base_s {
+	/*
+	 * Alarm and warning Thresholds 40 bytes
+	 */
+	u8	temp_high_alarm[2]; /* 00-01 */
+	u8	temp_low_alarm[2];  /* 02-03 */
+	u8	temp_high_warning[2];   /* 04-05 */
+	u8	temp_low_warning[2];    /* 06-07 */
+
+	u8	volt_high_alarm[2]; /* 08-09 */
+	u8	volt_low_alarm[2];  /* 10-11 */
+	u8	volt_high_warning[2];   /* 12-13 */
+	u8	volt_low_warning[2];    /* 14-15 */
+
+	u8	bias_high_alarm[2]; /* 16-17 */
+	u8	bias_low_alarm[2];  /* 18-19 */
+	u8	bias_high_warning[2];   /* 20-21 */
+	u8	bias_low_warning[2];    /* 22-23 */
+
+	u8	tx_pwr_high_alarm[2];   /* 24-25 */
+	u8	tx_pwr_low_alarm[2];    /* 26-27 */
+	u8	tx_pwr_high_warning[2]; /* 28-29 */
+	u8	tx_pwr_low_warning[2];  /* 30-31 */
+
+	u8	rx_pwr_high_alarm[2];   /* 32-33 */
+	u8	rx_pwr_low_alarm[2];    /* 34-35 */
+	u8	rx_pwr_high_warning[2]; /* 36-37 */
+	u8	rx_pwr_low_warning[2];  /* 38-39 */
+
+	u8	unallocate_1[16];
+
+	/*
+	 * ext_cal_const[36]
+	 */
+	u8	rx_pwr[20];
+	u8	tx_i[4];
+	u8	tx_pwr[4];
+	u8	temp[4];
+	u8	volt[4];
+	u8	unallocate_2[3];
+	u8	cc_dmi;
+};
+
+/*
+ * Diagnostic: Data Fields -- Address A2h
+ * Diagnostic and control/status extended field total 24 bytes
+ */
+struct sfp_diag_ext_s {
+	u8	diag[SFP_DIAGMON_SIZE];
+	u8	unalloc1[4];
+	u8	status_ctl;
+	u8	rsvd;
+	u8	alarm_flags[2];
+	u8	unalloc2[2];
+	u8	warning_flags[2];
+	u8	ext_status_ctl[2];
+};
+
+/*
+ * Diagnostic: Data Fields -- Address A2h
+ * General Use Fields: User Writable Table - Features's Control Registers
+ * Total 32 bytes
+ */
+struct sfp_usr_eeprom_s {
+	u8	rsvd1[2];       /* 128-129 */
+	u8	ewrap;          /* 130 */
+	u8	rsvd2[2];       /*  */
+	u8	owrap;          /* 133 */
+	u8	rsvd3[2];       /*  */
+	u8	prbs;           /* 136: PRBS 7 generator */
+	u8	rsvd4[2];       /*  */
+	u8	tx_eqz_16;      /* 139: TX Equalizer (16xFC) */
+	u8	tx_eqz_8;       /* 140: TX Equalizer (8xFC) */
+	u8	rsvd5[2];       /*  */
+	u8	rx_emp_16;      /* 143: RX Emphasis (16xFC) */
+	u8	rx_emp_8;       /* 144: RX Emphasis (8xFC) */
+	u8	rsvd6[2];       /*  */
+	u8	tx_eye_adj;     /* 147: TX eye Threshold Adjust */
+	u8	rsvd7[3];       /*  */
+	u8	tx_eye_qctl;    /* 151: TX eye Quality Control */
+	u8	tx_eye_qres;    /* 152: TX eye Quality Result */
+	u8	rsvd8[2];       /*  */
+	u8	poh[3];         /* 155-157: Power On Hours */
+	u8	rsvd9[2];       /*  */
+};
+
+struct sfp_mem_s {
+	struct sfp_srlid_base_s	srlid_base;
+	struct sfp_srlid_ext_s	srlid_ext;
+	struct sfp_diag_base_s	diag_base;
+	struct sfp_diag_ext_s	diag_ext;
+	struct sfp_usr_eeprom_s usr_eeprom;
+};
+
+/*
+ * transceiver codes (SFF-8472 Rev 10.2 Table 3.5)
+ */
+union sfp_xcvr_e10g_code_u {
+	u8		b;
+	struct {
+#ifdef __BIG_ENDIAN
+		u8	e10g_unall:1;   /* 10G Ethernet compliance */
+		u8	e10g_lrm:1;
+		u8	e10g_lr:1;
+		u8	e10g_sr:1;
+		u8	ib_sx:1;    /* Infiniband compliance */
+		u8	ib_lx:1;
+		u8	ib_cu_a:1;
+		u8	ib_cu_p:1;
+#else
+		u8	ib_cu_p:1;
+		u8	ib_cu_a:1;
+		u8	ib_lx:1;
+		u8	ib_sx:1;    /* Infiniband compliance */
+		u8	e10g_sr:1;
+		u8	e10g_lr:1;
+		u8	e10g_lrm:1;
+		u8	e10g_unall:1;   /* 10G Ethernet compliance */
+#endif
+	} r;
+};
+
+union sfp_xcvr_so1_code_u {
+	u8		b;
+	struct {
+		u8	escon:2;    /* ESCON compliance code */
+		u8	oc192_reach:1;  /* SONET compliance code */
+		u8	so_reach:2;
+		u8	oc48_reach:3;
+	} r;
+};
+
+union sfp_xcvr_so2_code_u {
+	u8		b;
+	struct {
+		u8	reserved:1;
+		u8	oc12_reach:3;   /* OC12 reach */
+		u8	reserved1:1;
+		u8	oc3_reach:3;    /* OC3 reach */
+	} r;
+};
+
+union sfp_xcvr_eth_code_u {
+	u8		b;
+	struct {
+		u8	base_px:1;
+		u8	base_bx10:1;
+		u8	e100base_fx:1;
+		u8	e100base_lx:1;
+		u8	e1000base_t:1;
+		u8	e1000base_cx:1;
+		u8	e1000base_lx:1;
+		u8	e1000base_sx:1;
+	} r;
+};
+
+struct sfp_xcvr_fc1_code_s {
+	u8	link_len:5; /* FC link length */
+	u8	xmtr_tech2:3;
+	u8	xmtr_tech1:7;   /* FC transmitter technology */
+	u8	reserved1:1;
+};
+
+union sfp_xcvr_fc2_code_u {
+	u8		b;
+	struct {
+		u8	tw_media:1; /* twin axial pair (tw) */
+		u8	tp_media:1; /* shielded twisted pair (sp) */
+		u8	mi_media:1; /* miniature coax (mi) */
+		u8	tv_media:1; /* video coax (tv) */
+		u8	m6_media:1; /* multimode, 62.5um (m6) */
+		u8	m5_media:1; /* multimode, 50um (m5) */
+		u8	reserved:1;
+		u8	sm_media:1; /* single mode (sm) */
+	} r;
+};
+
+union sfp_xcvr_fc3_code_u {
+	u8		b;
+	struct {
+#ifdef __BIG_ENDIAN
+		u8	rsv4:1;
+		u8	mb800:1;    /* 800 Mbytes/sec */
+		u8	mb1600:1;   /* 1600 Mbytes/sec */
+		u8	mb400:1;    /* 400 Mbytes/sec */
+		u8	rsv2:1;
+		u8	mb200:1;    /* 200 Mbytes/sec */
+		u8	rsv1:1;
+		u8	mb100:1;    /* 100 Mbytes/sec */
+#else
+		u8	mb100:1;    /* 100 Mbytes/sec */
+		u8	rsv1:1;
+		u8	mb200:1;    /* 200 Mbytes/sec */
+		u8	rsv2:1;
+		u8	mb400:1;    /* 400 Mbytes/sec */
+		u8	mb1600:1;   /* 1600 Mbytes/sec */
+		u8	mb800:1;    /* 800 Mbytes/sec */
+		u8	rsv4:1;
+#endif
+	} r;
+};
+
+struct sfp_xcvr_s {
+	union sfp_xcvr_e10g_code_u	e10g;
+	union sfp_xcvr_so1_code_u	so1;
+	union sfp_xcvr_so2_code_u	so2;
+	union sfp_xcvr_eth_code_u	eth;
+	struct sfp_xcvr_fc1_code_s	fc1;
+	union sfp_xcvr_fc2_code_u	fc2;
+	union sfp_xcvr_fc3_code_u	fc3;
+};
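+
+/*
+ * Illustrative sketch (not part of the driver): deriving the highest FC
+ * speed advertised in the FC3 speed byte above. The helper name
+ * sfp_xcvr_max_fc_speed() is hypothetical; it returns the speed in
+ * gigabits (100 MB/s corresponds to 1G FC).
+ */
+static inline int
+sfp_xcvr_max_fc_speed(const struct sfp_xcvr_s *xcvr)
+{
+	if (xcvr->fc3.r.mb1600)
+		return 16;
+	if (xcvr->fc3.r.mb800)
+		return 8;
+	if (xcvr->fc3.r.mb400)
+		return 4;
+	if (xcvr->fc3.r.mb200)
+		return 2;
+	if (xcvr->fc3.r.mb100)
+		return 1;
+	return 0;	/* no FC speed bits set */
+}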
+
+/*
+ *	Flash module specific
+ */
+#define BFA_FLASH_PART_ENTRY_SIZE	32	/* partition entry size */
+#define BFA_FLASH_PART_MAX		32	/* maximal # of partitions */
+
+enum bfa_flash_part_type {
+	BFA_FLASH_PART_OPTROM   = 1,    /* option rom partition */
+	BFA_FLASH_PART_FWIMG    = 2,    /* firmware image partition */
+	BFA_FLASH_PART_FWCFG    = 3,    /* firmware tuneable config */
+	BFA_FLASH_PART_DRV      = 4,    /* IOC driver config */
+	BFA_FLASH_PART_BOOT     = 5,    /* boot config */
+	BFA_FLASH_PART_ASIC     = 6,    /* asic bootstrap configuration */
+	BFA_FLASH_PART_MFG      = 7,    /* manufacturing block partition */
+	BFA_FLASH_PART_OPTROM2  = 8,    /* 2nd option rom partition */
+	BFA_FLASH_PART_VPD      = 9,    /* vpd data of OEM info */
+	BFA_FLASH_PART_PBC      = 10,   /* pre-boot config */
+	BFA_FLASH_PART_BOOTOVL  = 11,   /* boot overlay partition */
+	BFA_FLASH_PART_LOG      = 12,   /* firmware log partition */
+	BFA_FLASH_PART_PXECFG   = 13,   /* pxe boot config partition */
+	BFA_FLASH_PART_PXEOVL   = 14,   /* pxe boot overlay partition */
+	BFA_FLASH_PART_PORTCFG  = 15,   /* port cfg partition */
+	BFA_FLASH_PART_ASICBK   = 16,   /* asic backup partition */
+};
+
+/*
+ * flash partition attributes
+ */
+struct bfa_flash_part_attr_s {
+	u32	part_type;      /* partition type */
+	u32	part_instance;  /* partition instance */
+	u32	part_off;       /* partition offset */
+	u32	part_size;      /* partition size */
+	u32	part_len;       /* partition content length */
+	u32	part_status;    /* partition status */
+	char	rsv[BFA_FLASH_PART_ENTRY_SIZE - 24];
+};
+
+/*
+ * flash attributes
+ */
+struct bfa_flash_attr_s {
+	u32	status; /* flash overall status */
+	u32	npart;  /* num of partitions */
+	struct bfa_flash_part_attr_s part[BFA_FLASH_PART_MAX];
+};
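+
+/*
+ * Illustrative sketch (not part of the driver): locating a partition of
+ * a given type in a fetched flash attribute block. The helper name
+ * bfa_flash_find_part() is hypothetical.
+ */
+static inline struct bfa_flash_part_attr_s *
+bfa_flash_find_part(struct bfa_flash_attr_s *attr, u32 type)
+{
+	u32 i;
+
+	for (i = 0; i < attr->npart && i < BFA_FLASH_PART_MAX; i++)
+		if (attr->part[i].part_type == type)
+			return &attr->part[i];
+	return NULL;	/* no such partition */
+}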
+
+/*
+ *	DIAG module specific
+ */
+#define LB_PATTERN_DEFAULT	0xB5B5B5B5
+#define QTEST_CNT_DEFAULT	10
+#define QTEST_PAT_DEFAULT	LB_PATTERN_DEFAULT
+#define DPORT_ENABLE_LOOPCNT_DEFAULT (1024 * 1024)
+
+struct bfa_diag_memtest_s {
+	u8	algo;
+	u8	rsvd[7];
+};
+
+struct bfa_diag_memtest_result {
+	u32	status;
+	u32	addr;
+	u32	exp; /* expect value read from reg */
+	u32	act; /* actual value read */
+	u32	err_status;             /* error status reg */
+	u32	err_status1;    /* extra error info reg */
+	u32	err_addr; /* error address reg */
+	u8	algo;
+	u8	rsv[3];
+};
+
+struct bfa_diag_loopback_result_s {
+	u32	numtxmfrm;      /* no. of transmitted frames */
+	u32	numosffrm;      /* no. of outstanding frames */
+	u32	numrcvfrm;      /* no. of received good frames */
+	u32	badfrminf;      /* mis-match info */
+	u32	badfrmnum;      /* mis-matched frame number */
+	u8	status;         /* loopback test result */
+	u8	rsvd[3];
+};
+
+enum bfa_diag_dport_test_status {
+	DPORT_TEST_ST_IDLE	= 0,    /* the test has not started yet. */
+	DPORT_TEST_ST_FINAL	= 1,    /* the test done successfully */
+	DPORT_TEST_ST_SKIP	= 2,    /* the test skipped */
+	DPORT_TEST_ST_FAIL	= 3,    /* the test failed */
+	DPORT_TEST_ST_INPRG	= 4,    /* the testing is in progress */
+	DPORT_TEST_ST_RESPONDER	= 5,    /* test triggered from remote port */
+	DPORT_TEST_ST_STOPPED	= 6,    /* the test stopped by user. */
+	DPORT_TEST_ST_MAX
+};
+
+enum bfa_diag_dport_test_type {
+	DPORT_TEST_ELOOP	= 0,
+	DPORT_TEST_OLOOP	= 1,
+	DPORT_TEST_ROLOOP	= 2,
+	DPORT_TEST_LINK		= 3,
+	DPORT_TEST_MAX
+};
+
+enum bfa_diag_dport_test_opmode {
+	BFA_DPORT_OPMODE_AUTO	= 0,
+	BFA_DPORT_OPMODE_MANU	= 1,
+};
+
+struct bfa_diag_dport_subtest_result_s {
+	u8	status;		/* bfa_diag_dport_test_status */
+	u8	rsvd[7];	/* 64bit align */
+	u64	start_time;	/* timestamp  */
+};
+
+struct bfa_diag_dport_result_s {
+	wwn_t	rp_pwwn;	/* switch port wwn  */
+	wwn_t	rp_nwwn;	/* switch node wwn  */
+	u64	start_time;	/* user/sw start time */
+	u64	end_time;	/* timestamp  */
+	u8	status;		/* bfa_diag_dport_test_status */
+	u8	mode;		/* bfa_diag_dport_test_opmode */
+	u8	rsvd;		/* 64bit align */
+	u8	speed;		/* link speed for buf_reqd */
+	u16	buffer_required;
+	u16	frmsz;		/* frame size for buf_reqd */
+	u32	lpcnt;		/* Frame count */
+	u32	pat;		/* Pattern */
+	u32	roundtrip_latency;	/* in nano sec */
+	u32	est_cable_distance;	/* in meter */
+	struct bfa_diag_dport_subtest_result_s subtest[DPORT_TEST_MAX];
+};
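+
+/*
+ * Illustrative sketch (not part of the driver): rolling the per-subtest
+ * D-port results up into a single pass/fail. dport_result_passed() is a
+ * hypothetical helper; treating SKIP and RESPONDER runs as non-failures
+ * is an assumption of this sketch.
+ */
+static inline int
+dport_result_passed(const struct bfa_diag_dport_result_s *res)
+{
+	int i;
+
+	for (i = 0; i < DPORT_TEST_MAX; i++)
+		if (res->subtest[i].status == DPORT_TEST_ST_FAIL)
+			return 0;
+	return res->status == DPORT_TEST_ST_FINAL;
+}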
+
+struct bfa_diag_ledtest_s {
+	u32	cmd;    /* bfa_led_op_t */
+	u32	color;  /* bfa_led_color_t */
+	u16	freq;   /* no. of blinks every 10 secs */
+	u8	led;    /* bitmap of LEDs to be tested */
+	u8	rsvd[5];
+};
+
+struct bfa_diag_loopback_s {
+	u32	loopcnt;
+	u32	pattern;
+	u8	lb_mode;    /* bfa_port_opmode_t */
+	u8	speed;      /* bfa_port_speed_t */
+	u8	rsvd[2];
+};
+
+/*
+ *	PHY module specific
+ */
+enum bfa_phy_status_e {
+	BFA_PHY_STATUS_GOOD	= 0, /* phy is good */
+	BFA_PHY_STATUS_NOT_PRESENT	= 1, /* phy does not exist */
+	BFA_PHY_STATUS_BAD	= 2, /* phy is bad */
+};
+
+/*
+ * phy attributes for phy query
+ */
+struct bfa_phy_attr_s {
+	u32	status;         /* phy present/absent status */
+	u32	length;         /* firmware length */
+	u32	fw_ver;         /* firmware version */
+	u32	an_status;      /* AN status */
+	u32	pma_pmd_status; /* PMA/PMD link status */
+	u32	pma_pmd_signal; /* PMA/PMD signal detect */
+	u32	pcs_status;     /* PCS link status */
+};
+
+/*
+ * phy stats
+ */
+struct bfa_phy_stats_s {
+	u32	status;         /* phy stats status */
+	u32	link_breaks;    /* Num of link breaks after linkup */
+	u32	pma_pmd_fault;  /* Num of PMA/PMD faults */
+	u32	pcs_fault;      /* PCS fault */
+	u32	speed_neg;      /* Num of speed negotiation */
+	u32	tx_eq_training; /* Num of TX EQ training */
+	u32	tx_eq_timeout;  /* Num of TX EQ timeout */
+	u32	crc_error;      /* Num of CRC errors */
+};
+
+#pragma pack()
+
+#endif /* __BFA_DEFS_H__ */
diff --git a/drivers/scsi/bfa/bfa_defs_fcs.h b/drivers/scsi/bfa/bfa_defs_fcs.h
new file mode 100644
index 0000000..5815a90
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_defs_fcs.h
@@ -0,0 +1,479 @@
+/*
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014- QLogic Corporation.
+ * All rights reserved
+ * www.qlogic.com
+ *
+ * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __BFA_DEFS_FCS_H__
+#define __BFA_DEFS_FCS_H__
+
+#include "bfa_fc.h"
+#include "bfa_defs_svc.h"
+
+/*
+ * VF states
+ */
+enum bfa_vf_state {
+	BFA_VF_UNINIT    = 0,	/*  fabric is not yet initialized */
+	BFA_VF_LINK_DOWN = 1,	/*  link is down */
+	BFA_VF_FLOGI     = 2,	/*  flogi is in progress */
+	BFA_VF_AUTH      = 3,	/*  authentication in progress */
+	BFA_VF_NOFABRIC  = 4,	/*  fabric is not present */
+	BFA_VF_ONLINE    = 5,	/*  login to fabric is complete */
+	BFA_VF_EVFP      = 6,	/*  EVFP is in progress */
+	BFA_VF_ISOLATED  = 7,	/*  port isolated due to vf_id mismatch */
+};
+
+/*
+ * VF statistics
+ */
+struct bfa_vf_stats_s {
+	u32	flogi_sent;	/*  Num FLOGIs sent */
+	u32	flogi_rsp_err;	/*  FLOGI response errors */
+	u32	flogi_acc_err;	/*  FLOGI accept errors */
+	u32	flogi_accepts;	/*  FLOGI accepts received */
+	u32	flogi_rejects;	/*  FLOGI rejects received */
+	u32	flogi_unknown_rsp; /*  Unknown responses for FLOGI */
+	u32	flogi_alloc_wait; /*  Allocation waits prior to sending FLOGI */
+	u32	flogi_rcvd;	/*  FLOGIs received */
+	u32	flogi_rejected;	/*  Incoming FLOGIs rejected */
+	u32	fabric_onlines;	/*  Internal fabric online notification sent
+				 *  to other modules */
+	u32	fabric_offlines; /* Internal fabric offline notification sent
+				  * to other modules */
+	u32	resvd; /*  padding for 64 bit alignment */
+};
+
+/*
+ * VF attributes returned in queries
+ */
+struct bfa_vf_attr_s {
+	enum bfa_vf_state  state;		/*  VF state */
+	u32        rsvd;
+	wwn_t           fabric_name;	/*  fabric name */
+};
+
+#define BFA_FCS_MAX_LPORTS 256
+#define BFA_FCS_FABRIC_IPADDR_SZ  16
+
+/*
+ * symbolic names for base port/virtual port
+ */
+#define BFA_SYMNAME_MAXLEN	128	/* 128 bytes */
+struct bfa_lport_symname_s {
+	char	    symname[BFA_SYMNAME_MAXLEN];
+};
+
+/*
+ * Roles of FCS port:
+ *     - FCP IM and FCP TM roles cannot be enabled together for an FCS port
+ *     - Create multiple ports if both IM and TM functions are required.
+ *     - At least one role must be specified.
+ */
+enum bfa_lport_role {
+	BFA_LPORT_ROLE_FCP_IM	= 0x01,	/*  FCP initiator role */
+	BFA_LPORT_ROLE_FCP_MAX	= BFA_LPORT_ROLE_FCP_IM,
+};
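+
+/*
+ * Illustrative sketch (not part of the driver): validating a role mask
+ * against the rules above -- at least one role, and only defined role
+ * bits. lport_roles_valid() is a hypothetical helper.
+ */
+static inline int
+lport_roles_valid(enum bfa_lport_role roles)
+{
+	return (roles != 0) && !(roles & ~BFA_LPORT_ROLE_FCP_MAX);
+}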
+
+/*
+ * FCS port configuration.
+ */
+struct bfa_lport_cfg_s {
+	wwn_t	       pwwn;       /*  port wwn */
+	wwn_t	       nwwn;       /*  node wwn */
+	struct bfa_lport_symname_s  sym_name;   /*  vm port symbolic name */
+	struct bfa_lport_symname_s node_sym_name; /* Node symbolic name */
+	enum bfa_lport_role roles;      /* FCS port roles */
+	u32     rsvd;
+	bfa_boolean_t   preboot_vp;  /*  vport created from PBC */
+	u8	tag[16];        /* opaque tag from application */
+	u8	padding[4];
+};
+
+/*
+ * FCS port states
+ */
+enum bfa_lport_state {
+	BFA_LPORT_UNINIT  = 0,	/*  PORT is not yet initialized */
+	BFA_LPORT_FDISC   = 1,	/*  FDISC is in progress */
+	BFA_LPORT_ONLINE  = 2,	/*  login to fabric is complete */
+	BFA_LPORT_OFFLINE = 3,	/*  No login to fabric */
+};
+
+/*
+ * FCS port type.
+ */
+enum bfa_lport_type {
+	BFA_LPORT_TYPE_PHYSICAL = 0,
+	BFA_LPORT_TYPE_VIRTUAL,
+};
+
+/*
+ * FCS port offline reason.
+ */
+enum bfa_lport_offline_reason {
+	BFA_LPORT_OFFLINE_UNKNOWN = 0,
+	BFA_LPORT_OFFLINE_LINKDOWN,
+	BFA_LPORT_OFFLINE_FAB_UNSUPPORTED,	/*  NPIV not supported by
+						 *  the fabric */
+	BFA_LPORT_OFFLINE_FAB_NORESOURCES,
+	BFA_LPORT_OFFLINE_FAB_LOGOUT,
+};
+
+/*
+ * FCS lport info.
+ */
+struct bfa_lport_info_s {
+	u8	 port_type;	/* bfa_lport_type_t: physical or virtual */
+	u8	 port_state;	/* one of bfa_lport_state values */
+	u8	 offline_reason; /* one of bfa_lport_offline_reason_t
+				  * values */
+	wwn_t	   port_wwn;
+	wwn_t	   node_wwn;
+
+	/*
+	 * The following four fields are valid for physical ports only
+	 */
+	u32	max_vports_supp;	/* Max supported vports */
+	u32	num_vports_inuse;	/* Num of in use vports */
+	u32	max_rports_supp;	/* Max supported rports */
+	u32	num_rports_inuse;	/* Num of discovered rports */
+
+};
+
+/*
+ * FCS port statistics
+ */
+struct bfa_lport_stats_s {
+	u32	ns_plogi_sent;
+	u32	ns_plogi_rsp_err;
+	u32	ns_plogi_acc_err;
+	u32	ns_plogi_accepts;
+	u32	ns_rejects;	/* NS command rejects */
+	u32	ns_plogi_unknown_rsp;
+	u32	ns_plogi_alloc_wait;
+
+	u32	ns_retries;	/* NS command retries */
+	u32	ns_timeouts;	/* NS command timeouts */
+
+	u32	ns_rspnid_sent;
+	u32	ns_rspnid_accepts;
+	u32	ns_rspnid_rsp_err;
+	u32	ns_rspnid_rejects;
+	u32	ns_rspnid_alloc_wait;
+
+	u32	ns_rftid_sent;
+	u32	ns_rftid_accepts;
+	u32	ns_rftid_rsp_err;
+	u32	ns_rftid_rejects;
+	u32	ns_rftid_alloc_wait;
+
+	u32	ns_rffid_sent;
+	u32	ns_rffid_accepts;
+	u32	ns_rffid_rsp_err;
+	u32	ns_rffid_rejects;
+	u32	ns_rffid_alloc_wait;
+
+	u32	ns_gidft_sent;
+	u32	ns_gidft_accepts;
+	u32	ns_gidft_rsp_err;
+	u32	ns_gidft_rejects;
+	u32	ns_gidft_unknown_rsp;
+	u32	ns_gidft_alloc_wait;
+
+	u32	ns_rnnid_sent;
+	u32	ns_rnnid_accepts;
+	u32	ns_rnnid_rsp_err;
+	u32	ns_rnnid_rejects;
+	u32	ns_rnnid_alloc_wait;
+
+	u32	ns_rsnn_nn_sent;
+	u32	ns_rsnn_nn_accepts;
+	u32	ns_rsnn_nn_rsp_err;
+	u32	ns_rsnn_nn_rejects;
+	u32	ns_rsnn_nn_alloc_wait;
+
+	/*
+	 * Mgmt Server stats
+	 */
+	u32	ms_retries;	/* MS command retries */
+	u32	ms_timeouts;	/* MS command timeouts */
+	u32	ms_plogi_sent;
+	u32	ms_plogi_rsp_err;
+	u32	ms_plogi_acc_err;
+	u32	ms_plogi_accepts;
+	u32	ms_rejects;	/* MS command rejects */
+	u32	ms_plogi_unknown_rsp;
+	u32	ms_plogi_alloc_wait;
+
+	u32	num_rscn;	/* Num of RSCN received */
+	u32	num_portid_rscn;/* Num portid format RSCN received */
+
+	u32	uf_recvs;	/* Unsolicited recv frames	*/
+	u32	uf_recv_drops;	/* Dropped received frames	*/
+
+	u32	plogi_rcvd;	/* Received plogi	*/
+	u32	prli_rcvd;	/* Received prli	*/
+	u32	adisc_rcvd;	/* Received adisc	*/
+	u32	prlo_rcvd;	/* Received prlo	*/
+	u32	logo_rcvd;	/* Received logo	*/
+	u32	rpsc_rcvd;	/* Received rpsc	*/
+	u32	un_handled_els_rcvd;	/* Received unhandled ELS	*/
+	u32	rport_plogi_timeouts; /* Rport plogi retry timeout count */
+	u32	rport_del_max_plogi_retry; /* Deleted rport
+					    * (max retry of plogi) */
+};
+
+/*
+ * BFA port attribute returned in queries
+ */
+struct bfa_lport_attr_s {
+	enum bfa_lport_state state;	/*  port state */
+	u32	 pid;	/*  port ID */
+	struct bfa_lport_cfg_s   port_cfg;	/*  port configuration */
+	enum bfa_port_type port_type;	/*  current topology */
+	u32	 loopback;	/*  cable is externally looped back */
+	wwn_t	fabric_name; /*  attached switch's nwwn */
+	u8	fabric_ip_addr[BFA_FCS_FABRIC_IPADDR_SZ]; /* attached
+						 * fabric's ip addr */
+	mac_t	   fpma_mac;	/*  Lport's FPMA Mac address */
+	u16	authfail;	/*  auth failed state */
+};
+
+
+/*
+ * VPORT states
+ */
+enum bfa_vport_state {
+	BFA_FCS_VPORT_UNINIT		= 0,
+	BFA_FCS_VPORT_CREATED		= 1,
+	BFA_FCS_VPORT_OFFLINE		= 1,
+	BFA_FCS_VPORT_FDISC_SEND	= 2,
+	BFA_FCS_VPORT_FDISC		= 3,
+	BFA_FCS_VPORT_FDISC_RETRY	= 4,
+	BFA_FCS_VPORT_FDISC_RSP_WAIT	= 5,
+	BFA_FCS_VPORT_ONLINE		= 6,
+	BFA_FCS_VPORT_DELETING		= 7,
+	BFA_FCS_VPORT_CLEANUP		= 8,
+	BFA_FCS_VPORT_LOGO_SEND		= 9,
+	BFA_FCS_VPORT_LOGO		= 10,
+	BFA_FCS_VPORT_ERROR		= 11,
+	BFA_FCS_VPORT_MAX_STATE,
+};
+
+/*
+ * vport statistics
+ */
+struct bfa_vport_stats_s {
+	struct bfa_lport_stats_s port_stats;	/*  base class (port) stats */
+	/*
+	 * TODO - remove
+	 */
+
+	u32        fdisc_sent;	/*  num fdisc sent */
+	u32        fdisc_accepts;	/*  fdisc accepts */
+	u32        fdisc_retries;	/*  fdisc retries */
+	u32        fdisc_timeouts;	/*  fdisc timeouts */
+	u32        fdisc_rsp_err;	/*  fdisc response error */
+	u32        fdisc_acc_bad;	/*  bad fdisc accepts */
+	u32        fdisc_rejects;	/*  fdisc rejects */
+	u32        fdisc_unknown_rsp;	/*  fdisc rsp unknown error */
+	u32        fdisc_alloc_wait;	/*  fdisc req (fcxp) alloc wait */
+
+	u32        logo_alloc_wait;/*  logo req (fcxp) alloc wait */
+	u32        logo_sent;	/*  logo sent */
+	u32        logo_accepts;	/*  logo accepts */
+	u32        logo_rejects;	/*  logo rejects */
+	u32        logo_rsp_err;	/*  logo rsp errors */
+	u32        logo_unknown_rsp;	/*  logo rsp unknown errors */
+
+	u32        fab_no_npiv;	/*  fabric does not support npiv */
+
+	u32        fab_offline;	/*  offline events from fab SM */
+	u32        fab_online;	/*  online events from fab SM */
+	u32        fab_cleanup;	/*  cleanup request from fab SM */
+	u32        rsvd;
+};
+
+/*
+ * BFA vport attribute returned in queries
+ */
+struct bfa_vport_attr_s {
+	struct bfa_lport_attr_s   port_attr; /*  base class (port) attributes */
+	enum bfa_vport_state vport_state; /*  vport state */
+	u32          rsvd;
+};
+
+/*
+ * FCS remote port states
+ */
+enum bfa_rport_state {
+	BFA_RPORT_UNINIT	= 0,	/*  PORT is not yet initialized */
+	BFA_RPORT_OFFLINE	= 1,	/*  rport is offline */
+	BFA_RPORT_PLOGI		= 2,	/*  PLOGI to rport is in progress */
+	BFA_RPORT_ONLINE	= 3,	/*  login to rport is complete */
+	BFA_RPORT_PLOGI_RETRY	= 4,	/*  retrying login to rport */
+	BFA_RPORT_NSQUERY	= 5,	/*  nameserver query */
+	BFA_RPORT_ADISC		= 6,	/*  ADISC authentication */
+	BFA_RPORT_LOGO		= 7,	/*  logging out with rport */
+	BFA_RPORT_LOGORCV	= 8,	/*  handling LOGO from rport */
+	BFA_RPORT_NSDISC	= 9,	/*  re-discover rport */
+};
+
+/*
+ *  Rport Scsi Function : Initiator/Target.
+ */
+enum bfa_rport_function {
+	BFA_RPORT_INITIATOR	= 0x01,	/*  SCSI Initiator	*/
+	BFA_RPORT_TARGET	= 0x02,	/*  SCSI Target	*/
+};
+
+/*
+ * port/node symbolic names for rport
+ */
+#define BFA_RPORT_SYMNAME_MAXLEN	255
+struct bfa_rport_symname_s {
+	char            symname[BFA_RPORT_SYMNAME_MAXLEN];
+};
+
+/*
+ * FCS remote port statistics
+ */
+struct bfa_rport_stats_s {
+	u32        offlines;           /*  remote port offline count  */
+	u32        onlines;            /*  remote port online count   */
+	u32        rscns;              /*  RSCN affecting rport       */
+	u32        plogis;		    /*  plogis sent                */
+	u32        plogi_accs;	    /*  plogi accepts              */
+	u32        plogi_timeouts;	    /*  plogi timeouts             */
+	u32        plogi_rejects;	    /*  rcvd plogi rejects         */
+	u32        plogi_failed;	    /*  local failure              */
+	u32        plogi_rcvd;	    /*  plogis rcvd                */
+	u32        prli_rcvd;          /*  inbound PRLIs              */
+	u32        adisc_rcvd;         /*  ADISCs received            */
+	u32        adisc_rejects;      /*  recvd  ADISC rejects       */
+	u32        adisc_sent;         /*  ADISC requests sent        */
+	u32        adisc_accs;         /*  ADISC accepted by rport    */
+	u32        adisc_failed;       /*  ADISC failed (no response) */
+	u32        adisc_rejected;     /*  ADISC rejected by us    */
+	u32        logos;              /*  logos sent                 */
+	u32        logo_accs;          /*  LOGO accepts from rport    */
+	u32        logo_failed;        /*  LOGO failures              */
+	u32        logo_rejected;      /*  LOGO rejects from rport    */
+	u32        logo_rcvd;          /*  LOGO from remote port      */
+
+	u32        rpsc_rcvd;         /*  RPSC received            */
+	u32        rpsc_rejects;      /*  recvd  RPSC rejects       */
+	u32        rpsc_sent;         /*  RPSC requests sent        */
+	u32        rpsc_accs;         /*  RPSC accepted by rport    */
+	u32        rpsc_failed;       /*  RPSC failed (no response) */
+	u32        rpsc_rejected;     /*  RPSC rejected by us    */
+
+	u32	rjt_insuff_res;	/*  LS RJT with insuff resources */
+	struct bfa_rport_hal_stats_s	hal_stats;  /*  BFA rport stats    */
+};
+
+/*
+ * FCS remote port attributes returned in queries
+ */
+struct bfa_rport_attr_s {
+	wwn_t		nwwn;	/*  node wwn */
+	wwn_t		pwwn;	/*  port wwn */
+	enum fc_cos cos_supported;	/*  supported class of services */
+	u32		pid;	/*  port ID */
+	u32		df_sz;	/*  Max payload size */
+	enum bfa_rport_state	state;	/*  Rport State machine state */
+	enum fc_cos	fc_cos;	/*  FC classes of services */
+	bfa_boolean_t	cisc;	/*  CISC capable device */
+	struct bfa_rport_symname_s symname; /*  Symbolic Name */
+	enum bfa_rport_function	scsi_function; /*  Initiator/Target */
+	struct bfa_rport_qos_attr_s qos_attr; /*  qos attributes  */
+	enum bfa_port_speed curr_speed;   /*  operating speed got from
+					    * RPSC ELS. UNKNOWN, if RPSC
+					    * is not supported */
+	bfa_boolean_t	trl_enforced;	/*  TRL enforced ? TRUE/FALSE */
+	enum bfa_port_speed	assigned_speed;	/* Speed assigned by the user.
+						 * will be used if RPSC is not
+						 * supported by the rport */
+};
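+
+/*
+ * Illustrative sketch (not part of the driver) of the speed selection
+ * rule in the comments above: prefer the RPSC-reported speed and fall
+ * back to the user-assigned one. rport_effective_speed() is a
+ * hypothetical helper; it assumes BFA_PORT_SPEED_UNKNOWN marks "RPSC
+ * not supported".
+ */
+static inline enum bfa_port_speed
+rport_effective_speed(const struct bfa_rport_attr_s *attr)
+{
+	if (attr->curr_speed != BFA_PORT_SPEED_UNKNOWN)
+		return attr->curr_speed;
+	return attr->assigned_speed;
+}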
+
+struct bfa_rport_remote_link_stats_s {
+	u32 lfc; /*  Link Failure Count */
+	u32 lsyc; /*  Loss of Synchronization Count */
+	u32 lsic; /*  Loss of Signal Count */
+	u32 pspec; /*  Primitive Sequence Protocol Error Count */
+	u32 itwc; /*  Invalid Transmission Word Count */
+	u32 icc; /*  Invalid CRC Count */
+};
+
+struct bfa_rport_qualifier_s {
+	wwn_t	pwwn;	/* Port WWN */
+	u32	pid;	/* port ID */
+	u32	rsvd;
+};
+
+#define BFA_MAX_IO_INDEX 7
+#define BFA_NO_IO_INDEX 9
+
+/*
+ * FCS itnim states
+ */
+enum bfa_itnim_state {
+	BFA_ITNIM_OFFLINE	= 0,	/*  offline */
+	BFA_ITNIM_PRLI_SEND	= 1,	/*  prli send */
+	BFA_ITNIM_PRLI_SENT	= 2,	/*  prli sent */
+	BFA_ITNIM_PRLI_RETRY	= 3,	/*  prli retry */
+	BFA_ITNIM_HCB_ONLINE	= 4,	/*  online callback */
+	BFA_ITNIM_ONLINE	= 5,	/*  online */
+	BFA_ITNIM_HCB_OFFLINE	= 6,	/*  offline callback */
+	BFA_ITNIM_INITIATIOR	= 7,	/*  initiator */
+};
+
+/*
+ * FCS remote port statistics
+ */
+struct bfa_itnim_stats_s {
+	u32        onlines;	/*  num rport online */
+	u32        offlines;	/*  num rport offline */
+	u32        prli_sent;	/*  num prli sent out */
+	u32        fcxp_alloc_wait;/*  num fcxp alloc waits */
+	u32        prli_rsp_err;	/*  num prli rsp errors */
+	u32        prli_rsp_acc;	/*  num prli rsp accepts */
+	u32        initiator;	/*  rport is an initiator */
+	u32        prli_rsp_parse_err;	/*  prli rsp parsing errors */
+	u32        prli_rsp_rjt;	/*  num prli rsp rejects */
+	u32        timeout;	/*  num timeouts detected */
+	u32        sler;		/*  num sler notification from BFA */
+	u32	rsvd;		/* padding for 64 bit alignment */
+};
+
+/*
+ * FCS itnim attributes returned in queries
+ */
+struct bfa_itnim_attr_s {
+	enum bfa_itnim_state state; /*  FCS itnim state        */
+	u8 retry;		/*  data retransmission support */
+	u8	task_retry_id;  /*  task retry ident support   */
+	u8 rec_support;    /*  REC supported              */
+	u8 conf_comp;      /*  confirmed completion supp  */
+};
+
+#endif /* __BFA_DEFS_FCS_H__ */
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
new file mode 100644
index 0000000..3d0c96a
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -0,0 +1,1464 @@
+/*
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014- QLogic Corporation.
+ * All rights reserved
+ * www.qlogic.com
+ *
+ * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __BFA_DEFS_SVC_H__
+#define __BFA_DEFS_SVC_H__
+
+#include "bfa_defs.h"
+#include "bfa_fc.h"
+#include "bfi.h"
+
+#define BFA_IOCFC_INTR_DELAY	1125
+#define BFA_IOCFC_INTR_LATENCY	225
+#define BFA_IOCFCOE_INTR_DELAY	25
+#define BFA_IOCFCOE_INTR_LATENCY 5
+
+/*
+ * Interrupt coalescing configuration.
+ */
+#pragma pack(1)
+struct bfa_iocfc_intr_attr_s {
+	u8		coalesce;	/*  enable/disable coalescing */
+	u8		rsvd[3];
+	__be16		latency;	/*  latency in microseconds   */
+	__be16		delay;		/*  delay in microseconds     */
+};
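+
+/*
+ * Illustrative sketch (not part of the driver): populating the
+ * coalescing attributes with the FC defaults above. The latency and
+ * delay fields are big-endian on the wire, hence cpu_to_be16(). The
+ * helper name iocfc_intr_attr_init() is hypothetical.
+ */
+static inline void
+iocfc_intr_attr_init(struct bfa_iocfc_intr_attr_s *attr)
+{
+	attr->coalesce = BFA_TRUE;
+	attr->rsvd[0] = attr->rsvd[1] = attr->rsvd[2] = 0;
+	attr->latency = cpu_to_be16(BFA_IOCFC_INTR_LATENCY);
+	attr->delay = cpu_to_be16(BFA_IOCFC_INTR_DELAY);
+}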
+
+/*
+ * IOC firmware configuration
+ */
+struct bfa_iocfc_fwcfg_s {
+	u16		num_fabrics;	/*  number of fabrics		*/
+	u16		num_lports;	/*  number of local lports	*/
+	u16		num_rports;	/*  number of remote ports	*/
+	u16		num_ioim_reqs;	/*  number of IO reqs		*/
+	u16		num_tskim_reqs;	/*  task management requests	*/
+	u16		num_fwtio_reqs;	/* number of TM IO reqs in FW   */
+	u16		num_fcxp_reqs;	/*  unassisted FC exchanges	*/
+	u16		num_uf_bufs;	/*  unsolicited recv buffers	*/
+	u8		num_cqs;
+	u8		fw_tick_res;	/*  FW clock resolution in ms */
+	u8		rsvd[6];
+};
+#pragma pack()
+
+struct bfa_iocfc_drvcfg_s {
+	u16		num_reqq_elems;	/*  number of req queue elements */
+	u16		num_rspq_elems;	/*  number of rsp queue elements */
+	u16		num_sgpgs;	/*  number of total SG pages	 */
+	u16		num_sboot_tgts;	/*  number of SAN boot targets	 */
+	u16		num_sboot_luns;	/*  number of SAN boot luns	 */
+	u16		ioc_recover;	/*  IOC recovery mode		 */
+	u16		min_cfg;	/*  minimum configuration	 */
+	u16		path_tov;	/*  device path timeout		*/
+	u16		num_tio_reqs;	/* number of TM IO reqs	*/
+	u8		port_mode;
+	u8		rsvd_a;
+	bfa_boolean_t	delay_comp;	/* delay completion of failed
+					 * inflight IOs */
+	u16		num_ttsk_reqs;	 /* TM task management requests */
+	u32		rsvd;
+};
+
+/*
+ * IOC configuration
+ */
+struct bfa_iocfc_cfg_s {
+	struct bfa_iocfc_fwcfg_s	fwcfg;	/*  firmware side config */
+	struct bfa_iocfc_drvcfg_s	drvcfg;	/*  driver side config	  */
+};
+
+/*
+ * IOC firmware IO stats
+ */
+struct bfa_fw_ioim_stats_s {
+	u32	host_abort;		/*  IO aborted by host driver*/
+	u32	host_cleanup;		/*  IO clean up by host driver */
+
+	u32	fw_io_timeout;		/*  IOs timedout */
+	u32	fw_frm_parse;		/*  frame parsed by f/w */
+	u32	fw_frm_data;		/*  fcp_data frame parsed by f/w */
+	u32	fw_frm_rsp;		/*  fcp_rsp frame parsed by f/w */
+	u32	fw_frm_xfer_rdy;	/*  xfer_rdy frame parsed by f/w */
+	u32	fw_frm_bls_acc;		/*  BLS ACC  frame parsed by f/w */
+	u32	fw_frm_tgt_abort;	/*  target ABTS parsed by f/w */
+	u32	fw_frm_unknown;		/*  unknown parsed by f/w */
+	u32	fw_data_dma;		/*  f/w DMA'ed the data frame */
+	u32	fw_frm_drop;		/*  f/w drop the frame */
+
+	u32	rec_timeout;		/*  FW rec timed out */
+	u32	error_rec;		/*  FW sending rec on
+					 *  an error condition*/
+	u32	wait_for_si;		/*  FW wait for SI */
+	u32	rec_rsp_inval;		/*  REC rsp invalid */
+	u32     rec_rsp_xchg_comp;	/*  REC rsp xchg complete */
+	u32     rec_rsp_rd_si_ownd;	/*  REC rsp read si owned */
+
+	u32	seqr_io_abort;		/*  target does not know cmd so abort */
+	u32	seqr_io_retry;		/*  SEQR failed so retry IO */
+
+	u32	itn_cisc_upd_rsp;	/*  ITN cisc updated on fcp_rsp */
+	u32	itn_cisc_upd_data;	/*  ITN cisc updated on fcp_data */
+	u32	itn_cisc_upd_xfer_rdy;	/*  ITN cisc updated on fcp_data */
+
+	u32	fcp_data_lost;		/*  fcp data lost */
+
+	u32	ro_set_in_xfer_rdy;	/*  Target set RO in Xfer_rdy frame */
+	u32	xfer_rdy_ooo_err;	/*  Out of order Xfer_rdy received */
+	u32	xfer_rdy_unknown_err;	/*  unknown error in xfer_rdy frame */
+
+	u32	io_abort_timeout;	/*  ABTS timedout  */
+	u32	sler_initiated;		/*  SLER initiated */
+
+	u32	unexp_fcp_rsp;		/*  fcp response in wrong state */
+
+	u32	fcp_rsp_under_run;	/*  fcp rsp IO underrun */
+	u32     fcp_rsp_under_run_wr;   /*  fcp rsp IO underrun for write */
+	u32	fcp_rsp_under_run_err;	/*  fcp rsp IO underrun error */
+	u32     fcp_rsp_resid_inval;    /*  invalid residue */
+	u32	fcp_rsp_over_run;	/*  fcp rsp IO overrun */
+	u32	fcp_rsp_over_run_err;	/*  fcp rsp IO overrun error */
+	u32	fcp_rsp_proto_err;	/*  protocol error in fcp rsp */
+	u32	fcp_rsp_sense_err;	/*  error in sense info in fcp rsp */
+	u32	fcp_conf_req;		/*  FCP conf requested */
+
+	u32	tgt_aborted_io;		/*  target initiated abort */
+
+	u32	ioh_edtov_timeout_event;/*  IOH edtov timer popped */
+	u32	ioh_fcp_rsp_excp_event;	/*  IOH FCP_RSP exception */
+	u32	ioh_fcp_conf_event;	/*  IOH FCP_CONF */
+	u32	ioh_mult_frm_rsp_event;	/*  IOH multi_frame FCP_RSP */
+	u32	ioh_hit_class2_event;	/*  IOH hit class2 */
+	u32	ioh_miss_other_event;	/*  IOH miss other */
+	u32	ioh_seq_cnt_err_event;	/*  IOH seq cnt error */
+	u32	ioh_len_err_event;	/*  IOH len error - fcp_dl !=
+					 *  bytes xfered */
+	u32	ioh_seq_len_err_event;	/*  IOH seq len error */
+	u32	ioh_data_oor_event;	/*  Data out of range */
+	u32	ioh_ro_ooo_event;	/*  Relative offset out of range */
+	u32	ioh_cpu_owned_event;	/*  IOH hit -iost owned by f/w */
+	u32	ioh_unexp_frame_event;	/*  unexpected frame received
+					 *  count */
+	u32	ioh_err_int;		/*  IOH error int during data-phase
+					 *  for scsi write */
+};
+
+struct bfa_fw_tio_stats_s {
+	u32	tio_conf_proc;	    /* TIO CONF processed */
+	u32	tio_conf_drop;      /* TIO CONF dropped */
+	u32	tio_cleanup_req;    /* TIO cleanup requested */
+	u32	tio_cleanup_comp;   /* TIO cleanup completed */
+	u32	tio_abort_rsp;      /* TIO abort response */
+	u32	tio_abort_rsp_comp; /* TIO abort rsp completed */
+	u32	tio_abts_req;       /* TIO ABTS requested */
+	u32	tio_abts_ack;       /* TIO ABTS ack-ed */
+	u32	tio_abts_ack_nocomp;/* TIO ABTS ack-ed but not completed */
+	u32	tio_abts_tmo;       /* TIO ABTS timeout */
+	u32	tio_snsdata_dma;    /* TIO sense data DMA */
+	u32	tio_rxwchan_wait;   /* TIO waiting for RX wait channel */
+	u32	tio_rxwchan_avail;  /* TIO RX wait channel available */
+	u32	tio_hit_bls;        /* TIO IOH BLS event */
+	u32	tio_uf_recv;        /* TIO received UF */
+	u32	tio_rd_invalid_sm;  /* TIO read reqst in wrong state machine */
+	u32	tio_wr_invalid_sm;  /* TIO write reqst in wrong state machine */
+
+	u32	ds_rxwchan_wait;    /* DS waiting for RX wait channel */
+	u32	ds_rxwchan_avail;   /* DS RX wait channel available */
+	u32	ds_unaligned_rd;    /* DS unaligned read */
+	u32	ds_rdcomp_invalid_sm; /* DS read completed in wrong state
+				       * machine */
+	u32	ds_wrcomp_invalid_sm; /* DS write completed in wrong state
+				       * machine */
+	u32	ds_flush_req;       /* DS flush requested */
+	u32	ds_flush_comp;      /* DS flush completed */
+	u32	ds_xfrdy_exp;       /* DS XFER_RDY expired */
+	u32	ds_seq_cnt_err;     /* DS seq cnt error */
+	u32	ds_seq_len_err;     /* DS seq len error */
+	u32	ds_data_oor;        /* DS data out of range */
+	u32	ds_hit_bls;	    /* DS hit BLS */
+	u32	ds_edtov_timer_exp; /* DS edtov expired */
+	u32	ds_cpu_owned;       /* DS cpu owned */
+	u32	ds_hit_class2;      /* DS hit class2 */
+	u32	ds_length_err;      /* DS length error */
+	u32	ds_ro_ooo_err;      /* DS relative offset out-of-order error */
+	u32	ds_rectov_timer_exp;/* DS rectov expired */
+	u32	ds_unexp_fr_err;    /* DS unexp frame error */
+};
+
+/*
+ * IOC firmware IO stats
+ */
+struct bfa_fw_io_stats_s {
+	struct bfa_fw_ioim_stats_s	ioim_stats;
+	struct bfa_fw_tio_stats_s	tio_stats;
+};
+
+/*
+ * IOC port firmware stats
+ */
+
+struct bfa_fw_port_fpg_stats_s {
+	u32    intr_evt;
+	u32    intr;
+	u32    intr_excess;
+	u32    intr_cause0;
+	u32    intr_other;
+	u32    intr_other_ign;
+	u32    sig_lost;
+	u32    sig_regained;
+	u32    sync_lost;
+	u32    sync_to;
+	u32    sync_regained;
+	u32    div2_overflow;
+	u32    div2_underflow;
+	u32    efifo_overflow;
+	u32    efifo_underflow;
+	u32    idle_rx;
+	u32    lrr_rx;
+	u32    lr_rx;
+	u32    ols_rx;
+	u32    nos_rx;
+	u32    lip_rx;
+	u32    arbf0_rx;
+	u32    arb_rx;
+	u32    mrk_rx;
+	u32    const_mrk_rx;
+	u32    prim_unknown;
+};
+
+struct bfa_fw_port_lksm_stats_s {
+	u32    hwsm_success;       /*  hwsm state machine success          */
+	u32    hwsm_fails;         /*  hwsm fails                          */
+	u32    hwsm_wdtov;         /*  hwsm timed out                      */
+	u32    swsm_success;       /*  swsm success                        */
+	u32    swsm_fails;         /*  swsm fails                          */
+	u32    swsm_wdtov;         /*  swsm timed out                      */
+	u32    busybufs;           /*  link init failed due to busybuf     */
+	u32    buf_waits;          /*  bufwait state entries               */
+	u32    link_fails;         /*  link failures                       */
+	u32    psp_errors;         /*  primitive sequence protocol errors  */
+	u32    lr_unexp;           /*  No. of times LR rx-ed unexpectedly  */
+	u32    lrr_unexp;          /*  No. of times LRR rx-ed unexpectedly */
+	u32    lr_tx;              /*  No. of times LR tx started          */
+	u32    lrr_tx;             /*  No. of times LRR tx started         */
+	u32    ols_tx;             /*  No. of times OLS tx started         */
+	u32    nos_tx;             /*  No. of times NOS tx started         */
+	u32    hwsm_lrr_rx;        /*  No. of times LRR rx-ed by HWSM      */
+	u32    hwsm_lr_rx;         /*  No. of times LR rx-ed by HWSM       */
+};
+
+struct bfa_fw_port_snsm_stats_s {
+	u32    hwsm_success;       /*  Successful hwsm terminations        */
+	u32    hwsm_fails;         /*  hwsm fail count                     */
+	u32    hwsm_wdtov;         /*  hwsm timed out                      */
+	u32    swsm_success;       /*  swsm success                        */
+	u32    swsm_wdtov;         /*  swsm timed out                      */
+	u32    error_resets;       /*  error resets initiated by upsm      */
+	u32    sync_lost;          /*  Sync loss count                     */
+	u32    sig_lost;           /*  Signal loss count                   */
+	u32    asn8g_attempts;	   /* SNSM HWSM at 8Gbps attempts	   */
+	u32    adapt_success;	   /* SNSM adaptation success	*/
+	u32    adapt_fails;	   /* SNSM adaptation failures */
+	u32    adapt_ign_fails;	   /* SNSM adaptation failures ignored */
+};
+
+struct bfa_fw_port_physm_stats_s {
+	u32    module_inserts;     /*  Module insert count                 */
+	u32    module_xtracts;     /*  Module extracts count               */
+	u32    module_invalids;    /*  Invalid module inserted count       */
+	u32    module_read_ign;    /*  Module validation status ignored    */
+	u32    laser_faults;       /*  Laser fault count                   */
+	u32    rsvd;
+};
+
+struct bfa_fw_fip_stats_s {
+	u32    vlan_req;           /*  vlan discovery requests             */
+	u32    vlan_notify;        /*  vlan notifications                  */
+	u32    vlan_err;           /*  vlan response error                 */
+	u32    vlan_timeouts;      /*  vlan discovery timeouts              */
+	u32    vlan_invalids;      /*  invalid vlan in discovery advert.   */
+	u32    disc_req;           /*  Discovery solicit requests          */
+	u32    disc_rsp;           /*  Discovery solicit response          */
+	u32    disc_err;           /*  Discovery advt. parse errors        */
+	u32    disc_unsol;         /*  Discovery unsolicited               */
+	u32    disc_timeouts;      /*  Discovery timeouts                  */
+	u32    disc_fcf_unavail;   /*  Discovery FCF Not Avail.            */
+	u32    linksvc_unsupp;     /*  Unsupported link service req        */
+	u32    linksvc_err;        /*  Parse error in link service req     */
+	u32    logo_req;           /*  FIP logos received                  */
+	u32    clrvlink_req;       /*  Clear virtual link req              */
+	u32    op_unsupp;          /*  Unsupported FIP operation           */
+	u32    untagged;           /*  Untagged frames (ignored)           */
+	u32    invalid_version;    /*  Invalid FIP version                 */
+};
+
+struct bfa_fw_lps_stats_s {
+	u32    mac_invalids;       /*  Invalid mac assigned                */
+	u32    rsvd;
+};
+
+struct bfa_fw_fcoe_stats_s {
+	u32    cee_linkups;        /*  CEE link up count                   */
+	u32    cee_linkdns;        /*  CEE link down count                 */
+	u32    fip_linkups;        /*  FIP link up count                   */
+	u32    fip_linkdns;        /*  FIP link down count                 */
+	u32    fip_fails;          /*  FIP fail count                      */
+	u32    mac_invalids;       /*  Invalid mac assigned                */
+};
+
+/*
+ * IOC firmware FCoE port stats
+ */
+struct bfa_fw_fcoe_port_stats_s {
+	struct bfa_fw_fcoe_stats_s		fcoe_stats;
+	struct bfa_fw_fip_stats_s		fip_stats;
+};
+
+/*
+ * LPSM statistics
+ */
+struct bfa_fw_lpsm_stats_s {
+	u32	cls_rx;		/* LPSM cls_rx			*/
+	u32	cls_tx;		/* LPSM cls_tx			*/
+	u32	arbf0_rx;	/* LPSM arbf0 rcvd		*/
+	u32	arbf0_tx;	/* LPSM arbf0 xmit		*/
+	u32	init_rx;	/* LPSM loop init start		*/
+	u32	unexp_hwst;	/* LPSM unknown hw state	*/
+	u32	unexp_frame;	/* LPSM unknown_frame		*/
+	u32	unexp_prim;	/* LPSM unexpected primitive	*/
+	u32	prev_alpa_unavail; /* LPSM prev alpa unavailable */
+	u32	alpa_unavail;	/* LPSM alpa not available	*/
+	u32	lip_rx;		/* LPSM lip rcvd		*/
+	u32	lip_f7f7_rx;	/* LPSM lip f7f7 rcvd		*/
+	u32	lip_f8_rx;	/* LPSM lip f8 rcvd		*/
+	u32	lip_f8f7_rx;	/* LPSM lip f8f7 rcvd		*/
+	u32	lip_other_rx;	/* LPSM lip other rcvd		*/
+	u32	lip_tx;		/* LPSM lip xmit		*/
+	u32	retry_tov;	/* LPSM retry TOV		*/
+	u32	lip_tov;	/* LPSM LIP wait TOV		*/
+	u32	idle_tov;	/* LPSM idle wait TOV		*/
+	u32	arbf0_tov;	/* LPSM arbf0 wait TOV		*/
+	u32	stop_loop_tov;	/* LPSM stop loop wait TOV	*/
+	u32	lixa_tov;	/* LPSM lixa wait TOV		*/
+	u32	lixx_tov;	/* LPSM lilp/lirp wait TOV	*/
+	u32	cls_tov;	/* LPSM cls wait TOV		*/
+	u32	sler;		/* LPSM SLER recvd		*/
+	u32	failed;		/* LPSM failed			*/
+	u32	success;	/* LPSM online			*/
+};
+
+/*
+ * IOC firmware FC uport stats
+ */
+struct bfa_fw_fc_uport_stats_s {
+	struct bfa_fw_port_snsm_stats_s		snsm_stats;
+	struct bfa_fw_port_lksm_stats_s		lksm_stats;
+	struct bfa_fw_lpsm_stats_s		lpsm_stats;
+};
+
+/*
+ * IOC firmware FC port stats
+ */
+union bfa_fw_fc_port_stats_s {
+	struct bfa_fw_fc_uport_stats_s		fc_stats;
+	struct bfa_fw_fcoe_port_stats_s		fcoe_stats;
+};
+
+/*
+ * IOC firmware port stats
+ */
+struct bfa_fw_port_stats_s {
+	struct bfa_fw_port_fpg_stats_s		fpg_stats;
+	struct bfa_fw_port_physm_stats_s	physm_stats;
+	union  bfa_fw_fc_port_stats_s		fc_port;
+};
+
+/*
+ * fcxchg module statistics
+ */
+struct bfa_fw_fcxchg_stats_s {
+	u32	ua_tag_inv;
+	u32	ua_state_inv;
+};
+
+/*
+ *  Trunk statistics
+ */
+struct bfa_fw_trunk_stats_s {
+	u32 emt_recvd;		/*  Trunk EMT received		*/
+	u32 emt_accepted;	/*  Trunk EMT Accepted		*/
+	u32 emt_rejected;	/*  Trunk EMT rejected		*/
+	u32 etp_recvd;		/*  Trunk ETP received		*/
+	u32 etp_accepted;	/*  Trunk ETP Accepted		*/
+	u32 etp_rejected;	/*  Trunk ETP rejected		*/
+	u32 lr_recvd;		/*  Trunk LR received		*/
+	u32 rsvd;		/*  padding for 64 bit alignment */
+};
+
+struct bfa_fw_aport_stats_s {
+	u32 flogi_sent;		/*  Flogi sent			*/
+	u32 flogi_acc_recvd;	/*  Flogi Acc received		*/
+	u32 flogi_rjt_recvd;	/*  Flogi rejects received	*/
+	u32 flogi_retries;	/*  Flogi retries		*/
+
+	u32 elp_recvd;		/*  ELP received		*/
+	u32 elp_accepted;	/*  ELP Accepted		*/
+	u32 elp_rejected;	/*  ELP rejected		*/
+	u32 elp_dropped;	/*  ELP dropped			*/
+
+	u32 bbcr_lr_count;	/*!< BBCR Link Resets		*/
+	u32 frame_lost_intrs;	/*!< BBCR Frame loss intrs	*/
+	u32 rrdy_lost_intrs;	/*!< BBCR Rrdy loss intrs	*/
+
+	u32 rsvd;
+};
+
+/*
+ * IOCFC firmware stats
+ */
+struct bfa_fw_iocfc_stats_s {
+	u32	cfg_reqs;	/*  cfg request */
+	u32	updq_reqs;	/*  update queue request */
+	u32	ic_reqs;	/*  interrupt coalesce reqs */
+	u32	unknown_reqs;
+	u32	set_intr_reqs;	/*  set interrupt reqs */
+};
+
+/*
+ * IOC attributes returned in queries
+ */
+struct bfa_iocfc_attr_s {
+	struct bfa_iocfc_cfg_s		config;		/*  IOCFC config   */
+	struct bfa_iocfc_intr_attr_s	intr_attr;	/*  interrupt attr */
+};
+
+/*
+ * Eth_sndrcv mod stats
+ */
+struct bfa_fw_eth_sndrcv_stats_s {
+	u32	crc_err;
+	u32	rsvd;		/*  64bit align    */
+};
+
+/*
+ * CT MAC mod stats
+ */
+struct bfa_fw_mac_mod_stats_s {
+	u32	mac_on;		/*  MAC got turned-on */
+	u32	link_up;	/*  link-up */
+	u32	signal_off;	/*  lost signal */
+	u32	dfe_on;		/*  DFE on */
+	u32	mac_reset;	/*  # of MAC reset to bring lnk up */
+	u32	pcs_reset;	/*  # of PCS reset to bring lnk up */
+	u32	loopback;	/*  MAC got into serdes loopback */
+	u32	lb_mac_reset;
+			/*  # of MAC reset to bring link up in loopback */
+	u32	lb_pcs_reset;
+			/*  # of PCS reset to bring link up in loopback */
+	u32	rsvd;		/*  64bit align    */
+};
+
+/*
+ * CT MOD stats
+ */
+struct bfa_fw_ct_mod_stats_s {
+	u32	rxa_rds_undrun;	/*  RxA RDS underrun */
+	u32	rad_bpc_ovfl;	/*  RAD BPC overflow */
+	u32	rad_rlb_bpc_ovfl; /*  RAD RLB BPC overflow */
+	u32	bpc_fcs_err;	/*  BPC FCS_ERR */
+	u32	txa_tso_hdr;	/*  TxA TSO header too long */
+	u32	rsvd;		/*  64bit align    */
+};
+
+/*
+ * RDS mod stats
+ */
+struct bfa_fw_rds_stats_s {
+	u32	no_fid_drop_err; /* RDS no fid drop error */
+	u32	rsvd;		 /* 64bit align */
+};
+
+/*
+ * IOC firmware stats
+ */
+struct bfa_fw_stats_s {
+	struct bfa_fw_ioc_stats_s	ioc_stats;
+	struct bfa_fw_iocfc_stats_s	iocfc_stats;
+	struct bfa_fw_io_stats_s	io_stats;
+	struct bfa_fw_port_stats_s	port_stats;
+	struct bfa_fw_fcxchg_stats_s	fcxchg_stats;
+	struct bfa_fw_lps_stats_s	lps_stats;
+	struct bfa_fw_trunk_stats_s	trunk_stats;
+	struct bfa_fw_aport_stats_s	aport_stats;
+	struct bfa_fw_mac_mod_stats_s	macmod_stats;
+	struct bfa_fw_ct_mod_stats_s	ctmod_stats;
+	struct bfa_fw_eth_sndrcv_stats_s	ethsndrcv_stats;
+	struct bfa_fw_rds_stats_s	rds_stats;
+};
+
+#define BFA_IOCFC_PATHTOV_MAX	60
+#define BFA_IOCFC_QDEPTH_MAX	2000
+
+/*
+ * QoS states
+ */
+enum bfa_qos_state {
+	BFA_QOS_DISABLED = 0,		/* QoS is disabled */
+	BFA_QOS_ONLINE = 1,		/*  QoS is online */
+	BFA_QOS_OFFLINE = 2,		/*  QoS is offline */
+};
+
+/*
+ * QoS  Priority levels.
+ */
+enum bfa_qos_priority {
+	BFA_QOS_UNKNOWN = 0,
+	BFA_QOS_HIGH  = 1,	/*  QoS Priority Level High */
+	BFA_QOS_MED  =  2,	/*  QoS Priority Level Medium */
+	BFA_QOS_LOW  =  3,	/*  QoS Priority Level Low */
+};
+
+/*
+ * QoS  bandwidth allocation for each priority level
+ */
+enum bfa_qos_bw_alloc {
+	BFA_QOS_BW_HIGH  = 60,	/*  bandwidth allocation for High */
+	BFA_QOS_BW_MED  =  30,	/*  bandwidth allocation for Medium */
+	BFA_QOS_BW_LOW  =  10,	/*  bandwidth allocation for Low */
+};
+#pragma pack(1)
+
+struct bfa_qos_bw_s {
+	u8	qos_bw_set;
+	u8	high;
+	u8	med;
+	u8	low;
+};
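+
+/*
+ * Illustrative sketch (not part of the driver): a configured bandwidth
+ * split is assumed here to cover the full 100%, matching the 60/30/10
+ * defaults above. qos_bw_cfg_valid() is a hypothetical helper.
+ */
+static inline int
+qos_bw_cfg_valid(const struct bfa_qos_bw_s *bw)
+{
+	return (bw->high + bw->med + bw->low) == 100;
+}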
+
+/*
+ * QoS attribute returned in QoS Query
+ */
+struct bfa_qos_attr_s {
+	u8	state;		/*  QoS current state */
+	u8	rsvd1[3];
+	u32	total_bb_cr;	/*  Total BB Credits */
+	struct bfa_qos_bw_s qos_bw;	/* QOS bw cfg */
+	struct bfa_qos_bw_s qos_bw_op;	/* QOS bw operational */
+};
+
+enum bfa_bbcr_state {
+	BFA_BBCR_DISABLED,	/*!< BBCR is disabled */
+	BFA_BBCR_ONLINE,	/*!< BBCR is online  */
+	BFA_BBCR_OFFLINE,	/*!< BBCR is offline */
+};
+
+enum bfa_bbcr_err_reason {
+	BFA_BBCR_ERR_REASON_NONE, /*!< Unknown */
+	BFA_BBCR_ERR_REASON_SPEED_UNSUP, /*!< Port speed < max sup_speed */
+	BFA_BBCR_ERR_REASON_PEER_UNSUP,	/*!< BBCR is disabled on peer port */
+	BFA_BBCR_ERR_REASON_NON_BRCD_SW, /*!< Connected to non BRCD switch */
+	BFA_BBCR_ERR_REASON_FLOGI_RJT, /*!< Login rejected by the switch */
+};
+
+struct bfa_bbcr_attr_s {
+	u8	state;
+	u8	peer_bb_scn;
+	u8	reason;
+	u8	rsvd;
+};
+
+/*
+ * These fields should be displayed only from the CLI.
+ * There will be a separate BFAL API (get_qos_vc_attr ?)
+ * to retrieve this.
+ *
+ */
+#define  BFA_QOS_MAX_VC  16
+
+struct bfa_qos_vc_info_s {
+	u8 vc_credit;
+	u8 borrow_credit;
+	u8 priority;
+	u8 resvd;
+};
+
+struct bfa_qos_vc_attr_s {
+	u16  total_vc_count;                    /*  Total VC Count */
+	u16  shared_credit;
+	u32  elp_opmode_flags;
+	struct bfa_qos_vc_info_s vc_info[BFA_QOS_MAX_VC];  /* as many as
+							    * total_vc_count */
+};
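+
+/*
+ * Illustrative sketch (not part of the driver): only the first
+ * total_vc_count entries of vc_info[] (capped at BFA_QOS_MAX_VC) are
+ * valid, per the comment above. qos_vc_total_credits() is a
+ * hypothetical helper summing the per-VC credits.
+ */
+static inline u32
+qos_vc_total_credits(const struct bfa_qos_vc_attr_s *attr)
+{
+	u32 i, n = attr->total_vc_count;
+	u32 sum = attr->shared_credit;
+
+	if (n > BFA_QOS_MAX_VC)
+		n = BFA_QOS_MAX_VC;
+	for (i = 0; i < n; i++)
+		sum += attr->vc_info[i].vc_credit;
+	return sum;
+}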
+
+/*
+ * QoS statistics
+ */
+struct bfa_qos_stats_s {
+	u32	flogi_sent;		/*  QoS Flogi sent */
+	u32	flogi_acc_recvd;	/*  QoS Flogi Acc received */
+	u32	flogi_rjt_recvd;	/*  QoS Flogi rejects received */
+	u32	flogi_retries;		/*  QoS Flogi retries */
+
+	u32	elp_recvd;		/*  QoS ELP received */
+	u32	elp_accepted;		/*  QoS ELP Accepted */
+	u32	elp_rejected;		/*  QoS ELP rejected */
+	u32	elp_dropped;		/*  QoS ELP dropped  */
+
+	u32	qos_rscn_recvd;		/*  QoS RSCN received */
+	u32	rsvd;			/* padding for 64 bit alignment */
+};
+
+/*
+ * FCoE statistics
+ */
+struct bfa_fcoe_stats_s {
+	u64	secs_reset;	/*  Seconds since stats reset	     */
+	u64	cee_linkups;	/*  CEE link up			     */
+	u64	cee_linkdns;	/*  CEE link down		     */
+	u64	fip_linkups;	/*  FIP link up			     */
+	u64	fip_linkdns;	/*  FIP link down		     */
+	u64	fip_fails;	/*  FIP failures		     */
+	u64	mac_invalids;	/*  Invalid mac assignments	     */
+	u64	vlan_req;	/*  Vlan requests		     */
+	u64	vlan_notify;	/*  Vlan notifications		     */
+	u64	vlan_err;	/*  Vlan notification errors	     */
+	u64	vlan_timeouts;	/*  Vlan request timeouts	     */
+	u64	vlan_invalids;	/*  Vlan invalids		     */
+	u64	disc_req;	/*  Discovery requests		     */
+	u64	disc_rsp;	/*  Discovery responses		     */
+	u64	disc_err;	/*  Discovery error frames	     */
+	u64	disc_unsol;	/*  Discovery unsolicited	     */
+	u64	disc_timeouts;	/*  Discovery timeouts		     */
+	u64	disc_fcf_unavail; /*  Discovery FCF not avail	     */
+	u64	linksvc_unsupp;	/*  FIP link service req unsupp	     */
+	u64	linksvc_err;	/*  FIP link service req errors	     */
+	u64	logo_req;	/*  FIP logos received		     */
+	u64	clrvlink_req;	/*  Clear virtual link requests	     */
+	u64	op_unsupp;	/*  FIP operation unsupp.	     */
+	u64	untagged;	/*  FIP untagged frames		     */
+	u64	txf_ucast;	/*  Tx FCoE unicast frames	     */
+	u64	txf_ucast_vlan;	/*  Tx FCoE unicast vlan frames      */
+	u64	txf_ucast_octets; /*  Tx FCoE unicast octets	     */
+	u64	txf_mcast;	/*  Tx FCoE multicast frames	     */
+	u64	txf_mcast_vlan;	/*  Tx FCoE multicast vlan frames    */
+	u64	txf_mcast_octets; /*  Tx FCoE multicast octets	     */
+	u64	txf_bcast;	/*  Tx FCoE broadcast frames	     */
+	u64	txf_bcast_vlan;	/*  Tx FCoE broadcast vlan frames    */
+	u64	txf_bcast_octets; /*  Tx FCoE broadcast octets	     */
+	u64	txf_timeout;	  /*  Tx timeouts		     */
+	u64	txf_parity_errors; /*  Transmit parity err	     */
+	u64	txf_fid_parity_errors; /*  Transmit FID parity err   */
+	u64	rxf_ucast_octets; /*  Rx FCoE unicast octets	     */
+	u64	rxf_ucast;	/*  Rx FCoE unicast frames	     */
+	u64	rxf_ucast_vlan;	/*  Rx FCoE unicast vlan frames	     */
+	u64	rxf_mcast_octets; /*  Rx FCoE multicast octets	     */
+	u64	rxf_mcast;	/*  Rx FCoE multicast frames	     */
+	u64	rxf_mcast_vlan;	/*  Rx FCoE multicast vlan frames    */
+	u64	rxf_bcast_octets; /*  Rx FCoE broadcast octets	     */
+	u64	rxf_bcast;	/*  Rx FCoE broadcast frames	     */
+	u64	rxf_bcast_vlan;	/*  Rx FCoE broadcast vlan frames    */
+};
+
+/*
+ * QoS or FCoE stats (fcport stats excluding physical FC port stats)
+ */
+union bfa_fcport_stats_u {
+	struct bfa_qos_stats_s	fcqos;
+	struct bfa_fcoe_stats_s	fcoe;
+};
+#pragma pack()
+
+struct bfa_fcpim_del_itn_stats_s {
+	u32	del_itn_iocomp_aborted;	   /* Aborted IO requests	      */
+	u32	del_itn_iocomp_timedout;   /* IO timeouts		      */
+	u32	del_itn_iocom_sqer_needed; /* IO retry for SQ error recovery  */
+	u32	del_itn_iocom_res_free;    /* Delayed freeing of IO resources */
+	u32	del_itn_iocom_hostabrts;   /* Host IO abort requests	      */
+	u32	del_itn_total_ios;	   /* Total IO count		      */
+	u32	del_io_iocdowns;	   /* IO cleaned-up due to IOC down   */
+	u32	del_tm_iocdowns;	   /* TM cleaned-up due to IOC down   */
+};
+
+struct bfa_itnim_iostats_s {
+
+	u32	total_ios;		/*  Total IO Requests		*/
+	u32	input_reqs;		/*  Data in-bound requests	*/
+	u32	output_reqs;		/*  Data out-bound requests	*/
+	u32	io_comps;		/*  Total IO Completions	*/
+	u32	wr_throughput;		/*  Write data transferred in bytes */
+	u32	rd_throughput;		/*  Read data transferred in bytes  */
+
+	u32	iocomp_ok;		/*  Slowpath IO completions	*/
+	u32	iocomp_underrun;	/*  IO underrun		*/
+	u32	iocomp_overrun;		/*  IO overrun			*/
+	u32	qwait;			/*  IO Request-Q wait		*/
+	u32	qresumes;		/*  IO Request-Q wait done	*/
+	u32	no_iotags;		/*  No free IO tag		*/
+	u32	iocomp_timedout;	/*  IO timeouts		*/
+	u32	iocom_nexus_abort;	/*  IO failure due to target offline */
+	u32	iocom_proto_err;	/*  IO protocol errors		*/
+	u32	iocom_dif_err;		/*  IO SBC-3 protection errors	*/
+
+	u32	iocom_sqer_needed;	/*  fcp-2 error recovery failed	*/
+	u32	iocom_res_free;		/*  Delayed freeing of IO tag	*/
+
+	u32	io_aborts;		/*  Host IO abort requests	*/
+	u32	iocom_hostabrts;	/*  Host IO abort completions	*/
+	u32	io_cleanups;		/*  IO clean-up requests	*/
+	u32	path_tov_expired;	/*  IO path tov expired	*/
+	u32	iocomp_aborted;		/*  IO abort completions	*/
+	u32	io_iocdowns;		/*  IO cleaned-up due to IOC down */
+	u32	iocom_utags;		/*  IO comp with unknown tags	*/
+
+	u32	io_tmaborts;		/*  Abort request due to TM command */
+	u32	tm_io_comps;		/* Abort completion due to TM command */
+
+	u32	creates;		/*  IT Nexus create requests	*/
+	u32	fw_create;		/*  IT Nexus FW create requests	*/
+	u32	create_comps;		/*  IT Nexus FW create completions */
+	u32	onlines;		/*  IT Nexus onlines		*/
+	u32	offlines;		/*  IT Nexus offlines		*/
+	u32	fw_delete;		/*  IT Nexus FW delete requests	*/
+	u32	delete_comps;		/*  IT Nexus FW delete completions */
+	u32	deletes;		/*  IT Nexus delete requests	   */
+	u32	sler_events;		/*  SLER events		*/
+	u32	ioc_disabled;		/*  Num IOC disables		*/
+	u32	cleanup_comps;		/*  IT Nexus cleanup completions    */
+
+	u32	tm_cmnds;		/*  TM Requests		*/
+	u32	tm_fw_rsps;		/*  TM Completions		*/
+	u32	tm_success;		/*  TM initiated IO cleanup success */
+	u32	tm_failures;		/*  TM initiated IO cleanup failure */
+	u32	no_tskims;		/*  No free TM tag		*/
+	u32	tm_qwait;		/*  TM Request-Q wait		*/
+	u32	tm_qresumes;		/*  TM Request-Q wait done	*/
+
+	u32	tm_iocdowns;		/*  TM cleaned-up due to IOC down   */
+	u32	tm_cleanups;		/*  TM cleanup requests	*/
+	u32	tm_cleanup_comps;	/*  TM cleanup completions	*/
+	u32	rsvd[6];
+};
+
+/* Modify char *port_stt[] in bfal_port.c if a new state is added */
+enum bfa_port_states {
+	BFA_PORT_ST_UNINIT		= 1,
+	BFA_PORT_ST_ENABLING_QWAIT	= 2,
+	BFA_PORT_ST_ENABLING		= 3,
+	BFA_PORT_ST_LINKDOWN		= 4,
+	BFA_PORT_ST_LINKUP		= 5,
+	BFA_PORT_ST_DISABLING_QWAIT	= 6,
+	BFA_PORT_ST_DISABLING		= 7,
+	BFA_PORT_ST_DISABLED		= 8,
+	BFA_PORT_ST_STOPPED		= 9,
+	BFA_PORT_ST_IOCDOWN		= 10,
+	BFA_PORT_ST_IOCDIS		= 11,
+	BFA_PORT_ST_FWMISMATCH		= 12,
+	BFA_PORT_ST_PREBOOT_DISABLED	= 13,
+	BFA_PORT_ST_TOGGLING_QWAIT	= 14,
+	BFA_PORT_ST_FAA_MISCONFIG	= 15,
+	BFA_PORT_ST_DPORT		= 16,
+	BFA_PORT_ST_DDPORT		= 17,
+	BFA_PORT_ST_MAX_STATE,
+};
+
+/*
+ *	Port operational type (in sync with SNIA port type).
+ */
+enum bfa_port_type {
+	BFA_PORT_TYPE_UNKNOWN	= 1,	/*  port type is unknown */
+	BFA_PORT_TYPE_NPORT	= 5,	/*  P2P with switched fabric */
+	BFA_PORT_TYPE_NLPORT	= 6,	/*  public loop */
+	BFA_PORT_TYPE_LPORT	= 20,	/*  private loop */
+	BFA_PORT_TYPE_P2P	= 21,	/*  P2P with no switched fabric */
+	BFA_PORT_TYPE_VPORT	= 22,	/*  NPIV - virtual port */
+};
+
+/*
+ *	Port topology setting. A port's topology and fabric login status
+ *	determine its operational type.
+ */
+enum bfa_port_topology {
+	BFA_PORT_TOPOLOGY_NONE = 0,	/*  No valid topology */
+	BFA_PORT_TOPOLOGY_P2P_OLD_VER = 1, /* P2P def for older ver */
+	BFA_PORT_TOPOLOGY_LOOP = 2,	/* LOOP topology */
+	BFA_PORT_TOPOLOGY_AUTO_OLD_VER = 3, /* auto def for older ver */
+	BFA_PORT_TOPOLOGY_AUTO = 4,	/* auto topology selection */
+	BFA_PORT_TOPOLOGY_P2P = 5,	/* P2P only */
+};
+
+/*
+ *	Physical port loopback types.
+ */
+enum bfa_port_opmode {
+	BFA_PORT_OPMODE_NORMAL   = 0x00, /*  normal non-loopback mode */
+	BFA_PORT_OPMODE_LB_INT   = 0x01, /*  internal loop back */
+	BFA_PORT_OPMODE_LB_SLW   = 0x02, /*  serial link wrapback (serdes) */
+	BFA_PORT_OPMODE_LB_EXT   = 0x04, /*  external loop back (serdes) */
+	BFA_PORT_OPMODE_LB_CBL   = 0x08, /*  cabled loop back */
+	BFA_PORT_OPMODE_LB_NLINT = 0x20, /*  NL_Port internal loopback */
+};
+
+#define BFA_PORT_OPMODE_LB_HARD(_mode)			\
+	(((_mode) == BFA_PORT_OPMODE_LB_INT) ||		\
+	 ((_mode) == BFA_PORT_OPMODE_LB_SLW) ||		\
+	 ((_mode) == BFA_PORT_OPMODE_LB_EXT))
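+
+/*
+ * Usage sketch (not part of the driver): BFA_PORT_OPMODE_LB_HARD()
+ * picks out the loopback modes that wrap at the serdes level.
+ * diag_lb_is_hard() is a hypothetical helper over the loopback request
+ * defined in bfa_defs.h.
+ */
+static inline int
+diag_lb_is_hard(const struct bfa_diag_loopback_s *lb)
+{
+	return BFA_PORT_OPMODE_LB_HARD(lb->lb_mode);
+}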
+
+/*
+ *	Port link state
+ */
+enum bfa_port_linkstate {
+	BFA_PORT_LINKUP		= 1,	/*  Physical port/Trunk link up */
+	BFA_PORT_LINKDOWN	= 2,	/*  Physical port/Trunk link down */
+};
+
+/*
+ *	Port link state reason code
+ */
+enum bfa_port_linkstate_rsn {
+	BFA_PORT_LINKSTATE_RSN_NONE		= 0,
+	BFA_PORT_LINKSTATE_RSN_DISABLED		= 1,
+	BFA_PORT_LINKSTATE_RSN_RX_NOS		= 2,
+	BFA_PORT_LINKSTATE_RSN_RX_OLS		= 3,
+	BFA_PORT_LINKSTATE_RSN_RX_LIP		= 4,
+	BFA_PORT_LINKSTATE_RSN_RX_LIPF7		= 5,
+	BFA_PORT_LINKSTATE_RSN_SFP_REMOVED	= 6,
+	BFA_PORT_LINKSTATE_RSN_PORT_FAULT	= 7,
+	BFA_PORT_LINKSTATE_RSN_RX_LOS		= 8,
+	BFA_PORT_LINKSTATE_RSN_LOCAL_FAULT	= 9,
+	BFA_PORT_LINKSTATE_RSN_REMOTE_FAULT	= 10,
+	BFA_PORT_LINKSTATE_RSN_TIMEOUT		= 11,
+	BFA_PORT_LINKSTATE_RSN_FAA_MISCONFIG	= 12,
+
+	/* CEE related reason codes/errors */
+	CEE_LLDP_INFO_AGED_OUT			= 20,
+	CEE_LLDP_SHUTDOWN_TLV_RCVD		= 21,
+	CEE_PEER_NOT_ADVERTISE_DCBX		= 22,
+	CEE_PEER_NOT_ADVERTISE_PG		= 23,
+	CEE_PEER_NOT_ADVERTISE_PFC		= 24,
+	CEE_PEER_NOT_ADVERTISE_FCOE		= 25,
+	CEE_PG_NOT_COMPATIBLE			= 26,
+	CEE_PFC_NOT_COMPATIBLE			= 27,
+	CEE_FCOE_NOT_COMPATIBLE			= 28,
+	CEE_BAD_PG_RCVD				= 29,
+	CEE_BAD_BW_RCVD				= 30,
+	CEE_BAD_PFC_RCVD			= 31,
+	CEE_BAD_APP_PRI_RCVD			= 32,
+	CEE_FCOE_PRI_PFC_OFF			= 33,
+	CEE_DUP_CONTROL_TLV_RCVD		= 34,
+	CEE_DUP_FEAT_TLV_RCVD			= 35,
+	CEE_APPLY_NEW_CFG			= 36, /* reason, not error */
+	CEE_PROTOCOL_INIT			= 37, /* reason, not error */
+	CEE_PHY_LINK_DOWN			= 38,
+	CEE_LLS_FCOE_ABSENT			= 39,
+	CEE_LLS_FCOE_DOWN			= 40,
+	CEE_ISCSI_NOT_COMPATIBLE		= 41,
+	CEE_ISCSI_PRI_PFC_OFF			= 42,
+	CEE_ISCSI_PRI_OVERLAP_FCOE_PRI		= 43
+};
+
+#define MAX_LUN_MASK_CFG 16
+
+/*
+ * Initially the flash content may be 0xff. The LUN mask state changes on
+ * enable/disable. While a REPORT LUNS command is being processed, the
+ * state goes from BFA_IOIM_LUN_MASK_ACTIVE to BFA_IOIM_LUN_MASK_FETCHED
+ * and back to BFA_IOIM_LUN_MASK_ACTIVE.
+ */
+enum bfa_ioim_lun_mask_state_s {
+	BFA_IOIM_LUN_MASK_INACTIVE = 0,
+	BFA_IOIM_LUN_MASK_ACTIVE = 1,
+	BFA_IOIM_LUN_MASK_FETCHED = 2,
+};
+
+enum bfa_lunmask_state_s {
+	BFA_LUNMASK_DISABLED = 0x00,
+	BFA_LUNMASK_ENABLED = 0x01,
+	BFA_LUNMASK_MINCFG = 0x02,
+	BFA_LUNMASK_UNINITIALIZED = 0xff,
+};
+
+/**
+ * FEC states
+ */
+enum bfa_fec_state_s {
+	BFA_FEC_ONLINE = 1,		/*!< FEC is online */
+	BFA_FEC_OFFLINE = 2,		/*!< FEC is offline */
+	BFA_FEC_OFFLINE_NOT_16G = 3,	/*!< FEC is offline (speed not 16Gig) */
+};
+
+#pragma pack(1)
+/*
+ * LUN mask configuration
+ */
+struct bfa_lun_mask_s {
+	wwn_t		lp_wwn;
+	wwn_t		rp_wwn;
+	struct scsi_lun	lun;
+	u8		ua;
+	u8		rsvd[3];
+	u16		rp_tag;
+	u8		lp_tag;
+	u8		state;
+};
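+
+/*
+ * Illustrative sketch (not part of the driver) of the per-LUN state flow
+ * described above: REPORT LUNS processing moves an ACTIVE entry to
+ * FETCHED and back. Both helper names are hypothetical.
+ */
+static inline void
+lun_mask_report_luns_start(struct bfa_lun_mask_s *lm)
+{
+	if (lm->state == BFA_IOIM_LUN_MASK_ACTIVE)
+		lm->state = BFA_IOIM_LUN_MASK_FETCHED;
+}
+
+static inline void
+lun_mask_report_luns_done(struct bfa_lun_mask_s *lm)
+{
+	if (lm->state == BFA_IOIM_LUN_MASK_FETCHED)
+		lm->state = BFA_IOIM_LUN_MASK_ACTIVE;
+}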
+
+struct bfa_lunmask_cfg_s {
+	u32	status;
+	u32	rsvd;
+	struct bfa_lun_mask_s	lun_list[MAX_LUN_MASK_CFG];
+};
+
+struct bfa_throttle_cfg_s {
+	u16	is_valid;
+	u16	value;
+	u32	rsvd;
+};
+
+struct bfa_defs_fcpim_throttle_s {
+	u16	max_value;
+	u16	cur_value;
+	u16	cfg_value;
+	u16	rsvd;
+};
+
+#define BFA_BB_SCN_DEF 3
+#define BFA_BB_SCN_MAX 0x0F
+
+/*
+ *      Physical port configuration
+ */
+struct bfa_port_cfg_s {
+	u8	 topology;	/*  bfa_port_topology		*/
+	u8	 speed;		/*  enum bfa_port_speed	*/
+	u8	 trunked;	/*  trunked or not		*/
+	u8	 qos_enabled;	/*  qos enabled or not		*/
+	u8	 cfg_hardalpa;	/*  is hard alpa configured	*/
+	u8	 hardalpa;	/*  configured hard alpa	*/
+	__be16	 maxfrsize;	/*  maximum frame size		*/
+	u8	 rx_bbcredit;	/*  receive buffer credits	*/
+	u8	 tx_bbcredit;	/*  transmit buffer credits	*/
+	u8	 ratelimit;	/*  ratelimit enabled or not	*/
+	u8	 trl_def_speed;	/*  ratelimit default speed	*/
+	u8	 bb_cr_enabled; /*!< Config state of BB_SCN	*/
+	u8	 bb_scn;	/*!< BB_SCN value for FLOGI Exchg */
+	u8	 faa_state;	/*  FAA enabled/disabled        */
+	u8	 rsvd1;
+	u16	 path_tov;	/*  device path timeout	*/
+	u16	 q_depth;	/*  SCSI Queue depth		*/
+	struct bfa_qos_bw_s qos_bw;	/* QOS bandwidth	*/
+};
+#pragma pack()
+
+/*
+ *	Port attribute values.
+ */
+struct bfa_port_attr_s {
+	/*
+	 * Static fields
+	 */
+	wwn_t			nwwn;		/*  node wwn */
+	wwn_t			pwwn;		/*  port wwn */
+	wwn_t			factorynwwn;	/*  factory node wwn */
+	wwn_t			factorypwwn;	/*  factory port wwn */
+	enum fc_cos		cos_supported;	/*  supported class of
+						 *  services */
+	u32			rsvd;
+	struct fc_symname_s	port_symname;	/*  port symbolic name */
+	enum bfa_port_speed	speed_supported; /* supported speeds */
+	bfa_boolean_t		pbind_enabled;
+
+	/*
+	 * Configured values
+	 */
+	struct bfa_port_cfg_s	pport_cfg;	/*  pport cfg */
+
+	/*
+	 * Dynamic field - info from BFA
+	 */
+	enum bfa_port_states	port_state;	/*  current port state */
+	enum bfa_port_speed	speed;		/*  current speed */
+	enum bfa_port_topology	topology;	/*  current topology */
+	bfa_boolean_t		beacon;		/*  current beacon status */
+	bfa_boolean_t		link_e2e_beacon; /* link beacon is on */
+	bfa_boolean_t		bbsc_op_status;	/* fc credit recovery oper
+						 * state */
+	enum bfa_fec_state_s	fec_state;	/*!< current FEC state */
+
+	/*
+	 * Dynamic field - info from FCS
+	 */
+	u32			pid;		/*  port ID */
+	enum bfa_port_type	port_type;	/*  current topology */
+	u32			loopback;	/*  external loopback */
+	u32			authfail;	/*  auth fail state */
+
+	/* FCoE specific  */
+	u16			fcoe_vlan;
+	u8			rsvd1[2];
+};
+
+/*
+ *	      Port FCP mappings.
+ */
+struct bfa_port_fcpmap_s {
+	char	osdevname[256];
+	u32	bus;
+	u32	target;
+	u32	oslun;
+	u32	fcid;
+	wwn_t	nwwn;
+	wwn_t	pwwn;
+	u64	fcplun;
+	char	luid[256];
+};
+
+/*
+ *	      Port RNID info.
+ */
+struct bfa_port_rnid_s {
+	wwn_t	  wwn;
+	u32	  unittype;
+	u32	  portid;
+	u32	  attached_nodes_num;
+	u16	  ip_version;
+	u16	  udp_port;
+	u8	  ipaddr[16];
+	u16	  rsvd;
+	u16	  topologydiscoveryflags;
+};
+
+#pragma pack(1)
+struct bfa_fcport_fcf_s {
+	wwn_t	name;		/*  FCF name		   */
+	wwn_t	fabric_name;    /*  Fabric Name		   */
+	u8	fipenabled;	/*  FIP enabled or not	   */
+	u8	fipfailed;	/*  FIP failed or not	   */
+	u8	resv[2];
+	u8	pri;		/*  FCF priority	   */
+	u8	version;	/*  FIP version used	   */
+	u8	available;      /*  Available for login    */
+	u8	fka_disabled;   /*  FKA is disabled	   */
+	u8	maxsz_verified; /*  FCoE max size verified */
+	u8	fc_map[3];      /*  FC map		   */
+	__be16	vlan;		/*  FCoE vlan tag/priority */
+	u32	fka_adv_per;    /*  FIP  ka advert. period */
+	mac_t	mac;		/*  FCF mac		   */
+};
+
+/*
+ *	Trunk states for BCU/BFAL
+ */
+enum bfa_trunk_state {
+	BFA_TRUNK_DISABLED	= 0,	/*  Trunk is not configured	*/
+	BFA_TRUNK_ONLINE	= 1,	/*  Trunk is online		*/
+	BFA_TRUNK_OFFLINE	= 2,	/*  Trunk is offline		*/
+};
+
+/*
+ *	VC attributes for trunked link
+ */
+struct bfa_trunk_vc_attr_s {
+	u32 bb_credit;
+	u32 elp_opmode_flags;
+	u32 req_credit;
+	u16 vc_credits[8];
+};
+
+struct bfa_fcport_loop_info_s {
+	u8	myalpa;		/* alpa claimed */
+	u8	alpabm_val;	/* alpa bitmap valid or not (1 or 0) */
+	u8	resvd[6];
+	struct fc_alpabm_s alpabm;	/* alpa bitmap */
+};
+
+/*
+ *	Link state information
+ */
+struct bfa_port_link_s {
+	u8	 linkstate;	/*  Link state bfa_port_linkstate */
+	u8	 linkstate_rsn;	/*  bfa_port_linkstate_rsn_t */
+	u8	 topology;	/*  P2P/LOOP bfa_port_topology */
+	u8	 speed;		/*  Link speed (1/2/4/8 G) */
+	u32	 linkstate_opt; /*  Linkstate optional data (debug) */
+	u8	 trunked;	/*  Trunked or not (1 or 0) */
+	u8	 fec_state;	/*!< State of FEC */
+	u8	 resvd[6];
+	struct bfa_qos_attr_s  qos_attr;   /* QoS Attributes */
+	union {
+		struct bfa_fcport_loop_info_s loop_info;
+		struct bfa_bbcr_attr_s bbcr_attr;
+		union {
+			struct bfa_qos_vc_attr_s qos_vc_attr;
+					/*  VC info from ELP */
+			struct bfa_trunk_vc_attr_s trunk_vc_attr;
+			struct bfa_fcport_fcf_s fcf;
+					/*  FCF information (for FCoE) */
+		} vc_fcf;
+	} attr;
+};
+#pragma pack()
+
+enum bfa_trunk_link_fctl {
+	BFA_TRUNK_LINK_FCTL_NORMAL,
+	BFA_TRUNK_LINK_FCTL_VC,
+	BFA_TRUNK_LINK_FCTL_VC_QOS,
+};
+
+enum bfa_trunk_link_state {
+	BFA_TRUNK_LINK_STATE_UP = 1,		/* link part of trunk */
+	BFA_TRUNK_LINK_STATE_DN_LINKDN = 2,	/* physical link down */
+	BFA_TRUNK_LINK_STATE_DN_GRP_MIS = 3,	/* trunk group different */
+	BFA_TRUNK_LINK_STATE_DN_SPD_MIS = 4,	/* speed mismatch */
+	BFA_TRUNK_LINK_STATE_DN_MODE_MIS = 5,	/* remote port not trunked */
+};
+
+#define BFA_TRUNK_MAX_PORTS	2
+struct bfa_trunk_link_attr_s {
+	wwn_t    trunk_wwn;
+	enum bfa_trunk_link_fctl fctl;
+	enum bfa_trunk_link_state link_state;
+	enum bfa_port_speed	speed;
+	u32 deskew;
+};
+
+struct bfa_trunk_attr_s {
+	enum bfa_trunk_state	state;
+	enum bfa_port_speed	speed;
+	u32		port_id;
+	u32		rsvd;
+	struct bfa_trunk_link_attr_s link_attr[BFA_TRUNK_MAX_PORTS];
+};
+
+struct bfa_rport_hal_stats_s {
+	u32        sm_un_cr;	    /*  uninit: create events      */
+	u32        sm_un_unexp;	    /*  uninit: exception events   */
+	u32        sm_cr_on;	    /*  created: online events     */
+	u32        sm_cr_del;	    /*  created: delete events     */
+	u32        sm_cr_hwf;	    /*  created: IOC down          */
+	u32        sm_cr_unexp;	    /*  created: exception events  */
+	u32        sm_fwc_rsp;	    /*  fw create: f/w responses   */
+	u32        sm_fwc_del;	    /*  fw create: delete events   */
+	u32        sm_fwc_off;	    /*  fw create: offline events  */
+	u32        sm_fwc_hwf;	    /*  fw create: IOC down        */
+	u32        sm_fwc_unexp;    /*  fw create: exception events*/
+	u32        sm_on_off;	    /*  online: offline events     */
+	u32        sm_on_del;	    /*  online: delete events      */
+	u32        sm_on_hwf;	    /*  online: IOC down events    */
+	u32        sm_on_unexp;	    /*  online: exception events   */
+	u32        sm_fwd_rsp;	    /*  fw delete: fw responses    */
+	u32        sm_fwd_del;	    /*  fw delete: delete events   */
+	u32        sm_fwd_hwf;	    /*  fw delete: IOC down events */
+	u32        sm_fwd_unexp;    /*  fw delete: exception events*/
+	u32        sm_off_del;	    /*  offline: delete events     */
+	u32        sm_off_on;	    /*  offline: online events     */
+	u32        sm_off_hwf;	    /*  offline: IOC down events   */
+	u32        sm_off_unexp;    /*  offline: exception events  */
+	u32        sm_del_fwrsp;    /*  delete: fw responses       */
+	u32        sm_del_hwf;	    /*  delete: IOC down events    */
+	u32        sm_del_unexp;    /*  delete: exception events   */
+	u32        sm_delp_fwrsp;   /*  delete pend: fw responses  */
+	u32        sm_delp_hwf;	    /*  delete pend: IOC downs     */
+	u32        sm_delp_unexp;   /*  delete pend: exceptions    */
+	u32        sm_offp_fwrsp;   /*  off-pending: fw responses  */
+	u32        sm_offp_del;	    /*  off-pending: deletes       */
+	u32        sm_offp_hwf;	    /*  off-pending: IOC downs     */
+	u32        sm_offp_unexp;   /*  off-pending: exceptions    */
+	u32        sm_iocd_off;	    /*  IOC down: offline events   */
+	u32        sm_iocd_del;	    /*  IOC down: delete events    */
+	u32        sm_iocd_on;	    /*  IOC down: online events    */
+	u32        sm_iocd_unexp;   /*  IOC down: exceptions       */
+	u32        rsvd;
+};
+#pragma pack(1)
+/*
+ *  Rport's QoS attributes
+ */
+struct bfa_rport_qos_attr_s {
+	u8		qos_priority;	/*  rport's QoS priority   */
+	u8		rsvd[3];
+	u32		qos_flow_id;	/*  QoS flow Id	 */
+};
+#pragma pack()
+
+#define BFA_IOBUCKET_MAX 14
+
+struct bfa_itnim_latency_s {
+	u32 min[BFA_IOBUCKET_MAX];
+	u32 max[BFA_IOBUCKET_MAX];
+	u32 count[BFA_IOBUCKET_MAX];
+	u32 avg[BFA_IOBUCKET_MAX];
+};
+
+struct bfa_itnim_ioprofile_s {
+	u32 clock_res_mul;
+	u32 clock_res_div;
+	u32 index;
+	u32 io_profile_start_time;	/*  IO profile start time	*/
+	u32 iocomps[BFA_IOBUCKET_MAX];	/*  IO completed	*/
+	struct bfa_itnim_latency_s io_latency;
+};
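+
+/*
+ * Illustrative sketch (not part of the original driver), resting on an
+ * assumption about units: if avg[] accumulates per-bucket latency in
+ * device ticks and clock_res_mul/clock_res_div convert ticks to
+ * nanoseconds, a mean bucket latency could be derived as below.
+ */
+static inline u64
+bfa_itnim_bucket_avg_ns_example(struct bfa_itnim_ioprofile_s *prof, int i)
+{
+	struct bfa_itnim_latency_s *lat = &prof->io_latency;
+
+	if (i >= BFA_IOBUCKET_MAX || !lat->count[i] || !prof->clock_res_div)
+		return 0;
+	return ((u64)lat->avg[i] / lat->count[i]) *
+		prof->clock_res_mul / prof->clock_res_div;
+}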
+
+/*
+ *	vHBA port attribute values.
+ */
+struct bfa_vhba_attr_s {
+	wwn_t	nwwn;       /* node wwn */
+	wwn_t	pwwn;       /* port wwn */
+	u32	pid;        /* port ID */
+	bfa_boolean_t       io_profile; /* get it from fcpim mod */
+	bfa_boolean_t       plog_enabled;   /* portlog is enabled */
+	u16	path_tov;
+	u8	rsvd[2];
+};
+
+/*
+ * FC physical port statistics.
+ */
+struct bfa_port_fc_stats_s {
+	u64     secs_reset;     /*  Seconds since stats were reset */
+	u64     tx_frames;      /*  Tx frames                   */
+	u64     tx_words;       /*  Tx words                    */
+	u64     tx_lip;         /*  Tx LIP                      */
+	u64	tx_lip_f7f7;	/*  Tx LIP_F7F7		*/
+	u64	tx_lip_f8f7;	/*  Tx LIP_F8F7		*/
+	u64	tx_arbf0;	/*  Tx ARB F0			*/
+	u64     tx_nos;         /*  Tx NOS                      */
+	u64     tx_ols;         /*  Tx OLS                      */
+	u64     tx_lr;          /*  Tx LR                       */
+	u64     tx_lrr;         /*  Tx LRR                      */
+	u64     rx_frames;      /*  Rx frames                   */
+	u64     rx_words;       /*  Rx words                    */
+	u64     lip_count;      /*  Rx LIP                      */
+	u64	rx_lip_f7f7;	/*  Rx LIP_F7F7		*/
+	u64	rx_lip_f8f7;	/*  Rx LIP_F8F7		*/
+	u64	rx_arbf0;	/*  Rx ARB F0			*/
+	u64     nos_count;      /*  Rx NOS                      */
+	u64     ols_count;      /*  Rx OLS                      */
+	u64     lr_count;       /*  Rx LR                       */
+	u64     lrr_count;      /*  Rx LRR                      */
+	u64     invalid_crcs;   /*  Rx CRC err frames           */
+	u64     invalid_crc_gd_eof; /*  Rx CRC err good EOF frames */
+	u64     undersized_frm; /*  Rx undersized frames        */
+	u64     oversized_frm;  /*  Rx oversized frames */
+	u64     bad_eof_frm;    /*  Rx frames with bad EOF      */
+	u64     error_frames;   /*  Errored frames              */
+	u64     dropped_frames; /*  Dropped frames              */
+	u64     link_failures;  /*  Link Failure (LF) count     */
+	u64     loss_of_syncs;  /*  Loss of sync count          */
+	u64     loss_of_signals; /*  Loss of signal count       */
+	u64     primseq_errs;   /*  Primitive sequence protocol err. */
+	u64     bad_os_count;   /*  Invalid ordered sets        */
+	u64     err_enc_out;    /*  Encoding err nonframe_8b10b */
+	u64     err_enc;        /*  Encoding err frame_8b10b    */
+	u64	bbcr_frames_lost; /*!< BBCR Frames Lost */
+	u64	bbcr_rrdys_lost; /*!< BBCR RRDYs Lost */
+	u64	bbcr_link_resets; /*!< BBCR Link Resets */
+	u64	bbcr_frame_lost_intrs; /*!< BBCR Frame loss intrs */
+	u64	bbcr_rrdy_lost_intrs; /*!< BBCR Rrdy loss intrs */
+	u64	loop_timeouts;	/*  Loop timeouts		*/
+};
+
+/*
+ * Eth Physical Port statistics.
+ */
+struct bfa_port_eth_stats_s {
+	u64     secs_reset;     /*  Seconds since stats were reset */
+	u64     frame_64;       /*  Frames 64 bytes             */
+	u64     frame_65_127;   /*  Frames 65-127 bytes */
+	u64     frame_128_255;  /*  Frames 128-255 bytes        */
+	u64     frame_256_511;  /*  Frames 256-511 bytes        */
+	u64     frame_512_1023; /*  Frames 512-1023 bytes       */
+	u64     frame_1024_1518; /*  Frames 1024-1518 bytes     */
+	u64     frame_1519_1522; /*  Frames 1519-1522 bytes     */
+	u64     tx_bytes;       /*  Tx bytes                    */
+	u64     tx_packets;      /*  Tx packets         */
+	u64     tx_mcast_packets; /*  Tx multicast packets      */
+	u64     tx_bcast_packets; /*  Tx broadcast packets      */
+	u64     tx_control_frame; /*  Tx control frame          */
+	u64     tx_drop;        /*  Tx drops                    */
+	u64     tx_jabber;      /*  Tx jabber                   */
+	u64     tx_fcs_error;   /*  Tx FCS errors               */
+	u64     tx_fragments;   /*  Tx fragments                */
+	u64     rx_bytes;       /*  Rx bytes                    */
+	u64     rx_packets;     /*  Rx packets                  */
+	u64     rx_mcast_packets; /*  Rx multicast packets      */
+	u64     rx_bcast_packets; /*  Rx broadcast packets      */
+	u64     rx_control_frames; /*  Rx control frames        */
+	u64     rx_unknown_opcode; /*  Rx unknown opcode        */
+	u64     rx_drop;        /*  Rx drops                    */
+	u64     rx_jabber;      /*  Rx jabber                   */
+	u64     rx_fcs_error;   /*  Rx FCS errors               */
+	u64     rx_alignment_error; /*  Rx alignment errors     */
+	u64     rx_frame_length_error; /*  Rx frame len errors  */
+	u64     rx_code_error;  /*  Rx code errors              */
+	u64     rx_fragments;   /*  Rx fragments                */
+	u64     rx_pause;       /*  Rx pause                    */
+	u64     rx_zero_pause;  /*  Rx zero pause               */
+	u64     tx_pause;       /*  Tx pause                    */
+	u64     tx_zero_pause;  /*  Tx zero pause               */
+	u64     rx_fcoe_pause;  /*  Rx FCoE pause               */
+	u64     rx_fcoe_zero_pause; /*  Rx FCoE zero pause      */
+	u64     tx_fcoe_pause;  /*  Tx FCoE pause               */
+	u64     tx_fcoe_zero_pause; /*  Tx FCoE zero pause      */
+	u64     rx_iscsi_pause; /*  Rx iSCSI pause              */
+	u64     rx_iscsi_zero_pause; /*  Rx iSCSI zero pause    */
+	u64     tx_iscsi_pause; /*  Tx iSCSI pause              */
+	u64     tx_iscsi_zero_pause; /*  Tx iSCSI zero pause    */
+};
+
+/*
+ *              Port statistics.
+ */
+union bfa_port_stats_u {
+	struct bfa_port_fc_stats_s      fc;
+	struct bfa_port_eth_stats_s     eth;
+};
+
+struct bfa_port_cfg_mode_s {
+	u16		max_pf;
+	u16		max_vf;
+	enum bfa_mode_s	mode;
+};
+
+#pragma pack(1)
+
+#define BFA_CEE_LLDP_MAX_STRING_LEN	(128)
+#define BFA_CEE_DCBX_MAX_PRIORITY	(8)
+#define BFA_CEE_DCBX_MAX_PGID		(8)
+
+struct bfa_cee_lldp_str_s {
+	u8	sub_type;
+	u8	len;
+	u8	rsvd[2];
+	u8	value[BFA_CEE_LLDP_MAX_STRING_LEN];
+};
+
+struct bfa_cee_lldp_cfg_s {
+	struct bfa_cee_lldp_str_s chassis_id;
+	struct bfa_cee_lldp_str_s port_id;
+	struct bfa_cee_lldp_str_s port_desc;
+	struct bfa_cee_lldp_str_s sys_name;
+	struct bfa_cee_lldp_str_s sys_desc;
+	struct bfa_cee_lldp_str_s mgmt_addr;
+	u16	time_to_live;
+	u16	enabled_system_cap;
+};
+
+/* CEE/DCBX parameters */
+struct bfa_cee_dcbx_cfg_s {
+	u8	pgid[BFA_CEE_DCBX_MAX_PRIORITY];
+	u8	pg_percentage[BFA_CEE_DCBX_MAX_PGID];
+	u8	pfc_primap; /* bitmap of priorities with PFC enabled */
+	u8	fcoe_primap; /* bitmap of priorities used for FCoE traffic */
+	u8	iscsi_primap; /* bitmap of priorities used for iSCSI traffic */
+	u8	dcbx_version; /* operating version: CEE or pre-CEE */
+	u8	lls_fcoe; /* FCoE Logical Link Status */
+	u8	lls_lan; /* LAN Logical Link Status */
+	u8	rsvd[2];
+};
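+
+/*
+ * Illustrative sketch (not part of the original driver): testing one of
+ * the priority bitmaps above, assuming bit <prio> maps to priority
+ * <prio> (0-7).
+ */
+static inline int
+bfa_cee_prio_has_pfc_example(struct bfa_cee_dcbx_cfg_s *cfg, u8 prio)
+{
+	return prio < BFA_CEE_DCBX_MAX_PRIORITY &&
+		((cfg->pfc_primap >> prio) & 1);
+}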
+
+/* CEE Query */
+struct bfa_cee_attr_s {
+	u8	cee_status;
+	u8	error_reason;
+	struct bfa_cee_lldp_cfg_s lldp_remote;
+	struct bfa_cee_dcbx_cfg_s dcbx_remote;
+	mac_t src_mac;
+	u8	link_speed;
+	u8	nw_priority;
+	u8	filler[2];
+};
+
+/* LLDP/DCBX/CEE Statistics */
+struct bfa_cee_stats_s {
+	u32		lldp_tx_frames;		/* LLDP Tx Frames */
+	u32		lldp_rx_frames;		/* LLDP Rx Frames */
+	u32		lldp_rx_frames_invalid; /* LLDP Rx Frames invalid */
+	u32		lldp_rx_frames_new;     /* LLDP Rx Frames new */
+	u32		lldp_tlvs_unrecognized; /* LLDP Rx unrecog. TLVs */
+	u32		lldp_rx_shutdown_tlvs;  /* LLDP Rx shutdown TLVs */
+	u32		lldp_info_aged_out;     /* LLDP remote info aged */
+	u32		dcbx_phylink_ups;       /* DCBX phy link ups */
+	u32		dcbx_phylink_downs;     /* DCBX phy link downs */
+	u32		dcbx_rx_tlvs;           /* DCBX Rx TLVs */
+	u32		dcbx_rx_tlvs_invalid;   /* DCBX Rx TLVs invalid */
+	u32		dcbx_control_tlv_error; /* DCBX control TLV errors */
+	u32		dcbx_feature_tlv_error; /* DCBX feature TLV errors */
+	u32		dcbx_cee_cfg_new;       /* DCBX new CEE cfg rcvd */
+	u32		cee_status_down;        /* DCB status down */
+	u32		cee_status_up;          /* DCB status up */
+	u32		cee_hw_cfg_changed;     /* DCB hw cfg changed */
+	u32		cee_rx_invalid_cfg;     /* DCB invalid cfg */
+};
+
+#pragma pack()
+
+/*
+ *			AEN related definitions
+ */
+#define BFAD_NL_VENDOR_ID (((u64)0x01 << SCSI_NL_VID_TYPE_SHIFT) \
+			   | BFA_PCI_VENDOR_ID_BROCADE)
+
+/* BFA remote port events */
+enum bfa_rport_aen_event {
+	BFA_RPORT_AEN_ONLINE     = 1,   /* RPort online event */
+	BFA_RPORT_AEN_OFFLINE    = 2,   /* RPort offline event */
+	BFA_RPORT_AEN_DISCONNECT = 3,   /* RPort disconnect event */
+	BFA_RPORT_AEN_QOS_PRIO   = 4,   /* QOS priority change event */
+	BFA_RPORT_AEN_QOS_FLOWID = 5,   /* QOS flow Id change event */
+};
+
+struct bfa_rport_aen_data_s {
+	u16             vf_id;  /* vf_id of this logical port */
+	u16             rsvd[3];
+	wwn_t           ppwwn;  /* WWN of its physical port */
+	wwn_t           lpwwn;  /* WWN of this logical port */
+	wwn_t           rpwwn;  /* WWN of this remote port */
+	union {
+		struct bfa_rport_qos_attr_s qos;
+	} priv;
+};
+
+union bfa_aen_data_u {
+	struct bfa_adapter_aen_data_s	adapter;
+	struct bfa_port_aen_data_s	port;
+	struct bfa_lport_aen_data_s	lport;
+	struct bfa_rport_aen_data_s	rport;
+	struct bfa_itnim_aen_data_s	itnim;
+	struct bfa_audit_aen_data_s	audit;
+	struct bfa_ioc_aen_data_s	ioc;
+};
+
+#define BFA_AEN_MAX_ENTRY	512
+
+struct bfa_aen_entry_s {
+	struct list_head	qe;
+	enum bfa_aen_category   aen_category;
+	u32                     aen_type;
+	union bfa_aen_data_u    aen_data;
+	u64			aen_tv_sec;
+	u64			aen_tv_usec;
+	u32                     seq_num;
+	u32                     bfad_num;
+};
+
+#endif /* __BFA_DEFS_SVC_H__ */
diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h
new file mode 100644
index 0000000..18b7304
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fc.h
@@ -0,0 +1,1629 @@
+/*
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014- QLogic Corporation.
+ * All rights reserved
+ * www.qlogic.com
+ *
+ * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __BFA_FC_H__
+#define __BFA_FC_H__
+
+#include "bfad_drv.h"
+
+typedef u64 wwn_t;
+
+#define WWN_NULL	(0)
+#define FC_SYMNAME_MAX	256	/*  max name server symbolic name size */
+#define FC_ALPA_MAX	128
+
+#pragma pack(1)
+
+#define MAC_ADDRLEN	(6)
+struct mac_s { u8 mac[MAC_ADDRLEN]; };
+#define mac_t struct mac_s
+
+/*
+ * generic SCSI cdb definition
+ */
+#define SCSI_MAX_CDBLEN     16
+struct scsi_cdb_s {
+	u8         scsi_cdb[SCSI_MAX_CDBLEN];
+};
+
+/* ------------------------------------------------------------
+ * SCSI status byte values
+ * ------------------------------------------------------------
+ */
+#define SCSI_STATUS_GOOD                   0x00
+#define SCSI_STATUS_CHECK_CONDITION        0x02
+#define SCSI_STATUS_CONDITION_MET          0x04
+#define SCSI_STATUS_BUSY                   0x08
+#define SCSI_STATUS_INTERMEDIATE           0x10
+#define SCSI_STATUS_ICM                    0x14 /* intermediate condition met */
+#define SCSI_STATUS_RESERVATION_CONFLICT   0x18
+#define SCSI_STATUS_COMMAND_TERMINATED     0x22
+#define SCSI_STATUS_QUEUE_FULL             0x28
+#define SCSI_STATUS_ACA_ACTIVE             0x30
+
+#define SCSI_MAX_ALLOC_LEN      0xFF    /* maximum allocation length */
+
+/*
+ * Fibre Channel Header Structure (FCHS) definition
+ */
+struct fchs_s {
+#ifdef __BIG_ENDIAN
+	u32        routing:4;	/* routing bits */
+	u32        cat_info:4;	/* category info */
+#else
+	u32        cat_info:4;	/* category info */
+	u32        routing:4;	/* routing bits */
+#endif
+	u32        d_id:24;	/* destination identifier */
+
+	u32        cs_ctl:8;	/* class specific control */
+	u32        s_id:24;	/* source identifier */
+
+	u32        type:8;	/* data structure type */
+	u32        f_ctl:24;	/* initial frame control */
+
+	u8         seq_id;	/* sequence identifier */
+	u8         df_ctl;	/* data field control */
+	u16        seq_cnt;	/* sequence count */
+
+	__be16     ox_id;	/* originator exchange ID */
+	u16        rx_id;	/* responder exchange ID */
+
+	u32        ro;		/* relative offset */
+};
+
+/*
+ * routing bit definitions
+ */
+enum {
+	FC_RTG_FC4_DEV_DATA	= 0x0,	/* FC-4 Device Data */
+	FC_RTG_EXT_LINK		= 0x2,	/* Extended Link Data */
+	FC_RTG_FC4_LINK_DATA	= 0x3,	/* FC-4 Link Data */
+	FC_RTG_VIDEO_DATA	= 0x4,	/* Video Data */
+	FC_RTG_EXT_HDR		= 0x5,	/* VFT, IFR or Encapsulated */
+	FC_RTG_BASIC_LINK	= 0x8,	/* Basic Link data */
+	FC_RTG_LINK_CTRL	= 0xC,	/* Link Control */
+};
+
+/*
+ * information category for extended link data and FC-4 Link Data
+ */
+enum {
+	FC_CAT_LD_REQUEST	= 0x2,	/* Request */
+	FC_CAT_LD_REPLY		= 0x3,	/* Reply */
+	FC_CAT_LD_DIAG		= 0xF,	/* for DIAG use only */
+};
+
+/*
+ * information category for extended headers (VFT, IFR or encapsulation)
+ */
+enum {
+	FC_CAT_VFT_HDR = 0x0,	/* Virtual fabric tagging header */
+	FC_CAT_IFR_HDR = 0x1,	/* Inter-Fabric routing header */
+	FC_CAT_ENC_HDR = 0x2,	/* Encapsulation header */
+};
+
+/*
+ * information category for FC-4 device data
+ */
+enum {
+	FC_CAT_UNCATEG_INFO	= 0x0,	/* Uncategorized information */
+	FC_CAT_SOLICIT_DATA	= 0x1,	/* Solicited Data */
+	FC_CAT_UNSOLICIT_CTRL	= 0x2,	/* Unsolicited Control */
+	FC_CAT_SOLICIT_CTRL	= 0x3,	/* Solicited Control */
+	FC_CAT_UNSOLICIT_DATA	= 0x4,	/* Unsolicited Data */
+	FC_CAT_DATA_DESC	= 0x5,	/* Data Descriptor */
+	FC_CAT_UNSOLICIT_CMD	= 0x6,	/* Unsolicited Command */
+	FC_CAT_CMD_STATUS	= 0x7,	/* Command Status */
+};
+
+/*
+ * Type Field Definitions. FC-PH Section 18.5 pg. 165
+ */
+enum {
+	FC_TYPE_BLS		= 0x0,	/* Basic Link Service */
+	FC_TYPE_ELS		= 0x1,	/* Extended Link Service */
+	FC_TYPE_IP		= 0x5,	/* IP */
+	FC_TYPE_FCP		= 0x8,	/* SCSI-FCP */
+	FC_TYPE_GPP		= 0x9,	/* SCSI_GPP */
+	FC_TYPE_SERVICES	= 0x20,	/* Fibre Channel Services */
+	FC_TYPE_FC_FSS		= 0x22,	/* Fabric Switch Services */
+	FC_TYPE_FC_AL		= 0x23,	/* FC-AL */
+	FC_TYPE_FC_SNMP		= 0x24,	/* FC-SNMP */
+	FC_TYPE_FC_SPINFAB	= 0xEE,	/* SPINFAB */
+	FC_TYPE_FC_DIAG		= 0xEF,	/* DIAG */
+	FC_TYPE_MAX		= 256,	/* 256 FC-4 types */
+};
+
+/*
+ * Frame Control Definitions. FC-PH Table-45. pg. 168
+ */
+enum {
+	FCTL_EC_ORIG = 0x000000,	/* exchange originator */
+	FCTL_EC_RESP = 0x800000,	/* exchange responder */
+	FCTL_SEQ_INI = 0x000000,	/* sequence initiator */
+	FCTL_SEQ_REC = 0x400000,	/* sequence recipient */
+	FCTL_FS_EXCH = 0x200000,	/* first sequence of xchg */
+	FCTL_LS_EXCH = 0x100000,	/* last sequence of xchg */
+	FCTL_END_SEQ = 0x080000,	/* last frame of sequence */
+	FCTL_SI_XFER = 0x010000,	/* seq initiative transfer */
+	FCTL_RO_PRESENT = 0x000008,	/* relative offset present */
+	FCTL_FILLBYTE_MASK = 0x000003	/* fill byte mask */
+};
+
+/*
+ * Fabric Well Known Addresses
+ */
+enum {
+	FC_MIN_WELL_KNOWN_ADDR		= 0xFFFFF0,
+	FC_DOMAIN_CONTROLLER_MASK	= 0xFFFC00,
+	FC_ALIAS_SERVER			= 0xFFFFF8,
+	FC_MGMT_SERVER			= 0xFFFFFA,
+	FC_TIME_SERVER			= 0xFFFFFB,
+	FC_NAME_SERVER			= 0xFFFFFC,
+	FC_FABRIC_CONTROLLER		= 0xFFFFFD,
+	FC_FABRIC_PORT			= 0xFFFFFE,
+	FC_BROADCAST_SERVER		= 0xFFFFFF
+};
+
+/*
+ * domain/area/port defines
+ */
+#define FC_DOMAIN_MASK  0xFF0000
+#define FC_DOMAIN_SHIFT 16
+#define FC_AREA_MASK    0x00FF00
+#define FC_AREA_SHIFT   8
+#define FC_PORT_MASK    0x0000FF
+#define FC_PORT_SHIFT   0
+
+#define FC_GET_DOMAIN(p)	(((p) & FC_DOMAIN_MASK) >> FC_DOMAIN_SHIFT)
+#define FC_GET_AREA(p)		(((p) & FC_AREA_MASK) >> FC_AREA_SHIFT)
+#define FC_GET_PORT(p)		(((p) & FC_PORT_MASK) >> FC_PORT_SHIFT)
+
+#define FC_DOMAIN_CTRLR(p)	(FC_DOMAIN_CONTROLLER_MASK | (FC_GET_DOMAIN(p)))
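+
+/*
+ * Illustrative sketch (not part of the original driver): splitting a
+ * 24-bit port identifier with the accessors above. A hypothetical PID
+ * of 0x0A1B2C yields domain 0x0A, area 0x1B and port 0x2C.
+ */
+static inline void
+fc_pid_split_example(u32 pid, u8 *domain, u8 *area, u8 *port)
+{
+	*domain = FC_GET_DOMAIN(pid);	/* bits 23..16 */
+	*area   = FC_GET_AREA(pid);	/* bits 15..8  */
+	*port   = FC_GET_PORT(pid);	/* bits 7..0   */
+}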
+
+enum {
+	FC_RXID_ANY = 0xFFFFU,
+};
+
+/*
+ * generic ELS command
+ */
+struct fc_els_cmd_s {
+	u32        els_code:8;	/* ELS Command Code */
+	u32        reserved:24;
+};
+
+/*
+ * ELS Command Codes. FC-PH Table-75. pg. 223
+ */
+enum {
+	FC_ELS_LS_RJT = 0x1,	/* Link Service Reject. */
+	FC_ELS_ACC = 0x02,	/* Accept */
+	FC_ELS_PLOGI = 0x03,	/* N_Port Login. */
+	FC_ELS_FLOGI = 0x04,	/* F_Port Login. */
+	FC_ELS_LOGO = 0x05,	/* Logout. */
+	FC_ELS_ABTX = 0x06,	/* Abort Exchange */
+	FC_ELS_RES = 0x08,	/* Read Exchange status */
+	FC_ELS_RSS = 0x09,	/* Read sequence status block */
+	FC_ELS_RSI = 0x0A,	/* Request Sequence Initiative */
+	FC_ELS_ESTC = 0x0C,	/* Estimate Credit. */
+	FC_ELS_RTV = 0x0E,	/* Read Timeout Value. */
+	FC_ELS_RLS = 0x0F,	/* Read Link Status. */
+	FC_ELS_ECHO = 0x10,	/* Echo */
+	FC_ELS_TEST = 0x11,	/* Test */
+	FC_ELS_RRQ = 0x12,	/* Reinstate Recovery Qualifier. */
+	FC_ELS_REC = 0x13,	/* Add this for TAPE support in FCR */
+	FC_ELS_PRLI = 0x20,	/* Process Login */
+	FC_ELS_PRLO = 0x21,	/* Process Logout. */
+	FC_ELS_SCN = 0x22,	/* State Change Notification. */
+	FC_ELS_TPRLO = 0x24,	/* Third Party Process Logout. */
+	FC_ELS_PDISC = 0x50,	/* Discover N_Port Parameters. */
+	FC_ELS_FDISC = 0x51,	/* Discover F_Port Parameters. */
+	FC_ELS_ADISC = 0x52,	/* Discover Address. */
+	FC_ELS_FARP_REQ = 0x54,	/* FARP Request. */
+	FC_ELS_FARP_REP = 0x55,	/* FARP Reply. */
+	FC_ELS_FAN = 0x60,	/* Fabric Address Notification */
+	FC_ELS_RSCN = 0x61,	/* Reg State Change Notification */
+	FC_ELS_SCR = 0x62,	/* State Change Registration. */
+	FC_ELS_RTIN = 0x77,	/* Management server request */
+	FC_ELS_RNID = 0x78,	/* Management server request */
+	FC_ELS_RLIR = 0x79,	/* Registered Link Incident Record */
+
+	FC_ELS_RPSC = 0x7D,	/* Report Port Speed Capabilities */
+	FC_ELS_QSA = 0x7E,	/* Query Security Attributes. Ref FC-SP */
+	FC_ELS_E2E_LBEACON = 0x81,
+				/* End-to-End Link Beacon */
+	FC_ELS_AUTH = 0x90,	/* Authentication. Ref FC-SP */
+	FC_ELS_RFCN = 0x97,	/* Request Fabric Change Notification.
+				 * Ref FC-SP */
+};
+
+/*
+ *  Version numbers for FC-PH standards,
+ *  used in login to indicate what port
+ *  supports. See FC-PH-X table 158.
+ */
+enum {
+	FC_PH_VER_4_3 = 0x09,
+	FC_PH_VER_PH_3 = 0x20,
+};
+
+/*
+ * PDU size defines
+ */
+enum {
+	FC_MIN_PDUSZ = 512,
+	FC_MAX_PDUSZ = 2112,
+};
+
+/*
+ * N_Port PLOGI Common Service Parameters.
+ * FC-PH-x. Figure-76. pg. 308.
+ */
+struct fc_plogi_csp_s {
+	u8		verhi;		/* FC-PH high version */
+	u8		verlo;		/* FC-PH low version */
+	__be16		bbcred;		/* BB_Credit */
+
+#ifdef __BIG_ENDIAN
+	u8		ciro:1,		/* continuously increasing RO */
+			rro:1,		/* random relative offset */
+			npiv_supp:1,	/* NPIV supported */
+			port_type:1,	/* N_Port/F_port */
+			altbbcred:1,	/* alternate BB_Credit */
+			resolution:1,	/* ms/ns ED_TOV resolution */
+			vvl_info:1,	/* VVL Info included */
+			reserved1:1;
+
+	u8		hg_supp:1,
+			query_dbc:1,
+			security:1,
+			sync_cap:1,
+			r_t_tov:1,
+			dh_dup_supp:1,
+			cisc:1,		/* continuously increasing seq count */
+			payload:1;
+#else
+	u8		reserved2:2,
+			resolution:1,	/* ms/ns ED_TOV resolution */
+			altbbcred:1,	/* alternate BB_Credit */
+			port_type:1,	/* N_Port/F_port */
+			npiv_supp:1,	/* NPIV supported */
+			rro:1,		/* random relative offset */
+			ciro:1;		/* continuously increasing RO */
+
+	u8		payload:1,
+			cisc:1,		/* continuously increasing seq count */
+			dh_dup_supp:1,
+			r_t_tov:1,
+			sync_cap:1,
+			security:1,
+			query_dbc:1,
+			hg_supp:1;
+#endif
+	__be16		rxsz;		/* receive data_field size */
+	__be16		conseq;
+	__be16		ro_bitmap;
+	__be32		e_d_tov;
+};
+
+/*
+ * N_Port PLOGI Class Specific Parameters.
+ * FC-PH-x. Figure 78. pg. 318.
+ */
+struct fc_plogi_clp_s {
+#ifdef __BIG_ENDIAN
+	u32        class_valid:1;
+	u32        intermix:1;	/* class intermix supported if set = 1;
+				 * valid only for class 1, reserved for
+				 * class 2 & class 3 */
+	u32        reserved1:2;
+	u32        sequential:1;
+	u32        reserved2:3;
+#else
+	u32        reserved2:3;
+	u32        sequential:1;
+	u32        reserved1:2;
+	u32        intermix:1;	/* class intermix supported if set = 1;
+				 * valid only for class 1, reserved for
+				 * class 2 & class 3 */
+	u32        class_valid:1;
+#endif
+	u32        reserved3:24;
+
+	u32        reserved4:16;
+	u32        rxsz:16;	/* Receive data_field size */
+
+	u32        reserved5:8;
+	u32        conseq:8;
+	u32        e2e_credit:16; /* end to end credit */
+
+	u32        reserved7:8;
+	u32        ospx:8;
+	u32        reserved8:16;
+};
+
+/* ASCII value for each character in string "BRCD" */
+#define FLOGI_VVL_BRCD    0x42524344
+
+/*
+ * PLOGI els command and reply payload
+ */
+struct fc_logi_s {
+	struct fc_els_cmd_s	els_cmd;	/* ELS command code */
+	struct fc_plogi_csp_s	csp;		/* common service params */
+	wwn_t			port_name;
+	wwn_t			node_name;
+	struct fc_plogi_clp_s	class1;		/* class 1 service parameters */
+	struct fc_plogi_clp_s	class2;		/* class 2 service parameters */
+	struct fc_plogi_clp_s	class3;		/* class 3 service parameters */
+	struct fc_plogi_clp_s	class4;		/* class 4 service parameters */
+	u8			vvl[16];	/* vendor version level */
+};
+
+/*
+ * LOGO els command payload
+ */
+struct fc_logo_s {
+	struct fc_els_cmd_s	els_cmd;	/* ELS command code */
+	u32			res1:8;
+	u32		nport_id:24;	/* N_Port identifier of source */
+	wwn_t           orig_port_name;	/* Port name of the LOGO originator */
+};
+
+/*
+ * ADISC els command payload
+ */
+struct fc_adisc_s {
+	struct fc_els_cmd_s els_cmd;	/* ELS command code */
+	u32		res1:8;
+	u32		orig_HA:24;	/* originator hard address */
+	wwn_t		orig_port_name;	/* originator port name */
+	wwn_t		orig_node_name;	/* originator node name */
+	u32		res2:8;
+	u32		nport_id:24;	/* originator NPortID */
+};
+
+/*
+ * Exchange status block
+ */
+struct fc_exch_status_blk_s {
+	u32        oxid:16;
+	u32        rxid:16;
+	u32        res1:8;
+	u32        orig_np:24;	/* originator NPortID */
+	u32        res2:8;
+	u32        resp_np:24;	/* responder NPortID */
+	u32        es_bits;
+	u32        res3;
+	/*
+	 * unmodified section of the fields
+	 */
+};
+
+/*
+ * RES els command payload
+ */
+struct fc_res_s {
+	struct fc_els_cmd_s els_cmd;	/* ELS command code */
+	u32        res1:8;
+	u32        nport_id:24;		/* N_Port identifier of source */
+	u32        oxid:16;
+	u32        rxid:16;
+	u8         assoc_hdr[32];
+};
+
+/*
+ * RES els accept payload
+ */
+struct fc_res_acc_s {
+	struct fc_els_cmd_s		els_cmd;	/* ELS command code */
+	struct fc_exch_status_blk_s	fc_exch_blk; /* Exchange status block */
+};
+
+/*
+ * REC els command payload
+ */
+struct fc_rec_s {
+	struct fc_els_cmd_s els_cmd;	/* ELS command code */
+	u32        res1:8;
+	u32        nport_id:24;	/* N_Port identifier of source */
+	u32        oxid:16;
+	u32        rxid:16;
+};
+
+#define FC_REC_ESB_OWN_RSP	0x80000000	/* responder owns */
+#define FC_REC_ESB_SI		0x40000000	/* SI is owned	*/
+#define FC_REC_ESB_COMP		0x20000000	/* exchange is complete	*/
+#define FC_REC_ESB_ENDCOND_ABN	0x10000000	/* abnormal ending	*/
+#define FC_REC_ESB_RQACT	0x04000000	/* recovery qual active	*/
+#define FC_REC_ESB_ERRP_MSK	0x03000000
+#define FC_REC_ESB_OXID_INV	0x00800000	/* invalid OXID		*/
+#define FC_REC_ESB_RXID_INV	0x00400000	/* invalid RXID		*/
+#define FC_REC_ESB_PRIO_INUSE	0x00200000
+
+/*
+ * REC els accept payload
+ */
+struct fc_rec_acc_s {
+	struct fc_els_cmd_s els_cmd;	/* ELS command code */
+	u32        oxid:16;
+	u32        rxid:16;
+	u32        res1:8;
+	u32        orig_id:24;	/* N_Port id of exchange originator */
+	u32        res2:8;
+	u32        resp_id:24;	/* N_Port id of exchange responder */
+	u32        count;	/* data transfer count */
+	u32        e_stat;	/* exchange status */
+};
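+
+/*
+ * Illustrative sketch (not part of the original driver): interpreting the
+ * e_stat field of a REC accept with the FC_REC_ESB_* flags above, assuming
+ * e_stat has already been converted to host byte order.
+ */
+static inline int
+fc_rec_exch_complete_example(struct fc_rec_acc_s *acc)
+{
+	return (acc->e_stat & FC_REC_ESB_COMP) &&
+		!(acc->e_stat & FC_REC_ESB_ENDCOND_ABN);
+}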
+
+/*
+ * RSI els payload
+ */
+struct fc_rsi_s {
+	struct fc_els_cmd_s els_cmd;
+	u32        res1:8;
+	u32        orig_sid:24;
+	u32        oxid:16;
+	u32        rxid:16;
+};
+
+/*
+ * structure for PRLI parameter pages, both request & response;
+ * see FC-PH-X tables 113 & 115 for explanation, also FCP table 8
+ */
+struct fc_prli_params_s {
+	u32        reserved:16;
+#ifdef __BIG_ENDIAN
+	u32        reserved1:5;
+	u32        rec_support:1;
+	u32        task_retry_id:1;
+	u32        retry:1;
+
+	u32        confirm:1;
+	u32        doverlay:1;
+	u32        initiator:1;
+	u32        target:1;
+	u32        cdmix:1;
+	u32        drmix:1;
+	u32        rxrdisab:1;
+	u32        wxrdisab:1;
+#else
+	u32        retry:1;
+	u32        task_retry_id:1;
+	u32        rec_support:1;
+	u32        reserved1:5;
+
+	u32        wxrdisab:1;
+	u32        rxrdisab:1;
+	u32        drmix:1;
+	u32        cdmix:1;
+	u32        target:1;
+	u32        initiator:1;
+	u32        doverlay:1;
+	u32        confirm:1;
+#endif
+};
+
+/*
+ * valid values for rspcode in PRLI ACC payload
+ */
+enum {
+	FC_PRLI_ACC_XQTD = 0x1,		/* request executed */
+	FC_PRLI_ACC_PREDEF_IMG = 0x5,	/* predefined image - no prli needed */
+};
+
+struct fc_prli_params_page_s {
+	u32        type:8;
+	u32        codext:8;
+#ifdef __BIG_ENDIAN
+	u32        origprocasv:1;
+	u32        rsppav:1;
+	u32        imagepair:1;
+	u32        reserved1:1;
+	u32        rspcode:4;
+#else
+	u32        rspcode:4;
+	u32        reserved1:1;
+	u32        imagepair:1;
+	u32        rsppav:1;
+	u32        origprocasv:1;
+#endif
+	u32        reserved2:8;
+
+	u32        origprocas;
+	u32        rspprocas;
+	struct fc_prli_params_s servparams;
+};
+
+/*
+ * PRLI request and accept payload, FC-PH-X tables 112 & 114
+ */
+struct fc_prli_s {
+	u32        command:8;
+	u32        pglen:8;
+	u32        pagebytes:16;
+	struct fc_prli_params_page_s parampage;
+};
+
+/*
+ * PRLO logout params page
+ */
+struct fc_prlo_params_page_s {
+	u32        type:8;
+	u32        type_ext:8;
+#ifdef __BIG_ENDIAN
+	u32        opa_valid:1;	/* originator process associator valid */
+	u32        rpa_valid:1;	/* responder process associator valid */
+	u32        res1:14;
+#else
+	u32        res1:14;
+	u32        rpa_valid:1;	/* responder process associator valid */
+	u32        opa_valid:1;	/* originator process associator valid */
+#endif
+	u32        orig_process_assc;
+	u32        resp_process_assc;
+
+	u32        res2;
+};
+
+/*
+ * PRLO els command payload
+ */
+struct fc_prlo_s {
+	u32	command:8;
+	u32	page_len:8;
+	u32	payload_len:16;
+	struct fc_prlo_params_page_s	prlo_params[1];
+};
+
+/*
+ * PRLO Logout response parameter page
+ */
+struct fc_prlo_acc_params_page_s {
+	u32        type:8;
+	u32        type_ext:8;
+
+#ifdef __BIG_ENDIAN
+	u32        opa_valid:1;	/* originator process associator valid */
+	u32        rpa_valid:1;	/* responder process associator valid */
+	u32        res1:14;
+#else
+	u32        res1:14;
+	u32        rpa_valid:1;	/* responder process associator valid */
+	u32        opa_valid:1;	/* originator process associator valid */
+#endif
+	u32        orig_process_assc;
+	u32        resp_process_assc;
+
+	u32        fc4type_csp;
+};
+
+/*
+ * PRLO els command ACC payload
+ */
+struct fc_prlo_acc_s {
+	u32        command:8;
+	u32        page_len:8;
+	u32        payload_len:16;
+	struct fc_prlo_acc_params_page_s prlo_acc_params[1];
+};
+
+/*
+ * SCR els command payload
+ */
+enum {
+	FC_SCR_REG_FUNC_FABRIC_DETECTED = 0x01,
+	FC_SCR_REG_FUNC_N_PORT_DETECTED = 0x02,
+	FC_SCR_REG_FUNC_FULL = 0x03,
+	FC_SCR_REG_FUNC_CLEAR_REG = 0xFF,
+};
+
+/* SCR VU registrations */
+enum {
+	FC_VU_SCR_REG_FUNC_FABRIC_NAME_CHANGE = 0x01
+};
+
+struct fc_scr_s {
+	u32 command:8;
+	u32 res:24;
+	u32 vu_reg_func:8; /* Vendor Unique Registrations */
+	u32 res1:16;
+	u32 reg_func:8;
+};
+
+/*
+ * Information category for Basic link data
+ */
+enum {
+	FC_CAT_NOP	= 0x0,
+	FC_CAT_ABTS	= 0x1,
+	FC_CAT_RMC	= 0x2,
+	FC_CAT_BA_ACC	= 0x4,
+	FC_CAT_BA_RJT	= 0x5,
+	FC_CAT_PRMT	= 0x6,
+};
+
+/*
+ * LS_RJT els reply payload
+ */
+struct fc_ls_rjt_s {
+	struct fc_els_cmd_s els_cmd;	/* ELS command code */
+	u32        res1:8;
+	u32        reason_code:8;	/* Reason code for reject */
+	u32        reason_code_expl:8;	/* Reason code explanation */
+	u32        vendor_unique:8;	/* Vendor specific */
+};
+
+/*
+ * LS_RJT reason codes
+ */
+enum {
+	FC_LS_RJT_RSN_INV_CMD_CODE	= 0x01,
+	FC_LS_RJT_RSN_LOGICAL_ERROR	= 0x03,
+	FC_LS_RJT_RSN_LOGICAL_BUSY	= 0x05,
+	FC_LS_RJT_RSN_PROTOCOL_ERROR	= 0x07,
+	FC_LS_RJT_RSN_UNABLE_TO_PERF_CMD = 0x09,
+	FC_LS_RJT_RSN_CMD_NOT_SUPP	= 0x0B,
+};
+
+/*
+ * LS_RJT reason code explanation
+ */
+enum {
+	FC_LS_RJT_EXP_NO_ADDL_INFO		= 0x00,
+	FC_LS_RJT_EXP_SPARMS_ERR_OPTIONS	= 0x01,
+	FC_LS_RJT_EXP_SPARMS_ERR_INI_CTL	= 0x03,
+	FC_LS_RJT_EXP_SPARMS_ERR_REC_CTL	= 0x05,
+	FC_LS_RJT_EXP_SPARMS_ERR_RXSZ		= 0x07,
+	FC_LS_RJT_EXP_SPARMS_ERR_CONSEQ		= 0x09,
+	FC_LS_RJT_EXP_SPARMS_ERR_CREDIT		= 0x0B,
+	FC_LS_RJT_EXP_INV_PORT_NAME		= 0x0D,
+	FC_LS_RJT_EXP_INV_NODE_FABRIC_NAME	= 0x0E,
+	FC_LS_RJT_EXP_INV_CSP			= 0x0F,
+	FC_LS_RJT_EXP_INV_ASSOC_HDR		= 0x11,
+	FC_LS_RJT_EXP_ASSOC_HDR_REQD		= 0x13,
+	FC_LS_RJT_EXP_INV_ORIG_S_ID		= 0x15,
+	FC_LS_RJT_EXP_INV_OXID_RXID_COMB	= 0x17,
+	FC_LS_RJT_EXP_CMD_ALREADY_IN_PROG	= 0x19,
+	FC_LS_RJT_EXP_LOGIN_REQUIRED		= 0x1E,
+	FC_LS_RJT_EXP_INVALID_NPORT_ID		= 0x1F,
+	FC_LS_RJT_EXP_INSUFF_RES		= 0x29,
+	FC_LS_RJT_EXP_CMD_NOT_SUPP		= 0x2C,
+	FC_LS_RJT_EXP_INV_PAYLOAD_LEN		= 0x2D,
+};
+
+/*
+ * RRQ els command payload
+ */
+struct fc_rrq_s {
+	struct fc_els_cmd_s els_cmd;	/* ELS command code */
+	u32        res1:8;
+	u32        s_id:24;	/* exchange originator S_ID */
+
+	u32        ox_id:16;	/* originator exchange ID */
+	u32        rx_id:16;	/* responder exchange ID */
+
+	u32        res2[8];	/* optional association header */
+};
+
+/*
+ * ABTS BA_ACC reply payload
+ */
+struct fc_ba_acc_s {
+	u32        seq_id_valid:8;	/* set to 0x00 for Abort Exchange */
+	u32        seq_id:8;		/* invalid for Abort Exchange */
+	u32        res2:16;
+	u32        ox_id:16;		/* OX_ID from ABTS frame */
+	u32        rx_id:16;		/* RX_ID from ABTS frame */
+	u32        low_seq_cnt:16;	/* set to 0x0000 for Abort Exchange */
+	u32        high_seq_cnt:16;	/* set to 0xFFFF for Abort Exchange */
+};
+
+/*
+ * ABTS BA_RJT reject payload
+ */
+struct fc_ba_rjt_s {
+	u32        res1:8;		/* Reserved */
+	u32        reason_code:8;	/* reason code for reject */
+	u32        reason_expl:8;	/* reason code explanation */
+	u32        vendor_unique:8; /* vendor unique reason code, set to 0 */
+};
+
+/*
+ * TPRLO logout parameter page
+ */
+struct fc_tprlo_params_page_s {
+	u32        type:8;
+	u32        type_ext:8;
+
+#ifdef __BIG_ENDIAN
+	u32        opa_valid:1;
+	u32        rpa_valid:1;
+	u32        tpo_nport_valid:1;
+	u32        global_process_logout:1;
+	u32        res1:12;
+#else
+	u32        res1:12;
+	u32        global_process_logout:1;
+	u32        tpo_nport_valid:1;
+	u32        rpa_valid:1;
+	u32        opa_valid:1;
+#endif
+
+	u32        orig_process_assc;
+	u32        resp_process_assc;
+
+	u32        res2:8;
+	u32        tpo_nport_id;
+};
+
+/*
+ * TPRLO ELS command payload
+ */
+struct fc_tprlo_s {
+	u32        command:8;
+	u32        page_len:8;
+	u32        payload_len:16;
+
+	struct fc_tprlo_params_page_s tprlo_params[1];
+};
+
+enum fc_tprlo_type {
+	FC_GLOBAL_LOGO = 1,
+	FC_TPR_LOGO
+};
+
+/*
+ * TPRLO els command ACC payload
+ */
+struct fc_tprlo_acc_s {
+	u32	command:8;
+	u32	page_len:8;
+	u32	payload_len:16;
+	struct fc_prlo_acc_params_page_s tprlo_acc_params[1];
+};
+
+/*
+ * RSCN els command req payload
+ */
+#define FC_RSCN_PGLEN	0x4
+
+enum fc_rscn_format {
+	FC_RSCN_FORMAT_PORTID	= 0x0,
+	FC_RSCN_FORMAT_AREA	= 0x1,
+	FC_RSCN_FORMAT_DOMAIN	= 0x2,
+	FC_RSCN_FORMAT_FABRIC	= 0x3,
+};
+
+struct fc_rscn_event_s {
+	u32	format:2;
+	u32	qualifier:4;
+	u32	resvd:2;
+	u32	portid:24;
+};
+
+struct fc_rscn_pl_s {
+	u8	command;
+	u8	pagelen;
+	__be16	payldlen;
+	struct fc_rscn_event_s event[1];
+};
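+
+/*
+ * Illustrative sketch (not part of the original driver): payldlen covers
+ * the 4-byte RSCN header plus the event pages, so the number of event
+ * entries in a received RSCN can be derived as below (pagelen is
+ * normally FC_RSCN_PGLEN).
+ */
+static inline int
+fc_rscn_num_events_example(struct fc_rscn_pl_s *rscn)
+{
+	if (!rscn->pagelen)
+		return 0;
+	return (be16_to_cpu(rscn->payldlen) - sizeof(u32)) / rscn->pagelen;
+}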
+
+/*
+ * ECHO els command req payload
+ */
+struct fc_echo_s {
+	struct fc_els_cmd_s els_cmd;
+};
+
+/*
+ * RNID els command
+ */
+#define RNID_NODEID_DATA_FORMAT_COMMON			0x00
+#define RNID_NODEID_DATA_FORMAT_FCP3			0x08
+#define RNID_NODEID_DATA_FORMAT_DISCOVERY		0xDF
+
+#define RNID_ASSOCIATED_TYPE_UNKNOWN			0x00000001
+#define RNID_ASSOCIATED_TYPE_OTHER                      0x00000002
+#define RNID_ASSOCIATED_TYPE_HUB                        0x00000003
+#define RNID_ASSOCIATED_TYPE_SWITCH                     0x00000004
+#define RNID_ASSOCIATED_TYPE_GATEWAY                    0x00000005
+#define RNID_ASSOCIATED_TYPE_STORAGE_DEVICE             0x00000009
+#define RNID_ASSOCIATED_TYPE_HOST                       0x0000000A
+#define RNID_ASSOCIATED_TYPE_STORAGE_SUBSYSTEM          0x0000000B
+#define RNID_ASSOCIATED_TYPE_STORAGE_ACCESS_DEVICE      0x0000000E
+#define RNID_ASSOCIATED_TYPE_NAS_SERVER                 0x00000011
+#define RNID_ASSOCIATED_TYPE_BRIDGE                     0x00000002
+#define RNID_ASSOCIATED_TYPE_VIRTUALIZATION_DEVICE      0x00000003
+#define RNID_ASSOCIATED_TYPE_MULTI_FUNCTION_DEVICE      0x000000FF
+
+/*
+ * RNID els command payload
+ */
+struct fc_rnid_cmd_s {
+	struct fc_els_cmd_s els_cmd;
+	u32        node_id_data_format:8;
+	u32        reserved:24;
+};
+
+/*
+ * RNID els response payload
+ */
+
+struct fc_rnid_common_id_data_s {
+	wwn_t		port_name;
+	wwn_t           node_name;
+};
+
+struct fc_rnid_general_topology_data_s {
+	u32        vendor_unique[4];
+	__be32     asso_type;
+	u32        phy_port_num;
+	__be32     num_attached_nodes;
+	u32        node_mgmt:8;
+	u32        ip_version:8;
+	u32        udp_tcp_port_num:16;
+	u32        ip_address[4];
+	u32        reserved:16;
+	u32        vendor_specific:16;
+};
+
+struct fc_rnid_acc_s {
+	struct fc_els_cmd_s els_cmd;
+	u32        node_id_data_format:8;
+	u32        common_id_data_length:8;
+	u32        reserved:8;
+	u32        specific_id_data_length:8;
+	struct fc_rnid_common_id_data_s common_id_data;
+	struct fc_rnid_general_topology_data_s gen_topology_data;
+};
+
+enum fc_rpsc_speed_cap {
+	RPSC_SPEED_CAP_1G = 0x8000,
+	RPSC_SPEED_CAP_2G = 0x4000,
+	RPSC_SPEED_CAP_4G = 0x2000,
+	RPSC_SPEED_CAP_10G = 0x1000,
+	RPSC_SPEED_CAP_8G = 0x0800,
+	RPSC_SPEED_CAP_16G = 0x0400,
+
+	RPSC_SPEED_CAP_UNKNOWN = 0x0001,
+};
+
+enum fc_rpsc_op_speed {
+	RPSC_OP_SPEED_1G = 0x8000,
+	RPSC_OP_SPEED_2G = 0x4000,
+	RPSC_OP_SPEED_4G = 0x2000,
+	RPSC_OP_SPEED_10G = 0x1000,
+	RPSC_OP_SPEED_8G = 0x0800,
+	RPSC_OP_SPEED_16G = 0x0400,
+
+	RPSC_OP_SPEED_NOT_EST = 0x0001,	/* speed not established */
+};
+
+struct fc_rpsc_speed_info_s {
+	__be16        port_speed_cap;	/* see enum fc_rpsc_speed_cap */
+	__be16        port_op_speed;	/* see enum fc_rpsc_op_speed */
+};
+
+/*
+ * If RPSC request is sent to the Domain Controller, the request is for
+ * all the ports within that domain.
+ */
+struct fc_rpsc_cmd_s {
+	struct fc_els_cmd_s els_cmd;
+};
+
+/*
+ * RPSC Acc
+ */
+struct fc_rpsc_acc_s {
+	u32        command:8;
+	u32        rsvd:8;
+	u32        num_entries:16;
+
+	struct fc_rpsc_speed_info_s speed_info[1];
+};
+
+/*
+ * If an RPSC2 request is sent to the Domain Controller, the request is
+ * for all the ports within that domain.
+ */
+#define FC_BRCD_TOKEN    0x42524344
+
+struct fc_rpsc2_cmd_s {
+	struct fc_els_cmd_s els_cmd;
+	__be32	token;
+	u16	resvd;
+	__be16	num_pids;		/* Number of pids in the request */
+	struct  {
+		u32	rsvd1:8;
+		u32	pid:24;		/* port identifier */
+	} pid_list[1];
+};
+
+enum fc_rpsc2_port_type {
+	RPSC2_PORT_TYPE_UNKNOWN = 0,
+	RPSC2_PORT_TYPE_NPORT   = 1,
+	RPSC2_PORT_TYPE_NLPORT  = 2,
+	RPSC2_PORT_TYPE_NPIV_PORT  = 0x5f,
+	RPSC2_PORT_TYPE_NPORT_TRUNK  = 0x6f,
+};
+
+/*
+ * RPSC2 portInfo entry structure
+ */
+struct fc_rpsc2_port_info_s {
+	__be32	pid;		/* PID */
+	u16	resvd1;
+	__be16	index;		/* port number / index */
+	u8	resvd2;
+	u8	type;		/* port type N/NL/... */
+	__be16	speed;		/* port Operating Speed */
+};
+
+/*
+ * RPSC2 Accept payload
+ */
+struct fc_rpsc2_acc_s {
+	u8        els_cmd;
+	u8        resvd;
+	__be16    num_pids; /* Number of pids in the request */
+	struct fc_rpsc2_port_info_s port_info[1]; /* port information */
+};
+
+/*
+ * bit fields so that multiple classes can be specified
+ */
+enum fc_cos {
+	FC_CLASS_2	= 0x04,
+	FC_CLASS_3	= 0x08,
+	FC_CLASS_2_3	= 0x0C,
+};
+
+/*
+ * symbolic name
+ */
+struct fc_symname_s {
+	u8         symname[FC_SYMNAME_MAX];
+};
+
+struct fc_alpabm_s {
+	u8	alpa_bm[FC_ALPA_MAX / 8];
+};
+
+/*
+ * protocol default timeout values
+ */
+#define FC_ED_TOV	2
+#define FC_REC_TOV	(FC_ED_TOV + 1)
+#define FC_RA_TOV	10
+#define FC_ELS_TOV	(2 * FC_RA_TOV)
+#define FC_FCCT_TOV	(3 * FC_RA_TOV)
+
+/*
+ * virtual fabric related defines
+ */
+#define FC_VF_ID_NULL    0	/*  must not be used as VF_ID */
+#define FC_VF_ID_MIN     1
+#define FC_VF_ID_MAX     0xEFF
+#define FC_VF_ID_CTL     0xFEF	/*  control VF_ID */
+
+/*
+ * Virtual Fabric Tagging header format
+ * @caution This is defined only in BIG ENDIAN format.
+ */
+struct fc_vft_s {
+	u32        r_ctl:8;
+	u32        ver:2;
+	u32        type:4;
+	u32        res_a:2;
+	u32        priority:3;
+	u32        vf_id:12;
+	u32        res_b:1;
+	u32        hopct:8;
+	u32        res_c:24;
+};
+
+/*
+ * FCP_CMND definitions
+ */
+#define FCP_CMND_CDB_LEN    16
+#define FCP_CMND_LUN_LEN    8
+
+struct fcp_cmnd_s {
+	struct scsi_lun	lun;		/* 64-bit LU number */
+	u8		crn;		/* command reference number */
+#ifdef __BIG_ENDIAN
+	u8		resvd:1,
+			priority:4,	/* FCP-3: SAM-3 priority */
+			taskattr:3;	/* scsi task attribute */
+#else
+	u8		taskattr:3,	/* scsi task attribute */
+			priority:4,	/* FCP-3: SAM-3 priority */
+			resvd:1;
+#endif
+	u8		tm_flags;	/* task management flags */
+#ifdef __BIG_ENDIAN
+	u8		addl_cdb_len:6,	/* additional CDB length words */
+			iodir:2;	/* read/write FCP_DATA IUs */
+#else
+	u8		iodir:2,	/* read/write FCP_DATA IUs */
+			addl_cdb_len:6;	/* additional CDB length */
+#endif
+	struct scsi_cdb_s      cdb;
+
+	__be32        fcp_dl;	/* bytes to be transferred */
+};
+
+#define fcp_cmnd_cdb_len(_cmnd) ((_cmnd)->addl_cdb_len * 4 + FCP_CMND_CDB_LEN)
+#define fcp_cmnd_fcpdl(_cmnd)	((&(_cmnd)->fcp_dl)[(_cmnd)->addl_cdb_len])
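+
+/*
+ * Illustrative sketch (not part of the original driver): with addl_cdb_len
+ * extra CDB words present, FCP_DL no longer sits at the fcp_dl offset of
+ * the fixed-size structure; fcp_cmnd_fcpdl() above indexes past those
+ * words, so the transfer length can be read uniformly:
+ */
+static inline u32
+fcp_cmnd_dl_example(struct fcp_cmnd_s *cmnd)
+{
+	return be32_to_cpu(fcp_cmnd_fcpdl(cmnd));
+}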
+
+/*
+ * struct fcp_cmnd_s .iodir field values
+ */
+enum fcp_iodir {
+	FCP_IODIR_NONE  = 0,
+	FCP_IODIR_WRITE = 1,
+	FCP_IODIR_READ  = 2,
+	FCP_IODIR_RW    = 3,
+};
+
+/*
+ * Task management flags field - only one bit shall be set
+ */
+enum fcp_tm_cmnd {
+	FCP_TM_ABORT_TASK_SET	= BIT(1),
+	FCP_TM_CLEAR_TASK_SET	= BIT(2),
+	FCP_TM_LUN_RESET	= BIT(4),
+	FCP_TM_TARGET_RESET	= BIT(5),	/* obsolete in FCP-3 */
+	FCP_TM_CLEAR_ACA	= BIT(6),
+};
+
+/*
+ * FCP_RSP residue flags
+ */
+enum fcp_residue {
+	FCP_NO_RESIDUE = 0,     /* no residue */
+	FCP_RESID_OVER = 1,     /* more data existed than was transferred */
+	FCP_RESID_UNDER = 2,    /* less data than requested */
+};
+
+struct fcp_rspinfo_s {
+	u32        res0:24;
+	u32        rsp_code:8;		/* response code (as above) */
+	u32        res1;
+};
+
+struct fcp_resp_s {
+	u32        reserved[2];		/* 2 words reserved */
+	u16        reserved2;
+#ifdef __BIG_ENDIAN
+	u8         reserved3:3;
+	u8         fcp_conf_req:1;	/* FCP_CONF is requested */
+	u8         resid_flags:2;	/* underflow/overflow */
+	u8         sns_len_valid:1;	/* sense len is valid */
+	u8         rsp_len_valid:1;	/* response len is valid */
+#else
+	u8         rsp_len_valid:1;	/* response len is valid */
+	u8         sns_len_valid:1;	/* sense len is valid */
+	u8         resid_flags:2;	/* underflow/overflow */
+	u8         fcp_conf_req:1;	/* FCP_CONF is requested */
+	u8         reserved3:3;
+#endif
+	u8         scsi_status;		/* one byte SCSI status */
+	u32        residue;		/* residual data bytes */
+	u32        sns_len;		/* length of sense info */
+	u32        rsp_len;		/* length of response info */
+};
+
+#define fcp_snslen(__fcprsp)	((__fcprsp)->sns_len_valid ?		\
+					(__fcprsp)->sns_len : 0)
+#define fcp_rsplen(__fcprsp)	((__fcprsp)->rsp_len_valid ?		\
+					(__fcprsp)->rsp_len : 0)
+#define fcp_rspinfo(__fcprsp)	((struct fcp_rspinfo_s *)((__fcprsp) + 1))
+#define fcp_snsinfo(__fcprsp)	(((u8 *)fcp_rspinfo(__fcprsp)) +	\
+						fcp_rsplen(__fcprsp))
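+
+/*
+ * Illustrative sketch (not part of the original driver): locating sense
+ * data in a received FCP_RSP with the accessors above, assuming the
+ * length fields have been converted to host byte order.
+ */
+static inline u8 *
+fcp_resp_sense_example(struct fcp_resp_s *rsp, u32 *sense_len)
+{
+	*sense_len = fcp_snslen(rsp);	/* zero when sns_len_valid is clear */
+	return *sense_len ? fcp_snsinfo(rsp) : NULL;
+}
+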
+/*
+ * CT
+ */
+struct ct_hdr_s {
+	u32	rev_id:8;	/* Revision of the CT */
+	u32	in_id:24;	/* Initiator Id */
+	u32	gs_type:8;	/* Generic service Type */
+	u32	gs_sub_type:8;	/* Generic service sub type */
+	u32	options:8;	/* options */
+	u32	rsvrd:8;	/* reserved */
+	u32	cmd_rsp_code:16;/* ct command/response code */
+	u32	max_res_size:16;/* maximum/residual size */
+	u32	frag_id:8;	/* fragment ID */
+	u32	reason_code:8;	/* reason code */
+	u32	exp_code:8;	/* explanation code */
+	u32	vendor_unq:8;	/* vendor unique */
+};
+
+/*
+ * defines for the Revision
+ */
+enum {
+	CT_GS3_REVISION = 0x01,
+};
+
+/*
+ * defines for gs_type
+ */
+enum {
+	CT_GSTYPE_KEYSERVICE	= 0xF7,
+	CT_GSTYPE_ALIASSERVICE	= 0xF8,
+	CT_GSTYPE_MGMTSERVICE	= 0xFA,
+	CT_GSTYPE_TIMESERVICE	= 0xFB,
+	CT_GSTYPE_DIRSERVICE	= 0xFC,
+};
+
+/*
+ * defines for gs_sub_type for gs type directory service
+ */
+enum {
+	CT_GSSUBTYPE_NAMESERVER = 0x02,
+};
+
+/*
+ * defines for gs_sub_type for gs type management service
+ */
+enum {
+	CT_GSSUBTYPE_CFGSERVER	= 0x01,
+	CT_GSSUBTYPE_UNZONED_NS = 0x02,
+	CT_GSSUBTYPE_ZONESERVER = 0x03,
+	CT_GSSUBTYPE_LOCKSERVER = 0x04,
+	CT_GSSUBTYPE_HBA_MGMTSERVER = 0x10,	/* for FDMI */
+};
+
+/*
+ * defines for CT response code field
+ */
+enum {
+	CT_RSP_REJECT = 0x8001,
+	CT_RSP_ACCEPT = 0x8002,
+};
+
+/*
+ * definitions for CT reason code
+ */
+enum {
+	CT_RSN_INV_CMD		= 0x01,
+	CT_RSN_INV_VER		= 0x02,
+	CT_RSN_LOGIC_ERR	= 0x03,
+	CT_RSN_INV_SIZE		= 0x04,
+	CT_RSN_LOGICAL_BUSY	= 0x05,
+	CT_RSN_PROTO_ERR	= 0x07,
+	CT_RSN_UNABLE_TO_PERF	= 0x09,
+	CT_RSN_NOT_SUPP		= 0x0B,
+	CT_RSN_SERVER_NOT_AVBL  = 0x0D,
+	CT_RSN_SESSION_COULD_NOT_BE_ESTBD = 0x0E,
+	CT_RSN_VENDOR_SPECIFIC  = 0xFF,
+
+};
+
+/*
+ * definitions for explanations code for Name server
+ */
+enum {
+	CT_NS_EXP_NOADDITIONAL	= 0x00,
+	CT_NS_EXP_ID_NOT_REG	= 0x01,
+	CT_NS_EXP_PN_NOT_REG	= 0x02,
+	CT_NS_EXP_NN_NOT_REG	= 0x03,
+	CT_NS_EXP_CS_NOT_REG	= 0x04,
+	CT_NS_EXP_IPN_NOT_REG	= 0x05,
+	CT_NS_EXP_IPA_NOT_REG	= 0x06,
+	CT_NS_EXP_FT_NOT_REG	= 0x07,
+	CT_NS_EXP_SPN_NOT_REG	= 0x08,
+	CT_NS_EXP_SNN_NOT_REG	= 0x09,
+	CT_NS_EXP_PT_NOT_REG	= 0x0A,
+	CT_NS_EXP_IPP_NOT_REG	= 0x0B,
+	CT_NS_EXP_FPN_NOT_REG	= 0x0C,
+	CT_NS_EXP_HA_NOT_REG	= 0x0D,
+	CT_NS_EXP_FD_NOT_REG	= 0x0E,
+	CT_NS_EXP_FF_NOT_REG	= 0x0F,
+	CT_NS_EXP_ACCESSDENIED	= 0x10,
+	CT_NS_EXP_UNACCEPTABLE_ID = 0x11,
+	CT_NS_EXP_DATABASEEMPTY		= 0x12,
+	CT_NS_EXP_NOT_REG_IN_SCOPE	= 0x13,
+	CT_NS_EXP_DOM_ID_NOT_PRESENT	= 0x14,
+	CT_NS_EXP_PORT_NUM_NOT_PRESENT	= 0x15,
+	CT_NS_EXP_NO_DEVICE_ATTACHED	= 0x16
+};
+
+/*
+ * definitions for the explanation code for all servers
+ */
+enum {
+	CT_EXP_AUTH_EXCEPTION		= 0xF1,
+	CT_EXP_DB_FULL			= 0xF2,
+	CT_EXP_DB_EMPTY			= 0xF3,
+	CT_EXP_PROCESSING_REQ		= 0xF4,
+	CT_EXP_UNABLE_TO_VERIFY_CONN	= 0xF5,
+	CT_EXP_DEVICES_NOT_IN_CMN_ZONE  = 0xF6
+};
+
+/*
+ * Command codes for Name server
+ */
+enum {
+	GS_GID_PN	= 0x0121,	/* Get Id on port name */
+	GS_GPN_ID	= 0x0112,	/* Get port name on ID */
+	GS_GNN_ID	= 0x0113,	/* Get node name on ID */
+	GS_GID_FT	= 0x0171,	/* Get Id on FC4 type */
+	GS_GSPN_ID	= 0x0118,	/* Get symbolic PN on ID */
+	GS_RFT_ID	= 0x0217,	/* Register fc4type on ID */
+	GS_RSPN_ID	= 0x0218,	/* Register symbolic PN on ID */
+	GS_RSNN_NN	= 0x0239,	/* Register symbolic NN on NN */
+	GS_RPN_ID	= 0x0212,	/* Register port name */
+	GS_RNN_ID	= 0x0213,	/* Register node name */
+	GS_RCS_ID	= 0x0214,	/* Register class of service */
+	GS_RPT_ID	= 0x021A,	/* Register port type */
+	GS_GA_NXT	= 0x0100,	/* Get all next */
+	GS_RFF_ID	= 0x021F,	/* Register FC4 Feature		*/
+};
+
+struct fcgs_id_req_s {
+	u32 rsvd:8;
+	u32 dap:24; /* port identifier */
+};
+#define fcgs_gpnid_req_t struct fcgs_id_req_s
+#define fcgs_gnnid_req_t struct fcgs_id_req_s
+#define fcgs_gspnid_req_t struct fcgs_id_req_s
+
+struct fcgs_gidpn_req_s {
+	wwn_t	port_name;	/* port wwn */
+};
+
+struct fcgs_gidpn_resp_s {
+	u32	rsvd:8;
+	u32	dap:24;		/* port identifier */
+};
+
+/*
+ * RFT_ID
+ */
+struct fcgs_rftid_req_s {
+	u32	rsvd:8;
+	u32	dap:24;		/* port identifier */
+	__be32	fc4_type[8];	/* fc4 types */
+};
+
+/*
+ * RFF_ID : Register FC4 features.
+ */
+#define FC_GS_FCP_FC4_FEATURE_INITIATOR  0x02
+#define FC_GS_FCP_FC4_FEATURE_TARGET	 0x01
+
+struct fcgs_rffid_req_s {
+	u32	rsvd:8;
+	u32	dap:24;		/* port identifier */
+	u32	rsvd1:16;
+	u32	fc4ftr_bits:8;	/* fc4 feature bits */
+	u32	fc4_type:8;		/* corresponding FC4 Type */
+};
+
+/*
+ * GID_FT Request
+ */
+struct fcgs_gidft_req_s {
+	u8	reserved;
+	u8	domain_id;	/* domain, 0 - all fabric */
+	u8	area_id;	/* area, 0 - whole domain */
+	u8	fc4_type;	/* FC_TYPE_FCP for SCSI devices */
+};
+
+/*
+ * GID_FT Response
+ */
+struct fcgs_gidft_resp_s {
+	u8	last:1;		/* last port identifier flag */
+	u8	reserved:7;
+	u32	pid:24;		/* port identifier */
+};
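+
+/*
+ * Illustrative sketch (not part of the original driver): a GID_FT accept
+ * carries consecutive 4-byte entries, the final one flagged by 'last'.
+ * A bounded walk over a decoded response buffer:
+ */
+static inline u32
+fcgs_gidft_num_entries_example(struct fcgs_gidft_resp_s *entries,
+			       u32 max_entries)
+{
+	u32 n;
+
+	for (n = 0; n < max_entries; n++)
+		if (entries[n].last)
+			return n + 1;
+	return n;
+}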
+
+/*
+ * RSPN_ID
+ */
+struct fcgs_rspnid_req_s {
+	u32	rsvd:8;
+	u32	dap:24;		/* port identifier */
+	u8	spn_len;	/* symbolic port name length */
+	u8	spn[256];	/* symbolic port name */
+};
+
+/*
+ * RSNN_NN
+ */
+struct fcgs_rsnn_nn_req_s {
+	wwn_t	node_name;	/* Node name */
+	u8	snn_len;	/* symbolic node name length */
+	u8	snn[256];	/* symbolic node name */
+};
+
+/*
+ * RPN_ID
+ */
+struct fcgs_rpnid_req_s {
+	u32	rsvd:8;
+	u32	port_id:24;
+	wwn_t	port_name;
+};
+
+/*
+ * RNN_ID
+ */
+struct fcgs_rnnid_req_s {
+	u32	rsvd:8;
+	u32	port_id:24;
+	wwn_t	node_name;
+};
+
+/*
+ * RCS_ID
+ */
+struct fcgs_rcsid_req_s {
+	u32	rsvd:8;
+	u32	port_id:24;
+	u32	cos;
+};
+
+/*
+ * RPT_ID
+ */
+struct fcgs_rptid_req_s {
+	u32	rsvd:8;
+	u32	port_id:24;
+	u32	port_type:8;
+	u32	rsvd1:24;
+};
+
+/*
+ * GA_NXT Request
+ */
+struct fcgs_ganxt_req_s {
+	u32	rsvd:8;
+	u32	port_id:24;
+};
+
+/*
+ * GA_NXT Response
+ */
+struct fcgs_ganxt_rsp_s {
+	u32		port_type:8;	/* Port Type */
+	u32		port_id:24;	/* Port Identifier */
+	wwn_t		port_name;	/* Port Name */
+	u8		spn_len;	/* Length of Symbolic Port Name */
+	char		spn[255];	/* Symbolic Port Name */
+	wwn_t		node_name;	/* Node Name */
+	u8		snn_len;	/* Length of Symbolic Node Name */
+	char		snn[255];	/* Symbolic Node Name */
+	u8		ipa[8];		/* Initial Process Associator */
+	u8		ip[16];		/* IP Address */
+	u32		cos;		/* Class of Service */
+	u32		fc4types[8];	/* FC-4 TYPEs */
+	wwn_t		fabric_port_name; /* Fabric Port Name */
+	u32		rsvd:8;		/* Reserved */
+	u32		hard_addr:24;	/* Hard Address */
+};
+
+/*
+ * Command codes for Fabric Configuration Server
+ */
+enum {
+	GS_FC_GFN_CMD	= 0x0114,	/* GS FC Get Fabric Name  */
+	GS_FC_GMAL_CMD	= 0x0116,	/* GS FC GMAL  */
+	GS_FC_TRACE_CMD	= 0x0400,	/* GS FC Trace Route */
+	GS_FC_PING_CMD	= 0x0401,	/* GS FC Ping */
+};
+
+/*
+ * GMAL Command (Get (Interconnect Element) Management Address List),
+ * used to retrieve the IP address of a switch.
+ */
+#define CT_GMAL_RESP_PREFIX_TELNET	 "telnet://"
+#define CT_GMAL_RESP_PREFIX_HTTP	 "http://"
+
+/*  GMAL/GFN request */
+struct fcgs_req_s {
+	wwn_t    wwn;   /* PWWN/NWWN */
+};
+
+#define fcgs_gmal_req_t struct fcgs_req_s
+#define fcgs_gfn_req_t struct fcgs_req_s
+
+/* Accept Response to GMAL */
+struct fcgs_gmal_resp_s {
+	__be32	ms_len;   /* Num of entries */
+	u8	ms_ma[256];
+};
+
+struct fcgs_gmal_entry_s {
+	u8  len;
+	u8  prefix[7]; /* like "http://" */
+	u8  ip_addr[248];
+};
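+
+/*
+ * Illustrative sketch (not part of the original driver): checking whether
+ * the first management address in a GMAL accept is an HTTP URL, assuming
+ * ms_ma holds ms_len consecutive entries laid out as above.
+ */
+static inline int
+fcgs_gmal_first_is_http_example(struct fcgs_gmal_resp_s *resp)
+{
+	struct fcgs_gmal_entry_s *entry =
+		(struct fcgs_gmal_entry_s *)resp->ms_ma;
+
+	return be32_to_cpu(resp->ms_len) &&
+		!memcmp(entry->prefix, CT_GMAL_RESP_PREFIX_HTTP,
+			sizeof(entry->prefix));
+}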
+
+/*
+ * FDMI Command Codes
+ */
+#define	FDMI_GRHL		0x0100
+#define	FDMI_GHAT		0x0101
+#define	FDMI_GRPL		0x0102
+#define	FDMI_GPAT		0x0110
+#define	FDMI_RHBA		0x0200
+#define	FDMI_RHAT		0x0201
+#define	FDMI_RPRT		0x0210
+#define	FDMI_RPA		0x0211
+#define	FDMI_DHBA		0x0300
+#define	FDMI_DPRT		0x0310
+
+/*
+ * FDMI reason codes
+ */
+#define	FDMI_NO_ADDITIONAL_EXP		0x00
+#define	FDMI_HBA_ALREADY_REG		0x10
+#define	FDMI_HBA_ATTRIB_NOT_REG		0x11
+#define	FDMI_HBA_ATTRIB_MULTIPLE	0x12
+#define	FDMI_HBA_ATTRIB_LENGTH_INVALID	0x13
+#define	FDMI_HBA_ATTRIB_NOT_PRESENT	0x14
+#define	FDMI_PORT_ORIG_NOT_IN_LIST	0x15
+#define	FDMI_PORT_HBA_NOT_IN_LIST	0x16
+#define	FDMI_PORT_ATTRIB_NOT_REG	0x20
+#define	FDMI_PORT_NOT_REG		0x21
+#define	FDMI_PORT_ATTRIB_MULTIPLE	0x22
+#define	FDMI_PORT_ATTRIB_LENGTH_INVALID	0x23
+#define	FDMI_PORT_ALREADY_REGISTEREED	0x24
+
+/*
+ * FDMI Transmission Speed Mask values
+ */
+#define	FDMI_TRANS_SPEED_1G		0x00000001
+#define	FDMI_TRANS_SPEED_2G		0x00000002
+#define	FDMI_TRANS_SPEED_10G		0x00000004
+#define	FDMI_TRANS_SPEED_4G		0x00000008
+#define	FDMI_TRANS_SPEED_8G		0x00000010
+#define	FDMI_TRANS_SPEED_16G		0x00000020
+#define	FDMI_TRANS_SPEED_UNKNOWN	0x00008000
+
+/*
+ * FDMI HBA attribute types
+ */
+enum fdmi_hba_attribute_type {
+	FDMI_HBA_ATTRIB_NODENAME = 1,	/* 0x0001 */
+	FDMI_HBA_ATTRIB_MANUFACTURER,	/* 0x0002 */
+	FDMI_HBA_ATTRIB_SERIALNUM,	/* 0x0003 */
+	FDMI_HBA_ATTRIB_MODEL,		/* 0x0004 */
+	FDMI_HBA_ATTRIB_MODEL_DESC,	/* 0x0005 */
+	FDMI_HBA_ATTRIB_HW_VERSION,	/* 0x0006 */
+	FDMI_HBA_ATTRIB_DRIVER_VERSION,	/* 0x0007 */
+	FDMI_HBA_ATTRIB_ROM_VERSION,	/* 0x0008 */
+	FDMI_HBA_ATTRIB_FW_VERSION,	/* 0x0009 */
+	FDMI_HBA_ATTRIB_OS_NAME,	/* 0x000A */
+	FDMI_HBA_ATTRIB_MAX_CT,		/* 0x000B */
+	FDMI_HBA_ATTRIB_NODE_SYM_NAME,  /* 0x000C */
+	FDMI_HBA_ATTRIB_VENDOR_INFO,    /* 0x000D */
+	FDMI_HBA_ATTRIB_NUM_PORTS,  /* 0x000E */
+	FDMI_HBA_ATTRIB_FABRIC_NAME,    /* 0x000F */
+	FDMI_HBA_ATTRIB_BIOS_VER,   /* 0x0010 */
+	FDMI_HBA_ATTRIB_VENDOR_ID = 0x00E0,
+
+	FDMI_HBA_ATTRIB_MAX_TYPE
+};
+
+/*
+ * FDMI Port attribute types
+ */
+enum fdmi_port_attribute_type {
+	FDMI_PORT_ATTRIB_FC4_TYPES = 1,	/* 0x0001 */
+	FDMI_PORT_ATTRIB_SUPP_SPEED,	/* 0x0002 */
+	FDMI_PORT_ATTRIB_PORT_SPEED,	/* 0x0003 */
+	FDMI_PORT_ATTRIB_FRAME_SIZE,	/* 0x0004 */
+	FDMI_PORT_ATTRIB_DEV_NAME,	/* 0x0005 */
+	FDMI_PORT_ATTRIB_HOST_NAME,	/* 0x0006 */
+	FDMI_PORT_ATTRIB_NODE_NAME,     /* 0x0007 */
+	FDMI_PORT_ATTRIB_PORT_NAME,     /* 0x0008 */
+	FDMI_PORT_ATTRIB_PORT_SYM_NAME, /* 0x0009 */
+	FDMI_PORT_ATTRIB_PORT_TYPE,     /* 0x000A */
+	FDMI_PORT_ATTRIB_SUPP_COS,      /* 0x000B */
+	FDMI_PORT_ATTRIB_PORT_FAB_NAME, /* 0x000C */
+	FDMI_PORT_ATTRIB_PORT_FC4_TYPE, /* 0x000D */
+	FDMI_PORT_ATTRIB_PORT_STATE = 0x101,    /* 0x0101 */
+	FDMI_PORT_ATTRIB_PORT_NUM_RPRT = 0x102, /* 0x0102 */
+
+	FDMI_PORT_ATTR_MAX_TYPE
+};
+
+/*
+ * FDMI attribute
+ */
+struct fdmi_attr_s {
+	__be16        type;
+	__be16        len;
+	u8         value[1];
+};
+
+/*
+ * HBA Attribute Block
+ */
+struct fdmi_hba_attr_s {
+	__be32        attr_count;	/* # of attributes */
+	struct fdmi_attr_s hba_attr;	/* n attributes */
+};
+
+/*
+ * Registered Port List
+ */
+struct fdmi_port_list_s {
+	__be32		num_ports;	/* number of port entries */
+	wwn_t		port_entry;	/* one or more */
+};
+
+/*
+ * Port Attribute Block
+ */
+struct fdmi_port_attr_s {
+	__be32        attr_count;	/* # of attributes */
+	struct fdmi_attr_s port_attr;	/* n attributes */
+};
+
+/*
+ * FDMI Register HBA Attributes
+ */
+struct fdmi_rhba_s {
+	wwn_t			hba_id;		/* HBA Identifier */
+	struct fdmi_port_list_s port_list;	/* Registered Port List */
+	struct fdmi_hba_attr_s hba_attr_blk;	/* HBA attribute block */
+};
+
+/*
+ * FDMI Register Port
+ */
+struct fdmi_rprt_s {
+	wwn_t			hba_id;		/* HBA Identifier */
+	wwn_t			port_name;	/* Port wwn */
+	struct fdmi_port_attr_s port_attr_blk;	/* Port Attr Block */
+};
+
+/*
+ * FDMI Register Port Attributes
+ */
+struct fdmi_rpa_s {
+	wwn_t			port_name;	/* port wwn */
+	struct fdmi_port_attr_s port_attr_blk;	/* Port Attr Block */
+};
+
+#pragma pack()
+
+#endif	/* __BFA_FC_H__ */
diff --git a/drivers/scsi/bfa/bfa_fcbuild.c b/drivers/scsi/bfa/bfa_fcbuild.c
new file mode 100644
index 0000000..d3b00a4
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fcbuild.c
@@ -0,0 +1,1464 @@
+/*
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014- QLogic Corporation.
+ * All rights reserved
+ * www.qlogic.com
+ *
+ * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+/*
+ * fcbuild.c - FC link service frame building and parsing routines
+ */
+
+#include "bfad_drv.h"
+#include "bfa_fcbuild.h"
+
+/*
+ * static build functions
+ */
+static void     fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
+				 __be16 ox_id);
+static void     fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
+				 __be16 ox_id);
+static struct fchs_s fc_els_req_tmpl;
+static struct fchs_s fc_els_rsp_tmpl;
+static struct fchs_s fc_bls_req_tmpl;
+static struct fchs_s fc_bls_rsp_tmpl;
+static struct fc_ba_acc_s ba_acc_tmpl;
+static struct fc_logi_s plogi_tmpl;
+static struct fc_prli_s prli_tmpl;
+static struct fc_rrq_s rrq_tmpl;
+static struct fchs_s fcp_fchs_tmpl;
+
+void
+fcbuild_init(void)
+{
+	/*
+	 * fc_els_req_tmpl
+	 */
+	fc_els_req_tmpl.routing = FC_RTG_EXT_LINK;
+	fc_els_req_tmpl.cat_info = FC_CAT_LD_REQUEST;
+	fc_els_req_tmpl.type = FC_TYPE_ELS;
+	fc_els_req_tmpl.f_ctl =
+		bfa_hton3b(FCTL_SEQ_INI | FCTL_FS_EXCH | FCTL_END_SEQ |
+			      FCTL_SI_XFER);
+	fc_els_req_tmpl.rx_id = FC_RXID_ANY;
+
+	/*
+	 * fc_els_rsp_tmpl
+	 */
+	fc_els_rsp_tmpl.routing = FC_RTG_EXT_LINK;
+	fc_els_rsp_tmpl.cat_info = FC_CAT_LD_REPLY;
+	fc_els_rsp_tmpl.type = FC_TYPE_ELS;
+	fc_els_rsp_tmpl.f_ctl =
+		bfa_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH |
+			      FCTL_END_SEQ | FCTL_SI_XFER);
+	fc_els_rsp_tmpl.rx_id = FC_RXID_ANY;
+
+	/*
+	 * fc_bls_req_tmpl
+	 */
+	fc_bls_req_tmpl.routing = FC_RTG_BASIC_LINK;
+	fc_bls_req_tmpl.type = FC_TYPE_BLS;
+	fc_bls_req_tmpl.f_ctl = bfa_hton3b(FCTL_END_SEQ | FCTL_SI_XFER);
+	fc_bls_req_tmpl.rx_id = FC_RXID_ANY;
+
+	/*
+	 * fc_bls_rsp_tmpl
+	 */
+	fc_bls_rsp_tmpl.routing = FC_RTG_BASIC_LINK;
+	fc_bls_rsp_tmpl.cat_info = FC_CAT_BA_ACC;
+	fc_bls_rsp_tmpl.type = FC_TYPE_BLS;
+	fc_bls_rsp_tmpl.f_ctl =
+		bfa_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH |
+			      FCTL_END_SEQ | FCTL_SI_XFER);
+	fc_bls_rsp_tmpl.rx_id = FC_RXID_ANY;
+
+	/*
+	 * ba_acc_tmpl
+	 */
+	ba_acc_tmpl.seq_id_valid = 0;
+	ba_acc_tmpl.low_seq_cnt = 0;
+	ba_acc_tmpl.high_seq_cnt = 0xFFFF;
+
+	/*
+	 * plogi_tmpl
+	 */
+	plogi_tmpl.csp.verhi = FC_PH_VER_PH_3;
+	plogi_tmpl.csp.verlo = FC_PH_VER_4_3;
+	plogi_tmpl.csp.ciro = 0x1;
+	plogi_tmpl.csp.cisc = 0x0;
+	plogi_tmpl.csp.altbbcred = 0x0;
+	plogi_tmpl.csp.conseq = cpu_to_be16(0x00FF);
+	plogi_tmpl.csp.ro_bitmap = cpu_to_be16(0x0002);
+	plogi_tmpl.csp.e_d_tov = cpu_to_be32(2000);
+
+	plogi_tmpl.class3.class_valid = 1;
+	plogi_tmpl.class3.sequential = 1;
+	plogi_tmpl.class3.conseq = 0xFF;
+	plogi_tmpl.class3.ospx = 1;
+
+	/*
+	 * prli_tmpl
+	 */
+	prli_tmpl.command = FC_ELS_PRLI;
+	prli_tmpl.pglen = 0x10;
+	prli_tmpl.pagebytes = cpu_to_be16(0x0014);
+	prli_tmpl.parampage.type = FC_TYPE_FCP;
+	prli_tmpl.parampage.imagepair = 1;
+	prli_tmpl.parampage.servparams.rxrdisab = 1;
+
+	/*
+	 * rrq_tmpl
+	 */
+	rrq_tmpl.els_cmd.els_code = FC_ELS_RRQ;
+
+	/*
+	 * fcp_fchs_tmpl
+	 */
+	fcp_fchs_tmpl.routing = FC_RTG_FC4_DEV_DATA;
+	fcp_fchs_tmpl.cat_info = FC_CAT_UNSOLICIT_CMD;
+	fcp_fchs_tmpl.type = FC_TYPE_FCP;
+	fcp_fchs_tmpl.f_ctl =
+		bfa_hton3b(FCTL_FS_EXCH | FCTL_END_SEQ | FCTL_SI_XFER);
+	fcp_fchs_tmpl.seq_id = 1;
+	fcp_fchs_tmpl.rx_id = FC_RXID_ANY;
+}
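+
+/*
+ * The builders below copy one of the templates initialized above and
+ * then patch in the frame-specific fields (d_id, s_id, ox_id and the
+ * payload), so fcbuild_init() must run once before any fc_*_build()
+ * call. A minimal (hypothetical) caller sketch:
+ *
+ *	fcbuild_init();
+ *	fc_els_req_build(&fchs, d_id, s_id, ox_id);
+ */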
+
+static void
+fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u32 ox_id)
+{
+	memset(fchs, 0, sizeof(struct fchs_s));
+
+	fchs->routing = FC_RTG_FC4_DEV_DATA;
+	fchs->cat_info = FC_CAT_UNSOLICIT_CTRL;
+	fchs->type = FC_TYPE_SERVICES;
+	fchs->f_ctl =
+		bfa_hton3b(FCTL_SEQ_INI | FCTL_FS_EXCH | FCTL_END_SEQ |
+			      FCTL_SI_XFER);
+	fchs->rx_id = FC_RXID_ANY;
+	fchs->d_id = (d_id);
+	fchs->s_id = (s_id);
+	fchs->ox_id = cpu_to_be16(ox_id);
+
+	/*
+	 * @todo no need to set ox_id for request
+	 *       no need to set rx_id for response
+	 */
+}
+
+static void
+fc_gsresp_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
+{
+	memset(fchs, 0, sizeof(struct fchs_s));
+
+	fchs->routing = FC_RTG_FC4_DEV_DATA;
+	fchs->cat_info = FC_CAT_SOLICIT_CTRL;
+	fchs->type = FC_TYPE_SERVICES;
+	fchs->f_ctl =
+		bfa_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH |
+			   FCTL_END_SEQ | FCTL_SI_XFER);
+	fchs->d_id = d_id;
+	fchs->s_id = s_id;
+	fchs->ox_id = ox_id;
+}
+
+void
+fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id)
+{
+	memcpy(fchs, &fc_els_req_tmpl, sizeof(struct fchs_s));
+	fchs->d_id = (d_id);
+	fchs->s_id = (s_id);
+	fchs->ox_id = cpu_to_be16(ox_id);
+}
+
+static void
+fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id)
+{
+	memcpy(fchs, &fc_els_rsp_tmpl, sizeof(struct fchs_s));
+	fchs->d_id = d_id;
+	fchs->s_id = s_id;
+	fchs->ox_id = ox_id;
+}
+
+enum fc_parse_status
+fc_els_rsp_parse(struct fchs_s *fchs, int len)
+{
+	struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
+	struct fc_ls_rjt_s *ls_rjt = (struct fc_ls_rjt_s *) els_cmd;
+
+	switch (els_cmd->els_code) {
+	case FC_ELS_LS_RJT:
+		if (ls_rjt->reason_code == FC_LS_RJT_RSN_LOGICAL_BUSY)
+			return FC_PARSE_BUSY;
+		else
+			return FC_PARSE_FAILURE;
+
+	case FC_ELS_ACC:
+		return FC_PARSE_OK;
+	}
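+	/* unknown ELS codes fall through and are treated as success */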
+	return FC_PARSE_OK;
+}
+
+static void
+fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id)
+{
+	memcpy(fchs, &fc_bls_rsp_tmpl, sizeof(struct fchs_s));
+	fchs->d_id = d_id;
+	fchs->s_id = s_id;
+	fchs->ox_id = ox_id;
+}
+
+static          u16
+fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
+		 __be16 ox_id, wwn_t port_name, wwn_t node_name,
+		 u16 pdu_size, u16 bb_cr, u8 els_code)
+{
+	struct fc_logi_s *plogi = (struct fc_logi_s *) (pld);
+
+	memcpy(plogi, &plogi_tmpl, sizeof(struct fc_logi_s));
+
+	/* For FC AL bb_cr is 0 and altbbcred is 1 */
+	if (!bb_cr)
+		plogi->csp.altbbcred = 1;
+
+	plogi->els_cmd.els_code = els_code;
+	if (els_code == FC_ELS_PLOGI)
+		fc_els_req_build(fchs, d_id, s_id, ox_id);
+	else
+		fc_els_rsp_build(fchs, d_id, s_id, ox_id);
+
+	plogi->csp.rxsz = plogi->class3.rxsz = cpu_to_be16(pdu_size);
+	plogi->csp.bbcred  = cpu_to_be16(bb_cr);
+
+	memcpy(&plogi->port_name, &port_name, sizeof(wwn_t));
+	memcpy(&plogi->node_name, &node_name, sizeof(wwn_t));
+
+	return sizeof(struct fc_logi_s);
+}
+
+u16
+fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
+		u16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size,
+	       u8 set_npiv, u8 set_auth, u16 local_bb_credits)
+{
+	u32        d_id = bfa_hton3b(FC_FABRIC_PORT);
+	__be32	*vvl_info;
+
+	memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
+
+	flogi->els_cmd.els_code = FC_ELS_FLOGI;
+	fc_els_req_build(fchs, d_id, s_id, ox_id);
+
+	flogi->csp.rxsz = flogi->class3.rxsz = cpu_to_be16(pdu_size);
+	flogi->port_name = port_name;
+	flogi->node_name = node_name;
+
+	/*
+	 * Set the NPIV Capability Bit (word 1, bit 31) of Common
+	 * Service Parameters.
+	 */
+	flogi->csp.ciro = set_npiv;
+
+	/* set AUTH capability */
+	flogi->csp.security = set_auth;
+
+	flogi->csp.bbcred = cpu_to_be16(local_bb_credits);
+
+	/* Set brcd token in VVL */
+	vvl_info = (u32 *)&flogi->vvl[0];
+
+	/* set the flag to indicate the presence of VVL */
+	flogi->csp.npiv_supp    = 1; /* @todo: field name is not correct */
+	vvl_info[0]	= cpu_to_be32(FLOGI_VVL_BRCD);
+
+	return sizeof(struct fc_logi_s);
+}
+
+u16
+fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
+		   __be16 ox_id, wwn_t port_name, wwn_t node_name,
+		   u16 pdu_size, u16 local_bb_credits, u8 bb_scn)
+{
+	u32        d_id = 0;
+	u16	   bbscn_rxsz = (bb_scn << 12) | pdu_size;
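+	/* BB_SC_N rides in the top 4 bits of the 16-bit rxsz word: e.g.
+	 * bb_scn 2 with pdu_size 2048 (0x800) packs to 0x2800 */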
+
+	memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
+	fc_els_rsp_build(fchs, d_id, s_id, ox_id);
+
+	flogi->els_cmd.els_code = FC_ELS_ACC;
+	flogi->class3.rxsz = cpu_to_be16(pdu_size);
+	flogi->csp.rxsz  = cpu_to_be16(bbscn_rxsz);	/* bb_scn/rxsz */
+	flogi->port_name = port_name;
+	flogi->node_name = node_name;
+
+	flogi->csp.bbcred = cpu_to_be16(local_bb_credits);
+
+	return sizeof(struct fc_logi_s);
+}
+
+u16
+fc_fdisc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
+		u16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size)
+{
+	u32        d_id = bfa_hton3b(FC_FABRIC_PORT);
+
+	memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
+
+	flogi->els_cmd.els_code = FC_ELS_FDISC;
+	fc_els_req_build(fchs, d_id, s_id, ox_id);
+
+	flogi->csp.rxsz = flogi->class3.rxsz = cpu_to_be16(pdu_size);
+	flogi->port_name = port_name;
+	flogi->node_name = node_name;
+
+	return sizeof(struct fc_logi_s);
+}
+
+u16
+fc_plogi_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
+	       u16 ox_id, wwn_t port_name, wwn_t node_name,
+	       u16 pdu_size, u16 bb_cr)
+{
+	return fc_plogi_x_build(fchs, pld, d_id, s_id, ox_id, port_name,
+				node_name, pdu_size, bb_cr, FC_ELS_PLOGI);
+}
+
+u16
+fc_plogi_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
+		   u16 ox_id, wwn_t port_name, wwn_t node_name,
+		   u16 pdu_size, u16 bb_cr)
+{
+	return fc_plogi_x_build(fchs, pld, d_id, s_id, ox_id, port_name,
+				node_name, pdu_size, bb_cr, FC_ELS_ACC);
+}
+
+enum fc_parse_status
+fc_plogi_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name)
+{
+	struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
+	struct fc_logi_s *plogi;
+	struct fc_ls_rjt_s *ls_rjt;
+
+	switch (els_cmd->els_code) {
+	case FC_ELS_LS_RJT:
+		ls_rjt = (struct fc_ls_rjt_s *) (fchs + 1);
+		if (ls_rjt->reason_code == FC_LS_RJT_RSN_LOGICAL_BUSY)
+			return FC_PARSE_BUSY;
+		else
+			return FC_PARSE_FAILURE;
+	case FC_ELS_ACC:
+		plogi = (struct fc_logi_s *) (fchs + 1);
+		if (len < sizeof(struct fc_logi_s))
+			return FC_PARSE_FAILURE;
+
+		if (!wwn_is_equal(plogi->port_name, port_name))
+			return FC_PARSE_FAILURE;
+
+		if (!plogi->class3.class_valid)
+			return FC_PARSE_FAILURE;
+
+		if (be16_to_cpu(plogi->class3.rxsz) < (FC_MIN_PDUSZ))
+			return FC_PARSE_FAILURE;
+
+		return FC_PARSE_OK;
+	default:
+		return FC_PARSE_FAILURE;
+	}
+}
+
+enum fc_parse_status
+fc_plogi_parse(struct fchs_s *fchs)
+{
+	struct fc_logi_s *plogi = (struct fc_logi_s *) (fchs + 1);
+
+	if (plogi->class3.class_valid != 1)
+		return FC_PARSE_FAILURE;
+
+	if ((be16_to_cpu(plogi->class3.rxsz) < FC_MIN_PDUSZ)
+	    || (be16_to_cpu(plogi->class3.rxsz) > FC_MAX_PDUSZ)
+	    || (plogi->class3.rxsz == 0))
+		return FC_PARSE_FAILURE;
+
+	return FC_PARSE_OK;
+}
+
+u16
+fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
+	      u16 ox_id)
+{
+	struct fc_prli_s *prli = (struct fc_prli_s *) (pld);
+
+	fc_els_req_build(fchs, d_id, s_id, ox_id);
+	memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s));
+
+	prli->command = FC_ELS_PRLI;
+	prli->parampage.servparams.initiator     = 1;
+	prli->parampage.servparams.retry         = 1;
+	prli->parampage.servparams.rec_support   = 1;
+	prli->parampage.servparams.task_retry_id = 0;
+	prli->parampage.servparams.confirm       = 1;
+
+	return sizeof(struct fc_prli_s);
+}
+
+u16
+fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
+		  __be16 ox_id, enum bfa_lport_role role)
+{
+	struct fc_prli_s *prli = (struct fc_prli_s *) (pld);
+
+	fc_els_rsp_build(fchs, d_id, s_id, ox_id);
+	memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s));
+
+	prli->command = FC_ELS_ACC;
+
+	prli->parampage.servparams.initiator = 1;
+
+	prli->parampage.rspcode = FC_PRLI_ACC_XQTD;
+
+	return sizeof(struct fc_prli_s);
+}
+
+enum fc_parse_status
+fc_prli_rsp_parse(struct fc_prli_s *prli, int len)
+{
+	if (len < sizeof(struct fc_prli_s))
+		return FC_PARSE_FAILURE;
+
+	if (prli->command != FC_ELS_ACC)
+		return FC_PARSE_FAILURE;
+
+	if ((prli->parampage.rspcode != FC_PRLI_ACC_XQTD)
+	    && (prli->parampage.rspcode != FC_PRLI_ACC_PREDEF_IMG))
+		return FC_PARSE_FAILURE;
+
+	if (prli->parampage.servparams.target != 1)
+		return FC_PARSE_FAILURE;
+
+	return FC_PARSE_OK;
+}
+
+enum fc_parse_status
+fc_prli_parse(struct fc_prli_s *prli)
+{
+	if (prli->parampage.type != FC_TYPE_FCP)
+		return FC_PARSE_FAILURE;
+
+	if (!prli->parampage.imagepair)
+		return FC_PARSE_FAILURE;
+
+	if (!prli->parampage.servparams.initiator)
+		return FC_PARSE_FAILURE;
+
+	return FC_PARSE_OK;
+}
+
+u16
+fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo, u32 d_id, u32 s_id,
+	      u16 ox_id, wwn_t port_name)
+{
+	fc_els_req_build(fchs, d_id, s_id, ox_id);
+
+	memset(logo, '\0', sizeof(struct fc_logo_s));
+	logo->els_cmd.els_code = FC_ELS_LOGO;
+	logo->nport_id = (s_id);
+	logo->orig_port_name = port_name;
+
+	return sizeof(struct fc_logo_s);
+}
+
+static u16
+fc_adisc_x_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
+		 u32 s_id, __be16 ox_id, wwn_t port_name,
+		 wwn_t node_name, u8 els_code)
+{
+	memset(adisc, '\0', sizeof(struct fc_adisc_s));
+
+	adisc->els_cmd.els_code = els_code;
+
+	if (els_code == FC_ELS_ADISC)
+		fc_els_req_build(fchs, d_id, s_id, ox_id);
+	else
+		fc_els_rsp_build(fchs, d_id, s_id, ox_id);
+
+	adisc->orig_HA = 0;
+	adisc->orig_port_name = port_name;
+	adisc->orig_node_name = node_name;
+	adisc->nport_id = (s_id);
+
+	return sizeof(struct fc_adisc_s);
+}
+
+u16
+fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
+		u32 s_id, __be16 ox_id, wwn_t port_name, wwn_t node_name)
+{
+	return fc_adisc_x_build(fchs, adisc, d_id, s_id, ox_id, port_name,
+				node_name, FC_ELS_ADISC);
+}
+
+u16
+fc_adisc_acc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
+		   u32 s_id, __be16 ox_id, wwn_t port_name,
+		   wwn_t node_name)
+{
+	return fc_adisc_x_build(fchs, adisc, d_id, s_id, ox_id, port_name,
+				node_name, FC_ELS_ACC);
+}
+
+enum fc_parse_status
+fc_adisc_rsp_parse(struct fc_adisc_s *adisc, int len, wwn_t port_name,
+				 wwn_t node_name)
+{
+	if (len < sizeof(struct fc_adisc_s))
+		return FC_PARSE_FAILURE;
+
+	if (adisc->els_cmd.els_code != FC_ELS_ACC)
+		return FC_PARSE_FAILURE;
+
+	if (!wwn_is_equal(adisc->orig_port_name, port_name))
+		return FC_PARSE_FAILURE;
+
+	return FC_PARSE_OK;
+}
+
+enum fc_parse_status
+fc_adisc_parse(struct fchs_s *fchs, void *pld, u32 host_dap, wwn_t node_name,
+	       wwn_t port_name)
+{
+	struct fc_adisc_s *adisc = (struct fc_adisc_s *) pld;
+
+	if (adisc->els_cmd.els_code != FC_ELS_ACC)
+		return FC_PARSE_FAILURE;
+
+	if ((adisc->nport_id == (host_dap))
+	    && wwn_is_equal(adisc->orig_port_name, port_name)
+	    && wwn_is_equal(adisc->orig_node_name, node_name))
+		return FC_PARSE_OK;
+
+	return FC_PARSE_FAILURE;
+}
+
+enum fc_parse_status
+fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name, wwn_t port_name)
+{
+	struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1);
+
+	if (pdisc->class3.class_valid != 1)
+		return FC_PARSE_FAILURE;
+
+	if ((be16_to_cpu(pdisc->class3.rxsz) <
+		(FC_MIN_PDUSZ - sizeof(struct fchs_s)))
+	    || (pdisc->class3.rxsz == 0))
+		return FC_PARSE_FAILURE;
+
+	if (!wwn_is_equal(pdisc->port_name, port_name))
+		return FC_PARSE_FAILURE;
+
+	if (!wwn_is_equal(pdisc->node_name, node_name))
+		return FC_PARSE_FAILURE;
+
+	return FC_PARSE_OK;
+}
+
+u16
+fc_abts_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
+{
+	memcpy(fchs, &fc_bls_req_tmpl, sizeof(struct fchs_s));
+	fchs->cat_info = FC_CAT_ABTS;
+	fchs->d_id = (d_id);
+	fchs->s_id = (s_id);
+	fchs->ox_id = cpu_to_be16(ox_id);
+
+	return sizeof(struct fchs_s);
+}
+
+enum fc_parse_status
+fc_abts_rsp_parse(struct fchs_s *fchs, int len)
+{
+	if ((fchs->cat_info == FC_CAT_BA_ACC)
+	    || (fchs->cat_info == FC_CAT_BA_RJT))
+		return FC_PARSE_OK;
+
+	return FC_PARSE_FAILURE;
+}
+
+u16
+fc_rrq_build(struct fchs_s *fchs, struct fc_rrq_s *rrq, u32 d_id, u32 s_id,
+	     u16 ox_id, u16 rrq_oxid)
+{
+	fc_els_req_build(fchs, d_id, s_id, ox_id);
+
+	/*
+	 * build rrq payload
+	 */
+	memcpy(rrq, &rrq_tmpl, sizeof(struct fc_rrq_s));
+	rrq->s_id = (s_id);
+	rrq->ox_id = cpu_to_be16(rrq_oxid);
+	rrq->rx_id = FC_RXID_ANY;
+
+	return sizeof(struct fc_rrq_s);
+}
+
+u16
+fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
+		  __be16 ox_id)
+{
+	struct fc_els_cmd_s *acc = pld;
+
+	fc_els_rsp_build(fchs, d_id, s_id, ox_id);
+
+	memset(acc, 0, sizeof(struct fc_els_cmd_s));
+	acc->els_code = FC_ELS_ACC;
+
+	return sizeof(struct fc_els_cmd_s);
+}
+
+u16
+fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt, u32 d_id,
+		u32 s_id, __be16 ox_id, u8 reason_code,
+		u8 reason_code_expl)
+{
+	fc_els_rsp_build(fchs, d_id, s_id, ox_id);
+	memset(ls_rjt, 0, sizeof(struct fc_ls_rjt_s));
+
+	ls_rjt->els_cmd.els_code = FC_ELS_LS_RJT;
+	ls_rjt->reason_code = reason_code;
+	ls_rjt->reason_code_expl = reason_code_expl;
+	ls_rjt->vendor_unique = 0x00;
+
+	return sizeof(struct fc_ls_rjt_s);
+}
+
+u16
+fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id,
+		u32 s_id, __be16 ox_id, u16 rx_id)
+{
+	fc_bls_rsp_build(fchs, d_id, s_id, ox_id);
+
+	memcpy(ba_acc, &ba_acc_tmpl, sizeof(struct fc_ba_acc_s));
+
+	fchs->rx_id = rx_id;
+
+	ba_acc->ox_id = fchs->ox_id;
+	ba_acc->rx_id = fchs->rx_id;
+
+	return sizeof(struct fc_ba_acc_s);
+}
+
+u16
+fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd, u32 d_id,
+		u32 s_id, __be16 ox_id)
+{
+	fc_els_rsp_build(fchs, d_id, s_id, ox_id);
+	memset(els_cmd, 0, sizeof(struct fc_els_cmd_s));
+	els_cmd->els_code = FC_ELS_ACC;
+
+	return sizeof(struct fc_els_cmd_s);
+}
+
+int
+fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code)
+{
+	int             num_pages = 0;
+	struct fc_prlo_s *prlo;
+	struct fc_tprlo_s *tprlo;
+
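+	/* ELS payload = 4-byte command header + one 16-byte page per entry */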
+	if (els_code == FC_ELS_PRLO) {
+		prlo = (struct fc_prlo_s *) (fc_frame + 1);
+		num_pages = (be16_to_cpu(prlo->payload_len) - 4) / 16;
+	} else {
+		tprlo = (struct fc_tprlo_s *) (fc_frame + 1);
+		num_pages = (be16_to_cpu(tprlo->payload_len) - 4) / 16;
+	}
+	return num_pages;
+}
+
+u16
+fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc,
+		u32 d_id, u32 s_id, __be16 ox_id, int num_pages)
+{
+	int             page;
+
+	fc_els_rsp_build(fchs, d_id, s_id, ox_id);
+
+	memset(tprlo_acc, 0, (num_pages * 16) + 4);
+	tprlo_acc->command = FC_ELS_ACC;
+
+	tprlo_acc->page_len = 0x10;
+	tprlo_acc->payload_len = cpu_to_be16((num_pages * 16) + 4);
+
+	for (page = 0; page < num_pages; page++) {
+		tprlo_acc->tprlo_acc_params[page].opa_valid = 0;
+		tprlo_acc->tprlo_acc_params[page].rpa_valid = 0;
+		tprlo_acc->tprlo_acc_params[page].fc4type_csp = FC_TYPE_FCP;
+		tprlo_acc->tprlo_acc_params[page].orig_process_assc = 0;
+		tprlo_acc->tprlo_acc_params[page].resp_process_assc = 0;
+	}
+	return be16_to_cpu(tprlo_acc->payload_len);
+}
+
+u16
+fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc, u32 d_id,
+		  u32 s_id, __be16 ox_id, int num_pages)
+{
+	int             page;
+
+	fc_els_rsp_build(fchs, d_id, s_id, ox_id);
+
+	memset(prlo_acc, 0, (num_pages * 16) + 4);
+	prlo_acc->command = FC_ELS_ACC;
+	prlo_acc->page_len = 0x10;
+	prlo_acc->payload_len = cpu_to_be16((num_pages * 16) + 4);
+
+	for (page = 0; page < num_pages; page++) {
+		prlo_acc->prlo_acc_params[page].opa_valid = 0;
+		prlo_acc->prlo_acc_params[page].rpa_valid = 0;
+		prlo_acc->prlo_acc_params[page].fc4type_csp = FC_TYPE_FCP;
+		prlo_acc->prlo_acc_params[page].orig_process_assc = 0;
+		prlo_acc->prlo_acc_params[page].resp_process_assc = 0;
+	}
+
+	return be16_to_cpu(prlo_acc->payload_len);
+}
+
+u16
+fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid, u32 d_id,
+		u32 s_id, u16 ox_id, u32 data_format)
+{
+	fc_els_req_build(fchs, d_id, s_id, ox_id);
+
+	memset(rnid, 0, sizeof(struct fc_rnid_cmd_s));
+
+	rnid->els_cmd.els_code = FC_ELS_RNID;
+	rnid->node_id_data_format = data_format;
+
+	return sizeof(struct fc_rnid_cmd_s);
+}
+
+u16
+fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc, u32 d_id,
+		  u32 s_id, __be16 ox_id, u32 data_format,
+		  struct fc_rnid_common_id_data_s *common_id_data,
+		  struct fc_rnid_general_topology_data_s *gen_topo_data)
+{
+	memset(rnid_acc, 0, sizeof(struct fc_rnid_acc_s));
+
+	fc_els_rsp_build(fchs, d_id, s_id, ox_id);
+
+	rnid_acc->els_cmd.els_code = FC_ELS_ACC;
+	rnid_acc->node_id_data_format = data_format;
+	rnid_acc->common_id_data_length =
+			sizeof(struct fc_rnid_common_id_data_s);
+	rnid_acc->common_id_data = *common_id_data;
+
+	if (data_format == RNID_NODEID_DATA_FORMAT_DISCOVERY) {
+		rnid_acc->specific_id_data_length =
+			sizeof(struct fc_rnid_general_topology_data_s);
+		rnid_acc->gen_topology_data = *gen_topo_data;
+		return sizeof(struct fc_rnid_acc_s);
+	} else {
+		return sizeof(struct fc_rnid_acc_s) -
+			sizeof(struct fc_rnid_general_topology_data_s);
+	}
+}
+
+u16
+fc_rpsc_build(struct fchs_s *fchs, struct fc_rpsc_cmd_s *rpsc, u32 d_id,
+		u32 s_id, u16 ox_id)
+{
+	fc_els_req_build(fchs, d_id, s_id, ox_id);
+
+	memset(rpsc, 0, sizeof(struct fc_rpsc_cmd_s));
+
+	rpsc->els_cmd.els_code = FC_ELS_RPSC;
+	return sizeof(struct fc_rpsc_cmd_s);
+}
+
+u16
+fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rpsc2, u32 d_id,
+		u32 s_id, u32 *pid_list, u16 npids)
+{
+	u32 dctlr_id = FC_DOMAIN_CTRLR(bfa_hton3b(d_id));
+	int i = 0;
+
+	fc_els_req_build(fchs, bfa_hton3b(dctlr_id), s_id, 0);
+
+	memset(rpsc2, 0, sizeof(struct fc_rpsc2_cmd_s));
+
+	rpsc2->els_cmd.els_code = FC_ELS_RPSC;
+	rpsc2->token = cpu_to_be32(FC_BRCD_TOKEN);
+	rpsc2->num_pids  = cpu_to_be16(npids);
+	for (i = 0; i < npids; i++)
+		rpsc2->pid_list[i].pid = pid_list[i];
+
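+	/* the template struct apparently carries one pid_list element, so
+	 * only npids - 1 additional 4-byte entries are counted here */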
+	return sizeof(struct fc_rpsc2_cmd_s) + ((npids - 1) * (sizeof(u32)));
+}
+
+u16
+fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc,
+		u32 d_id, u32 s_id, __be16 ox_id,
+		  struct fc_rpsc_speed_info_s *oper_speed)
+{
+	memset(rpsc_acc, 0, sizeof(struct fc_rpsc_acc_s));
+
+	fc_els_rsp_build(fchs, d_id, s_id, ox_id);
+
+	rpsc_acc->command = FC_ELS_ACC;
+	rpsc_acc->num_entries = cpu_to_be16(1);
+
+	rpsc_acc->speed_info[0].port_speed_cap =
+		cpu_to_be16(oper_speed->port_speed_cap);
+
+	rpsc_acc->speed_info[0].port_op_speed =
+		cpu_to_be16(oper_speed->port_op_speed);
+
+	return sizeof(struct fc_rpsc_acc_s);
+}
+
+u16
+fc_logo_rsp_parse(struct fchs_s *fchs, int len)
+{
+	struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
+
+	if (els_cmd->els_code != FC_ELS_ACC)
+		return FC_PARSE_FAILURE;
+
+	return FC_PARSE_OK;
+}
+
+u16
+fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
+	       wwn_t port_name, wwn_t node_name, u16 pdu_size)
+{
+	struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1);
+
+	memcpy(pdisc, &plogi_tmpl, sizeof(struct fc_logi_s));
+
+	pdisc->els_cmd.els_code = FC_ELS_PDISC;
+	fc_els_req_build(fchs, d_id, s_id, ox_id);
+
+	pdisc->csp.rxsz = pdisc->class3.rxsz = cpu_to_be16(pdu_size);
+	pdisc->port_name = port_name;
+	pdisc->node_name = node_name;
+
+	return sizeof(struct fc_logi_s);
+}
+
+u16
+fc_pdisc_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name)
+{
+	struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1);
+
+	if (len < sizeof(struct fc_logi_s))
+		return FC_PARSE_LEN_INVAL;
+
+	if (pdisc->els_cmd.els_code != FC_ELS_ACC)
+		return FC_PARSE_ACC_INVAL;
+
+	if (!wwn_is_equal(pdisc->port_name, port_name))
+		return FC_PARSE_PWWN_NOT_EQUAL;
+
+	if (!pdisc->class3.class_valid)
+		return FC_PARSE_NWWN_NOT_EQUAL;
+
+	if (be16_to_cpu(pdisc->class3.rxsz) < (FC_MIN_PDUSZ))
+		return FC_PARSE_RXSZ_INVAL;
+
+	return FC_PARSE_OK;
+}
+
+u16
+fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
+	      int num_pages)
+{
+	struct fc_prlo_s *prlo = (struct fc_prlo_s *) (fchs + 1);
+	int             page;
+
+	fc_els_req_build(fchs, d_id, s_id, ox_id);
+	memset(prlo, 0, (num_pages * 16) + 4);
+	prlo->command = FC_ELS_PRLO;
+	prlo->page_len = 0x10;
+	prlo->payload_len = cpu_to_be16((num_pages * 16) + 4);
+
+	for (page = 0; page < num_pages; page++) {
+		prlo->prlo_params[page].type = FC_TYPE_FCP;
+		prlo->prlo_params[page].opa_valid = 0;
+		prlo->prlo_params[page].rpa_valid = 0;
+		prlo->prlo_params[page].orig_process_assc = 0;
+		prlo->prlo_params[page].resp_process_assc = 0;
+	}
+
+	return be16_to_cpu(prlo->payload_len);
+}
+
+u16
+fc_prlo_rsp_parse(struct fchs_s *fchs, int len)
+{
+	struct fc_prlo_acc_s *prlo = (struct fc_prlo_acc_s *) (fchs + 1);
+	int             num_pages = 0;
+	int             page = 0;
+
+	if (prlo->command != FC_ELS_ACC)
+		return FC_PARSE_FAILURE;
+
+	num_pages = ((be16_to_cpu(prlo->payload_len)) - 4) / 16;
+
+	for (page = 0; page < num_pages; page++) {
+		if (prlo->prlo_acc_params[page].type != FC_TYPE_FCP)
+			return FC_PARSE_FAILURE;
+
+		if (prlo->prlo_acc_params[page].opa_valid != 0)
+			return FC_PARSE_FAILURE;
+
+		if (prlo->prlo_acc_params[page].rpa_valid != 0)
+			return FC_PARSE_FAILURE;
+
+		if (prlo->prlo_acc_params[page].orig_process_assc != 0)
+			return FC_PARSE_FAILURE;
+
+		if (prlo->prlo_acc_params[page].resp_process_assc != 0)
+			return FC_PARSE_FAILURE;
+	}
+	return FC_PARSE_OK;
+}
+
+u16
+fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
+	       int num_pages, enum fc_tprlo_type tprlo_type, u32 tpr_id)
+{
+	struct fc_tprlo_s *tprlo = (struct fc_tprlo_s *) (fchs + 1);
+	int             page;
+
+	fc_els_req_build(fchs, d_id, s_id, ox_id);
+	memset(tprlo, 0, (num_pages * 16) + 4);
+	tprlo->command = FC_ELS_TPRLO;
+	tprlo->page_len = 0x10;
+	tprlo->payload_len = cpu_to_be16((num_pages * 16) + 4);
+
+	for (page = 0; page < num_pages; page++) {
+		tprlo->tprlo_params[page].type = FC_TYPE_FCP;
+		tprlo->tprlo_params[page].opa_valid = 0;
+		tprlo->tprlo_params[page].rpa_valid = 0;
+		tprlo->tprlo_params[page].orig_process_assc = 0;
+		tprlo->tprlo_params[page].resp_process_assc = 0;
+		if (tprlo_type == FC_GLOBAL_LOGO) {
+			tprlo->tprlo_params[page].global_process_logout = 1;
+		} else if (tprlo_type == FC_TPR_LOGO) {
+			tprlo->tprlo_params[page].tpo_nport_valid = 1;
+			tprlo->tprlo_params[page].tpo_nport_id = (tpr_id);
+		}
+	}
+
+	return be16_to_cpu(tprlo->payload_len);
+}
+
+u16
+fc_tprlo_rsp_parse(struct fchs_s *fchs, int len)
+{
+	struct fc_tprlo_acc_s *tprlo = (struct fc_tprlo_acc_s *) (fchs + 1);
+	int             num_pages = 0;
+	int             page = 0;
+
+	if (tprlo->command != FC_ELS_ACC)
+		return FC_PARSE_ACC_INVAL;
+
+	num_pages = (be16_to_cpu(tprlo->payload_len) - 4) / 16;
+
+	for (page = 0; page < num_pages; page++) {
+		if (tprlo->tprlo_acc_params[page].type != FC_TYPE_FCP)
+			return FC_PARSE_NOT_FCP;
+		if (tprlo->tprlo_acc_params[page].opa_valid != 0)
+			return FC_PARSE_OPAFLAG_INVAL;
+		if (tprlo->tprlo_acc_params[page].rpa_valid != 0)
+			return FC_PARSE_RPAFLAG_INVAL;
+		if (tprlo->tprlo_acc_params[page].orig_process_assc != 0)
+			return FC_PARSE_OPA_INVAL;
+		if (tprlo->tprlo_acc_params[page].resp_process_assc != 0)
+			return FC_PARSE_RPA_INVAL;
+	}
+	return FC_PARSE_OK;
+}
+
+enum fc_parse_status
+fc_rrq_rsp_parse(struct fchs_s *fchs, int len)
+{
+	struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
+
+	if (els_cmd->els_code != FC_ELS_ACC)
+		return FC_PARSE_FAILURE;
+
+	return FC_PARSE_OK;
+}
+
+u16
+fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id,
+		u32 reason_code, u32 reason_expl)
+{
+	struct fc_ba_rjt_s *ba_rjt = (struct fc_ba_rjt_s *) (fchs + 1);
+
+	fc_bls_rsp_build(fchs, d_id, s_id, ox_id);
+
+	fchs->cat_info = FC_CAT_BA_RJT;
+	ba_rjt->reason_code = reason_code;
+	ba_rjt->reason_expl = reason_expl;
+	return sizeof(struct fc_ba_rjt_s);
+}
+
+static void
+fc_gs_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code)
+{
+	memset(cthdr, 0, sizeof(struct ct_hdr_s));
+	cthdr->rev_id = CT_GS3_REVISION;
+	cthdr->gs_type = CT_GSTYPE_DIRSERVICE;
+	cthdr->gs_sub_type = CT_GSSUBTYPE_NAMESERVER;
+	cthdr->cmd_rsp_code = cpu_to_be16(cmd_code);
+}
+
+static void
+fc_gs_fdmi_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code)
+{
+	memset(cthdr, 0, sizeof(struct ct_hdr_s));
+	cthdr->rev_id = CT_GS3_REVISION;
+	cthdr->gs_type = CT_GSTYPE_MGMTSERVICE;
+	cthdr->gs_sub_type = CT_GSSUBTYPE_HBA_MGMTSERVER;
+	cthdr->cmd_rsp_code = cpu_to_be16(cmd_code);
+}
+
+static void
+fc_gs_ms_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code,
+					 u8 sub_type)
+{
+	memset(cthdr, 0, sizeof(struct ct_hdr_s));
+	cthdr->rev_id = CT_GS3_REVISION;
+	cthdr->gs_type = CT_GSTYPE_MGMTSERVICE;
+	cthdr->gs_sub_type = sub_type;
+	cthdr->cmd_rsp_code = cpu_to_be16(cmd_code);
+}
+
+u16
+fc_gidpn_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
+	       wwn_t port_name)
+{
+	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
+	struct fcgs_gidpn_req_s *gidpn = (struct fcgs_gidpn_req_s *)(cthdr + 1);
+	u32        d_id = bfa_hton3b(FC_NAME_SERVER);
+
+	fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
+	fc_gs_cthdr_build(cthdr, s_id, GS_GID_PN);
+
+	memset(gidpn, 0, sizeof(struct fcgs_gidpn_req_s));
+	gidpn->port_name = port_name;
+	return sizeof(struct fcgs_gidpn_req_s) + sizeof(struct ct_hdr_s);
+}
+
+u16
+fc_gpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
+	       u32 port_id)
+{
+	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
+	fcgs_gpnid_req_t *gpnid = (fcgs_gpnid_req_t *) (cthdr + 1);
+	u32        d_id = bfa_hton3b(FC_NAME_SERVER);
+
+	fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
+	fc_gs_cthdr_build(cthdr, s_id, GS_GPN_ID);
+
+	memset(gpnid, 0, sizeof(fcgs_gpnid_req_t));
+	gpnid->dap = port_id;
+	return sizeof(fcgs_gpnid_req_t) + sizeof(struct ct_hdr_s);
+}
+
+u16
+fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
+	       u32 port_id)
+{
+	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
+	fcgs_gnnid_req_t *gnnid = (fcgs_gnnid_req_t *) (cthdr + 1);
+	u32        d_id = bfa_hton3b(FC_NAME_SERVER);
+
+	fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
+	fc_gs_cthdr_build(cthdr, s_id, GS_GNN_ID);
+
+	memset(gnnid, 0, sizeof(fcgs_gnnid_req_t));
+	gnnid->dap = port_id;
+	return sizeof(fcgs_gnnid_req_t) + sizeof(struct ct_hdr_s);
+}
+
+u16
+fc_ct_rsp_parse(struct ct_hdr_s *cthdr)
+{
+	if (be16_to_cpu(cthdr->cmd_rsp_code) != CT_RSP_ACCEPT) {
+		if (cthdr->reason_code == CT_RSN_LOGICAL_BUSY)
+			return FC_PARSE_BUSY;
+		else
+			return FC_PARSE_FAILURE;
+	}
+
+	return FC_PARSE_OK;
+}
+
+u16
+fc_gs_rjt_build(struct fchs_s *fchs,  struct ct_hdr_s *cthdr,
+		u32 d_id, u32 s_id, u16 ox_id, u8 reason_code,
+		u8 reason_code_expl)
+{
+	fc_gsresp_fchdr_build(fchs, d_id, s_id, ox_id);
+
+	cthdr->cmd_rsp_code = cpu_to_be16(CT_RSP_REJECT);
+	cthdr->rev_id = CT_GS3_REVISION;
+
+	cthdr->reason_code = reason_code;
+	cthdr->exp_code    = reason_code_expl;
+	return sizeof(struct ct_hdr_s);
+}
+
+u16
+fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr,
+		u8 set_br_reg, u32 s_id, u16 ox_id)
+{
+	u32        d_id = bfa_hton3b(FC_FABRIC_CONTROLLER);
+
+	fc_els_req_build(fchs, d_id, s_id, ox_id);
+
+	memset(scr, 0, sizeof(struct fc_scr_s));
+	scr->command = FC_ELS_SCR;
+	scr->reg_func = FC_SCR_REG_FUNC_FULL;
+	if (set_br_reg)
+		scr->vu_reg_func = FC_VU_SCR_REG_FUNC_FABRIC_NAME_CHANGE;
+
+	return sizeof(struct fc_scr_s);
+}
+
+u16
+fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn,
+		u32 s_id, u16 ox_id)
+{
+	u32        d_id = bfa_hton3b(FC_FABRIC_CONTROLLER);
+	u16        payldlen;
+
+	fc_els_req_build(fchs, d_id, s_id, ox_id);
+	rscn->command = FC_ELS_RSCN;
+	rscn->pagelen = sizeof(rscn->event[0]);
+
+	payldlen = sizeof(u32) + rscn->pagelen;
+	rscn->payldlen = cpu_to_be16(payldlen);
+
+	rscn->event[0].format = FC_RSCN_FORMAT_PORTID;
+	rscn->event[0].portid = s_id;
+
+	return sizeof(struct fc_rscn_pl_s);
+}
+
+u16
+fc_rftid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
+	       enum bfa_lport_role roles)
+{
+	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
+	struct fcgs_rftid_req_s *rftid = (struct fcgs_rftid_req_s *)(cthdr + 1);
+	u32        type_value, d_id = bfa_hton3b(FC_NAME_SERVER);
+	u8         index;
+
+	fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
+	fc_gs_cthdr_build(cthdr, s_id, GS_RFT_ID);
+
+	memset(rftid, 0, sizeof(struct fcgs_rftid_req_s));
+
+	rftid->dap = s_id;
+
+	/* By default, FCP FC4 Type is registered */
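+	/* e.g. FC_TYPE_FCP (0x08): index = 0, bit 8 set -> word 0 = 0x00000100 */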
+	index = FC_TYPE_FCP >> 5;
+	type_value = 1 << (FC_TYPE_FCP % 32);
+	rftid->fc4_type[index] = cpu_to_be32(type_value);
+
+	return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s);
+}
+
+u16
+fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
+		   u8 *fc4_bitmap, u32 bitmap_size)
+{
+	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
+	struct fcgs_rftid_req_s *rftid = (struct fcgs_rftid_req_s *)(cthdr + 1);
+	u32        d_id = bfa_hton3b(FC_NAME_SERVER);
+
+	fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
+	fc_gs_cthdr_build(cthdr, s_id, GS_RFT_ID);
+
+	memset(rftid, 0, sizeof(struct fcgs_rftid_req_s));
+
+	rftid->dap = s_id;
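+	/* the RFT_ID FC-4 types field is 32 bytes (256 type bits); clamp
+	 * the caller's bitmap to that */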
+	memcpy((void *)rftid->fc4_type, (void *)fc4_bitmap,
+		(bitmap_size < 32 ? bitmap_size : 32));
+
+	return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s);
+}
+
+u16
+fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
+	       u8 fc4_type, u8 fc4_ftrs)
+{
+	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
+	struct fcgs_rffid_req_s *rffid = (struct fcgs_rffid_req_s *)(cthdr + 1);
+	u32         d_id = bfa_hton3b(FC_NAME_SERVER);
+
+	fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
+	fc_gs_cthdr_build(cthdr, s_id, GS_RFF_ID);
+
+	memset(rffid, 0, sizeof(struct fcgs_rffid_req_s));
+
+	rffid->dap	    = s_id;
+	rffid->fc4ftr_bits  = fc4_ftrs;
+	rffid->fc4_type	    = fc4_type;
+
+	return sizeof(struct fcgs_rffid_req_s) + sizeof(struct ct_hdr_s);
+}
+
+u16
+fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
+		u8 *name)
+{
+	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
+	struct fcgs_rspnid_req_s *rspnid =
+			(struct fcgs_rspnid_req_s *)(cthdr + 1);
+	u32        d_id = bfa_hton3b(FC_NAME_SERVER);
+
+	fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
+	fc_gs_cthdr_build(cthdr, s_id, GS_RSPN_ID);
+
+	memset(rspnid, 0, sizeof(struct fcgs_rspnid_req_s));
+
+	rspnid->dap = s_id;
+	strlcpy(rspnid->spn, name, sizeof(rspnid->spn));
+	rspnid->spn_len = (u8) strlen(rspnid->spn);
+
+	return sizeof(struct fcgs_rspnid_req_s) + sizeof(struct ct_hdr_s);
+}
+
+u16
+fc_rsnn_nn_build(struct fchs_s *fchs, void *pyld, u32 s_id,
+			wwn_t node_name, u8 *name)
+{
+	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
+	struct fcgs_rsnn_nn_req_s *rsnn_nn =
+		(struct fcgs_rsnn_nn_req_s *) (cthdr + 1);
+	u32	d_id = bfa_hton3b(FC_NAME_SERVER);
+
+	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
+	fc_gs_cthdr_build(cthdr, s_id, GS_RSNN_NN);
+
+	memset(rsnn_nn, 0, sizeof(struct fcgs_rsnn_nn_req_s));
+
+	rsnn_nn->node_name = node_name;
+	strlcpy(rsnn_nn->snn, name, sizeof(rsnn_nn->snn));
+	rsnn_nn->snn_len = (u8) strlen(rsnn_nn->snn);
+
+	return sizeof(struct fcgs_rsnn_nn_req_s) + sizeof(struct ct_hdr_s);
+}
+
+u16
+fc_gid_ft_build(struct fchs_s *fchs, void *pyld, u32 s_id, u8 fc4_type)
+{
+	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
+	struct fcgs_gidft_req_s *gidft = (struct fcgs_gidft_req_s *)(cthdr + 1);
+	u32        d_id = bfa_hton3b(FC_NAME_SERVER);
+
+	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
+
+	fc_gs_cthdr_build(cthdr, s_id, GS_GID_FT);
+
+	memset(gidft, 0, sizeof(struct fcgs_gidft_req_s));
+	gidft->fc4_type = fc4_type;
+	gidft->domain_id = 0;
+	gidft->area_id = 0;
+
+	return sizeof(struct fcgs_gidft_req_s) + sizeof(struct ct_hdr_s);
+}
+
+u16
+fc_rpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
+	       wwn_t port_name)
+{
+	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
+	struct fcgs_rpnid_req_s *rpnid = (struct fcgs_rpnid_req_s *)(cthdr + 1);
+	u32        d_id = bfa_hton3b(FC_NAME_SERVER);
+
+	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
+	fc_gs_cthdr_build(cthdr, s_id, GS_RPN_ID);
+
+	memset(rpnid, 0, sizeof(struct fcgs_rpnid_req_s));
+	rpnid->port_id = port_id;
+	rpnid->port_name = port_name;
+
+	return sizeof(struct fcgs_rpnid_req_s) + sizeof(struct ct_hdr_s);
+}
+
+u16
+fc_rnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
+	       wwn_t node_name)
+{
+	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
+	struct fcgs_rnnid_req_s *rnnid = (struct fcgs_rnnid_req_s *)(cthdr + 1);
+	u32        d_id = bfa_hton3b(FC_NAME_SERVER);
+
+	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
+	fc_gs_cthdr_build(cthdr, s_id, GS_RNN_ID);
+
+	memset(rnnid, 0, sizeof(struct fcgs_rnnid_req_s));
+	rnnid->port_id = port_id;
+	rnnid->node_name = node_name;
+
+	return sizeof(struct fcgs_rnnid_req_s) + sizeof(struct ct_hdr_s);
+}
+
+u16
+fc_rcsid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
+	       u32 cos)
+{
+	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
+	struct fcgs_rcsid_req_s *rcsid =
+			(struct fcgs_rcsid_req_s *) (cthdr + 1);
+	u32        d_id = bfa_hton3b(FC_NAME_SERVER);
+
+	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
+	fc_gs_cthdr_build(cthdr, s_id, GS_RCS_ID);
+
+	memset(rcsid, 0, sizeof(struct fcgs_rcsid_req_s));
+	rcsid->port_id = port_id;
+	rcsid->cos = cos;
+
+	return sizeof(struct fcgs_rcsid_req_s) + sizeof(struct ct_hdr_s);
+}
+
+u16
+fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
+	       u8 port_type)
+{
+	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
+	struct fcgs_rptid_req_s *rptid = (struct fcgs_rptid_req_s *)(cthdr + 1);
+	u32        d_id = bfa_hton3b(FC_NAME_SERVER);
+
+	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
+	fc_gs_cthdr_build(cthdr, s_id, GS_RPT_ID);
+
+	memset(rptid, 0, sizeof(struct fcgs_rptid_req_s));
+	rptid->port_id = port_id;
+	rptid->port_type = port_type;
+
+	return sizeof(struct fcgs_rptid_req_s) + sizeof(struct ct_hdr_s);
+}
+
+u16
+fc_ganxt_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id)
+{
+	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
+	struct fcgs_ganxt_req_s *ganxt = (struct fcgs_ganxt_req_s *)(cthdr + 1);
+	u32        d_id = bfa_hton3b(FC_NAME_SERVER);
+
+	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
+	fc_gs_cthdr_build(cthdr, s_id, GS_GA_NXT);
+
+	memset(ganxt, 0, sizeof(struct fcgs_ganxt_req_s));
+	ganxt->port_id = port_id;
+
+	return sizeof(struct ct_hdr_s) + sizeof(struct fcgs_ganxt_req_s);
+}
+
+/*
+ * Builds the FC header and CT header for FDMI requests.
+ */
+u16
+fc_fdmi_reqhdr_build(struct fchs_s *fchs, void *pyld, u32 s_id,
+		     u16 cmd_code)
+{
+	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
+	u32        d_id = bfa_hton3b(FC_MGMT_SERVER);
+
+	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
+	fc_gs_fdmi_cthdr_build(cthdr, s_id, cmd_code);
+
+	return sizeof(struct ct_hdr_s);
+}
+
+/*
+ * Given an FC-4 type, this function returns the corresponding FC-4 type bitmask.
+ */
+void
+fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask)
+{
+	u8         index;
+	__be32       *ptr = (__be32 *) bit_mask;
+	u32        type_value;
+
+	/*
+	 * @todo : Check for bitmask size
+	 */
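+	/* a full FC-4 type map is 256 bits: eight __be32 words, so bit_mask
+	 * must be at least 32 bytes */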
+
+	index = fc4_type >> 5;
+	type_value = 1 << (fc4_type % 32);
+	ptr[index] = cpu_to_be32(type_value);
+}
+
+/*
+ *	GMAL Request
+ */
+u16
+fc_gmal_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn)
+{
+	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
+	fcgs_gmal_req_t *gmal = (fcgs_gmal_req_t *) (cthdr + 1);
+	u32        d_id = bfa_hton3b(FC_MGMT_SERVER);
+
+	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
+	fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GMAL_CMD,
+			CT_GSSUBTYPE_CFGSERVER);
+
+	memset(gmal, 0, sizeof(fcgs_gmal_req_t));
+	gmal->wwn = wwn;
+
+	return sizeof(struct ct_hdr_s) + sizeof(fcgs_gmal_req_t);
+}
+
+/*
+ * GFN (Get Fabric Name) Request
+ */
+u16
+fc_gfn_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn)
+{
+	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
+	fcgs_gfn_req_t *gfn = (fcgs_gfn_req_t *) (cthdr + 1);
+	u32        d_id = bfa_hton3b(FC_MGMT_SERVER);
+
+	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
+	fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GFN_CMD,
+			CT_GSSUBTYPE_CFGSERVER);
+
+	memset(gfn, 0, sizeof(fcgs_gfn_req_t));
+	gfn->wwn = wwn;
+
+	return sizeof(struct ct_hdr_s) + sizeof(fcgs_gfn_req_t);
+}
diff --git a/drivers/scsi/bfa/bfa_fcbuild.h b/drivers/scsi/bfa/bfa_fcbuild.h
new file mode 100644
index 0000000..b109a88
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fcbuild.h
@@ -0,0 +1,329 @@
+/*
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014- QLogic Corporation.
+ * All rights reserved
+ * www.qlogic.com
+ *
+ * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+/*
+ * fcbuild.h - FC link service frame building and parsing routines
+ */
+
+#ifndef __FCBUILD_H__
+#define __FCBUILD_H__
+
+#include "bfad_drv.h"
+#include "bfa_fc.h"
+#include "bfa_defs_fcs.h"
+
+/*
+ * Utility Macros/functions
+ */
+
+#define wwn_is_equal(_wwn1, _wwn2)		\
+	(memcmp(&(_wwn1), &(_wwn2), sizeof(wwn_t)) == 0)
+
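+/*
+ * Round _l up to a multiple of _s (_s must be a power of two);
+ * e.g. fc_roundup(10, 4) == 12 and fc_roundup(12, 4) == 12.
+ */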
+#define fc_roundup(_l, _s) (((_l) + ((_s) - 1)) & ~((_s) - 1))
+
+/*
+ * Given the fc response length, this routine will return
+ * the length of the actual payload bytes following the CT header.
+ *
+ * Assumes the input response length does not include the crc, eof, etc.
+ */
+static inline   u32
+fc_get_ctresp_pyld_len(u32 resp_len)
+{
+	return resp_len - sizeof(struct ct_hdr_s);
+}
+
+/*
+ * Convert RPSC speed to BFA speed value.
+ */
+static inline  enum bfa_port_speed
+fc_rpsc_operspeed_to_bfa_speed(enum fc_rpsc_op_speed speed)
+{
+	switch (speed) {
+
+	case RPSC_OP_SPEED_1G:
+		return BFA_PORT_SPEED_1GBPS;
+
+	case RPSC_OP_SPEED_2G:
+		return BFA_PORT_SPEED_2GBPS;
+
+	case RPSC_OP_SPEED_4G:
+		return BFA_PORT_SPEED_4GBPS;
+
+	case RPSC_OP_SPEED_8G:
+		return BFA_PORT_SPEED_8GBPS;
+
+	case RPSC_OP_SPEED_16G:
+		return BFA_PORT_SPEED_16GBPS;
+
+	case RPSC_OP_SPEED_10G:
+		return BFA_PORT_SPEED_10GBPS;
+
+	default:
+		return BFA_PORT_SPEED_UNKNOWN;
+	}
+}
+
+/*
+ * Convert BFA speed to RPSC speed value.
+ */
+static inline   enum fc_rpsc_op_speed
+fc_bfa_speed_to_rpsc_operspeed(enum bfa_port_speed op_speed)
+{
+	switch (op_speed) {
+
+	case BFA_PORT_SPEED_1GBPS:
+		return RPSC_OP_SPEED_1G;
+
+	case BFA_PORT_SPEED_2GBPS:
+		return RPSC_OP_SPEED_2G;
+
+	case BFA_PORT_SPEED_4GBPS:
+		return RPSC_OP_SPEED_4G;
+
+	case BFA_PORT_SPEED_8GBPS:
+		return RPSC_OP_SPEED_8G;
+
+	case BFA_PORT_SPEED_16GBPS:
+		return RPSC_OP_SPEED_16G;
+
+	case BFA_PORT_SPEED_10GBPS:
+		return RPSC_OP_SPEED_10G;
+
+	default:
+		return RPSC_OP_SPEED_NOT_EST;
+	}
+}
+
+enum fc_parse_status {
+	FC_PARSE_OK = 0,
+	FC_PARSE_FAILURE = 1,
+	FC_PARSE_BUSY = 2,
+	FC_PARSE_LEN_INVAL,
+	FC_PARSE_ACC_INVAL,
+	FC_PARSE_PWWN_NOT_EQUAL,
+	FC_PARSE_NWWN_NOT_EQUAL,
+	FC_PARSE_RXSZ_INVAL,
+	FC_PARSE_NOT_FCP,
+	FC_PARSE_OPAFLAG_INVAL,
+	FC_PARSE_RPAFLAG_INVAL,
+	FC_PARSE_OPA_INVAL,
+	FC_PARSE_RPA_INVAL,
+};
+
+struct fc_templates_s {
+	struct fchs_s fc_els_req;
+	struct fchs_s fc_bls_req;
+	struct fc_logi_s plogi;
+	struct fc_rrq_s rrq;
+};
+
+void            fcbuild_init(void);
+
+u16        fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi,
+			u32 s_id, u16 ox_id, wwn_t port_name, wwn_t node_name,
+			       u16 pdu_size, u8 set_npiv, u8 set_auth,
+			       u16 local_bb_credits);
+
+u16        fc_fdisc_build(struct fchs_s *buf, struct fc_logi_s *flogi, u32 s_id,
+			       u16 ox_id, wwn_t port_name, wwn_t node_name,
+			       u16 pdu_size);
+
+u16        fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi,
+				   u32 s_id, __be16 ox_id,
+				   wwn_t port_name, wwn_t node_name,
+				   u16 pdu_size,
+				   u16 local_bb_credits, u8 bb_scn);
+
+u16        fc_plogi_build(struct fchs_s *fchs, void *pld, u32 d_id,
+			       u32 s_id, u16 ox_id, wwn_t port_name,
+			       wwn_t node_name, u16 pdu_size, u16 bb_cr);
+
+enum fc_parse_status fc_plogi_parse(struct fchs_s *fchs);
+
+u16        fc_abts_build(struct fchs_s *buf, u32 d_id, u32 s_id,
+			      u16 ox_id);
+
+enum fc_parse_status fc_abts_rsp_parse(struct fchs_s *buf, int len);
+
+u16        fc_rrq_build(struct fchs_s *buf, struct fc_rrq_s *rrq, u32 d_id,
+			     u32 s_id, u16 ox_id, u16 rrq_oxid);
+enum fc_parse_status fc_rrq_rsp_parse(struct fchs_s *buf, int len);
+
+u16        fc_rspnid_build(struct fchs_s *fchs, void *pld, u32 s_id,
+				u16 ox_id, u8 *name);
+u16	fc_rsnn_nn_build(struct fchs_s *fchs, void *pld, u32 s_id,
+				wwn_t node_name, u8 *name);
+
+u16        fc_rftid_build(struct fchs_s *fchs, void *pld, u32 s_id,
+			       u16 ox_id, enum bfa_lport_role role);
+
+u16       fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id,
+				   u16 ox_id, u8 *fc4_bitmap,
+				   u32 bitmap_size);
+
+u16	fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
+			u16 ox_id, u8 fc4_type, u8 fc4_ftrs);
+
+u16        fc_gidpn_build(struct fchs_s *fchs, void *pyld, u32 s_id,
+			       u16 ox_id, wwn_t port_name);
+
+u16        fc_gpnid_build(struct fchs_s *fchs, void *pld, u32 s_id,
+			       u16 ox_id, u32 port_id);
+
+u16	fc_gs_rjt_build(struct fchs_s *fchs, struct ct_hdr_s *cthdr,
+			u32 d_id, u32 s_id, u16 ox_id,
+			u8 reason_code, u8 reason_code_expl);
+
+u16        fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr,
+			u8 set_br_reg, u32 s_id, u16 ox_id);
+
+u16        fc_plogi_acc_build(struct fchs_s *fchs, void *pld, u32 d_id,
+				   u32 s_id, u16 ox_id,
+				   wwn_t port_name, wwn_t node_name,
+				   u16 pdu_size, u16 bb_cr);
+
+u16        fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc,
+			u32 d_id, u32 s_id, __be16 ox_id, wwn_t port_name,
+			       wwn_t node_name);
+
+enum fc_parse_status fc_adisc_parse(struct fchs_s *fchs, void *pld,
+			u32 host_dap, wwn_t node_name, wwn_t port_name);
+
+enum fc_parse_status fc_adisc_rsp_parse(struct fc_adisc_s *adisc, int len,
+				 wwn_t port_name, wwn_t node_name);
+
+u16        fc_adisc_acc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc,
+				   u32 d_id, u32 s_id, __be16 ox_id,
+				   wwn_t port_name, wwn_t node_name);
+u16        fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt,
+				u32 d_id, u32 s_id, __be16 ox_id,
+				u8 reason_code, u8 reason_code_expl);
+u16        fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd,
+				u32 d_id, u32 s_id, __be16 ox_id);
+u16        fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id,
+			      u32 s_id, u16 ox_id);
+
+enum fc_parse_status fc_prli_rsp_parse(struct fc_prli_s *prli, int len);
+
+u16        fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id,
+				  u32 s_id, __be16 ox_id,
+				  enum bfa_lport_role role);
+
+u16        fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid,
+			      u32 d_id, u32 s_id, u16 ox_id,
+			      u32 data_format);
+
+u16        fc_rnid_acc_build(struct fchs_s *fchs,
+			struct fc_rnid_acc_s *rnid_acc, u32 d_id, u32 s_id,
+			__be16 ox_id, u32 data_format,
+			struct fc_rnid_common_id_data_s *common_id_data,
+			struct fc_rnid_general_topology_data_s *gen_topo_data);
+
+u16	fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rps2c,
+			u32 d_id, u32 s_id, u32 *pid_list, u16 npids);
+u16        fc_rpsc_build(struct fchs_s *fchs, struct fc_rpsc_cmd_s *rpsc,
+			      u32 d_id, u32 s_id, u16 ox_id);
+u16        fc_rpsc_acc_build(struct fchs_s *fchs,
+			struct fc_rpsc_acc_s *rpsc_acc, u32 d_id, u32 s_id,
+			__be16 ox_id, struct fc_rpsc_speed_info_s *oper_speed);
+u16        fc_gid_ft_build(struct fchs_s *fchs, void *pld, u32 s_id,
+				u8 fc4_type);
+
+u16        fc_rpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
+			       u32 port_id, wwn_t port_name);
+
+u16        fc_rnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
+			       u32 port_id, wwn_t node_name);
+
+u16        fc_rcsid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
+			       u32 port_id, u32 cos);
+
+u16        fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
+			       u32 port_id, u8 port_type);
+
+u16        fc_ganxt_build(struct fchs_s *fchs, void *pyld, u32 s_id,
+			       u32 port_id);
+
+u16        fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo, u32 d_id,
+			      u32 s_id, u16 ox_id, wwn_t port_name);
+
+u16        fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id,
+				  u32 s_id, __be16 ox_id);
+
+u16        fc_fdmi_reqhdr_build(struct fchs_s *fchs, void *pyld, u32 s_id,
+				     u16 cmd_code);
+u16	fc_gmal_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn);
+u16	fc_gfn_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn);
+
+void		fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask);
+
+void		fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
+					 __be16 ox_id);
+
+enum fc_parse_status	fc_els_rsp_parse(struct fchs_s *fchs, int len);
+
+enum fc_parse_status	fc_plogi_rsp_parse(struct fchs_s *fchs, int len,
+					wwn_t port_name);
+
+enum fc_parse_status	fc_prli_parse(struct fc_prli_s *prli);
+
+enum fc_parse_status	fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name,
+					wwn_t port_name);
+
+u16 fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id,
+		u32 s_id, __be16 ox_id, u16 rx_id);
+
+int fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code);
+
+u16 fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc,
+		u32 d_id, u32 s_id, __be16 ox_id, int num_pages);
+
+u16 fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc,
+		u32 d_id, u32 s_id, __be16 ox_id, int num_pages);
+
+u16 fc_logo_rsp_parse(struct fchs_s *fchs, int len);
+
+u16 fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
+		u16 ox_id, wwn_t port_name, wwn_t node_name,
+		u16 pdu_size);
+
+u16 fc_pdisc_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name);
+
+u16 fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
+		u16 ox_id, int num_pages);
+
+u16 fc_prlo_rsp_parse(struct fchs_s *fchs, int len);
+
+u16 fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
+		u16 ox_id, int num_pages, enum fc_tprlo_type tprlo_type,
+		u32 tpr_id);
+
+u16 fc_tprlo_rsp_parse(struct fchs_s *fchs, int len);
+
+u16 fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
+		__be16 ox_id, u32 reason_code, u32 reason_expl);
+
+u16 fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
+		u32 port_id);
+
+u16 fc_ct_rsp_parse(struct ct_hdr_s *cthdr);
+
+u16 fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn, u32 s_id,
+		u16 ox_id);
+#endif
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
new file mode 100644
index 0000000..2c85f5b
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -0,0 +1,3911 @@
+/*
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014- QLogic Corporation.
+ * All rights reserved
+ * www.qlogic.com
+ *
+ * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include "bfad_drv.h"
+#include "bfa_modules.h"
+
+BFA_TRC_FILE(HAL, FCPIM);
+
+/*
+ *  BFA ITNIM Related definitions
+ */
+static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
+
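+/* tag-to-itnim lookup; the mask assumes num_itnims is a power of two */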
+#define BFA_ITNIM_FROM_TAG(_fcpim, _tag)                                \
+	(((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1))))
+
+#define bfa_fcpim_additn(__itnim)					\
+	list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q)
+#define bfa_fcpim_delitn(__itnim)	do {				\
+	WARN_ON(!bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim));   \
+	bfa_itnim_update_del_itn_stats(__itnim);      \
+	list_del(&(__itnim)->qe);      \
+	WARN_ON(!list_empty(&(__itnim)->io_q));				\
+	WARN_ON(!list_empty(&(__itnim)->io_cleanup_q));			\
+	WARN_ON(!list_empty(&(__itnim)->pending_q));			\
+} while (0)
+
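+/*
+ * Itnim event callbacks: delivered inline when the FCS drives this bfa
+ * instance (bfa->fcs is set), otherwise queued on the hcb queue and run
+ * later from the driver's completion path.
+ */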
+#define bfa_itnim_online_cb(__itnim) do {				\
+	if ((__itnim)->bfa->fcs)					\
+		bfa_cb_itnim_online((__itnim)->ditn);      \
+	else {								\
+		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,	\
+		__bfa_cb_itnim_online, (__itnim));      \
+	}								\
+} while (0)
+
+#define bfa_itnim_offline_cb(__itnim) do {				\
+	if ((__itnim)->bfa->fcs)					\
+		bfa_cb_itnim_offline((__itnim)->ditn);      \
+	else {								\
+		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,	\
+		__bfa_cb_itnim_offline, (__itnim));      \
+	}								\
+} while (0)
+
+#define bfa_itnim_sler_cb(__itnim) do {					\
+	if ((__itnim)->bfa->fcs)					\
+		bfa_cb_itnim_sler((__itnim)->ditn);      \
+	else {								\
+		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,	\
+		__bfa_cb_itnim_sler, (__itnim));      \
+	}								\
+} while (0)
+
+enum bfa_ioim_lm_ua_status {
+	BFA_IOIM_LM_UA_RESET = 0,
+	BFA_IOIM_LM_UA_SET = 1,
+};
+
+/*
+ *  itnim state machine event
+ */
+enum bfa_itnim_event {
+	BFA_ITNIM_SM_CREATE = 1,	/*  itnim is created */
+	BFA_ITNIM_SM_ONLINE = 2,	/*  itnim is online */
+	BFA_ITNIM_SM_OFFLINE = 3,	/*  itnim is offline */
+	BFA_ITNIM_SM_FWRSP = 4,		/*  firmware response */
+	BFA_ITNIM_SM_DELETE = 5,	/*  deleting an existing itnim */
+	BFA_ITNIM_SM_CLEANUP = 6,	/*  IO cleanup completion */
+	BFA_ITNIM_SM_SLER = 7,		/*  second level error recovery */
+	BFA_ITNIM_SM_HWFAIL = 8,	/*  IOC h/w failure event */
+	BFA_ITNIM_SM_QRESUME = 9,	/*  queue space available */
+};
+
+/*
+ *  BFA IOIM related definitions
+ */
+#define bfa_ioim_move_to_comp_q(__ioim) do {				\
+	list_del(&(__ioim)->qe);					\
+	list_add_tail(&(__ioim)->qe, &(__ioim)->fcpim->ioim_comp_q);	\
+} while (0)
+
+#define bfa_ioim_cb_profile_comp(__fcpim, __ioim) do {			\
+	if ((__fcpim)->profile_comp)					\
+		(__fcpim)->profile_comp(__ioim);			\
+} while (0)
+
+#define bfa_ioim_cb_profile_start(__fcpim, __ioim) do {			\
+	if ((__fcpim)->profile_start)					\
+		(__fcpim)->profile_start(__ioim);			\
+} while (0)
+
+/*
+ * IO state machine events
+ */
+enum bfa_ioim_event {
+	BFA_IOIM_SM_START	= 1,	/*  io start request from host */
+	BFA_IOIM_SM_COMP_GOOD	= 2,	/*  io good comp, resource free */
+	BFA_IOIM_SM_COMP	= 3,	/*  io comp, resource is free */
+	BFA_IOIM_SM_COMP_UTAG	= 4,	/*  io comp, resource is free */
+	BFA_IOIM_SM_DONE	= 5,	/*  io comp, resource not free */
+	BFA_IOIM_SM_FREE	= 6,	/*  io resource is freed */
+	BFA_IOIM_SM_ABORT	= 7,	/*  abort request from scsi stack */
+	BFA_IOIM_SM_ABORT_COMP	= 8,	/*  abort from f/w */
+	BFA_IOIM_SM_ABORT_DONE	= 9,	/*  abort completion from f/w */
+	BFA_IOIM_SM_QRESUME	= 10,	/*  CQ space available to queue IO */
+	BFA_IOIM_SM_SGALLOCED	= 11,	/*  SG page allocation successful */
+	BFA_IOIM_SM_SQRETRY	= 12,	/*  sequence recovery retry */
+	BFA_IOIM_SM_HCB		= 13,	/*  bfa callback complete */
+	BFA_IOIM_SM_CLEANUP	= 14,	/*  IO cleanup from itnim */
+	BFA_IOIM_SM_TMSTART	= 15,	/*  IO cleanup from tskim */
+	BFA_IOIM_SM_TMDONE	= 16,	/*  IO cleanup from tskim */
+	BFA_IOIM_SM_HWFAIL	= 17,	/*  IOC h/w failure event */
+	BFA_IOIM_SM_IOTOV	= 18,	/*  ITN offline TOV */
+};
+
+/*
+ *  BFA TSKIM related definitions
+ */
+
+/*
+ * task management completion handling
+ */
+#define bfa_tskim_qcomp(__tskim, __cbfn) do {				\
+	bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, __cbfn, (__tskim));\
+	bfa_tskim_notify_comp(__tskim);      \
+} while (0)
+
+#define bfa_tskim_notify_comp(__tskim) do {				\
+	if ((__tskim)->notify)						\
+		bfa_itnim_tskdone((__tskim)->itnim);      \
+} while (0)
+
+
+enum bfa_tskim_event {
+	BFA_TSKIM_SM_START	= 1,	/*  TM command start		*/
+	BFA_TSKIM_SM_DONE	= 2,	/*  TM completion		*/
+	BFA_TSKIM_SM_QRESUME	= 3,	/*  resume after qfull		*/
+	BFA_TSKIM_SM_HWFAIL	= 5,	/*  IOC h/w failure event	*/
+	BFA_TSKIM_SM_HCB	= 6,	/*  BFA callback completion	*/
+	BFA_TSKIM_SM_IOS_DONE	= 7,	/*  IO and sub TM completions	*/
+	BFA_TSKIM_SM_CLEANUP	= 8,	/*  TM cleanup on ITN offline	*/
+	BFA_TSKIM_SM_CLEANUP_DONE = 9,	/*  TM abort completion	*/
+	BFA_TSKIM_SM_UTAG	= 10,	/*  TM completion unknown tag  */
+};
+
+/*
+ * forward declaration for BFA ITNIM functions
+ */
+static void     bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim);
+static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim);
+static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim);
+static void     bfa_itnim_cleanp_comp(void *itnim_cbarg);
+static void     bfa_itnim_cleanup(struct bfa_itnim_s *itnim);
+static void     __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete);
+static void     __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete);
+static void     __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete);
+static void     bfa_itnim_iotov_online(struct bfa_itnim_s *itnim);
+static void     bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim);
+static void     bfa_itnim_iotov(void *itnim_arg);
+static void     bfa_itnim_iotov_start(struct bfa_itnim_s *itnim);
+static void     bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim);
+static void     bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim);
+
+/*
+ * forward declaration of ITNIM state machine
+ */
+static void     bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim,
+					enum bfa_itnim_event event);
+static void     bfa_itnim_sm_created(struct bfa_itnim_s *itnim,
+					enum bfa_itnim_event event);
+static void     bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim,
+					enum bfa_itnim_event event);
+static void     bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
+					enum bfa_itnim_event event);
+static void     bfa_itnim_sm_online(struct bfa_itnim_s *itnim,
+					enum bfa_itnim_event event);
+static void     bfa_itnim_sm_sler(struct bfa_itnim_s *itnim,
+					enum bfa_itnim_event event);
+static void     bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
+					enum bfa_itnim_event event);
+static void     bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
+					enum bfa_itnim_event event);
+static void     bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim,
+					enum bfa_itnim_event event);
+static void     bfa_itnim_sm_offline(struct bfa_itnim_s *itnim,
+					enum bfa_itnim_event event);
+static void     bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
+					enum bfa_itnim_event event);
+static void     bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim,
+					enum bfa_itnim_event event);
+static void     bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
+					enum bfa_itnim_event event);
+static void     bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
+					enum bfa_itnim_event event);
+static void     bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
+					enum bfa_itnim_event event);
+
+/*
+ * forward declaration for BFA IOIM functions
+ */
+static bfa_boolean_t	bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
+static bfa_boolean_t	bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim);
+static bfa_boolean_t	bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
+static void		bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
+static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
+static void __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete);
+static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
+static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
+static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
+static bfa_boolean_t    bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
+
+/*
+ * forward declaration of BFA IO state machine
+ */
+static void     bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
+					enum bfa_ioim_event event);
+static void     bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim,
+					enum bfa_ioim_event event);
+static void     bfa_ioim_sm_active(struct bfa_ioim_s *ioim,
+					enum bfa_ioim_event event);
+static void     bfa_ioim_sm_abort(struct bfa_ioim_s *ioim,
+					enum bfa_ioim_event event);
+static void     bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim,
+					enum bfa_ioim_event event);
+static void     bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim,
+					enum bfa_ioim_event event);
+static void     bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim,
+					enum bfa_ioim_event event);
+static void     bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim,
+					enum bfa_ioim_event event);
+static void     bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim,
+					enum bfa_ioim_event event);
+static void     bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim,
+					enum bfa_ioim_event event);
+static void     bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
+					enum bfa_ioim_event event);
+static void	bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim,
+					enum bfa_ioim_event event);
+/*
+ * forward declaration for BFA TSKIM functions
+ */
+static void     __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
+static void     __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete);
+static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim,
+					struct scsi_lun lun);
+static void     bfa_tskim_gather_ios(struct bfa_tskim_s *tskim);
+static void     bfa_tskim_cleanp_comp(void *tskim_cbarg);
+static void     bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim);
+static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim);
+static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
+static void     bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);
+
+/*
+ * forward declaration of BFA TSKIM state machine
+ */
+static void     bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
+					enum bfa_tskim_event event);
+static void     bfa_tskim_sm_active(struct bfa_tskim_s *tskim,
+					enum bfa_tskim_event event);
+static void     bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim,
+					enum bfa_tskim_event event);
+static void     bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim,
+					enum bfa_tskim_event event);
+static void     bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim,
+					enum bfa_tskim_event event);
+static void     bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
+					enum bfa_tskim_event event);
+static void     bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
+					enum bfa_tskim_event event);
+/*
+ *  BFA FCP Initiator Mode module
+ */
+
+/*
+ * Compute and return memory needed by FCP(im) module.
+ */
+static void
+bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
+{
+	bfa_itnim_meminfo(cfg, km_len);
+
+	/*
+	 * IO memory
+	 */
+	*km_len += cfg->fwcfg.num_ioim_reqs *
+	  (sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s));
+
+	/*
+	 * task management command memory
+	 */
+	if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN)
+		cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
+	*km_len += cfg->fwcfg.num_tskim_reqs * sizeof(struct bfa_tskim_s);
+}
+
+
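+/*
+ * Attach the FCP(im) module: initialize module parameters from the
+ * IOC configuration and attach the itnim, tskim and ioim sub-modules.
+ */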
+static void
+bfa_fcpim_attach(struct bfa_fcp_mod_s *fcp, void *bfad,
+		struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev)
+{
+	struct bfa_fcpim_s *fcpim = &fcp->fcpim;
+	struct bfa_s *bfa = fcp->bfa;
+
+	bfa_trc(bfa, cfg->drvcfg.path_tov);
+	bfa_trc(bfa, cfg->fwcfg.num_rports);
+	bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs);
+	bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs);
+
+	fcpim->fcp		= fcp;
+	fcpim->bfa		= bfa;
+	fcpim->num_itnims	= cfg->fwcfg.num_rports;
+	fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs;
+	fcpim->path_tov		= cfg->drvcfg.path_tov;
+	fcpim->delay_comp	= cfg->drvcfg.delay_comp;
+	fcpim->profile_comp = NULL;
+	fcpim->profile_start = NULL;
+
+	bfa_itnim_attach(fcpim);
+	bfa_tskim_attach(fcpim);
+	bfa_ioim_attach(fcpim);
+}
+
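+/*
+ * IOC failure handling: return unused tskim resources to the free list
+ * and notify every itnim of the h/w failure.
+ */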
+void
+bfa_fcpim_iocdisable(struct bfa_fcp_mod_s *fcp)
+{
+	struct bfa_fcpim_s *fcpim = &fcp->fcpim;
+	struct bfa_itnim_s *itnim;
+	struct list_head *qe, *qen;
+
+	/* Enqueue unused tskim resources to free_q */
+	list_splice_tail_init(&fcpim->tskim_unused_q, &fcpim->tskim_free_q);
+
+	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
+		itnim = (struct bfa_itnim_s *) qe;
+		bfa_itnim_iocdisable(itnim);
+	}
+}
+
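+/*
+ * Set the path timeout: the caller passes seconds, which is stored
+ * internally in milliseconds and capped at BFA_FCPIM_PATHTOV_MAX
+ * (e.g. a value of 30 is stored as 30000 ms).
+ */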
+void
+bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
+{
+	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
+
+	fcpim->path_tov = path_tov * 1000;
+	if (fcpim->path_tov > BFA_FCPIM_PATHTOV_MAX)
+		fcpim->path_tov = BFA_FCPIM_PATHTOV_MAX;
+}
+
+u16
+bfa_fcpim_path_tov_get(struct bfa_s *bfa)
+{
+	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
+
+	return fcpim->path_tov / 1000;
+}
+
+#define bfa_fcpim_add_iostats(__l, __r, __stats)	\
+	(__l->__stats += __r->__stats)
+
+void
+bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
+		struct bfa_itnim_iostats_s *rstats)
+{
+	bfa_fcpim_add_iostats(lstats, rstats, total_ios);
+	bfa_fcpim_add_iostats(lstats, rstats, qresumes);
+	bfa_fcpim_add_iostats(lstats, rstats, no_iotags);
+	bfa_fcpim_add_iostats(lstats, rstats, io_aborts);
+	bfa_fcpim_add_iostats(lstats, rstats, no_tskims);
+	bfa_fcpim_add_iostats(lstats, rstats, iocomp_ok);
+	bfa_fcpim_add_iostats(lstats, rstats, iocomp_underrun);
+	bfa_fcpim_add_iostats(lstats, rstats, iocomp_overrun);
+	bfa_fcpim_add_iostats(lstats, rstats, iocomp_aborted);
+	bfa_fcpim_add_iostats(lstats, rstats, iocomp_timedout);
+	bfa_fcpim_add_iostats(lstats, rstats, iocom_nexus_abort);
+	bfa_fcpim_add_iostats(lstats, rstats, iocom_proto_err);
+	bfa_fcpim_add_iostats(lstats, rstats, iocom_dif_err);
+	bfa_fcpim_add_iostats(lstats, rstats, iocom_sqer_needed);
+	bfa_fcpim_add_iostats(lstats, rstats, iocom_res_free);
+	bfa_fcpim_add_iostats(lstats, rstats, iocom_hostabrts);
+	bfa_fcpim_add_iostats(lstats, rstats, iocom_utags);
+	bfa_fcpim_add_iostats(lstats, rstats, io_cleanups);
+	bfa_fcpim_add_iostats(lstats, rstats, io_tmaborts);
+	bfa_fcpim_add_iostats(lstats, rstats, onlines);
+	bfa_fcpim_add_iostats(lstats, rstats, offlines);
+	bfa_fcpim_add_iostats(lstats, rstats, creates);
+	bfa_fcpim_add_iostats(lstats, rstats, deletes);
+	bfa_fcpim_add_iostats(lstats, rstats, create_comps);
+	bfa_fcpim_add_iostats(lstats, rstats, delete_comps);
+	bfa_fcpim_add_iostats(lstats, rstats, sler_events);
+	bfa_fcpim_add_iostats(lstats, rstats, fw_create);
+	bfa_fcpim_add_iostats(lstats, rstats, fw_delete);
+	bfa_fcpim_add_iostats(lstats, rstats, ioc_disabled);
+	bfa_fcpim_add_iostats(lstats, rstats, cleanup_comps);
+	bfa_fcpim_add_iostats(lstats, rstats, tm_cmnds);
+	bfa_fcpim_add_iostats(lstats, rstats, tm_fw_rsps);
+	bfa_fcpim_add_iostats(lstats, rstats, tm_success);
+	bfa_fcpim_add_iostats(lstats, rstats, tm_failures);
+	bfa_fcpim_add_iostats(lstats, rstats, tm_io_comps);
+	bfa_fcpim_add_iostats(lstats, rstats, tm_qresumes);
+	bfa_fcpim_add_iostats(lstats, rstats, tm_iocdowns);
+	bfa_fcpim_add_iostats(lstats, rstats, tm_cleanups);
+	bfa_fcpim_add_iostats(lstats, rstats, tm_cleanup_comps);
+	bfa_fcpim_add_iostats(lstats, rstats, io_comps);
+	bfa_fcpim_add_iostats(lstats, rstats, input_reqs);
+	bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
+	bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
+	bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
+}
+
+bfa_status_t
+bfa_fcpim_port_iostats(struct bfa_s *bfa,
+		struct bfa_itnim_iostats_s *stats, u8 lp_tag)
+{
+	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
+	struct list_head *qe, *qen;
+	struct bfa_itnim_s *itnim;
+
+	/* accumulate IO stats from itnim */
+	memset(stats, 0, sizeof(struct bfa_itnim_iostats_s));
+	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
+		itnim = (struct bfa_itnim_s *) qe;
+		if (itnim->rport->rport_info.lp_tag != lp_tag)
+			continue;
+		bfa_fcpim_add_stats(stats, &(itnim->stats));
+	}
+	return BFA_STATUS_OK;
+}
+
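+/*
+ * IO completion profiling: bucket the IO by transfer size and record
+ * min/max/avg latency (in jiffies) for that bucket.
+ */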
+void
+bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
+{
+	struct bfa_itnim_latency_s *io_lat =
+			&(ioim->itnim->ioprofile.io_latency);
+	u32 val, idx;
+
+	val = (u32)(jiffies - ioim->start_time);
+	idx = bfa_ioim_get_index(scsi_bufflen((struct scsi_cmnd *)ioim->dio));
+	bfa_itnim_ioprofile_update(ioim->itnim, idx);
+
+	io_lat->count[idx]++;
+	io_lat->min[idx] = (io_lat->min[idx] < val) ? io_lat->min[idx] : val;
+	io_lat->max[idx] = (io_lat->max[idx] > val) ? io_lat->max[idx] : val;
+	io_lat->avg[idx] += val;
+}
+
+void
+bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
+{
+	ioim->start_time = jiffies;
+}
+
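+/*
+ * Enable IO profiling: reset per-itnim stats and install the profiling
+ * hooks invoked at IO start and completion.
+ */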
+bfa_status_t
+bfa_fcpim_profile_on(struct bfa_s *bfa, time64_t time)
+{
+	struct bfa_itnim_s *itnim;
+	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
+	struct list_head *qe, *qen;
+
+	/* clear IO stats of all itnims */
+	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
+		itnim = (struct bfa_itnim_s *) qe;
+		bfa_itnim_clear_stats(itnim);
+	}
+	fcpim->io_profile = BFA_TRUE;
+	fcpim->io_profile_start_time = time;
+	fcpim->profile_comp = bfa_ioim_profile_comp;
+	fcpim->profile_start = bfa_ioim_profile_start;
+	return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_fcpim_profile_off(struct bfa_s *bfa)
+{
+	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
+	fcpim->io_profile = BFA_FALSE;
+	fcpim->io_profile_start_time = 0;
+	fcpim->profile_comp = NULL;
+	fcpim->profile_start = NULL;
+	return BFA_STATUS_OK;
+}
+
+u16
+bfa_fcpim_qdepth_get(struct bfa_s *bfa)
+{
+	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
+
+	return fcpim->q_depth;
+}
+
+/*
+ *  BFA ITNIM module state machine functions
+ */
+
+/*
+ * Beginning/unallocated state - only the create event is expected.
+ */
+static void
+bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
+{
+	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
+	bfa_trc(itnim->bfa, event);
+
+	switch (event) {
+	case BFA_ITNIM_SM_CREATE:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_created);
+		itnim->is_online = BFA_FALSE;
+		bfa_fcpim_additn(itnim);
+		break;
+
+	default:
+		bfa_sm_fault(itnim->bfa, event);
+	}
+}
+
+/*
+ * Beginning state, only online event expected.
+ */
+static void
+bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
+{
+	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
+	bfa_trc(itnim->bfa, event);
+
+	switch (event) {
+	case BFA_ITNIM_SM_ONLINE:
+		if (bfa_itnim_send_fwcreate(itnim))
+			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
+		else
+			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
+		break;
+
+	case BFA_ITNIM_SM_DELETE:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
+		bfa_fcpim_delitn(itnim);
+		break;
+
+	case BFA_ITNIM_SM_HWFAIL:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
+		break;
+
+	default:
+		bfa_sm_fault(itnim->bfa, event);
+	}
+}
+
+/*
+ * Waiting for itnim create response from firmware.
+ */
+static void
+bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
+{
+	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
+	bfa_trc(itnim->bfa, event);
+
+	switch (event) {
+	case BFA_ITNIM_SM_FWRSP:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_online);
+		itnim->is_online = BFA_TRUE;
+		bfa_itnim_iotov_online(itnim);
+		bfa_itnim_online_cb(itnim);
+		break;
+
+	case BFA_ITNIM_SM_DELETE:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_delete_pending);
+		break;
+
+	case BFA_ITNIM_SM_OFFLINE:
+		if (bfa_itnim_send_fwdelete(itnim))
+			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
+		else
+			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
+		break;
+
+	case BFA_ITNIM_SM_HWFAIL:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
+		break;
+
+	default:
+		bfa_sm_fault(itnim->bfa, event);
+	}
+}
+
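+/*
+ * Waiting for room in the request queue to send the firmware create.
+ */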
+static void
+bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
+			enum bfa_itnim_event event)
+{
+	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
+	bfa_trc(itnim->bfa, event);
+
+	switch (event) {
+	case BFA_ITNIM_SM_QRESUME:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
+		bfa_itnim_send_fwcreate(itnim);
+		break;
+
+	case BFA_ITNIM_SM_DELETE:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
+		bfa_reqq_wcancel(&itnim->reqq_wait);
+		bfa_fcpim_delitn(itnim);
+		break;
+
+	case BFA_ITNIM_SM_OFFLINE:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
+		bfa_reqq_wcancel(&itnim->reqq_wait);
+		bfa_itnim_offline_cb(itnim);
+		break;
+
+	case BFA_ITNIM_SM_HWFAIL:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
+		bfa_reqq_wcancel(&itnim->reqq_wait);
+		break;
+
+	default:
+		bfa_sm_fault(itnim->bfa, event);
+	}
+}
+
+/*
+ * Waiting for itnim create response from firmware, a delete is pending.
+ */
+static void
+bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
+				enum bfa_itnim_event event)
+{
+	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
+	bfa_trc(itnim->bfa, event);
+
+	switch (event) {
+	case BFA_ITNIM_SM_FWRSP:
+		if (bfa_itnim_send_fwdelete(itnim))
+			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
+		else
+			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
+		break;
+
+	case BFA_ITNIM_SM_HWFAIL:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
+		bfa_fcpim_delitn(itnim);
+		break;
+
+	default:
+		bfa_sm_fault(itnim->bfa, event);
+	}
+}
+
+/*
+ * Online state - normal parking state.
+ */
+static void
+bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
+{
+	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
+	bfa_trc(itnim->bfa, event);
+
+	switch (event) {
+	case BFA_ITNIM_SM_OFFLINE:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
+		itnim->is_online = BFA_FALSE;
+		bfa_itnim_iotov_start(itnim);
+		bfa_itnim_cleanup(itnim);
+		break;
+
+	case BFA_ITNIM_SM_DELETE:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
+		itnim->is_online = BFA_FALSE;
+		bfa_itnim_cleanup(itnim);
+		break;
+
+	case BFA_ITNIM_SM_SLER:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_sler);
+		itnim->is_online = BFA_FALSE;
+		bfa_itnim_iotov_start(itnim);
+		bfa_itnim_sler_cb(itnim);
+		break;
+
+	case BFA_ITNIM_SM_HWFAIL:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
+		itnim->is_online = BFA_FALSE;
+		bfa_itnim_iotov_start(itnim);
+		bfa_itnim_iocdisable_cleanup(itnim);
+		break;
+
+	default:
+		bfa_sm_fault(itnim->bfa, event);
+	}
+}
+
+/*
+ * Second level error recovery is needed.
+ */
+static void
+bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
+{
+	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
+	bfa_trc(itnim->bfa, event);
+
+	switch (event) {
+	case BFA_ITNIM_SM_OFFLINE:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
+		bfa_itnim_cleanup(itnim);
+		break;
+
+	case BFA_ITNIM_SM_DELETE:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
+		bfa_itnim_cleanup(itnim);
+		bfa_itnim_iotov_delete(itnim);
+		break;
+
+	case BFA_ITNIM_SM_HWFAIL:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
+		bfa_itnim_iocdisable_cleanup(itnim);
+		break;
+
+	default:
+		bfa_sm_fault(itnim->bfa, event);
+	}
+}
+
+/*
+ * Going offline. Waiting for active IO cleanup.
+ */
+static void
+bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
+				 enum bfa_itnim_event event)
+{
+	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
+	bfa_trc(itnim->bfa, event);
+
+	switch (event) {
+	case BFA_ITNIM_SM_CLEANUP:
+		if (bfa_itnim_send_fwdelete(itnim))
+			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
+		else
+			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
+		break;
+
+	case BFA_ITNIM_SM_DELETE:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
+		bfa_itnim_iotov_delete(itnim);
+		break;
+
+	case BFA_ITNIM_SM_HWFAIL:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
+		bfa_itnim_iocdisable_cleanup(itnim);
+		bfa_itnim_offline_cb(itnim);
+		break;
+
+	case BFA_ITNIM_SM_SLER:
+		break;
+
+	default:
+		bfa_sm_fault(itnim->bfa, event);
+	}
+}
+
+/*
+ * Deleting itnim. Waiting for active IO cleanup.
+ */
+static void
+bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
+				enum bfa_itnim_event event)
+{
+	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
+	bfa_trc(itnim->bfa, event);
+
+	switch (event) {
+	case BFA_ITNIM_SM_CLEANUP:
+		if (bfa_itnim_send_fwdelete(itnim))
+			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
+		else
+			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
+		break;
+
+	case BFA_ITNIM_SM_HWFAIL:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
+		bfa_itnim_iocdisable_cleanup(itnim);
+		break;
+
+	default:
+		bfa_sm_fault(itnim->bfa, event);
+	}
+}
+
+/*
+ * Rport offline. Firmware itnim is being deleted - awaiting f/w response.
+ */
+static void
+bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
+{
+	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
+	bfa_trc(itnim->bfa, event);
+
+	switch (event) {
+	case BFA_ITNIM_SM_FWRSP:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
+		bfa_itnim_offline_cb(itnim);
+		break;
+
+	case BFA_ITNIM_SM_DELETE:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
+		break;
+
+	case BFA_ITNIM_SM_HWFAIL:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
+		bfa_itnim_offline_cb(itnim);
+		break;
+
+	default:
+		bfa_sm_fault(itnim->bfa, event);
+	}
+}
+
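+/*
+ * Waiting for room in the request queue to send the firmware delete.
+ */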
+static void
+bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
+			enum bfa_itnim_event event)
+{
+	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
+	bfa_trc(itnim->bfa, event);
+
+	switch (event) {
+	case BFA_ITNIM_SM_QRESUME:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
+		bfa_itnim_send_fwdelete(itnim);
+		break;
+
+	case BFA_ITNIM_SM_DELETE:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
+		break;
+
+	case BFA_ITNIM_SM_HWFAIL:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
+		bfa_reqq_wcancel(&itnim->reqq_wait);
+		bfa_itnim_offline_cb(itnim);
+		break;
+
+	default:
+		bfa_sm_fault(itnim->bfa, event);
+	}
+}
+
+/*
+ * Offline state.
+ */
+static void
+bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
+{
+	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
+	bfa_trc(itnim->bfa, event);
+
+	switch (event) {
+	case BFA_ITNIM_SM_DELETE:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
+		bfa_itnim_iotov_delete(itnim);
+		bfa_fcpim_delitn(itnim);
+		break;
+
+	case BFA_ITNIM_SM_ONLINE:
+		if (bfa_itnim_send_fwcreate(itnim))
+			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
+		else
+			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
+		break;
+
+	case BFA_ITNIM_SM_HWFAIL:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
+		break;
+
+	default:
+		bfa_sm_fault(itnim->bfa, event);
+	}
+}
+
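+/*
+ * IOC h/w failed state; waiting for delete, offline ack or re-online.
+ */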
+static void
+bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
+				enum bfa_itnim_event event)
+{
+	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
+	bfa_trc(itnim->bfa, event);
+
+	switch (event) {
+	case BFA_ITNIM_SM_DELETE:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
+		bfa_itnim_iotov_delete(itnim);
+		bfa_fcpim_delitn(itnim);
+		break;
+
+	case BFA_ITNIM_SM_OFFLINE:
+		bfa_itnim_offline_cb(itnim);
+		break;
+
+	case BFA_ITNIM_SM_ONLINE:
+		if (bfa_itnim_send_fwcreate(itnim))
+			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
+		else
+			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
+		break;
+
+	case BFA_ITNIM_SM_HWFAIL:
+		break;
+
+	default:
+		bfa_sm_fault(itnim->bfa, event);
+	}
+}
+
+/*
+ * Itnim is deleted, waiting for firmware response to delete.
+ */
+static void
+bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
+{
+	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
+	bfa_trc(itnim->bfa, event);
+
+	switch (event) {
+	case BFA_ITNIM_SM_FWRSP:
+	case BFA_ITNIM_SM_HWFAIL:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
+		bfa_fcpim_delitn(itnim);
+		break;
+
+	default:
+		bfa_sm_fault(itnim->bfa, event);
+	}
+}
+
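+/*
+ * Itnim is being deleted; waiting for room in the request queue to
+ * send the firmware delete.
+ */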
+static void
+bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
+		enum bfa_itnim_event event)
+{
+	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
+	bfa_trc(itnim->bfa, event);
+
+	switch (event) {
+	case BFA_ITNIM_SM_QRESUME:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
+		bfa_itnim_send_fwdelete(itnim);
+		break;
+
+	case BFA_ITNIM_SM_HWFAIL:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
+		bfa_reqq_wcancel(&itnim->reqq_wait);
+		bfa_fcpim_delitn(itnim);
+		break;
+
+	default:
+		bfa_sm_fault(itnim->bfa, event);
+	}
+}
+
+/*
+ * Initiate cleanup of all IOs on an IOC failure.
+ */
+static void
+bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
+{
+	struct bfa_tskim_s *tskim;
+	struct bfa_ioim_s *ioim;
+	struct list_head	*qe, *qen;
+
+	list_for_each_safe(qe, qen, &itnim->tsk_q) {
+		tskim = (struct bfa_tskim_s *) qe;
+		bfa_tskim_iocdisable(tskim);
+	}
+
+	list_for_each_safe(qe, qen, &itnim->io_q) {
+		ioim = (struct bfa_ioim_s *) qe;
+		bfa_ioim_iocdisable(ioim);
+	}
+
+	/*
+	 * For IO requests in the pending queue, we pretend an early timeout.
+	 */
+	list_for_each_safe(qe, qen, &itnim->pending_q) {
+		ioim = (struct bfa_ioim_s *) qe;
+		bfa_ioim_tov(ioim);
+	}
+
+	list_for_each_safe(qe, qen, &itnim->io_cleanup_q) {
+		ioim = (struct bfa_ioim_s *) qe;
+		bfa_ioim_iocdisable(ioim);
+	}
+}
+
+/*
+ * IO cleanup completion
+ */
+static void
+bfa_itnim_cleanp_comp(void *itnim_cbarg)
+{
+	struct bfa_itnim_s *itnim = itnim_cbarg;
+
+	bfa_stats(itnim, cleanup_comps);
+	bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP);
+}
+
+/*
+ * Initiate cleanup of all IOs.
+ */
+static void
+bfa_itnim_cleanup(struct bfa_itnim_s *itnim)
+{
+	struct bfa_ioim_s  *ioim;
+	struct bfa_tskim_s *tskim;
+	struct list_head	*qe, *qen;
+
+	bfa_wc_init(&itnim->wc, bfa_itnim_cleanp_comp, itnim);
+
+	list_for_each_safe(qe, qen, &itnim->io_q) {
+		ioim = (struct bfa_ioim_s *) qe;
+
+		/*
+		 * Move the IO from the active queue to the cleanup queue so
+		 * that a later TM will not pick it up.
+		 */
+		list_del(&ioim->qe);
+		list_add_tail(&ioim->qe, &itnim->io_cleanup_q);
+
+		bfa_wc_up(&itnim->wc);
+		bfa_ioim_cleanup(ioim);
+	}
+
+	list_for_each_safe(qe, qen, &itnim->tsk_q) {
+		tskim = (struct bfa_tskim_s *) qe;
+		bfa_wc_up(&itnim->wc);
+		bfa_tskim_cleanup(tskim);
+	}
+
+	bfa_wc_wait(&itnim->wc);
+}
+
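+/*
+ * Deferred itnim notification callbacks; 'complete' is BFA_FALSE when
+ * the queued callback is flushed rather than executed normally.
+ */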
+static void
+__bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete)
+{
+	struct bfa_itnim_s *itnim = cbarg;
+
+	if (complete)
+		bfa_cb_itnim_online(itnim->ditn);
+}
+
+static void
+__bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete)
+{
+	struct bfa_itnim_s *itnim = cbarg;
+
+	if (complete)
+		bfa_cb_itnim_offline(itnim->ditn);
+}
+
+static void
+__bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete)
+{
+	struct bfa_itnim_s *itnim = cbarg;
+
+	if (complete)
+		bfa_cb_itnim_sler(itnim->ditn);
+}
+
+/*
+ * Call to resume any I/O requests waiting for room in request queue.
+ */
+static void
+bfa_itnim_qresume(void *cbarg)
+{
+	struct bfa_itnim_s *itnim = cbarg;
+
+	bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME);
+}
+
+/*
+ *  bfa_itnim_public
+ */
+
+void
+bfa_itnim_iodone(struct bfa_itnim_s *itnim)
+{
+	bfa_wc_down(&itnim->wc);
+}
+
+void
+bfa_itnim_tskdone(struct bfa_itnim_s *itnim)
+{
+	bfa_wc_down(&itnim->wc);
+}
+
+void
+bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
+{
+	/*
+	 * ITN memory
+	 */
+	*km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s);
+}
+
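+/*
+ * Carve the per-itnim structures out of the module KVA block and
+ * initialize each itnim in the uninit state.
+ */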
+void
+bfa_itnim_attach(struct bfa_fcpim_s *fcpim)
+{
+	struct bfa_s	*bfa = fcpim->bfa;
+	struct bfa_fcp_mod_s	*fcp = fcpim->fcp;
+	struct bfa_itnim_s *itnim;
+	int	i, j;
+
+	INIT_LIST_HEAD(&fcpim->itnim_q);
+
+	itnim = (struct bfa_itnim_s *) bfa_mem_kva_curp(fcp);
+	fcpim->itnim_arr = itnim;
+
+	for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
+		memset(itnim, 0, sizeof(struct bfa_itnim_s));
+		itnim->bfa = bfa;
+		itnim->fcpim = fcpim;
+		itnim->reqq = BFA_REQQ_QOS_LO;
+		itnim->rport = BFA_RPORT_FROM_TAG(bfa, i);
+		itnim->iotov_active = BFA_FALSE;
+		bfa_reqq_winit(&itnim->reqq_wait, bfa_itnim_qresume, itnim);
+
+		INIT_LIST_HEAD(&itnim->io_q);
+		INIT_LIST_HEAD(&itnim->io_cleanup_q);
+		INIT_LIST_HEAD(&itnim->pending_q);
+		INIT_LIST_HEAD(&itnim->tsk_q);
+		INIT_LIST_HEAD(&itnim->delay_comp_q);
+		for (j = 0; j < BFA_IOBUCKET_MAX; j++)
+			itnim->ioprofile.io_latency.min[j] = ~0;
+		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
+	}
+
+	bfa_mem_kva_curp(fcp) = (u8 *) itnim;
+}
+
+void
+bfa_itnim_iocdisable(struct bfa_itnim_s *itnim)
+{
+	bfa_stats(itnim, ioc_disabled);
+	bfa_sm_send_event(itnim, BFA_ITNIM_SM_HWFAIL);
+}
+
+static bfa_boolean_t
+bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
+{
+	struct bfi_itn_create_req_s *m;
+
+	itnim->msg_no++;
+
+	/*
+	 * check for room in queue to send request now
+	 */
+	m = bfa_reqq_next(itnim->bfa, itnim->reqq);
+	if (!m) {
+		bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
+		return BFA_FALSE;
+	}
+
+	bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_CREATE_REQ,
+			bfa_fn_lpu(itnim->bfa));
+	m->fw_handle = itnim->rport->fw_handle;
+	m->class = FC_CLASS_3;
+	m->seq_rec = itnim->seq_rec;
+	m->msg_no = itnim->msg_no;
+	bfa_stats(itnim, fw_create);
+
+	/*
+	 * queue I/O message to firmware
+	 */
+	bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh);
+	return BFA_TRUE;
+}
+
+static bfa_boolean_t
+bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
+{
+	struct bfi_itn_delete_req_s *m;
+
+	/*
+	 * check for room in queue to send request now
+	 */
+	m = bfa_reqq_next(itnim->bfa, itnim->reqq);
+	if (!m) {
+		bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
+		return BFA_FALSE;
+	}
+
+	bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_DELETE_REQ,
+			bfa_fn_lpu(itnim->bfa));
+	m->fw_handle = itnim->rport->fw_handle;
+	bfa_stats(itnim, fw_delete);
+
+	/*
+	 * queue I/O message to firmware
+	 */
+	bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh);
+	return BFA_TRUE;
+}
+
+/*
+ * Cleanup all pending failed inflight requests.
+ */
+static void
+bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov)
+{
+	struct bfa_ioim_s *ioim;
+	struct list_head *qe, *qen;
+
+	list_for_each_safe(qe, qen, &itnim->delay_comp_q) {
+		ioim = (struct bfa_ioim_s *)qe;
+		bfa_ioim_delayed_comp(ioim, iotov);
+	}
+}
+
+/*
+ * Start all pending IO requests.
+ */
+static void
+bfa_itnim_iotov_online(struct bfa_itnim_s *itnim)
+{
+	struct bfa_ioim_s *ioim;
+
+	bfa_itnim_iotov_stop(itnim);
+
+	/*
+	 * Abort all inflight IO requests in the queue
+	 */
+	bfa_itnim_delayed_comp(itnim, BFA_FALSE);
+
+	/*
+	 * Start all pending IO requests.
+	 */
+	while (!list_empty(&itnim->pending_q)) {
+		bfa_q_deq(&itnim->pending_q, &ioim);
+		list_add_tail(&ioim->qe, &itnim->io_q);
+		bfa_ioim_start(ioim);
+	}
+}
+
+/*
+ * Fail all pending IO requests
+ */
+static void
+bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim)
+{
+	struct bfa_ioim_s *ioim;
+
+	/*
+	 * Fail all inflight IO requests in the queue
+	 */
+	bfa_itnim_delayed_comp(itnim, BFA_TRUE);
+
+	/*
+	 * Fail any pending IO requests.
+	 */
+	while (!list_empty(&itnim->pending_q)) {
+		bfa_q_deq(&itnim->pending_q, &ioim);
+		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
+		bfa_ioim_tov(ioim);
+	}
+}
+
+/*
+ * IO TOV timer callback. Fail any pending IO requests.
+ */
+static void
+bfa_itnim_iotov(void *itnim_arg)
+{
+	struct bfa_itnim_s *itnim = itnim_arg;
+
+	itnim->iotov_active = BFA_FALSE;
+
+	bfa_cb_itnim_tov_begin(itnim->ditn);
+	bfa_itnim_iotov_cleanup(itnim);
+	bfa_cb_itnim_tov(itnim->ditn);
+}
+
+/*
+ * Start IO TOV timer for failing back pending IO requests in offline state.
+ */
+static void
+bfa_itnim_iotov_start(struct bfa_itnim_s *itnim)
+{
+	if (itnim->fcpim->path_tov > 0) {
+		itnim->iotov_active = BFA_TRUE;
+		WARN_ON(!bfa_itnim_hold_io(itnim));
+		bfa_timer_start(itnim->bfa, &itnim->timer,
+			bfa_itnim_iotov, itnim, itnim->fcpim->path_tov);
+	}
+}
+
+/*
+ * Stop IO TOV timer.
+ */
+static void
+bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim)
+{
+	if (itnim->iotov_active) {
+		itnim->iotov_active = BFA_FALSE;
+		bfa_timer_stop(&itnim->timer);
+	}
+}
+
+/*
+ * Stop the IO TOV timer and immediately fail back any pending IO requests.
+ */
+static void
+bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim)
+{
+	bfa_boolean_t pathtov_active = BFA_FALSE;
+
+	if (itnim->iotov_active)
+		pathtov_active = BFA_TRUE;
+
+	bfa_itnim_iotov_stop(itnim);
+	if (pathtov_active)
+		bfa_cb_itnim_tov_begin(itnim->ditn);
+	bfa_itnim_iotov_cleanup(itnim);
+	if (pathtov_active)
+		bfa_cb_itnim_tov(itnim->ditn);
+}
+
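+/*
+ * Fold the stats of an itnim being deleted into the module-wide
+ * deleted-itnim statistics.
+ */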
+static void
+bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
+{
+	struct bfa_fcpim_s *fcpim = BFA_FCPIM(itnim->bfa);
+	fcpim->del_itn_stats.del_itn_iocomp_aborted +=
+		itnim->stats.iocomp_aborted;
+	fcpim->del_itn_stats.del_itn_iocomp_timedout +=
+		itnim->stats.iocomp_timedout;
+	fcpim->del_itn_stats.del_itn_iocom_sqer_needed +=
+		itnim->stats.iocom_sqer_needed;
+	fcpim->del_itn_stats.del_itn_iocom_res_free +=
+		itnim->stats.iocom_res_free;
+	fcpim->del_itn_stats.del_itn_iocom_hostabrts +=
+		itnim->stats.iocom_hostabrts;
+	fcpim->del_itn_stats.del_itn_total_ios += itnim->stats.total_ios;
+	fcpim->del_itn_stats.del_io_iocdowns += itnim->stats.io_iocdowns;
+	fcpim->del_itn_stats.del_tm_iocdowns += itnim->stats.tm_iocdowns;
+}
+
+/*
+ * bfa_itnim_public
+ */
+
+/*
+ * Itnim interrupt processing.
+ */
+void
+bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
+{
+	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
+	union bfi_itn_i2h_msg_u msg;
+	struct bfa_itnim_s *itnim;
+
+	bfa_trc(bfa, m->mhdr.msg_id);
+
+	msg.msg = m;
+
+	switch (m->mhdr.msg_id) {
+	case BFI_ITN_I2H_CREATE_RSP:
+		itnim = BFA_ITNIM_FROM_TAG(fcpim,
+						msg.create_rsp->bfa_handle);
+		WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
+		bfa_stats(itnim, create_comps);
+		bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
+		break;
+
+	case BFI_ITN_I2H_DELETE_RSP:
+		itnim = BFA_ITNIM_FROM_TAG(fcpim,
+						msg.delete_rsp->bfa_handle);
+		WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
+		bfa_stats(itnim, delete_comps);
+		bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
+		break;
+
+	case BFI_ITN_I2H_SLER_EVENT:
+		itnim = BFA_ITNIM_FROM_TAG(fcpim,
+						msg.sler_event->bfa_handle);
+		bfa_stats(itnim, sler_events);
+		bfa_sm_send_event(itnim, BFA_ITNIM_SM_SLER);
+		break;
+
+	default:
+		bfa_trc(bfa, m->mhdr.msg_id);
+		WARN_ON(1);
+	}
+}
+
+/*
+ * bfa_itnim_api
+ */
+
+struct bfa_itnim_s *
+bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn)
+{
+	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
+	struct bfa_itnim_s *itnim;
+
+	bfa_itn_create(bfa, rport, bfa_itnim_isr);
+
+	itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag);
+	WARN_ON(itnim->rport != rport);
+
+	itnim->ditn = ditn;
+
+	bfa_stats(itnim, creates);
+	bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE);
+
+	return itnim;
+}
+
+void
+bfa_itnim_delete(struct bfa_itnim_s *itnim)
+{
+	bfa_stats(itnim, deletes);
+	bfa_sm_send_event(itnim, BFA_ITNIM_SM_DELETE);
+}
+
+void
+bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec)
+{
+	itnim->seq_rec = seq_rec;
+	bfa_stats(itnim, onlines);
+	bfa_sm_send_event(itnim, BFA_ITNIM_SM_ONLINE);
+}
+
+void
+bfa_itnim_offline(struct bfa_itnim_s *itnim)
+{
+	bfa_stats(itnim, offlines);
+	bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE);
+}
+
+/*
+ * Return true if the itnim is considered offline, for holding off IO requests.
+ * IO is not held if itnim is being deleted.
+ */
+bfa_boolean_t
+bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
+{
+	return itnim->fcpim->path_tov && itnim->iotov_active &&
+		(bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) ||
+		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) ||
+		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_cleanup_offline) ||
+		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) ||
+		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) ||
+		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable));
+}
+
+#define bfa_io_lat_clock_res_div	HZ
+#define bfa_io_lat_clock_res_mul	1000
+bfa_status_t
+bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
+			struct bfa_itnim_ioprofile_s *ioprofile)
+{
+	struct bfa_fcpim_s *fcpim;
+
+	if (!itnim)
+		return BFA_STATUS_NO_FCPIM_NEXUS;
+
+	fcpim = BFA_FCPIM(itnim->bfa);
+
+	if (!fcpim->io_profile)
+		return BFA_STATUS_IOPROFILE_OFF;
+
+	itnim->ioprofile.index = BFA_IOBUCKET_MAX;
+	/* unsigned 32-bit time_t overflow here in y2106 */
+	itnim->ioprofile.io_profile_start_time =
+				bfa_io_profile_start_time(itnim->bfa);
+	itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul;
+	itnim->ioprofile.clock_res_div = bfa_io_lat_clock_res_div;
+	*ioprofile = itnim->ioprofile;
+
+	return BFA_STATUS_OK;
+}
+
+void
+bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
+{
+	int j;
+
+	if (!itnim)
+		return;
+
+	memset(&itnim->stats, 0, sizeof(itnim->stats));
+	memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile));
+	for (j = 0; j < BFA_IOBUCKET_MAX; j++)
+		itnim->ioprofile.io_latency.min[j] = ~0;
+}
+
+/*
+ *  BFA IO module state machine functions
+ */
+
+/*
+ * IO is not started (unallocated).
+ */
+static void
+bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+	switch (event) {
+	case BFA_IOIM_SM_START:
+		if (!bfa_itnim_is_online(ioim->itnim)) {
+			if (!bfa_itnim_hold_io(ioim->itnim)) {
+				bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+				list_del(&ioim->qe);
+				list_add_tail(&ioim->qe,
+					&ioim->fcpim->ioim_comp_q);
+				bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
+						__bfa_cb_ioim_pathtov, ioim);
+			} else {
+				list_del(&ioim->qe);
+				list_add_tail(&ioim->qe,
+					&ioim->itnim->pending_q);
+			}
+			break;
+		}
+
+		if (ioim->nsges > BFI_SGE_INLINE) {
+			if (!bfa_ioim_sgpg_alloc(ioim)) {
+				bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
+				return;
+			}
+		}
+
+		if (!bfa_ioim_send_ioreq(ioim)) {
+			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
+			break;
+		}
+
+		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
+		break;
+
+	case BFA_IOIM_SM_IOTOV:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_ioim_move_to_comp_q(ioim);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
+				__bfa_cb_ioim_pathtov, ioim);
+		break;
+
+	case BFA_IOIM_SM_ABORT:
+		/*
+		 * IO in pending queue can get abort requests. Complete abort
+		 * requests immediately.
+		 */
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		WARN_ON(!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
+			__bfa_cb_ioim_abort, ioim);
+		break;
+
+	default:
+		bfa_sm_fault(ioim->bfa, event);
+	}
+}
+
+/*
+ * IO is waiting for SG pages.
+ */
+static void
+bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+	bfa_trc(ioim->bfa, ioim->iotag);
+	bfa_trc(ioim->bfa, event);
+
+	switch (event) {
+	case BFA_IOIM_SM_SGALLOCED:
+		if (!bfa_ioim_send_ioreq(ioim)) {
+			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
+			break;
+		}
+		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
+		break;
+
+	case BFA_IOIM_SM_CLEANUP:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
+			      ioim);
+		bfa_ioim_notify_cleanup(ioim);
+		break;
+
+	case BFA_IOIM_SM_ABORT:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
+		bfa_ioim_move_to_comp_q(ioim);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
+			      ioim);
+		break;
+
+	case BFA_IOIM_SM_HWFAIL:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
+		bfa_ioim_move_to_comp_q(ioim);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
+			      ioim);
+		break;
+
+	default:
+		bfa_sm_fault(ioim->bfa, event);
+	}
+}
+
+/*
+ * IO is active.
+ */
+static void
+bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+	switch (event) {
+	case BFA_IOIM_SM_COMP_GOOD:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_ioim_move_to_comp_q(ioim);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
+			      __bfa_cb_ioim_good_comp, ioim);
+		break;
+
+	case BFA_IOIM_SM_COMP:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_ioim_move_to_comp_q(ioim);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
+			      ioim);
+		break;
+
+	case BFA_IOIM_SM_DONE:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
+		bfa_ioim_move_to_comp_q(ioim);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
+			      ioim);
+		break;
+
+	case BFA_IOIM_SM_ABORT:
+		ioim->iosp->abort_explicit = BFA_TRUE;
+		ioim->io_cbfn = __bfa_cb_ioim_abort;
+
+		if (bfa_ioim_send_abort(ioim))
+			bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
+		else {
+			bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
+			bfa_stats(ioim->itnim, qwait);
+			bfa_reqq_wait(ioim->bfa, ioim->reqq,
+					  &ioim->iosp->reqq_wait);
+		}
+		break;
+
+	case BFA_IOIM_SM_CLEANUP:
+		ioim->iosp->abort_explicit = BFA_FALSE;
+		ioim->io_cbfn = __bfa_cb_ioim_failed;
+
+		if (bfa_ioim_send_abort(ioim))
+			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
+		else {
+			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
+			bfa_stats(ioim->itnim, qwait);
+			bfa_reqq_wait(ioim->bfa, ioim->reqq,
+					  &ioim->iosp->reqq_wait);
+		}
+		break;
+
+	case BFA_IOIM_SM_HWFAIL:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_ioim_move_to_comp_q(ioim);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
+			      ioim);
+		break;
+
+	case BFA_IOIM_SM_SQRETRY:
+		if (bfa_ioim_maxretry_reached(ioim)) {
+			/* max retry reached, free IO */
+			bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
+			bfa_ioim_move_to_comp_q(ioim);
+			bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
+					__bfa_cb_ioim_failed, ioim);
+			break;
+		}
+		/* waiting for IO tag resource free */
+		bfa_sm_set_state(ioim, bfa_ioim_sm_cmnd_retry);
+		break;
+
+	default:
+		bfa_sm_fault(ioim->bfa, event);
+	}
+}
+
+/*
+ * IO is retried with new tag.
+ */
+static void
+bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+	switch (event) {
+	case BFA_IOIM_SM_FREE:
+		/* abts and rrq done. Now retry the IO with new tag */
+		bfa_ioim_update_iotag(ioim);
+		if (!bfa_ioim_send_ioreq(ioim)) {
+			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
+			break;
+		}
+		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
+		break;
+
+	case BFA_IOIM_SM_CLEANUP:
+		ioim->iosp->abort_explicit = BFA_FALSE;
+		ioim->io_cbfn = __bfa_cb_ioim_failed;
+
+		if (bfa_ioim_send_abort(ioim))
+			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
+		else {
+			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
+			bfa_stats(ioim->itnim, qwait);
+			bfa_reqq_wait(ioim->bfa, ioim->reqq,
+					  &ioim->iosp->reqq_wait);
+		}
+		break;
+
+	case BFA_IOIM_SM_HWFAIL:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_ioim_move_to_comp_q(ioim);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
+			 __bfa_cb_ioim_failed, ioim);
+		break;
+
+	case BFA_IOIM_SM_ABORT:
+		/*
+		 * In this state the IO abort is done.
+		 * Waiting for IO tag resource free.
+		 */
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
+			      ioim);
+		break;
+
+	default:
+		bfa_sm_fault(ioim->bfa, event);
+	}
+}
+
+/*
+ * IO is being aborted, waiting for completion from firmware.
+ */
+static void
+bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+	bfa_trc(ioim->bfa, ioim->iotag);
+	bfa_trc(ioim->bfa, event);
+
+	switch (event) {
+	case BFA_IOIM_SM_COMP_GOOD:
+	case BFA_IOIM_SM_COMP:
+	case BFA_IOIM_SM_DONE:
+	case BFA_IOIM_SM_FREE:
+		break;
+
+	case BFA_IOIM_SM_ABORT_DONE:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
+			      ioim);
+		break;
+
+	case BFA_IOIM_SM_ABORT_COMP:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_ioim_move_to_comp_q(ioim);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
+			      ioim);
+		break;
+
+	case BFA_IOIM_SM_COMP_UTAG:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_ioim_move_to_comp_q(ioim);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
+			      ioim);
+		break;
+
+	case BFA_IOIM_SM_CLEANUP:
+		WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
+		ioim->iosp->abort_explicit = BFA_FALSE;
+
+		if (bfa_ioim_send_abort(ioim))
+			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
+		else {
+			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
+			bfa_stats(ioim->itnim, qwait);
+			bfa_reqq_wait(ioim->bfa, ioim->reqq,
+					  &ioim->iosp->reqq_wait);
+		}
+		break;
+
+	case BFA_IOIM_SM_HWFAIL:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_ioim_move_to_comp_q(ioim);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
+			      ioim);
+		break;
+
+	default:
+		bfa_sm_fault(ioim->bfa, event);
+	}
+}
+
+/*
+ * IO is being cleaned up (implicit abort), waiting for completion from
+ * firmware.
+ */
+static void
+bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+	bfa_trc(ioim->bfa, ioim->iotag);
+	bfa_trc(ioim->bfa, event);
+
+	switch (event) {
+	case BFA_IOIM_SM_COMP_GOOD:
+	case BFA_IOIM_SM_COMP:
+	case BFA_IOIM_SM_DONE:
+	case BFA_IOIM_SM_FREE:
+		break;
+
+	case BFA_IOIM_SM_ABORT:
+		/*
+		 * IO is already being aborted implicitly
+		 */
+		ioim->io_cbfn = __bfa_cb_ioim_abort;
+		break;
+
+	case BFA_IOIM_SM_ABORT_DONE:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
+		bfa_ioim_notify_cleanup(ioim);
+		break;
+
+	case BFA_IOIM_SM_ABORT_COMP:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
+		bfa_ioim_notify_cleanup(ioim);
+		break;
+
+	case BFA_IOIM_SM_COMP_UTAG:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
+		bfa_ioim_notify_cleanup(ioim);
+		break;
+
+	case BFA_IOIM_SM_HWFAIL:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_ioim_move_to_comp_q(ioim);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
+			      ioim);
+		break;
+
+	case BFA_IOIM_SM_CLEANUP:
+		/*
+		 * IO can be in cleanup state already due to TM command.
+		 * 2nd cleanup request comes from ITN offline event.
+		 */
+		break;
+
+	default:
+		bfa_sm_fault(ioim->bfa, event);
+	}
+}
+
+/*
+ * IO is waiting for room in request CQ
+ */
+static void
+bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+	bfa_trc(ioim->bfa, ioim->iotag);
+	bfa_trc(ioim->bfa, event);
+
+	switch (event) {
+	case BFA_IOIM_SM_QRESUME:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
+		bfa_ioim_send_ioreq(ioim);
+		break;
+
+	case BFA_IOIM_SM_ABORT:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
+		bfa_ioim_move_to_comp_q(ioim);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
+			      ioim);
+		break;
+
+	case BFA_IOIM_SM_CLEANUP:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
+			      ioim);
+		bfa_ioim_notify_cleanup(ioim);
+		break;
+
+	case BFA_IOIM_SM_HWFAIL:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
+		bfa_ioim_move_to_comp_q(ioim);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
+			      ioim);
+		break;
+
+	default:
+		bfa_sm_fault(ioim->bfa, event);
+	}
+}
+
+/*
+ * Active IO is being aborted, waiting for room in request CQ.
+ */
+static void
+bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+	bfa_trc(ioim->bfa, ioim->iotag);
+	bfa_trc(ioim->bfa, event);
+
+	switch (event) {
+	case BFA_IOIM_SM_QRESUME:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
+		bfa_ioim_send_abort(ioim);
+		break;
+
+	case BFA_IOIM_SM_CLEANUP:
+		WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
+		ioim->iosp->abort_explicit = BFA_FALSE;
+		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
+		break;
+
+	case BFA_IOIM_SM_COMP_GOOD:
+	case BFA_IOIM_SM_COMP:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
+		bfa_ioim_move_to_comp_q(ioim);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
+			      ioim);
+		break;
+
+	case BFA_IOIM_SM_DONE:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
+		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
+		bfa_ioim_move_to_comp_q(ioim);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
+			      ioim);
+		break;
+
+	case BFA_IOIM_SM_HWFAIL:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
+		bfa_ioim_move_to_comp_q(ioim);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
+			      ioim);
+		break;
+
+	default:
+		bfa_sm_fault(ioim->bfa, event);
+	}
+}
+
+/*
+ * Active IO is being cleaned up, waiting for room in request CQ.
+ */
+static void
+bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+	bfa_trc(ioim->bfa, ioim->iotag);
+	bfa_trc(ioim->bfa, event);
+
+	switch (event) {
+	case BFA_IOIM_SM_QRESUME:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
+		bfa_ioim_send_abort(ioim);
+		break;
+
+	case BFA_IOIM_SM_ABORT:
+		/*
+		 * IO is already being cleaned up implicitly
+		 */
+		ioim->io_cbfn = __bfa_cb_ioim_abort;
+		break;
+
+	case BFA_IOIM_SM_COMP_GOOD:
+	case BFA_IOIM_SM_COMP:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
+		bfa_ioim_notify_cleanup(ioim);
+		break;
+
+	case BFA_IOIM_SM_DONE:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
+		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
+		bfa_ioim_notify_cleanup(ioim);
+		break;
+
+	case BFA_IOIM_SM_HWFAIL:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
+		bfa_ioim_move_to_comp_q(ioim);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
+			      ioim);
+		break;
+
+	default:
+		bfa_sm_fault(ioim->bfa, event);
+	}
+}
+
+/*
+ * IO bfa callback is pending.
+ */
+static void
+bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+	switch (event) {
+	case BFA_IOIM_SM_HCB:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
+		bfa_ioim_free(ioim);
+		break;
+
+	case BFA_IOIM_SM_CLEANUP:
+		bfa_ioim_notify_cleanup(ioim);
+		break;
+
+	case BFA_IOIM_SM_HWFAIL:
+		break;
+
+	default:
+		bfa_sm_fault(ioim->bfa, event);
+	}
+}
+
+/*
+ * IO bfa callback is pending. IO resource cannot be freed.
+ */
+static void
+bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+	bfa_trc(ioim->bfa, ioim->iotag);
+	bfa_trc(ioim->bfa, event);
+
+	switch (event) {
+	case BFA_IOIM_SM_HCB:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_resfree);
+		list_del(&ioim->qe);
+		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
+		break;
+
+	case BFA_IOIM_SM_FREE:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		break;
+
+	case BFA_IOIM_SM_CLEANUP:
+		bfa_ioim_notify_cleanup(ioim);
+		break;
+
+	case BFA_IOIM_SM_HWFAIL:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		break;
+
+	default:
+		bfa_sm_fault(ioim->bfa, event);
+	}
+}
+
+/*
+ * IO is completed, waiting for resource free from firmware.
+ */
+static void
+bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+	bfa_trc(ioim->bfa, ioim->iotag);
+	bfa_trc(ioim->bfa, event);
+
+	switch (event) {
+	case BFA_IOIM_SM_FREE:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
+		bfa_ioim_free(ioim);
+		break;
+
+	case BFA_IOIM_SM_CLEANUP:
+		bfa_ioim_notify_cleanup(ioim);
+		break;
+
+	case BFA_IOIM_SM_HWFAIL:
+		break;
+
+	default:
+		bfa_sm_fault(ioim->bfa, event);
+	}
+}
+
+/*
+ * This is called from bfa_fcpim_start after the driver has completed
+ * bfa_init() with the flash read. Now invalidate the stale lun mask
+ * content such as unit attention, rp tag and lp tag.
+ */
+void
+bfa_ioim_lm_init(struct bfa_s *bfa)
+{
+	struct bfa_lun_mask_s *lunm_list;
+	int	i;
+
+	if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
+		return;
+
+	lunm_list = bfa_get_lun_mask_list(bfa);
+	for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+		lunm_list[i].ua = BFA_IOIM_LM_UA_RESET;
+		lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
+		lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
+	}
+}
+
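+/*
+ * Deferred IO completion callbacks; 'complete' is BFA_FALSE when the
+ * queued callback is flushed, in which case only the IO state machine
+ * is advanced and no driver completion is reported.
+ */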
+static void
+__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
+{
+	struct bfa_ioim_s *ioim = cbarg;
+
+	if (!complete) {
+		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
+		return;
+	}
+
+	bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio);
+}
+
+static void
+__bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
+{
+	struct bfa_ioim_s	*ioim = cbarg;
+	struct bfi_ioim_rsp_s *m;
+	u8	*snsinfo = NULL;
+	u8	sns_len = 0;
+	s32	residue = 0;
+
+	if (!complete) {
+		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
+		return;
+	}
+
+	m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
+	if (m->io_status == BFI_IOIM_STS_OK) {
+		/*
+		 * setup sense information, if present
+		 */
+		if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) &&
+					m->sns_len) {
+			sns_len = m->sns_len;
+			snsinfo = BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp,
+						ioim->iotag);
+		}
+
+		/*
+		 * setup residue value correctly for normal completions
+		 */
+		if (m->resid_flags == FCP_RESID_UNDER) {
+			residue = be32_to_cpu(m->residue);
+			bfa_stats(ioim->itnim, iocomp_underrun);
+		}
+		if (m->resid_flags == FCP_RESID_OVER) {
+			residue = be32_to_cpu(m->residue);
+			residue = -residue;
+			bfa_stats(ioim->itnim, iocomp_overrun);
+		}
+	}
+
+	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status,
+			  m->scsi_status, sns_len, snsinfo, residue);
+}
+
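+/*
+ * Refresh the rport and lport tags for matching active entries in the
+ * LUN mask list (e.g. after an rport comes back online).
+ */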
+void
+bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn, wwn_t rp_wwn,
+			u16 rp_tag, u8 lp_tag)
+{
+	struct bfa_lun_mask_s *lun_list;
+	u8	i;
+
+	if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
+		return;
+
+	lun_list = bfa_get_lun_mask_list(bfa);
+	for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+		if (lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
+			if ((lun_list[i].lp_wwn == lp_wwn) &&
+			    (lun_list[i].rp_wwn == rp_wwn)) {
+				lun_list[i].rp_tag = rp_tag;
+				lun_list[i].lp_tag = lp_tag;
+			}
+		}
+	}
+}
+
+/*
+ * set UA for all active luns in LM DB
+ */
+static void
+bfa_ioim_lm_set_ua(struct bfa_s *bfa)
+{
+	struct bfa_lun_mask_s	*lunm_list;
+	int	i;
+
+	lunm_list = bfa_get_lun_mask_list(bfa);
+	for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+		if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
+			continue;
+		lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
+	}
+}
+
+bfa_status_t
+bfa_fcpim_lunmask_update(struct bfa_s *bfa, u32 update)
+{
+	struct bfa_lunmask_cfg_s	*lun_mask;
+
+	bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
+	if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
+		return BFA_STATUS_FAILED;
+
+	if (bfa_get_lun_mask_status(bfa) == update)
+		return BFA_STATUS_NO_CHANGE;
+
+	lun_mask = bfa_get_lun_mask(bfa);
+	lun_mask->status = update;
+
+	if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED)
+		bfa_ioim_lm_set_ua(bfa);
+
+	return  bfa_dconf_update(bfa);
+}
+
+bfa_status_t
+bfa_fcpim_lunmask_clear(struct bfa_s *bfa)
+{
+	int i;
+	struct bfa_lun_mask_s	*lunm_list;
+
+	bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
+	if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
+		return BFA_STATUS_FAILED;
+
+	lunm_list = bfa_get_lun_mask_list(bfa);
+	for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+		if (lunm_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
+			if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID)
+				bfa_rport_unset_lunmask(bfa,
+				  BFA_RPORT_FROM_TAG(bfa, lunm_list[i].rp_tag));
+		}
+	}
+
+	memset(lunm_list, 0, sizeof(struct bfa_lun_mask_s) * MAX_LUN_MASK_CFG);
+	return bfa_dconf_update(bfa);
+}
+
+bfa_status_t
+bfa_fcpim_lunmask_query(struct bfa_s *bfa, void *buf)
+{
+	struct bfa_lunmask_cfg_s *lun_mask;
+
+	bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
+	if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
+		return BFA_STATUS_FAILED;
+
+	lun_mask = bfa_get_lun_mask(bfa);
+	memcpy(buf, lun_mask, sizeof(struct bfa_lunmask_cfg_s));
+	return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
+		      wwn_t rpwwn, struct scsi_lun lun)
+{
+	struct bfa_lun_mask_s *lunm_list;
+	struct bfa_rport_s *rp = NULL;
+	int i, free_index = MAX_LUN_MASK_CFG + 1;
+	struct bfa_fcs_lport_s *port = NULL;
+	struct bfa_fcs_rport_s *rp_fcs;
+
+	bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
+	if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
+		return BFA_STATUS_FAILED;
+
+	port = bfa_fcs_lookup_port(&((struct bfad_s *)bfa->bfad)->bfa_fcs,
+				   vf_id, *pwwn);
+	if (port) {
+		*pwwn = port->port_cfg.pwwn;
+		rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
+		if (rp_fcs)
+			rp = rp_fcs->bfa_rport;
+	}
+
+	lunm_list = bfa_get_lun_mask_list(bfa);
+	/* Find a free slot and check whether the entry already exists */
+	for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+		if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
+			free_index = i;
+		if ((lunm_list[i].lp_wwn == *pwwn) &&
+		    (lunm_list[i].rp_wwn == rpwwn) &&
+		    (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
+		     scsilun_to_int((struct scsi_lun *)&lun)))
+			return  BFA_STATUS_ENTRY_EXISTS;
+	}
+
+	if (free_index > MAX_LUN_MASK_CFG)
+		return BFA_STATUS_MAX_ENTRY_REACHED;
+
+	if (rp) {
+		lunm_list[free_index].lp_tag = bfa_lps_get_tag_from_pid(bfa,
+						   rp->rport_info.local_pid);
+		lunm_list[free_index].rp_tag = rp->rport_tag;
+	} else {
+		lunm_list[free_index].lp_tag = BFA_LP_TAG_INVALID;
+		lunm_list[free_index].rp_tag = BFA_RPORT_TAG_INVALID;
+	}
+
+	lunm_list[free_index].lp_wwn = *pwwn;
+	lunm_list[free_index].rp_wwn = rpwwn;
+	lunm_list[free_index].lun = lun;
+	lunm_list[free_index].state = BFA_IOIM_LUN_MASK_ACTIVE;
+
+	/* set UA for all LUNs on this rport */
+	for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+		if ((lunm_list[i].lp_wwn == *pwwn) &&
+		    (lunm_list[i].rp_wwn == rpwwn))
+			lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
+	}
+
+	return bfa_dconf_update(bfa);
+}
+
+bfa_status_t
+bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
+			 wwn_t rpwwn, struct scsi_lun lun)
+{
+	struct bfa_lun_mask_s	*lunm_list;
+	struct bfa_rport_s	*rp = NULL;
+	struct bfa_fcs_lport_s *port = NULL;
+	struct bfa_fcs_rport_s *rp_fcs;
+	int	i;
+
+	/* In min cfg, lunm_list could be NULL, but no commands should run. */
+	if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
+		return BFA_STATUS_FAILED;
+
+	bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
+	bfa_trc(bfa, *pwwn);
+	bfa_trc(bfa, rpwwn);
+	bfa_trc(bfa, scsilun_to_int((struct scsi_lun *)&lun));
+
+	if (*pwwn == 0) {
+		port = bfa_fcs_lookup_port(
+				&((struct bfad_s *)bfa->bfad)->bfa_fcs,
+				vf_id, *pwwn);
+		if (port) {
+			*pwwn = port->port_cfg.pwwn;
+			rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
+			if (rp_fcs)
+				rp = rp_fcs->bfa_rport;
+		}
+	}
+
+	lunm_list = bfa_get_lun_mask_list(bfa);
+	for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+		if ((lunm_list[i].lp_wwn == *pwwn) &&
+		    (lunm_list[i].rp_wwn == rpwwn) &&
+		    (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
+		     scsilun_to_int((struct scsi_lun *)&lun))) {
+			lunm_list[i].lp_wwn = 0;
+			lunm_list[i].rp_wwn = 0;
+			int_to_scsilun(0, &lunm_list[i].lun);
+			lunm_list[i].state = BFA_IOIM_LUN_MASK_INACTIVE;
+			if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID) {
+				lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
+				lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
+			}
+			return bfa_dconf_update(bfa);
+		}
+	}
+
+	/* set UA for all LUNs on this rport */
+	for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+		if ((lunm_list[i].lp_wwn == *pwwn) &&
+		    (lunm_list[i].rp_wwn == rpwwn))
+			lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
+	}
+
+	return BFA_STATUS_ENTRY_NOT_EXISTS;
+}
+
+static void
+__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
+{
+	struct bfa_ioim_s *ioim = cbarg;
+
+	if (!complete) {
+		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
+		return;
+	}
+
+	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
+			  0, 0, NULL, 0);
+}
+
+static void
+__bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
+{
+	struct bfa_ioim_s *ioim = cbarg;
+
+	bfa_stats(ioim->itnim, path_tov_expired);
+	if (!complete) {
+		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
+		return;
+	}
+
+	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
+			  0, 0, NULL, 0);
+}
+
+static void
+__bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
+{
+	struct bfa_ioim_s *ioim = cbarg;
+
+	if (!complete) {
+		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
+		return;
+	}
+
+	bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
+}
+
+static void
+bfa_ioim_sgpg_alloced(void *cbarg)
+{
+	struct bfa_ioim_s *ioim = cbarg;
+
+	ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
+	list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
+	ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
+	bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
+}
+
+/*
+ * Send I/O request to firmware.
+ */
+static	bfa_boolean_t
+bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
+{
+	struct bfa_itnim_s *itnim = ioim->itnim;
+	struct bfi_ioim_req_s *m;
+	static struct fcp_cmnd_s cmnd_z0 = { { { 0 } } };
+	struct bfi_sge_s *sge, *sgpge;
+	u32	pgdlen = 0;
+	u32	fcp_dl;
+	u64 addr;
+	struct scatterlist *sg;
+	struct bfa_sgpg_s *sgpg;
+	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
+	u32 i, sge_id, pgcumsz;
+	enum dma_data_direction dmadir;
+
+	/*
+	 * check for room in queue to send request now
+	 */
+	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
+	if (!m) {
+		bfa_stats(ioim->itnim, qwait);
+		bfa_reqq_wait(ioim->bfa, ioim->reqq,
+				  &ioim->iosp->reqq_wait);
+		return BFA_FALSE;
+	}
+
+	/*
+	 * build i/o request message next
+	 */
+	m->io_tag = cpu_to_be16(ioim->iotag);
+	m->rport_hdl = ioim->itnim->rport->fw_handle;
+	m->io_timeout = 0;
+
+	sge = &m->sges[0];
+	sgpg = ioim->sgpg;
+	sge_id = 0;
+	sgpge = NULL;
+	pgcumsz = 0;
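+	/*
+	 * SG layout: element 0 is built inline in the request message;
+	 * when nsges exceeds BFI_SGE_INLINE the remaining elements go
+	 * into external SG pages (BFI_SGPG_DATA_SGES data entries per
+	 * page), chained by a BFI_SGE_LINK entry, with a final
+	 * BFI_SGE_PGDLEN entry carrying the cumulative page length.
+	 */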
+	scsi_for_each_sg(cmnd, sg, ioim->nsges, i) {
+		if (i == 0) {
+			/* build inline IO SG element */
+			addr = bfa_sgaddr_le(sg_dma_address(sg));
+			sge->sga = *(union bfi_addr_u *) &addr;
+			pgdlen = sg_dma_len(sg);
+			sge->sg_len = pgdlen;
+			sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
+					BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
+			bfa_sge_to_be(sge);
+			sge++;
+		} else {
+			if (sge_id == 0)
+				sgpge = sgpg->sgpg->sges;
+
+			addr = bfa_sgaddr_le(sg_dma_address(sg));
+			sgpge->sga = *(union bfi_addr_u *) &addr;
+			sgpge->sg_len = sg_dma_len(sg);
+			pgcumsz += sgpge->sg_len;
+
+			/* set flags */
+			if (i < (ioim->nsges - 1) &&
+					sge_id < (BFI_SGPG_DATA_SGES - 1))
+				sgpge->flags = BFI_SGE_DATA;
+			else if (i < (ioim->nsges - 1))
+				sgpge->flags = BFI_SGE_DATA_CPL;
+			else
+				sgpge->flags = BFI_SGE_DATA_LAST;
+
+			bfa_sge_to_le(sgpge);
+
+			sgpge++;
+			if (i == (ioim->nsges - 1)) {
+				sgpge->flags = BFI_SGE_PGDLEN;
+				sgpge->sga.a32.addr_lo = 0;
+				sgpge->sga.a32.addr_hi = 0;
+				sgpge->sg_len = pgcumsz;
+				bfa_sge_to_le(sgpge);
+			} else if (++sge_id == BFI_SGPG_DATA_SGES) {
+				sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
+				sgpge->flags = BFI_SGE_LINK;
+				sgpge->sga = sgpg->sgpg_pa;
+				sgpge->sg_len = pgcumsz;
+				bfa_sge_to_le(sgpge);
+				sge_id = 0;
+				pgcumsz = 0;
+			}
+		}
+	}
+
+	if (ioim->nsges > BFI_SGE_INLINE) {
+		sge->sga = ioim->sgpg->sgpg_pa;
+	} else {
+		sge->sga.a32.addr_lo = 0;
+		sge->sga.a32.addr_hi = 0;
+	}
+	sge->sg_len = pgdlen;
+	sge->flags = BFI_SGE_PGDLEN;
+	bfa_sge_to_be(sge);
+
+	/*
+	 * set up I/O command parameters
+	 */
+	m->cmnd = cmnd_z0;
+	int_to_scsilun(cmnd->device->lun, &m->cmnd.lun);
+	dmadir = cmnd->sc_data_direction;
+	if (dmadir == DMA_TO_DEVICE)
+		m->cmnd.iodir = FCP_IODIR_WRITE;
+	else if (dmadir == DMA_FROM_DEVICE)
+		m->cmnd.iodir = FCP_IODIR_READ;
+	else
+		m->cmnd.iodir = FCP_IODIR_NONE;
+
+	m->cmnd.cdb = *(struct scsi_cdb_s *) cmnd->cmnd;
+	fcp_dl = scsi_bufflen(cmnd);
+	m->cmnd.fcp_dl = cpu_to_be32(fcp_dl);
+
+	/*
+	 * set up I/O message header
+	 */
+	switch (m->cmnd.iodir) {
+	case FCP_IODIR_READ:
+		bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_fn_lpu(ioim->bfa));
+		bfa_stats(itnim, input_reqs);
+		ioim->itnim->stats.rd_throughput += fcp_dl;
+		break;
+	case FCP_IODIR_WRITE:
+		bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_fn_lpu(ioim->bfa));
+		bfa_stats(itnim, output_reqs);
+		ioim->itnim->stats.wr_throughput += fcp_dl;
+		break;
+	case FCP_IODIR_RW:
+		bfa_stats(itnim, input_reqs);
+		bfa_stats(itnim, output_reqs);
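+		/* fall through - bidirectional IO uses the generic msg class */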
+	default:
+		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));
+	}
+	if (itnim->seq_rec ||
+	    (scsi_bufflen(cmnd) & (sizeof(u32) - 1)))
+		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));
+
+	/*
+	 * queue I/O message to firmware
+	 */
+	bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh);
+	return BFA_TRUE;
+}
+
+/*
+ * Set up any additional SG pages needed. The inline SG element is set
+ * up at queuing time.
+ */
+static bfa_boolean_t
+bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim)
+{
+	u16	nsgpgs;
+
+	WARN_ON(ioim->nsges <= BFI_SGE_INLINE);
+
+	/*
+	 * allocate SG pages needed
+	 */
+	nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
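+	/* BFA_SGPG_NPAGE() converts the SG element count to whole SG pages */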
+	if (!nsgpgs)
+		return BFA_TRUE;
+
+	if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs)
+	    != BFA_STATUS_OK) {
+		bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs);
+		return BFA_FALSE;
+	}
+
+	ioim->nsgpgs = nsgpgs;
+	ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
+
+	return BFA_TRUE;
+}
+
+/*
+ * Send I/O abort request to firmware.
+ */
+static	bfa_boolean_t
+bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
+{
+	struct bfi_ioim_abort_req_s *m;
+	enum bfi_ioim_h2i	msgop;
+
+	/*
+	 * check for room in queue to send request now
+	 */
+	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
+	if (!m)
+		return BFA_FALSE;
+
+	/*
+	 * build i/o request message next
+	 */
+	if (ioim->iosp->abort_explicit)
+		msgop = BFI_IOIM_H2I_IOABORT_REQ;
+	else
+		msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;
+
+	bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_fn_lpu(ioim->bfa));
+	m->io_tag    = cpu_to_be16(ioim->iotag);
+	m->abort_tag = ++ioim->abort_tag;
+
+	/*
+	 * queue I/O message to firmware
+	 */
+	bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh);
+	return BFA_TRUE;
+}
+
+/*
+ * Call to resume any I/O requests waiting for room in request queue.
+ */
+static void
+bfa_ioim_qresume(void *cbarg)
+{
+	struct bfa_ioim_s *ioim = cbarg;
+
+	bfa_stats(ioim->itnim, qresumes);
+	bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME);
+}
+
+
+static void
+bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
+{
+	/*
+	 * Move IO from itnim queue to fcpim global queue since itnim will be
+	 * freed.
+	 */
+	list_del(&ioim->qe);
+	list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
+
+	if (!ioim->iosp->tskim) {
+		if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) {
+			bfa_cb_dequeue(&ioim->hcb_qe);
+			list_del(&ioim->qe);
+			list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q);
+		}
+		bfa_itnim_iodone(ioim->itnim);
+	} else
+		bfa_wc_down(&ioim->iosp->tskim->wc);
+}
+
+static bfa_boolean_t
+bfa_ioim_is_abortable(struct bfa_ioim_s *ioim)
+{
+	if ((bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit) &&
+	    (!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim)))	||
+	    (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort))		||
+	    (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort_qfull))	||
+	    (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb))		||
+	    (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb_free))	||
+	    (bfa_sm_cmp_state(ioim, bfa_ioim_sm_resfree)))
+		return BFA_FALSE;
+
+	return BFA_TRUE;
+}
+
+void
+bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
+{
+	/*
+	 * If the path TOV timer expired, fail back with PATHTOV status -
+	 * these IO requests are not normally retried by the IO stack.
+	 *
+	 * Otherwise the device came back online; fail the IO with normal
+	 * failed status so that the IO stack retries it.
+	 */
+	if (iotov)
+		ioim->io_cbfn = __bfa_cb_ioim_pathtov;
+	else {
+		ioim->io_cbfn = __bfa_cb_ioim_failed;
+		bfa_stats(ioim->itnim, iocom_nexus_abort);
+	}
+	bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
+
+	/*
+	 * Move IO to fcpim global queue since itnim will be
+	 * freed.
+	 */
+	list_del(&ioim->qe);
+	list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
+}
+
+
+/*
+ * Memory allocation and initialization.
+ */
+void
+bfa_ioim_attach(struct bfa_fcpim_s *fcpim)
+{
+	struct bfa_ioim_s		*ioim;
+	struct bfa_fcp_mod_s	*fcp = fcpim->fcp;
+	struct bfa_ioim_sp_s	*iosp;
+	u16		i;
+
+	/*
+	 * claim memory first
+	 */
+	ioim = (struct bfa_ioim_s *) bfa_mem_kva_curp(fcp);
+	fcpim->ioim_arr = ioim;
+	bfa_mem_kva_curp(fcp) = (u8 *) (ioim + fcpim->fcp->num_ioim_reqs);
+
+	iosp = (struct bfa_ioim_sp_s *) bfa_mem_kva_curp(fcp);
+	fcpim->ioim_sp_arr = iosp;
+	bfa_mem_kva_curp(fcp) = (u8 *) (iosp + fcpim->fcp->num_ioim_reqs);
+
+	/*
+	 * Initialize ioim free queues
+	 */
+	INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
+	INIT_LIST_HEAD(&fcpim->ioim_comp_q);
+
+	for (i = 0; i < fcpim->fcp->num_ioim_reqs;
+	     i++, ioim++, iosp++) {
+		/*
+		 * initialize IOIM
+		 */
+		memset(ioim, 0, sizeof(struct bfa_ioim_s));
+		ioim->iotag   = i;
+		ioim->bfa     = fcpim->bfa;
+		ioim->fcpim   = fcpim;
+		ioim->iosp    = iosp;
+		INIT_LIST_HEAD(&ioim->sgpg_q);
+		bfa_reqq_winit(&ioim->iosp->reqq_wait,
+				   bfa_ioim_qresume, ioim);
+		bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
+				   bfa_ioim_sgpg_alloced, ioim);
+		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
+	}
+}
+
+void
+bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
+{
+	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
+	struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
+	struct bfa_ioim_s *ioim;
+	u16	iotag;
+	enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;
+
+	iotag = be16_to_cpu(rsp->io_tag);
+
+	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
+	WARN_ON(ioim->iotag != iotag);
+
+	bfa_trc(ioim->bfa, ioim->iotag);
+	bfa_trc(ioim->bfa, rsp->io_status);
+	bfa_trc(ioim->bfa, rsp->reuse_io_tag);
+
+	if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
+		ioim->iosp->comp_rspmsg = *m;
+
+	switch (rsp->io_status) {
+	case BFI_IOIM_STS_OK:
+		bfa_stats(ioim->itnim, iocomp_ok);
+		if (rsp->reuse_io_tag == 0)
+			evt = BFA_IOIM_SM_DONE;
+		else
+			evt = BFA_IOIM_SM_COMP;
+		break;
+
+	case BFI_IOIM_STS_TIMEDOUT:
+		bfa_stats(ioim->itnim, iocomp_timedout);
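+		/* fall through */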
+	case BFI_IOIM_STS_ABORTED:
+		rsp->io_status = BFI_IOIM_STS_ABORTED;
+		bfa_stats(ioim->itnim, iocomp_aborted);
+		if (rsp->reuse_io_tag == 0)
+			evt = BFA_IOIM_SM_DONE;
+		else
+			evt = BFA_IOIM_SM_COMP;
+		break;
+
+	case BFI_IOIM_STS_PROTO_ERR:
+		bfa_stats(ioim->itnim, iocom_proto_err);
+		WARN_ON(!rsp->reuse_io_tag);
+		evt = BFA_IOIM_SM_COMP;
+		break;
+
+	case BFI_IOIM_STS_SQER_NEEDED:
+		bfa_stats(ioim->itnim, iocom_sqer_needed);
+		WARN_ON(rsp->reuse_io_tag != 0);
+		evt = BFA_IOIM_SM_SQRETRY;
+		break;
+
+	case BFI_IOIM_STS_RES_FREE:
+		bfa_stats(ioim->itnim, iocom_res_free);
+		evt = BFA_IOIM_SM_FREE;
+		break;
+
+	case BFI_IOIM_STS_HOST_ABORTED:
+		bfa_stats(ioim->itnim, iocom_hostabrts);
+		if (rsp->abort_tag != ioim->abort_tag) {
+			bfa_trc(ioim->bfa, rsp->abort_tag);
+			bfa_trc(ioim->bfa, ioim->abort_tag);
+			return;
+		}
+
+		if (rsp->reuse_io_tag)
+			evt = BFA_IOIM_SM_ABORT_COMP;
+		else
+			evt = BFA_IOIM_SM_ABORT_DONE;
+		break;
+
+	case BFI_IOIM_STS_UTAG:
+		bfa_stats(ioim->itnim, iocom_utags);
+		evt = BFA_IOIM_SM_COMP_UTAG;
+		break;
+
+	default:
+		WARN_ON(1);
+	}
+
+	bfa_sm_send_event(ioim, evt);
+}
+
+void
+bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
+{
+	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
+	struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
+	struct bfa_ioim_s *ioim;
+	u16	iotag;
+
+	iotag = be16_to_cpu(rsp->io_tag);
+
+	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
+	WARN_ON(ioim->iotag != iotag);
+
+	bfa_ioim_cb_profile_comp(fcpim, ioim);
+
+	bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
+}
+
+/*
+ * Called by itnim to clean up IO while going offline.
+ */
+void
+bfa_ioim_cleanup(struct bfa_ioim_s *ioim)
+{
+	bfa_trc(ioim->bfa, ioim->iotag);
+	bfa_stats(ioim->itnim, io_cleanups);
+
+	ioim->iosp->tskim = NULL;
+	bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
+}
+
+void
+bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
+{
+	bfa_trc(ioim->bfa, ioim->iotag);
+	bfa_stats(ioim->itnim, io_tmaborts);
+
+	ioim->iosp->tskim = tskim;
+	bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
+}
+
+/*
+ * IOC failure handling.
+ */
+void
+bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
+{
+	bfa_trc(ioim->bfa, ioim->iotag);
+	bfa_stats(ioim->itnim, io_iocdowns);
+	bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
+}
+
+/*
+ * IO offline TOV popped. Fail the pending IO.
+ */
+void
+bfa_ioim_tov(struct bfa_ioim_s *ioim)
+{
+	bfa_trc(ioim->bfa, ioim->iotag);
+	bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV);
+}
+
+
+/*
+ * Allocate IOIM resource for initiator mode I/O request.
+ */
+struct bfa_ioim_s *
+bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
+		struct bfa_itnim_s *itnim, u16 nsges)
+{
+	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
+	struct bfa_ioim_s *ioim;
+	struct bfa_iotag_s *iotag = NULL;
+
+	/*
+	 * allocate IOIM resource
+	 */
+	bfa_q_deq(&fcpim->fcp->iotag_ioim_free_q, &iotag);
+	if (!iotag) {
+		bfa_stats(itnim, no_iotags);
+		return NULL;
+	}
+
+	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag->tag);
+
+	ioim->dio = dio;
+	ioim->itnim = itnim;
+	ioim->nsges = nsges;
+	ioim->nsgpgs = 0;
+
+	bfa_stats(itnim, total_ios);
+	fcpim->ios_active++;
+
+	list_add_tail(&ioim->qe, &itnim->io_q);
+
+	return ioim;
+}
+
+void
+bfa_ioim_free(struct bfa_ioim_s *ioim)
+{
+	struct bfa_fcpim_s *fcpim = ioim->fcpim;
+	struct bfa_iotag_s *iotag;
+
+	if (ioim->nsgpgs > 0)
+		bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);
+
+	bfa_stats(ioim->itnim, io_comps);
+	fcpim->ios_active--;
+
+	ioim->iotag &= BFA_IOIM_IOTAG_MASK;
+
+	WARN_ON(!(ioim->iotag <
+		(fcpim->fcp->num_ioim_reqs + fcpim->fcp->num_fwtio_reqs)));
+	iotag = BFA_IOTAG_FROM_TAG(fcpim->fcp, ioim->iotag);
+
+	if (ioim->iotag < fcpim->fcp->num_ioim_reqs)
+		list_add_tail(&iotag->qe, &fcpim->fcp->iotag_ioim_free_q);
+	else
+		list_add_tail(&iotag->qe, &fcpim->fcp->iotag_tio_free_q);
+
+	list_del(&ioim->qe);
+}
+
+void
+bfa_ioim_start(struct bfa_ioim_s *ioim)
+{
+	bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
+
+	/*
+	 * Obtain the queue over which this request has to be issued;
+	 * when IO redirection is enabled, queue 0 is used (BFA_FALSE
+	 * evaluates to 0).
+	 */
+	ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
+			BFA_FALSE : bfa_itnim_get_reqq(ioim);
+
+	bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
+}
+
+/*
+ * Driver I/O abort request.
+ */
+bfa_status_t
+bfa_ioim_abort(struct bfa_ioim_s *ioim)
+{
+	bfa_trc(ioim->bfa, ioim->iotag);
+
+	if (!bfa_ioim_is_abortable(ioim))
+		return BFA_STATUS_FAILED;
+
+	bfa_stats(ioim->itnim, io_aborts);
+	bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT);
+
+	return BFA_STATUS_OK;
+}
+
+/*
+ *  BFA TSKIM state machine functions
+ */
+
+/*
+ * Task management command beginning state.
+ */
+static void
+bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
+{
+	bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
+
+	switch (event) {
+	case BFA_TSKIM_SM_START:
+		bfa_sm_set_state(tskim, bfa_tskim_sm_active);
+		bfa_tskim_gather_ios(tskim);
+
+		/*
+		 * If the device is offline, do not send the TM on the wire.
+		 * Just clean up any pending IO requests and complete the TM
+		 * request.
+		 */
+		if (!bfa_itnim_is_online(tskim->itnim)) {
+			bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
+			tskim->tsk_status = BFI_TSKIM_STS_OK;
+			bfa_tskim_cleanup_ios(tskim);
+			return;
+		}
+
+		if (!bfa_tskim_send(tskim)) {
+			bfa_sm_set_state(tskim, bfa_tskim_sm_qfull);
+			bfa_stats(tskim->itnim, tm_qwait);
+			bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
+					  &tskim->reqq_wait);
+		}
+		break;
+
+	default:
+		bfa_sm_fault(tskim->bfa, event);
+	}
+}
+
+/*
+ * TM command is active, awaiting completion from firmware to
+ * clean up IO requests in TM scope.
+ */
+static void
+bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
+{
+	bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
+
+	switch (event) {
+	case BFA_TSKIM_SM_DONE:
+		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
+		bfa_tskim_cleanup_ios(tskim);
+		break;
+
+	case BFA_TSKIM_SM_CLEANUP:
+		bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
+		if (!bfa_tskim_send_abort(tskim)) {
+			bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull);
+			bfa_stats(tskim->itnim, tm_qwait);
+			bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
+				&tskim->reqq_wait);
+		}
+		break;
+
+	case BFA_TSKIM_SM_HWFAIL:
+		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
+		bfa_tskim_iocdisable_ios(tskim);
+		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
+		break;
+
+	default:
+		bfa_sm_fault(tskim->bfa, event);
+	}
+}
+
+/*
+ * An active TM is being cleaned up since ITN is offline. Awaiting cleanup
+ * completion event from firmware.
+ */
+static void
+bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
+{
+	bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
+
+	switch (event) {
+	case BFA_TSKIM_SM_DONE:
+		/*
+		 * Ignore and wait for ABORT completion from firmware.
+		 */
+		break;
+
+	case BFA_TSKIM_SM_UTAG:
+	case BFA_TSKIM_SM_CLEANUP_DONE:
+		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
+		bfa_tskim_cleanup_ios(tskim);
+		break;
+
+	case BFA_TSKIM_SM_HWFAIL:
+		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
+		bfa_tskim_iocdisable_ios(tskim);
+		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
+		break;
+
+	default:
+		bfa_sm_fault(tskim->bfa, event);
+	}
+}
+
+static void
+bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
+{
+	bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
+
+	switch (event) {
+	case BFA_TSKIM_SM_IOS_DONE:
+		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
+		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done);
+		break;
+
+	case BFA_TSKIM_SM_CLEANUP:
+		/*
+		 * Ignore, TM command completed on wire.
+		 * Notify TM completion on IO cleanup completion.
+		 */
+		break;
+
+	case BFA_TSKIM_SM_HWFAIL:
+		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
+		bfa_tskim_iocdisable_ios(tskim);
+		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
+		break;
+
+	default:
+		bfa_sm_fault(tskim->bfa, event);
+	}
+}
+
+/*
+ * Task management command is waiting for room in request CQ
+ */
+static void
+bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
+{
+	bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
+
+	switch (event) {
+	case BFA_TSKIM_SM_QRESUME:
+		bfa_sm_set_state(tskim, bfa_tskim_sm_active);
+		bfa_tskim_send(tskim);
+		break;
+
+	case BFA_TSKIM_SM_CLEANUP:
+		/*
+		 * No need to send TM on wire since ITN is offline.
+		 */
+		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
+		bfa_reqq_wcancel(&tskim->reqq_wait);
+		bfa_tskim_cleanup_ios(tskim);
+		break;
+
+	case BFA_TSKIM_SM_HWFAIL:
+		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
+		bfa_reqq_wcancel(&tskim->reqq_wait);
+		bfa_tskim_iocdisable_ios(tskim);
+		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
+		break;
+
+	default:
+		bfa_sm_fault(tskim->bfa, event);
+	}
+}
+
+/*
+ * Task management command is active, awaiting room in the request CQ
+ * to send the cleanup request.
+ */
+static void
+bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
+		enum bfa_tskim_event event)
+{
+	bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
+
+	switch (event) {
+	case BFA_TSKIM_SM_DONE:
+		bfa_reqq_wcancel(&tskim->reqq_wait);
+		/*
+		 * Fall through !!!
+		 */
+	case BFA_TSKIM_SM_QRESUME:
+		bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
+		bfa_tskim_send_abort(tskim);
+		break;
+
+	case BFA_TSKIM_SM_HWFAIL:
+		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
+		bfa_reqq_wcancel(&tskim->reqq_wait);
+		bfa_tskim_iocdisable_ios(tskim);
+		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
+		break;
+
+	default:
+		bfa_sm_fault(tskim->bfa, event);
+	}
+}
+
+/*
+ * BFA callback is pending
+ */
+static void
+bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
+{
+	bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
+
+	switch (event) {
+	case BFA_TSKIM_SM_HCB:
+		bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
+		bfa_tskim_free(tskim);
+		break;
+
+	case BFA_TSKIM_SM_CLEANUP:
+		bfa_tskim_notify_comp(tskim);
+		break;
+
+	case BFA_TSKIM_SM_HWFAIL:
+		break;
+
+	default:
+		bfa_sm_fault(tskim->bfa, event);
+	}
+}
+
+static void
+__bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete)
+{
+	struct bfa_tskim_s *tskim = cbarg;
+
+	if (!complete) {
+		bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
+		return;
+	}
+
+	bfa_stats(tskim->itnim, tm_success);
+	bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status);
+}
+
+static void
+__bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete)
+{
+	struct bfa_tskim_s *tskim = cbarg;
+
+	if (!complete) {
+		bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
+		return;
+	}
+
+	bfa_stats(tskim->itnim, tm_failures);
+	bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk,
+				BFI_TSKIM_STS_FAILED);
+}
+
+static bfa_boolean_t
+bfa_tskim_match_scope(struct bfa_tskim_s *tskim, struct scsi_lun lun)
+{
+	switch (tskim->tm_cmnd) {
+	case FCP_TM_TARGET_RESET:
+		return BFA_TRUE;
+
+	case FCP_TM_ABORT_TASK_SET:
+	case FCP_TM_CLEAR_TASK_SET:
+	case FCP_TM_LUN_RESET:
+	case FCP_TM_CLEAR_ACA:
+		return !memcmp(&tskim->lun, &lun, sizeof(lun));
+
+	default:
+		WARN_ON(1);
+	}
+
+	return BFA_FALSE;
+}
+
+/*
+ * Gather IO requests affected by the task management command.
+ */
+static void
+bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
+{
+	struct bfa_itnim_s *itnim = tskim->itnim;
+	struct bfa_ioim_s *ioim;
+	struct list_head *qe, *qen;
+	struct scsi_cmnd *cmnd;
+	struct scsi_lun scsilun;
+
+	INIT_LIST_HEAD(&tskim->io_q);
+
+	/*
+	 * Gather any active IO requests first.
+	 */
+	list_for_each_safe(qe, qen, &itnim->io_q) {
+		ioim = (struct bfa_ioim_s *) qe;
+		cmnd = (struct scsi_cmnd *) ioim->dio;
+		int_to_scsilun(cmnd->device->lun, &scsilun);
+		if (bfa_tskim_match_scope(tskim, scsilun)) {
+			list_del(&ioim->qe);
+			list_add_tail(&ioim->qe, &tskim->io_q);
+		}
+	}
+
+	/*
+	 * Failback any pending IO requests immediately.
+	 */
+	list_for_each_safe(qe, qen, &itnim->pending_q) {
+		ioim = (struct bfa_ioim_s *) qe;
+		cmnd = (struct scsi_cmnd *) ioim->dio;
+		int_to_scsilun(cmnd->device->lun, &scsilun);
+		if (bfa_tskim_match_scope(tskim, scsilun)) {
+			list_del(&ioim->qe);
+			list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
+			bfa_ioim_tov(ioim);
+		}
+	}
+}
+
+/*
+ * IO cleanup completion
+ */
+static void
+bfa_tskim_cleanp_comp(void *tskim_cbarg)
+{
+	struct bfa_tskim_s *tskim = tskim_cbarg;
+
+	bfa_stats(tskim->itnim, tm_io_comps);
+	bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
+}
+
+/*
+ * Clean up all IO requests gathered in the TM scope.
+ */
+static void
+bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
+{
+	struct bfa_ioim_s *ioim;
+	struct list_head	*qe, *qen;
+
+	bfa_wc_init(&tskim->wc, bfa_tskim_cleanp_comp, tskim);
+
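+	/*
+	 * Each affected IO takes a reference on the waiting counter via
+	 * bfa_wc_up(); bfa_tskim_iodone() drops it, and when the count
+	 * reaches zero bfa_tskim_cleanp_comp() raises BFA_TSKIM_SM_IOS_DONE.
+	 */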
+	list_for_each_safe(qe, qen, &tskim->io_q) {
+		ioim = (struct bfa_ioim_s *) qe;
+		bfa_wc_up(&tskim->wc);
+		bfa_ioim_cleanup_tm(ioim, tskim);
+	}
+
+	bfa_wc_wait(&tskim->wc);
+}
+
+/*
+ * Send task management request to firmware.
+ */
+static bfa_boolean_t
+bfa_tskim_send(struct bfa_tskim_s *tskim)
+{
+	struct bfa_itnim_s *itnim = tskim->itnim;
+	struct bfi_tskim_req_s *m;
+
+	/*
+	 * check for room in queue to send request now
+	 */
+	m = bfa_reqq_next(tskim->bfa, itnim->reqq);
+	if (!m)
+		return BFA_FALSE;
+
+	/*
+	 * build i/o request message next
+	 */
+	bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
+			bfa_fn_lpu(tskim->bfa));
+
+	m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
+	m->itn_fhdl = tskim->itnim->rport->fw_handle;
+	m->t_secs = tskim->tsecs;
+	m->lun = tskim->lun;
+	m->tm_flags = tskim->tm_cmnd;
+
+	/*
+	 * queue I/O message to firmware
+	 */
+	bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh);
+	return BFA_TRUE;
+}
+
+/*
+ * Send abort request to cleanup an active TM to firmware.
+ */
+static bfa_boolean_t
+bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
+{
+	struct bfa_itnim_s	*itnim = tskim->itnim;
+	struct bfi_tskim_abortreq_s	*m;
+
+	/*
+	 * check for room in queue to send request now
+	 */
+	m = bfa_reqq_next(tskim->bfa, itnim->reqq);
+	if (!m)
+		return BFA_FALSE;
+
+	/*
+	 * build i/o request message next
+	 */
+	bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
+			bfa_fn_lpu(tskim->bfa));
+
+	m->tsk_tag  = cpu_to_be16(tskim->tsk_tag);
+
+	/*
+	 * queue I/O message to firmware
+	 */
+	bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh);
+	return BFA_TRUE;
+}
+
+/*
+ * Call to resume task management cmnd waiting for room in request queue.
+ */
+static void
+bfa_tskim_qresume(void *cbarg)
+{
+	struct bfa_tskim_s *tskim = cbarg;
+
+	bfa_stats(tskim->itnim, tm_qresumes);
+	bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
+}
+
+/*
+ * Cleanup IOs associated with a task management command on IOC failures.
+ */
+static void
+bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim)
+{
+	struct bfa_ioim_s *ioim;
+	struct list_head	*qe, *qen;
+
+	list_for_each_safe(qe, qen, &tskim->io_q) {
+		ioim = (struct bfa_ioim_s *) qe;
+		bfa_ioim_iocdisable(ioim);
+	}
+}
+
+/*
+ * Notification on completions from related ioim.
+ */
+void
+bfa_tskim_iodone(struct bfa_tskim_s *tskim)
+{
+	bfa_wc_down(&tskim->wc);
+}
+
+/*
+ * Handle IOC h/w failure notification from itnim.
+ */
+void
+bfa_tskim_iocdisable(struct bfa_tskim_s *tskim)
+{
+	tskim->notify = BFA_FALSE;
+	bfa_stats(tskim->itnim, tm_iocdowns);
+	bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
+}
+
+/*
+ * Cleanup TM command and associated IOs as part of ITNIM offline.
+ */
+void
+bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
+{
+	tskim->notify = BFA_TRUE;
+	bfa_stats(tskim->itnim, tm_cleanups);
+	bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
+}
+
+/*
+ * Memory allocation and initialization.
+ */
+void
+bfa_tskim_attach(struct bfa_fcpim_s *fcpim)
+{
+	struct bfa_tskim_s *tskim;
+	struct bfa_fcp_mod_s	*fcp = fcpim->fcp;
+	u16	i;
+
+	INIT_LIST_HEAD(&fcpim->tskim_free_q);
+	INIT_LIST_HEAD(&fcpim->tskim_unused_q);
+
+	tskim = (struct bfa_tskim_s *) bfa_mem_kva_curp(fcp);
+	fcpim->tskim_arr = tskim;
+
+	for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
+		/*
+		 * initialize TSKIM
+		 */
+		memset(tskim, 0, sizeof(struct bfa_tskim_s));
+		tskim->tsk_tag = i;
+		tskim->bfa	= fcpim->bfa;
+		tskim->fcpim	= fcpim;
+		tskim->notify  = BFA_FALSE;
+		bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume,
+					tskim);
+		bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
+
+		list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
+	}
+
+	bfa_mem_kva_curp(fcp) = (u8 *) tskim;
+}
+
+void
+bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
+{
+	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
+	struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
+	struct bfa_tskim_s *tskim;
+	u16	tsk_tag = be16_to_cpu(rsp->tsk_tag);
+
+	tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
+	WARN_ON(tskim->tsk_tag != tsk_tag);
+
+	tskim->tsk_status = rsp->tsk_status;
+
+	/*
+	 * Firmware sends BFI_TSKIM_STS_ABORTED status for abort
+	 * requests. All other statuses are for normal completions.
+	 */
+	if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) {
+		bfa_stats(tskim->itnim, tm_cleanup_comps);
+		bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE);
+	} else if (rsp->tsk_status == BFI_TSKIM_STS_UTAG) {
+		bfa_sm_send_event(tskim, BFA_TSKIM_SM_UTAG);
+	} else {
+		bfa_stats(tskim->itnim, tm_fw_rsps);
+		bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE);
+	}
+}
+
+
+struct bfa_tskim_s *
+bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
+{
+	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
+	struct bfa_tskim_s *tskim;
+
+	bfa_q_deq(&fcpim->tskim_free_q, &tskim);
+
+	if (tskim)
+		tskim->dtsk = dtsk;
+
+	return tskim;
+}
+
+void
+bfa_tskim_free(struct bfa_tskim_s *tskim)
+{
+	WARN_ON(!bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe));
+	list_del(&tskim->qe);
+	list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
+}
+
+/*
+ * Start a task management command.
+ *
+ * @param[in]	tskim	BFA task management command instance
+ * @param[in]	itnim	i-t nexus for the task management command
+ * @param[in]	lun	lun, if applicable
+ * @param[in]	tm_cmnd	Task management command code.
+ * @param[in]	tsecs	Timeout in seconds
+ *
+ * @return None.
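+ *
+ * Typical usage (sketch; assumes tskim was obtained via
+ * bfa_tskim_alloc() and itnim is online):
+ *	tskim = bfa_tskim_alloc(bfa, dtsk);
+ *	bfa_tskim_start(tskim, itnim, lun, FCP_TM_LUN_RESET, 10);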
+ */
+void
+bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim,
+			struct scsi_lun lun,
+			enum fcp_tm_cmnd tm_cmnd, u8 tsecs)
+{
+	tskim->itnim	= itnim;
+	tskim->lun	= lun;
+	tskim->tm_cmnd = tm_cmnd;
+	tskim->tsecs	= tsecs;
+	tskim->notify  = BFA_FALSE;
+	bfa_stats(itnim, tm_cmnds);
+
+	list_add_tail(&tskim->qe, &itnim->tsk_q);
+	bfa_sm_send_event(tskim, BFA_TSKIM_SM_START);
+}
+
+void
+bfa_tskim_res_recfg(struct bfa_s *bfa, u16 num_tskim_fw)
+{
+	struct bfa_fcpim_s	*fcpim = BFA_FCPIM(bfa);
+	struct list_head	*qe;
+	int	i;
+
+	for (i = 0; i < (fcpim->num_tskim_reqs - num_tskim_fw); i++) {
+		bfa_q_deq_tail(&fcpim->tskim_free_q, &qe);
+		list_add_tail(qe, &fcpim->tskim_unused_q);
+	}
+}
+
+void
+bfa_fcp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
+		struct bfa_s *bfa)
+{
+	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
+	struct bfa_mem_kva_s *fcp_kva = BFA_MEM_FCP_KVA(bfa);
+	struct bfa_mem_dma_s *seg_ptr;
+	u16	nsegs, idx, per_seg_ios, num_io_req;
+	u32	km_len = 0;
+
+	/*
+	 * Zero is an allowed config value for num_ioim_reqs and
+	 * num_fwtio_reqs. If the values are non-zero, clamp them to the
+	 * supported range.
+	 */
+	if (cfg->fwcfg.num_ioim_reqs &&
+	    cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
+		cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
+	else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX)
+		cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
+
+	if (cfg->fwcfg.num_fwtio_reqs > BFA_FWTIO_MAX)
+		cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;
+
+	num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
+	if (num_io_req > BFA_IO_MAX) {
+		if (cfg->fwcfg.num_ioim_reqs && cfg->fwcfg.num_fwtio_reqs) {
+			cfg->fwcfg.num_ioim_reqs = BFA_IO_MAX/2;
+			cfg->fwcfg.num_fwtio_reqs = BFA_IO_MAX/2;
+		} else if (cfg->fwcfg.num_fwtio_reqs)
+			cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;
+		else
+			cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
+	}
+
+	bfa_fcpim_meminfo(cfg, &km_len);
+
+	num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
+	km_len += num_io_req * sizeof(struct bfa_iotag_s);
+	km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itn_s);
+
+	/* dma memory */
+	nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);
+	per_seg_ios = BFI_MEM_NREQS_SEG(BFI_IOIM_SNSLEN);
+
+	bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {
+		if (num_io_req >= per_seg_ios) {
+			num_io_req -= per_seg_ios;
+			bfa_mem_dma_setup(minfo, seg_ptr,
+				per_seg_ios * BFI_IOIM_SNSLEN);
+		} else
+			bfa_mem_dma_setup(minfo, seg_ptr,
+				num_io_req * BFI_IOIM_SNSLEN);
+	}
+
+	/* kva memory */
+	bfa_mem_kva_setup(minfo, fcp_kva, km_len);
+}
+
+void
+bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+		struct bfa_pcidev_s *pcidev)
+{
+	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
+	struct bfa_mem_dma_s *seg_ptr;
+	u16	idx, nsegs, num_io_req;
+
+	fcp->max_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
+	fcp->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
+	fcp->num_fwtio_reqs  = cfg->fwcfg.num_fwtio_reqs;
+	fcp->num_itns   = cfg->fwcfg.num_rports;
+	fcp->bfa = bfa;
+
+	/*
+	 * Set up the pool of snsbase addresses that is passed to the fw
+	 * as part of bfi_iocfc_cfg_s.
+	 */
+	num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
+	nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);
+
+	bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {
+
+		if (!bfa_mem_dma_virt(seg_ptr))
+			break;
+
+		fcp->snsbase[idx].pa = bfa_mem_dma_phys(seg_ptr);
+		fcp->snsbase[idx].kva = bfa_mem_dma_virt(seg_ptr);
+		bfa_iocfc_set_snsbase(bfa, idx, fcp->snsbase[idx].pa);
+	}
+
+	fcp->throttle_update_required = 1;
+	bfa_fcpim_attach(fcp, bfad, cfg, pcidev);
+
+	bfa_iotag_attach(fcp);
+
+	fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
+	bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
+			(fcp->num_itns * sizeof(struct bfa_itn_s));
+	memset(fcp->itn_arr, 0,
+			(fcp->num_itns * sizeof(struct bfa_itn_s)));
+}
+
+void
+bfa_fcp_iocdisable(struct bfa_s *bfa)
+{
+	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
+
+	bfa_fcpim_iocdisable(fcp);
+}
+
+void
+bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw, u16 max_ioim_fw)
+{
+	struct bfa_fcp_mod_s	*mod = BFA_FCP_MOD(bfa);
+	struct list_head	*qe;
+	int	i;
+
+	/* Update io throttle value only once during driver load time */
+	if (!mod->throttle_update_required)
+		return;
+
+	for (i = 0; i < (mod->num_ioim_reqs - num_ioim_fw); i++) {
+		bfa_q_deq_tail(&mod->iotag_ioim_free_q, &qe);
+		list_add_tail(qe, &mod->iotag_unused_q);
+	}
+
+	if (mod->num_ioim_reqs != num_ioim_fw) {
+		bfa_trc(bfa, mod->num_ioim_reqs);
+		bfa_trc(bfa, num_ioim_fw);
+	}
+
+	mod->max_ioim_reqs = max_ioim_fw;
+	mod->num_ioim_reqs = num_ioim_fw;
+	mod->throttle_update_required = 0;
+}
+
+void
+bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
+		void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
+{
+	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
+	struct bfa_itn_s *itn;
+
+	itn =  BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
+	itn->isr = isr;
+}
+
+/*
+ * Itn interrupt processing.
+ */
+void
+bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
+{
+	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
+	union bfi_itn_i2h_msg_u msg;
+	struct bfa_itn_s *itn;
+
+	msg.msg = m;
+	itn =  BFA_ITN_FROM_TAG(fcp, msg.create_rsp->bfa_handle);
+
+	if (itn->isr)
+		itn->isr(bfa, m);
+	else
+		WARN_ON(1);
+}
+
+void
+bfa_iotag_attach(struct bfa_fcp_mod_s *fcp)
+{
+	struct bfa_iotag_s *iotag;
+	u16	num_io_req, i;
+
+	iotag = (struct bfa_iotag_s *) bfa_mem_kva_curp(fcp);
+	fcp->iotag_arr = iotag;
+
+	INIT_LIST_HEAD(&fcp->iotag_ioim_free_q);
+	INIT_LIST_HEAD(&fcp->iotag_tio_free_q);
+	INIT_LIST_HEAD(&fcp->iotag_unused_q);
+
+	num_io_req = fcp->num_ioim_reqs + fcp->num_fwtio_reqs;
+	for (i = 0; i < num_io_req; i++, iotag++) {
+		memset(iotag, 0, sizeof(struct bfa_iotag_s));
+		iotag->tag = i;
+		if (i < fcp->num_ioim_reqs)
+			list_add_tail(&iotag->qe, &fcp->iotag_ioim_free_q);
+		else
+			list_add_tail(&iotag->qe, &fcp->iotag_tio_free_q);
+	}
+
+	bfa_mem_kva_curp(fcp) = (u8 *) iotag;
+}
+
+
+/**
+ * To send the config request, first try to use the throttle value from
+ * flash; if it is 0, use the driver parameter instead. The minimum of
+ * flash_val and drv_val must be used because memory allocation was done
+ * based on the configured driver value.
+ */
+u16
+bfa_fcpim_get_throttle_cfg(struct bfa_s *bfa, u16 drv_cfg_param)
+{
+	u16 tmp;
+	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
+
+	/*
+	 * If the throttle value from flash is already in effect after the
+	 * driver is loaded, then until the next load always return the
+	 * current value instead of the actual flash value.
+	 */
+	if (!fcp->throttle_update_required)
+		return (u16)fcp->num_ioim_reqs;
+
+	tmp = bfa_dconf_read_data_valid(bfa) ? bfa_fcpim_read_throttle(bfa) : 0;
+	if (!tmp || (tmp > drv_cfg_param))
+		tmp = drv_cfg_param;
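+	/*
+	 * e.g. a flash value of 4096 vs. a driver value of 2000 yields
+	 * 2000; a flash value of zero also falls back to the driver value.
+	 */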
+
+	return tmp;
+}
+
+bfa_status_t
+bfa_fcpim_write_throttle(struct bfa_s *bfa, u16 value)
+{
+	if (!bfa_dconf_get_min_cfg(bfa)) {
+		BFA_DCONF_MOD(bfa)->dconf->throttle_cfg.value = value;
+		BFA_DCONF_MOD(bfa)->dconf->throttle_cfg.is_valid = 1;
+		return BFA_STATUS_OK;
+	}
+
+	return BFA_STATUS_FAILED;
+}
+
+u16
+bfa_fcpim_read_throttle(struct bfa_s *bfa)
+{
+	struct bfa_throttle_cfg_s *throttle_cfg =
+			&(BFA_DCONF_MOD(bfa)->dconf->throttle_cfg);
+
+	if (bfa_dconf_get_min_cfg(bfa) || throttle_cfg->is_valid != 1)
+		return 0;
+
+	return throttle_cfg->value;
+}
+
+bfa_status_t
+bfa_fcpim_throttle_set(struct bfa_s *bfa, u16 value)
+{
+	/* In min cfg, no commands should run. */
+	if ((bfa_dconf_get_min_cfg(bfa) == BFA_TRUE) ||
+	    (!bfa_dconf_read_data_valid(bfa)))
+		return BFA_STATUS_FAILED;
+
+	bfa_fcpim_write_throttle(bfa, value);
+
+	return bfa_dconf_update(bfa);
+}
+
+bfa_status_t
+bfa_fcpim_throttle_get(struct bfa_s *bfa, void *buf)
+{
+	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
+	struct bfa_defs_fcpim_throttle_s throttle;
+
+	if ((bfa_dconf_get_min_cfg(bfa) == BFA_TRUE) ||
+	    (!bfa_dconf_read_data_valid(bfa)))
+		return BFA_STATUS_FAILED;
+
+	memset(&throttle, 0, sizeof(struct bfa_defs_fcpim_throttle_s));
+
+	throttle.cur_value = (u16)(fcpim->fcp->num_ioim_reqs);
+	throttle.cfg_value = bfa_fcpim_read_throttle(bfa);
+	if (!throttle.cfg_value)
+		throttle.cfg_value = throttle.cur_value;
+	throttle.max_value = (u16)(fcpim->fcp->max_ioim_reqs);
+	memcpy(buf, &throttle, sizeof(struct bfa_defs_fcpim_throttle_s));
+
+	return BFA_STATUS_OK;
+}
diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
new file mode 100644
index 0000000..ec8f863
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fcpim.h
@@ -0,0 +1,430 @@
+/*
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014- QLogic Corporation.
+ * All rights reserved
+ * www.qlogic.com
+ *
+ * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __BFA_FCPIM_H__
+#define __BFA_FCPIM_H__
+
+#include "bfa.h"
+#include "bfa_svc.h"
+#include "bfi_ms.h"
+#include "bfa_defs_svc.h"
+#include "bfa_cs.h"
+
+/* FCP module related definitions */
+#define BFA_IO_MAX	BFI_IO_MAX
+#define BFA_FWTIO_MAX	2000
+
+struct bfa_fcp_mod_s;
+struct bfa_iotag_s {
+	struct list_head	qe;	/* queue element	*/
+	u16	tag;			/* FW IO tag		*/
+};
+
+struct bfa_itn_s {
+	bfa_isr_func_t isr;
+};
+
+void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
+		void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
+void bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m);
+void bfa_iotag_attach(struct bfa_fcp_mod_s *fcp);
+void bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw, u16 max_ioim_fw);
+
+#define BFA_FCP_MOD(_hal)	(&(_hal)->modules.fcp_mod)
+#define BFA_MEM_FCP_KVA(__bfa)	(&(BFA_FCP_MOD(__bfa)->kva_seg))
+#define BFA_IOTAG_FROM_TAG(_fcp, _tag)	\
+	(&(_fcp)->iotag_arr[(_tag & BFA_IOIM_IOTAG_MASK)])
+#define BFA_ITN_FROM_TAG(_fcp, _tag)	\
+	((_fcp)->itn_arr + ((_tag) & ((_fcp)->num_itns - 1)))
+#define BFA_SNSINFO_FROM_TAG(_fcp, _tag) \
+	bfa_mem_get_dmabuf_kva(_fcp, (_tag & BFA_IOIM_IOTAG_MASK),	\
+	BFI_IOIM_SNSLEN)
+
+
+#define BFA_ITNIM_MIN   32
+#define BFA_ITNIM_MAX   1024
+
+#define BFA_IOIM_MIN	8
+#define BFA_IOIM_MAX	2000
+
+#define BFA_TSKIM_MIN   4
+#define BFA_TSKIM_MAX   512
+#define BFA_FCPIM_PATHTOV_DEF	(30 * 1000)	/* in millisecs */
+#define BFA_FCPIM_PATHTOV_MAX	(90 * 1000)	/* in millisecs */
+
+
+#define bfa_itnim_ioprofile_update(__itnim, __index)			\
+	(__itnim->ioprofile.iocomps[__index]++)
+
+#define BFA_IOIM_RETRY_TAG_OFFSET 11
+#define BFA_IOIM_IOTAG_MASK 0x07ff /* 2K IOs */
+#define BFA_IOIM_RETRY_MAX 7
+
+/* Buckets are 512 bytes to 2 MB */
+static inline u32
+bfa_ioim_get_index(u32 n)
+{
+	int pos = 0;
+	if (n >= (1UL)<<22)
+		return BFA_IOBUCKET_MAX - 1;
+	n >>= 8;
+	if (n >= (1UL)<<16) {
+		n >>= 16;
+		pos += 16;
+	}
+	if (n >= 1 << 8) {
+		n >>= 8;
+		pos += 8;
+	}
+	if (n >= 1 << 4) {
+		n >>= 4;
+		pos += 4;
+	}
+	if (n >= 1 << 2) {
+		n >>= 2;
+		pos += 2;
+	}
+	if (n >= 1 << 1)
+		pos += 1;
+
+	return (n == 0) ? (0) : pos;
+}
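+
+/*
+ * bfa_ioim_get_index() effectively computes
+ * clamp(floor(log2(n)) - 8, 0, BFA_IOBUCKET_MAX - 1); e.g. a 4 KB IO
+ * (n = 4096) lands in bucket 4 and anything under 512 bytes in bucket 0.
+ */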
+
+/*
+ * forward declarations
+ */
+struct bfa_ioim_s;
+struct bfa_tskim_s;
+struct bfad_ioim_s;
+struct bfad_tskim_s;
+
+typedef void    (*bfa_fcpim_profile_t) (struct bfa_ioim_s *ioim);
+
+struct bfa_fcpim_s {
+	struct bfa_s		*bfa;
+	struct bfa_fcp_mod_s	*fcp;
+	struct bfa_itnim_s	*itnim_arr;
+	struct bfa_ioim_s	*ioim_arr;
+	struct bfa_ioim_sp_s	*ioim_sp_arr;
+	struct bfa_tskim_s	*tskim_arr;
+	int			num_itnims;
+	int			num_tskim_reqs;
+	u32			path_tov;
+	u16			q_depth;
+	u8			reqq;		/*  Request queue to be used */
+	struct list_head	itnim_q;	/*  queue of active itnim */
+	struct list_head	ioim_resfree_q; /*  IOs waiting for f/w */
+	struct list_head	ioim_comp_q;	/*  IO global comp Q	*/
+	struct list_head	tskim_free_q;
+	struct list_head	tskim_unused_q;	/* Unused tskim Q */
+	u32			ios_active;	/*  current active IOs	*/
+	u32			delay_comp;
+	struct bfa_fcpim_del_itn_stats_s del_itn_stats;
+	bfa_boolean_t		ioredirect;
+	bfa_boolean_t		io_profile;
+	time64_t		io_profile_start_time;
+	bfa_fcpim_profile_t     profile_comp;
+	bfa_fcpim_profile_t     profile_start;
+};
+
+/* Max FCP dma segs required */
+#define BFA_FCP_DMA_SEGS	BFI_IOIM_SNSBUF_SEGS
+
+struct bfa_fcp_mod_s {
+	struct bfa_s		*bfa;
+	struct list_head	iotag_ioim_free_q;	/* free IO resources */
+	struct list_head	iotag_tio_free_q;	/* free IO resources */
+	struct list_head	iotag_unused_q;	/* unused IO resources*/
+	struct bfa_iotag_s	*iotag_arr;
+	struct bfa_itn_s	*itn_arr;
+	int			max_ioim_reqs;
+	int			num_ioim_reqs;
+	int			num_fwtio_reqs;
+	int			num_itns;
+	struct bfa_dma_s	snsbase[BFA_FCP_DMA_SEGS];
+	struct bfa_fcpim_s	fcpim;
+	struct bfa_mem_dma_s	dma_seg[BFA_FCP_DMA_SEGS];
+	struct bfa_mem_kva_s	kva_seg;
+	int			throttle_update_required;
+};
+
+/*
+ * BFA IO (initiator mode)
+ */
+struct bfa_ioim_s {
+	struct list_head	qe;		/*  queue element	*/
+	bfa_sm_t		sm;		/*  BFA ioim state machine */
+	struct bfa_s		*bfa;		/*  BFA module	*/
+	struct bfa_fcpim_s	*fcpim;		/*  parent fcpim module */
+	struct bfa_itnim_s	*itnim;		/*  i-t-n nexus for this IO  */
+	struct bfad_ioim_s	*dio;		/*  driver IO handle	*/
+	u16			iotag;		/*  FWI IO tag	*/
+	u16			abort_tag;	/*  unique abort request tag */
+	u16			nsges;		/*  number of SG elements */
+	u16			nsgpgs;		/*  number of SG pages	*/
+	struct bfa_sgpg_s	*sgpg;		/*  first SG page	*/
+	struct list_head	sgpg_q;		/*  allocated SG pages	*/
+	struct bfa_cb_qe_s	hcb_qe;		/*  bfa callback qelem	*/
+	bfa_cb_cbfn_t		io_cbfn;	/*  IO completion handler */
+	struct bfa_ioim_sp_s	*iosp;		/*  slow-path IO handling */
+	u8			reqq;		/*  Request queue for I/O */
+	u8			mode;		/*  IO is passthrough or not */
+	u64			start_time;	/*  IO's Profile start val */
+};
+
+struct bfa_ioim_sp_s {
+	struct bfi_msg_s	comp_rspmsg;	/*  IO comp f/w response */
+	struct bfa_sgpg_wqe_s	sgpg_wqe;	/*  waitq elem for sgpg	*/
+	struct bfa_reqq_wait_s	reqq_wait;	/*  to wait for room in reqq */
+	bfa_boolean_t		abort_explicit;	/*  aborted by OS	*/
+	struct bfa_tskim_s	*tskim;		/*  Relevant TM cmd	*/
+};
+
+/*
+ * BFA Task management command (initiator mode)
+ */
+struct bfa_tskim_s {
+	struct list_head	qe;
+	bfa_sm_t		sm;
+	struct bfa_s		*bfa;	/*  BFA module  */
+	struct bfa_fcpim_s	*fcpim;	/*  parent fcpim module	*/
+	struct bfa_itnim_s	*itnim;	/*  i-t-n nexus for this IO  */
+	struct bfad_tskim_s	*dtsk;  /*  driver task mgmt cmnd	*/
+	bfa_boolean_t		notify;	/*  notify itnim on TM comp  */
+	struct scsi_lun		lun;	/*  lun if applicable	*/
+	enum fcp_tm_cmnd	tm_cmnd; /*  task management command  */
+	u16			tsk_tag; /*  FWI IO tag	*/
+	u8			tsecs;	/*  timeout in seconds	*/
+	struct bfa_reqq_wait_s  reqq_wait;   /*  to wait for room in reqq */
+	struct list_head	io_q;	/*  queue of affected IOs	*/
+	struct bfa_wc_s		wc;	/*  waiting counter	*/
+	struct bfa_cb_qe_s	hcb_qe;	/*  bfa callback qelem	*/
+	enum bfi_tskim_status   tsk_status;  /*  TM status	*/
+};
+
+/*
+ * BFA i-t-n (initiator mode)
+ */
+struct bfa_itnim_s {
+	struct list_head	qe;	/*  queue element	*/
+	bfa_sm_t		sm;	/*  i-t-n im BFA state machine  */
+	struct bfa_s		*bfa;	/*  bfa instance	*/
+	struct bfa_rport_s	*rport;	/*  bfa rport	*/
+	void			*ditn;	/*  driver i-t-n structure	*/
+	struct bfi_mhdr_s	mhdr;	/*  pre-built mhdr	*/
+	u8			msg_no;	/*  itnim/rport firmware handle */
+	u8			reqq;	/*  CQ for requests	*/
+	struct bfa_cb_qe_s	hcb_qe;	/*  bfa callback qelem	*/
+	struct list_head pending_q;	/*  queue of pending IO requests */
+	struct list_head io_q;		/*  queue of active IO requests */
+	struct list_head io_cleanup_q;	/*  IO being cleaned up	*/
+	struct list_head tsk_q;		/*  queue of active TM commands */
+	struct list_head  delay_comp_q; /*  queue of failed inflight cmds */
+	bfa_boolean_t   seq_rec;	/*  SQER supported	*/
+	bfa_boolean_t   is_online;	/*  itnim is ONLINE for IO	*/
+	bfa_boolean_t   iotov_active;	/*  IO TOV timer is active	 */
+	struct bfa_wc_s	wc;		/*  waiting counter	*/
+	struct bfa_timer_s timer;	/*  pending IO TOV	 */
+	struct bfa_reqq_wait_s reqq_wait; /*  to wait for room in reqq */
+	struct bfa_fcpim_s *fcpim;	/*  fcpim module	*/
+	struct bfa_itnim_iostats_s	stats;
+	struct bfa_itnim_ioprofile_s  ioprofile;
+};
+
+#define bfa_itnim_is_online(_itnim) ((_itnim)->is_online)
+#define BFA_FCPIM(_hal)	(&(_hal)->modules.fcp_mod.fcpim)
+#define BFA_IOIM_TAG_2_ID(_iotag)	((_iotag) & BFA_IOIM_IOTAG_MASK)
+#define BFA_IOIM_FROM_TAG(_fcpim, _iotag)	\
+	(&(_fcpim)->ioim_arr[((_iotag) & BFA_IOIM_IOTAG_MASK)])
+#define BFA_TSKIM_FROM_TAG(_fcpim, _tmtag)	\
+	(&(_fcpim)->tskim_arr[(_tmtag) & ((_fcpim)->num_tskim_reqs - 1)])
+
+#define bfa_io_profile_start_time(_bfa)	\
+	((_bfa)->modules.fcp_mod.fcpim.io_profile_start_time)
+#define bfa_fcpim_get_io_profile(_bfa)	\
+	((_bfa)->modules.fcp_mod.fcpim.io_profile)
+#define bfa_ioim_update_iotag(__ioim) do {				\
+	uint16_t k = (__ioim)->iotag >> BFA_IOIM_RETRY_TAG_OFFSET;	\
+	k++; (__ioim)->iotag &= BFA_IOIM_IOTAG_MASK;			\
+	(__ioim)->iotag |= k << BFA_IOIM_RETRY_TAG_OFFSET;		\
+} while (0)
+
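+/*
+ * The retry count lives in the iotag bits above BFA_IOIM_RETRY_TAG_OFFSET;
+ * e.g. iotag 0x1805 decodes to retry count 3 (0x1805 >> 11) and array
+ * index 0x005 (0x1805 & BFA_IOIM_IOTAG_MASK).
+ */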
+static inline bfa_boolean_t
+bfa_ioim_maxretry_reached(struct bfa_ioim_s *ioim)
+{
+	uint16_t k = ioim->iotag >> BFA_IOIM_RETRY_TAG_OFFSET;
+	if (k < BFA_IOIM_RETRY_MAX)
+		return BFA_FALSE;
+	return BFA_TRUE;
+}
+
+/*
+ * function prototypes
+ */
+void	bfa_ioim_attach(struct bfa_fcpim_s *fcpim);
+void	bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
+void	bfa_ioim_good_comp_isr(struct bfa_s *bfa,
+					struct bfi_msg_s *msg);
+void	bfa_ioim_cleanup(struct bfa_ioim_s *ioim);
+void	bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim,
+					struct bfa_tskim_s *tskim);
+void	bfa_ioim_iocdisable(struct bfa_ioim_s *ioim);
+void	bfa_ioim_tov(struct bfa_ioim_s *ioim);
+
+void	bfa_tskim_attach(struct bfa_fcpim_s *fcpim);
+void	bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
+void	bfa_tskim_iodone(struct bfa_tskim_s *tskim);
+void	bfa_tskim_iocdisable(struct bfa_tskim_s *tskim);
+void	bfa_tskim_cleanup(struct bfa_tskim_s *tskim);
+void	bfa_tskim_res_recfg(struct bfa_s *bfa, u16 num_tskim_fw);
+
+void	bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len);
+void	bfa_itnim_attach(struct bfa_fcpim_s *fcpim);
+void	bfa_itnim_iocdisable(struct bfa_itnim_s *itnim);
+void	bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
+void	bfa_itnim_iodone(struct bfa_itnim_s *itnim);
+void	bfa_itnim_tskdone(struct bfa_itnim_s *itnim);
+bfa_boolean_t   bfa_itnim_hold_io(struct bfa_itnim_s *itnim);
+
+/*
+ * bfa fcpim module API functions
+ */
+void	bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov);
+u16	bfa_fcpim_path_tov_get(struct bfa_s *bfa);
+u16	bfa_fcpim_qdepth_get(struct bfa_s *bfa);
+bfa_status_t bfa_fcpim_port_iostats(struct bfa_s *bfa,
+			struct bfa_itnim_iostats_s *stats, u8 lp_tag);
+void bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *fcpim_stats,
+			struct bfa_itnim_iostats_s *itnim_stats);
+bfa_status_t bfa_fcpim_profile_on(struct bfa_s *bfa, time64_t time);
+bfa_status_t bfa_fcpim_profile_off(struct bfa_s *bfa);
+
+#define bfa_fcpim_ioredirect_enabled(__bfa)				\
+	(((struct bfa_fcpim_s *)(BFA_FCPIM(__bfa)))->ioredirect)
+
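+/* Round-robin IO requests across the BFI_IOC_MAX_CQS request queues */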
+#define bfa_fcpim_get_next_reqq(__bfa, __qid)				\
+{									\
+	struct bfa_fcpim_s *__fcpim = BFA_FCPIM(__bfa);      \
+	__fcpim->reqq++;						\
+	__fcpim->reqq &= (BFI_IOC_MAX_CQS - 1);      \
+	*(__qid) = __fcpim->reqq;					\
+}
+
+#define bfa_iocfc_map_msg_to_qid(__msg, __qid)				\
+	*(__qid) = (u8)((__msg) & (BFI_IOC_MAX_CQS - 1));
+/*
+ * bfa itnim API functions
+ */
+struct bfa_itnim_s *bfa_itnim_create(struct bfa_s *bfa,
+		struct bfa_rport_s *rport, void *itnim);
+void bfa_itnim_delete(struct bfa_itnim_s *itnim);
+void bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec);
+void bfa_itnim_offline(struct bfa_itnim_s *itnim);
+void bfa_itnim_clear_stats(struct bfa_itnim_s *itnim);
+bfa_status_t bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
+			struct bfa_itnim_ioprofile_s *ioprofile);
+
+#define bfa_itnim_get_reqq(__ioim) (((struct bfa_ioim_s *)__ioim)->itnim->reqq)
+
+/*
+ * BFA completion callback for bfa_itnim_online().
+ */
+void	bfa_cb_itnim_online(void *itnim);
+
+/*
+ * BFA completion callback for bfa_itnim_offline().
+ */
+void	bfa_cb_itnim_offline(void *itnim);
+void	bfa_cb_itnim_tov_begin(void *itnim);
+void	bfa_cb_itnim_tov(void *itnim);
+
+/*
+ * BFA notification to FCS/driver for second level error recovery.
+ * At least one I/O request has timed out and the target is unresponsive
+ * to repeated abort requests. Second level error recovery should be
+ * initiated by starting implicit logout and recovery procedures.
+ */
+void	bfa_cb_itnim_sler(void *itnim);
+
+/*
+ * bfa ioim API functions
+ */
+struct bfa_ioim_s	*bfa_ioim_alloc(struct bfa_s *bfa,
+					struct bfad_ioim_s *dio,
+					struct bfa_itnim_s *itnim,
+					u16 nsges);
+
+void		bfa_ioim_free(struct bfa_ioim_s *ioim);
+void		bfa_ioim_start(struct bfa_ioim_s *ioim);
+bfa_status_t	bfa_ioim_abort(struct bfa_ioim_s *ioim);
+void		bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim,
+				      bfa_boolean_t iotov);
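+
+/*
+ * Typical initiator IO flow (sketch): bfa_ioim_alloc() binds a driver
+ * IO to an itnim, bfa_ioim_start() kicks off the IO state machine, and
+ * the result is reported via bfa_cb_ioim_done() or
+ * bfa_cb_ioim_good_comp(); bfa_ioim_abort() may be requested while the
+ * IO is still abortable.
+ */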
+/*
+ * I/O completion notification.
+ *
+ * @param[in]		dio			driver IO structure
+ * @param[in]		io_status		IO completion status
+ * @param[in]		scsi_status		SCSI status returned by target
+ * @param[in]		sns_len			SCSI sense length, 0 if none
+ * @param[in]		sns_info		SCSI sense data, if any
+ * @param[in]		residue			Residual length
+ *
+ * @return None
+ */
+void bfa_cb_ioim_done(void *bfad, struct bfad_ioim_s *dio,
+			enum bfi_ioim_status io_status,
+			u8 scsi_status, int sns_len,
+			u8 *sns_info, s32 residue);
+
+/*
+ * I/O good completion notification.
+ */
+void bfa_cb_ioim_good_comp(void *bfad, struct bfad_ioim_s *dio);
+
+/*
+ * I/O abort completion notification
+ */
+void bfa_cb_ioim_abort(void *bfad, struct bfad_ioim_s *dio);
+
+/*
+ * bfa tskim API functions
+ */
+struct bfa_tskim_s *bfa_tskim_alloc(struct bfa_s *bfa,
+			struct bfad_tskim_s *dtsk);
+void bfa_tskim_free(struct bfa_tskim_s *tskim);
+void bfa_tskim_start(struct bfa_tskim_s *tskim,
+			struct bfa_itnim_s *itnim, struct scsi_lun lun,
+			enum fcp_tm_cmnd tm, u8 t_secs);
+void bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
+			enum bfi_tskim_status tsk_status);
+
+void	bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn,
+			wwn_t rp_wwn, u16 rp_tag, u8 lp_tag);
+bfa_status_t	bfa_fcpim_lunmask_update(struct bfa_s *bfa, u32 on_off);
+bfa_status_t	bfa_fcpim_lunmask_query(struct bfa_s *bfa, void *buf);
+bfa_status_t	bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id,
+				wwn_t *pwwn, wwn_t rpwwn, struct scsi_lun lun);
+bfa_status_t	bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id,
+				wwn_t *pwwn, wwn_t rpwwn, struct scsi_lun lun);
+bfa_status_t	bfa_fcpim_lunmask_clear(struct bfa_s *bfa);
+u16		bfa_fcpim_read_throttle(struct bfa_s *bfa);
+bfa_status_t	bfa_fcpim_write_throttle(struct bfa_s *bfa, u16 value);
+bfa_status_t	bfa_fcpim_throttle_set(struct bfa_s *bfa, u16 value);
+bfa_status_t	bfa_fcpim_throttle_get(struct bfa_s *bfa, void *buf);
+u16     bfa_fcpim_get_throttle_cfg(struct bfa_s *bfa, u16 drv_cfg_param);
+
+#endif /* __BFA_FCPIM_H__ */
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
new file mode 100644
index 0000000..932feb0
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fcs.c
@@ -0,0 +1,1624 @@
+/*
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014- QLogic Corporation.
+ * All rights reserved
+ * www.qlogic.com
+ *
+ * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+/*
+ *  bfa_fcs.c BFA FCS main
+ */
+
+#include "bfad_drv.h"
+#include "bfad_im.h"
+#include "bfa_fcs.h"
+#include "bfa_fcbuild.h"
+
+BFA_TRC_FILE(FCS, FCS);
+
+/*
+ *  fcs_api BFA FCS API
+ */
+
+static void
+bfa_fcs_exit_comp(void *fcs_cbarg)
+{
+	struct bfa_fcs_s      *fcs = fcs_cbarg;
+	struct bfad_s         *bfad = fcs->bfad;
+
+	complete(&bfad->comp);
+}
+
+/*
+ * fcs initialization, called once after bfa initialization is complete
+ */
+void
+bfa_fcs_init(struct bfa_fcs_s *fcs)
+{
+	bfa_sm_send_event(&fcs->fabric, BFA_FCS_FABRIC_SM_CREATE);
+	bfa_trc(fcs, 0);
+}
+
+/*
+ *  fcs_api BFA FCS API
+ */
+
+/*
+ * FCS update cfg - reset the pwwn/nwwn of fabric base logical port
+ * with values learned during bfa_init firmware GETATTR REQ.
+ */
+void
+bfa_fcs_update_cfg(struct bfa_fcs_s *fcs)
+{
+	struct bfa_fcs_fabric_s *fabric = &fcs->fabric;
+	struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg;
+	struct bfa_ioc_s *ioc = &fabric->fcs->bfa->ioc;
+
+	port_cfg->nwwn = ioc->attr->nwwn;
+	port_cfg->pwwn = ioc->attr->pwwn;
+}
+
+/*
+ * Stop FCS operations.
+ */
+void
+bfa_fcs_stop(struct bfa_fcs_s *fcs)
+{
+	bfa_wc_init(&fcs->wc, bfa_fcs_exit_comp, fcs);
+	bfa_wc_up(&fcs->wc);
+	bfa_fcs_fabric_modstop(fcs);
+	bfa_wc_wait(&fcs->wc);
+}
+
+/*
+ * fcs pbc vport initialization
+ */
+void
+bfa_fcs_pbc_vport_init(struct bfa_fcs_s *fcs)
+{
+	int i, npbc_vports;
+	struct bfi_pbc_vport_s pbc_vports[BFI_PBC_MAX_VPORTS];
+
+	/* Initialize pbc vports */
+	if (!fcs->min_cfg) {
+		npbc_vports =
+			bfa_iocfc_get_pbc_vports(fcs->bfa, pbc_vports);
+		for (i = 0; i < npbc_vports; i++)
+			bfa_fcb_pbc_vport_create(fcs->bfa->bfad, pbc_vports[i]);
+	}
+}
+
+/*
+ *	brief
+ *		FCS driver details initialization.
+ *
+ *	param[in]		fcs		FCS instance
+ *	param[in]		driver_info	Driver Details
+ *
+ *	return None
+ */
+void
+bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
+			struct bfa_fcs_driver_info_s *driver_info)
+{
+	fcs->driver_info = *driver_info;
+
+	bfa_fcs_fabric_psymb_init(&fcs->fabric);
+	bfa_fcs_fabric_nsymb_init(&fcs->fabric);
+}
+
+/*
+ *	brief
+ *		FCS instance cleanup and exit.
+ *
+ *	param[in]		fcs			FCS instance
+ *	return None
+ */
+void
+bfa_fcs_exit(struct bfa_fcs_s *fcs)
+{
+	bfa_wc_init(&fcs->wc, bfa_fcs_exit_comp, fcs);
+	bfa_wc_up(&fcs->wc);
+	bfa_trc(fcs, 0);
+	bfa_lps_delete(fcs->fabric.lps);
+	bfa_sm_send_event(&fcs->fabric, BFA_FCS_FABRIC_SM_DELETE);
+	bfa_wc_wait(&fcs->wc);
+}
+
+/*
+ * Fabric module implementation.
+ */
+
+#define BFA_FCS_FABRIC_RETRY_DELAY	(2000)	/* Milliseconds */
+#define BFA_FCS_FABRIC_CLEANUP_DELAY	(10000)	/* Milliseconds */
+
+#define bfa_fcs_fabric_set_opertype(__fabric) do {			\
+	if (bfa_fcport_get_topology((__fabric)->fcs->bfa)		\
+				== BFA_PORT_TOPOLOGY_P2P) {		\
+		if ((__fabric)->fab_type == BFA_FCS_FABRIC_SWITCHED)	\
+			(__fabric)->oper_type = BFA_PORT_TYPE_NPORT;	\
+		else							\
+			(__fabric)->oper_type = BFA_PORT_TYPE_P2P;	\
+	} else								\
+		(__fabric)->oper_type = BFA_PORT_TYPE_NLPORT;		\
+} while (0)
+
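+/*
+ * For reference, the operating type chosen by the macro above:
+ *
+ *	physical topology	fab_type	oper_type
+ *	P2P			SWITCHED	BFA_PORT_TYPE_NPORT
+ *	P2P			other		BFA_PORT_TYPE_P2P
+ *	other (loop)		any		BFA_PORT_TYPE_NLPORT
+ */
+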
+/*
+ * forward declarations
+ */
+static void bfa_fcs_fabric_init(struct bfa_fcs_fabric_s *fabric);
+static void bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric);
+static void bfa_fcs_fabric_notify_online(struct bfa_fcs_fabric_s *fabric);
+static void bfa_fcs_fabric_notify_offline(struct bfa_fcs_fabric_s *fabric);
+static void bfa_fcs_fabric_delay(void *cbarg);
+static void bfa_fcs_fabric_delete(struct bfa_fcs_fabric_s *fabric);
+static void bfa_fcs_fabric_delete_comp(void *cbarg);
+static void bfa_fcs_fabric_stop(struct bfa_fcs_fabric_s *fabric);
+static void bfa_fcs_fabric_stop_comp(void *cbarg);
+static void bfa_fcs_fabric_process_uf(struct bfa_fcs_fabric_s *fabric,
+				      struct fchs_s *fchs, u16 len);
+static void bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric,
+					 struct fchs_s *fchs, u16 len);
+static void bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric);
+static void bfa_fcs_fabric_flogiacc_comp(void *fcsarg,
+					 struct bfa_fcxp_s *fcxp, void *cbarg,
+					 bfa_status_t status,
+					 u32 rsp_len,
+					 u32 resid_len,
+					 struct fchs_s *rspfchs);
+
+static void	bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric,
+					 enum bfa_fcs_fabric_event event);
+static void	bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric,
+					  enum bfa_fcs_fabric_event event);
+static void	bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric,
+					   enum bfa_fcs_fabric_event event);
+static void	bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric,
+					enum bfa_fcs_fabric_event event);
+static void	bfa_fcs_fabric_sm_flogi_retry(struct bfa_fcs_fabric_s *fabric,
+					      enum bfa_fcs_fabric_event event);
+static void	bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric,
+				       enum bfa_fcs_fabric_event event);
+static void	bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric,
+					   enum bfa_fcs_fabric_event event);
+static void	bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric,
+				       enum bfa_fcs_fabric_event event);
+static void	bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric,
+					    enum bfa_fcs_fabric_event event);
+static void	bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric,
+					   enum bfa_fcs_fabric_event event);
+static void	bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric,
+					   enum bfa_fcs_fabric_event event);
+static void	bfa_fcs_fabric_sm_stopping(struct bfa_fcs_fabric_s *fabric,
+					   enum bfa_fcs_fabric_event event);
+static void	bfa_fcs_fabric_sm_cleanup(struct bfa_fcs_fabric_s *fabric,
+					  enum bfa_fcs_fabric_event event);
+/*
+ *   Beginning state before fabric creation.
+ */
+static void
+bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric,
+			 enum bfa_fcs_fabric_event event)
+{
+	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+	bfa_trc(fabric->fcs, event);
+
+	switch (event) {
+	case BFA_FCS_FABRIC_SM_CREATE:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created);
+		bfa_fcs_fabric_init(fabric);
+		bfa_fcs_lport_init(&fabric->bport, &fabric->bport.port_cfg);
+		break;
+
+	case BFA_FCS_FABRIC_SM_LINK_UP:
+	case BFA_FCS_FABRIC_SM_LINK_DOWN:
+		break;
+
+	default:
+		bfa_sm_fault(fabric->fcs, event);
+	}
+}
+
+/*
+ *   Fabric has been created; awaiting the fabric start event.
+ */
+static void
+bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric,
+			  enum bfa_fcs_fabric_event event)
+{
+	struct bfa_s	*bfa = fabric->fcs->bfa;
+
+	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+	bfa_trc(fabric->fcs, event);
+
+	switch (event) {
+	case BFA_FCS_FABRIC_SM_START:
+		if (!bfa_fcport_is_linkup(fabric->fcs->bfa)) {
+			bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
+			break;
+		}
+		if (bfa_fcport_get_topology(bfa) ==
+				BFA_PORT_TOPOLOGY_LOOP) {
+			fabric->fab_type = BFA_FCS_FABRIC_LOOP;
+			fabric->bport.pid = bfa_fcport_get_myalpa(bfa);
+			fabric->bport.pid = bfa_hton3b(fabric->bport.pid);
+			bfa_sm_set_state(fabric,
+					bfa_fcs_fabric_sm_online);
+			bfa_fcs_fabric_set_opertype(fabric);
+			bfa_fcs_lport_online(&fabric->bport);
+		} else {
+			bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi);
+			bfa_fcs_fabric_login(fabric);
+		}
+		break;
+
+	case BFA_FCS_FABRIC_SM_LINK_UP:
+	case BFA_FCS_FABRIC_SM_LINK_DOWN:
+		break;
+
+	case BFA_FCS_FABRIC_SM_DELETE:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
+		bfa_fcs_fabric_delete(fabric);
+		break;
+
+	default:
+		bfa_sm_fault(fabric->fcs, event);
+	}
+}
+
+/*
+ *   Link is down; awaiting a LINK_UP event from the port. The fabric
+ *   also enters this state at start time when the link is down.
+ */
+static void
+bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric,
+			   enum bfa_fcs_fabric_event event)
+{
+	struct bfa_s	*bfa = fabric->fcs->bfa;
+
+	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+	bfa_trc(fabric->fcs, event);
+
+	switch (event) {
+	case BFA_FCS_FABRIC_SM_LINK_UP:
+		if (bfa_fcport_get_topology(bfa) != BFA_PORT_TOPOLOGY_LOOP) {
+			bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi);
+			bfa_fcs_fabric_login(fabric);
+			break;
+		}
+		fabric->fab_type = BFA_FCS_FABRIC_LOOP;
+		fabric->bport.pid = bfa_fcport_get_myalpa(bfa);
+		fabric->bport.pid = bfa_hton3b(fabric->bport.pid);
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_online);
+		bfa_fcs_fabric_set_opertype(fabric);
+		bfa_fcs_lport_online(&fabric->bport);
+		break;
+
+	case BFA_FCS_FABRIC_SM_RETRY_OP:
+	case BFA_FCS_FABRIC_SM_LOOPBACK:
+		break;
+
+	case BFA_FCS_FABRIC_SM_DELETE:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
+		bfa_fcs_fabric_delete(fabric);
+		break;
+
+	case BFA_FCS_FABRIC_SM_STOP:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup);
+		bfa_fcs_fabric_stop(fabric);
+		break;
+
+	default:
+		bfa_sm_fault(fabric->fcs, event);
+	}
+}
+
+/*
+ *   FLOGI is in progress, awaiting FLOGI reply.
+ */
+static void
+bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric,
+			enum bfa_fcs_fabric_event event)
+{
+	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+	bfa_trc(fabric->fcs, event);
+
+	switch (event) {
+	case BFA_FCS_FABRIC_SM_CONT_OP:
+		bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa,
+					   fabric->bb_credit);
+		fabric->fab_type = BFA_FCS_FABRIC_SWITCHED;
+
+		if (fabric->auth_reqd && fabric->is_auth) {
+			bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth);
+			bfa_trc(fabric->fcs, event);
+		} else {
+			bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_online);
+			bfa_fcs_fabric_notify_online(fabric);
+		}
+		break;
+
+	case BFA_FCS_FABRIC_SM_RETRY_OP:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi_retry);
+		bfa_timer_start(fabric->fcs->bfa, &fabric->delay_timer,
+				bfa_fcs_fabric_delay, fabric,
+				BFA_FCS_FABRIC_RETRY_DELAY);
+		break;
+
+	case BFA_FCS_FABRIC_SM_LOOPBACK:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_loopback);
+		bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
+		bfa_fcs_fabric_set_opertype(fabric);
+		break;
+
+	case BFA_FCS_FABRIC_SM_NO_FABRIC:
+		fabric->fab_type = BFA_FCS_FABRIC_N2N;
+		bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa,
+					   fabric->bb_credit);
+		bfa_fcs_fabric_notify_online(fabric);
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_nofabric);
+		break;
+
+	case BFA_FCS_FABRIC_SM_LINK_DOWN:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
+		bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
+		break;
+
+	case BFA_FCS_FABRIC_SM_DELETE:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
+		bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
+		bfa_fcs_fabric_delete(fabric);
+		break;
+
+	default:
+		bfa_sm_fault(fabric->fcs, event);
+	}
+}
+
+
+static void
+bfa_fcs_fabric_sm_flogi_retry(struct bfa_fcs_fabric_s *fabric,
+			      enum bfa_fcs_fabric_event event)
+{
+	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+	bfa_trc(fabric->fcs, event);
+
+	switch (event) {
+	case BFA_FCS_FABRIC_SM_DELAYED:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi);
+		bfa_fcs_fabric_login(fabric);
+		break;
+
+	case BFA_FCS_FABRIC_SM_LINK_DOWN:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
+		bfa_timer_stop(&fabric->delay_timer);
+		break;
+
+	case BFA_FCS_FABRIC_SM_DELETE:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
+		bfa_timer_stop(&fabric->delay_timer);
+		bfa_fcs_fabric_delete(fabric);
+		break;
+
+	default:
+		bfa_sm_fault(fabric->fcs, event);
+	}
+}
+
+/*
+ *   Authentication is in progress, awaiting authentication results.
+ */
+static void
+bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric,
+		       enum bfa_fcs_fabric_event event)
+{
+	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+	bfa_trc(fabric->fcs, event);
+
+	switch (event) {
+	case BFA_FCS_FABRIC_SM_AUTH_FAILED:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed);
+		bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
+		break;
+
+	case BFA_FCS_FABRIC_SM_AUTH_SUCCESS:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_online);
+		bfa_fcs_fabric_notify_online(fabric);
+		break;
+
+	case BFA_FCS_FABRIC_SM_PERF_EVFP:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_evfp);
+		break;
+
+	case BFA_FCS_FABRIC_SM_LINK_DOWN:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
+		bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
+		break;
+
+	case BFA_FCS_FABRIC_SM_DELETE:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
+		bfa_fcs_fabric_delete(fabric);
+		break;
+
+	default:
+		bfa_sm_fault(fabric->fcs, event);
+	}
+}
+
+/*
+ *   Authentication failed
+ */
+void
+bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric,
+			      enum bfa_fcs_fabric_event event)
+{
+	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+	bfa_trc(fabric->fcs, event);
+
+	switch (event) {
+	case BFA_FCS_FABRIC_SM_LINK_DOWN:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
+		bfa_fcs_fabric_notify_offline(fabric);
+		break;
+
+	case BFA_FCS_FABRIC_SM_DELETE:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
+		bfa_fcs_fabric_delete(fabric);
+		break;
+
+	default:
+		bfa_sm_fault(fabric->fcs, event);
+	}
+}
+
+/*
+ *   Port is in loopback mode.
+ */
+void
+bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric,
+			   enum bfa_fcs_fabric_event event)
+{
+	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+	bfa_trc(fabric->fcs, event);
+
+	switch (event) {
+	case BFA_FCS_FABRIC_SM_LINK_DOWN:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
+		bfa_fcs_fabric_notify_offline(fabric);
+		break;
+
+	case BFA_FCS_FABRIC_SM_DELETE:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
+		bfa_fcs_fabric_delete(fabric);
+		break;
+
+	default:
+		bfa_sm_fault(fabric->fcs, event);
+	}
+}
+
+/*
+ *   There is no attached fabric - private loop or NPort-to-NPort topology.
+ */
+static void
+bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric,
+			   enum bfa_fcs_fabric_event event)
+{
+	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+	bfa_trc(fabric->fcs, event);
+
+	switch (event) {
+	case BFA_FCS_FABRIC_SM_LINK_DOWN:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
+		bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
+		bfa_fcs_fabric_notify_offline(fabric);
+		break;
+
+	case BFA_FCS_FABRIC_SM_DELETE:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
+		bfa_fcs_fabric_delete(fabric);
+		break;
+
+	case BFA_FCS_FABRIC_SM_NO_FABRIC:
+		bfa_trc(fabric->fcs, fabric->bb_credit);
+		bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa,
+					   fabric->bb_credit);
+		break;
+
+	case BFA_FCS_FABRIC_SM_RETRY_OP:
+		break;
+
+	default:
+		bfa_sm_fault(fabric->fcs, event);
+	}
+}
+
+/*
+ *   Fabric is online - normal operating state.
+ */
+void
+bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
+			 enum bfa_fcs_fabric_event event)
+{
+	struct bfa_s	*bfa = fabric->fcs->bfa;
+
+	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+	bfa_trc(fabric->fcs, event);
+
+	switch (event) {
+	case BFA_FCS_FABRIC_SM_LINK_DOWN:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
+		if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) {
+			bfa_fcs_lport_offline(&fabric->bport);
+		} else {
+			bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
+			bfa_fcs_fabric_notify_offline(fabric);
+		}
+		break;
+
+	case BFA_FCS_FABRIC_SM_DELETE:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
+		bfa_fcs_fabric_delete(fabric);
+		break;
+
+	case BFA_FCS_FABRIC_SM_STOP:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_stopping);
+		bfa_fcs_fabric_stop(fabric);
+		break;
+
+	case BFA_FCS_FABRIC_SM_AUTH_FAILED:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed);
+		bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
+		break;
+
+	case BFA_FCS_FABRIC_SM_AUTH_SUCCESS:
+		break;
+
+	default:
+		bfa_sm_fault(fabric->fcs, event);
+	}
+}
+
+/*
+ *   Exchanging virtual fabric parameters.
+ */
+static void
+bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric,
+		       enum bfa_fcs_fabric_event event)
+{
+	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+	bfa_trc(fabric->fcs, event);
+
+	switch (event) {
+	case BFA_FCS_FABRIC_SM_CONT_OP:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_evfp_done);
+		break;
+
+	case BFA_FCS_FABRIC_SM_ISOLATE:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_isolated);
+		break;
+
+	default:
+		bfa_sm_fault(fabric->fcs, event);
+	}
+}
+
+/*
+ *   EVFP exchange complete and VFT tagging is enabled.
+ */
+static void
+bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric,
+			    enum bfa_fcs_fabric_event event)
+{
+	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+	bfa_trc(fabric->fcs, event);
+}
+
+/*
+ *   Port is isolated after EVFP exchange due to VF_ID mismatch (N and F).
+ */
+static void
+bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric,
+			   enum bfa_fcs_fabric_event event)
+{
+	struct bfad_s *bfad = (struct bfad_s *)fabric->fcs->bfad;
+	char	pwwn_ptr[BFA_STRING_32];
+
+	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+	bfa_trc(fabric->fcs, event);
+	wwn2str(pwwn_ptr, fabric->bport.port_cfg.pwwn);
+
+	BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+		"Port is isolated due to VF_ID mismatch. "
+		"PWWN: %s Port VF_ID: %04x switch port VF_ID: %04x.",
+		pwwn_ptr, fabric->fcs->port_vfid,
+		fabric->event_arg.swp_vfid);
+}
+
+/*
+ *   Fabric is being deleted, awaiting vport delete completions.
+ */
+static void
+bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric,
+			   enum bfa_fcs_fabric_event event)
+{
+	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+	bfa_trc(fabric->fcs, event);
+
+	switch (event) {
+	case BFA_FCS_FABRIC_SM_DELCOMP:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
+		bfa_wc_down(&fabric->fcs->wc);
+		break;
+
+	case BFA_FCS_FABRIC_SM_LINK_UP:
+		break;
+
+	case BFA_FCS_FABRIC_SM_LINK_DOWN:
+		bfa_fcs_fabric_notify_offline(fabric);
+		break;
+
+	default:
+		bfa_sm_fault(fabric->fcs, event);
+	}
+}
+
+/*
+ * Fabric is being stopped, awaiting vport stop completions.
+ */
+static void
+bfa_fcs_fabric_sm_stopping(struct bfa_fcs_fabric_s *fabric,
+			   enum bfa_fcs_fabric_event event)
+{
+	struct bfa_s	*bfa = fabric->fcs->bfa;
+
+	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+	bfa_trc(fabric->fcs, event);
+
+	switch (event) {
+	case BFA_FCS_FABRIC_SM_STOPCOMP:
+		if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) {
+			bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created);
+		} else {
+			bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup);
+			bfa_sm_send_event(fabric->lps, BFA_LPS_SM_LOGOUT);
+		}
+		break;
+
+	case BFA_FCS_FABRIC_SM_LINK_UP:
+		break;
+
+	case BFA_FCS_FABRIC_SM_LINK_DOWN:
+		if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP)
+			bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created);
+		else
+			bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup);
+		break;
+
+	default:
+		bfa_sm_fault(fabric->fcs, event);
+	}
+}
+
+/*
+ * Fabric is being stopped, cleanup without FLOGO
+ */
+static void
+bfa_fcs_fabric_sm_cleanup(struct bfa_fcs_fabric_s *fabric,
+			  enum bfa_fcs_fabric_event event)
+{
+	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+	bfa_trc(fabric->fcs, event);
+
+	switch (event) {
+	case BFA_FCS_FABRIC_SM_STOPCOMP:
+	case BFA_FCS_FABRIC_SM_LOGOCOMP:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created);
+		bfa_wc_down(&(fabric->fcs)->wc);
+		break;
+
+	case BFA_FCS_FABRIC_SM_LINK_DOWN:
+		/*
+		 * Ignore - this event can arrive if we are notified of IOC
+		 * down before the fabric completion callback is done.
+		 */
+		break;
+
+	default:
+		bfa_sm_fault(fabric->fcs, event);
+	}
+}
+
+/*
+ *  fcs_fabric_private fabric private functions
+ */
+
+static void
+bfa_fcs_fabric_init(struct bfa_fcs_fabric_s *fabric)
+{
+	struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg;
+
+	port_cfg->roles = BFA_LPORT_ROLE_FCP_IM;
+	port_cfg->nwwn = fabric->fcs->bfa->ioc.attr->nwwn;
+	port_cfg->pwwn = fabric->fcs->bfa->ioc.attr->pwwn;
+}
+
+/*
+ * Port Symbolic Name Creation for base port.
+ */
+void
+bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric)
+{
+	struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg;
+	char model[BFA_ADAPTER_MODEL_NAME_LEN] = {0};
+	struct bfa_fcs_driver_info_s *driver_info = &fabric->fcs->driver_info;
+
+	bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model);
+
+	/* Model name/number */
+	strlcpy(port_cfg->sym_name.symname, model,
+		BFA_SYMNAME_MAXLEN);
+	strlcat(port_cfg->sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+		BFA_SYMNAME_MAXLEN);
+
+	/* Driver Version */
+	strlcat(port_cfg->sym_name.symname, driver_info->version,
+		BFA_SYMNAME_MAXLEN);
+	strlcat(port_cfg->sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+		BFA_SYMNAME_MAXLEN);
+
+	/* Host machine name */
+	strlcat(port_cfg->sym_name.symname,
+		driver_info->host_machine_name,
+		BFA_SYMNAME_MAXLEN);
+	strlcat(port_cfg->sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+		BFA_SYMNAME_MAXLEN);
+
+	/*
+	 * Host OS Info:
+	 * If OS Patch Info is not available, copy the entire OS info
+	 * string (64 bytes) without truncation.
+	 */
+	strlcat(port_cfg->sym_name.symname,
+		driver_info->host_os_name,
+		BFA_SYMNAME_MAXLEN);
+	strlcat(port_cfg->sym_name.symname,
+		BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+		BFA_SYMNAME_MAXLEN);
+
+	/* Append host OS Patch Info, if any */
+	if (driver_info->host_os_patch[0] != '\0')
+		strlcat(port_cfg->sym_name.symname,
+			driver_info->host_os_patch,
+			BFA_SYMNAME_MAXLEN);
+
+	/* null terminate */
+	port_cfg->sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0;
+}
+
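+/*
+ * With the format built above, a resulting port symbolic name looks like
+ * "<model> | <driver version> | <host name> | <os name>[ | <os patch>]";
+ * for example (all field values hypothetical):
+ *
+ *	"BR-1860 | 3.2.25.0 | host-1 | Linux | 3.10.0"
+ */
+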
+/*
+ * Node Symbolic Name Creation for base port and all vports
+ */
+void
+bfa_fcs_fabric_nsymb_init(struct bfa_fcs_fabric_s *fabric)
+{
+	struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg;
+	char model[BFA_ADAPTER_MODEL_NAME_LEN] = {0};
+	struct bfa_fcs_driver_info_s *driver_info = &fabric->fcs->driver_info;
+
+	bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model);
+
+	/* Model name/number */
+	strlcpy(port_cfg->node_sym_name.symname, model,
+		BFA_SYMNAME_MAXLEN);
+	strlcat(port_cfg->node_sym_name.symname,
+			BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+			BFA_SYMNAME_MAXLEN);
+
+	/* Driver Version */
+	strlcat(port_cfg->node_sym_name.symname, (char *)driver_info->version,
+		BFA_SYMNAME_MAXLEN);
+	strlcat(port_cfg->node_sym_name.symname,
+			BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+			BFA_SYMNAME_MAXLEN);
+
+	/* Host machine name */
+	strlcat(port_cfg->node_sym_name.symname,
+		driver_info->host_machine_name,
+		BFA_SYMNAME_MAXLEN);
+	strlcat(port_cfg->node_sym_name.symname,
+			BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+			BFA_SYMNAME_MAXLEN);
+
+	/* null terminate */
+	port_cfg->node_sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0;
+}
+
+/*
+ * bfa lps login completion callback
+ */
+void
+bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status)
+{
+	struct bfa_fcs_fabric_s *fabric = uarg;
+
+	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+	bfa_trc(fabric->fcs, status);
+
+	switch (status) {
+	case BFA_STATUS_OK:
+		fabric->stats.flogi_accepts++;
+		break;
+
+	case BFA_STATUS_INVALID_MAC:
+		/* Only for CNA */
+		fabric->stats.flogi_acc_err++;
+		bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP);
+
+		return;
+
+	case BFA_STATUS_EPROTOCOL:
+		switch (fabric->lps->ext_status) {
+		case BFA_EPROTO_BAD_ACCEPT:
+			fabric->stats.flogi_acc_err++;
+			break;
+
+		case BFA_EPROTO_UNKNOWN_RSP:
+			fabric->stats.flogi_unknown_rsp++;
+			break;
+
+		default:
+			break;
+		}
+		bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP);
+
+		return;
+
+	case BFA_STATUS_FABRIC_RJT:
+		fabric->stats.flogi_rejects++;
+		bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP);
+		return;
+
+	default:
+		fabric->stats.flogi_rsp_err++;
+		bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP);
+		return;
+	}
+
+	fabric->bb_credit = fabric->lps->pr_bbcred;
+	bfa_trc(fabric->fcs, fabric->bb_credit);
+
+	if (!(fabric->lps->brcd_switch))
+		fabric->fabric_name =  fabric->lps->pr_nwwn;
+
+	/*
+	 * Check port type. It should be 1 = F-port.
+	 */
+	if (fabric->lps->fport) {
+		fabric->bport.pid = fabric->lps->lp_pid;
+		fabric->is_npiv = fabric->lps->npiv_en;
+		fabric->is_auth = fabric->lps->auth_req;
+		bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_CONT_OP);
+	} else {
+		/*
+		 * Nport-2-Nport direct attached
+		 */
+		fabric->bport.port_topo.pn2n.rem_port_wwn =
+			fabric->lps->pr_pwwn;
+		fabric->fab_type = BFA_FCS_FABRIC_N2N;
+		bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_NO_FABRIC);
+	}
+
+	bfa_trc(fabric->fcs, fabric->bport.pid);
+	bfa_trc(fabric->fcs, fabric->is_npiv);
+	bfa_trc(fabric->fcs, fabric->is_auth);
+}
+/*
+ *		Allocate and send FLOGI.
+ */
+static void
+bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric)
+{
+	struct bfa_s		*bfa = fabric->fcs->bfa;
+	struct bfa_lport_cfg_s	*pcfg = &fabric->bport.port_cfg;
+	u8			alpa = 0;
+
+	bfa_lps_flogi(fabric->lps, fabric, alpa, bfa_fcport_get_maxfrsize(bfa),
+		      pcfg->pwwn, pcfg->nwwn, fabric->auth_reqd);
+
+	fabric->stats.flogi_sent++;
+}
+
+static void
+bfa_fcs_fabric_notify_online(struct bfa_fcs_fabric_s *fabric)
+{
+	struct bfa_fcs_vport_s *vport;
+	struct list_head	      *qe, *qen;
+
+	bfa_trc(fabric->fcs, fabric->fabric_name);
+
+	bfa_fcs_fabric_set_opertype(fabric);
+	fabric->stats.fabric_onlines++;
+
+	/*
+	 * notify online event to base and then virtual ports
+	 */
+	bfa_fcs_lport_online(&fabric->bport);
+
+	list_for_each_safe(qe, qen, &fabric->vport_q) {
+		vport = (struct bfa_fcs_vport_s *) qe;
+		bfa_fcs_vport_online(vport);
+	}
+}
+
+static void
+bfa_fcs_fabric_notify_offline(struct bfa_fcs_fabric_s *fabric)
+{
+	struct bfa_fcs_vport_s *vport;
+	struct list_head	      *qe, *qen;
+
+	bfa_trc(fabric->fcs, fabric->fabric_name);
+	fabric->stats.fabric_offlines++;
+
+	/*
+	 * notify offline event first to vports and then base port.
+	 */
+	list_for_each_safe(qe, qen, &fabric->vport_q) {
+		vport = (struct bfa_fcs_vport_s *) qe;
+		bfa_fcs_vport_offline(vport);
+	}
+
+	bfa_fcs_lport_offline(&fabric->bport);
+
+	fabric->fabric_name = 0;
+	fabric->fabric_ip_addr[0] = 0;
+}
+
+static void
+bfa_fcs_fabric_delay(void *cbarg)
+{
+	struct bfa_fcs_fabric_s *fabric = cbarg;
+
+	bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELAYED);
+}
+
+/*
+ * Stop all vports and wait for vport stop completions.
+ */
+static void
+bfa_fcs_fabric_stop(struct bfa_fcs_fabric_s *fabric)
+{
+	struct bfa_fcs_vport_s *vport;
+	struct list_head	*qe, *qen;
+
+	bfa_wc_init(&fabric->stop_wc, bfa_fcs_fabric_stop_comp, fabric);
+
+	list_for_each_safe(qe, qen, &fabric->vport_q) {
+		vport = (struct bfa_fcs_vport_s *) qe;
+		bfa_wc_up(&fabric->stop_wc);
+		bfa_fcs_vport_fcs_stop(vport);
+	}
+
+	bfa_wc_up(&fabric->stop_wc);
+	bfa_fcs_lport_stop(&fabric->bport);
+	bfa_wc_wait(&fabric->stop_wc);
+}
+
+/*
+ * Delete all vports and wait for vport delete completions.
+ */
+static void
+bfa_fcs_fabric_delete(struct bfa_fcs_fabric_s *fabric)
+{
+	struct bfa_fcs_vport_s *vport;
+	struct list_head	      *qe, *qen;
+
+	list_for_each_safe(qe, qen, &fabric->vport_q) {
+		vport = (struct bfa_fcs_vport_s *) qe;
+		bfa_fcs_vport_fcs_delete(vport);
+	}
+
+	bfa_fcs_lport_delete(&fabric->bport);
+	bfa_wc_wait(&fabric->wc);
+}
+
+static void
+bfa_fcs_fabric_delete_comp(void *cbarg)
+{
+	struct bfa_fcs_fabric_s *fabric = cbarg;
+
+	bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELCOMP);
+}
+
+static void
+bfa_fcs_fabric_stop_comp(void *cbarg)
+{
+	struct bfa_fcs_fabric_s *fabric = cbarg;
+
+	bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_STOPCOMP);
+}
+
+/*
+ *  fcs_fabric_public fabric public functions
+ */
+
+/*
+ * Fabric module stop -- stop FCS actions
+ */
+void
+bfa_fcs_fabric_modstop(struct bfa_fcs_s *fcs)
+{
+	struct bfa_fcs_fabric_s *fabric;
+
+	bfa_trc(fcs, 0);
+	fabric = &fcs->fabric;
+	bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_STOP);
+}
+
+/*
+ * Fabric module start -- kick starts FCS actions
+ */
+void
+bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs)
+{
+	struct bfa_fcs_fabric_s *fabric;
+
+	bfa_trc(fcs, 0);
+	fabric = &fcs->fabric;
+	bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_START);
+}
+
+
+/*
+ *   Link up notification from BFA physical port module.
+ */
+void
+bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric)
+{
+	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+	bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_UP);
+}
+
+/*
+ *   Link down notification from BFA physical port module.
+ */
+void
+bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric)
+{
+	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+	bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_DOWN);
+}
+
+/*
+ *   A child vport is being created in the fabric.
+ *
+ *   Call from vport module at vport creation. A list of base port and vports
+ *   belonging to a fabric is maintained to propagate link events.
+ *
+ *   param[in] fabric - Fabric instance. This can be a base fabric or vf.
+ *   param[in] vport  - Vport being created.
+ *
+ *   @return None (always succeeds)
+ */
+void
+bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric,
+			struct bfa_fcs_vport_s *vport)
+{
+	/*
+	 * - add vport to fabric's vport_q
+	 */
+	bfa_trc(fabric->fcs, fabric->vf_id);
+
+	list_add_tail(&vport->qe, &fabric->vport_q);
+	fabric->num_vports++;
+	bfa_wc_up(&fabric->wc);
+}
+
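+/*
+ * Note: the bfa_wc_up() in bfa_fcs_fabric_addvport() above pairs with
+ * the bfa_wc_down() in bfa_fcs_fabric_delvport() below, so fabric
+ * deletion waits until every child vport is gone.
+ */
+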
+/*
+ *   A child vport is being deleted from the fabric.
+ *
+ *   Call from vport module at vport deletion; removes the vport from
+ *   the fabric's vport list.
+ */
+void
+bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric,
+			struct bfa_fcs_vport_s *vport)
+{
+	list_del(&vport->qe);
+	fabric->num_vports--;
+	bfa_wc_down(&fabric->wc);
+}
+
+
+/*
+ * Lookup for a vport within a fabric given its pwwn
+ */
+struct bfa_fcs_vport_s *
+bfa_fcs_fabric_vport_lookup(struct bfa_fcs_fabric_s *fabric, wwn_t pwwn)
+{
+	struct bfa_fcs_vport_s *vport;
+	struct list_head	      *qe;
+
+	list_for_each(qe, &fabric->vport_q) {
+		vport = (struct bfa_fcs_vport_s *) qe;
+		if (bfa_fcs_lport_get_pwwn(&vport->lport) == pwwn)
+			return vport;
+	}
+
+	return NULL;
+}
+
+
+/*
+ *  Get OUI of the attached switch.
+ *
+ *  Note: Use of this function should be avoided as much as possible.
+ *        It should be used only if there is a requirement to check for
+ *        FOS versions below 6.3. To check whether the attached fabric is
+ *        a Brocade fabric, use bfa_lps_is_brcd_fabric(), which works only
+ *        for FOS versions 6.3 and above.
+ */
+
+u16
+bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric)
+{
+	wwn_t fab_nwwn;
+	u8 *tmp;
+	u16 oui;
+
+	fab_nwwn = fabric->lps->pr_nwwn;
+
+	tmp = (u8 *)&fab_nwwn;
+	oui = (tmp[3] << 8) | tmp[4];
+
+	return oui;
+}
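+
+/*
+ * Worked example: a Brocade switch NWWN has the form
+ * 10:00:00:05:1e:xx:xx:xx, so tmp[3] = 0x05 and tmp[4] = 0x1e, giving
+ * oui = (0x05 << 8) | 0x1e = 0x051e, i.e. BFA_FCS_BRCD_SWITCH_OUI.
+ */
+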
+/*
+ *		Unsolicited frame receive handling.
+ */
+void
+bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
+		       u16 len)
+{
+	u32	pid = fchs->d_id;
+	struct bfa_fcs_vport_s *vport;
+	struct list_head	      *qe;
+	struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
+	struct fc_logi_s *flogi = (struct fc_logi_s *) els_cmd;
+
+	bfa_trc(fabric->fcs, len);
+	bfa_trc(fabric->fcs, pid);
+
+	/*
+	 * Look for our own FLOGI frames being looped back. This means an
+	 * external loopback cable is in place. Our own FLOGI frames are
+	 * sometimes looped back when switch port gets temporarily bypassed.
+	 */
+	if ((pid == bfa_ntoh3b(FC_FABRIC_PORT)) &&
+	    (els_cmd->els_code == FC_ELS_FLOGI) &&
+	    (flogi->port_name == bfa_fcs_lport_get_pwwn(&fabric->bport))) {
+		bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LOOPBACK);
+		return;
+	}
+
+	/*
+	 * FLOGI/EVFP exchanges should be consumed by base fabric.
+	 */
+	if (fchs->d_id == bfa_hton3b(FC_FABRIC_PORT)) {
+		bfa_trc(fabric->fcs, pid);
+		bfa_fcs_fabric_process_uf(fabric, fchs, len);
+		return;
+	}
+
+	if (fabric->bport.pid == pid) {
+		/*
+		 * All authentication frames should be routed to auth
+		 */
+		bfa_trc(fabric->fcs, els_cmd->els_code);
+		if (els_cmd->els_code == FC_ELS_AUTH) {
+			bfa_trc(fabric->fcs, els_cmd->els_code);
+			return;
+		}
+
+		bfa_trc(fabric->fcs, *(u8 *) fchs);
+		bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len);
+		return;
+	}
+
+	/*
+	 * look for a matching local port ID
+	 */
+	list_for_each(qe, &fabric->vport_q) {
+		vport = (struct bfa_fcs_vport_s *) qe;
+		if (vport->lport.pid == pid) {
+			bfa_fcs_lport_uf_recv(&vport->lport, fchs, len);
+			return;
+		}
+	}
+
+	if (!bfa_fcs_fabric_is_switched(fabric))
+		bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len);
+
+	bfa_trc(fabric->fcs, fchs->type);
+}
+
+/*
+ *		Unsolicited frames to be processed by fabric.
+ */
+static void
+bfa_fcs_fabric_process_uf(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
+			  u16 len)
+{
+	struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
+
+	bfa_trc(fabric->fcs, els_cmd->els_code);
+
+	switch (els_cmd->els_code) {
+	case FC_ELS_FLOGI:
+		bfa_fcs_fabric_process_flogi(fabric, fchs, len);
+		break;
+
+	default:
+		/*
+		 * need to generate a LS_RJT
+		 */
+		break;
+	}
+}
+
+/*
+ *	Process	incoming FLOGI
+ */
+static void
+bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric,
+			struct fchs_s *fchs, u16 len)
+{
+	struct fc_logi_s *flogi = (struct fc_logi_s *) (fchs + 1);
+	struct bfa_fcs_lport_s *bport = &fabric->bport;
+
+	bfa_trc(fabric->fcs, fchs->s_id);
+
+	fabric->stats.flogi_rcvd++;
+	/*
+	 * Check port type. It should be 0 = n-port.
+	 */
+	if (flogi->csp.port_type) {
+		/*
+		 * @todo: may need to send a LS_RJT
+		 */
+		bfa_trc(fabric->fcs, flogi->port_name);
+		fabric->stats.flogi_rejected++;
+		return;
+	}
+
+	fabric->bb_credit = be16_to_cpu(flogi->csp.bbcred);
+	bport->port_topo.pn2n.rem_port_wwn = flogi->port_name;
+	bport->port_topo.pn2n.reply_oxid = fchs->ox_id;
+
+	/*
+	 * Send a Flogi Acc
+	 */
+	bfa_fcs_fabric_send_flogi_acc(fabric);
+	bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_NO_FABRIC);
+}
+
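+/*
+ * N2N bring-up summary for the receive path above:
+ *
+ *	peer FLOGI received -> save rem_port_wwn/reply_oxid and bb_credit
+ *	-> send FLOGI ACC via bfa_fcs_fabric_send_flogi_acc()
+ *	-> post BFA_FCS_FABRIC_SM_NO_FABRIC so the fabric SM settles in
+ *	   the nofabric state
+ */
+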
+static void
+bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric)
+{
+	struct bfa_lport_cfg_s *pcfg = &fabric->bport.port_cfg;
+	struct bfa_fcs_lport_n2n_s *n2n_port = &fabric->bport.port_topo.pn2n;
+	struct bfa_s	  *bfa = fabric->fcs->bfa;
+	struct bfa_fcxp_s *fcxp;
+	u16	reqlen;
+	struct fchs_s	fchs;
+
+	fcxp = bfa_fcs_fcxp_alloc(fabric->fcs, BFA_FALSE);
+	/*
+	 * Do not expect this failure -- expect remote node to retry
+	 */
+	if (!fcxp)
+		return;
+
+	reqlen = fc_flogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+				    bfa_hton3b(FC_FABRIC_PORT),
+				    n2n_port->reply_oxid, pcfg->pwwn,
+				    pcfg->nwwn,
+				    bfa_fcport_get_maxfrsize(bfa),
+				    bfa_fcport_get_rx_bbcredit(bfa), 0);
+
+	bfa_fcxp_send(fcxp, NULL, fabric->vf_id, fabric->lps->bfa_tag,
+		      BFA_FALSE, FC_CLASS_3,
+		      reqlen, &fchs, bfa_fcs_fabric_flogiacc_comp, fabric,
+		      FC_MAX_PDUSZ, 0);
+}
+
+/*
+ *   Flogi Acc completion callback.
+ */
+static void
+bfa_fcs_fabric_flogiacc_comp(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
+			     bfa_status_t status, u32 rsp_len,
+			     u32 resid_len, struct fchs_s *rspfchs)
+{
+	struct bfa_fcs_fabric_s *fabric = cbarg;
+
+	bfa_trc(fabric->fcs, status);
+}
+
+
+/*
+ * Send AEN notification
+ */
+static void
+bfa_fcs_fabric_aen_post(struct bfa_fcs_lport_s *port,
+			enum bfa_port_aen_event event)
+{
+	struct bfad_s *bfad = (struct bfad_s *)port->fabric->fcs->bfad;
+	struct bfa_aen_entry_s  *aen_entry;
+
+	bfad_get_aen_entry(bfad, aen_entry);
+	if (!aen_entry)
+		return;
+
+	aen_entry->aen_data.port.pwwn = bfa_fcs_lport_get_pwwn(port);
+	aen_entry->aen_data.port.fwwn = bfa_fcs_lport_get_fabric_name(port);
+
+	/* Send the AEN notification */
+	bfad_im_post_vendor_event(aen_entry, bfad, ++port->fcs->fcs_aen_seq,
+				  BFA_AEN_CAT_PORT, event);
+}
+
+/*
+ *
+ * @param[in] fabric - fabric
+ * @param[in] wwn_t - new fabric name
+ *
+ * @return - none
+ */
+void
+bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
+			       wwn_t fabric_name)
+{
+	struct bfad_s *bfad = (struct bfad_s *)fabric->fcs->bfad;
+	char	pwwn_ptr[BFA_STRING_32];
+	char	fwwn_ptr[BFA_STRING_32];
+
+	bfa_trc(fabric->fcs, fabric_name);
+
+	if (fabric->fabric_name == 0) {
+		/*
+		 * With BRCD switches, we don't get Fabric Name in FLOGI.
+		 * Don't generate a fabric name change event in this case.
+		 */
+		fabric->fabric_name = fabric_name;
+	} else {
+		fabric->fabric_name = fabric_name;
+		wwn2str(pwwn_ptr, bfa_fcs_lport_get_pwwn(&fabric->bport));
+		wwn2str(fwwn_ptr,
+			bfa_fcs_lport_get_fabric_name(&fabric->bport));
+		BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
+			"Base port WWN = %s Fabric WWN = %s\n",
+			pwwn_ptr, fwwn_ptr);
+		bfa_fcs_fabric_aen_post(&fabric->bport,
+				BFA_PORT_AEN_FABRIC_NAME_CHANGE);
+	}
+}
+
+void
+bfa_cb_lps_flogo_comp(void *bfad, void *uarg)
+{
+	struct bfa_fcs_fabric_s *fabric = uarg;
+	bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LOGOCOMP);
+}
+
+/*
+ *	Returns FCS vf structure for a given vf_id.
+ *
+ *	param[in]	vf_id - VF_ID
+ *
+ *	return
+ *	If lookup succeeds, returns fcs vf object, otherwise returns NULL
+ */
+bfa_fcs_vf_t   *
+bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id)
+{
+	bfa_trc(fcs, vf_id);
+	if (vf_id == FC_VF_ID_NULL)
+		return &fcs->fabric;
+
+	return NULL;
+}
+
+/*
+ *	Return the list of local logical ports present in the given VF.
+ *
+ *	@param[in]	vf	vf for which logical ports are returned
+ *	@param[out]	lpwwn	returned logical port wwn list
+ *	@param[in,out]	nlports in:size of lpwwn list;
+ *				out:total elements present,
+ *				actual elements returned is limited by the size
+ */
+void
+bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t lpwwn[], int *nlports)
+{
+	struct list_head *qe;
+	struct bfa_fcs_vport_s *vport;
+	int	i = 0;
+	struct bfa_fcs_s	*fcs;
+
+	if (vf == NULL || lpwwn == NULL || *nlports == 0)
+		return;
+
+	fcs = vf->fcs;
+
+	bfa_trc(fcs, vf->vf_id);
+	bfa_trc(fcs, (uint32_t) *nlports);
+
+	lpwwn[i++] = vf->bport.port_cfg.pwwn;
+
+	list_for_each(qe, &vf->vport_q) {
+		if (i >= *nlports)
+			break;
+
+		vport = (struct bfa_fcs_vport_s *) qe;
+		lpwwn[i++] = vport->lport.port_cfg.pwwn;
+	}
+
+	bfa_trc(fcs, i);
+	*nlports = i;
+}
+
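+/*
+ * Caller-side usage sketch for the in/out nlports contract above
+ * (illustrative; MY_MAX_LPORTS is a hypothetical bound):
+ *
+ *	wwn_t lpwwn[MY_MAX_LPORTS];
+ *	int nlports = MY_MAX_LPORTS;
+ *
+ *	bfa_fcs_vf_get_ports(vf, lpwwn, &nlports);
+ *	// nlports now holds the number of entries actually filled in
+ */
+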
+/*
+ * BFA FCS PPORT ( physical port)
+ */
+static void
+bfa_fcs_port_event_handler(void *cbarg, enum bfa_port_linkstate event)
+{
+	struct bfa_fcs_s      *fcs = cbarg;
+
+	bfa_trc(fcs, event);
+
+	switch (event) {
+	case BFA_PORT_LINKUP:
+		bfa_fcs_fabric_link_up(&fcs->fabric);
+		break;
+
+	case BFA_PORT_LINKDOWN:
+		bfa_fcs_fabric_link_down(&fcs->fabric);
+		break;
+
+	default:
+		WARN_ON(1);
+	}
+}
+
+/*
+ * BFA FCS UF ( Unsolicited Frames)
+ */
+
+/*
+ *		BFA callback for unsolicited frame receive handler.
+ *
+ * @param[in]		cbarg		callback arg for receive handler
+ * @param[in]		uf		unsolicited frame descriptor
+ *
+ * @return None
+ */
+static void
+bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf)
+{
+	struct bfa_fcs_s	*fcs = (struct bfa_fcs_s *) cbarg;
+	struct fchs_s	*fchs = bfa_uf_get_frmbuf(uf);
+	u16	len = bfa_uf_get_frmlen(uf);
+	struct fc_vft_s *vft;
+	struct bfa_fcs_fabric_s *fabric;
+
+	/*
+	 * check for VFT header
+	 */
+	if (fchs->routing == FC_RTG_EXT_HDR &&
+	    fchs->cat_info == FC_CAT_VFT_HDR) {
+		bfa_stats(fcs, uf.tagged);
+		vft = bfa_uf_get_frmbuf(uf);
+		if (fcs->port_vfid == vft->vf_id)
+			fabric = &fcs->fabric;
+		else
+			fabric = bfa_fcs_vf_lookup(fcs, (u16) vft->vf_id);
+
+		/*
+		 * drop frame if vfid is unknown
+		 */
+		if (!fabric) {
+			WARN_ON(1);
+			bfa_stats(fcs, uf.vfid_unknown);
+			bfa_uf_free(uf);
+			return;
+		}
+
+		/*
+		 * skip vft header
+		 */
+		fchs = (struct fchs_s *) (vft + 1);
+		len -= sizeof(struct fc_vft_s);
+
+		bfa_trc(fcs, vft->vf_id);
+	} else {
+		bfa_stats(fcs, uf.untagged);
+		fabric = &fcs->fabric;
+	}
+
+	bfa_trc(fcs, ((u32 *) fchs)[0]);
+	bfa_trc(fcs, ((u32 *) fchs)[1]);
+	bfa_trc(fcs, ((u32 *) fchs)[2]);
+	bfa_trc(fcs, ((u32 *) fchs)[3]);
+	bfa_trc(fcs, ((u32 *) fchs)[4]);
+	bfa_trc(fcs, ((u32 *) fchs)[5]);
+	bfa_trc(fcs, len);
+
+	bfa_fcs_fabric_uf_recv(fabric, fchs, len);
+	bfa_uf_free(uf);
+}
+
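+/*
+ * Tagged-frame layout handled above: the VFT extended header precedes
+ * the regular FC header, so after validating vf_id the code simply
+ * steps past it:
+ *
+ *	| struct fc_vft_s | struct fchs_s | payload ... |
+ *
+ *	fchs = (struct fchs_s *) (vft + 1);
+ *	len -= sizeof(struct fc_vft_s);
+ */
+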
+/*
+ * fcs attach -- called once to initialize data structures at driver attach time
+ */
+void
+bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
+	       bfa_boolean_t min_cfg)
+{
+	struct bfa_fcs_fabric_s *fabric = &fcs->fabric;
+
+	fcs->bfa = bfa;
+	fcs->bfad = bfad;
+	fcs->min_cfg = min_cfg;
+	fcs->num_rport_logins = 0;
+
+	bfa->fcs = BFA_TRUE;
+	fcbuild_init();
+
+	bfa_fcport_event_register(fcs->bfa, bfa_fcs_port_event_handler, fcs);
+	bfa_uf_recv_register(fcs->bfa, bfa_fcs_uf_recv, fcs);
+
+	memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s));
+
+	/*
+	 * Initialize base fabric.
+	 */
+	fabric->fcs = fcs;
+	INIT_LIST_HEAD(&fabric->vport_q);
+	INIT_LIST_HEAD(&fabric->vf_q);
+	fabric->lps = bfa_lps_alloc(fcs->bfa);
+	WARN_ON(!fabric->lps);
+
+	/*
+	 * Initialize fabric delete completion handler. Fabric deletion is
+	 * complete when the last vport delete is complete.
+	 */
+	bfa_wc_init(&fabric->wc, bfa_fcs_fabric_delete_comp, fabric);
+	bfa_wc_up(&fabric->wc); /* For the base port */
+
+	bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
+	bfa_fcs_lport_attach(&fabric->bport, fabric->fcs, FC_VF_ID_NULL, NULL);
+}
diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h
new file mode 100644
index 0000000..e60f72b
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fcs.h
@@ -0,0 +1,880 @@
+/*
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014- QLogic Corporation.
+ * All rights reserved
+ * www.qlogic.com
+ *
+ * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __BFA_FCS_H__
+#define __BFA_FCS_H__
+
+#include "bfa_cs.h"
+#include "bfa_defs.h"
+#include "bfa_defs_fcs.h"
+#include "bfa_modules.h"
+#include "bfa_fc.h"
+
+#define BFA_FCS_OS_STR_LEN		64
+
+/*
+ *  lps_pvt BFA LPS private functions
+ */
+
+enum bfa_lps_event {
+	BFA_LPS_SM_LOGIN	= 1,	/* login request from user      */
+	BFA_LPS_SM_LOGOUT	= 2,	/* logout request from user     */
+	BFA_LPS_SM_FWRSP	= 3,	/* f/w response to login/logout */
+	BFA_LPS_SM_RESUME	= 4,	/* space present in reqq queue  */
+	BFA_LPS_SM_DELETE	= 5,	/* lps delete from user         */
+	BFA_LPS_SM_OFFLINE	= 6,	/* Link is offline              */
+	BFA_LPS_SM_RX_CVL	= 7,	/* Rx clear virtual link        */
+	BFA_LPS_SM_SET_N2N_PID  = 8,	/* Set assigned PID for n2n */
+};
+
+
+/*
+ * !!! Only append to the enums defined here to avoid any versioning
+ * !!! needed between trace utility and driver version
+ */
+enum {
+	BFA_TRC_FCS_FCS		= 1,
+	BFA_TRC_FCS_PORT	= 2,
+	BFA_TRC_FCS_RPORT	= 3,
+	BFA_TRC_FCS_FCPIM	= 4,
+};
+
+
+struct bfa_fcs_s;
+
+#define __fcs_min_cfg(__fcs)       ((__fcs)->min_cfg)
+
+#define BFA_FCS_BRCD_SWITCH_OUI  0x051e
+#define N2N_LOCAL_PID	    0x010000
+#define N2N_REMOTE_PID		0x020000
+#define	BFA_FCS_RETRY_TIMEOUT 2000
+#define BFA_FCS_MAX_NS_RETRIES 5
+#define BFA_FCS_PID_IS_WKA(pid)  ((bfa_ntoh3b(pid) > 0xFFF000) ?  1 : 0)
+#define BFA_FCS_MAX_RPORT_LOGINS 1024
+
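+/*
+ * Well-known addresses sit at the top of the FC address space, e.g.
+ * 0xFFFFFC (directory/name server) and 0xFFFFFE (fabric F_Port), hence
+ * the "> 0xFFF000" test in BFA_FCS_PID_IS_WKA() after converting the
+ * wire-order PID with bfa_ntoh3b().
+ */
+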
+struct bfa_fcs_lport_ns_s {
+	bfa_sm_t        sm;		/*  state machine */
+	struct bfa_timer_s timer;
+	struct bfa_fcs_lport_s *port;	/*  parent port */
+	struct bfa_fcxp_s *fcxp;
+	struct bfa_fcxp_wqe_s fcxp_wqe;
+	u8	num_rnnid_retries;
+	u8	num_rsnn_nn_retries;
+};
+
+
+struct bfa_fcs_lport_scn_s {
+	bfa_sm_t        sm;		/*  state machine */
+	struct bfa_timer_s timer;
+	struct bfa_fcs_lport_s *port;	/*  parent port */
+	struct bfa_fcxp_s *fcxp;
+	struct bfa_fcxp_wqe_s fcxp_wqe;
+};
+
+
+struct bfa_fcs_lport_fdmi_s {
+	bfa_sm_t        sm;		/*  state machine */
+	struct bfa_timer_s timer;
+	struct bfa_fcs_lport_ms_s *ms;	/*  parent ms */
+	struct bfa_fcxp_s *fcxp;
+	struct bfa_fcxp_wqe_s fcxp_wqe;
+	u8	retry_cnt;	/*  retry count */
+	u8	rsvd[3];
+};
+
+
+struct bfa_fcs_lport_ms_s {
+	bfa_sm_t        sm;		/*  state machine */
+	struct bfa_timer_s timer;
+	struct bfa_fcs_lport_s *port;	/*  parent port */
+	struct bfa_fcxp_s *fcxp;
+	struct bfa_fcxp_wqe_s fcxp_wqe;
+	struct bfa_fcs_lport_fdmi_s fdmi;	/*  FDMI component of MS */
+	u8         retry_cnt;	/*  retry count */
+	u8	rsvd[3];
+};
+
+
+struct bfa_fcs_lport_fab_s {
+	struct bfa_fcs_lport_ns_s ns;	/*  NS component of port */
+	struct bfa_fcs_lport_scn_s scn;	/*  scn component of port */
+	struct bfa_fcs_lport_ms_s ms;	/*  MS component of port */
+};
+
+#define	MAX_ALPA_COUNT	127
+
+struct bfa_fcs_lport_loop_s {
+	u8	num_alpa;	/*  Num of ALPA entries in the map */
+	u8	alpabm_valid;	/* alpa bitmap valid or not (1 or 0) */
+	u8	alpa_pos_map[MAX_ALPA_COUNT]; /*  ALPA Positional Map */
+	struct bfa_fcs_lport_s *port;	/*  parent port */
+};
+
+struct bfa_fcs_lport_n2n_s {
+	u32        rsvd;
+	__be16     reply_oxid;	/*  ox_id from the req flogi to be
+					 * used in flogi acc */
+	wwn_t           rem_port_wwn;	/*  Attached port's wwn */
+};
+
+
+union bfa_fcs_lport_topo_u {
+	struct bfa_fcs_lport_fab_s pfab;
+	struct bfa_fcs_lport_loop_s ploop;
+	struct bfa_fcs_lport_n2n_s pn2n;
+};
+
+
+struct bfa_fcs_lport_s {
+	struct list_head         qe;	/*  used by port/vport */
+	bfa_sm_t               sm;	/*  state machine */
+	struct bfa_fcs_fabric_s *fabric;	/*  parent fabric */
+	struct bfa_lport_cfg_s  port_cfg;	/*  port configuration */
+	struct bfa_timer_s link_timer;	/*  timer for link offline */
+	u32        pid:24;	/*  FC address */
+	u8         lp_tag;		/*  lport tag */
+	u16        num_rports;	/*  Num of r-ports */
+	struct list_head         rport_q; /*  queue of discovered r-ports */
+	struct bfa_fcs_s *fcs;	/*  FCS instance */
+	union bfa_fcs_lport_topo_u port_topo;	/*  fabric/loop/n2n details */
+	struct bfad_port_s *bfad_port;	/*  driver peer instance */
+	struct bfa_fcs_vport_s *vport;	/*  NULL for base ports */
+	struct bfa_fcxp_s *fcxp;
+	struct bfa_fcxp_wqe_s fcxp_wqe;
+	struct bfa_lport_stats_s stats;
+	struct bfa_wc_s        wc;	/*  waiting counter for events */
+};
+#define BFA_FCS_GET_HAL_FROM_PORT(port)  (port->fcs->bfa)
+#define BFA_FCS_GET_NS_FROM_PORT(port)  (&port->port_topo.pfab.ns)
+#define BFA_FCS_GET_SCN_FROM_PORT(port)  (&port->port_topo.pfab.scn)
+#define BFA_FCS_GET_MS_FROM_PORT(port)  (&port->port_topo.pfab.ms)
+#define BFA_FCS_GET_FDMI_FROM_PORT(port)  (&port->port_topo.pfab.ms.fdmi)
+#define	BFA_FCS_VPORT_IS_INITIATOR_MODE(port) \
+		(port->port_cfg.roles & BFA_LPORT_ROLE_FCP_IM)
+
+/*
+ * forward declaration
+ */
+struct bfad_vf_s;
+
+enum bfa_fcs_fabric_type {
+	BFA_FCS_FABRIC_UNKNOWN = 0,
+	BFA_FCS_FABRIC_SWITCHED = 1,
+	BFA_FCS_FABRIC_N2N = 2,
+	BFA_FCS_FABRIC_LOOP = 3,
+};
+
+
+struct bfa_fcs_fabric_s {
+	struct list_head   qe;		/*  queue element */
+	bfa_sm_t	 sm;		/*  state machine */
+	struct bfa_fcs_s *fcs;		/*  FCS instance */
+	struct bfa_fcs_lport_s  bport;	/*  base logical port */
+	enum bfa_fcs_fabric_type fab_type; /*  fabric type */
+	enum bfa_port_type oper_type;	/*  current link topology */
+	u8         is_vf;		/*  is virtual fabric? */
+	u8         is_npiv;	/*  is NPIV supported ? */
+	u8         is_auth;	/*  is Security/Auth supported ? */
+	u16        bb_credit;	/*  BB credit from fabric */
+	u16        vf_id;		/*  virtual fabric ID */
+	u16        num_vports;	/*  num vports */
+	u16        rsvd;
+	struct list_head         vport_q;	/*  queue of virtual ports */
+	struct list_head         vf_q;	/*  queue of virtual fabrics */
+	struct bfad_vf_s      *vf_drv;	/*  driver vf structure */
+	struct bfa_timer_s link_timer;	/*  Link Failure timer. Vport */
+	wwn_t           fabric_name;	/*  attached fabric name */
+	bfa_boolean_t   auth_reqd;	/*  authentication required	*/
+	struct bfa_timer_s delay_timer;	/*  delay timer		*/
+	union {
+		u16        swp_vfid;/*  switch port VF id		*/
+	} event_arg;
+	struct bfa_wc_s        wc;	/*  wait counter for delete	*/
+	struct bfa_vf_stats_s	stats;	/*  fabric/vf stats		*/
+	struct bfa_lps_s	*lps;	/*  lport login services	*/
+	u8	fabric_ip_addr[BFA_FCS_FABRIC_IPADDR_SZ];
+					/*  attached fabric's ip addr  */
+	struct bfa_wc_s stop_wc;	/*  wait counter for stop */
+};
+
+#define bfa_fcs_fabric_npiv_capable(__f)    ((__f)->is_npiv)
+#define bfa_fcs_fabric_is_switched(__f)			\
+	((__f)->fab_type == BFA_FCS_FABRIC_SWITCHED)
+
+/*
+ *   The design calls for a single implementation of base fabric and vf.
+ */
+#define bfa_fcs_vf_t struct bfa_fcs_fabric_s
+
+struct bfa_vf_event_s {
+	u32        undefined;
+};
+
+struct bfa_fcs_s;
+struct bfa_fcs_fabric_s;
+
+/*
+ * @todo : need to move to a global config file.
+ * Maximum Rports supported per port (physical/logical).
+ */
+#define BFA_FCS_MAX_RPORTS_SUPP  256	/* @todo : tentative value */
+
+#define bfa_fcs_lport_t struct bfa_fcs_lport_s
+
+/*
+ * Symbolic Name related defines
+ *  Total bytes 255.
+ *  Physical Port's symbolic name 128 bytes.
+ *  For Vports, Vport's symbolic name is appended to the Physical port's
+ *  Symbolic Name.
+ *
+ *  Physical Port's symbolic name Format : (Total 128 bytes)
+ *  Adapter Model number/name : 16 bytes
+ *  Driver Version     : 10 bytes
+ *  Host Machine Name  : 30 bytes
+ *  Host OS Info	   : 44 bytes
+ *  Host OS PATCH Info : 16 bytes
+ *  ( remaining 12 bytes reserved to be used for separator)
+ */
+#define BFA_FCS_PORT_SYMBNAME_SEPARATOR			" | "
+
+#define BFA_FCS_PORT_SYMBNAME_MODEL_SZ			16
+#define BFA_FCS_PORT_SYMBNAME_VERSION_SZ		10
+#define BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ		30
+#define BFA_FCS_PORT_SYMBNAME_OSINFO_SZ			44
+#define BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ		16
+
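+/*
+ * Budget check for the 128-byte physical-port symbolic name above:
+ * 16 + 10 + 30 + 44 + 16 = 116 bytes of fields, and the four 3-byte
+ * " | " separators account for the remaining 12 bytes (116 + 12 = 128).
+ */
+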
+/*
+ * Get FC port ID for a logical port.
+ */
+#define bfa_fcs_lport_get_fcid(_lport)	((_lport)->pid)
+#define bfa_fcs_lport_get_pwwn(_lport)	((_lport)->port_cfg.pwwn)
+#define bfa_fcs_lport_get_nwwn(_lport)	((_lport)->port_cfg.nwwn)
+#define bfa_fcs_lport_get_psym_name(_lport)	((_lport)->port_cfg.sym_name)
+#define bfa_fcs_lport_get_nsym_name(_lport) ((_lport)->port_cfg.node_sym_name)
+#define bfa_fcs_lport_is_initiator(_lport)			\
+	((_lport)->port_cfg.roles & BFA_LPORT_ROLE_FCP_IM)
+#define bfa_fcs_lport_get_nrports(_lport)	\
+	((_lport) ? (_lport)->num_rports : 0)
+
+static inline struct bfad_port_s *
+bfa_fcs_lport_get_drvport(struct bfa_fcs_lport_s *port)
+{
+	return port->bfad_port;
+}
+
+#define bfa_fcs_lport_get_opertype(_lport)	((_lport)->fabric->oper_type)
+#define bfa_fcs_lport_get_fabric_name(_lport)	((_lport)->fabric->fabric_name)
+#define bfa_fcs_lport_get_fabric_ipaddr(_lport)		\
+		((_lport)->fabric->fabric_ip_addr)
+
+/*
+ * bfa fcs port public functions
+ */
+
+bfa_boolean_t   bfa_fcs_lport_is_online(struct bfa_fcs_lport_s *port);
+struct bfa_fcs_lport_s *bfa_fcs_get_base_port(struct bfa_fcs_s *fcs);
+void bfa_fcs_lport_get_rport_quals(struct bfa_fcs_lport_s *port,
+			struct bfa_rport_qualifier_s rport[], int *nrports);
+wwn_t bfa_fcs_lport_get_rport(struct bfa_fcs_lport_s *port, wwn_t wwn,
+			      int index, int nrports, bfa_boolean_t bwwn);
+
+struct bfa_fcs_lport_s *bfa_fcs_lookup_port(struct bfa_fcs_s *fcs,
+					    u16 vf_id, wwn_t lpwwn);
+
+void bfa_fcs_lport_set_symname(struct bfa_fcs_lport_s *port, char *symname);
+void bfa_fcs_lport_get_info(struct bfa_fcs_lport_s *port,
+			    struct bfa_lport_info_s *port_info);
+void bfa_fcs_lport_get_attr(struct bfa_fcs_lport_s *port,
+			    struct bfa_lport_attr_s *port_attr);
+void bfa_fcs_lport_get_stats(struct bfa_fcs_lport_s *fcs_port,
+			     struct bfa_lport_stats_s *port_stats);
+void bfa_fcs_lport_clear_stats(struct bfa_fcs_lport_s *fcs_port);
+enum bfa_port_speed bfa_fcs_lport_get_rport_max_speed(
+			struct bfa_fcs_lport_s *port);
+
+/* MS FCS routines */
+void bfa_fcs_lport_ms_init(struct bfa_fcs_lport_s *port);
+void bfa_fcs_lport_ms_offline(struct bfa_fcs_lport_s *port);
+void bfa_fcs_lport_ms_online(struct bfa_fcs_lport_s *port);
+void bfa_fcs_lport_ms_fabric_rscn(struct bfa_fcs_lport_s *port);
+
+/* FDMI FCS routines */
+void bfa_fcs_lport_fdmi_init(struct bfa_fcs_lport_ms_s *ms);
+void bfa_fcs_lport_fdmi_offline(struct bfa_fcs_lport_ms_s *ms);
+void bfa_fcs_lport_fdmi_online(struct bfa_fcs_lport_ms_s *ms);
+void bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport, struct fchs_s *fchs,
+				     u16 len);
+void bfa_fcs_lport_attach(struct bfa_fcs_lport_s *lport, struct bfa_fcs_s *fcs,
+			u16 vf_id, struct bfa_fcs_vport_s *vport);
+void bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport,
+				struct bfa_lport_cfg_s *port_cfg);
+void            bfa_fcs_lport_online(struct bfa_fcs_lport_s *port);
+void            bfa_fcs_lport_offline(struct bfa_fcs_lport_s *port);
+void            bfa_fcs_lport_delete(struct bfa_fcs_lport_s *port);
+void		bfa_fcs_lport_stop(struct bfa_fcs_lport_s *port);
+struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_pid(
+		struct bfa_fcs_lport_s *port, u32 pid);
+struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_old_pid(
+		struct bfa_fcs_lport_s *port, u32 pid);
+struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_pwwn(
+		struct bfa_fcs_lport_s *port, wwn_t pwwn);
+struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_nwwn(
+		struct bfa_fcs_lport_s *port, wwn_t nwwn);
+struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_qualifier(
+		struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 pid);
+void            bfa_fcs_lport_add_rport(struct bfa_fcs_lport_s *port,
+				       struct bfa_fcs_rport_s *rport);
+void            bfa_fcs_lport_del_rport(struct bfa_fcs_lport_s *port,
+				       struct bfa_fcs_rport_s *rport);
+void            bfa_fcs_lport_ns_init(struct bfa_fcs_lport_s *vport);
+void            bfa_fcs_lport_ns_offline(struct bfa_fcs_lport_s *vport);
+void            bfa_fcs_lport_ns_online(struct bfa_fcs_lport_s *vport);
+void            bfa_fcs_lport_ns_query(struct bfa_fcs_lport_s *port);
+void		bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg,
+				struct bfa_fcxp_s *fcxp_alloced);
+void            bfa_fcs_lport_scn_init(struct bfa_fcs_lport_s *vport);
+void            bfa_fcs_lport_scn_offline(struct bfa_fcs_lport_s *vport);
+void            bfa_fcs_lport_fab_scn_online(struct bfa_fcs_lport_s *vport);
+void            bfa_fcs_lport_scn_process_rscn(struct bfa_fcs_lport_s *port,
+					      struct fchs_s *rx_frame, u32 len);
+void		bfa_fcs_lport_lip_scn_online(bfa_fcs_lport_t *port);
+
+struct bfa_fcs_vport_s {
+	struct list_head		qe;		/*  queue elem	*/
+	bfa_sm_t		sm;		/*  state machine	*/
+	bfa_fcs_lport_t		lport;		/*  logical port	*/
+	struct bfa_timer_s	timer;
+	struct bfad_vport_s	*vport_drv;	/*  Driver private	*/
+	struct bfa_vport_stats_s vport_stats;	/*  vport statistics	*/
+	struct bfa_lps_s	*lps;		/*  Lport login service*/
+	int			fdisc_retries;
+};
+
+#define bfa_fcs_vport_get_port(vport)			\
+	((struct bfa_fcs_lport_s *)(&(vport)->lport))
+
+/*
+ * bfa fcs vport public functions
+ */
+bfa_status_t bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport,
+				  struct bfa_fcs_s *fcs, u16 vf_id,
+				  struct bfa_lport_cfg_s *port_cfg,
+				  struct bfad_vport_s *vport_drv);
+bfa_status_t bfa_fcs_pbc_vport_create(struct bfa_fcs_vport_s *vport,
+				      struct bfa_fcs_s *fcs, u16 vf_id,
+				      struct bfa_lport_cfg_s *port_cfg,
+				      struct bfad_vport_s *vport_drv);
+bfa_boolean_t bfa_fcs_is_pbc_vport(struct bfa_fcs_vport_s *vport);
+bfa_status_t bfa_fcs_vport_delete(struct bfa_fcs_vport_s *vport);
+bfa_status_t bfa_fcs_vport_start(struct bfa_fcs_vport_s *vport);
+bfa_status_t bfa_fcs_vport_stop(struct bfa_fcs_vport_s *vport);
+void bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport,
+			    struct bfa_vport_attr_s *vport_attr);
+struct bfa_fcs_vport_s *bfa_fcs_vport_lookup(struct bfa_fcs_s *fcs,
+					     u16 vf_id, wwn_t vpwwn);
+void bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport);
+void bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport);
+void bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport);
+void bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport);
+void bfa_fcs_vport_fcs_delete(struct bfa_fcs_vport_s *vport);
+void bfa_fcs_vport_fcs_stop(struct bfa_fcs_vport_s *vport);
+void bfa_fcs_vport_stop_comp(struct bfa_fcs_vport_s *vport);
+
+#define BFA_FCS_RPORT_DEF_DEL_TIMEOUT	90	/* in secs */
+#define BFA_FCS_RPORT_MAX_RETRIES	(5)
+
+/*
+ * forward declarations
+ */
+struct bfad_rport_s;
+
+struct bfa_fcs_itnim_s;
+struct bfa_fcs_tin_s;
+struct bfa_fcs_iprp_s;
+
+/* Rport Features (RPF) */
+struct bfa_fcs_rpf_s {
+	bfa_sm_t	sm;	/*  state machine */
+	struct bfa_fcs_rport_s *rport;	/*  parent rport */
+	struct bfa_timer_s	timer;	/*  general purpose timer */
+	struct bfa_fcxp_s	*fcxp;	/*  FCXP needed for discarding */
+	struct bfa_fcxp_wqe_s	fcxp_wqe; /*  fcxp wait queue element */
+	int	rpsc_retries;	/*  max RPSC retry attempts */
+	enum bfa_port_speed	rpsc_speed;
+	/*  Current speed from RPSC; 0 if RPSC fails */
+	enum bfa_port_speed	assigned_speed;
+	/*
+	 * Speed assigned by the user; used if RPSC is not
+	 * supported by the rport.
+	 */
+};
+
+struct bfa_fcs_rport_s {
+	struct list_head	qe;	/*  used by port/vport */
+	struct bfa_fcs_lport_s *port;	/*  parent FCS port */
+	struct bfa_fcs_s	*fcs;	/*  fcs instance */
+	struct bfad_rport_s	*rp_drv;	/*  driver peer instance */
+	u32	pid;	/*  port ID of rport */
+	u32	old_pid;	/* PID before rport goes offline */
+	u16	maxfrsize;	/*  maximum frame size */
+	__be16	reply_oxid;	/*  OX_ID of inbound requests */
+	enum fc_cos	fc_cos;	/*  FC classes of service supp */
+	bfa_boolean_t	cisc;	/*  CISC capable device */
+	bfa_boolean_t	prlo;	/*  processing prlo or LOGO */
+	bfa_boolean_t	plogi_pending;	/* Rx Plogi Pending */
+	wwn_t	pwwn;	/*  port wwn of rport */
+	wwn_t	nwwn;	/*  node wwn of rport */
+	struct bfa_rport_symname_s psym_name; /*  port symbolic name  */
+	bfa_sm_t	sm;		/*  state machine */
+	struct bfa_timer_s timer;	/*  general purpose timer */
+	struct bfa_fcs_itnim_s *itnim;	/*  ITN initiator mode role */
+	struct bfa_fcs_tin_s *tin;	/*  ITN target mode role */
+	struct bfa_fcs_iprp_s *iprp;	/*  IP/FC role */
+	struct bfa_rport_s *bfa_rport;	/*  BFA Rport */
+	struct bfa_fcxp_s *fcxp;	/*  FCXP needed for discarding */
+	int	plogi_retries;	/*  max plogi retry attempts */
+	int	ns_retries;	/*  max NS query retry attempts */
+	struct bfa_fcxp_wqe_s	fcxp_wqe; /*  fcxp wait queue element */
+	struct bfa_rport_stats_s stats;	/*  rport stats */
+	enum bfa_rport_function	scsi_function;  /*  Initiator/Target */
+	struct bfa_fcs_rpf_s rpf;	/* Rport features module */
+	bfa_boolean_t   scn_online;	/* SCN online flag */
+};
+
+static inline struct bfa_rport_s *
+bfa_fcs_rport_get_halrport(struct bfa_fcs_rport_s *rport)
+{
+	return rport->bfa_rport;
+}
+
+/*
+ * bfa fcs rport API functions
+ */
+void bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
+			struct bfa_rport_attr_s *attr);
+struct bfa_fcs_rport_s *bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port,
+					     wwn_t rpwwn);
+struct bfa_fcs_rport_s *bfa_fcs_rport_lookup_by_nwwn(
+	struct bfa_fcs_lport_s *port, wwn_t rnwwn);
+void bfa_fcs_rport_set_del_timeout(u8 rport_tmo);
+void bfa_fcs_rport_set_max_logins(u32 max_logins);
+void bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport,
+	 struct fchs_s *fchs, u16 len);
+void bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport);
+
+struct bfa_fcs_rport_s *bfa_fcs_rport_create(struct bfa_fcs_lport_s *port,
+	 u32 pid);
+void bfa_fcs_rport_start(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
+			 struct fc_logi_s *plogi_rsp);
+void bfa_fcs_rport_plogi_create(struct bfa_fcs_lport_s *port,
+				struct fchs_s *rx_fchs,
+				struct fc_logi_s *plogi);
+void bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs,
+			 struct fc_logi_s *plogi);
+void bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, __be16 ox_id);
+
+void bfa_fcs_rport_itntm_ack(struct bfa_fcs_rport_s *rport);
+void bfa_fcs_rport_fcptm_offline_done(struct bfa_fcs_rport_s *rport);
+int  bfa_fcs_rport_get_state(struct bfa_fcs_rport_s *rport);
+struct bfa_fcs_rport_s *bfa_fcs_rport_create_by_wwn(
+			struct bfa_fcs_lport_s *port, wwn_t wwn);
+void  bfa_fcs_rpf_init(struct bfa_fcs_rport_s *rport);
+void  bfa_fcs_rpf_rport_online(struct bfa_fcs_rport_s *rport);
+void  bfa_fcs_rpf_rport_offline(struct bfa_fcs_rport_s *rport);
+
+/*
+ * forward declarations
+ */
+struct bfad_itnim_s;
+
+struct bfa_fcs_itnim_s {
+	bfa_sm_t		sm;		/*  state machine */
+	struct bfa_fcs_rport_s	*rport;		/*  parent remote rport  */
+	struct bfad_itnim_s	*itnim_drv;	/*  driver peer instance */
+	struct bfa_fcs_s	*fcs;		/*  fcs instance	*/
+	struct bfa_timer_s	timer;		/*  timer functions	*/
+	struct bfa_itnim_s	*bfa_itnim;	/*  BFA itnim struct	*/
+	u32		prli_retries;	/*  PRLI retry count */
+	bfa_boolean_t		seq_rec;	/*  seq recovery support */
+	bfa_boolean_t		rec_support;	/*  REC supported	*/
+	bfa_boolean_t		conf_comp;	/*  FCP_CONF	support */
+	bfa_boolean_t		task_retry_id;	/*  task retry id supp	*/
+	struct bfa_fcxp_wqe_s	fcxp_wqe;	/*  wait qelem for fcxp  */
+	struct bfa_fcxp_s	*fcxp;		/*  FCXP in use	*/
+	struct bfa_itnim_stats_s	stats;	/*  itn statistics	*/
+};
+#define bfa_fcs_fcxp_alloc(__fcs, __req)				\
+	bfa_fcxp_req_rsp_alloc(NULL, (__fcs)->bfa, 0, 0,		\
+			       NULL, NULL, NULL, NULL, __req)
+#define bfa_fcs_fcxp_alloc_wait(__bfa, __wqe, __alloc_cbfn,		\
+				__alloc_cbarg, __req)			\
+	bfa_fcxp_req_rsp_alloc_wait(__bfa, __wqe, __alloc_cbfn,		\
+		__alloc_cbarg, NULL, 0, 0, NULL, NULL, NULL, NULL, __req)
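+
+/*
+ * Typical use of the two helpers above (a sketch based on
+ * bfa_fcs_itnim_send_prli() in bfa_fcs_fcpim.c): try a direct FCXP
+ * allocation first; if the pool is exhausted, queue a wait-queue element
+ * that re-invokes the send routine with a non-NULL fcxp_alloced once an
+ * FCXP becomes available.
+ *
+ *	fcxp = fcxp_alloced ? fcxp_alloced :
+ *	       bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
+ *	if (!fcxp) {
+ *		bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &itnim->fcxp_wqe,
+ *				bfa_fcs_itnim_send_prli, itnim, BFA_TRUE);
+ *		return;
+ *	}
+ */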
+
+static inline struct bfad_port_s *
+bfa_fcs_itnim_get_drvport(struct bfa_fcs_itnim_s *itnim)
+{
+	return itnim->rport->port->bfad_port;
+}
+
+
+static inline struct bfa_fcs_lport_s *
+bfa_fcs_itnim_get_port(struct bfa_fcs_itnim_s *itnim)
+{
+	return itnim->rport->port;
+}
+
+
+static inline wwn_t
+bfa_fcs_itnim_get_nwwn(struct bfa_fcs_itnim_s *itnim)
+{
+	return itnim->rport->nwwn;
+}
+
+
+static inline wwn_t
+bfa_fcs_itnim_get_pwwn(struct bfa_fcs_itnim_s *itnim)
+{
+	return itnim->rport->pwwn;
+}
+
+
+static inline u32
+bfa_fcs_itnim_get_fcid(struct bfa_fcs_itnim_s *itnim)
+{
+	return itnim->rport->pid;
+}
+
+
+static inline	u32
+bfa_fcs_itnim_get_maxfrsize(struct bfa_fcs_itnim_s *itnim)
+{
+	return itnim->rport->maxfrsize;
+}
+
+
+static inline	enum fc_cos
+bfa_fcs_itnim_get_cos(struct bfa_fcs_itnim_s *itnim)
+{
+	return itnim->rport->fc_cos;
+}
+
+
+static inline struct bfad_itnim_s *
+bfa_fcs_itnim_get_drvitn(struct bfa_fcs_itnim_s *itnim)
+{
+	return itnim->itnim_drv;
+}
+
+
+static inline struct bfa_itnim_s *
+bfa_fcs_itnim_get_halitn(struct bfa_fcs_itnim_s *itnim)
+{
+	return itnim->bfa_itnim;
+}
+
+/*
+ * bfa fcs FCP Initiator mode API functions
+ */
+void bfa_fcs_itnim_get_attr(struct bfa_fcs_itnim_s *itnim,
+			    struct bfa_itnim_attr_s *attr);
+void bfa_fcs_itnim_get_stats(struct bfa_fcs_itnim_s *itnim,
+			     struct bfa_itnim_stats_s *stats);
+struct bfa_fcs_itnim_s *bfa_fcs_itnim_lookup(struct bfa_fcs_lport_s *port,
+					     wwn_t rpwwn);
+bfa_status_t bfa_fcs_itnim_attr_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn,
+				    struct bfa_itnim_attr_s *attr);
+bfa_status_t bfa_fcs_itnim_stats_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn,
+				     struct bfa_itnim_stats_s *stats);
+bfa_status_t bfa_fcs_itnim_stats_clear(struct bfa_fcs_lport_s *port,
+				       wwn_t rpwwn);
+struct bfa_fcs_itnim_s *bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport);
+void bfa_fcs_itnim_delete(struct bfa_fcs_itnim_s *itnim);
+void bfa_fcs_itnim_rport_offline(struct bfa_fcs_itnim_s *itnim);
+void bfa_fcs_itnim_brp_online(struct bfa_fcs_itnim_s *itnim);
+bfa_status_t bfa_fcs_itnim_get_online_state(struct bfa_fcs_itnim_s *itnim);
+void bfa_fcs_itnim_is_initiator(struct bfa_fcs_itnim_s *itnim);
+void bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim,
+			struct fchs_s *fchs, u16 len);
+
+#define BFA_FCS_FDMI_SUPP_SPEEDS_4G	(FDMI_TRANS_SPEED_1G  |	\
+				FDMI_TRANS_SPEED_2G |		\
+				FDMI_TRANS_SPEED_4G)
+
+#define BFA_FCS_FDMI_SUPP_SPEEDS_8G	(FDMI_TRANS_SPEED_1G  |	\
+				FDMI_TRANS_SPEED_2G |		\
+				FDMI_TRANS_SPEED_4G |		\
+				FDMI_TRANS_SPEED_8G)
+
+#define BFA_FCS_FDMI_SUPP_SPEEDS_16G	(FDMI_TRANS_SPEED_2G  |	\
+				FDMI_TRANS_SPEED_4G |		\
+				FDMI_TRANS_SPEED_8G |		\
+				FDMI_TRANS_SPEED_16G)
+
+#define BFA_FCS_FDMI_SUPP_SPEEDS_10G	FDMI_TRANS_SPEED_10G
+
+#define BFA_FCS_FDMI_VENDOR_INFO_LEN    8
+#define BFA_FCS_FDMI_FC4_TYPE_LEN       32
+
+/*
+ * HBA Attribute Block : BFA internal representation. Note : some field
+ * sizes have been trimmed to suit BFA. For example, the model is "QLogic ",
+ * so its size has been reduced from the standard's 64 bytes to 16 bytes.
+ */
+struct bfa_fcs_fdmi_hba_attr_s {
+	wwn_t           node_name;
+	u8         manufacturer[64];
+	u8         serial_num[64];
+	u8         model[16];
+	u8         model_desc[128];
+	u8         hw_version[8];
+	u8         driver_version[BFA_VERSION_LEN];
+	u8         option_rom_ver[BFA_VERSION_LEN];
+	u8         fw_version[BFA_VERSION_LEN];
+	u8         os_name[256];
+	__be32        max_ct_pyld;
+	struct      bfa_lport_symname_s node_sym_name;
+	u8     vendor_info[BFA_FCS_FDMI_VENDOR_INFO_LEN];
+	__be32    num_ports;
+	wwn_t       fabric_name;
+	u8     bios_ver[BFA_VERSION_LEN];
+};
+
+/*
+ * Port Attribute Block
+ */
+struct bfa_fcs_fdmi_port_attr_s {
+	u8         supp_fc4_types[BFA_FCS_FDMI_FC4_TYPE_LEN];
+	__be32        supp_speed;	/* supported speed */
+	__be32        curr_speed;	/* current Speed */
+	__be32        max_frm_size;	/* max frame size */
+	u8         os_device_name[256];	/* OS device Name */
+	u8         host_name[256];	/* host name */
+	wwn_t       port_name;
+	wwn_t       node_name;
+	struct      bfa_lport_symname_s port_sym_name;
+	__be32    port_type;
+	enum fc_cos    scos;
+	wwn_t       port_fabric_name;
+	u8     port_act_fc4_type[BFA_FCS_FDMI_FC4_TYPE_LEN];
+	__be32    port_state;
+	__be32    num_ports;
+};
+
+struct bfa_fcs_stats_s {
+	struct {
+		u32	untagged; /*  untagged receive frames */
+		u32	tagged;	/*  tagged receive frames */
+		u32	vfid_unknown;	/*  VF id is unknown */
+	} uf;
+};
+
+struct bfa_fcs_driver_info_s {
+	u8	 version[BFA_VERSION_LEN];		/* Driver Version */
+	u8	 host_machine_name[BFA_FCS_OS_STR_LEN];
+	u8	 host_os_name[BFA_FCS_OS_STR_LEN]; /* OS name and version */
+	u8	 host_os_patch[BFA_FCS_OS_STR_LEN]; /* patch or service pack */
+	u8	 os_device_name[BFA_FCS_OS_STR_LEN]; /* Driver Device Name */
+};
+
+struct bfa_fcs_s {
+	struct bfa_s	  *bfa;	/*  corresponding BFA bfa instance */
+	struct bfad_s	      *bfad; /*  corresponding BFA driver instance */
+	struct bfa_trc_mod_s  *trcmod;	/*  tracing module */
+	bfa_boolean_t	vf_enabled;	/*  VF mode is enabled */
+	bfa_boolean_t	fdmi_enabled;	/*  FDMI is enabled */
+	bfa_boolean_t min_cfg;		/* min cfg enabled/disabled */
+	u16	port_vfid;	/*  port default VF ID */
+	struct bfa_fcs_driver_info_s driver_info;
+	struct bfa_fcs_fabric_s fabric; /*  base fabric state machine */
+	struct bfa_fcs_stats_s	stats;	/*  FCS statistics */
+	struct bfa_wc_s		wc;	/*  waiting counter */
+	int			fcs_aen_seq;
+	u32		num_rport_logins;
+};
+
+/*
+ *  fcs_fabric_sm fabric state machine functions
+ */
+
+/*
+ * Fabric state machine events
+ */
+enum bfa_fcs_fabric_event {
+	BFA_FCS_FABRIC_SM_CREATE        = 1,    /*  create from driver        */
+	BFA_FCS_FABRIC_SM_DELETE        = 2,    /*  delete from driver        */
+	BFA_FCS_FABRIC_SM_LINK_DOWN     = 3,    /*  link down from port      */
+	BFA_FCS_FABRIC_SM_LINK_UP       = 4,    /*  link up from port         */
+	BFA_FCS_FABRIC_SM_CONT_OP       = 5,    /*  flogi/auth continue op   */
+	BFA_FCS_FABRIC_SM_RETRY_OP      = 6,    /*  flogi/auth retry op      */
+	BFA_FCS_FABRIC_SM_NO_FABRIC     = 7,    /*  from flogi/auth           */
+	BFA_FCS_FABRIC_SM_PERF_EVFP     = 8,    /*  from flogi/auth           */
+	BFA_FCS_FABRIC_SM_ISOLATE       = 9,    /*  from EVFP processing     */
+	BFA_FCS_FABRIC_SM_NO_TAGGING    = 10,   /*  no VFT tagging from EVFP */
+	BFA_FCS_FABRIC_SM_DELAYED       = 11,   /*  timeout delay event      */
+	BFA_FCS_FABRIC_SM_AUTH_FAILED   = 12,   /*  auth failed       */
+	BFA_FCS_FABRIC_SM_AUTH_SUCCESS  = 13,   /*  auth successful           */
+	BFA_FCS_FABRIC_SM_DELCOMP       = 14,   /*  all vports deleted event */
+	BFA_FCS_FABRIC_SM_LOOPBACK      = 15,   /*  Received our own FLOGI   */
+	BFA_FCS_FABRIC_SM_START         = 16,   /*  from driver       */
+	BFA_FCS_FABRIC_SM_STOP		= 17,	/*  Stop from driver	*/
+	BFA_FCS_FABRIC_SM_STOPCOMP	= 18,	/*  Stop completion	*/
+	BFA_FCS_FABRIC_SM_LOGOCOMP	= 19,	/*  FLOGO completion	*/
+};
+
+/*
+ *  fcs_rport_sm FCS rport state machine events
+ */
+
+enum rport_event {
+	RPSM_EVENT_PLOGI_SEND   = 1,    /*  new rport; start with PLOGI */
+	RPSM_EVENT_PLOGI_RCVD   = 2,    /*  Inbound PLOGI from remote port */
+	RPSM_EVENT_PLOGI_COMP   = 3,    /*  PLOGI completed to rport    */
+	RPSM_EVENT_LOGO_RCVD    = 4,    /*  LOGO from remote device     */
+	RPSM_EVENT_LOGO_IMP     = 5,    /*  implicit logo for SLER      */
+	RPSM_EVENT_FCXP_SENT    = 6,    /*  Frame has been sent         */
+	RPSM_EVENT_DELETE       = 7,    /*  RPORT delete request        */
+	RPSM_EVENT_FAB_SCN	= 8,    /*  state change notification   */
+	RPSM_EVENT_ACCEPTED     = 9,    /*  Good response from remote device */
+	RPSM_EVENT_FAILED       = 10,   /*  Request to rport failed.    */
+	RPSM_EVENT_TIMEOUT      = 11,   /*  Rport SM timeout event      */
+	RPSM_EVENT_HCB_ONLINE  = 12,    /*  BFA rport online callback   */
+	RPSM_EVENT_HCB_OFFLINE = 13,    /*  BFA rport offline callback  */
+	RPSM_EVENT_FC4_OFFLINE = 14,    /*  FC-4 offline complete       */
+	RPSM_EVENT_ADDRESS_CHANGE = 15, /*  Rport's PID has changed     */
+	RPSM_EVENT_ADDRESS_DISC = 16,   /*  Need to Discover rport's PID */
+	RPSM_EVENT_PRLO_RCVD   = 17,    /*  PRLO from remote device     */
+	RPSM_EVENT_PLOGI_RETRY = 18,    /*  Retry PLOGI continuously */
+	RPSM_EVENT_SCN_OFFLINE = 19,	/* loop scn offline		*/
+	RPSM_EVENT_SCN_ONLINE   = 20,	/* loop scn online		*/
+	RPSM_EVENT_FC4_FCS_ONLINE = 21, /* FC-4 FCS online complete */
+};
+
+/*
+ * fcs_itnim_sm FCS itnim state machine events
+ */
+enum bfa_fcs_itnim_event {
+	BFA_FCS_ITNIM_SM_FCS_ONLINE = 1,        /*  rport online event */
+	BFA_FCS_ITNIM_SM_OFFLINE = 2,   /*  rport offline */
+	BFA_FCS_ITNIM_SM_FRMSENT = 3,   /*  prli frame is sent */
+	BFA_FCS_ITNIM_SM_RSP_OK = 4,    /*  good response */
+	BFA_FCS_ITNIM_SM_RSP_ERROR = 5, /*  error response */
+	BFA_FCS_ITNIM_SM_TIMEOUT = 6,   /*  delay timeout */
+	BFA_FCS_ITNIM_SM_HCB_OFFLINE = 7, /*  BFA offline callback */
+	BFA_FCS_ITNIM_SM_HCB_ONLINE = 8, /*  BFA online callback */
+	BFA_FCS_ITNIM_SM_INITIATOR = 9, /*  rport is initiator */
+	BFA_FCS_ITNIM_SM_DELETE = 10,   /*  delete event from rport */
+	BFA_FCS_ITNIM_SM_PRLO = 11,     /*  PRLO received from rport */
+	BFA_FCS_ITNIM_SM_RSP_NOT_SUPP = 12, /* cmd not supported rsp */
+	BFA_FCS_ITNIM_SM_HAL_ONLINE = 13, /* bfa rport online event */
+};
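+
+/*
+ * Events are delivered to the itnim state machine with
+ * bfa_sm_send_event(), e.g. as done in bfa_fcs_itnim_delete()
+ * (bfa_fcs_fcpim.c):
+ *
+ *	bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_DELETE);
+ */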
+
+/*
+ * bfa fcs API functions
+ */
+void bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa,
+		    struct bfad_s *bfad,
+		    bfa_boolean_t min_cfg);
+void bfa_fcs_init(struct bfa_fcs_s *fcs);
+void bfa_fcs_pbc_vport_init(struct bfa_fcs_s *fcs);
+void bfa_fcs_update_cfg(struct bfa_fcs_s *fcs);
+void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
+			      struct bfa_fcs_driver_info_s *driver_info);
+void bfa_fcs_exit(struct bfa_fcs_s *fcs);
+void bfa_fcs_stop(struct bfa_fcs_s *fcs);
+
+/*
+ * bfa fcs vf public functions
+ */
+bfa_fcs_vf_t *bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id);
+void bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t vpwwn[], int *nports);
+
+/*
+ * fabric protected interface functions
+ */
+void bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs);
+void bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric);
+void bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric);
+void bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric,
+	struct bfa_fcs_vport_s *vport);
+void bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric,
+	struct bfa_fcs_vport_s *vport);
+struct bfa_fcs_vport_s *bfa_fcs_fabric_vport_lookup(
+		struct bfa_fcs_fabric_s *fabric, wwn_t pwwn);
+void bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs);
+void bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric,
+		struct fchs_s *fchs, u16 len);
+void	bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric);
+void	bfa_fcs_fabric_nsymb_init(struct bfa_fcs_fabric_s *fabric);
+void bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
+	       wwn_t fabric_name);
+u16 bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric);
+void bfa_fcs_fabric_modstop(struct bfa_fcs_s *fcs);
+void bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
+			enum bfa_fcs_fabric_event event);
+void bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric,
+			enum bfa_fcs_fabric_event event);
+void bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric,
+			enum bfa_fcs_fabric_event event);
+
+/*
+ * BFA FCS callback interfaces
+ */
+
+/*
+ * fcb Main fcs callbacks
+ */
+
+struct bfad_port_s;
+struct bfad_vf_s;
+struct bfad_vport_s;
+struct bfad_rport_s;
+
+/*
+ * lport callbacks
+ */
+struct bfad_port_s *bfa_fcb_lport_new(struct bfad_s *bfad,
+				      struct bfa_fcs_lport_s *port,
+				      enum bfa_lport_role roles,
+				      struct bfad_vf_s *vf_drv,
+				      struct bfad_vport_s *vp_drv);
+
+/*
+ * vport callbacks
+ */
+void bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s pbc_vport);
+
+/*
+ * rport callbacks
+ */
+bfa_status_t bfa_fcb_rport_alloc(struct bfad_s *bfad,
+				 struct bfa_fcs_rport_s **rport,
+				 struct bfad_rport_s **rport_drv);
+
+/*
+ * itnim callbacks
+ */
+int bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim,
+			struct bfad_itnim_s **itnim_drv);
+void bfa_fcb_itnim_free(struct bfad_s *bfad,
+			struct bfad_itnim_s *itnim_drv);
+void bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv);
+void bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv);
+
+#endif /* __BFA_FCS_H__ */
diff --git a/drivers/scsi/bfa/bfa_fcs_fcpim.c b/drivers/scsi/bfa/bfa_fcs_fcpim.c
new file mode 100644
index 0000000..2e3b19e
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fcs_fcpim.c
@@ -0,0 +1,841 @@
+/*
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014- QLogic Corporation.
+ * All rights reserved
+ * www.qlogic.com
+ *
+ * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+/*
+ *  fcpim.c - FCP initiator mode i-t nexus state machine
+ */
+
+#include "bfad_drv.h"
+#include "bfa_fcs.h"
+#include "bfa_fcbuild.h"
+#include "bfad_im.h"
+
+BFA_TRC_FILE(FCS, FCPIM);
+
+/*
+ * forward declarations
+ */
+static void	bfa_fcs_itnim_timeout(void *arg);
+static void	bfa_fcs_itnim_free(struct bfa_fcs_itnim_s *itnim);
+static void	bfa_fcs_itnim_send_prli(void *itnim_cbarg,
+					struct bfa_fcxp_s *fcxp_alloced);
+static void	bfa_fcs_itnim_prli_response(void *fcsarg,
+			 struct bfa_fcxp_s *fcxp, void *cbarg,
+			    bfa_status_t req_status, u32 rsp_len,
+			    u32 resid_len, struct fchs_s *rsp_fchs);
+static void	bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim,
+			enum bfa_itnim_aen_event event);
+
+static void	bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim,
+					 enum bfa_fcs_itnim_event event);
+static void	bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim,
+					   enum bfa_fcs_itnim_event event);
+static void	bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,
+				      enum bfa_fcs_itnim_event event);
+static void	bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim,
+					    enum bfa_fcs_itnim_event event);
+static void	bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
+					    enum bfa_fcs_itnim_event event);
+static void	bfa_fcs_itnim_sm_hal_rport_online(struct bfa_fcs_itnim_s *itnim,
+					enum bfa_fcs_itnim_event event);
+static void	bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim,
+					enum bfa_fcs_itnim_event event);
+static void	bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim,
+					     enum bfa_fcs_itnim_event event);
+static void	bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
+					   enum bfa_fcs_itnim_event event);
+
+static struct bfa_sm_table_s itnim_sm_table[] = {
+	{BFA_SM(bfa_fcs_itnim_sm_offline), BFA_ITNIM_OFFLINE},
+	{BFA_SM(bfa_fcs_itnim_sm_prli_send), BFA_ITNIM_PRLI_SEND},
+	{BFA_SM(bfa_fcs_itnim_sm_prli), BFA_ITNIM_PRLI_SENT},
+	{BFA_SM(bfa_fcs_itnim_sm_prli_retry), BFA_ITNIM_PRLI_RETRY},
+	{BFA_SM(bfa_fcs_itnim_sm_hcb_online), BFA_ITNIM_HCB_ONLINE},
+	{BFA_SM(bfa_fcs_itnim_sm_online), BFA_ITNIM_ONLINE},
+	{BFA_SM(bfa_fcs_itnim_sm_hcb_offline), BFA_ITNIM_HCB_OFFLINE},
+	{BFA_SM(bfa_fcs_itnim_sm_initiator), BFA_ITNIM_INITIATIOR},
+};
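+
+/*
+ * bfa_sm_to_state() walks itnim_sm_table to translate the current state
+ * handler (itnim->sm) back into a BFA_ITNIM_* state value, e.g. as in
+ * bfa_fcs_itnim_get_online_state() below:
+ *
+ *	switch (bfa_sm_to_state(itnim_sm_table, itnim->sm)) {
+ *	case BFA_ITNIM_ONLINE:
+ *		...
+ *	}
+ */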
+
+/*
+ *  fcs_itnim_sm FCS itnim state machine
+ */
+
+static void
+bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim,
+		 enum bfa_fcs_itnim_event event)
+{
+	bfa_trc(itnim->fcs, itnim->rport->pwwn);
+	bfa_trc(itnim->fcs, event);
+
+	switch (event) {
+	case BFA_FCS_ITNIM_SM_FCS_ONLINE:
+		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_send);
+		itnim->prli_retries = 0;
+		bfa_fcs_itnim_send_prli(itnim, NULL);
+		break;
+
+	case BFA_FCS_ITNIM_SM_OFFLINE:
+		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
+		break;
+
+	case BFA_FCS_ITNIM_SM_INITIATOR:
+		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
+		break;
+
+	case BFA_FCS_ITNIM_SM_DELETE:
+		bfa_fcs_itnim_free(itnim);
+		break;
+
+	default:
+		bfa_sm_fault(itnim->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim,
+		 enum bfa_fcs_itnim_event event)
+{
+	bfa_trc(itnim->fcs, itnim->rport->pwwn);
+	bfa_trc(itnim->fcs, event);
+
+	switch (event) {
+	case BFA_FCS_ITNIM_SM_FRMSENT:
+		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli);
+		break;
+
+	case BFA_FCS_ITNIM_SM_INITIATOR:
+		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
+		bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe);
+		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE);
+		break;
+
+	case BFA_FCS_ITNIM_SM_OFFLINE:
+		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+		bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe);
+		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
+		break;
+
+	case BFA_FCS_ITNIM_SM_DELETE:
+		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+		bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe);
+		bfa_fcs_itnim_free(itnim);
+		break;
+
+	default:
+		bfa_sm_fault(itnim->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,
+		 enum bfa_fcs_itnim_event event)
+{
+	bfa_trc(itnim->fcs, itnim->rport->pwwn);
+	bfa_trc(itnim->fcs, event);
+
+	switch (event) {
+	case BFA_FCS_ITNIM_SM_RSP_OK:
+		if (itnim->rport->scsi_function == BFA_RPORT_INITIATOR)
+			bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
+		else
+			bfa_sm_set_state(itnim,
+				bfa_fcs_itnim_sm_hal_rport_online);
+
+		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE);
+		break;
+
+	case BFA_FCS_ITNIM_SM_RSP_ERROR:
+		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_retry);
+		bfa_timer_start(itnim->fcs->bfa, &itnim->timer,
+				bfa_fcs_itnim_timeout, itnim,
+				BFA_FCS_RETRY_TIMEOUT);
+		break;
+
+	case BFA_FCS_ITNIM_SM_RSP_NOT_SUPP:
+		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+		break;
+
+	case BFA_FCS_ITNIM_SM_OFFLINE:
+		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+		bfa_fcxp_discard(itnim->fcxp);
+		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
+		break;
+
+	case BFA_FCS_ITNIM_SM_INITIATOR:
+		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
+		bfa_fcxp_discard(itnim->fcxp);
+		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE);
+		break;
+
+	case BFA_FCS_ITNIM_SM_DELETE:
+		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+		bfa_fcxp_discard(itnim->fcxp);
+		bfa_fcs_itnim_free(itnim);
+		break;
+
+	default:
+		bfa_sm_fault(itnim->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_itnim_sm_hal_rport_online(struct bfa_fcs_itnim_s *itnim,
+				enum bfa_fcs_itnim_event event)
+{
+	bfa_trc(itnim->fcs, itnim->rport->pwwn);
+	bfa_trc(itnim->fcs, event);
+
+	switch (event) {
+	case BFA_FCS_ITNIM_SM_HAL_ONLINE:
+		if (!itnim->bfa_itnim)
+			itnim->bfa_itnim = bfa_itnim_create(itnim->fcs->bfa,
+					itnim->rport->bfa_rport, itnim);
+
+		if (itnim->bfa_itnim) {
+			bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_online);
+			bfa_itnim_online(itnim->bfa_itnim, itnim->seq_rec);
+		} else {
+			bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+			bfa_sm_send_event(itnim->rport, RPSM_EVENT_DELETE);
+		}
+
+		break;
+
+	case BFA_FCS_ITNIM_SM_OFFLINE:
+		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
+		break;
+
+	case BFA_FCS_ITNIM_SM_DELETE:
+		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+		bfa_fcs_itnim_free(itnim);
+		break;
+
+	default:
+		bfa_sm_fault(itnim->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim,
+			    enum bfa_fcs_itnim_event event)
+{
+	bfa_trc(itnim->fcs, itnim->rport->pwwn);
+	bfa_trc(itnim->fcs, event);
+
+	switch (event) {
+	case BFA_FCS_ITNIM_SM_TIMEOUT:
+		if (itnim->prli_retries < BFA_FCS_RPORT_MAX_RETRIES) {
+			itnim->prli_retries++;
+			bfa_trc(itnim->fcs, itnim->prli_retries);
+			bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_send);
+			bfa_fcs_itnim_send_prli(itnim, NULL);
+		} else {
+			/* invoke target offline */
+			bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+			bfa_sm_send_event(itnim->rport, RPSM_EVENT_LOGO_IMP);
+		}
+		break;
+
+	case BFA_FCS_ITNIM_SM_OFFLINE:
+		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+		bfa_timer_stop(&itnim->timer);
+		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
+		break;
+
+	case BFA_FCS_ITNIM_SM_INITIATOR:
+		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
+		bfa_timer_stop(&itnim->timer);
+		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE);
+		break;
+
+	case BFA_FCS_ITNIM_SM_DELETE:
+		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+		bfa_timer_stop(&itnim->timer);
+		bfa_fcs_itnim_free(itnim);
+		break;
+
+	default:
+		bfa_sm_fault(itnim->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
+			    enum bfa_fcs_itnim_event event)
+{
+	struct bfad_s *bfad = (struct bfad_s *)itnim->fcs->bfad;
+	char	lpwwn_buf[BFA_STRING_32];
+	char	rpwwn_buf[BFA_STRING_32];
+
+	bfa_trc(itnim->fcs, itnim->rport->pwwn);
+	bfa_trc(itnim->fcs, event);
+
+	switch (event) {
+	case BFA_FCS_ITNIM_SM_HCB_ONLINE:
+		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_online);
+		bfa_fcb_itnim_online(itnim->itnim_drv);
+		wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(itnim->rport->port));
+		wwn2str(rpwwn_buf, itnim->rport->pwwn);
+		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+		"Target (WWN = %s) is online for initiator (WWN = %s)\n",
+		rpwwn_buf, lpwwn_buf);
+		bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_ONLINE);
+		break;
+
+	case BFA_FCS_ITNIM_SM_OFFLINE:
+		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_offline);
+		bfa_itnim_offline(itnim->bfa_itnim);
+		break;
+
+	case BFA_FCS_ITNIM_SM_DELETE:
+		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+		bfa_fcs_itnim_free(itnim);
+		break;
+
+	default:
+		bfa_sm_fault(itnim->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim,
+		 enum bfa_fcs_itnim_event event)
+{
+	struct bfad_s *bfad = (struct bfad_s *)itnim->fcs->bfad;
+	char	lpwwn_buf[BFA_STRING_32];
+	char	rpwwn_buf[BFA_STRING_32];
+
+	bfa_trc(itnim->fcs, itnim->rport->pwwn);
+	bfa_trc(itnim->fcs, event);
+
+	switch (event) {
+	case BFA_FCS_ITNIM_SM_OFFLINE:
+		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_offline);
+		bfa_fcb_itnim_offline(itnim->itnim_drv);
+		bfa_itnim_offline(itnim->bfa_itnim);
+		wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(itnim->rport->port));
+		wwn2str(rpwwn_buf, itnim->rport->pwwn);
+		if (bfa_fcs_lport_is_online(itnim->rport->port) == BFA_TRUE) {
+			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
+			"Target (WWN = %s) connectivity lost for "
+			"initiator (WWN = %s)\n", rpwwn_buf, lpwwn_buf);
+			bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_DISCONNECT);
+		} else {
+			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+			"Target (WWN = %s) offlined by initiator (WWN = %s)\n",
+			rpwwn_buf, lpwwn_buf);
+			bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_OFFLINE);
+		}
+		break;
+
+	case BFA_FCS_ITNIM_SM_DELETE:
+		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+		bfa_fcs_itnim_free(itnim);
+		break;
+
+	default:
+		bfa_sm_fault(itnim->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim,
+			     enum bfa_fcs_itnim_event event)
+{
+	bfa_trc(itnim->fcs, itnim->rport->pwwn);
+	bfa_trc(itnim->fcs, event);
+
+	switch (event) {
+	case BFA_FCS_ITNIM_SM_HCB_OFFLINE:
+		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
+		break;
+
+	case BFA_FCS_ITNIM_SM_DELETE:
+		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+		bfa_fcs_itnim_free(itnim);
+		break;
+
+	default:
+		bfa_sm_fault(itnim->fcs, event);
+	}
+}
+
+/*
+ * This state is set when a discovered rport is also in initiator mode.
+ * Such an ITN is marked as a no-op: it is not active and will not be
+ * moved to the online state.
+ */
+static void
+bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
+		 enum bfa_fcs_itnim_event event)
+{
+	bfa_trc(itnim->fcs, itnim->rport->pwwn);
+	bfa_trc(itnim->fcs, event);
+
+	switch (event) {
+	case BFA_FCS_ITNIM_SM_OFFLINE:
+		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
+		break;
+
+	/*
+	 * fcs_online is expected here for well known initiator ports
+	 */
+	case BFA_FCS_ITNIM_SM_FCS_ONLINE:
+		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE);
+		break;
+
+	case BFA_FCS_ITNIM_SM_RSP_ERROR:
+	case BFA_FCS_ITNIM_SM_INITIATOR:
+		break;
+
+	case BFA_FCS_ITNIM_SM_DELETE:
+		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+		bfa_fcs_itnim_free(itnim);
+		break;
+
+	default:
+		bfa_sm_fault(itnim->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim,
+			enum bfa_itnim_aen_event event)
+{
+	struct bfa_fcs_rport_s *rport = itnim->rport;
+	struct bfad_s *bfad = (struct bfad_s *)itnim->fcs->bfad;
+	struct bfa_aen_entry_s	*aen_entry;
+
+	/* Don't post events for well known addresses */
+	if (BFA_FCS_PID_IS_WKA(rport->pid))
+		return;
+
+	bfad_get_aen_entry(bfad, aen_entry);
+	if (!aen_entry)
+		return;
+
+	aen_entry->aen_data.itnim.vf_id = rport->port->fabric->vf_id;
+	aen_entry->aen_data.itnim.ppwwn = bfa_fcs_lport_get_pwwn(
+					bfa_fcs_get_base_port(itnim->fcs));
+	aen_entry->aen_data.itnim.lpwwn = bfa_fcs_lport_get_pwwn(rport->port);
+	aen_entry->aen_data.itnim.rpwwn = rport->pwwn;
+
+	/* Send the AEN notification */
+	bfad_im_post_vendor_event(aen_entry, bfad, ++rport->fcs->fcs_aen_seq,
+				  BFA_AEN_CAT_ITNIM, event);
+}
+
+static void
+bfa_fcs_itnim_send_prli(void *itnim_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+	struct bfa_fcs_itnim_s *itnim = itnim_cbarg;
+	struct bfa_fcs_rport_s *rport = itnim->rport;
+	struct bfa_fcs_lport_s *port = rport->port;
+	struct fchs_s	fchs;
+	struct bfa_fcxp_s *fcxp;
+	int		len;
+
+	bfa_trc(itnim->fcs, itnim->rport->pwwn);
+
+	fcxp = fcxp_alloced ? fcxp_alloced :
+	       bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
+	if (!fcxp) {
+		itnim->stats.fcxp_alloc_wait++;
+		bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &itnim->fcxp_wqe,
+				bfa_fcs_itnim_send_prli, itnim, BFA_TRUE);
+		return;
+	}
+	itnim->fcxp = fcxp;
+
+	len = fc_prli_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+			    itnim->rport->pid, bfa_fcs_lport_get_fcid(port), 0);
+
+	bfa_fcxp_send(fcxp, rport->bfa_rport, port->fabric->vf_id, port->lp_tag,
+		      BFA_FALSE, FC_CLASS_3, len, &fchs,
+		      bfa_fcs_itnim_prli_response, (void *)itnim,
+		      FC_MAX_PDUSZ, FC_ELS_TOV);
+
+	itnim->stats.prli_sent++;
+	bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_FRMSENT);
+}
+
+static void
+bfa_fcs_itnim_prli_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
+			    bfa_status_t req_status, u32 rsp_len,
+			    u32 resid_len, struct fchs_s *rsp_fchs)
+{
+	struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cbarg;
+	struct fc_els_cmd_s *els_cmd;
+	struct fc_prli_s *prli_resp;
+	struct fc_ls_rjt_s *ls_rjt;
+	struct fc_prli_params_s *sparams;
+
+	bfa_trc(itnim->fcs, req_status);
+
+	/*
+	 * Sanity Checks
+	 */
+	if (req_status != BFA_STATUS_OK) {
+		itnim->stats.prli_rsp_err++;
+		bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_ERROR);
+		return;
+	}
+
+	els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp);
+
+	if (els_cmd->els_code == FC_ELS_ACC) {
+		prli_resp = (struct fc_prli_s *) els_cmd;
+
+		if (fc_prli_rsp_parse(prli_resp, rsp_len) != FC_PARSE_OK) {
+			bfa_trc(itnim->fcs, rsp_len);
+			/*
+			 * Check if this r-port is also in initiator mode.
+			 * If so, we need to set this ITN as a no-op.
+			 */
+			if (prli_resp->parampage.servparams.initiator) {
+				bfa_trc(itnim->fcs, prli_resp->parampage.type);
+				itnim->rport->scsi_function =
+						BFA_RPORT_INITIATOR;
+				itnim->stats.prli_rsp_acc++;
+				itnim->stats.initiator++;
+				bfa_sm_send_event(itnim,
+						  BFA_FCS_ITNIM_SM_RSP_OK);
+				return;
+			}
+
+			itnim->stats.prli_rsp_parse_err++;
+			return;
+		}
+		itnim->rport->scsi_function = BFA_RPORT_TARGET;
+
+		sparams = &prli_resp->parampage.servparams;
+		itnim->seq_rec	     = sparams->retry;
+		itnim->rec_support   = sparams->rec_support;
+		itnim->task_retry_id = sparams->task_retry_id;
+		itnim->conf_comp     = sparams->confirm;
+
+		itnim->stats.prli_rsp_acc++;
+		bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_OK);
+	} else {
+		ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp);
+
+		bfa_trc(itnim->fcs, ls_rjt->reason_code);
+		bfa_trc(itnim->fcs, ls_rjt->reason_code_expl);
+
+		itnim->stats.prli_rsp_rjt++;
+		if (ls_rjt->reason_code == FC_LS_RJT_RSN_CMD_NOT_SUPP) {
+			bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_NOT_SUPP);
+			return;
+		}
+		bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_ERROR);
+	}
+}
+
+static void
+bfa_fcs_itnim_timeout(void *arg)
+{
+	struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) arg;
+
+	itnim->stats.timeout++;
+	bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_TIMEOUT);
+}
+
+static void
+bfa_fcs_itnim_free(struct bfa_fcs_itnim_s *itnim)
+{
+	if (itnim->bfa_itnim) {
+		bfa_itnim_delete(itnim->bfa_itnim);
+		itnim->bfa_itnim = NULL;
+	}
+
+	bfa_fcb_itnim_free(itnim->fcs->bfad, itnim->itnim_drv);
+}
+
+
+
+/*
+ *  itnim_public FCS ITNIM public interfaces
+ */
+
+/*
+ *	Called by rport when a new rport is created.
+ *
+ * @param[in] rport	-  remote port.
+ */
+struct bfa_fcs_itnim_s *
+bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport)
+{
+	struct bfa_fcs_lport_s *port = rport->port;
+	struct bfa_fcs_itnim_s *itnim;
+	struct bfad_itnim_s   *itnim_drv;
+	int ret;
+
+	/*
+	 * call bfad to allocate the itnim
+	 */
+	ret = bfa_fcb_itnim_alloc(port->fcs->bfad, &itnim, &itnim_drv);
+	if (ret) {
+		bfa_trc(port->fcs, rport->pwwn);
+		return NULL;
+	}
+
+	/*
+	 * Initialize itnim
+	 */
+	itnim->rport = rport;
+	itnim->fcs = rport->fcs;
+	itnim->itnim_drv = itnim_drv;
+
+	itnim->bfa_itnim     = NULL;
+	itnim->seq_rec	     = BFA_FALSE;
+	itnim->rec_support   = BFA_FALSE;
+	itnim->conf_comp     = BFA_FALSE;
+	itnim->task_retry_id = BFA_FALSE;
+
+	/*
+	 * Set State machine
+	 */
+	bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+
+	return itnim;
+}
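+
+/*
+ * Typical usage (a sketch; the actual call site is expected to live in
+ * the rport code, e.g. bfa_fcs_rport.c):
+ *
+ *	rport->itnim = bfa_fcs_itnim_create(rport);
+ *	if (rport->itnim == NULL)
+ *		return BFA_STATUS_FAILED;  /* hypothetical error handling */
+ */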
+
+/*
+ *	Called by rport to delete the FCPIM instance.
+ *
+ * @param[in] rport	-  remote port.
+ */
+void
+bfa_fcs_itnim_delete(struct bfa_fcs_itnim_s *itnim)
+{
+	bfa_trc(itnim->fcs, itnim->rport->pid);
+	bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_DELETE);
+}
+
+/*
+ * Notification from rport that PLOGI is complete; used to initiate the
+ * FC-4 session.
+ */
+void
+bfa_fcs_itnim_brp_online(struct bfa_fcs_itnim_s *itnim)
+{
+	itnim->stats.onlines++;
+
+	if (!BFA_FCS_PID_IS_WKA(itnim->rport->pid))
+		bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HAL_ONLINE);
+}
+
+/*
+ * Called by rport to handle a remote device offline.
+ */
+void
+bfa_fcs_itnim_rport_offline(struct bfa_fcs_itnim_s *itnim)
+{
+	itnim->stats.offlines++;
+	bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_OFFLINE);
+}
+
+/*
+ * Called by rport when remote port is known to be an initiator from
+ * PRLI received.
+ */
+void
+bfa_fcs_itnim_is_initiator(struct bfa_fcs_itnim_s *itnim)
+{
+	bfa_trc(itnim->fcs, itnim->rport->pid);
+	itnim->stats.initiator++;
+	bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_INITIATOR);
+}
+
+/*
+ * Called by rport to check if the itnim is online.
+ */
+bfa_status_t
+bfa_fcs_itnim_get_online_state(struct bfa_fcs_itnim_s *itnim)
+{
+	bfa_trc(itnim->fcs, itnim->rport->pid);
+	switch (bfa_sm_to_state(itnim_sm_table, itnim->sm)) {
+	case BFA_ITNIM_ONLINE:
+	case BFA_ITNIM_INITIATIOR:
+		return BFA_STATUS_OK;
+
+	default:
+		return BFA_STATUS_NO_FCPIM_NEXUS;
+	}
+}
+
+/*
+ * BFA completion callback for bfa_itnim_online().
+ */
+void
+bfa_cb_itnim_online(void *cbarg)
+{
+	struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cbarg;
+
+	bfa_trc(itnim->fcs, itnim->rport->pwwn);
+	bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_ONLINE);
+}
+
+/*
+ * BFA completion callback for bfa_itnim_offline().
+ */
+void
+bfa_cb_itnim_offline(void *cb_arg)
+{
+	struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cb_arg;
+
+	bfa_trc(itnim->fcs, itnim->rport->pwwn);
+	bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_OFFLINE);
+}
+
+/*
+ * Mark the beginning of PATH TOV handling. IO completion callbacks
+ * are still pending.
+ */
+void
+bfa_cb_itnim_tov_begin(void *cb_arg)
+{
+	struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cb_arg;
+
+	bfa_trc(itnim->fcs, itnim->rport->pwwn);
+}
+
+/*
+ * Mark the end of PATH TOV handling. All pending IOs are already cleaned up.
+ */
+void
+bfa_cb_itnim_tov(void *cb_arg)
+{
+	struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cb_arg;
+	struct bfad_itnim_s *itnim_drv = itnim->itnim_drv;
+
+	bfa_trc(itnim->fcs, itnim->rport->pwwn);
+	itnim_drv->state = ITNIM_STATE_TIMEOUT;
+}
+
+/*
+ *		BFA notification to FCS/driver for second level error recovery.
+ *
+ * At least one I/O request has timed out and the target is unresponsive
+ * to repeated abort requests. Second level error recovery should be
+ * initiated by starting implicit logout and recovery procedures.
+ */
+void
+bfa_cb_itnim_sler(void *cb_arg)
+{
+	struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cb_arg;
+
+	itnim->stats.sler++;
+	bfa_trc(itnim->fcs, itnim->rport->pwwn);
+	bfa_sm_send_event(itnim->rport, RPSM_EVENT_LOGO_IMP);
+}
+
+struct bfa_fcs_itnim_s *
+bfa_fcs_itnim_lookup(struct bfa_fcs_lport_s *port, wwn_t rpwwn)
+{
+	struct bfa_fcs_rport_s *rport;
+	rport = bfa_fcs_rport_lookup(port, rpwwn);
+
+	if (!rport)
+		return NULL;
+
+	WARN_ON(rport->itnim == NULL);
+	return rport->itnim;
+}
+
+bfa_status_t
+bfa_fcs_itnim_attr_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn,
+		       struct bfa_itnim_attr_s *attr)
+{
+	struct bfa_fcs_itnim_s *itnim = NULL;
+
+	itnim = bfa_fcs_itnim_lookup(port, rpwwn);
+
+	if (itnim == NULL)
+		return BFA_STATUS_NO_FCPIM_NEXUS;
+
+	attr->state	    = bfa_sm_to_state(itnim_sm_table, itnim->sm);
+	attr->retry	    = itnim->seq_rec;
+	attr->rec_support   = itnim->rec_support;
+	attr->conf_comp	    = itnim->conf_comp;
+	attr->task_retry_id = itnim->task_retry_id;
+	return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_fcs_itnim_stats_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn,
+			struct bfa_itnim_stats_s *stats)
+{
+	struct bfa_fcs_itnim_s *itnim = NULL;
+
+	WARN_ON(port == NULL);
+
+	itnim = bfa_fcs_itnim_lookup(port, rpwwn);
+
+	if (itnim == NULL)
+		return BFA_STATUS_NO_FCPIM_NEXUS;
+
+	memcpy(stats, &itnim->stats, sizeof(struct bfa_itnim_stats_s));
+
+	return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_fcs_itnim_stats_clear(struct bfa_fcs_lport_s *port, wwn_t rpwwn)
+{
+	struct bfa_fcs_itnim_s *itnim = NULL;
+
+	WARN_ON(port == NULL);
+
+	itnim = bfa_fcs_itnim_lookup(port, rpwwn);
+
+	if (itnim == NULL)
+		return BFA_STATUS_NO_FCPIM_NEXUS;
+
+	memset(&itnim->stats, 0, sizeof(struct bfa_itnim_stats_s));
+	return BFA_STATUS_OK;
+}
+
+void
+bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim,
+			struct fchs_s *fchs, u16 len)
+{
+	struct fc_els_cmd_s *els_cmd;
+
+	bfa_trc(itnim->fcs, fchs->type);
+
+	if (fchs->type != FC_TYPE_ELS)
+		return;
+
+	els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
+
+	bfa_trc(itnim->fcs, els_cmd->els_code);
+
+	switch (els_cmd->els_code) {
+	case FC_ELS_PRLO:
+		bfa_fcs_rport_prlo(itnim->rport, fchs->ox_id);
+		break;
+
+	default:
+		WARN_ON(1);
+	}
+}
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
new file mode 100644
index 0000000..b4f2c1d
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -0,0 +1,6986 @@
+/*
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014- QLogic Corporation.
+ * All rights reserved
+ * www.qlogic.com
+ *
+ * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include "bfad_drv.h"
+#include "bfad_im.h"
+#include "bfa_fcs.h"
+#include "bfa_fcbuild.h"
+#include "bfa_fc.h"
+
+BFA_TRC_FILE(FCS, PORT);
+
+/*
+ * ALPA to LIXA bitmap mapping
+ *
+ * ALPA 0x00 (Word 0, Bit 30) is invalid for N_Ports. Also Word 0 Bit 31
+ * is for L_bit (login required) and is filled as ALPA 0x00 here.
+ */
+static const u8 loop_alpa_map[] = {
+	0x00, 0x00, 0x01, 0x02, 0x04, 0x08, 0x0F, 0x10, /* Word 0 Bits 31..24 */
+	0x17, 0x18, 0x1B, 0x1D, 0x1E, 0x1F, 0x23, 0x25, /* Word 0 Bits 23..16 */
+	0x26, 0x27, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, /* Word 0 Bits 15..08 */
+	0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x39, 0x3A, /* Word 0 Bits 07..00 */
+
+	0x3C, 0x43, 0x45, 0x46, 0x47, 0x49, 0x4A, 0x4B, /* Word 1 Bits 31..24 */
+	0x4C, 0x4D, 0x4E, 0x51, 0x52, 0x53, 0x54, 0x55, /* Word 1 Bits 23..16 */
+	0x56, 0x59, 0x5A, 0x5C, 0x63, 0x65, 0x66, 0x67, /* Word 1 Bits 15..08 */
+	0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x71, 0x72, /* Word 1 Bits 07..00 */
+
+	0x73, 0x74, 0x75, 0x76, 0x79, 0x7A, 0x7C, 0x80, /* Word 2 Bits 31..24 */
+	0x81, 0x82, 0x84, 0x88, 0x8F, 0x90, 0x97, 0x98, /* Word 2 Bits 23..16 */
+	0x9B, 0x9D, 0x9E, 0x9F, 0xA3, 0xA5, 0xA6, 0xA7, /* Word 2 Bits 15..08 */
+	0xA9, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xB1, 0xB2, /* Word 2 Bits 07..00 */
+
+	0xB3, 0xB4, 0xB5, 0xB6, 0xB9, 0xBA, 0xBC, 0xC3, /* Word 3 Bits 31..24 */
+	0xC5, 0xC6, 0xC7, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, /* Word 3 Bits 23..16 */
+	0xCE, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD9, /* Word 3 Bits 15..08 */
+	0xDA, 0xDC, 0xE0, 0xE1, 0xE2, 0xE4, 0xE8, 0xEF, /* Word 3 Bits 07..00 */
+};
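+
+/*
+ * Example: a set bit at position N of a received loop bitmap corresponds
+ * to ALPA loop_alpa_map[N]; position 2 (Word 0, Bit 29) maps to ALPA
+ * 0x01, while positions 0 and 1 (L_bit and the invalid N_Port ALPA 0x00)
+ * are filled with 0x00 as noted above.
+ */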
+
+static void     bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port,
+					 struct fchs_s *rx_fchs, u8 reason_code,
+					 u8 reason_code_expl);
+static void     bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port,
+			struct fchs_s *rx_fchs, struct fc_logi_s *plogi);
+static void     bfa_fcs_lport_online_actions(struct bfa_fcs_lport_s *port);
+static void     bfa_fcs_lport_offline_actions(struct bfa_fcs_lport_s *port);
+static void     bfa_fcs_lport_unknown_init(struct bfa_fcs_lport_s *port);
+static void     bfa_fcs_lport_unknown_online(struct bfa_fcs_lport_s *port);
+static void     bfa_fcs_lport_unknown_offline(struct bfa_fcs_lport_s *port);
+static void     bfa_fcs_lport_deleted(struct bfa_fcs_lport_s *port);
+static void     bfa_fcs_lport_echo(struct bfa_fcs_lport_s *port,
+			struct fchs_s *rx_fchs,
+			struct fc_echo_s *echo, u16 len);
+static void     bfa_fcs_lport_rnid(struct bfa_fcs_lport_s *port,
+			struct fchs_s *rx_fchs,
+			struct fc_rnid_cmd_s *rnid, u16 len);
+static void     bfa_fs_port_get_gen_topo_data(struct bfa_fcs_lport_s *port,
+			struct fc_rnid_general_topology_data_s *gen_topo_data);
+
+static void	bfa_fcs_lport_fab_init(struct bfa_fcs_lport_s *port);
+static void	bfa_fcs_lport_fab_online(struct bfa_fcs_lport_s *port);
+static void	bfa_fcs_lport_fab_offline(struct bfa_fcs_lport_s *port);
+
+static void	bfa_fcs_lport_n2n_init(struct bfa_fcs_lport_s *port);
+static void	bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port);
+static void	bfa_fcs_lport_n2n_offline(struct bfa_fcs_lport_s *port);
+
+static void	bfa_fcs_lport_loop_init(struct bfa_fcs_lport_s *port);
+static void	bfa_fcs_lport_loop_online(struct bfa_fcs_lport_s *port);
+static void	bfa_fcs_lport_loop_offline(struct bfa_fcs_lport_s *port);
+
+static struct {
+	void		(*init) (struct bfa_fcs_lport_s *port);
+	void		(*online) (struct bfa_fcs_lport_s *port);
+	void		(*offline) (struct bfa_fcs_lport_s *port);
+} __port_action[] = {
+	[BFA_FCS_FABRIC_UNKNOWN] = {
+		.init = bfa_fcs_lport_unknown_init,
+		.online = bfa_fcs_lport_unknown_online,
+		.offline = bfa_fcs_lport_unknown_offline
+	},
+	[BFA_FCS_FABRIC_SWITCHED] = {
+		.init = bfa_fcs_lport_fab_init,
+		.online = bfa_fcs_lport_fab_online,
+		.offline = bfa_fcs_lport_fab_offline
+	},
+	[BFA_FCS_FABRIC_N2N] = {
+		.init = bfa_fcs_lport_n2n_init,
+		.online = bfa_fcs_lport_n2n_online,
+		.offline = bfa_fcs_lport_n2n_offline
+	},
+	[BFA_FCS_FABRIC_LOOP] = {
+		.init = bfa_fcs_lport_loop_init,
+		.online = bfa_fcs_lport_loop_online,
+		.offline = bfa_fcs_lport_loop_offline
+	},
+};
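+
+/*
+ * Topology-specific init/online/offline behavior is dispatched through
+ * the table above, indexed by fabric type, e.g. (as in
+ * bfa_fcs_lport_online_actions() below):
+ *
+ *	__port_action[port->fabric->fab_type].init(port);
+ *	__port_action[port->fabric->fab_type].online(port);
+ */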
+
+/*
+ *  fcs_port_sm FCS logical port state machine
+ */
+
+enum bfa_fcs_lport_event {
+	BFA_FCS_PORT_SM_CREATE = 1,
+	BFA_FCS_PORT_SM_ONLINE = 2,
+	BFA_FCS_PORT_SM_OFFLINE = 3,
+	BFA_FCS_PORT_SM_DELETE = 4,
+	BFA_FCS_PORT_SM_DELRPORT = 5,
+	BFA_FCS_PORT_SM_STOP = 6,
+};
+
+static void     bfa_fcs_lport_sm_uninit(struct bfa_fcs_lport_s *port,
+					enum bfa_fcs_lport_event event);
+static void     bfa_fcs_lport_sm_init(struct bfa_fcs_lport_s *port,
+					enum bfa_fcs_lport_event event);
+static void     bfa_fcs_lport_sm_online(struct bfa_fcs_lport_s *port,
+					enum bfa_fcs_lport_event event);
+static void     bfa_fcs_lport_sm_offline(struct bfa_fcs_lport_s *port,
+					enum bfa_fcs_lport_event event);
+static void     bfa_fcs_lport_sm_deleting(struct bfa_fcs_lport_s *port,
+					enum bfa_fcs_lport_event event);
+static void	bfa_fcs_lport_sm_stopping(struct bfa_fcs_lport_s *port,
+					enum bfa_fcs_lport_event event);
+
+static void
+bfa_fcs_lport_sm_uninit(
+	struct bfa_fcs_lport_s *port,
+	enum bfa_fcs_lport_event event)
+{
+	bfa_trc(port->fcs, port->port_cfg.pwwn);
+	bfa_trc(port->fcs, event);
+
+	switch (event) {
+	case BFA_FCS_PORT_SM_CREATE:
+		bfa_sm_set_state(port, bfa_fcs_lport_sm_init);
+		break;
+
+	default:
+		bfa_sm_fault(port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_sm_init(struct bfa_fcs_lport_s *port,
+			enum bfa_fcs_lport_event event)
+{
+	bfa_trc(port->fcs, port->port_cfg.pwwn);
+	bfa_trc(port->fcs, event);
+
+	switch (event) {
+	case BFA_FCS_PORT_SM_ONLINE:
+		bfa_sm_set_state(port, bfa_fcs_lport_sm_online);
+		bfa_fcs_lport_online_actions(port);
+		break;
+
+	case BFA_FCS_PORT_SM_DELETE:
+		bfa_sm_set_state(port, bfa_fcs_lport_sm_uninit);
+		bfa_fcs_lport_deleted(port);
+		break;
+
+	case BFA_FCS_PORT_SM_STOP:
+		/* If vport - send completion callback */
+		if (port->vport)
+			bfa_fcs_vport_stop_comp(port->vport);
+		else
+			bfa_wc_down(&(port->fabric->stop_wc));
+		break;
+
+	case BFA_FCS_PORT_SM_OFFLINE:
+		break;
+
+	default:
+		bfa_sm_fault(port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_sm_online(
+	struct bfa_fcs_lport_s *port,
+	enum bfa_fcs_lport_event event)
+{
+	struct bfa_fcs_rport_s *rport;
+	struct list_head		*qe, *qen;
+
+	bfa_trc(port->fcs, port->port_cfg.pwwn);
+	bfa_trc(port->fcs, event);
+
+	switch (event) {
+	case BFA_FCS_PORT_SM_OFFLINE:
+		bfa_sm_set_state(port, bfa_fcs_lport_sm_offline);
+		bfa_fcs_lport_offline_actions(port);
+		break;
+
+	case BFA_FCS_PORT_SM_STOP:
+		__port_action[port->fabric->fab_type].offline(port);
+
+		if (port->num_rports == 0) {
+			bfa_sm_set_state(port, bfa_fcs_lport_sm_init);
+			/* If vport - send completion callback */
+			if (port->vport)
+				bfa_fcs_vport_stop_comp(port->vport);
+			else
+				bfa_wc_down(&(port->fabric->stop_wc));
+		} else {
+			bfa_sm_set_state(port, bfa_fcs_lport_sm_stopping);
+			list_for_each_safe(qe, qen, &port->rport_q) {
+				rport = (struct bfa_fcs_rport_s *) qe;
+				bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
+			}
+		}
+		break;
+
+	case BFA_FCS_PORT_SM_DELETE:
+
+		__port_action[port->fabric->fab_type].offline(port);
+
+		if (port->num_rports == 0) {
+			bfa_sm_set_state(port, bfa_fcs_lport_sm_uninit);
+			bfa_fcs_lport_deleted(port);
+		} else {
+			bfa_sm_set_state(port, bfa_fcs_lport_sm_deleting);
+			list_for_each_safe(qe, qen, &port->rport_q) {
+				rport = (struct bfa_fcs_rport_s *) qe;
+				bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
+			}
+		}
+		break;
+
+	case BFA_FCS_PORT_SM_DELRPORT:
+		break;
+
+	default:
+		bfa_sm_fault(port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_sm_offline(
+	struct bfa_fcs_lport_s *port,
+	enum bfa_fcs_lport_event event)
+{
+	struct bfa_fcs_rport_s *rport;
+	struct list_head		*qe, *qen;
+
+	bfa_trc(port->fcs, port->port_cfg.pwwn);
+	bfa_trc(port->fcs, event);
+
+	switch (event) {
+	case BFA_FCS_PORT_SM_ONLINE:
+		bfa_sm_set_state(port, bfa_fcs_lport_sm_online);
+		bfa_fcs_lport_online_actions(port);
+		break;
+
+	case BFA_FCS_PORT_SM_STOP:
+		if (port->num_rports == 0) {
+			bfa_sm_set_state(port, bfa_fcs_lport_sm_init);
+			/* If vport - send completion callback */
+			if (port->vport)
+				bfa_fcs_vport_stop_comp(port->vport);
+			else
+				bfa_wc_down(&(port->fabric->stop_wc));
+		} else {
+			bfa_sm_set_state(port, bfa_fcs_lport_sm_stopping);
+			list_for_each_safe(qe, qen, &port->rport_q) {
+				rport = (struct bfa_fcs_rport_s *) qe;
+				bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
+			}
+		}
+		break;
+
+	case BFA_FCS_PORT_SM_DELETE:
+		if (port->num_rports == 0) {
+			bfa_sm_set_state(port, bfa_fcs_lport_sm_uninit);
+			bfa_fcs_lport_deleted(port);
+		} else {
+			bfa_sm_set_state(port, bfa_fcs_lport_sm_deleting);
+			list_for_each_safe(qe, qen, &port->rport_q) {
+				rport = (struct bfa_fcs_rport_s *) qe;
+				bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
+			}
+		}
+		break;
+
+	case BFA_FCS_PORT_SM_DELRPORT:
+	case BFA_FCS_PORT_SM_OFFLINE:
+		break;
+
+	default:
+		bfa_sm_fault(port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_sm_stopping(struct bfa_fcs_lport_s *port,
+			  enum bfa_fcs_lport_event event)
+{
+	bfa_trc(port->fcs, port->port_cfg.pwwn);
+	bfa_trc(port->fcs, event);
+
+	switch (event) {
+	case BFA_FCS_PORT_SM_DELRPORT:
+		if (port->num_rports == 0) {
+			bfa_sm_set_state(port, bfa_fcs_lport_sm_init);
+			/* If vport - send completion callback */
+			if (port->vport)
+				bfa_fcs_vport_stop_comp(port->vport);
+			else
+				bfa_wc_down(&(port->fabric->stop_wc));
+		}
+		break;
+
+	default:
+		bfa_sm_fault(port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_sm_deleting(
+	struct bfa_fcs_lport_s *port,
+	enum bfa_fcs_lport_event event)
+{
+	bfa_trc(port->fcs, port->port_cfg.pwwn);
+	bfa_trc(port->fcs, event);
+
+	switch (event) {
+	case BFA_FCS_PORT_SM_DELRPORT:
+		if (port->num_rports == 0) {
+			bfa_sm_set_state(port, bfa_fcs_lport_sm_uninit);
+			bfa_fcs_lport_deleted(port);
+		}
+		break;
+
+	default:
+		bfa_sm_fault(port->fcs, event);
+	}
+}
+
+/*
+ *  fcs_port_pvt
+ */
+
+/*
+ * Send AEN notification
+ */
+static void
+bfa_fcs_lport_aen_post(struct bfa_fcs_lport_s *port,
+			enum bfa_lport_aen_event event)
+{
+	struct bfad_s *bfad = (struct bfad_s *)port->fabric->fcs->bfad;
+	struct bfa_aen_entry_s  *aen_entry;
+
+	bfad_get_aen_entry(bfad, aen_entry);
+	if (!aen_entry)
+		return;
+
+	aen_entry->aen_data.lport.vf_id = port->fabric->vf_id;
+	aen_entry->aen_data.lport.roles = port->port_cfg.roles;
+	aen_entry->aen_data.lport.ppwwn = bfa_fcs_lport_get_pwwn(
+					bfa_fcs_get_base_port(port->fcs));
+	aen_entry->aen_data.lport.lpwwn = bfa_fcs_lport_get_pwwn(port);
+
+	/* Send the AEN notification */
+	bfad_im_post_vendor_event(aen_entry, bfad, ++port->fcs->fcs_aen_seq,
+				  BFA_AEN_CAT_LPORT, event);
+}
+
+/*
+ * Send a LS reject
+ */
+static void
+bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
+			 u8 reason_code, u8 reason_code_expl)
+{
+	struct fchs_s	fchs;
+	struct bfa_fcxp_s *fcxp;
+	struct bfa_rport_s *bfa_rport = NULL;
+	int		len;
+
+	bfa_trc(port->fcs, rx_fchs->d_id);
+	bfa_trc(port->fcs, rx_fchs->s_id);
+
+	fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
+	if (!fcxp)
+		return;
+
+	len = fc_ls_rjt_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+			      rx_fchs->s_id, bfa_fcs_lport_get_fcid(port),
+			      rx_fchs->ox_id, reason_code, reason_code_expl);
+
+	bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag,
+			  BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL,
+			  FC_MAX_PDUSZ, 0);
+}
+
+/*
+ * Send a FCCT Reject
+ */
+static void
+bfa_fcs_lport_send_fcgs_rjt(struct bfa_fcs_lport_s *port,
+	struct fchs_s *rx_fchs, u8 reason_code, u8 reason_code_expl)
+{
+	struct fchs_s   fchs;
+	struct bfa_fcxp_s *fcxp;
+	struct bfa_rport_s *bfa_rport = NULL;
+	int             len;
+	struct ct_hdr_s *rx_cthdr = (struct ct_hdr_s *)(rx_fchs + 1);
+	struct ct_hdr_s *ct_hdr;
+
+	bfa_trc(port->fcs, rx_fchs->d_id);
+	bfa_trc(port->fcs, rx_fchs->s_id);
+
+	fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
+	if (!fcxp)
+		return;
+
+	ct_hdr = bfa_fcxp_get_reqbuf(fcxp);
+	ct_hdr->gs_type = rx_cthdr->gs_type;
+	ct_hdr->gs_sub_type = rx_cthdr->gs_sub_type;
+
+	len = fc_gs_rjt_build(&fchs, ct_hdr, rx_fchs->s_id,
+			bfa_fcs_lport_get_fcid(port),
+			rx_fchs->ox_id, reason_code, reason_code_expl);
+
+	bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag,
+			BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL,
+			FC_MAX_PDUSZ, 0);
+}
+
+/*
+ * Process incoming plogi from a remote port.
+ */
+static void
+bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port,
+		struct fchs_s *rx_fchs, struct fc_logi_s *plogi)
+{
+	struct bfa_fcs_rport_s *rport;
+
+	bfa_trc(port->fcs, rx_fchs->d_id);
+	bfa_trc(port->fcs, rx_fchs->s_id);
+
+	/*
+	 * If min cfg mode is enabled, drop any incoming PLOGIs
+	 */
+	if (__fcs_min_cfg(port->fcs)) {
+		bfa_trc(port->fcs, rx_fchs->s_id);
+		return;
+	}
+
+	if (fc_plogi_parse(rx_fchs) != FC_PARSE_OK) {
+		bfa_trc(port->fcs, rx_fchs->s_id);
+		/*
+		 * send a LS reject
+		 */
+		bfa_fcs_lport_send_ls_rjt(port, rx_fchs,
+					FC_LS_RJT_RSN_PROTOCOL_ERROR,
+					FC_LS_RJT_EXP_SPARMS_ERR_OPTIONS);
+		return;
+	}
+
+	/*
+	 * Direct Attach P2P mode : verify address assigned by the r-port.
+	 */
+	if ((!bfa_fcs_fabric_is_switched(port->fabric)) &&
+		(memcmp((void *)&bfa_fcs_lport_get_pwwn(port),
+			   (void *)&plogi->port_name, sizeof(wwn_t)) < 0)) {
+		if (BFA_FCS_PID_IS_WKA(rx_fchs->d_id)) {
+			/* Address assigned to us cannot be a WKA */
+			bfa_fcs_lport_send_ls_rjt(port, rx_fchs,
+					FC_LS_RJT_RSN_PROTOCOL_ERROR,
+					FC_LS_RJT_EXP_INVALID_NPORT_ID);
+			return;
+		}
+		port->pid  = rx_fchs->d_id;
+		bfa_lps_set_n2n_pid(port->fabric->lps, rx_fchs->d_id);
+	}
+
+	/*
+	 * First, check if we know the device by pwwn.
+	 */
+	rport = bfa_fcs_lport_get_rport_by_pwwn(port, plogi->port_name);
+	if (rport) {
+		/*
+		 * Direct Attach P2P mode : handle address assigned by r-port.
+		 */
+		if ((!bfa_fcs_fabric_is_switched(port->fabric)) &&
+			(memcmp((void *)&bfa_fcs_lport_get_pwwn(port),
+			(void *)&plogi->port_name, sizeof(wwn_t)) < 0)) {
+			port->pid  = rx_fchs->d_id;
+			bfa_lps_set_n2n_pid(port->fabric->lps, rx_fchs->d_id);
+			rport->pid = rx_fchs->s_id;
+		}
+		bfa_fcs_rport_plogi(rport, rx_fchs, plogi);
+		return;
+	}
+
+	/*
+	 * Next, lookup rport by PID.
+	 */
+	rport = bfa_fcs_lport_get_rport_by_pid(port, rx_fchs->s_id);
+	if (!rport) {
+		/*
+		 * Inbound PLOGI from a new device.
+		 */
+		bfa_fcs_rport_plogi_create(port, rx_fchs, plogi);
+		return;
+	}
+
+	/*
+	 * Rport is known only by PID.
+	 */
+	if (rport->pwwn) {
+		/*
+		 * This is a different device with the same pid. Old device
+		 * disappeared. Send implicit LOGO to old device.
+		 */
+		WARN_ON(rport->pwwn == plogi->port_name);
+		bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP);
+
+		/*
+		 * Inbound PLOGI from a new device (with old PID).
+		 */
+		bfa_fcs_rport_plogi_create(port, rx_fchs, plogi);
+		return;
+	}
+
+	/*
+	 * PLOGI crossing each other.
+	 */
+	WARN_ON(rport->pwwn != WWN_NULL);
+	bfa_fcs_rport_plogi(rport, rx_fchs, plogi);
+}
+
+/*
+ * Process incoming ECHO.
+ * Since it does not require a login, it is processed here.
+ */
+static void
+bfa_fcs_lport_echo(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
+		struct fc_echo_s *echo, u16 rx_len)
+{
+	struct fchs_s		fchs;
+	struct bfa_fcxp_s	*fcxp;
+	struct bfa_rport_s	*bfa_rport = NULL;
+	int			len, pyld_len;
+
+	bfa_trc(port->fcs, rx_fchs->s_id);
+	bfa_trc(port->fcs, rx_fchs->d_id);
+
+	fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
+	if (!fcxp)
+		return;
+
+	len = fc_ls_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+				rx_fchs->s_id, bfa_fcs_lport_get_fcid(port),
+				rx_fchs->ox_id);
+
+	/*
+	 * Copy the payload (if any) from the echo frame
+	 */
+	pyld_len = rx_len - sizeof(struct fchs_s);
+	bfa_trc(port->fcs, rx_len);
+	bfa_trc(port->fcs, pyld_len);
+
+	if (pyld_len > len)
+		memcpy(((u8 *) bfa_fcxp_get_reqbuf(fcxp)) +
+			sizeof(struct fc_echo_s), (echo + 1),
+			(pyld_len - sizeof(struct fc_echo_s)));
+
+	bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag,
+			BFA_FALSE, FC_CLASS_3, pyld_len, &fchs, NULL, NULL,
+			FC_MAX_PDUSZ, 0);
+}
+
+/*
+ * Process incoming RNID.
+ * Since it does not require a login, it is processed here.
+ */
+static void
+bfa_fcs_lport_rnid(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
+		struct fc_rnid_cmd_s *rnid, u16 rx_len)
+{
+	struct fc_rnid_common_id_data_s common_id_data;
+	struct fc_rnid_general_topology_data_s gen_topo_data;
+	struct fchs_s	fchs;
+	struct bfa_fcxp_s *fcxp;
+	struct bfa_rport_s *bfa_rport = NULL;
+	u16	len;
+	u32	data_format;
+
+	bfa_trc(port->fcs, rx_fchs->s_id);
+	bfa_trc(port->fcs, rx_fchs->d_id);
+	bfa_trc(port->fcs, rx_len);
+
+	fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
+	if (!fcxp)
+		return;
+
+	/*
+	 * Check Node Identification Data Format
+	 * We only support General Topology Discovery Format.
+	 * For any other requested Data Formats, we return Common Node Id Data
+	 * only, as per FC-LS.
+	 */
+	bfa_trc(port->fcs, rnid->node_id_data_format);
+	if (rnid->node_id_data_format == RNID_NODEID_DATA_FORMAT_DISCOVERY) {
+		data_format = RNID_NODEID_DATA_FORMAT_DISCOVERY;
+		/*
+		 * Get General topology data for this port
+		 */
+		bfa_fs_port_get_gen_topo_data(port, &gen_topo_data);
+	} else {
+		data_format = RNID_NODEID_DATA_FORMAT_COMMON;
+	}
+
+	/*
+	 * Copy the Node Id Info
+	 */
+	common_id_data.port_name = bfa_fcs_lport_get_pwwn(port);
+	common_id_data.node_name = bfa_fcs_lport_get_nwwn(port);
+
+	len = fc_rnid_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+				rx_fchs->s_id, bfa_fcs_lport_get_fcid(port),
+				rx_fchs->ox_id, data_format, &common_id_data,
+				&gen_topo_data);
+
+	bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag,
+			BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL,
+			FC_MAX_PDUSZ, 0);
+}
+
+/*
+ *  Fill out General Topology Discovery Data for the RNID ELS.
+ */
+static void
+bfa_fs_port_get_gen_topo_data(struct bfa_fcs_lport_s *port,
+			struct fc_rnid_general_topology_data_s *gen_topo_data)
+{
+	memset(gen_topo_data, 0,
+		      sizeof(struct fc_rnid_general_topology_data_s));
+
+	gen_topo_data->asso_type = cpu_to_be32(RNID_ASSOCIATED_TYPE_HOST);
+	gen_topo_data->phy_port_num = 0;	/* @todo */
+	gen_topo_data->num_attached_nodes = cpu_to_be32(1);
+}
+
+static void
+bfa_fcs_lport_online_actions(struct bfa_fcs_lport_s *port)
+{
+	struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad;
+	char	lpwwn_buf[BFA_STRING_32];
+
+	bfa_trc(port->fcs, port->fabric->oper_type);
+
+	__port_action[port->fabric->fab_type].init(port);
+	__port_action[port->fabric->fab_type].online(port);
+
+	wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
+	BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
+		"Logical port online: WWN = %s Role = %s\n",
+		lpwwn_buf, "Initiator");
+	bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_ONLINE);
+
+	bfad->bfad_flags |= BFAD_PORT_ONLINE;
+}
+
+static void
+bfa_fcs_lport_offline_actions(struct bfa_fcs_lport_s *port)
+{
+	struct list_head	*qe, *qen;
+	struct bfa_fcs_rport_s *rport;
+	struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad;
+	char    lpwwn_buf[BFA_STRING_32];
+
+	bfa_trc(port->fcs, port->fabric->oper_type);
+
+	__port_action[port->fabric->fab_type].offline(port);
+
+	wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
+	if (bfa_sm_cmp_state(port->fabric,
+			bfa_fcs_fabric_sm_online) == BFA_TRUE) {
+		BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
+		"Logical port lost fabric connectivity: WWN = %s Role = %s\n",
+		lpwwn_buf, "Initiator");
+		bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_DISCONNECT);
+	} else {
+		BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
+		"Logical port taken offline: WWN = %s Role = %s\n",
+		lpwwn_buf, "Initiator");
+		bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_OFFLINE);
+	}
+
+	list_for_each_safe(qe, qen, &port->rport_q) {
+		rport = (struct bfa_fcs_rport_s *) qe;
+		bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP);
+	}
+}
+
+static void
+bfa_fcs_lport_unknown_init(struct bfa_fcs_lport_s *port)
+{
+	WARN_ON(1);
+}
+
+static void
+bfa_fcs_lport_unknown_online(struct bfa_fcs_lport_s *port)
+{
+	WARN_ON(1);
+}
+
+static void
+bfa_fcs_lport_unknown_offline(struct bfa_fcs_lport_s *port)
+{
+	WARN_ON(1);
+}
+
+static void
+bfa_fcs_lport_abts_acc(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs)
+{
+	struct fchs_s fchs;
+	struct bfa_fcxp_s *fcxp;
+	int		len;
+
+	bfa_trc(port->fcs, rx_fchs->d_id);
+	bfa_trc(port->fcs, rx_fchs->s_id);
+
+	fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
+	if (!fcxp)
+		return;
+
+	len = fc_ba_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+			rx_fchs->s_id, bfa_fcs_lport_get_fcid(port),
+			rx_fchs->ox_id, 0);
+
+	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag,
+			  BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL,
+			  FC_MAX_PDUSZ, 0);
+}
+
+static void
+bfa_fcs_lport_deleted(struct bfa_fcs_lport_s *port)
+{
+	struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad;
+	char    lpwwn_buf[BFA_STRING_32];
+
+	wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
+	BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+		"Logical port deleted: WWN = %s Role = %s\n",
+		lpwwn_buf, "Initiator");
+	bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_DELETE);
+
+	/* Base port will be deleted by the OS driver */
+	if (port->vport)
+		bfa_fcs_vport_delete_comp(port->vport);
+	else
+		bfa_wc_down(&port->fabric->wc);
+}
+
+/*
+ * Unsolicited frame receive handling.
+ */
+void
+bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport,
+			struct fchs_s *fchs, u16 len)
+{
+	u32	pid = fchs->s_id;
+	struct bfa_fcs_rport_s *rport = NULL;
+	struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
+
+	bfa_stats(lport, uf_recvs);
+	bfa_trc(lport->fcs, fchs->type);
+
+	if (!bfa_fcs_lport_is_online(lport)) {
+		/*
+		 * In direct attach topology, it is possible to get a PLOGI
+		 * before the lport is online due to port features
+		 * (QoS/Trunk/FEC/CR), so send an LS_RJT.
+		 */
+		if ((fchs->type == FC_TYPE_ELS) &&
+			(els_cmd->els_code == FC_ELS_PLOGI)) {
+			bfa_fcs_lport_send_ls_rjt(lport, fchs,
+				FC_LS_RJT_RSN_UNABLE_TO_PERF_CMD,
+				FC_LS_RJT_EXP_NO_ADDL_INFO);
+			bfa_stats(lport, plogi_rcvd);
+		} else
+			bfa_stats(lport, uf_recv_drops);
+
+		return;
+	}
+
+	/*
+	 * First, handle ELSs that do not require a login.
+	 *
+	 * PLOGI is handled first.
+	 */
+	if ((fchs->type == FC_TYPE_ELS) &&
+		(els_cmd->els_code == FC_ELS_PLOGI)) {
+		bfa_fcs_lport_plogi(lport, fchs, (struct fc_logi_s *) els_cmd);
+		return;
+	}
+
+	/*
+	 * Handle ECHO separately.
+	 */
+	if ((fchs->type == FC_TYPE_ELS) && (els_cmd->els_code == FC_ELS_ECHO)) {
+		bfa_fcs_lport_echo(lport, fchs,
+				(struct fc_echo_s *)els_cmd, len);
+		return;
+	}
+
+	/*
+	 * Handle RNID separately.
+	 */
+	if ((fchs->type == FC_TYPE_ELS) && (els_cmd->els_code == FC_ELS_RNID)) {
+		bfa_fcs_lport_rnid(lport, fchs,
+			(struct fc_rnid_cmd_s *) els_cmd, len);
+		return;
+	}
+
+	if (fchs->type == FC_TYPE_BLS) {
+		if ((fchs->routing == FC_RTG_BASIC_LINK) &&
+				(fchs->cat_info == FC_CAT_ABTS))
+			bfa_fcs_lport_abts_acc(lport, fchs);
+		return;
+	}
+
+	if (fchs->type == FC_TYPE_SERVICES) {
+		/*
+		 * Unhandled FC-GS frames. Send a FC-CT Reject
+		 */
+		bfa_fcs_lport_send_fcgs_rjt(lport, fchs, CT_RSN_NOT_SUPP,
+				CT_NS_EXP_NOADDITIONAL);
+		return;
+	}
+
+	/*
+	 * look for a matching remote port ID
+	 */
+	rport = bfa_fcs_lport_get_rport_by_pid(lport, pid);
+	if (rport) {
+		bfa_trc(rport->fcs, fchs->s_id);
+		bfa_trc(rport->fcs, fchs->d_id);
+		bfa_trc(rport->fcs, fchs->type);
+
+		bfa_fcs_rport_uf_recv(rport, fchs, len);
+		return;
+	}
+
+	/*
+	 * Only handles ELS frames for now.
+	 */
+	if (fchs->type != FC_TYPE_ELS) {
+		bfa_trc(lport->fcs, fchs->s_id);
+		bfa_trc(lport->fcs, fchs->d_id);
+		/* ignore type FC_TYPE_FC_FSS */
+		if (fchs->type != FC_TYPE_FC_FSS)
+			bfa_sm_fault(lport->fcs, fchs->type);
+		return;
+	}
+
+	bfa_trc(lport->fcs, els_cmd->els_code);
+	if (els_cmd->els_code == FC_ELS_RSCN) {
+		bfa_fcs_lport_scn_process_rscn(lport, fchs, len);
+		return;
+	}
+
+	if (els_cmd->els_code == FC_ELS_LOGO) {
+		/*
+		 * @todo Handle LOGO frames received.
+		 */
+		return;
+	}
+
+	if (els_cmd->els_code == FC_ELS_PRLI) {
+		/*
+		 * @todo Handle PRLI frames received.
+		 */
+		return;
+	}
+
+	/*
+	 * Unhandled ELS frames. Send a LS_RJT.
+	 */
+	bfa_fcs_lport_send_ls_rjt(lport, fchs, FC_LS_RJT_RSN_CMD_NOT_SUPP,
+				 FC_LS_RJT_EXP_NO_ADDL_INFO);
+}
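+
+/*
+ * Dispatch precedence above: login-less ELS frames (PLOGI, ECHO, RNID)
+ * first, then BLS ABTS, then FC-GS rejects, then delivery to a known
+ * rport by S_ID, and finally the lport-level ELS handlers (RSCN, LOGO,
+ * PRLI) with LS_RJT as the fallback.
+ */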
+
+/*
+ *   PID based Lookup for a R-Port in the Port R-Port Queue
+ */
+struct bfa_fcs_rport_s *
+bfa_fcs_lport_get_rport_by_pid(struct bfa_fcs_lport_s *port, u32 pid)
+{
+	struct bfa_fcs_rport_s *rport;
+	struct list_head	*qe;
+
+	list_for_each(qe, &port->rport_q) {
+		rport = (struct bfa_fcs_rport_s *) qe;
+		if (rport->pid == pid)
+			return rport;
+	}
+
+	bfa_trc(port->fcs, pid);
+	return NULL;
+}
+
+/*
+ * OLD_PID based Lookup for a R-Port in the Port R-Port Queue
+ */
+struct bfa_fcs_rport_s *
+bfa_fcs_lport_get_rport_by_old_pid(struct bfa_fcs_lport_s *port, u32 pid)
+{
+	struct bfa_fcs_rport_s *rport;
+	struct list_head	*qe;
+
+	list_for_each(qe, &port->rport_q) {
+		rport = (struct bfa_fcs_rport_s *) qe;
+		if (rport->old_pid == pid)
+			return rport;
+	}
+
+	bfa_trc(port->fcs, pid);
+	return NULL;
+}
+
+/*
+ *   PWWN based Lookup for a R-Port in the Port R-Port Queue
+ */
+struct bfa_fcs_rport_s *
+bfa_fcs_lport_get_rport_by_pwwn(struct bfa_fcs_lport_s *port, wwn_t pwwn)
+{
+	struct bfa_fcs_rport_s *rport;
+	struct list_head	*qe;
+
+	list_for_each(qe, &port->rport_q) {
+		rport = (struct bfa_fcs_rport_s *) qe;
+		if (wwn_is_equal(rport->pwwn, pwwn))
+			return rport;
+	}
+
+	bfa_trc(port->fcs, pwwn);
+	return NULL;
+}
+
+/*
+ *   NWWN based Lookup for a R-Port in the Port R-Port Queue
+ */
+struct bfa_fcs_rport_s *
+bfa_fcs_lport_get_rport_by_nwwn(struct bfa_fcs_lport_s *port, wwn_t nwwn)
+{
+	struct bfa_fcs_rport_s *rport;
+	struct list_head	*qe;
+
+	list_for_each(qe, &port->rport_q) {
+		rport = (struct bfa_fcs_rport_s *) qe;
+		if (wwn_is_equal(rport->nwwn, nwwn))
+			return rport;
+	}
+
+	bfa_trc(port->fcs, nwwn);
+	return NULL;
+}
+
+/*
+ * PWWN & PID based Lookup for a R-Port in the Port R-Port Queue
+ */
+struct bfa_fcs_rport_s *
+bfa_fcs_lport_get_rport_by_qualifier(struct bfa_fcs_lport_s *port,
+				     wwn_t pwwn, u32 pid)
+{
+	struct bfa_fcs_rport_s *rport;
+	struct list_head	*qe;
+
+	list_for_each(qe, &port->rport_q) {
+		rport = (struct bfa_fcs_rport_s *) qe;
+		if (wwn_is_equal(rport->pwwn, pwwn) && rport->pid == pid)
+			return rport;
+	}
+
+	bfa_trc(port->fcs, pwwn);
+	return NULL;
+}
+
+/*
+ * Called by rport module when new rports are discovered.
+ */
+void
+bfa_fcs_lport_add_rport(
+	struct bfa_fcs_lport_s *port,
+	struct bfa_fcs_rport_s *rport)
+{
+	list_add_tail(&rport->qe, &port->rport_q);
+	port->num_rports++;
+}
+
+/*
+ * Called by rport module when rports are deleted.
+ */
+void
+bfa_fcs_lport_del_rport(
+	struct bfa_fcs_lport_s *port,
+	struct bfa_fcs_rport_s *rport)
+{
+	WARN_ON(!bfa_q_is_on_q(&port->rport_q, rport));
+	list_del(&rport->qe);
+	port->num_rports--;
+
+	bfa_sm_send_event(port, BFA_FCS_PORT_SM_DELRPORT);
+}
+
+/*
+ * Called by fabric for base port when fabric login is complete.
+ * Called by vport for virtual ports when FDISC is complete.
+ */
+void
+bfa_fcs_lport_online(struct bfa_fcs_lport_s *port)
+{
+	bfa_sm_send_event(port, BFA_FCS_PORT_SM_ONLINE);
+}
+
+/*
+ * Called by fabric for base port when fabric goes offline.
+ * Called by vport for virtual ports when virtual port becomes offline.
+ */
+void
+bfa_fcs_lport_offline(struct bfa_fcs_lport_s *port)
+{
+	bfa_sm_send_event(port, BFA_FCS_PORT_SM_OFFLINE);
+}
+
+/*
+ * Called by fabric for base port and by vport for virtual ports
+ * when target mode driver is unloaded.
+ */
+void
+bfa_fcs_lport_stop(struct bfa_fcs_lport_s *port)
+{
+	bfa_sm_send_event(port, BFA_FCS_PORT_SM_STOP);
+}
+
+/*
+ * Called by fabric to delete base lport and associated resources.
+ *
+ * Called by vport to delete lport and associated resources. Should call
+ * bfa_fcs_vport_delete_comp() for vports on completion.
+ */
+void
+bfa_fcs_lport_delete(struct bfa_fcs_lport_s *port)
+{
+	bfa_sm_send_event(port, BFA_FCS_PORT_SM_DELETE);
+}
+
+/*
+ * Return TRUE if port is online, else return FALSE
+ */
+bfa_boolean_t
+bfa_fcs_lport_is_online(struct bfa_fcs_lport_s *port)
+{
+	return bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online);
+}
+
+/*
+ * Attach time initialization of logical ports.
+ */
+void
+bfa_fcs_lport_attach(struct bfa_fcs_lport_s *lport, struct bfa_fcs_s *fcs,
+		   u16 vf_id, struct bfa_fcs_vport_s *vport)
+{
+	lport->fcs = fcs;
+	lport->fabric = bfa_fcs_vf_lookup(fcs, vf_id);
+	lport->vport = vport;
+	lport->lp_tag = (vport) ? vport->lps->bfa_tag :
+				  lport->fabric->lps->bfa_tag;
+
+	INIT_LIST_HEAD(&lport->rport_q);
+	lport->num_rports = 0;
+}
+
+/*
+ * Logical port initialization of base or virtual port.
+ * Called by fabric for base port or by vport for virtual ports.
+ */
+void
+bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport,
+	struct bfa_lport_cfg_s *port_cfg)
+{
+	struct bfa_fcs_vport_s *vport = lport->vport;
+	struct bfad_s *bfad = (struct bfad_s *)lport->fcs->bfad;
+	char    lpwwn_buf[BFA_STRING_32];
+
+	lport->port_cfg = *port_cfg;
+
+	lport->bfad_port = bfa_fcb_lport_new(lport->fcs->bfad, lport,
+					lport->port_cfg.roles,
+					lport->fabric->vf_drv,
+					vport ? vport->vport_drv : NULL);
+
+	wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(lport));
+	BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+		"New logical port created: WWN = %s Role = %s\n",
+		lpwwn_buf, "Initiator");
+	bfa_fcs_lport_aen_post(lport, BFA_LPORT_AEN_NEW);
+
+	bfa_sm_set_state(lport, bfa_fcs_lport_sm_uninit);
+	bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE);
+}
+
+void
+bfa_fcs_lport_set_symname(struct bfa_fcs_lport_s *port,
+				char *symname)
+{
+	strcpy(port->port_cfg.sym_name.symname, symname);
+
+	if (bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online))
+		bfa_fcs_lport_ns_util_send_rspn_id(
+			BFA_FCS_GET_NS_FROM_PORT(port), NULL);
+}
+
+/*
+ *  fcs_lport_api
+ */
+
+void
+bfa_fcs_lport_get_attr(
+	struct bfa_fcs_lport_s *port,
+	struct bfa_lport_attr_s *port_attr)
+{
+	if (bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online))
+		port_attr->pid = port->pid;
+	else
+		port_attr->pid = 0;
+
+	port_attr->port_cfg = port->port_cfg;
+
+	if (port->fabric) {
+		port_attr->port_type = port->fabric->oper_type;
+		port_attr->loopback = bfa_sm_cmp_state(port->fabric,
+				bfa_fcs_fabric_sm_loopback);
+		port_attr->authfail =
+			bfa_sm_cmp_state(port->fabric,
+				bfa_fcs_fabric_sm_auth_failed);
+		port_attr->fabric_name  = bfa_fcs_lport_get_fabric_name(port);
+		memcpy(port_attr->fabric_ip_addr,
+			bfa_fcs_lport_get_fabric_ipaddr(port),
+			BFA_FCS_FABRIC_IPADDR_SZ);
+
+		if (port->vport != NULL) {
+			port_attr->port_type = BFA_PORT_TYPE_VPORT;
+			port_attr->fpma_mac =
+				port->vport->lps->lp_mac;
+		} else {
+			port_attr->fpma_mac =
+				port->fabric->lps->lp_mac;
+		}
+	} else {
+		port_attr->port_type = BFA_PORT_TYPE_UNKNOWN;
+		port_attr->state = BFA_LPORT_UNINIT;
+	}
+}
+
+/*
+ *  bfa_fcs_lport_fab port fab functions
+ */
+
+/*
+ *   Called by port to initialize fabric services of the base port.
+ */
+static void
+bfa_fcs_lport_fab_init(struct bfa_fcs_lport_s *port)
+{
+	bfa_fcs_lport_ns_init(port);
+	bfa_fcs_lport_scn_init(port);
+	bfa_fcs_lport_ms_init(port);
+}
+
+/*
+ *   Called by port to notify transition to online state.
+ */
+static void
+bfa_fcs_lport_fab_online(struct bfa_fcs_lport_s *port)
+{
+	bfa_fcs_lport_ns_online(port);
+	bfa_fcs_lport_fab_scn_online(port);
+}
+
+/*
+ *   Called by port to notify transition to offline state.
+ */
+static void
+bfa_fcs_lport_fab_offline(struct bfa_fcs_lport_s *port)
+{
+	bfa_fcs_lport_ns_offline(port);
+	bfa_fcs_lport_scn_offline(port);
+	bfa_fcs_lport_ms_offline(port);
+}
+
+/*
+ *  bfa_fcs_lport_n2n  functions
+ */
+
+/*
+ *   Called by fcs/port to initialize N2N topology.
+ */
+static void
+bfa_fcs_lport_n2n_init(struct bfa_fcs_lport_s *port)
+{
+}
+
+/*
+ *   Called by fcs/port to notify transition to online state.
+ */
+static void
+bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port)
+{
+	struct bfa_fcs_lport_n2n_s *n2n_port = &port->port_topo.pn2n;
+	struct bfa_lport_cfg_s *pcfg = &port->port_cfg;
+	struct bfa_fcs_rport_s *rport;
+
+	bfa_trc(port->fcs, pcfg->pwwn);
+
+	/*
+	 * If our PWWN is greater than that of the r-port, we have to
+	 * initiate the PLOGI and assign an address; if not, we need to
+	 * wait for its PLOGI.
+	 *
+	 * If our PWWN is less than that of the remote port, it will send
+	 * a PLOGI with the PIDs assigned. The rport state machine takes
+	 * care of this incoming PLOGI.
+	 */
+	if (memcmp
+	    ((void *)&pcfg->pwwn, (void *)&n2n_port->rem_port_wwn,
+	     sizeof(wwn_t)) > 0) {
+		port->pid = N2N_LOCAL_PID;
+		bfa_lps_set_n2n_pid(port->fabric->lps, N2N_LOCAL_PID);
+		/*
+		 * First, check if we know the device by pwwn.
+		 */
+		rport = bfa_fcs_lport_get_rport_by_pwwn(port,
+							n2n_port->rem_port_wwn);
+		if (rport) {
+			bfa_trc(port->fcs, rport->pid);
+			bfa_trc(port->fcs, rport->pwwn);
+			rport->pid = N2N_REMOTE_PID;
+			bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_SEND);
+			return;
+		}
+
+		/*
+		 * In n2n there can be only one rport. Delete the old one
+		 * whose pid should be zero, because it is offline.
+		 */
+		if (port->num_rports > 0) {
+			rport = bfa_fcs_lport_get_rport_by_pid(port, 0);
+			WARN_ON(rport == NULL);
+			if (rport) {
+				bfa_trc(port->fcs, rport->pwwn);
+				bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
+			}
+		}
+		bfa_fcs_rport_create(port, N2N_REMOTE_PID);
+	}
+}
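+
+/*
+ * Minimal sketch of the N2N role decision above, with illustrative
+ * names: the port whose PWWN compares greater (big-endian memcmp over
+ * the raw wwn_t bytes) assigns the fixed local/remote PIDs and sends
+ * the PLOGI; the other side waits for the incoming PLOGI instead.
+ */
+#if 0
+static bfa_boolean_t
+n2n_is_plogi_initiator(wwn_t my_pwwn, wwn_t rem_pwwn)
+{
+	return (memcmp(&my_pwwn, &rem_pwwn, sizeof(wwn_t)) > 0) ?
+		BFA_TRUE : BFA_FALSE;
+}
+#endif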
+
+/*
+ *   Called by fcs/port to notify transition to offline state.
+ */
+static void
+bfa_fcs_lport_n2n_offline(struct bfa_fcs_lport_s *port)
+{
+	struct bfa_fcs_lport_n2n_s *n2n_port = &port->port_topo.pn2n;
+
+	bfa_trc(port->fcs, port->pid);
+	port->pid = 0;
+	n2n_port->rem_port_wwn = 0;
+	n2n_port->reply_oxid = 0;
+}
+
+void
+bfa_fcport_get_loop_attr(struct bfa_fcs_lport_s *port)
+{
+	int i = 0, j = 0, bit = 0, alpa_bit = 0;
+	u8 k = 0;
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(port->fcs->bfa);
+
+	port->port_topo.ploop.alpabm_valid = fcport->alpabm_valid;
+	port->pid = fcport->myalpa;
+	port->pid = bfa_hton3b(port->pid);
+
+	for (i = 0; i < (FC_ALPA_MAX / 8); i++) {
+		for (j = 0, alpa_bit = 0; j < 8; j++, alpa_bit++) {
+			bfa_trc(port->fcs->bfa, fcport->alpabm.alpa_bm[i]);
+			bit = (fcport->alpabm.alpa_bm[i] & (1 << (7 - j)));
+			if (bit) {
+				port->port_topo.ploop.alpa_pos_map[k] =
+					loop_alpa_map[(i * 8) + alpa_bit];
+				k++;
+				bfa_trc(port->fcs->bfa, k);
+				bfa_trc(port->fcs->bfa,
+					 port->port_topo.ploop.alpa_pos_map[k - 1]);
+			}
+		}
+	}
+	port->port_topo.ploop.num_alpa = k;
+}
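+
+/*
+ * Sketch of the bitmap walk above (illustrative names): every set bit
+ * in the ALPA bitmap, scanned MSB-first within each byte, indexes
+ * loop_alpa_map[] to recover the ALPA value at that position.
+ */
+#if 0
+static u8
+alpa_bm_to_pos_map(const u8 *bm, int nbytes, const u8 *alpa_map,
+		   u8 *pos_map)
+{
+	u8 k = 0;
+	int i, j;
+
+	for (i = 0; i < nbytes; i++)
+		for (j = 0; j < 8; j++)
+			if (bm[i] & (1 << (7 - j)))	/* MSB first */
+				pos_map[k++] = alpa_map[i * 8 + j];
+	return k;	/* number of ALPAs found */
+}
+#endif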
+
+/*
+ * Called by fcs/port to initialize Loop topology.
+ */
+static void
+bfa_fcs_lport_loop_init(struct bfa_fcs_lport_s *port)
+{
+}
+
+/*
+ * Called by fcs/port to notify transition to online state.
+ */
+static void
+bfa_fcs_lport_loop_online(struct bfa_fcs_lport_s *port)
+{
+	u8 num_alpa = 0, alpabm_valid = 0;
+	struct bfa_fcs_rport_s *rport;
+	u8 *alpa_map = NULL;
+	int i = 0;
+	u32 pid;
+
+	bfa_fcport_get_loop_attr(port);
+
+	num_alpa = port->port_topo.ploop.num_alpa;
+	alpabm_valid = port->port_topo.ploop.alpabm_valid;
+	alpa_map = port->port_topo.ploop.alpa_pos_map;
+
+	bfa_trc(port->fcs->bfa, port->pid);
+	bfa_trc(port->fcs->bfa, num_alpa);
+	if (alpabm_valid == 1) {
+		for (i = 0; i < num_alpa; i++) {
+			bfa_trc(port->fcs->bfa, alpa_map[i]);
+			if (alpa_map[i] != bfa_hton3b(port->pid)) {
+				pid = alpa_map[i];
+				bfa_trc(port->fcs->bfa, pid);
+				rport = bfa_fcs_lport_get_rport_by_pid(port,
+						bfa_hton3b(pid));
+				if (!rport)
+					rport = bfa_fcs_rport_create(port,
+						bfa_hton3b(pid));
+			}
+		}
+	} else {
+		for (i = 0; i < MAX_ALPA_COUNT; i++) {
+			if (alpa_map[i] != port->pid) {
+				pid = loop_alpa_map[i];
+				bfa_trc(port->fcs->bfa, pid);
+				rport = bfa_fcs_lport_get_rport_by_pid(port,
+						bfa_hton3b(pid));
+				if (!rport)
+					rport = bfa_fcs_rport_create(port,
+						bfa_hton3b(pid));
+			}
+		}
+	}
+}
+
+/*
+ * Called by fcs/port to notify transition to offline state.
+ */
+static void
+bfa_fcs_lport_loop_offline(struct bfa_fcs_lport_s *port)
+{
+}
+
+#define BFA_FCS_FDMI_CMD_MAX_RETRIES 2
+
+/*
+ * forward declarations
+ */
+static void     bfa_fcs_lport_fdmi_send_rhba(void *fdmi_cbarg,
+					    struct bfa_fcxp_s *fcxp_alloced);
+static void     bfa_fcs_lport_fdmi_send_rprt(void *fdmi_cbarg,
+					    struct bfa_fcxp_s *fcxp_alloced);
+static void     bfa_fcs_lport_fdmi_send_rpa(void *fdmi_cbarg,
+					   struct bfa_fcxp_s *fcxp_alloced);
+static void     bfa_fcs_lport_fdmi_rhba_response(void *fcsarg,
+						struct bfa_fcxp_s *fcxp,
+						void *cbarg,
+						bfa_status_t req_status,
+						u32 rsp_len,
+						u32 resid_len,
+						struct fchs_s *rsp_fchs);
+static void     bfa_fcs_lport_fdmi_rprt_response(void *fcsarg,
+						struct bfa_fcxp_s *fcxp,
+						void *cbarg,
+						bfa_status_t req_status,
+						u32 rsp_len,
+						u32 resid_len,
+						struct fchs_s *rsp_fchs);
+static void     bfa_fcs_lport_fdmi_rpa_response(void *fcsarg,
+					       struct bfa_fcxp_s *fcxp,
+					       void *cbarg,
+					       bfa_status_t req_status,
+					       u32 rsp_len,
+					       u32 resid_len,
+					       struct fchs_s *rsp_fchs);
+static void     bfa_fcs_lport_fdmi_timeout(void *arg);
+static u16 bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi,
+						  u8 *pyld);
+static u16 bfa_fcs_lport_fdmi_build_rprt_pyld(struct bfa_fcs_lport_fdmi_s *fdmi,
+						  u8 *pyld);
+static u16 bfa_fcs_lport_fdmi_build_rpa_pyld(struct bfa_fcs_lport_fdmi_s *fdmi,
+						 u8 *pyld);
+static u16 bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *
+						       fdmi, u8 *pyld);
+static void	bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
+				 struct bfa_fcs_fdmi_hba_attr_s *hba_attr);
+static void	bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
+				  struct bfa_fcs_fdmi_port_attr_s *port_attr);
+u32	bfa_fcs_fdmi_convert_speed(enum bfa_port_speed pport_speed);
+
+/*
+ *  fcs_fdmi_sm FCS FDMI state machine
+ */
+
+/*
+ *  FDMI State Machine events
+ */
+enum port_fdmi_event {
+	FDMISM_EVENT_PORT_ONLINE = 1,
+	FDMISM_EVENT_PORT_OFFLINE = 2,
+	FDMISM_EVENT_RSP_OK = 4,
+	FDMISM_EVENT_RSP_ERROR = 5,
+	FDMISM_EVENT_TIMEOUT = 6,
+	FDMISM_EVENT_RHBA_SENT = 7,
+	FDMISM_EVENT_RPRT_SENT = 8,
+	FDMISM_EVENT_RPA_SENT = 9,
+};
+
+static void     bfa_fcs_lport_fdmi_sm_offline(struct bfa_fcs_lport_fdmi_s *fdmi,
+					     enum port_fdmi_event event);
+static void     bfa_fcs_lport_fdmi_sm_sending_rhba(
+				struct bfa_fcs_lport_fdmi_s *fdmi,
+				enum port_fdmi_event event);
+static void     bfa_fcs_lport_fdmi_sm_rhba(struct bfa_fcs_lport_fdmi_s *fdmi,
+					  enum port_fdmi_event event);
+static void     bfa_fcs_lport_fdmi_sm_rhba_retry(
+				struct bfa_fcs_lport_fdmi_s *fdmi,
+				enum port_fdmi_event event);
+static void     bfa_fcs_lport_fdmi_sm_sending_rprt(
+				struct bfa_fcs_lport_fdmi_s *fdmi,
+				enum port_fdmi_event event);
+static void     bfa_fcs_lport_fdmi_sm_rprt(struct bfa_fcs_lport_fdmi_s *fdmi,
+					  enum port_fdmi_event event);
+static void     bfa_fcs_lport_fdmi_sm_rprt_retry(
+				struct bfa_fcs_lport_fdmi_s *fdmi,
+				enum port_fdmi_event event);
+static void     bfa_fcs_lport_fdmi_sm_sending_rpa(
+				struct bfa_fcs_lport_fdmi_s *fdmi,
+				enum port_fdmi_event event);
+static void     bfa_fcs_lport_fdmi_sm_rpa(struct bfa_fcs_lport_fdmi_s *fdmi,
+					 enum port_fdmi_event event);
+static void     bfa_fcs_lport_fdmi_sm_rpa_retry(
+				struct bfa_fcs_lport_fdmi_s *fdmi,
+				enum port_fdmi_event event);
+static void     bfa_fcs_lport_fdmi_sm_online(struct bfa_fcs_lport_fdmi_s *fdmi,
+					    enum port_fdmi_event event);
+static void     bfa_fcs_lport_fdmi_sm_disabled(
+				struct bfa_fcs_lport_fdmi_s *fdmi,
+				enum port_fdmi_event event);
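+
+/*
+ * State flow, as implemented below. Base port:
+ *   offline -> sending_rhba -> rhba -> sending_rpa -> rpa -> online
+ * Vport:
+ *   offline -> sending_rprt -> rprt -> online
+ * RSP_ERROR moves to the matching *_retry state until the retry count
+ * is exhausted; PORT_OFFLINE drops back to offline from any active
+ * state.
+ */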
+/*
+ *	Start in offline state - awaiting MS to send start.
+ */
+static void
+bfa_fcs_lport_fdmi_sm_offline(struct bfa_fcs_lport_fdmi_s *fdmi,
+			     enum port_fdmi_event event)
+{
+	struct bfa_fcs_lport_s *port = fdmi->ms->port;
+
+	bfa_trc(port->fcs, port->port_cfg.pwwn);
+	bfa_trc(port->fcs, event);
+
+	fdmi->retry_cnt = 0;
+
+	switch (event) {
+	case FDMISM_EVENT_PORT_ONLINE:
+		if (port->vport) {
+			/*
+			 * For Vports, register a new port.
+			 */
+			bfa_sm_set_state(fdmi,
+					 bfa_fcs_lport_fdmi_sm_sending_rprt);
+			bfa_fcs_lport_fdmi_send_rprt(fdmi, NULL);
+		} else {
+			/*
+			 * For a base port, we should first register the HBA
+			 * attributes. The HBA registration also contains the
+			 * base port registration.
+			 */
+			bfa_sm_set_state(fdmi,
+					 bfa_fcs_lport_fdmi_sm_sending_rhba);
+			bfa_fcs_lport_fdmi_send_rhba(fdmi, NULL);
+		}
+		break;
+
+	case FDMISM_EVENT_PORT_OFFLINE:
+		break;
+
+	default:
+		bfa_sm_fault(port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_fdmi_sm_sending_rhba(struct bfa_fcs_lport_fdmi_s *fdmi,
+				  enum port_fdmi_event event)
+{
+	struct bfa_fcs_lport_s *port = fdmi->ms->port;
+
+	bfa_trc(port->fcs, port->port_cfg.pwwn);
+	bfa_trc(port->fcs, event);
+
+	switch (event) {
+	case FDMISM_EVENT_RHBA_SENT:
+		bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_rhba);
+		break;
+
+	case FDMISM_EVENT_PORT_OFFLINE:
+		bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
+		bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(port),
+					   &fdmi->fcxp_wqe);
+		break;
+
+	default:
+		bfa_sm_fault(port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_fdmi_sm_rhba(struct bfa_fcs_lport_fdmi_s *fdmi,
+			enum port_fdmi_event event)
+{
+	struct bfa_fcs_lport_s *port = fdmi->ms->port;
+
+	bfa_trc(port->fcs, port->port_cfg.pwwn);
+	bfa_trc(port->fcs, event);
+
+	switch (event) {
+	case FDMISM_EVENT_RSP_ERROR:
+		/*
+		 * if max retries have not been reached, start timer for a
+		 * delayed retry
+		 */
+		if (fdmi->retry_cnt++ < BFA_FCS_FDMI_CMD_MAX_RETRIES) {
+			bfa_sm_set_state(fdmi,
+					bfa_fcs_lport_fdmi_sm_rhba_retry);
+			bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(port),
+					    &fdmi->timer,
+					    bfa_fcs_lport_fdmi_timeout, fdmi,
+					    BFA_FCS_RETRY_TIMEOUT);
+		} else {
+			/*
+			 * set state to offline
+			 */
+			bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
+		}
+		break;
+
+	case FDMISM_EVENT_RSP_OK:
+		/*
+		 * Initiate Register Port Attributes
+		 */
+		bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_sending_rpa);
+		fdmi->retry_cnt = 0;
+		bfa_fcs_lport_fdmi_send_rpa(fdmi, NULL);
+		break;
+
+	case FDMISM_EVENT_PORT_OFFLINE:
+		bfa_fcxp_discard(fdmi->fcxp);
+		bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
+		break;
+
+	default:
+		bfa_sm_fault(port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_fdmi_sm_rhba_retry(struct bfa_fcs_lport_fdmi_s *fdmi,
+				enum port_fdmi_event event)
+{
+	struct bfa_fcs_lport_s *port = fdmi->ms->port;
+
+	bfa_trc(port->fcs, port->port_cfg.pwwn);
+	bfa_trc(port->fcs, event);
+
+	switch (event) {
+	case FDMISM_EVENT_TIMEOUT:
+		/*
+		 * Retry Timer Expired. Re-send
+		 */
+		bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_sending_rhba);
+		bfa_fcs_lport_fdmi_send_rhba(fdmi, NULL);
+		break;
+
+	case FDMISM_EVENT_PORT_OFFLINE:
+		bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
+		bfa_timer_stop(&fdmi->timer);
+		break;
+
+	default:
+		bfa_sm_fault(port->fcs, event);
+	}
+}
+
+/*
+ * RPRT : Register Port
+ */
+static void
+bfa_fcs_lport_fdmi_sm_sending_rprt(struct bfa_fcs_lport_fdmi_s *fdmi,
+				  enum port_fdmi_event event)
+{
+	struct bfa_fcs_lport_s *port = fdmi->ms->port;
+
+	bfa_trc(port->fcs, port->port_cfg.pwwn);
+	bfa_trc(port->fcs, event);
+
+	switch (event) {
+	case FDMISM_EVENT_RPRT_SENT:
+		bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_rprt);
+		break;
+
+	case FDMISM_EVENT_PORT_OFFLINE:
+		bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
+		bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(port),
+					   &fdmi->fcxp_wqe);
+		break;
+
+	default:
+		bfa_sm_fault(port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_fdmi_sm_rprt(struct bfa_fcs_lport_fdmi_s *fdmi,
+			enum port_fdmi_event event)
+{
+	struct bfa_fcs_lport_s *port = fdmi->ms->port;
+
+	bfa_trc(port->fcs, port->port_cfg.pwwn);
+	bfa_trc(port->fcs, event);
+
+	switch (event) {
+	case FDMISM_EVENT_RSP_ERROR:
+		/*
+		 * if max retries have not been reached, start timer for a
+		 * delayed retry
+		 */
+		if (fdmi->retry_cnt++ < BFA_FCS_FDMI_CMD_MAX_RETRIES) {
+			bfa_sm_set_state(fdmi,
+					bfa_fcs_lport_fdmi_sm_rprt_retry);
+			bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(port),
+					    &fdmi->timer,
+					    bfa_fcs_lport_fdmi_timeout, fdmi,
+					    BFA_FCS_RETRY_TIMEOUT);
+
+		} else {
+			/*
+			 * set state to offline
+			 */
+			bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
+			fdmi->retry_cnt = 0;
+		}
+		break;
+
+	case FDMISM_EVENT_RSP_OK:
+		fdmi->retry_cnt = 0;
+		bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_online);
+		break;
+
+	case FDMISM_EVENT_PORT_OFFLINE:
+		bfa_fcxp_discard(fdmi->fcxp);
+		bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
+		break;
+
+	default:
+		bfa_sm_fault(port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_fdmi_sm_rprt_retry(struct bfa_fcs_lport_fdmi_s *fdmi,
+				enum port_fdmi_event event)
+{
+	struct bfa_fcs_lport_s *port = fdmi->ms->port;
+
+	bfa_trc(port->fcs, port->port_cfg.pwwn);
+	bfa_trc(port->fcs, event);
+
+	switch (event) {
+	case FDMISM_EVENT_TIMEOUT:
+		/*
+		 * Retry Timer Expired. Re-send
+		 */
+		bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_sending_rprt);
+		bfa_fcs_lport_fdmi_send_rprt(fdmi, NULL);
+		break;
+
+	case FDMISM_EVENT_PORT_OFFLINE:
+		bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
+		bfa_timer_stop(&fdmi->timer);
+		break;
+
+	default:
+		bfa_sm_fault(port->fcs, event);
+	}
+}
+
+/*
+ * Register Port Attributes
+ */
+static void
+bfa_fcs_lport_fdmi_sm_sending_rpa(struct bfa_fcs_lport_fdmi_s *fdmi,
+				 enum port_fdmi_event event)
+{
+	struct bfa_fcs_lport_s *port = fdmi->ms->port;
+
+	bfa_trc(port->fcs, port->port_cfg.pwwn);
+	bfa_trc(port->fcs, event);
+
+	switch (event) {
+	case FDMISM_EVENT_RPA_SENT:
+		bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_rpa);
+		break;
+
+	case FDMISM_EVENT_PORT_OFFLINE:
+		bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
+		bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(port),
+					   &fdmi->fcxp_wqe);
+		break;
+
+	default:
+		bfa_sm_fault(port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_fdmi_sm_rpa(struct bfa_fcs_lport_fdmi_s *fdmi,
+			enum port_fdmi_event event)
+{
+	struct bfa_fcs_lport_s *port = fdmi->ms->port;
+
+	bfa_trc(port->fcs, port->port_cfg.pwwn);
+	bfa_trc(port->fcs, event);
+
+	switch (event) {
+	case FDMISM_EVENT_RSP_ERROR:
+		/*
+		 * if max retries have not been reached, start timer for a
+		 * delayed retry
+		 */
+		if (fdmi->retry_cnt++ < BFA_FCS_FDMI_CMD_MAX_RETRIES) {
+			bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_rpa_retry);
+			bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(port),
+					    &fdmi->timer,
+					    bfa_fcs_lport_fdmi_timeout, fdmi,
+					    BFA_FCS_RETRY_TIMEOUT);
+		} else {
+			/*
+			 * set state to offline
+			 */
+			bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
+			fdmi->retry_cnt = 0;
+		}
+		break;
+
+	case FDMISM_EVENT_RSP_OK:
+		bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_online);
+		fdmi->retry_cnt = 0;
+		break;
+
+	case FDMISM_EVENT_PORT_OFFLINE:
+		bfa_fcxp_discard(fdmi->fcxp);
+		bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
+		break;
+
+	default:
+		bfa_sm_fault(port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_fdmi_sm_rpa_retry(struct bfa_fcs_lport_fdmi_s *fdmi,
+			       enum port_fdmi_event event)
+{
+	struct bfa_fcs_lport_s *port = fdmi->ms->port;
+
+	bfa_trc(port->fcs, port->port_cfg.pwwn);
+	bfa_trc(port->fcs, event);
+
+	switch (event) {
+	case FDMISM_EVENT_TIMEOUT:
+		/*
+		 * Retry Timer Expired. Re-send
+		 */
+		bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_sending_rpa);
+		bfa_fcs_lport_fdmi_send_rpa(fdmi, NULL);
+		break;
+
+	case FDMISM_EVENT_PORT_OFFLINE:
+		bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
+		bfa_timer_stop(&fdmi->timer);
+		break;
+
+	default:
+		bfa_sm_fault(port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_fdmi_sm_online(struct bfa_fcs_lport_fdmi_s *fdmi,
+				enum port_fdmi_event event)
+{
+	struct bfa_fcs_lport_s *port = fdmi->ms->port;
+
+	bfa_trc(port->fcs, port->port_cfg.pwwn);
+	bfa_trc(port->fcs, event);
+
+	switch (event) {
+	case FDMISM_EVENT_PORT_OFFLINE:
+		bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
+		break;
+
+	default:
+		bfa_sm_fault(port->fcs, event);
+	}
+}
+
+/*
+ *  FDMI is disabled state.
+ */
+static void
+bfa_fcs_lport_fdmi_sm_disabled(struct bfa_fcs_lport_fdmi_s *fdmi,
+			     enum port_fdmi_event event)
+{
+	struct bfa_fcs_lport_s *port = fdmi->ms->port;
+
+	bfa_trc(port->fcs, port->port_cfg.pwwn);
+	bfa_trc(port->fcs, event);
+
+	/* No-op state. FDMI can only be enabled at driver init. */
+}
+
+/*
+ * RHBA : Register HBA Attributes.
+ */
+static void
+bfa_fcs_lport_fdmi_send_rhba(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+	struct bfa_fcs_lport_fdmi_s *fdmi = fdmi_cbarg;
+	struct bfa_fcs_lport_s *port = fdmi->ms->port;
+	struct fchs_s fchs;
+	int             len, attr_len;
+	struct bfa_fcxp_s *fcxp;
+	u8        *pyld;
+
+	bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+	fcxp = fcxp_alloced ? fcxp_alloced :
+	       bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
+	if (!fcxp) {
+		bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe,
+				bfa_fcs_lport_fdmi_send_rhba, fdmi, BFA_TRUE);
+		return;
+	}
+	fdmi->fcxp = fcxp;
+
+	pyld = bfa_fcxp_get_reqbuf(fcxp);
+	memset(pyld, 0, FC_MAX_PDUSZ);
+
+	len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port),
+				   FDMI_RHBA);
+
+	attr_len =
+		bfa_fcs_lport_fdmi_build_rhba_pyld(fdmi,
+					  (u8 *) ((struct ct_hdr_s *) pyld
+						       + 1));
+
+	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+			  FC_CLASS_3, (len + attr_len), &fchs,
+			  bfa_fcs_lport_fdmi_rhba_response, (void *)fdmi,
+			  FC_MAX_PDUSZ, FC_FCCT_TOV);
+
+	bfa_sm_send_event(fdmi, FDMISM_EVENT_RHBA_SENT);
+}
+
+static          u16
+bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
+{
+	struct bfa_fcs_lport_s *port = fdmi->ms->port;
+	struct bfa_fcs_fdmi_hba_attr_s hba_attr;
+	struct bfa_fcs_fdmi_hba_attr_s *fcs_hba_attr = &hba_attr;
+	struct fdmi_rhba_s *rhba = (struct fdmi_rhba_s *) pyld;
+	struct fdmi_attr_s *attr;
+	u8        *curr_ptr;
+	u16        len, count;
+	u16	templen;
+
+	/*
+	 * get hba attributes
+	 */
+	bfa_fcs_fdmi_get_hbaattr(fdmi, fcs_hba_attr);
+
+	rhba->hba_id = bfa_fcs_lport_get_pwwn(port);
+	rhba->port_list.num_ports = cpu_to_be32(1);
+	rhba->port_list.port_entry = bfa_fcs_lport_get_pwwn(port);
+
+	len = sizeof(rhba->hba_id) + sizeof(rhba->port_list);
+
+	count = 0;
+	len += sizeof(rhba->hba_attr_blk.attr_count);
+
+	/*
+	 * fill out the individual entries of the HBA attribute block
+	 */
+	curr_ptr = (u8 *) &rhba->hba_attr_blk.hba_attr;
+
+	/*
+	 * Node Name
+	 */
+	attr = (struct fdmi_attr_s *) curr_ptr;
+	attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_NODENAME);
+	templen = sizeof(wwn_t);
+	memcpy(attr->value, &bfa_fcs_lport_get_nwwn(port), templen);
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+	len += templen;
+	count++;
+	attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
+
+	/*
+	 * Manufacturer
+	 */
+	attr = (struct fdmi_attr_s *) curr_ptr;
+	attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MANUFACTURER);
+	templen = (u16) strlen(fcs_hba_attr->manufacturer);
+	memcpy(attr->value, fcs_hba_attr->manufacturer, templen);
+	templen = fc_roundup(templen, sizeof(u32));
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+	len += templen;
+	count++;
+	attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
+
+	/*
+	 * Serial Number
+	 */
+	attr = (struct fdmi_attr_s *) curr_ptr;
+	attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_SERIALNUM);
+	templen = (u16) strlen(fcs_hba_attr->serial_num);
+	memcpy(attr->value, fcs_hba_attr->serial_num, templen);
+	templen = fc_roundup(templen, sizeof(u32));
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+	len += templen;
+	count++;
+	attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
+
+	/*
+	 * Model
+	 */
+	attr = (struct fdmi_attr_s *) curr_ptr;
+	attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL);
+	templen = (u16) strlen(fcs_hba_attr->model);
+	memcpy(attr->value, fcs_hba_attr->model, templen);
+	templen = fc_roundup(templen, sizeof(u32));
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+	len += templen;
+	count++;
+	attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
+
+	/*
+	 * Model Desc
+	 */
+	attr = (struct fdmi_attr_s *) curr_ptr;
+	attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL_DESC);
+	templen = (u16) strlen(fcs_hba_attr->model_desc);
+	memcpy(attr->value, fcs_hba_attr->model_desc, templen);
+	templen = fc_roundup(templen, sizeof(u32));
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+	len += templen;
+	count++;
+	attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
+
+	/*
+	 * H/W Version
+	 */
+	if (fcs_hba_attr->hw_version[0] != '\0') {
+		attr = (struct fdmi_attr_s *) curr_ptr;
+		attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_HW_VERSION);
+		templen = (u16) strlen(fcs_hba_attr->hw_version);
+		memcpy(attr->value, fcs_hba_attr->hw_version, templen);
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
+		count++;
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+					 sizeof(templen));
+	}
+
+	/*
+	 * Driver Version
+	 */
+	attr = (struct fdmi_attr_s *) curr_ptr;
+	attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_DRIVER_VERSION);
+	templen = (u16) strlen(fcs_hba_attr->driver_version);
+	memcpy(attr->value, fcs_hba_attr->driver_version, templen);
+	templen = fc_roundup(templen, sizeof(u32));
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+	len += templen;
+	count++;
+	attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
+
+	/*
+	 * Option Rom Version
+	 */
+	if (fcs_hba_attr->option_rom_ver[0] != '\0') {
+		attr = (struct fdmi_attr_s *) curr_ptr;
+		attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_ROM_VERSION);
+		templen = (u16) strlen(fcs_hba_attr->option_rom_ver);
+		memcpy(attr->value, fcs_hba_attr->option_rom_ver, templen);
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
+		count++;
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+					 sizeof(templen));
+	}
+
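+	/*
+	 * Firmware Version
+	 */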
+	attr = (struct fdmi_attr_s *) curr_ptr;
+	attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_FW_VERSION);
+	templen = (u16) strlen(fcs_hba_attr->fw_version);
+	memcpy(attr->value, fcs_hba_attr->fw_version, templen);
+	templen = fc_roundup(templen, sizeof(u32));
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+	len += templen;
+	count++;
+	attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
+
+	/*
+	 * OS Name
+	 */
+	if (fcs_hba_attr->os_name[0] != '\0') {
+		attr = (struct fdmi_attr_s *) curr_ptr;
+		attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_OS_NAME);
+		templen = (u16) strlen(fcs_hba_attr->os_name);
+		memcpy(attr->value, fcs_hba_attr->os_name, templen);
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
+		count++;
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+					sizeof(templen));
+	}
+
+	/*
+	 * MAX_CT_PAYLOAD
+	 */
+	attr = (struct fdmi_attr_s *) curr_ptr;
+	attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MAX_CT);
+	templen = sizeof(fcs_hba_attr->max_ct_pyld);
+	memcpy(attr->value, &fcs_hba_attr->max_ct_pyld, templen);
+	templen = fc_roundup(templen, sizeof(u32));
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+	len += templen;
+	count++;
+	attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
+	/*
+	 * Send extended attributes (FOS 7.1 support)
+	 */
+	if (fdmi->retry_cnt == 0) {
+		attr = (struct fdmi_attr_s *) curr_ptr;
+		attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_NODE_SYM_NAME);
+		templen = sizeof(fcs_hba_attr->node_sym_name);
+		memcpy(attr->value, &fcs_hba_attr->node_sym_name, templen);
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
+		count++;
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+					sizeof(templen));
+
+		attr = (struct fdmi_attr_s *) curr_ptr;
+		attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_VENDOR_ID);
+		templen = sizeof(fcs_hba_attr->vendor_info);
+		memcpy(attr->value, &fcs_hba_attr->vendor_info, templen);
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
+		count++;
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+					sizeof(templen));
+
+		attr = (struct fdmi_attr_s *) curr_ptr;
+		attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_NUM_PORTS);
+		templen = sizeof(fcs_hba_attr->num_ports);
+		memcpy(attr->value, &fcs_hba_attr->num_ports, templen);
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
+		count++;
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+					sizeof(templen));
+
+		attr = (struct fdmi_attr_s *) curr_ptr;
+		attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_FABRIC_NAME);
+		templen = sizeof(fcs_hba_attr->fabric_name);
+		memcpy(attr->value, &fcs_hba_attr->fabric_name, templen);
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
+		count++;
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+					sizeof(templen));
+
+		attr = (struct fdmi_attr_s *) curr_ptr;
+		attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_BIOS_VER);
+		templen = sizeof(fcs_hba_attr->bios_ver);
+		memcpy(attr->value, &fcs_hba_attr->bios_ver, templen);
+		/* pad templen itself; attr->len is not yet set here */
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
+		count++;
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+					sizeof(templen));
+	}
+
+	/*
+	 * Update size of payload
+	 */
+	len += ((sizeof(attr->type) + sizeof(attr->len)) * count);
+
+	rhba->hba_attr_blk.attr_count = cpu_to_be32(count);
+	return len;
+}
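+
+/*
+ * Every attribute above follows one TLV pattern: a 16-bit type, a
+ * 16-bit length covering type + length + padded value, and the value
+ * padded to a 32-bit boundary (the request buffer is pre-zeroed, so
+ * the pad bytes stay zero). A hypothetical helper capturing that
+ * pattern, not part of the driver:
+ */
+#if 0
+static u8 *
+fdmi_attr_append(u8 *curr_ptr, u16 type, const void *val, u16 vallen,
+		 u16 *len, u16 *count)
+{
+	struct fdmi_attr_s *attr = (struct fdmi_attr_s *) curr_ptr;
+	u16 templen = fc_roundup(vallen, sizeof(u32));
+
+	attr->type = cpu_to_be16(type);
+	memcpy(attr->value, val, vallen);
+	attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+				sizeof(attr->len));
+	*len += templen;	/* header sizes are added once per count */
+	(*count)++;
+	return curr_ptr + sizeof(attr->type) + sizeof(attr->len) + templen;
+}
+#endif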
+
+static void
+bfa_fcs_lport_fdmi_rhba_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
+				void *cbarg, bfa_status_t req_status,
+				u32 rsp_len, u32 resid_len,
+				struct fchs_s *rsp_fchs)
+{
+	struct bfa_fcs_lport_fdmi_s *fdmi =
+				(struct bfa_fcs_lport_fdmi_s *) cbarg;
+	struct bfa_fcs_lport_s *port = fdmi->ms->port;
+	struct ct_hdr_s *cthdr = NULL;
+
+	bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+	/*
+	 * Sanity Checks
+	 */
+	if (req_status != BFA_STATUS_OK) {
+		bfa_trc(port->fcs, req_status);
+		bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
+		return;
+	}
+
+	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
+	cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
+
+	if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
+		bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK);
+		return;
+	}
+
+	bfa_trc(port->fcs, cthdr->reason_code);
+	bfa_trc(port->fcs, cthdr->exp_code);
+	bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
+}
+
+/*
+ * RPRT : Register Port
+ */
+static void
+bfa_fcs_lport_fdmi_send_rprt(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+	struct bfa_fcs_lport_fdmi_s *fdmi = fdmi_cbarg;
+	struct bfa_fcs_lport_s *port = fdmi->ms->port;
+	struct fchs_s fchs;
+	u16        len, attr_len;
+	struct bfa_fcxp_s *fcxp;
+	u8        *pyld;
+
+	bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+	fcxp = fcxp_alloced ? fcxp_alloced :
+	       bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
+	if (!fcxp) {
+		bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe,
+				bfa_fcs_lport_fdmi_send_rprt, fdmi, BFA_TRUE);
+		return;
+	}
+	fdmi->fcxp = fcxp;
+
+	pyld = bfa_fcxp_get_reqbuf(fcxp);
+	memset(pyld, 0, FC_MAX_PDUSZ);
+
+	len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port),
+				   FDMI_RPRT);
+
+	attr_len =
+		bfa_fcs_lport_fdmi_build_rprt_pyld(fdmi,
+					  (u8 *) ((struct ct_hdr_s *) pyld
+						       + 1));
+
+	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+			  FC_CLASS_3, len + attr_len, &fchs,
+			  bfa_fcs_lport_fdmi_rprt_response, (void *)fdmi,
+			  FC_MAX_PDUSZ, FC_FCCT_TOV);
+
+	bfa_sm_send_event(fdmi, FDMISM_EVENT_RPRT_SENT);
+}
+
+/*
+ * This routine builds the Port Attribute Block used in RPA and RPRT commands.
+ */
+static          u16
+bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi,
+				       u8 *pyld)
+{
+	struct bfa_fcs_fdmi_port_attr_s fcs_port_attr;
+	struct fdmi_port_attr_s *port_attrib = (struct fdmi_port_attr_s *) pyld;
+	struct fdmi_attr_s *attr;
+	u8        *curr_ptr;
+	u16        len;
+	u8	count = 0;
+	u16	templen;
+
+	/*
+	 * get port attributes
+	 */
+	bfa_fcs_fdmi_get_portattr(fdmi, &fcs_port_attr);
+
+	len = sizeof(port_attrib->attr_count);
+
+	/*
+	 * fill out the individual entries
+	 */
+	curr_ptr = (u8 *) &port_attrib->port_attr;
+
+	/*
+	 * FC4 Types
+	 */
+	attr = (struct fdmi_attr_s *) curr_ptr;
+	attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_FC4_TYPES);
+	templen = sizeof(fcs_port_attr.supp_fc4_types);
+	memcpy(attr->value, fcs_port_attr.supp_fc4_types, templen);
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+	len += templen;
+	++count;
+	attr->len =
+		cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
+
+	/*
+	 * Supported Speed
+	 */
+	attr = (struct fdmi_attr_s *) curr_ptr;
+	attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_SUPP_SPEED);
+	templen = sizeof(fcs_port_attr.supp_speed);
+	memcpy(attr->value, &fcs_port_attr.supp_speed, templen);
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+	len += templen;
+	++count;
+	attr->len =
+		cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
+
+	/*
+	 * current Port Speed
+	 */
+	attr = (struct fdmi_attr_s *) curr_ptr;
+	attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_SPEED);
+	templen = sizeof(fcs_port_attr.curr_speed);
+	memcpy(attr->value, &fcs_port_attr.curr_speed, templen);
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+	len += templen;
+	++count;
+	attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
+
+	/*
+	 * max frame size
+	 */
+	attr = (struct fdmi_attr_s *) curr_ptr;
+	attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_FRAME_SIZE);
+	templen = sizeof(fcs_port_attr.max_frm_size);
+	memcpy(attr->value, &fcs_port_attr.max_frm_size, templen);
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+	len += templen;
+	++count;
+	attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
+
+	/*
+	 * OS Device Name
+	 */
+	if (fcs_port_attr.os_device_name[0] != '\0') {
+		attr = (struct fdmi_attr_s *) curr_ptr;
+		attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_DEV_NAME);
+		templen = (u16) strlen(fcs_port_attr.os_device_name);
+		memcpy(attr->value, fcs_port_attr.os_device_name, templen);
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
+		++count;
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+					sizeof(templen));
+	}
+	/*
+	 * Host Name
+	 */
+	if (fcs_port_attr.host_name[0] != '\0') {
+		attr = (struct fdmi_attr_s *) curr_ptr;
+		attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_HOST_NAME);
+		templen = (u16) strlen(fcs_port_attr.host_name);
+		memcpy(attr->value, fcs_port_attr.host_name, templen);
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
+		++count;
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+				sizeof(templen));
+	}
+
+	if (fdmi->retry_cnt == 0) {
+		attr = (struct fdmi_attr_s *) curr_ptr;
+		attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_NODE_NAME);
+		templen = sizeof(fcs_port_attr.node_name);
+		memcpy(attr->value, &fcs_port_attr.node_name, templen);
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
+		++count;
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+				 sizeof(templen));
+
+		attr = (struct fdmi_attr_s *) curr_ptr;
+		attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_NAME);
+		templen = sizeof(fcs_port_attr.port_name);
+		memcpy(attr->value, &fcs_port_attr.port_name, templen);
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
+		++count;
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+				 sizeof(templen));
+
+		if (fcs_port_attr.port_sym_name.symname[0] != '\0') {
+			attr = (struct fdmi_attr_s *) curr_ptr;
+			attr->type =
+				cpu_to_be16(FDMI_PORT_ATTRIB_PORT_SYM_NAME);
+			templen = sizeof(fcs_port_attr.port_sym_name);
+			memcpy(attr->value,
+				&fcs_port_attr.port_sym_name, templen);
+			templen = fc_roundup(templen, sizeof(u32));
+			curr_ptr += sizeof(attr->type) +
+					sizeof(templen) + templen;
+			len += templen;
+			++count;
+			attr->len = cpu_to_be16(templen +
+				sizeof(attr->type) + sizeof(templen));
+		}
+
+		attr = (struct fdmi_attr_s *) curr_ptr;
+		attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_TYPE);
+		templen = sizeof(fcs_port_attr.port_type);
+		memcpy(attr->value, &fcs_port_attr.port_type, templen);
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
+		++count;
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+				 sizeof(templen));
+
+		attr = (struct fdmi_attr_s *) curr_ptr;
+		attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_SUPP_COS);
+		templen = sizeof(fcs_port_attr.scos);
+		memcpy(attr->value, &fcs_port_attr.scos, templen);
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
+		++count;
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+				 sizeof(templen));
+
+		attr = (struct fdmi_attr_s *) curr_ptr;
+		attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_FAB_NAME);
+		templen = sizeof(fcs_port_attr.port_fabric_name);
+		memcpy(attr->value, &fcs_port_attr.port_fabric_name, templen);
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
+		++count;
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+				 sizeof(templen));
+
+		attr = (struct fdmi_attr_s *) curr_ptr;
+		attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_FC4_TYPE);
+		templen = sizeof(fcs_port_attr.port_act_fc4_type);
+		memcpy(attr->value, fcs_port_attr.port_act_fc4_type,
+				templen);
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
+		++count;
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+				 sizeof(templen));
+
+		attr = (struct fdmi_attr_s *) curr_ptr;
+		attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_STATE);
+		templen = sizeof(fcs_port_attr.port_state);
+		memcpy(attr->value, &fcs_port_attr.port_state, templen);
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
+		++count;
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+				 sizeof(templen));
+
+		attr = (struct fdmi_attr_s *) curr_ptr;
+		attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_NUM_RPRT);
+		templen = sizeof(fcs_port_attr.num_ports);
+		memcpy(attr->value, &fcs_port_attr.num_ports, templen);
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
+		++count;
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+				sizeof(templen));
+	}
+
+	/*
+	 * Update size of payload
+	 */
+	port_attrib->attr_count = cpu_to_be32(count);
+	len += ((sizeof(attr->type) + sizeof(attr->len)) * count);
+	return len;
+}
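+
+/*
+ * Note: the retry_cnt == 0 guards above and in the RHBA builder mean
+ * the extended (FOS 7.1) attributes are only sent on the first
+ * attempt; a retry after RSP_ERROR falls back to the baseline set,
+ * presumably for management servers that reject the newer attributes.
+ */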
+
+static          u16
+bfa_fcs_lport_fdmi_build_rprt_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
+{
+	struct bfa_fcs_lport_s *port = fdmi->ms->port;
+	struct fdmi_rprt_s *rprt = (struct fdmi_rprt_s *) pyld;
+	u16        len;
+
+	rprt->hba_id = bfa_fcs_lport_get_pwwn(bfa_fcs_get_base_port(port->fcs));
+	rprt->port_name = bfa_fcs_lport_get_pwwn(port);
+
+	len = bfa_fcs_lport_fdmi_build_portattr_block(fdmi,
+				(u8 *) &rprt->port_attr_blk);
+
+	len += sizeof(rprt->hba_id) + sizeof(rprt->port_name);
+
+	return len;
+}
+
+static void
+bfa_fcs_lport_fdmi_rprt_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
+				void *cbarg, bfa_status_t req_status,
+				u32 rsp_len, u32 resid_len,
+				struct fchs_s *rsp_fchs)
+{
+	struct bfa_fcs_lport_fdmi_s *fdmi =
+			(struct bfa_fcs_lport_fdmi_s *) cbarg;
+	struct bfa_fcs_lport_s *port = fdmi->ms->port;
+	struct ct_hdr_s *cthdr = NULL;
+
+	bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+	/*
+	 * Sanity Checks
+	 */
+	if (req_status != BFA_STATUS_OK) {
+		bfa_trc(port->fcs, req_status);
+		bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
+		return;
+	}
+
+	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
+	cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
+
+	if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
+		bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK);
+		return;
+	}
+
+	bfa_trc(port->fcs, cthdr->reason_code);
+	bfa_trc(port->fcs, cthdr->exp_code);
+	bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
+}
+
+/*
+ * RPA : Register Port Attributes.
+ */
+static void
+bfa_fcs_lport_fdmi_send_rpa(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+	struct bfa_fcs_lport_fdmi_s *fdmi = fdmi_cbarg;
+	struct bfa_fcs_lport_s *port = fdmi->ms->port;
+	struct fchs_s fchs;
+	u16        len, attr_len;
+	struct bfa_fcxp_s *fcxp;
+	u8        *pyld;
+
+	bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+	fcxp = fcxp_alloced ? fcxp_alloced :
+	       bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
+	if (!fcxp) {
+		bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe,
+				bfa_fcs_lport_fdmi_send_rpa, fdmi, BFA_TRUE);
+		return;
+	}
+	fdmi->fcxp = fcxp;
+
+	pyld = bfa_fcxp_get_reqbuf(fcxp);
+	memset(pyld, 0, FC_MAX_PDUSZ);
+
+	len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port),
+				   FDMI_RPA);
+
+	attr_len = bfa_fcs_lport_fdmi_build_rpa_pyld(fdmi,
+				(u8 *) ((struct ct_hdr_s *) pyld + 1));
+
+	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+			  FC_CLASS_3, len + attr_len, &fchs,
+			  bfa_fcs_lport_fdmi_rpa_response, (void *)fdmi,
+			  FC_MAX_PDUSZ, FC_FCCT_TOV);
+
+	bfa_sm_send_event(fdmi, FDMISM_EVENT_RPA_SENT);
+}
+
+static          u16
+bfa_fcs_lport_fdmi_build_rpa_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
+{
+	struct bfa_fcs_lport_s *port = fdmi->ms->port;
+	struct fdmi_rpa_s *rpa = (struct fdmi_rpa_s *) pyld;
+	u16        len;
+
+	rpa->port_name = bfa_fcs_lport_get_pwwn(port);
+
+	len = bfa_fcs_lport_fdmi_build_portattr_block(fdmi,
+				(u8 *) &rpa->port_attr_blk);
+
+	len += sizeof(rpa->port_name);
+
+	return len;
+}
+
+static void
+bfa_fcs_lport_fdmi_rpa_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
+			void *cbarg, bfa_status_t req_status, u32 rsp_len,
+			u32 resid_len, struct fchs_s *rsp_fchs)
+{
+	struct bfa_fcs_lport_fdmi_s *fdmi =
+				(struct bfa_fcs_lport_fdmi_s *) cbarg;
+	struct bfa_fcs_lport_s *port = fdmi->ms->port;
+	struct ct_hdr_s *cthdr = NULL;
+
+	bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+	/*
+	 * Sanity Checks
+	 */
+	if (req_status != BFA_STATUS_OK) {
+		bfa_trc(port->fcs, req_status);
+		bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
+		return;
+	}
+
+	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
+	cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
+
+	if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
+		bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK);
+		return;
+	}
+
+	bfa_trc(port->fcs, cthdr->reason_code);
+	bfa_trc(port->fcs, cthdr->exp_code);
+	bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
+}
+
+static void
+bfa_fcs_lport_fdmi_timeout(void *arg)
+{
+	struct bfa_fcs_lport_fdmi_s *fdmi = (struct bfa_fcs_lport_fdmi_s *) arg;
+
+	bfa_sm_send_event(fdmi, FDMISM_EVENT_TIMEOUT);
+}
+
+static void
+bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
+			 struct bfa_fcs_fdmi_hba_attr_s *hba_attr)
+{
+	struct bfa_fcs_lport_s *port = fdmi->ms->port;
+	struct bfa_fcs_driver_info_s  *driver_info = &port->fcs->driver_info;
+	struct bfa_fcs_fdmi_port_attr_s fcs_port_attr;
+
+	memset(hba_attr, 0, sizeof(struct bfa_fcs_fdmi_hba_attr_s));
+
+	bfa_ioc_get_adapter_manufacturer(&port->fcs->bfa->ioc,
+					hba_attr->manufacturer);
+	bfa_ioc_get_adapter_serial_num(&port->fcs->bfa->ioc,
+					hba_attr->serial_num);
+	bfa_ioc_get_adapter_model(&port->fcs->bfa->ioc,
+					hba_attr->model);
+	bfa_ioc_get_adapter_model(&port->fcs->bfa->ioc,
+					hba_attr->model_desc);
+	bfa_ioc_get_pci_chip_rev(&port->fcs->bfa->ioc,
+					hba_attr->hw_version);
+	bfa_ioc_get_adapter_optrom_ver(&port->fcs->bfa->ioc,
+					hba_attr->option_rom_ver);
+	bfa_ioc_get_adapter_fw_ver(&port->fcs->bfa->ioc,
+					hba_attr->fw_version);
+
+	strlcpy(hba_attr->driver_version, (char *)driver_info->version,
+		sizeof(hba_attr->driver_version));
+
+	strlcpy(hba_attr->os_name, driver_info->host_os_name,
+		sizeof(hba_attr->os_name));
+
+	/*
+	 * If there is a patch level, append it
+	 * to the os name along with a separator
+	 */
+	if (driver_info->host_os_patch[0] != '\0') {
+		strlcat(hba_attr->os_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+			sizeof(hba_attr->os_name));
+		strlcat(hba_attr->os_name, driver_info->host_os_patch,
+				sizeof(hba_attr->os_name));
+	}
+
+	/* Retrieve the max frame size from the port attr */
+	bfa_fcs_fdmi_get_portattr(fdmi, &fcs_port_attr);
+	hba_attr->max_ct_pyld = fcs_port_attr.max_frm_size;
+
+	strlcpy(hba_attr->node_sym_name.symname,
+		port->port_cfg.node_sym_name.symname, BFA_SYMNAME_MAXLEN);
+	strlcpy(hba_attr->vendor_info, "QLogic",
+		sizeof(hba_attr->vendor_info));
+	hba_attr->num_ports =
+		cpu_to_be32(bfa_ioc_get_nports(&port->fcs->bfa->ioc));
+	hba_attr->fabric_name = port->fabric->lps->pr_nwwn;
+	strlcpy(hba_attr->bios_ver, hba_attr->option_rom_ver, BFA_VERSION_LEN);
+}
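+
+/*
+ * Example of the OS-name composition above (hypothetical strings): with
+ * host_os_name "Linux 4.19" and host_os_patch "p1", os_name becomes
+ * "Linux 4.19" + BFA_FCS_PORT_SYMBNAME_SEPARATOR + "p1", with strlcat()
+ * truncating safely if the result would overflow the field.
+ */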
+
+static void
+bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
+			  struct bfa_fcs_fdmi_port_attr_s *port_attr)
+{
+	struct bfa_fcs_lport_s *port = fdmi->ms->port;
+	struct bfa_fcs_driver_info_s  *driver_info = &port->fcs->driver_info;
+	struct bfa_port_attr_s pport_attr;
+	struct bfa_lport_attr_s lport_attr;
+
+	memset(port_attr, 0, sizeof(struct bfa_fcs_fdmi_port_attr_s));
+
+	/*
+	 * get pport attributes from hal
+	 */
+	bfa_fcport_get_attr(port->fcs->bfa, &pport_attr);
+
+	/*
+	 * get FC4 type Bitmask
+	 */
+	fc_get_fc4type_bitmask(FC_TYPE_FCP, port_attr->supp_fc4_types);
+
+	/*
+	 * Supported Speeds
+	 */
+	switch (pport_attr.speed_supported) {
+	case BFA_PORT_SPEED_16GBPS:
+		port_attr->supp_speed =
+			cpu_to_be32(BFA_FCS_FDMI_SUPP_SPEEDS_16G);
+		break;
+
+	case BFA_PORT_SPEED_10GBPS:
+		port_attr->supp_speed =
+			cpu_to_be32(BFA_FCS_FDMI_SUPP_SPEEDS_10G);
+		break;
+
+	case BFA_PORT_SPEED_8GBPS:
+		port_attr->supp_speed =
+			cpu_to_be32(BFA_FCS_FDMI_SUPP_SPEEDS_8G);
+		break;
+
+	case BFA_PORT_SPEED_4GBPS:
+		port_attr->supp_speed =
+			cpu_to_be32(BFA_FCS_FDMI_SUPP_SPEEDS_4G);
+		break;
+
+	default:
+		bfa_sm_fault(port->fcs, pport_attr.speed_supported);
+	}
+
+	/*
+	 * Current Speed
+	 */
+	port_attr->curr_speed = cpu_to_be32(
+				bfa_fcs_fdmi_convert_speed(pport_attr.speed));
+
+	/*
+	 * Max PDU Size.
+	 */
+	port_attr->max_frm_size = cpu_to_be32(pport_attr.pport_cfg.maxfrsize);
+
+	/*
+	 * OS device Name
+	 */
+	strlcpy(port_attr->os_device_name, driver_info->os_device_name,
+		sizeof(port_attr->os_device_name));
+
+	/*
+	 * Host name
+	 */
+	strlcpy(port_attr->host_name, driver_info->host_machine_name,
+		sizeof(port_attr->host_name));
+
+	port_attr->node_name = bfa_fcs_lport_get_nwwn(port);
+	port_attr->port_name = bfa_fcs_lport_get_pwwn(port);
+
+	strlcpy(port_attr->port_sym_name.symname,
+		bfa_fcs_lport_get_psym_name(port).symname, BFA_SYMNAME_MAXLEN);
+	bfa_fcs_lport_get_attr(port, &lport_attr);
+	port_attr->port_type = cpu_to_be32(lport_attr.port_type);
+	port_attr->scos = pport_attr.cos_supported;
+	port_attr->port_fabric_name = port->fabric->lps->pr_nwwn;
+	fc_get_fc4type_bitmask(FC_TYPE_FCP, port_attr->port_act_fc4_type);
+	port_attr->port_state = cpu_to_be32(pport_attr.port_state);
+	port_attr->num_ports = cpu_to_be32(port->num_rports);
+}
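+
+/*
+ * Note: a speed capability not handled by the switch above lands in
+ * bfa_sm_fault() instead of mapping to a default supported-speeds
+ * bitmask, so any new link speed needs an explicit case here.
+ */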
+
+/*
+ * Convert BFA speed to FDMI format.
+ */
+u32
+bfa_fcs_fdmi_convert_speed(bfa_port_speed_t pport_speed)
+{
+	u32	ret;
+
+	switch (pport_speed) {
+	case BFA_PORT_SPEED_1GBPS:
+	case BFA_PORT_SPEED_2GBPS:
+		ret = pport_speed;
+		break;
+
+	case BFA_PORT_SPEED_4GBPS:
+		ret = FDMI_TRANS_SPEED_4G;
+		break;
+
+	case BFA_PORT_SPEED_8GBPS:
+		ret = FDMI_TRANS_SPEED_8G;
+		break;
+
+	case BFA_PORT_SPEED_10GBPS:
+		ret = FDMI_TRANS_SPEED_10G;
+		break;
+
+	case BFA_PORT_SPEED_16GBPS:
+		ret = FDMI_TRANS_SPEED_16G;
+		break;
+
+	default:
+		ret = FDMI_TRANS_SPEED_UNKNOWN;
+	}
+	return ret;
+}
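+
+/*
+ * The 1G/2G cases above pass the BFA enum value through unchanged,
+ * which appears to rely on BFA_PORT_SPEED_1GBPS/2GBPS sharing their
+ * numeric values with the corresponding FDMI transmission-speed
+ * encoding; all other speeds are translated explicitly, and anything
+ * unrecognized reports FDMI_TRANS_SPEED_UNKNOWN.
+ */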
+
+void
+bfa_fcs_lport_fdmi_init(struct bfa_fcs_lport_ms_s *ms)
+{
+	struct bfa_fcs_lport_fdmi_s *fdmi = &ms->fdmi;
+
+	fdmi->ms = ms;
+	if (ms->port->fcs->fdmi_enabled)
+		bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
+	else
+		bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_disabled);
+}
+
+void
+bfa_fcs_lport_fdmi_offline(struct bfa_fcs_lport_ms_s *ms)
+{
+	struct bfa_fcs_lport_fdmi_s *fdmi = &ms->fdmi;
+
+	fdmi->ms = ms;
+	bfa_sm_send_event(fdmi, FDMISM_EVENT_PORT_OFFLINE);
+}
+
+void
+bfa_fcs_lport_fdmi_online(struct bfa_fcs_lport_ms_s *ms)
+{
+	struct bfa_fcs_lport_fdmi_s *fdmi = &ms->fdmi;
+
+	fdmi->ms = ms;
+	bfa_sm_send_event(fdmi, FDMISM_EVENT_PORT_ONLINE);
+}
+
+#define BFA_FCS_MS_CMD_MAX_RETRIES  2
+
+/*
+ * forward declarations
+ */
+static void     bfa_fcs_lport_ms_send_plogi(void *ms_cbarg,
+					   struct bfa_fcxp_s *fcxp_alloced);
+static void     bfa_fcs_lport_ms_timeout(void *arg);
+static void     bfa_fcs_lport_ms_plogi_response(void *fcsarg,
+					       struct bfa_fcxp_s *fcxp,
+					       void *cbarg,
+					       bfa_status_t req_status,
+					       u32 rsp_len,
+					       u32 resid_len,
+					       struct fchs_s *rsp_fchs);
+
+static void	bfa_fcs_lport_ms_send_gmal(void *ms_cbarg,
+					struct bfa_fcxp_s *fcxp_alloced);
+static void     bfa_fcs_lport_ms_gmal_response(void *fcsarg,
+					       struct bfa_fcxp_s *fcxp,
+					       void *cbarg,
+					       bfa_status_t req_status,
+					       u32 rsp_len,
+					       u32 resid_len,
+					       struct fchs_s *rsp_fchs);
+static void	bfa_fcs_lport_ms_send_gfn(void *ms_cbarg,
+					struct bfa_fcxp_s *fcxp_alloced);
+static void     bfa_fcs_lport_ms_gfn_response(void *fcsarg,
+					       struct bfa_fcxp_s *fcxp,
+					       void *cbarg,
+					       bfa_status_t req_status,
+					       u32 rsp_len,
+					       u32 resid_len,
+					       struct fchs_s *rsp_fchs);
+/*
+ *  fcs_ms_sm FCS MS state machine
+ */
+
+/*
+ *  MS State Machine events
+ */
+enum port_ms_event {
+	MSSM_EVENT_PORT_ONLINE = 1,
+	MSSM_EVENT_PORT_OFFLINE = 2,
+	MSSM_EVENT_RSP_OK = 3,
+	MSSM_EVENT_RSP_ERROR = 4,
+	MSSM_EVENT_TIMEOUT = 5,
+	MSSM_EVENT_FCXP_SENT = 6,
+	MSSM_EVENT_PORT_FABRIC_RSCN = 7
+};
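+
+/*
+ * Sequence driven by this state machine:
+ *
+ *	PLOGI -> GMAL (switch management IP) -> GFN (fabric name)
+ *	      -> online
+ *
+ * Vports go online right after PLOGI and skip GMAL/GFN. GMAL and GFN
+ * are each bounded by BFA_FCS_MS_CMD_MAX_RETRIES, after which the SM
+ * moves on to the next step instead of failing; a fabric RSCN while
+ * online re-issues GFN.
+ */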
+
+static void     bfa_fcs_lport_ms_sm_offline(struct bfa_fcs_lport_ms_s *ms,
+					   enum port_ms_event event);
+static void     bfa_fcs_lport_ms_sm_plogi_sending(struct bfa_fcs_lport_ms_s *ms,
+						 enum port_ms_event event);
+static void     bfa_fcs_lport_ms_sm_plogi(struct bfa_fcs_lport_ms_s *ms,
+					 enum port_ms_event event);
+static void     bfa_fcs_lport_ms_sm_plogi_retry(struct bfa_fcs_lport_ms_s *ms,
+					       enum port_ms_event event);
+static void     bfa_fcs_lport_ms_sm_gmal_sending(struct bfa_fcs_lport_ms_s *ms,
+						 enum port_ms_event event);
+static void     bfa_fcs_lport_ms_sm_gmal(struct bfa_fcs_lport_ms_s *ms,
+					 enum port_ms_event event);
+static void     bfa_fcs_lport_ms_sm_gmal_retry(struct bfa_fcs_lport_ms_s *ms,
+					       enum port_ms_event event);
+static void     bfa_fcs_lport_ms_sm_gfn_sending(struct bfa_fcs_lport_ms_s *ms,
+						 enum port_ms_event event);
+static void     bfa_fcs_lport_ms_sm_gfn(struct bfa_fcs_lport_ms_s *ms,
+					 enum port_ms_event event);
+static void     bfa_fcs_lport_ms_sm_gfn_retry(struct bfa_fcs_lport_ms_s *ms,
+					       enum port_ms_event event);
+static void     bfa_fcs_lport_ms_sm_online(struct bfa_fcs_lport_ms_s *ms,
+					  enum port_ms_event event);
+/*
+ *	Start in offline state - awaiting NS to send start.
+ */
+static void
+bfa_fcs_lport_ms_sm_offline(struct bfa_fcs_lport_ms_s *ms,
+				enum port_ms_event event)
+{
+	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
+	bfa_trc(ms->port->fcs, event);
+
+	switch (event) {
+	case MSSM_EVENT_PORT_ONLINE:
+		bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_plogi_sending);
+		bfa_fcs_lport_ms_send_plogi(ms, NULL);
+		break;
+
+	case MSSM_EVENT_PORT_OFFLINE:
+		break;
+
+	default:
+		bfa_sm_fault(ms->port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_ms_sm_plogi_sending(struct bfa_fcs_lport_ms_s *ms,
+				enum port_ms_event event)
+{
+	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
+	bfa_trc(ms->port->fcs, event);
+
+	switch (event) {
+	case MSSM_EVENT_FCXP_SENT:
+		bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_plogi);
+		break;
+
+	case MSSM_EVENT_PORT_OFFLINE:
+		bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
+		bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
+					   &ms->fcxp_wqe);
+		break;
+
+	default:
+		bfa_sm_fault(ms->port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_ms_sm_plogi(struct bfa_fcs_lport_ms_s *ms,
+			enum port_ms_event event)
+{
+	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
+	bfa_trc(ms->port->fcs, event);
+
+	switch (event) {
+	case MSSM_EVENT_RSP_ERROR:
+		/*
+		 * Start timer for a delayed retry
+		 */
+		bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_plogi_retry);
+		ms->port->stats.ms_retries++;
+		bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
+				    &ms->timer, bfa_fcs_lport_ms_timeout, ms,
+				    BFA_FCS_RETRY_TIMEOUT);
+		break;
+
+	case MSSM_EVENT_RSP_OK:
+		/*
+		 * Since PLOGI is done, invoke the MS-related sub-modules.
+		 */
+		bfa_fcs_lport_fdmi_online(ms);
+
+		/*
+		 * if this is a Vport, go to online state.
+		 */
+		if (ms->port->vport) {
+			bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_online);
+			break;
+		}
+
+		/*
+		 * For a base port we need to get the
+		 * switch's IP address.
+		 */
+		bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gmal_sending);
+		bfa_fcs_lport_ms_send_gmal(ms, NULL);
+		break;
+
+	case MSSM_EVENT_PORT_OFFLINE:
+		bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
+		bfa_fcxp_discard(ms->fcxp);
+		break;
+
+	default:
+		bfa_sm_fault(ms->port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_ms_sm_plogi_retry(struct bfa_fcs_lport_ms_s *ms,
+			enum port_ms_event event)
+{
+	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
+	bfa_trc(ms->port->fcs, event);
+
+	switch (event) {
+	case MSSM_EVENT_TIMEOUT:
+		/*
+		 * Retry Timer Expired. Re-send
+		 */
+		bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_plogi_sending);
+		bfa_fcs_lport_ms_send_plogi(ms, NULL);
+		break;
+
+	case MSSM_EVENT_PORT_OFFLINE:
+		bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
+		bfa_timer_stop(&ms->timer);
+		break;
+
+	default:
+		bfa_sm_fault(ms->port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_ms_sm_online(struct bfa_fcs_lport_ms_s *ms,
+			enum port_ms_event event)
+{
+	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
+	bfa_trc(ms->port->fcs, event);
+
+	switch (event) {
+	case MSSM_EVENT_PORT_OFFLINE:
+		bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
+		break;
+
+	case MSSM_EVENT_PORT_FABRIC_RSCN:
+		bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn_sending);
+		ms->retry_cnt = 0;
+		bfa_fcs_lport_ms_send_gfn(ms, NULL);
+		break;
+
+	default:
+		bfa_sm_fault(ms->port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_ms_sm_gmal_sending(struct bfa_fcs_lport_ms_s *ms,
+				enum port_ms_event event)
+{
+	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
+	bfa_trc(ms->port->fcs, event);
+
+	switch (event) {
+	case MSSM_EVENT_FCXP_SENT:
+		bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gmal);
+		break;
+
+	case MSSM_EVENT_PORT_OFFLINE:
+		bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
+		bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
+					   &ms->fcxp_wqe);
+		break;
+
+	default:
+		bfa_sm_fault(ms->port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_ms_sm_gmal(struct bfa_fcs_lport_ms_s *ms,
+				enum port_ms_event event)
+{
+	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
+	bfa_trc(ms->port->fcs, event);
+
+	switch (event) {
+	case MSSM_EVENT_RSP_ERROR:
+		/*
+		 * Start timer for a delayed retry
+		 */
+		if (ms->retry_cnt++ < BFA_FCS_MS_CMD_MAX_RETRIES) {
+			bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gmal_retry);
+			ms->port->stats.ms_retries++;
+			bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
+				&ms->timer, bfa_fcs_lport_ms_timeout, ms,
+				BFA_FCS_RETRY_TIMEOUT);
+		} else {
+			bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn_sending);
+			bfa_fcs_lport_ms_send_gfn(ms, NULL);
+			ms->retry_cnt = 0;
+		}
+		break;
+
+	case MSSM_EVENT_RSP_OK:
+		bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn_sending);
+		bfa_fcs_lport_ms_send_gfn(ms, NULL);
+		break;
+
+	case MSSM_EVENT_PORT_OFFLINE:
+		bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
+		bfa_fcxp_discard(ms->fcxp);
+		break;
+
+	default:
+		bfa_sm_fault(ms->port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_ms_sm_gmal_retry(struct bfa_fcs_lport_ms_s *ms,
+				enum port_ms_event event)
+{
+	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
+	bfa_trc(ms->port->fcs, event);
+
+	switch (event) {
+	case MSSM_EVENT_TIMEOUT:
+		/*
+		 * Retry Timer Expired. Re-send
+		 */
+		bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gmal_sending);
+		bfa_fcs_lport_ms_send_gmal(ms, NULL);
+		break;
+
+	case MSSM_EVENT_PORT_OFFLINE:
+		bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
+		bfa_timer_stop(&ms->timer);
+		break;
+
+	default:
+		bfa_sm_fault(ms->port->fcs, event);
+	}
+}
+
+/*
+ *  ms_pvt MS local functions
+ */
+
+static void
+bfa_fcs_lport_ms_send_gmal(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+	struct bfa_fcs_lport_ms_s *ms = ms_cbarg;
+	bfa_fcs_lport_t *port = ms->port;
+	struct fchs_s	fchs;
+	int		len;
+	struct bfa_fcxp_s *fcxp;
+
+	bfa_trc(port->fcs, port->pid);
+
+	fcxp = fcxp_alloced ? fcxp_alloced :
+	       bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
+	if (!fcxp) {
+		bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe,
+				bfa_fcs_lport_ms_send_gmal, ms, BFA_TRUE);
+		return;
+	}
+	ms->fcxp = fcxp;
+
+	len = fc_gmal_req_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+			     bfa_fcs_lport_get_fcid(port),
+				 port->fabric->lps->pr_nwwn);
+
+	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+			  FC_CLASS_3, len, &fchs,
+			  bfa_fcs_lport_ms_gmal_response, (void *)ms,
+			  FC_MAX_PDUSZ, FC_FCCT_TOV);
+
+	bfa_sm_send_event(ms, MSSM_EVENT_FCXP_SENT);
+}
+
+static void
+bfa_fcs_lport_ms_gmal_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
+				void *cbarg, bfa_status_t req_status,
+				u32 rsp_len, u32 resid_len,
+				struct fchs_s *rsp_fchs)
+{
+	struct bfa_fcs_lport_ms_s *ms = (struct bfa_fcs_lport_ms_s *) cbarg;
+	bfa_fcs_lport_t *port = ms->port;
+	struct ct_hdr_s		*cthdr = NULL;
+	struct fcgs_gmal_resp_s *gmal_resp;
+	struct fcgs_gmal_entry_s *gmal_entry;
+	u32		num_entries;
+	u8			*rsp_str;
+
+	bfa_trc(port->fcs, req_status);
+	bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+	/*
+	 * Sanity Checks
+	 */
+	if (req_status != BFA_STATUS_OK) {
+		bfa_trc(port->fcs, req_status);
+		bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
+		return;
+	}
+
+	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
+	cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
+
+	if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
+		gmal_resp = (struct fcgs_gmal_resp_s *)(cthdr + 1);
+
+		num_entries = be32_to_cpu(gmal_resp->ms_len);
+		if (num_entries == 0) {
+			bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
+			return;
+		}
+		/*
+		 * The response can contain multiple entries (SNMP
+		 * interface, etc.). Scan for the first entry with an
+		 * "http://" prefix; it carries the switch's management
+		 * IP address.
+		 */
+
+		gmal_entry = (struct fcgs_gmal_entry_s *)gmal_resp->ms_ma;
+		while (num_entries > 0) {
+			if (strncmp(gmal_entry->prefix,
+				CT_GMAL_RESP_PREFIX_HTTP,
+				sizeof(gmal_entry->prefix)) == 0) {
+
+				/*
+				 * If the IP address is terminated with a
+				 * '/', remove it. Byte 0 holds the length
+				 * of the string.
+				 */
+				rsp_str = &(gmal_entry->prefix[0]);
+				if (rsp_str[gmal_entry->len-1] == '/')
+					rsp_str[gmal_entry->len-1] = 0;
+
+				/* copy IP Address to fabric */
+				strlcpy(bfa_fcs_lport_get_fabric_ipaddr(port),
+					gmal_entry->ip_addr,
+					BFA_FCS_FABRIC_IPADDR_SZ);
+				break;
+			} else {
+				--num_entries;
+				++gmal_entry;
+			}
+		}
+
+		bfa_sm_send_event(ms, MSSM_EVENT_RSP_OK);
+		return;
+	}
+
+	bfa_trc(port->fcs, cthdr->reason_code);
+	bfa_trc(port->fcs, cthdr->exp_code);
+	bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
+}
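+
+/*
+ * Illustrative GMAL entry (hypothetical address): for an entry of
+ * "http://10.10.10.10/", the trailing '/' is stripped in place and the
+ * address portion is copied into the fabric's IP-address field, giving
+ * "10.10.10.10" as the switch management address.
+ */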
+
+static void
+bfa_fcs_lport_ms_sm_gfn_sending(struct bfa_fcs_lport_ms_s *ms,
+			enum port_ms_event event)
+{
+	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
+	bfa_trc(ms->port->fcs, event);
+
+	switch (event) {
+	case MSSM_EVENT_FCXP_SENT:
+		bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn);
+		break;
+
+	case MSSM_EVENT_PORT_OFFLINE:
+		bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
+		bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
+					   &ms->fcxp_wqe);
+		break;
+
+	default:
+		bfa_sm_fault(ms->port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_ms_sm_gfn(struct bfa_fcs_lport_ms_s *ms,
+			enum port_ms_event event)
+{
+	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
+	bfa_trc(ms->port->fcs, event);
+
+	switch (event) {
+	case MSSM_EVENT_RSP_ERROR:
+		/*
+		 * Start timer for a delayed retry
+		 */
+		if (ms->retry_cnt++ < BFA_FCS_MS_CMD_MAX_RETRIES) {
+			bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn_retry);
+			ms->port->stats.ms_retries++;
+			bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
+				&ms->timer, bfa_fcs_lport_ms_timeout, ms,
+				BFA_FCS_RETRY_TIMEOUT);
+		} else {
+			bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_online);
+			ms->retry_cnt = 0;
+		}
+		break;
+
+	case MSSM_EVENT_RSP_OK:
+		bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_online);
+		break;
+
+	case MSSM_EVENT_PORT_OFFLINE:
+		bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
+		bfa_fcxp_discard(ms->fcxp);
+		break;
+
+	default:
+		bfa_sm_fault(ms->port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_ms_sm_gfn_retry(struct bfa_fcs_lport_ms_s *ms,
+				enum port_ms_event event)
+{
+	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
+	bfa_trc(ms->port->fcs, event);
+
+	switch (event) {
+	case MSSM_EVENT_TIMEOUT:
+		/*
+		 * Retry Timer Expired. Re-send
+		 */
+		bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn_sending);
+		bfa_fcs_lport_ms_send_gfn(ms, NULL);
+		break;
+
+	case MSSM_EVENT_PORT_OFFLINE:
+		bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
+		bfa_timer_stop(&ms->timer);
+		break;
+
+	default:
+		bfa_sm_fault(ms->port->fcs, event);
+	}
+}
+
+/*
+ *  ms_pvt MS local functions
+ */
+
+static void
+bfa_fcs_lport_ms_send_gfn(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+	struct bfa_fcs_lport_ms_s *ms = ms_cbarg;
+	bfa_fcs_lport_t *port = ms->port;
+	struct fchs_s		fchs;
+	int			len;
+	struct bfa_fcxp_s *fcxp;
+
+	bfa_trc(port->fcs, port->pid);
+
+	fcxp = fcxp_alloced ? fcxp_alloced :
+	       bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
+	if (!fcxp) {
+		bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe,
+				bfa_fcs_lport_ms_send_gfn, ms, BFA_TRUE);
+		return;
+	}
+	ms->fcxp = fcxp;
+
+	len = fc_gfn_req_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+			     bfa_fcs_lport_get_fcid(port),
+				 port->fabric->lps->pr_nwwn);
+
+	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+			  FC_CLASS_3, len, &fchs,
+			  bfa_fcs_lport_ms_gfn_response, (void *)ms,
+			  FC_MAX_PDUSZ, FC_FCCT_TOV);
+
+	bfa_sm_send_event(ms, MSSM_EVENT_FCXP_SENT);
+}
+
+static void
+bfa_fcs_lport_ms_gfn_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
+			void *cbarg, bfa_status_t req_status, u32 rsp_len,
+			u32 resid_len, struct fchs_s *rsp_fchs)
+{
+	struct bfa_fcs_lport_ms_s *ms = (struct bfa_fcs_lport_ms_s *) cbarg;
+	bfa_fcs_lport_t *port = ms->port;
+	struct ct_hdr_s	*cthdr = NULL;
+	wwn_t	       *gfn_resp;
+
+	bfa_trc(port->fcs, req_status);
+	bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+	/*
+	 * Sanity Checks
+	 */
+	if (req_status != BFA_STATUS_OK) {
+		bfa_trc(port->fcs, req_status);
+		bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
+		return;
+	}
+
+	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
+	cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
+
+	if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
+		gfn_resp = (wwn_t *)(cthdr + 1);
+		/* check if it has actually changed */
+		if ((memcmp((void *)&bfa_fcs_lport_get_fabric_name(port),
+				gfn_resp, sizeof(wwn_t)) != 0)) {
+			bfa_fcs_fabric_set_fabric_name(port->fabric, *gfn_resp);
+		}
+		bfa_sm_send_event(ms, MSSM_EVENT_RSP_OK);
+		return;
+	}
+
+	bfa_trc(port->fcs, cthdr->reason_code);
+	bfa_trc(port->fcs, cthdr->exp_code);
+	bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
+}
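+
+/*
+ * The fabric name returned by GFN is propagated only when it differs
+ * from the cached value, so an RSCN on an unchanged fabric does not
+ * re-trigger fabric-name processing.
+ */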
+
+/*
+ *  ms_pvt MS local functions
+ */
+
+static void
+bfa_fcs_lport_ms_send_plogi(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+	struct bfa_fcs_lport_ms_s *ms = ms_cbarg;
+	struct bfa_fcs_lport_s *port = ms->port;
+	struct fchs_s	fchs;
+	int	len;
+	struct bfa_fcxp_s *fcxp;
+
+	bfa_trc(port->fcs, port->pid);
+
+	fcxp = fcxp_alloced ? fcxp_alloced :
+	       bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
+	if (!fcxp) {
+		port->stats.ms_plogi_alloc_wait++;
+		bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe,
+				bfa_fcs_lport_ms_send_plogi, ms, BFA_TRUE);
+		return;
+	}
+	ms->fcxp = fcxp;
+
+	len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+			     bfa_hton3b(FC_MGMT_SERVER),
+			     bfa_fcs_lport_get_fcid(port), 0,
+			     port->port_cfg.pwwn, port->port_cfg.nwwn,
+			     bfa_fcport_get_maxfrsize(port->fcs->bfa),
+			     bfa_fcport_get_rx_bbcredit(port->fcs->bfa));
+
+	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+			  FC_CLASS_3, len, &fchs,
+			  bfa_fcs_lport_ms_plogi_response, (void *)ms,
+			  FC_MAX_PDUSZ, FC_ELS_TOV);
+
+	port->stats.ms_plogi_sent++;
+	bfa_sm_send_event(ms, MSSM_EVENT_FCXP_SENT);
+}
+
+static void
+bfa_fcs_lport_ms_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
+			void *cbarg, bfa_status_t req_status,
+			u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs)
+{
+	struct bfa_fcs_lport_ms_s *ms = (struct bfa_fcs_lport_ms_s *) cbarg;
+	struct bfa_fcs_lport_s *port = ms->port;
+	struct fc_els_cmd_s *els_cmd;
+	struct fc_ls_rjt_s *ls_rjt;
+
+	bfa_trc(port->fcs, req_status);
+	bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+	/*
+	 * Sanity Checks
+	 */
+	if (req_status != BFA_STATUS_OK) {
+		port->stats.ms_plogi_rsp_err++;
+		bfa_trc(port->fcs, req_status);
+		bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
+		return;
+	}
+
+	els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp);
+
+	switch (els_cmd->els_code) {
+
+	case FC_ELS_ACC:
+		if (rsp_len < sizeof(struct fc_logi_s)) {
+			bfa_trc(port->fcs, rsp_len);
+			port->stats.ms_plogi_acc_err++;
+			bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
+			break;
+		}
+		port->stats.ms_plogi_accepts++;
+		bfa_sm_send_event(ms, MSSM_EVENT_RSP_OK);
+		break;
+
+	case FC_ELS_LS_RJT:
+		ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp);
+
+		bfa_trc(port->fcs, ls_rjt->reason_code);
+		bfa_trc(port->fcs, ls_rjt->reason_code_expl);
+
+		port->stats.ms_rejects++;
+		bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
+		break;
+
+	default:
+		port->stats.ms_plogi_unknown_rsp++;
+		bfa_trc(port->fcs, els_cmd->els_code);
+		bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
+	}
+}
+
+static void
+bfa_fcs_lport_ms_timeout(void *arg)
+{
+	struct bfa_fcs_lport_ms_s *ms = (struct bfa_fcs_lport_ms_s *) arg;
+
+	ms->port->stats.ms_timeouts++;
+	bfa_sm_send_event(ms, MSSM_EVENT_TIMEOUT);
+}
+
+void
+bfa_fcs_lport_ms_init(struct bfa_fcs_lport_s *port)
+{
+	struct bfa_fcs_lport_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port);
+
+	ms->port = port;
+	bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
+
+	/*
+	 * Invoke init routines of sub modules.
+	 */
+	bfa_fcs_lport_fdmi_init(ms);
+}
+
+void
+bfa_fcs_lport_ms_offline(struct bfa_fcs_lport_s *port)
+{
+	struct bfa_fcs_lport_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port);
+
+	ms->port = port;
+	bfa_sm_send_event(ms, MSSM_EVENT_PORT_OFFLINE);
+	bfa_fcs_lport_fdmi_offline(ms);
+}
+
+void
+bfa_fcs_lport_ms_online(struct bfa_fcs_lport_s *port)
+{
+	struct bfa_fcs_lport_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port);
+
+	ms->port = port;
+	bfa_sm_send_event(ms, MSSM_EVENT_PORT_ONLINE);
+}
+
+void
+bfa_fcs_lport_ms_fabric_rscn(struct bfa_fcs_lport_s *port)
+{
+	struct bfa_fcs_lport_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port);
+
+	/* Handle this only when in the Online state */
+	if (bfa_sm_cmp_state(ms, bfa_fcs_lport_ms_sm_online))
+		bfa_sm_send_event(ms, MSSM_EVENT_PORT_FABRIC_RSCN);
+}
+
+/*
+ * @page ns_sm_info VPORT NS State Machine
+ *
+ * @section ns_sm_interactions VPORT NS State Machine Interactions
+ *
+ * @section ns_sm VPORT NS State Machine
+ * State diagram: ns_sm.jpg
+ */
+
+/*
+ * forward declarations
+ */
+static void     bfa_fcs_lport_ns_send_plogi(void *ns_cbarg,
+					   struct bfa_fcxp_s *fcxp_alloced);
+static void     bfa_fcs_lport_ns_send_rspn_id(void *ns_cbarg,
+					     struct bfa_fcxp_s *fcxp_alloced);
+static void     bfa_fcs_lport_ns_send_rft_id(void *ns_cbarg,
+					    struct bfa_fcxp_s *fcxp_alloced);
+static void     bfa_fcs_lport_ns_send_rff_id(void *ns_cbarg,
+					    struct bfa_fcxp_s *fcxp_alloced);
+static void     bfa_fcs_lport_ns_send_gid_ft(void *ns_cbarg,
+					    struct bfa_fcxp_s *fcxp_alloced);
+static void	bfa_fcs_lport_ns_send_rnn_id(void *ns_cbarg,
+					struct bfa_fcxp_s *fcxp_alloced);
+static void	bfa_fcs_lport_ns_send_rsnn_nn(void *ns_cbarg,
+					struct bfa_fcxp_s *fcxp_alloced);
+static void     bfa_fcs_lport_ns_timeout(void *arg);
+static void     bfa_fcs_lport_ns_plogi_response(void *fcsarg,
+					       struct bfa_fcxp_s *fcxp,
+					       void *cbarg,
+					       bfa_status_t req_status,
+					       u32 rsp_len,
+					       u32 resid_len,
+					       struct fchs_s *rsp_fchs);
+static void     bfa_fcs_lport_ns_rspn_id_response(void *fcsarg,
+						 struct bfa_fcxp_s *fcxp,
+						 void *cbarg,
+						 bfa_status_t req_status,
+						 u32 rsp_len,
+						 u32 resid_len,
+						 struct fchs_s *rsp_fchs);
+static void     bfa_fcs_lport_ns_rft_id_response(void *fcsarg,
+						struct bfa_fcxp_s *fcxp,
+						void *cbarg,
+						bfa_status_t req_status,
+						u32 rsp_len,
+						u32 resid_len,
+						struct fchs_s *rsp_fchs);
+static void     bfa_fcs_lport_ns_rff_id_response(void *fcsarg,
+						struct bfa_fcxp_s *fcxp,
+						void *cbarg,
+						bfa_status_t req_status,
+						u32 rsp_len,
+						u32 resid_len,
+						struct fchs_s *rsp_fchs);
+static void     bfa_fcs_lport_ns_gid_ft_response(void *fcsarg,
+						struct bfa_fcxp_s *fcxp,
+						void *cbarg,
+						bfa_status_t req_status,
+						u32 rsp_len,
+						u32 resid_len,
+						struct fchs_s *rsp_fchs);
+static void     bfa_fcs_lport_ns_rnn_id_response(void *fcsarg,
+						struct bfa_fcxp_s *fcxp,
+						void *cbarg,
+						bfa_status_t req_status,
+						u32 rsp_len,
+						u32 resid_len,
+						struct fchs_s *rsp_fchs);
+static void     bfa_fcs_lport_ns_rsnn_nn_response(void *fcsarg,
+						struct bfa_fcxp_s *fcxp,
+						void *cbarg,
+						bfa_status_t req_status,
+						u32 rsp_len,
+						u32 resid_len,
+						struct fchs_s *rsp_fchs);
+static void     bfa_fcs_lport_ns_process_gidft_pids(
+				struct bfa_fcs_lport_s *port,
+				u32 *pid_buf, u32 n_pids);
+
+static void bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port);
+/*
+ *  fcs_ns_sm FCS nameserver interface state machine
+ */
+
+/*
+ * VPort NS State Machine events
+ */
+enum vport_ns_event {
+	NSSM_EVENT_PORT_ONLINE = 1,
+	NSSM_EVENT_PORT_OFFLINE = 2,
+	NSSM_EVENT_PLOGI_SENT = 3,
+	NSSM_EVENT_RSP_OK = 4,
+	NSSM_EVENT_RSP_ERROR = 5,
+	NSSM_EVENT_TIMEOUT = 6,
+	NSSM_EVENT_NS_QUERY = 7,
+	NSSM_EVENT_RSPNID_SENT = 8,
+	NSSM_EVENT_RFTID_SENT = 9,
+	NSSM_EVENT_RFFID_SENT = 10,
+	NSSM_EVENT_GIDFT_SENT = 11,
+	NSSM_EVENT_RNNID_SENT = 12,
+	NSSM_EVENT_RSNN_NN_SENT = 13,
+};
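+
+/*
+ * Registration sequence driven by this state machine, one CT request
+ * per step:
+ *
+ *	PLOGI -> RNN_ID -> RSNN_NN -> RSPN_ID -> RFT_ID -> RFF_ID
+ *	      -> GID_FT (initiator mode only) -> online
+ *
+ * RNN_ID and RSNN_NN errors are bounded by BFA_FCS_MAX_NS_RETRIES;
+ * once exhausted, the SM skips ahead to RSPN_ID rather than failing
+ * the whole sequence.
+ */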
+
+static void     bfa_fcs_lport_ns_sm_offline(struct bfa_fcs_lport_ns_s *ns,
+					   enum vport_ns_event event);
+static void     bfa_fcs_lport_ns_sm_plogi_sending(struct bfa_fcs_lport_ns_s *ns,
+						 enum vport_ns_event event);
+static void     bfa_fcs_lport_ns_sm_plogi(struct bfa_fcs_lport_ns_s *ns,
+					 enum vport_ns_event event);
+static void     bfa_fcs_lport_ns_sm_plogi_retry(struct bfa_fcs_lport_ns_s *ns,
+					       enum vport_ns_event event);
+static void     bfa_fcs_lport_ns_sm_sending_rspn_id(
+					struct bfa_fcs_lport_ns_s *ns,
+					enum vport_ns_event event);
+static void     bfa_fcs_lport_ns_sm_rspn_id(struct bfa_fcs_lport_ns_s *ns,
+					   enum vport_ns_event event);
+static void     bfa_fcs_lport_ns_sm_rspn_id_retry(struct bfa_fcs_lport_ns_s *ns,
+						 enum vport_ns_event event);
+static void     bfa_fcs_lport_ns_sm_sending_rft_id(
+					struct bfa_fcs_lport_ns_s *ns,
+					enum vport_ns_event event);
+static void     bfa_fcs_lport_ns_sm_rft_id_retry(struct bfa_fcs_lport_ns_s *ns,
+						enum vport_ns_event event);
+static void     bfa_fcs_lport_ns_sm_rft_id(struct bfa_fcs_lport_ns_s *ns,
+					  enum vport_ns_event event);
+static void     bfa_fcs_lport_ns_sm_sending_rff_id(
+					struct bfa_fcs_lport_ns_s *ns,
+					enum vport_ns_event event);
+static void     bfa_fcs_lport_ns_sm_rff_id_retry(struct bfa_fcs_lport_ns_s *ns,
+						enum vport_ns_event event);
+static void     bfa_fcs_lport_ns_sm_rff_id(struct bfa_fcs_lport_ns_s *ns,
+					  enum vport_ns_event event);
+static void     bfa_fcs_lport_ns_sm_sending_gid_ft(
+					struct bfa_fcs_lport_ns_s *ns,
+					enum vport_ns_event event);
+static void     bfa_fcs_lport_ns_sm_gid_ft(struct bfa_fcs_lport_ns_s *ns,
+					  enum vport_ns_event event);
+static void     bfa_fcs_lport_ns_sm_gid_ft_retry(struct bfa_fcs_lport_ns_s *ns,
+						enum vport_ns_event event);
+static void     bfa_fcs_lport_ns_sm_online(struct bfa_fcs_lport_ns_s *ns,
+					  enum vport_ns_event event);
+static void     bfa_fcs_lport_ns_sm_sending_rnn_id(
+					struct bfa_fcs_lport_ns_s *ns,
+					enum vport_ns_event event);
+static void     bfa_fcs_lport_ns_sm_rnn_id(struct bfa_fcs_lport_ns_s *ns,
+					enum vport_ns_event event);
+static void     bfa_fcs_lport_ns_sm_rnn_id_retry(struct bfa_fcs_lport_ns_s *ns,
+						enum vport_ns_event event);
+static void     bfa_fcs_lport_ns_sm_sending_rsnn_nn(
+					struct bfa_fcs_lport_ns_s *ns,
+					enum vport_ns_event event);
+static void     bfa_fcs_lport_ns_sm_rsnn_nn(struct bfa_fcs_lport_ns_s *ns,
+						enum vport_ns_event event);
+static void     bfa_fcs_lport_ns_sm_rsnn_nn_retry(
+					struct bfa_fcs_lport_ns_s *ns,
+					enum vport_ns_event event);
+/*
+ *	Start in offline state - awaiting linkup
+ */
+static void
+bfa_fcs_lport_ns_sm_offline(struct bfa_fcs_lport_ns_s *ns,
+			enum vport_ns_event event)
+{
+	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+	bfa_trc(ns->port->fcs, event);
+
+	switch (event) {
+	case NSSM_EVENT_PORT_ONLINE:
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_plogi_sending);
+		bfa_fcs_lport_ns_send_plogi(ns, NULL);
+		break;
+
+	case NSSM_EVENT_PORT_OFFLINE:
+		break;
+
+	default:
+		bfa_sm_fault(ns->port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_ns_sm_plogi_sending(struct bfa_fcs_lport_ns_s *ns,
+			enum vport_ns_event event)
+{
+	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+	bfa_trc(ns->port->fcs, event);
+
+	switch (event) {
+	case NSSM_EVENT_PLOGI_SENT:
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_plogi);
+		break;
+
+	case NSSM_EVENT_PORT_OFFLINE:
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+		bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
+					   &ns->fcxp_wqe);
+		break;
+
+	default:
+		bfa_sm_fault(ns->port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_ns_sm_plogi(struct bfa_fcs_lport_ns_s *ns,
+			enum vport_ns_event event)
+{
+	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+	bfa_trc(ns->port->fcs, event);
+
+	switch (event) {
+	case NSSM_EVENT_RSP_ERROR:
+		/*
+		 * Start timer for a delayed retry
+		 */
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_plogi_retry);
+		ns->port->stats.ns_retries++;
+		bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
+				    &ns->timer, bfa_fcs_lport_ns_timeout, ns,
+				    BFA_FCS_RETRY_TIMEOUT);
+		break;
+
+	case NSSM_EVENT_RSP_OK:
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rnn_id);
+		ns->num_rnnid_retries = 0;
+		bfa_fcs_lport_ns_send_rnn_id(ns, NULL);
+		break;
+
+	case NSSM_EVENT_PORT_OFFLINE:
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+		bfa_fcxp_discard(ns->fcxp);
+		break;
+
+	default:
+		bfa_sm_fault(ns->port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_ns_sm_plogi_retry(struct bfa_fcs_lport_ns_s *ns,
+				enum vport_ns_event event)
+{
+	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+	bfa_trc(ns->port->fcs, event);
+
+	switch (event) {
+	case NSSM_EVENT_TIMEOUT:
+		/*
+		 * Retry Timer Expired. Re-send
+		 */
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_plogi_sending);
+		bfa_fcs_lport_ns_send_plogi(ns, NULL);
+		break;
+
+	case NSSM_EVENT_PORT_OFFLINE:
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+		bfa_timer_stop(&ns->timer);
+		break;
+
+	default:
+		bfa_sm_fault(ns->port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_ns_sm_sending_rnn_id(struct bfa_fcs_lport_ns_s *ns,
+					enum vport_ns_event event)
+{
+	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+	bfa_trc(ns->port->fcs, event);
+
+	switch (event) {
+	case NSSM_EVENT_RNNID_SENT:
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rnn_id);
+		break;
+
+	case NSSM_EVENT_PORT_OFFLINE:
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+		bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
+						&ns->fcxp_wqe);
+		break;
+	default:
+		bfa_sm_fault(ns->port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_ns_sm_rnn_id(struct bfa_fcs_lport_ns_s *ns,
+				enum vport_ns_event event)
+{
+	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+	bfa_trc(ns->port->fcs, event);
+
+	switch (event) {
+	case NSSM_EVENT_RSP_OK:
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rsnn_nn);
+		ns->num_rnnid_retries = 0;
+		ns->num_rsnn_nn_retries = 0;
+		bfa_fcs_lport_ns_send_rsnn_nn(ns, NULL);
+		break;
+
+	case NSSM_EVENT_RSP_ERROR:
+		if (ns->num_rnnid_retries < BFA_FCS_MAX_NS_RETRIES) {
+			bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rnn_id_retry);
+			ns->port->stats.ns_retries++;
+			ns->num_rnnid_retries++;
+			bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
+				&ns->timer, bfa_fcs_lport_ns_timeout, ns,
+				BFA_FCS_RETRY_TIMEOUT);
+		} else {
+			bfa_sm_set_state(ns,
+				bfa_fcs_lport_ns_sm_sending_rspn_id);
+			bfa_fcs_lport_ns_send_rspn_id(ns, NULL);
+		}
+		break;
+
+	case NSSM_EVENT_PORT_OFFLINE:
+		bfa_fcxp_discard(ns->fcxp);
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+		break;
+
+	default:
+		bfa_sm_fault(ns->port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_ns_sm_rnn_id_retry(struct bfa_fcs_lport_ns_s *ns,
+				enum vport_ns_event event)
+{
+	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+	bfa_trc(ns->port->fcs, event);
+
+	switch (event) {
+	case NSSM_EVENT_TIMEOUT:
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rnn_id);
+		bfa_fcs_lport_ns_send_rnn_id(ns, NULL);
+		break;
+
+	case NSSM_EVENT_PORT_OFFLINE:
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+		bfa_timer_stop(&ns->timer);
+		break;
+
+	default:
+		bfa_sm_fault(ns->port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_ns_sm_sending_rsnn_nn(struct bfa_fcs_lport_ns_s *ns,
+					enum vport_ns_event event)
+{
+	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+	bfa_trc(ns->port->fcs, event);
+
+	switch (event) {
+	case NSSM_EVENT_RSNN_NN_SENT:
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rsnn_nn);
+		break;
+
+	case NSSM_EVENT_PORT_OFFLINE:
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+		bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
+			&ns->fcxp_wqe);
+		break;
+
+	default:
+		bfa_sm_fault(ns->port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_ns_sm_rsnn_nn(struct bfa_fcs_lport_ns_s *ns,
+				enum vport_ns_event event)
+{
+	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+	bfa_trc(ns->port->fcs, event);
+
+	switch (event) {
+	case NSSM_EVENT_RSP_OK:
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rspn_id);
+		ns->num_rsnn_nn_retries = 0;
+		bfa_fcs_lport_ns_send_rspn_id(ns, NULL);
+		break;
+
+	case NSSM_EVENT_RSP_ERROR:
+		if (ns->num_rsnn_nn_retries < BFA_FCS_MAX_NS_RETRIES) {
+			bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rsnn_nn_retry);
+			ns->port->stats.ns_retries++;
+			ns->num_rsnn_nn_retries++;
+			bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
+					&ns->timer, bfa_fcs_lport_ns_timeout,
+					ns, BFA_FCS_RETRY_TIMEOUT);
+		} else {
+			bfa_sm_set_state(ns,
+				bfa_fcs_lport_ns_sm_sending_rspn_id);
+			bfa_fcs_lport_ns_send_rspn_id(ns, NULL);
+		}
+		break;
+
+	case NSSM_EVENT_PORT_OFFLINE:
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+		bfa_fcxp_discard(ns->fcxp);
+		break;
+
+	default:
+		bfa_sm_fault(ns->port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_ns_sm_rsnn_nn_retry(struct bfa_fcs_lport_ns_s *ns,
+					enum vport_ns_event event)
+{
+	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+	bfa_trc(ns->port->fcs, event);
+
+	switch (event) {
+	case NSSM_EVENT_TIMEOUT:
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rsnn_nn);
+		bfa_fcs_lport_ns_send_rsnn_nn(ns, NULL);
+		break;
+
+	case NSSM_EVENT_PORT_OFFLINE:
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+		bfa_timer_stop(&ns->timer);
+		break;
+
+	default:
+		bfa_sm_fault(ns->port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_ns_sm_sending_rspn_id(struct bfa_fcs_lport_ns_s *ns,
+				   enum vport_ns_event event)
+{
+	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+	bfa_trc(ns->port->fcs, event);
+
+	switch (event) {
+	case NSSM_EVENT_RSPNID_SENT:
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rspn_id);
+		break;
+
+	case NSSM_EVENT_PORT_OFFLINE:
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+		bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
+					   &ns->fcxp_wqe);
+		break;
+
+	default:
+		bfa_sm_fault(ns->port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_ns_sm_rspn_id(struct bfa_fcs_lport_ns_s *ns,
+			enum vport_ns_event event)
+{
+	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+	bfa_trc(ns->port->fcs, event);
+
+	switch (event) {
+	case NSSM_EVENT_RSP_ERROR:
+		/*
+		 * Start timer for a delayed retry
+		 */
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rspn_id_retry);
+		ns->port->stats.ns_retries++;
+		bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
+				    &ns->timer, bfa_fcs_lport_ns_timeout, ns,
+				    BFA_FCS_RETRY_TIMEOUT);
+		break;
+
+	case NSSM_EVENT_RSP_OK:
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rft_id);
+		bfa_fcs_lport_ns_send_rft_id(ns, NULL);
+		break;
+
+	case NSSM_EVENT_PORT_OFFLINE:
+		bfa_fcxp_discard(ns->fcxp);
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+		break;
+
+	default:
+		bfa_sm_fault(ns->port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_ns_sm_rspn_id_retry(struct bfa_fcs_lport_ns_s *ns,
+				enum vport_ns_event event)
+{
+	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+	bfa_trc(ns->port->fcs, event);
+
+	switch (event) {
+	case NSSM_EVENT_TIMEOUT:
+		/*
+		 * Retry Timer Expired. Re-send
+		 */
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rspn_id);
+		bfa_fcs_lport_ns_send_rspn_id(ns, NULL);
+		break;
+
+	case NSSM_EVENT_PORT_OFFLINE:
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+		bfa_timer_stop(&ns->timer);
+		break;
+
+	default:
+		bfa_sm_fault(ns->port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_ns_sm_sending_rft_id(struct bfa_fcs_lport_ns_s *ns,
+				  enum vport_ns_event event)
+{
+	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+	bfa_trc(ns->port->fcs, event);
+
+	switch (event) {
+	case NSSM_EVENT_RFTID_SENT:
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rft_id);
+		break;
+
+	case NSSM_EVENT_PORT_OFFLINE:
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+		bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
+					   &ns->fcxp_wqe);
+		break;
+
+	default:
+		bfa_sm_fault(ns->port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_ns_sm_rft_id(struct bfa_fcs_lport_ns_s *ns,
+			enum vport_ns_event event)
+{
+	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+	bfa_trc(ns->port->fcs, event);
+
+	switch (event) {
+	case NSSM_EVENT_RSP_OK:
+		/* Now move to register FC4 Features */
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rff_id);
+		bfa_fcs_lport_ns_send_rff_id(ns, NULL);
+		break;
+
+	case NSSM_EVENT_RSP_ERROR:
+		/*
+		 * Start timer for a delayed retry
+		 */
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rft_id_retry);
+		ns->port->stats.ns_retries++;
+		bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
+				    &ns->timer, bfa_fcs_lport_ns_timeout, ns,
+				    BFA_FCS_RETRY_TIMEOUT);
+		break;
+
+	case NSSM_EVENT_PORT_OFFLINE:
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+		bfa_fcxp_discard(ns->fcxp);
+		break;
+
+	default:
+		bfa_sm_fault(ns->port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_ns_sm_rft_id_retry(struct bfa_fcs_lport_ns_s *ns,
+				enum vport_ns_event event)
+{
+	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+	bfa_trc(ns->port->fcs, event);
+
+	switch (event) {
+	case NSSM_EVENT_TIMEOUT:
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rft_id);
+		bfa_fcs_lport_ns_send_rft_id(ns, NULL);
+		break;
+
+	case NSSM_EVENT_PORT_OFFLINE:
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+		bfa_timer_stop(&ns->timer);
+		break;
+
+	default:
+		bfa_sm_fault(ns->port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_ns_sm_sending_rff_id(struct bfa_fcs_lport_ns_s *ns,
+				  enum vport_ns_event event)
+{
+	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+	bfa_trc(ns->port->fcs, event);
+
+	switch (event) {
+	case NSSM_EVENT_RFFID_SENT:
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rff_id);
+		break;
+
+	case NSSM_EVENT_PORT_OFFLINE:
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+		bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
+					   &ns->fcxp_wqe);
+		break;
+
+	default:
+		bfa_sm_fault(ns->port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_ns_sm_rff_id(struct bfa_fcs_lport_ns_s *ns,
+			enum vport_ns_event event)
+{
+	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+	bfa_trc(ns->port->fcs, event);
+
+	switch (event) {
+	case NSSM_EVENT_RSP_OK:
+
+		/*
+		 * If min cfg mode is enabled, we do not initiate rport
+		 * discovery with the fabric. Instead, we will retrieve the
+		 * boot targets from HAL/FW.
+		 */
+		if (__fcs_min_cfg(ns->port->fcs)) {
+			bfa_fcs_lport_ns_boot_target_disc(ns->port);
+			bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_online);
+			return;
+		}
+
+		/*
+		 * If the port role is Initiator Mode, issue the NS query
+		 * (GID_FT); Target Mode skips the query.
+		 */
+		if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port)) {
+			bfa_sm_set_state(ns,
+				bfa_fcs_lport_ns_sm_sending_gid_ft);
+			bfa_fcs_lport_ns_send_gid_ft(ns, NULL);
+		}
+		/*
+		 * Kick off the management server (MS) state machine.
+		 */
+		bfa_fcs_lport_ms_online(ns->port);
+		break;
+
+	case NSSM_EVENT_RSP_ERROR:
+		/*
+		 * Start timer for a delayed retry
+		 */
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rff_id_retry);
+		ns->port->stats.ns_retries++;
+		bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
+				    &ns->timer, bfa_fcs_lport_ns_timeout, ns,
+				    BFA_FCS_RETRY_TIMEOUT);
+		break;
+
+	case NSSM_EVENT_PORT_OFFLINE:
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+		bfa_fcxp_discard(ns->fcxp);
+		break;
+
+	default:
+		bfa_sm_fault(ns->port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_ns_sm_rff_id_retry(struct bfa_fcs_lport_ns_s *ns,
+				enum vport_ns_event event)
+{
+	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+	bfa_trc(ns->port->fcs, event);
+
+	switch (event) {
+	case NSSM_EVENT_TIMEOUT:
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rff_id);
+		bfa_fcs_lport_ns_send_rff_id(ns, NULL);
+		break;
+
+	case NSSM_EVENT_PORT_OFFLINE:
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+		bfa_timer_stop(&ns->timer);
+		break;
+
+	default:
+		bfa_sm_fault(ns->port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_ns_sm_sending_gid_ft(struct bfa_fcs_lport_ns_s *ns,
+				  enum vport_ns_event event)
+{
+	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+	bfa_trc(ns->port->fcs, event);
+
+	switch (event) {
+	case NSSM_EVENT_GIDFT_SENT:
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_gid_ft);
+		break;
+
+	case NSSM_EVENT_PORT_OFFLINE:
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+		bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
+					   &ns->fcxp_wqe);
+		break;
+
+	default:
+		bfa_sm_fault(ns->port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_ns_sm_gid_ft(struct bfa_fcs_lport_ns_s *ns,
+			enum vport_ns_event event)
+{
+	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+	bfa_trc(ns->port->fcs, event);
+
+	switch (event) {
+	case NSSM_EVENT_RSP_OK:
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_online);
+		break;
+
+	case NSSM_EVENT_RSP_ERROR:
+		/*
+		 * TBD: for certain reject codes, we don't need to retry
+		 */
+		/*
+		 * Start timer for a delayed retry
+		 */
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_gid_ft_retry);
+		ns->port->stats.ns_retries++;
+		bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
+				    &ns->timer, bfa_fcs_lport_ns_timeout, ns,
+				    BFA_FCS_RETRY_TIMEOUT);
+		break;
+
+	case NSSM_EVENT_PORT_OFFLINE:
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+		bfa_fcxp_discard(ns->fcxp);
+		break;
+
+	case NSSM_EVENT_NS_QUERY:
+		break;
+
+	default:
+		bfa_sm_fault(ns->port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_ns_sm_gid_ft_retry(struct bfa_fcs_lport_ns_s *ns,
+				enum vport_ns_event event)
+{
+	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+	bfa_trc(ns->port->fcs, event);
+
+	switch (event) {
+	case NSSM_EVENT_TIMEOUT:
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_gid_ft);
+		bfa_fcs_lport_ns_send_gid_ft(ns, NULL);
+		break;
+
+	case NSSM_EVENT_PORT_OFFLINE:
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+		bfa_timer_stop(&ns->timer);
+		break;
+
+	default:
+		bfa_sm_fault(ns->port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_ns_sm_online(struct bfa_fcs_lport_ns_s *ns,
+			enum vport_ns_event event)
+{
+	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+	bfa_trc(ns->port->fcs, event);
+
+	switch (event) {
+	case NSSM_EVENT_PORT_OFFLINE:
+		bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+		break;
+
+	case NSSM_EVENT_NS_QUERY:
+		/*
+		 * If the port role is Initiator Mode, re-issue the NS
+		 * query (GID_FT); Target Mode remains online.
+		 */
+		if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port)) {
+			bfa_sm_set_state(ns,
+				bfa_fcs_lport_ns_sm_sending_gid_ft);
+			bfa_fcs_lport_ns_send_gid_ft(ns, NULL);
+		}
+		break;
+
+	default:
+		bfa_sm_fault(ns->port->fcs, event);
+	}
+}
+
+/*
+ *  ns_pvt Nameserver local functions
+ */
+
+static void
+bfa_fcs_lport_ns_send_plogi(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+	struct bfa_fcs_lport_ns_s *ns = ns_cbarg;
+	struct bfa_fcs_lport_s *port = ns->port;
+	struct fchs_s fchs;
+	int             len;
+	struct bfa_fcxp_s *fcxp;
+
+	bfa_trc(port->fcs, port->pid);
+
+	fcxp = fcxp_alloced ? fcxp_alloced :
+	       bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
+	if (!fcxp) {
+		port->stats.ns_plogi_alloc_wait++;
+		bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
+				bfa_fcs_lport_ns_send_plogi, ns, BFA_TRUE);
+		return;
+	}
+	ns->fcxp = fcxp;
+
+	len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+			     bfa_hton3b(FC_NAME_SERVER),
+			     bfa_fcs_lport_get_fcid(port), 0,
+			     port->port_cfg.pwwn, port->port_cfg.nwwn,
+			     bfa_fcport_get_maxfrsize(port->fcs->bfa),
+			     bfa_fcport_get_rx_bbcredit(port->fcs->bfa));
+
+	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+			  FC_CLASS_3, len, &fchs,
+			  bfa_fcs_lport_ns_plogi_response, (void *)ns,
+			  FC_MAX_PDUSZ, FC_ELS_TOV);
+	port->stats.ns_plogi_sent++;
+
+	bfa_sm_send_event(ns, NSSM_EVENT_PLOGI_SENT);
+}
+
+static void
+bfa_fcs_lport_ns_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
+			void *cbarg, bfa_status_t req_status, u32 rsp_len,
+		       u32 resid_len, struct fchs_s *rsp_fchs)
+{
+	struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg;
+	struct bfa_fcs_lport_s *port = ns->port;
+	struct fc_els_cmd_s *els_cmd;
+	struct fc_ls_rjt_s *ls_rjt;
+
+	bfa_trc(port->fcs, req_status);
+	bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+	/*
+	 * Sanity Checks
+	 */
+	if (req_status != BFA_STATUS_OK) {
+		bfa_trc(port->fcs, req_status);
+		port->stats.ns_plogi_rsp_err++;
+		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
+		return;
+	}
+
+	els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp);
+
+	switch (els_cmd->els_code) {
+
+	case FC_ELS_ACC:
+		if (rsp_len < sizeof(struct fc_logi_s)) {
+			bfa_trc(port->fcs, rsp_len);
+			port->stats.ns_plogi_acc_err++;
+			bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
+			break;
+		}
+		port->stats.ns_plogi_accepts++;
+		bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
+		break;
+
+	case FC_ELS_LS_RJT:
+		ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp);
+
+		bfa_trc(port->fcs, ls_rjt->reason_code);
+		bfa_trc(port->fcs, ls_rjt->reason_code_expl);
+
+		port->stats.ns_rejects++;
+
+		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
+		break;
+
+	default:
+		port->stats.ns_plogi_unknown_rsp++;
+		bfa_trc(port->fcs, els_cmd->els_code);
+		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
+	}
+}
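+
+/*
+ * Note on the accept path above: a PLOGI accept shorter than
+ * sizeof(struct fc_logi_s) is counted as ns_plogi_acc_err and treated
+ * as an error, guarding against truncated or malformed accepts.
+ */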
+
+/*
+ * Register node name for port_id
+ */
+static void
+bfa_fcs_lport_ns_send_rnn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+	struct bfa_fcs_lport_ns_s *ns = ns_cbarg;
+	struct bfa_fcs_lport_s *port = ns->port;
+	struct fchs_s  fchs;
+	int	len;
+	struct bfa_fcxp_s *fcxp;
+
+	bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+	fcxp = fcxp_alloced ? fcxp_alloced :
+			bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
+	if (!fcxp) {
+		port->stats.ns_rnnid_alloc_wait++;
+		bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
+				bfa_fcs_lport_ns_send_rnn_id, ns, BFA_TRUE);
+		return;
+	}
+
+	ns->fcxp = fcxp;
+
+	len = fc_rnnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+				bfa_fcs_lport_get_fcid(port),
+				bfa_fcs_lport_get_fcid(port),
+				bfa_fcs_lport_get_nwwn(port));
+
+	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+			  FC_CLASS_3, len, &fchs,
+			  bfa_fcs_lport_ns_rnn_id_response, (void *)ns,
+			  FC_MAX_PDUSZ, FC_FCCT_TOV);
+
+	port->stats.ns_rnnid_sent++;
+	bfa_sm_send_event(ns, NSSM_EVENT_RNNID_SENT);
+}
+
+static void
+bfa_fcs_lport_ns_rnn_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
+				void *cbarg, bfa_status_t req_status,
+				u32 rsp_len, u32 resid_len,
+				struct fchs_s *rsp_fchs)
+
+{
+	struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg;
+	struct bfa_fcs_lport_s *port = ns->port;
+	struct ct_hdr_s	*cthdr = NULL;
+
+	bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+	/*
+	 * Sanity Checks
+	 */
+	if (req_status != BFA_STATUS_OK) {
+		bfa_trc(port->fcs, req_status);
+		port->stats.ns_rnnid_rsp_err++;
+		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
+		return;
+	}
+
+	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
+	cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
+
+	if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
+		port->stats.ns_rnnid_accepts++;
+		bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
+		return;
+	}
+
+	port->stats.ns_rnnid_rejects++;
+	bfa_trc(port->fcs, cthdr->reason_code);
+	bfa_trc(port->fcs, cthdr->exp_code);
+	bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
+}
+
+/*
+ * Register the symbolic node name for a given node name.
+ */
+static void
+bfa_fcs_lport_ns_send_rsnn_nn(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+	struct bfa_fcs_lport_ns_s *ns = ns_cbarg;
+	struct bfa_fcs_lport_s *port = ns->port;
+	struct fchs_s  fchs;
+	int     len;
+	struct bfa_fcxp_s *fcxp;
+	u8 *nsymbl;
+
+	bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+	fcxp = fcxp_alloced ? fcxp_alloced :
+			bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
+	if (!fcxp) {
+		port->stats.ns_rsnn_nn_alloc_wait++;
+		bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
+				bfa_fcs_lport_ns_send_rsnn_nn, ns, BFA_TRUE);
+		return;
+	}
+	ns->fcxp = fcxp;
+
+	nsymbl = (u8 *) &(bfa_fcs_lport_get_nsym_name(
+					bfa_fcs_get_base_port(port->fcs)));
+
+	len = fc_rsnn_nn_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+				bfa_fcs_lport_get_fcid(port),
+				bfa_fcs_lport_get_nwwn(port), nsymbl);
+
+	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+			  FC_CLASS_3, len, &fchs,
+			  bfa_fcs_lport_ns_rsnn_nn_response, (void *)ns,
+			  FC_MAX_PDUSZ, FC_FCCT_TOV);
+
+	port->stats.ns_rsnn_nn_sent++;
+
+	bfa_sm_send_event(ns, NSSM_EVENT_RSNN_NN_SENT);
+}
+
+static void
+bfa_fcs_lport_ns_rsnn_nn_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
+				void *cbarg, bfa_status_t req_status,
+				u32 rsp_len, u32 resid_len,
+				struct fchs_s *rsp_fchs)
+{
+	struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg;
+	struct bfa_fcs_lport_s *port = ns->port;
+	struct ct_hdr_s	*cthdr = NULL;
+
+	bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+	/*
+	 * Sanity Checks
+	 */
+	if (req_status != BFA_STATUS_OK) {
+		bfa_trc(port->fcs, req_status);
+		port->stats.ns_rsnn_nn_rsp_err++;
+		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
+		return;
+	}
+
+	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
+	cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
+
+	if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
+		port->stats.ns_rsnn_nn_accepts++;
+		bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
+		return;
+	}
+
+	port->stats.ns_rsnn_nn_rejects++;
+	bfa_trc(port->fcs, cthdr->reason_code);
+	bfa_trc(port->fcs, cthdr->exp_code);
+	bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
+}
+
+/*
+ * Register the symbolic port name.
+ */
+static void
+bfa_fcs_lport_ns_send_rspn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+	struct bfa_fcs_lport_ns_s *ns = ns_cbarg;
+	struct bfa_fcs_lport_s *port = ns->port;
+	struct fchs_s fchs;
+	int             len;
+	struct bfa_fcxp_s *fcxp;
+	u8         symbl[256];
+	u8         *psymbl = &symbl[0];
+
+	memset(symbl, 0, sizeof(symbl));
+
+	bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+	fcxp = fcxp_alloced ? fcxp_alloced :
+	       bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
+	if (!fcxp) {
+		port->stats.ns_rspnid_alloc_wait++;
+		bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
+				bfa_fcs_lport_ns_send_rspn_id, ns, BFA_TRUE);
+		return;
+	}
+	ns->fcxp = fcxp;
+
+	/*
+	 * for V-Port, form a Port Symbolic Name
+	 */
+	if (port->vport) {
+		/*
+		 * For Vports, we append the vport's port symbolic name
+		 * to that of the base port.
+		 */
+
+		strlcpy(symbl,
+			(char *)&(bfa_fcs_lport_get_psym_name
+			 (bfa_fcs_get_base_port(port->fcs))),
+			sizeof(symbl));
+
+		strlcat(symbl, (char *)&(bfa_fcs_lport_get_psym_name(port)),
+			sizeof(symbl));
+	} else {
+		psymbl = (u8 *) &(bfa_fcs_lport_get_psym_name(port));
+	}
+
+	len = fc_rspnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+			      bfa_fcs_lport_get_fcid(port), 0, psymbl);
+
+	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+			  FC_CLASS_3, len, &fchs,
+			  bfa_fcs_lport_ns_rspn_id_response, (void *)ns,
+			  FC_MAX_PDUSZ, FC_FCCT_TOV);
+
+	port->stats.ns_rspnid_sent++;
+
+	bfa_sm_send_event(ns, NSSM_EVENT_RSPNID_SENT);
+}
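+
+/*
+ * Vport symbolic-name example (hypothetical names): with a base-port
+ * symbolic name of "base-sym" and a vport symbolic name of "vp-sym",
+ * the RSPN_ID payload carries the two strings concatenated back to
+ * back (no separator is added at this point), bounded to sizeof(symbl)
+ * by strlcat().
+ */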
+
+static void
+bfa_fcs_lport_ns_rspn_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
+				 void *cbarg, bfa_status_t req_status,
+				 u32 rsp_len, u32 resid_len,
+				 struct fchs_s *rsp_fchs)
+{
+	struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg;
+	struct bfa_fcs_lport_s *port = ns->port;
+	struct ct_hdr_s *cthdr = NULL;
+
+	bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+	/*
+	 * Sanity Checks
+	 */
+	if (req_status != BFA_STATUS_OK) {
+		bfa_trc(port->fcs, req_status);
+		port->stats.ns_rspnid_rsp_err++;
+		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
+		return;
+	}
+
+	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
+	cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
+
+	if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
+		port->stats.ns_rspnid_accepts++;
+		bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
+		return;
+	}
+
+	port->stats.ns_rspnid_rejects++;
+	bfa_trc(port->fcs, cthdr->reason_code);
+	bfa_trc(port->fcs, cthdr->exp_code);
+	bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
+}
+
+/*
+ * Register FC4-Types
+ */
+static void
+bfa_fcs_lport_ns_send_rft_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+	struct bfa_fcs_lport_ns_s *ns = ns_cbarg;
+	struct bfa_fcs_lport_s *port = ns->port;
+	struct fchs_s fchs;
+	int             len;
+	struct bfa_fcxp_s *fcxp;
+
+	bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+	fcxp = fcxp_alloced ? fcxp_alloced :
+	       bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
+	if (!fcxp) {
+		port->stats.ns_rftid_alloc_wait++;
+		bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
+				bfa_fcs_lport_ns_send_rft_id, ns, BFA_TRUE);
+		return;
+	}
+	ns->fcxp = fcxp;
+
+	len = fc_rftid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+		     bfa_fcs_lport_get_fcid(port), 0, port->port_cfg.roles);
+
+	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+			  FC_CLASS_3, len, &fchs,
+			  bfa_fcs_lport_ns_rft_id_response, (void *)ns,
+			  FC_MAX_PDUSZ, FC_FCCT_TOV);
+
+	port->stats.ns_rftid_sent++;
+	bfa_sm_send_event(ns, NSSM_EVENT_RFTID_SENT);
+}
+
+static void
+bfa_fcs_lport_ns_rft_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
+				void *cbarg, bfa_status_t req_status,
+				u32 rsp_len, u32 resid_len,
+				struct fchs_s *rsp_fchs)
+{
+	struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg;
+	struct bfa_fcs_lport_s *port = ns->port;
+	struct ct_hdr_s *cthdr = NULL;
+
+	bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+	/*
+	 * Sanity Checks
+	 */
+	if (req_status != BFA_STATUS_OK) {
+		bfa_trc(port->fcs, req_status);
+		port->stats.ns_rftid_rsp_err++;
+		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
+		return;
+	}
+
+	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
+	cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
+
+	if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
+		port->stats.ns_rftid_accepts++;
+		bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
+		return;
+	}
+
+	port->stats.ns_rftid_rejects++;
+	bfa_trc(port->fcs, cthdr->reason_code);
+	bfa_trc(port->fcs, cthdr->exp_code);
+	bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
+}
+
+/*
+ * Register FC4-Features : Should be done after RFT_ID, since RFF_ID
+ * sets the FC-4 feature bits (initiator/target) for an FC-4 type that
+ * RFT_ID has already registered with the name server.
+ */
+static void
+bfa_fcs_lport_ns_send_rff_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+	struct bfa_fcs_lport_ns_s *ns = ns_cbarg;
+	struct bfa_fcs_lport_s *port = ns->port;
+	struct fchs_s fchs;
+	int             len;
+	struct bfa_fcxp_s *fcxp;
+	u8			fc4_ftrs = 0;
+
+	bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+	fcxp = fcxp_alloced ? fcxp_alloced :
+	       bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
+	if (!fcxp) {
+		port->stats.ns_rffid_alloc_wait++;
+		bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
+				bfa_fcs_lport_ns_send_rff_id, ns, BFA_TRUE);
+		return;
+	}
+	ns->fcxp = fcxp;
+
+	if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port))
+		fc4_ftrs = FC_GS_FCP_FC4_FEATURE_INITIATOR;
+
+	len = fc_rffid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+			     bfa_fcs_lport_get_fcid(port), 0,
+				 FC_TYPE_FCP, fc4_ftrs);
+
+	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+			  FC_CLASS_3, len, &fchs,
+			  bfa_fcs_lport_ns_rff_id_response, (void *)ns,
+			  FC_MAX_PDUSZ, FC_FCCT_TOV);
+
+	port->stats.ns_rffid_sent++;
+	bfa_sm_send_event(ns, NSSM_EVENT_RFFID_SENT);
+}
+
+static void
+bfa_fcs_lport_ns_rff_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
+				void *cbarg, bfa_status_t req_status,
+				u32 rsp_len, u32 resid_len,
+				struct fchs_s *rsp_fchs)
+{
+	struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg;
+	struct bfa_fcs_lport_s *port = ns->port;
+	struct ct_hdr_s *cthdr = NULL;
+
+	bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+	/*
+	 * Sanity Checks
+	 */
+	if (req_status != BFA_STATUS_OK) {
+		bfa_trc(port->fcs, req_status);
+		port->stats.ns_rffid_rsp_err++;
+		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
+		return;
+	}
+
+	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
+	cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
+
+	if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
+		port->stats.ns_rffid_accepts++;
+		bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
+		return;
+	}
+
+	port->stats.ns_rffid_rejects++;
+	bfa_trc(port->fcs, cthdr->reason_code);
+	bfa_trc(port->fcs, cthdr->exp_code);
+
+	if (cthdr->reason_code == CT_RSN_NOT_SUPP) {
+		/* if this command is not supported, we don't retry */
+		bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
+	} else
+		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
+}
+/*
+ * Query Fabric for FC4-Types Devices.
+ *
+ * TBD : Need to use a local (FCS private) response buffer, since the response
+ * can be larger than 2K.
+ */
+static void
+bfa_fcs_lport_ns_send_gid_ft(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+	struct bfa_fcs_lport_ns_s *ns = ns_cbarg;
+	struct bfa_fcs_lport_s *port = ns->port;
+	struct fchs_s fchs;
+	int             len;
+	struct bfa_fcxp_s *fcxp;
+
+	bfa_trc(port->fcs, port->pid);
+
+	fcxp = fcxp_alloced ? fcxp_alloced :
+	       bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
+	if (!fcxp) {
+		port->stats.ns_gidft_alloc_wait++;
+		bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
+				bfa_fcs_lport_ns_send_gid_ft, ns, BFA_TRUE);
+		return;
+	}
+	ns->fcxp = fcxp;
+
+	/*
+	 * This query is only initiated for FCP initiator mode.
+	 */
+	len = fc_gid_ft_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+			      ns->port->pid, FC_TYPE_FCP);
+
+	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+			  FC_CLASS_3, len, &fchs,
+			  bfa_fcs_lport_ns_gid_ft_response, (void *)ns,
+			  bfa_fcxp_get_maxrsp(port->fcs->bfa), FC_FCCT_TOV);
+
+	port->stats.ns_gidft_sent++;
+
+	bfa_sm_send_event(ns, NSSM_EVENT_GIDFT_SENT);
+}
+
+static void
+bfa_fcs_lport_ns_gid_ft_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
+				void *cbarg, bfa_status_t req_status,
+				u32 rsp_len, u32 resid_len,
+				struct fchs_s *rsp_fchs)
+{
+	struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg;
+	struct bfa_fcs_lport_s *port = ns->port;
+	struct ct_hdr_s *cthdr = NULL;
+	u32        n_pids;
+
+	bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+	/*
+	 * Sanity Checks
+	 */
+	if (req_status != BFA_STATUS_OK) {
+		bfa_trc(port->fcs, req_status);
+		port->stats.ns_gidft_rsp_err++;
+		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
+		return;
+	}
+
+	if (resid_len != 0) {
+		/*
+		 * TBD : we will need to allocate a larger buffer & retry the
+		 * command
+		 */
+		bfa_trc(port->fcs, rsp_len);
+		bfa_trc(port->fcs, resid_len);
+		return;
+	}
+
+	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
+	cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
+
+	switch (cthdr->cmd_rsp_code) {
+
+	case CT_RSP_ACCEPT:
+
+		port->stats.ns_gidft_accepts++;
+		n_pids = (fc_get_ctresp_pyld_len(rsp_len) / sizeof(u32));
+		bfa_trc(port->fcs, n_pids);
+		bfa_fcs_lport_ns_process_gidft_pids(port,
+						   (u32 *) (cthdr + 1),
+						   n_pids);
+		bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
+		break;
+
+	case CT_RSP_REJECT:
+
+		/*
+		 * Check the reason code  & explanation.
+		 * There may not have been any FC4 devices in the fabric
+		 */
+		port->stats.ns_gidft_rejects++;
+		bfa_trc(port->fcs, cthdr->reason_code);
+		bfa_trc(port->fcs, cthdr->exp_code);
+
+		if ((cthdr->reason_code == CT_RSN_UNABLE_TO_PERF)
+		    && (cthdr->exp_code == CT_NS_EXP_FT_NOT_REG)) {
+
+			bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
+		} else {
+			/*
+			 * for all other errors, retry
+			 */
+			bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
+		}
+		break;
+
+	default:
+		port->stats.ns_gidft_unknown_rsp++;
+		bfa_trc(port->fcs, cthdr->cmd_rsp_code);
+		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
+	}
+}
+
+/*
+ *     This routine will be called by bfa_timer on timer timeouts.
+ *
+ *	param[in]	arg - pointer to bfa_fcs_lport_ns_s.
+ *
+ *	return
+ *		void
+ */
+static void
+bfa_fcs_lport_ns_timeout(void *arg)
+{
+	struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) arg;
+
+	ns->port->stats.ns_timeouts++;
+	bfa_sm_send_event(ns, NSSM_EVENT_TIMEOUT);
+}
+
+/*
+ * Process the PID list in GID_FT response
+ */
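+/*
+ * Each entry in the GID_FT accept payload (struct fcgs_gidft_resp_s) is
+ * one 4-byte word: the high-order byte carries the control bits,
+ * including the "last entry" flag, and the low-order three bytes carry
+ * the port ID.
+ */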
+static void
+bfa_fcs_lport_ns_process_gidft_pids(struct bfa_fcs_lport_s *port, u32 *pid_buf,
+				   u32 n_pids)
+{
+	struct fcgs_gidft_resp_s *gidft_entry;
+	struct bfa_fcs_rport_s *rport;
+	u32        ii;
+	struct bfa_fcs_fabric_s *fabric = port->fabric;
+	struct bfa_fcs_vport_s *vport;
+	struct list_head *qe;
+	u8 found = 0;
+
+	for (ii = 0; ii < n_pids; ii++) {
+		gidft_entry = (struct fcgs_gidft_resp_s *) &pid_buf[ii];
+
+		if (gidft_entry->pid == port->pid)
+			continue;
+
+		/*
+		 * Ignore PID if it is of base port
+		 * (Avoid vports discovering base port as remote port)
+		 */
+		if (gidft_entry->pid == fabric->bport.pid)
+			continue;
+
+		/*
+		 * Ignore PID if it is of vport created on the same base port
+		 * (Avoid vport discovering every other vport created on the
+		 * same port as remote port)
+		 */
+		list_for_each(qe, &fabric->vport_q) {
+			vport = (struct bfa_fcs_vport_s *) qe;
+			if (vport->lport.pid == gidft_entry->pid)
+				found = 1;
+		}
+
+		if (found) {
+			found = 0;
+			continue;
+		}
+
+		/*
+		 * Check if this rport already exists
+		 */
+		rport = bfa_fcs_lport_get_rport_by_pid(port, gidft_entry->pid);
+		if (rport == NULL) {
+			/*
+			 * this is a new device. create rport
+			 */
+			rport = bfa_fcs_rport_create(port, gidft_entry->pid);
+		} else {
+			/*
+			 * this rport already exists
+			 */
+			bfa_fcs_rport_scn(rport);
+		}
+
+		bfa_trc(port->fcs, gidft_entry->pid);
+
+		/*
+		 * if the last entry bit is set, bail out.
+		 */
+		if (gidft_entry->last)
+			return;
+	}
+}
+
+/*
+ *  fcs_ns_public FCS nameserver public interfaces
+ */
+
+/*
+ * Functions called by port/fab.
+ * These will send relevant Events to the ns state machine.
+ */
+void
+bfa_fcs_lport_ns_init(struct bfa_fcs_lport_s *port)
+{
+	struct bfa_fcs_lport_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port);
+
+	ns->port = port;
+	bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+}
+
+void
+bfa_fcs_lport_ns_offline(struct bfa_fcs_lport_s *port)
+{
+	struct bfa_fcs_lport_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port);
+
+	ns->port = port;
+	bfa_sm_send_event(ns, NSSM_EVENT_PORT_OFFLINE);
+}
+
+void
+bfa_fcs_lport_ns_online(struct bfa_fcs_lport_s *port)
+{
+	struct bfa_fcs_lport_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port);
+
+	ns->port = port;
+	bfa_sm_send_event(ns, NSSM_EVENT_PORT_ONLINE);
+}
+
+void
+bfa_fcs_lport_ns_query(struct bfa_fcs_lport_s *port)
+{
+	struct bfa_fcs_lport_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port);
+
+	bfa_trc(port->fcs, port->pid);
+	if (bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_online))
+		bfa_sm_send_event(ns, NSSM_EVENT_NS_QUERY);
+}
+
+static void
+bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port)
+{
+
+	struct bfa_fcs_rport_s *rport;
+	u8 nwwns;
+	wwn_t  wwns[BFA_PREBOOT_BOOTLUN_MAX];
+	int ii;
+
+	bfa_iocfc_get_bootwwns(port->fcs->bfa, &nwwns, wwns);
+
+	for (ii = 0 ; ii < nwwns; ++ii) {
+		rport = bfa_fcs_rport_create_by_wwn(port, wwns[ii]);
+		WARN_ON(!rport);
+	}
+}
+
+void
+bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+	struct bfa_fcs_lport_ns_s *ns = cbarg;
+	struct bfa_fcs_lport_s *port = ns->port;
+	struct fchs_s fchs;
+	struct bfa_fcxp_s *fcxp;
+	u8 symbl[256];
+	int len;
+
+	/* Avoid sending RSPN in the following states. */
+	if (bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_offline) ||
+	    bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_plogi_sending) ||
+	    bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_plogi) ||
+	    bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_plogi_retry) ||
+	    bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_rspn_id_retry))
+		return;
+
+	memset(symbl, 0, sizeof(symbl));
+	bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+	fcxp = fcxp_alloced ? fcxp_alloced :
+	       bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
+	if (!fcxp) {
+		port->stats.ns_rspnid_alloc_wait++;
+		bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
+			bfa_fcs_lport_ns_util_send_rspn_id, ns, BFA_FALSE);
+		return;
+	}
+
+	ns->fcxp = fcxp;
+
+	if (port->vport) {
+		/*
+		 * For Vports, we append the vport's port symbolic name
+		 * to that of the base port.
+		 */
+		strlcpy(symbl, (char *)&(bfa_fcs_lport_get_psym_name
+			(bfa_fcs_get_base_port(port->fcs))),
+			sizeof(symbl));
+
+		strlcat(symbl,
+			(char *)&(bfa_fcs_lport_get_psym_name(port)),
+			sizeof(symbl));
+	}
+
+	len = fc_rspnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+			      bfa_fcs_lport_get_fcid(port), 0, symbl);
+
+	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+		      FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
+
+	port->stats.ns_rspnid_sent++;
+}
+
+/*
+ * FCS SCN
+ */
+
+#define FC_QOS_RSCN_EVENT		0x0c
+#define FC_FABRIC_NAME_RSCN_EVENT	0x0d
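+
+/*
+ * Event qualifier values carried in the qualifier field of an RSCN
+ * page; used below to filter QoS and fabric-name-change RSCN events
+ * out of the normal port-state processing.
+ */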
+
+/*
+ * forward declarations
+ */
+static void     bfa_fcs_lport_scn_send_scr(void *scn_cbarg,
+					  struct bfa_fcxp_s *fcxp_alloced);
+static void     bfa_fcs_lport_scn_scr_response(void *fcsarg,
+					      struct bfa_fcxp_s *fcxp,
+					      void *cbarg,
+					      bfa_status_t req_status,
+					      u32 rsp_len,
+					      u32 resid_len,
+					      struct fchs_s *rsp_fchs);
+static void     bfa_fcs_lport_scn_send_ls_acc(struct bfa_fcs_lport_s *port,
+					     struct fchs_s *rx_fchs);
+static void     bfa_fcs_lport_scn_timeout(void *arg);
+
+/*
+ *  fcs_scn_sm FCS SCN state machine
+ */
+
+/*
+ * Port SCN State Machine events
+ */
+enum port_scn_event {
+	SCNSM_EVENT_PORT_ONLINE = 1,
+	SCNSM_EVENT_PORT_OFFLINE = 2,
+	SCNSM_EVENT_RSP_OK = 3,
+	SCNSM_EVENT_RSP_ERROR = 4,
+	SCNSM_EVENT_TIMEOUT = 5,
+	SCNSM_EVENT_SCR_SENT = 6,
+};
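+
+/*
+ * Typical SCN flow (illustrative): offline --PORT_ONLINE--> sending_scr
+ * --SCR_SENT--> scr --RSP_OK--> online. An SCR failure (RSP_ERROR)
+ * moves to scr_retry, where a timer resends the SCR after
+ * BFA_FCS_RETRY_TIMEOUT.
+ */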
+
+static void     bfa_fcs_lport_scn_sm_offline(struct bfa_fcs_lport_scn_s *scn,
+					    enum port_scn_event event);
+static void     bfa_fcs_lport_scn_sm_sending_scr(
+					struct bfa_fcs_lport_scn_s *scn,
+					enum port_scn_event event);
+static void     bfa_fcs_lport_scn_sm_scr(struct bfa_fcs_lport_scn_s *scn,
+					enum port_scn_event event);
+static void     bfa_fcs_lport_scn_sm_scr_retry(struct bfa_fcs_lport_scn_s *scn,
+					      enum port_scn_event event);
+static void     bfa_fcs_lport_scn_sm_online(struct bfa_fcs_lport_scn_s *scn,
+					   enum port_scn_event event);
+
+/*
+ *	Starting state - awaiting link up.
+ */
+static void
+bfa_fcs_lport_scn_sm_offline(struct bfa_fcs_lport_scn_s *scn,
+			enum port_scn_event event)
+{
+	switch (event) {
+	case SCNSM_EVENT_PORT_ONLINE:
+		bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_sending_scr);
+		bfa_fcs_lport_scn_send_scr(scn, NULL);
+		break;
+
+	case SCNSM_EVENT_PORT_OFFLINE:
+		break;
+
+	default:
+		bfa_sm_fault(scn->port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_scn_sm_sending_scr(struct bfa_fcs_lport_scn_s *scn,
+				enum port_scn_event event)
+{
+	switch (event) {
+	case SCNSM_EVENT_SCR_SENT:
+		bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_scr);
+		break;
+
+	case SCNSM_EVENT_PORT_OFFLINE:
+		bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_offline);
+		bfa_fcxp_walloc_cancel(scn->port->fcs->bfa, &scn->fcxp_wqe);
+		break;
+
+	default:
+		bfa_sm_fault(scn->port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_scn_sm_scr(struct bfa_fcs_lport_scn_s *scn,
+			enum port_scn_event event)
+{
+	struct bfa_fcs_lport_s *port = scn->port;
+
+	switch (event) {
+	case SCNSM_EVENT_RSP_OK:
+		bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_online);
+		break;
+
+	case SCNSM_EVENT_RSP_ERROR:
+		bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_scr_retry);
+		bfa_timer_start(port->fcs->bfa, &scn->timer,
+				    bfa_fcs_lport_scn_timeout, scn,
+				    BFA_FCS_RETRY_TIMEOUT);
+		break;
+
+	case SCNSM_EVENT_PORT_OFFLINE:
+		bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_offline);
+		bfa_fcxp_discard(scn->fcxp);
+		break;
+
+	default:
+		bfa_sm_fault(port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_scn_sm_scr_retry(struct bfa_fcs_lport_scn_s *scn,
+				enum port_scn_event event)
+{
+	switch (event) {
+	case SCNSM_EVENT_TIMEOUT:
+		bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_sending_scr);
+		bfa_fcs_lport_scn_send_scr(scn, NULL);
+		break;
+
+	case SCNSM_EVENT_PORT_OFFLINE:
+		bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_offline);
+		bfa_timer_stop(&scn->timer);
+		break;
+
+	default:
+		bfa_sm_fault(scn->port->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_lport_scn_sm_online(struct bfa_fcs_lport_scn_s *scn,
+			enum port_scn_event event)
+{
+	switch (event) {
+	case SCNSM_EVENT_PORT_OFFLINE:
+		bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_offline);
+		break;
+
+	default:
+		bfa_sm_fault(scn->port->fcs, event);
+	}
+}
+
+
+
+/*
+ *  fcs_scn_private FCS SCN private functions
+ */
+
+/*
+ * This routine will be called to send an SCR (State Change Registration)
+ * command, which registers the port with the fabric controller to
+ * receive RSCNs.
+ */
+static void
+bfa_fcs_lport_scn_send_scr(void *scn_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+	struct bfa_fcs_lport_scn_s *scn = scn_cbarg;
+	struct bfa_fcs_lport_s *port = scn->port;
+	struct fchs_s fchs;
+	int             len;
+	struct bfa_fcxp_s *fcxp;
+
+	bfa_trc(port->fcs, port->pid);
+	bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+	fcxp = fcxp_alloced ? fcxp_alloced :
+	       bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
+	if (!fcxp) {
+		bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &scn->fcxp_wqe,
+				bfa_fcs_lport_scn_send_scr, scn, BFA_TRUE);
+		return;
+	}
+	scn->fcxp = fcxp;
+
+	/* Handle VU registrations for Base port only */
+	if ((!port->vport) && bfa_ioc_get_fcmode(&port->fcs->bfa->ioc)) {
+		len = fc_scr_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+				port->fabric->lps->brcd_switch,
+				port->pid, 0);
+	} else {
+		len = fc_scr_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+				   BFA_FALSE,
+				   port->pid, 0);
+	}
+
+	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+			  FC_CLASS_3, len, &fchs,
+			  bfa_fcs_lport_scn_scr_response,
+			  (void *)scn, FC_MAX_PDUSZ, FC_ELS_TOV);
+
+	bfa_sm_send_event(scn, SCNSM_EVENT_SCR_SENT);
+}
+
+static void
+bfa_fcs_lport_scn_scr_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
+			void *cbarg, bfa_status_t req_status, u32 rsp_len,
+			      u32 resid_len, struct fchs_s *rsp_fchs)
+{
+	struct bfa_fcs_lport_scn_s *scn = (struct bfa_fcs_lport_scn_s *) cbarg;
+	struct bfa_fcs_lport_s *port = scn->port;
+	struct fc_els_cmd_s *els_cmd;
+	struct fc_ls_rjt_s *ls_rjt;
+
+	bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+	/*
+	 * Sanity Checks
+	 */
+	if (req_status != BFA_STATUS_OK) {
+		bfa_trc(port->fcs, req_status);
+		bfa_sm_send_event(scn, SCNSM_EVENT_RSP_ERROR);
+		return;
+	}
+
+	els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp);
+
+	switch (els_cmd->els_code) {
+
+	case FC_ELS_ACC:
+		bfa_sm_send_event(scn, SCNSM_EVENT_RSP_OK);
+		break;
+
+	case FC_ELS_LS_RJT:
+
+		ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp);
+
+		bfa_trc(port->fcs, ls_rjt->reason_code);
+		bfa_trc(port->fcs, ls_rjt->reason_code_expl);
+
+		bfa_sm_send_event(scn, SCNSM_EVENT_RSP_ERROR);
+		break;
+
+	default:
+		bfa_sm_send_event(scn, SCNSM_EVENT_RSP_ERROR);
+	}
+}
+
+/*
+ * Send a LS Accept
+ */
+static void
+bfa_fcs_lport_scn_send_ls_acc(struct bfa_fcs_lport_s *port,
+				struct fchs_s *rx_fchs)
+{
+	struct fchs_s fchs;
+	struct bfa_fcxp_s *fcxp;
+	struct bfa_rport_s *bfa_rport = NULL;
+	int             len;
+
+	bfa_trc(port->fcs, rx_fchs->s_id);
+
+	fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
+	if (!fcxp)
+		return;
+
+	len = fc_ls_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+			      rx_fchs->s_id, bfa_fcs_lport_get_fcid(port),
+			      rx_fchs->ox_id);
+
+	bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag,
+			  BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL,
+			  FC_MAX_PDUSZ, 0);
+}
+
+/*
+ *     This routine will be called by bfa_timer on timer timeouts.
+ *
+ *	param[in]	arg - pointer to bfa_fcs_lport_scn_s.
+ *
+ *	return
+ *		void
+ */
+static void
+bfa_fcs_lport_scn_timeout(void *arg)
+{
+	struct bfa_fcs_lport_scn_s *scn = (struct bfa_fcs_lport_scn_s *) arg;
+
+	bfa_sm_send_event(scn, SCNSM_EVENT_TIMEOUT);
+}
+
+
+
+/*
+ *  fcs_scn_public FCS state change notification public interfaces
+ */
+
+/*
+ * Functions called by port/fab
+ */
+void
+bfa_fcs_lport_scn_init(struct bfa_fcs_lport_s *port)
+{
+	struct bfa_fcs_lport_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port);
+
+	scn->port = port;
+	bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_offline);
+}
+
+void
+bfa_fcs_lport_scn_offline(struct bfa_fcs_lport_s *port)
+{
+	struct bfa_fcs_lport_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port);
+
+	scn->port = port;
+	bfa_sm_send_event(scn, SCNSM_EVENT_PORT_OFFLINE);
+}
+
+void
+bfa_fcs_lport_fab_scn_online(struct bfa_fcs_lport_s *port)
+{
+	struct bfa_fcs_lport_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port);
+
+	scn->port = port;
+	bfa_sm_send_event(scn, SCNSM_EVENT_PORT_ONLINE);
+}
+
+static void
+bfa_fcs_lport_scn_portid_rscn(struct bfa_fcs_lport_s *port, u32 rpid)
+{
+	struct bfa_fcs_rport_s *rport;
+	struct bfa_fcs_fabric_s *fabric = port->fabric;
+	struct bfa_fcs_vport_s *vport;
+	struct list_head *qe;
+
+	bfa_trc(port->fcs, rpid);
+
+	/*
+	 * Ignore PID if it is of base port or of vports created on the
+	 * same base port. It is to avoid vports discovering base port or
+	 * other vports created on same base port as remote port
+	 */
+	if (rpid == fabric->bport.pid)
+		return;
+
+	list_for_each(qe, &fabric->vport_q) {
+		vport = (struct bfa_fcs_vport_s *) qe;
+		if (vport->lport.pid == rpid)
+			return;
+	}
+	/*
+	 * If this is an unknown device, then it just came online.
+	 * Otherwise let rport handle the RSCN event.
+	 */
+	rport = bfa_fcs_lport_get_rport_by_pid(port, rpid);
+	if (!rport)
+		rport = bfa_fcs_lport_get_rport_by_old_pid(port, rpid);
+
+	if (rport == NULL) {
+		/*
+		 * If min cfg mode is enabled, we do not need to
+		 * discover any new rports.
+		 */
+		if (!__fcs_min_cfg(port->fcs))
+			rport = bfa_fcs_rport_create(port, rpid);
+	} else
+		bfa_fcs_rport_scn(rport);
+}
+
+/*
+ * rscn format based PID comparison
+ */
+#define __fc_pid_match(__c0, __c1, __fmt)		\
+	(((__fmt) == FC_RSCN_FORMAT_FABRIC) ||		\
+	 (((__fmt) == FC_RSCN_FORMAT_DOMAIN) &&		\
+	  ((__c0)[0] == (__c1)[0])) ||				\
+	 (((__fmt) == FC_RSCN_FORMAT_AREA) &&		\
+	  ((__c0)[0] == (__c1)[0]) &&				\
+	  ((__c0)[1] == (__c1)[1])))
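+
+/*
+ * Example (illustrative): an area-format RSCN for 0x012300 matches any
+ * rport whose PID shares the domain (0x01) and area (0x23) bytes, such
+ * as 0x012345; a domain-format RSCN compares only the domain byte; a
+ * fabric-format RSCN matches every rport. PIDs are compared byte-wise,
+ * so the macro works directly on the network-order representation.
+ */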
+
+static void
+bfa_fcs_lport_scn_multiport_rscn(struct bfa_fcs_lport_s *port,
+				enum fc_rscn_format format,
+				u32 rscn_pid)
+{
+	struct bfa_fcs_rport_s *rport;
+	struct list_head        *qe, *qe_next;
+	u8        *c0, *c1;
+
+	bfa_trc(port->fcs, format);
+	bfa_trc(port->fcs, rscn_pid);
+
+	c0 = (u8 *) &rscn_pid;
+
+	list_for_each_safe(qe, qe_next, &port->rport_q) {
+		rport = (struct bfa_fcs_rport_s *) qe;
+		c1 = (u8 *) &rport->pid;
+		if (__fc_pid_match(c0, c1, format))
+			bfa_fcs_rport_scn(rport);
+	}
+}
+
+
+void
+bfa_fcs_lport_scn_process_rscn(struct bfa_fcs_lport_s *port,
+			struct fchs_s *fchs, u32 len)
+{
+	struct fc_rscn_pl_s *rscn = (struct fc_rscn_pl_s *) (fchs + 1);
+	int             num_entries;
+	u32        rscn_pid;
+	bfa_boolean_t   nsquery = BFA_FALSE, found;
+	int             i = 0, j;
+
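+	/*
+	 * The RSCN payload length field counts its own 4-byte header
+	 * (command, page length, payload length), so one u32 is
+	 * subtracted before dividing by the per-page event size.
+	 */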
+	num_entries =
+		(be16_to_cpu(rscn->payldlen) -
+		 sizeof(u32)) / sizeof(rscn->event[0]);
+
+	bfa_trc(port->fcs, num_entries);
+
+	port->stats.num_rscn++;
+
+	bfa_fcs_lport_scn_send_ls_acc(port, fchs);
+
+	for (i = 0; i < num_entries; i++) {
+		rscn_pid = rscn->event[i].portid;
+
+		bfa_trc(port->fcs, rscn->event[i].format);
+		bfa_trc(port->fcs, rscn_pid);
+
+		/* check for duplicate entries in the list */
+		found = BFA_FALSE;
+		for (j = 0; j < i; j++) {
+			if (rscn->event[j].portid == rscn_pid) {
+				found = BFA_TRUE;
+				break;
+			}
+		}
+
+		/* if found earlier in the list, the pid has already been processed */
+		if (found) {
+			bfa_trc(port->fcs, rscn_pid);
+			continue;
+		}
+
+		switch (rscn->event[i].format) {
+		case FC_RSCN_FORMAT_PORTID:
+			if (rscn->event[i].qualifier == FC_QOS_RSCN_EVENT) {
+				/*
+				 * Ignore this event.
+				 * f/w would have processed it
+				 */
+				bfa_trc(port->fcs, rscn_pid);
+			} else {
+				port->stats.num_portid_rscn++;
+				bfa_fcs_lport_scn_portid_rscn(port, rscn_pid);
+			}
+		break;
+
+		case FC_RSCN_FORMAT_FABRIC:
+			if (rscn->event[i].qualifier ==
+					FC_FABRIC_NAME_RSCN_EVENT) {
+				bfa_fcs_lport_ms_fabric_rscn(port);
+				break;
+			}
+			/* !!!!!!!!! Fall Through !!!!!!!!!!!!! */
+
+		case FC_RSCN_FORMAT_AREA:
+		case FC_RSCN_FORMAT_DOMAIN:
+			nsquery = BFA_TRUE;
+			bfa_fcs_lport_scn_multiport_rscn(port,
+							rscn->event[i].format,
+							rscn_pid);
+			break;
+
+
+		default:
+			WARN_ON(1);
+			nsquery = BFA_TRUE;
+		}
+	}
+
+	/*
+	 * If any of area, domain or fabric RSCN is received, do a fresh
+	 * discovery to find new devices.
+	 */
+	if (nsquery)
+		bfa_fcs_lport_ns_query(port);
+}
+
+/*
+ * BFA FCS port
+ */
+/*
+ *  fcs_port_api BFA FCS port API
+ */
+struct bfa_fcs_lport_s *
+bfa_fcs_get_base_port(struct bfa_fcs_s *fcs)
+{
+	return &fcs->fabric.bport;
+}
+
+wwn_t
+bfa_fcs_lport_get_rport(struct bfa_fcs_lport_s *port, wwn_t wwn, int index,
+		int nrports, bfa_boolean_t bwwn)
+{
+	struct list_head	*qh, *qe;
+	struct bfa_fcs_rport_s *rport = NULL;
+	int	i;
+	struct bfa_fcs_s	*fcs;
+
+	if (port == NULL || nrports == 0)
+		return (wwn_t) 0;
+
+	fcs = port->fcs;
+	bfa_trc(fcs, (u32) nrports);
+
+	i = 0;
+	qh = &port->rport_q;
+	qe = bfa_q_first(qh);
+
+	while ((qe != qh) && (i < nrports)) {
+		rport = (struct bfa_fcs_rport_s *) qe;
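+		/*
+		 * Skip fabric-reserved addresses (PIDs above 0xFFF000),
+		 * such as domain-controller and other well-known fabric
+		 * service addresses; only real N_Ports are reported.
+		 */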
+		if (bfa_ntoh3b(rport->pid) > 0xFFF000) {
+			qe = bfa_q_next(qe);
+			bfa_trc(fcs, (u32) rport->pwwn);
+			bfa_trc(fcs, rport->pid);
+			bfa_trc(fcs, i);
+			continue;
+		}
+
+		if (bwwn) {
+			if (!memcmp(&wwn, &rport->pwwn, 8))
+				break;
+		} else {
+			if (i == index)
+				break;
+		}
+
+		i++;
+		qe = bfa_q_next(qe);
+	}
+
+	bfa_trc(fcs, i);
+	if (rport)
+		return rport->pwwn;
+	else
+		return (wwn_t) 0;
+}
+
+void
+bfa_fcs_lport_get_rport_quals(struct bfa_fcs_lport_s *port,
+		struct bfa_rport_qualifier_s rports[], int *nrports)
+{
+	struct list_head	*qh, *qe;
+	struct bfa_fcs_rport_s *rport = NULL;
+	int	i;
+	struct bfa_fcs_s	*fcs;
+
+	if (port == NULL || rports == NULL || *nrports == 0)
+		return;
+
+	fcs = port->fcs;
+	bfa_trc(fcs, (u32) *nrports);
+
+	i = 0;
+	qh = &port->rport_q;
+	qe = bfa_q_first(qh);
+
+	while ((qe != qh) && (i < *nrports)) {
+		rport = (struct bfa_fcs_rport_s *) qe;
+		if (bfa_ntoh3b(rport->pid) > 0xFFF000) {
+			qe = bfa_q_next(qe);
+			bfa_trc(fcs, (u32) rport->pwwn);
+			bfa_trc(fcs, rport->pid);
+			bfa_trc(fcs, i);
+			continue;
+		}
+
+		if (!rport->pwwn && !rport->pid) {
+			qe = bfa_q_next(qe);
+			continue;
+		}
+
+		rports[i].pwwn = rport->pwwn;
+		rports[i].pid = rport->pid;
+
+		i++;
+		qe = bfa_q_next(qe);
+	}
+
+	bfa_trc(fcs, i);
+	*nrports = i;
+}
+
+/*
+ * Iterates through all the rports in the given port to
+ * determine the maximum operating speed.
+ *
+ * !!!! To be used in TRL (Target Rate Limiting) functionality only !!!!
+ */
+bfa_port_speed_t
+bfa_fcs_lport_get_rport_max_speed(bfa_fcs_lport_t *port)
+{
+	struct list_head *qh, *qe;
+	struct bfa_fcs_rport_s *rport = NULL;
+	struct bfa_fcs_s	*fcs;
+	bfa_port_speed_t max_speed = 0;
+	struct bfa_port_attr_s port_attr;
+	bfa_port_speed_t port_speed, rport_speed;
+	bfa_boolean_t trl_enabled;
+
+	if (port == NULL)
+		return 0;
+
+	fcs = port->fcs;
+	trl_enabled = bfa_fcport_is_ratelim(port->fcs->bfa);
+
+	/* Get Physical port's current speed */
+	bfa_fcport_get_attr(port->fcs->bfa, &port_attr);
+	port_speed = port_attr.speed;
+	bfa_trc(fcs, port_speed);
+
+	qh = &port->rport_q;
+	qe = bfa_q_first(qh);
+
+	while (qe != qh) {
+		rport = (struct bfa_fcs_rport_s *) qe;
+		if ((bfa_ntoh3b(rport->pid) > 0xFFF000) ||
+			(bfa_fcs_rport_get_state(rport) == BFA_RPORT_OFFLINE) ||
+			(rport->scsi_function != BFA_RPORT_TARGET)) {
+			qe = bfa_q_next(qe);
+			continue;
+		}
+
+		rport_speed = rport->rpf.rpsc_speed;
+		if ((trl_enabled) && (rport_speed ==
+			BFA_PORT_SPEED_UNKNOWN)) {
+			/* Use default ratelim speed setting */
+			rport_speed =
+				bfa_fcport_get_ratelim_speed(port->fcs->bfa);
+		}
+
+		if (rport_speed > max_speed)
+			max_speed = rport_speed;
+
+		qe = bfa_q_next(qe);
+	}
+
+	if (max_speed > port_speed)
+		max_speed = port_speed;
+
+	bfa_trc(fcs, max_speed);
+	return max_speed;
+}
+
+struct bfa_fcs_lport_s *
+bfa_fcs_lookup_port(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t lpwwn)
+{
+	struct bfa_fcs_vport_s *vport;
+	bfa_fcs_vf_t   *vf;
+
+	WARN_ON(fcs == NULL);
+
+	vf = bfa_fcs_vf_lookup(fcs, vf_id);
+	if (vf == NULL) {
+		bfa_trc(fcs, vf_id);
+		return NULL;
+	}
+
+	if (!lpwwn || (vf->bport.port_cfg.pwwn == lpwwn))
+		return &vf->bport;
+
+	vport = bfa_fcs_fabric_vport_lookup(vf, lpwwn);
+	if (vport)
+		return &vport->lport;
+
+	return NULL;
+}
+
+/*
+ *  API corresponding to NPIV_VPORT_GETINFO.
+ */
+void
+bfa_fcs_lport_get_info(struct bfa_fcs_lport_s *port,
+	 struct bfa_lport_info_s *port_info)
+{
+
+	bfa_trc(port->fcs, port->fabric->fabric_name);
+
+	if (port->vport == NULL) {
+		/*
+		 * This is a Physical port
+		 */
+		port_info->port_type = BFA_LPORT_TYPE_PHYSICAL;
+
+		/*
+		 * @todo : need to fix the state & reason
+		 */
+		port_info->port_state = 0;
+		port_info->offline_reason = 0;
+
+		port_info->port_wwn = bfa_fcs_lport_get_pwwn(port);
+		port_info->node_wwn = bfa_fcs_lport_get_nwwn(port);
+
+		port_info->max_vports_supp =
+			bfa_lps_get_max_vport(port->fcs->bfa);
+		port_info->num_vports_inuse =
+			port->fabric->num_vports;
+		port_info->max_rports_supp = BFA_FCS_MAX_RPORTS_SUPP;
+		port_info->num_rports_inuse = port->num_rports;
+	} else {
+		/*
+		 * This is a virtual port
+		 */
+		port_info->port_type = BFA_LPORT_TYPE_VIRTUAL;
+
+		/*
+		 * @todo : need to fix the state & reason
+		 */
+		port_info->port_state = 0;
+		port_info->offline_reason = 0;
+
+		port_info->port_wwn = bfa_fcs_lport_get_pwwn(port);
+		port_info->node_wwn = bfa_fcs_lport_get_nwwn(port);
+	}
+}
+
+void
+bfa_fcs_lport_get_stats(struct bfa_fcs_lport_s *fcs_port,
+	 struct bfa_lport_stats_s *port_stats)
+{
+	*port_stats = fcs_port->stats;
+}
+
+void
+bfa_fcs_lport_clear_stats(struct bfa_fcs_lport_s *fcs_port)
+{
+	memset(&fcs_port->stats, 0, sizeof(struct bfa_lport_stats_s));
+}
+
+/*
+ * Let new loop map create missing rports
+ */
+void
+bfa_fcs_lport_lip_scn_online(struct bfa_fcs_lport_s *port)
+{
+	bfa_fcs_lport_loop_online(port);
+}
+
+/*
+ * FCS virtual port state machine
+ */
+
+#define __vport_fcs(__vp)       ((__vp)->lport.fcs)
+#define __vport_pwwn(__vp)      ((__vp)->lport.port_cfg.pwwn)
+#define __vport_nwwn(__vp)      ((__vp)->lport.port_cfg.nwwn)
+#define __vport_bfa(__vp)       ((__vp)->lport.fcs->bfa)
+#define __vport_fcid(__vp)      ((__vp)->lport.pid)
+#define __vport_fabric(__vp)    ((__vp)->lport.fabric)
+#define __vport_vfid(__vp)      ((__vp)->lport.fabric->vf_id)
+
+#define BFA_FCS_VPORT_MAX_RETRIES  5
+/*
+ * Forward declarations
+ */
+static void     bfa_fcs_vport_do_fdisc(struct bfa_fcs_vport_s *vport);
+static void     bfa_fcs_vport_timeout(void *vport_arg);
+static void     bfa_fcs_vport_do_logo(struct bfa_fcs_vport_s *vport);
+static void     bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport);
+
+/*
+ *  fcs_vport_sm FCS virtual port state machine
+ */
+
+/*
+ * VPort State Machine events
+ */
+enum bfa_fcs_vport_event {
+	BFA_FCS_VPORT_SM_CREATE = 1,	/*  vport create event */
+	BFA_FCS_VPORT_SM_DELETE = 2,	/*  vport delete event */
+	BFA_FCS_VPORT_SM_START = 3,	/*  vport start request */
+	BFA_FCS_VPORT_SM_STOP = 4,	/*  vport stop request */
+	BFA_FCS_VPORT_SM_ONLINE = 5,	/*  fabric online */
+	BFA_FCS_VPORT_SM_OFFLINE = 6,	/*  fabric offline event */
+	BFA_FCS_VPORT_SM_FRMSENT = 7,	/*  fdisc/logo sent events */
+	BFA_FCS_VPORT_SM_RSP_OK = 8,	/*  good response */
+	BFA_FCS_VPORT_SM_RSP_ERROR = 9,	/*  error/bad response */
+	BFA_FCS_VPORT_SM_TIMEOUT = 10,	/*  delay timer event */
+	BFA_FCS_VPORT_SM_DELCOMP = 11,	/*  lport delete completion */
+	BFA_FCS_VPORT_SM_RSP_DUP_WWN = 12,	/*  Dup wwn error */
+	BFA_FCS_VPORT_SM_RSP_FAILED = 13,	/*  non-retryable failure */
+	BFA_FCS_VPORT_SM_STOPCOMP = 14,	/* lport stop completion */
+	BFA_FCS_VPORT_SM_FABRIC_MAX = 15, /* max vports on fabric */
+};
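+
+/*
+ * Normal vport lifecycle (illustrative): uninit --CREATE--> created
+ * --START--> fdisc --RSP_OK--> online; DELETE from online goes through
+ * deleting/logo before returning to uninit. Error responses in fdisc
+ * either retry on a timer or park the vport in the offline/error states.
+ */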
+
+static void     bfa_fcs_vport_sm_uninit(struct bfa_fcs_vport_s *vport,
+					enum bfa_fcs_vport_event event);
+static void     bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport,
+					 enum bfa_fcs_vport_event event);
+static void     bfa_fcs_vport_sm_offline(struct bfa_fcs_vport_s *vport,
+					 enum bfa_fcs_vport_event event);
+static void     bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport,
+				       enum bfa_fcs_vport_event event);
+static void     bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport,
+					     enum bfa_fcs_vport_event event);
+static void	bfa_fcs_vport_sm_fdisc_rsp_wait(struct bfa_fcs_vport_s *vport,
+					enum bfa_fcs_vport_event event);
+static void     bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport,
+					enum bfa_fcs_vport_event event);
+static void     bfa_fcs_vport_sm_deleting(struct bfa_fcs_vport_s *vport,
+					  enum bfa_fcs_vport_event event);
+static void     bfa_fcs_vport_sm_cleanup(struct bfa_fcs_vport_s *vport,
+					 enum bfa_fcs_vport_event event);
+static void     bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport,
+				      enum bfa_fcs_vport_event event);
+static void     bfa_fcs_vport_sm_error(struct bfa_fcs_vport_s *vport,
+				      enum bfa_fcs_vport_event event);
+static void	bfa_fcs_vport_sm_stopping(struct bfa_fcs_vport_s *vport,
+					enum bfa_fcs_vport_event event);
+static void	bfa_fcs_vport_sm_logo_for_stop(struct bfa_fcs_vport_s *vport,
+					enum bfa_fcs_vport_event event);
+
+static struct bfa_sm_table_s  vport_sm_table[] = {
+	{BFA_SM(bfa_fcs_vport_sm_uninit), BFA_FCS_VPORT_UNINIT},
+	{BFA_SM(bfa_fcs_vport_sm_created), BFA_FCS_VPORT_CREATED},
+	{BFA_SM(bfa_fcs_vport_sm_offline), BFA_FCS_VPORT_OFFLINE},
+	{BFA_SM(bfa_fcs_vport_sm_fdisc), BFA_FCS_VPORT_FDISC},
+	{BFA_SM(bfa_fcs_vport_sm_fdisc_retry), BFA_FCS_VPORT_FDISC_RETRY},
+	{BFA_SM(bfa_fcs_vport_sm_fdisc_rsp_wait), BFA_FCS_VPORT_FDISC_RSP_WAIT},
+	{BFA_SM(bfa_fcs_vport_sm_online), BFA_FCS_VPORT_ONLINE},
+	{BFA_SM(bfa_fcs_vport_sm_deleting), BFA_FCS_VPORT_DELETING},
+	{BFA_SM(bfa_fcs_vport_sm_cleanup), BFA_FCS_VPORT_CLEANUP},
+	{BFA_SM(bfa_fcs_vport_sm_logo), BFA_FCS_VPORT_LOGO},
+	{BFA_SM(bfa_fcs_vport_sm_error), BFA_FCS_VPORT_ERROR}
+};
+
+/*
+ * Beginning state.
+ */
+static void
+bfa_fcs_vport_sm_uninit(struct bfa_fcs_vport_s *vport,
+			enum bfa_fcs_vport_event event)
+{
+	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
+	bfa_trc(__vport_fcs(vport), event);
+
+	switch (event) {
+	case BFA_FCS_VPORT_SM_CREATE:
+		bfa_sm_set_state(vport, bfa_fcs_vport_sm_created);
+		bfa_fcs_fabric_addvport(__vport_fabric(vport), vport);
+		break;
+
+	default:
+		bfa_sm_fault(__vport_fcs(vport), event);
+	}
+}
+
+/*
+ * Created state - a start event is required to start up the state machine.
+ */
+static void
+bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport,
+			enum bfa_fcs_vport_event event)
+{
+	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
+	bfa_trc(__vport_fcs(vport), event);
+
+	switch (event) {
+	case BFA_FCS_VPORT_SM_START:
+		if (bfa_sm_cmp_state(__vport_fabric(vport),
+					bfa_fcs_fabric_sm_online)
+		    && bfa_fcs_fabric_npiv_capable(__vport_fabric(vport))) {
+			bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc);
+			bfa_fcs_vport_do_fdisc(vport);
+		} else {
+			/*
+			 * Fabric is offline or not NPIV capable, stay in
+			 * offline state.
+			 */
+			vport->vport_stats.fab_no_npiv++;
+			bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
+		}
+		break;
+
+	case BFA_FCS_VPORT_SM_DELETE:
+		bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
+		bfa_fcs_lport_delete(&vport->lport);
+		break;
+
+	case BFA_FCS_VPORT_SM_ONLINE:
+	case BFA_FCS_VPORT_SM_OFFLINE:
+		/*
+		 * Ignore ONLINE/OFFLINE events from fabric
+		 * till vport is started.
+		 */
+		break;
+
+	default:
+		bfa_sm_fault(__vport_fcs(vport), event);
+	}
+}
+
+/*
+ * Offline state - awaiting ONLINE event from fabric SM.
+ */
+static void
+bfa_fcs_vport_sm_offline(struct bfa_fcs_vport_s *vport,
+			enum bfa_fcs_vport_event event)
+{
+	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
+	bfa_trc(__vport_fcs(vport), event);
+
+	switch (event) {
+	case BFA_FCS_VPORT_SM_DELETE:
+		bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
+		bfa_fcs_lport_delete(&vport->lport);
+		break;
+
+	case BFA_FCS_VPORT_SM_ONLINE:
+		bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc);
+		vport->fdisc_retries = 0;
+		bfa_fcs_vport_do_fdisc(vport);
+		break;
+
+	case BFA_FCS_VPORT_SM_STOP:
+		bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
+		bfa_sm_send_event(&vport->lport, BFA_FCS_PORT_SM_STOP);
+		break;
+
+	case BFA_FCS_VPORT_SM_OFFLINE:
+		/*
+		 * This can happen if the vport couldn't be initialized
+		 * because NPIV was not enabled on the switch. In that
+		 * case we leave the vport in the offline state. However,
+		 * the link can also go down and cause this event to be
+		 * sent when we are already offline. Ignore it.
+		 */
+		break;
+
+	default:
+		bfa_sm_fault(__vport_fcs(vport), event);
+	}
+}
+
+
+/*
+ * FDISC is sent and awaiting reply from fabric.
+ */
+static void
+bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport,
+			enum bfa_fcs_vport_event event)
+{
+	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
+	bfa_trc(__vport_fcs(vport), event);
+
+	switch (event) {
+	case BFA_FCS_VPORT_SM_DELETE:
+		bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc_rsp_wait);
+		break;
+
+	case BFA_FCS_VPORT_SM_OFFLINE:
+		bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
+		bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
+		break;
+
+	case BFA_FCS_VPORT_SM_RSP_OK:
+		bfa_sm_set_state(vport, bfa_fcs_vport_sm_online);
+		bfa_fcs_lport_online(&vport->lport);
+		break;
+
+	case BFA_FCS_VPORT_SM_RSP_ERROR:
+		bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc_retry);
+		bfa_timer_start(__vport_bfa(vport), &vport->timer,
+				    bfa_fcs_vport_timeout, vport,
+				    BFA_FCS_RETRY_TIMEOUT);
+		break;
+
+	case BFA_FCS_VPORT_SM_RSP_FAILED:
+	case BFA_FCS_VPORT_SM_FABRIC_MAX:
+		bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
+		break;
+
+	case BFA_FCS_VPORT_SM_RSP_DUP_WWN:
+		bfa_sm_set_state(vport, bfa_fcs_vport_sm_error);
+		break;
+
+	default:
+		bfa_sm_fault(__vport_fcs(vport), event);
+	}
+}
+
+/*
+ * FDISC attempt failed - a timer is active to retry FDISC.
+ */
+static void
+bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport,
+			     enum bfa_fcs_vport_event event)
+{
+	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
+	bfa_trc(__vport_fcs(vport), event);
+
+	switch (event) {
+	case BFA_FCS_VPORT_SM_DELETE:
+		bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
+		bfa_timer_stop(&vport->timer);
+		bfa_fcs_lport_delete(&vport->lport);
+		break;
+
+	case BFA_FCS_VPORT_SM_OFFLINE:
+		bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
+		bfa_timer_stop(&vport->timer);
+		break;
+
+	case BFA_FCS_VPORT_SM_TIMEOUT:
+		bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc);
+		vport->vport_stats.fdisc_retries++;
+		vport->fdisc_retries++;
+		bfa_fcs_vport_do_fdisc(vport);
+		break;
+
+	default:
+		bfa_sm_fault(__vport_fcs(vport), event);
+	}
+}
+
+/*
+ * FDISC is in progress and we got a vport delete request -
+ * this is a wait state; once the FDISC response arrives, we
+ * transition to the appropriate state based on its status.
+ */
+static void
+bfa_fcs_vport_sm_fdisc_rsp_wait(struct bfa_fcs_vport_s *vport,
+				enum bfa_fcs_vport_event event)
+{
+	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
+	bfa_trc(__vport_fcs(vport), event);
+
+	switch (event) {
+	case BFA_FCS_VPORT_SM_RSP_OK:
+		bfa_sm_set_state(vport, bfa_fcs_vport_sm_deleting);
+		bfa_fcs_lport_delete(&vport->lport);
+		break;
+
+	case BFA_FCS_VPORT_SM_DELETE:
+		break;
+
+	case BFA_FCS_VPORT_SM_OFFLINE:
+	case BFA_FCS_VPORT_SM_RSP_ERROR:
+	case BFA_FCS_VPORT_SM_RSP_FAILED:
+	case BFA_FCS_VPORT_SM_FABRIC_MAX:
+	case BFA_FCS_VPORT_SM_RSP_DUP_WWN:
+		bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
+		bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
+		bfa_fcs_lport_delete(&vport->lport);
+		break;
+
+	default:
+		bfa_sm_fault(__vport_fcs(vport), event);
+	}
+}
+
+/*
+ * Vport is online (FDISC is complete).
+ */
+static void
+bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport,
+			enum bfa_fcs_vport_event event)
+{
+	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
+	bfa_trc(__vport_fcs(vport), event);
+
+	switch (event) {
+	case BFA_FCS_VPORT_SM_DELETE:
+		bfa_sm_set_state(vport, bfa_fcs_vport_sm_deleting);
+		bfa_fcs_lport_delete(&vport->lport);
+		break;
+
+	case BFA_FCS_VPORT_SM_STOP:
+		bfa_sm_set_state(vport, bfa_fcs_vport_sm_stopping);
+		bfa_sm_send_event(&vport->lport, BFA_FCS_PORT_SM_STOP);
+		break;
+
+	case BFA_FCS_VPORT_SM_OFFLINE:
+		bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
+		bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
+		bfa_fcs_lport_offline(&vport->lport);
+		break;
+
+	default:
+		bfa_sm_fault(__vport_fcs(vport), event);
+	}
+}
+
+/*
+ * Vport is being stopped - awaiting lport stop completion to send
+ * LOGO to fabric.
+ */
+static void
+bfa_fcs_vport_sm_stopping(struct bfa_fcs_vport_s *vport,
+			  enum bfa_fcs_vport_event event)
+{
+	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
+	bfa_trc(__vport_fcs(vport), event);
+
+	switch (event) {
+	case BFA_FCS_VPORT_SM_STOPCOMP:
+		bfa_sm_set_state(vport, bfa_fcs_vport_sm_logo_for_stop);
+		bfa_fcs_vport_do_logo(vport);
+		break;
+
+	case BFA_FCS_VPORT_SM_OFFLINE:
+		bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
+		break;
+
+	default:
+		bfa_sm_fault(__vport_fcs(vport), event);
+	}
+}
+
+/*
+ * Vport is being deleted - awaiting lport delete completion to send
+ * LOGO to fabric.
+ */
+static void
+bfa_fcs_vport_sm_deleting(struct bfa_fcs_vport_s *vport,
+			enum bfa_fcs_vport_event event)
+{
+	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
+	bfa_trc(__vport_fcs(vport), event);
+
+	switch (event) {
+	case BFA_FCS_VPORT_SM_DELETE:
+		break;
+
+	case BFA_FCS_VPORT_SM_DELCOMP:
+		bfa_sm_set_state(vport, bfa_fcs_vport_sm_logo);
+		bfa_fcs_vport_do_logo(vport);
+		break;
+
+	case BFA_FCS_VPORT_SM_OFFLINE:
+		bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
+		break;
+
+	default:
+		bfa_sm_fault(__vport_fcs(vport), event);
+	}
+}
+
+/*
+ * Error State.
+ * This state is entered when vport creation fails due to errors
+ * such as a duplicate WWN. In this state the only operation
+ * allowed is a vport delete.
+ */
+static void
+bfa_fcs_vport_sm_error(struct bfa_fcs_vport_s *vport,
+			enum bfa_fcs_vport_event event)
+{
+	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
+	bfa_trc(__vport_fcs(vport), event);
+
+	switch (event) {
+	case BFA_FCS_VPORT_SM_DELETE:
+		bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
+		bfa_fcs_lport_delete(&vport->lport);
+		break;
+
+	default:
+		bfa_trc(__vport_fcs(vport), event);
+	}
+}
+
+/*
+ * Lport cleanup is in progress since vport is being deleted. Fabric is
+ * offline, so no LOGO is needed to complete vport deletion.
+ */
+static void
+bfa_fcs_vport_sm_cleanup(struct bfa_fcs_vport_s *vport,
+			enum bfa_fcs_vport_event event)
+{
+	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
+	bfa_trc(__vport_fcs(vport), event);
+
+	switch (event) {
+	case BFA_FCS_VPORT_SM_DELCOMP:
+		bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit);
+		bfa_fcs_vport_free(vport);
+		break;
+
+	case BFA_FCS_VPORT_SM_STOPCOMP:
+		bfa_sm_set_state(vport, bfa_fcs_vport_sm_created);
+		break;
+
+	case BFA_FCS_VPORT_SM_DELETE:
+		break;
+
+	default:
+		bfa_sm_fault(__vport_fcs(vport), event);
+	}
+}
+
+/*
+ * LOGO is sent to fabric. Vport stop is in progress. Lport stop cleanup
+ * is done.
+ */
+static void
+bfa_fcs_vport_sm_logo_for_stop(struct bfa_fcs_vport_s *vport,
+			       enum bfa_fcs_vport_event event)
+{
+	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
+	bfa_trc(__vport_fcs(vport), event);
+
+	switch (event) {
+	case BFA_FCS_VPORT_SM_OFFLINE:
+		bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
+		/*
+		 * !!! fall through !!!
+		 */
+
+	case BFA_FCS_VPORT_SM_RSP_OK:
+	case BFA_FCS_VPORT_SM_RSP_ERROR:
+		bfa_sm_set_state(vport, bfa_fcs_vport_sm_created);
+		break;
+
+	default:
+		bfa_sm_fault(__vport_fcs(vport), event);
+	}
+}
+
+/*
+ * LOGO is sent to fabric. Vport delete is in progress. Lport delete cleanup
+ * is done.
+ */
+static void
+bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport,
+			enum bfa_fcs_vport_event event)
+{
+	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
+	bfa_trc(__vport_fcs(vport), event);
+
+	switch (event) {
+	case BFA_FCS_VPORT_SM_OFFLINE:
+		bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
+		/*
+		 * !!! fall through !!!
+		 */
+
+	case BFA_FCS_VPORT_SM_RSP_OK:
+	case BFA_FCS_VPORT_SM_RSP_ERROR:
+		bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit);
+		bfa_fcs_vport_free(vport);
+		break;
+
+	case BFA_FCS_VPORT_SM_DELETE:
+		break;
+
+	default:
+		bfa_sm_fault(__vport_fcs(vport), event);
+	}
+}
+
+
+
+/*
+ *  fcs_vport_private FCS virtual port private functions
+ */
+/*
+ * Send AEN notification
+ */
+static void
+bfa_fcs_vport_aen_post(struct bfa_fcs_lport_s *port,
+		       enum bfa_lport_aen_event event)
+{
+	struct bfad_s *bfad = (struct bfad_s *)port->fabric->fcs->bfad;
+	struct bfa_aen_entry_s  *aen_entry;
+
+	bfad_get_aen_entry(bfad, aen_entry);
+	if (!aen_entry)
+		return;
+
+	aen_entry->aen_data.lport.vf_id = port->fabric->vf_id;
+	aen_entry->aen_data.lport.roles = port->port_cfg.roles;
+	aen_entry->aen_data.lport.ppwwn = bfa_fcs_lport_get_pwwn(
+					bfa_fcs_get_base_port(port->fcs));
+	aen_entry->aen_data.lport.lpwwn = bfa_fcs_lport_get_pwwn(port);
+
+	/* Send the AEN notification */
+	bfad_im_post_vendor_event(aen_entry, bfad, ++port->fcs->fcs_aen_seq,
+				  BFA_AEN_CAT_LPORT, event);
+}
+
+/*
+ * This routine will be called to send an FDISC command. FDISC behaves
+ * like FLOGI but is used for NPIV: it acquires an additional N_Port ID
+ * on a link that is already logged in to the fabric.
+ */
+static void
+bfa_fcs_vport_do_fdisc(struct bfa_fcs_vport_s *vport)
+{
+	bfa_lps_fdisc(vport->lps, vport,
+		bfa_fcport_get_maxfrsize(__vport_bfa(vport)),
+		__vport_pwwn(vport), __vport_nwwn(vport));
+	vport->vport_stats.fdisc_sent++;
+}
+
+static void
+bfa_fcs_vport_fdisc_rejected(struct bfa_fcs_vport_s *vport)
+{
+	u8		lsrjt_rsn = vport->lps->lsrjt_rsn;
+	u8		lsrjt_expl = vport->lps->lsrjt_expl;
+
+	bfa_trc(__vport_fcs(vport), lsrjt_rsn);
+	bfa_trc(__vport_fcs(vport), lsrjt_expl);
+
+	/* For certain reason codes, we don't want to retry. */
+	switch (vport->lps->lsrjt_expl) {
+	case FC_LS_RJT_EXP_INV_PORT_NAME: /* by brocade */
+	case FC_LS_RJT_EXP_INVALID_NPORT_ID: /* by Cisco */
+		if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES)
+			bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
+		else {
+			bfa_fcs_vport_aen_post(&vport->lport,
+					BFA_LPORT_AEN_NPIV_DUP_WWN);
+			bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_DUP_WWN);
+		}
+		break;
+
+	case FC_LS_RJT_EXP_INSUFF_RES:
+		/*
+		 * This means max logins per port/switch setting on the
+		 * switch was exceeded.
+		 */
+		if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES)
+			bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
+		else {
+			bfa_fcs_vport_aen_post(&vport->lport,
+					BFA_LPORT_AEN_NPIV_FABRIC_MAX);
+			bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_FABRIC_MAX);
+		}
+		break;
+
+	default:
+		if (vport->fdisc_retries == 0)
+			bfa_fcs_vport_aen_post(&vport->lport,
+					BFA_LPORT_AEN_NPIV_UNKNOWN);
+		bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
+	}
+}
+
+/*
+ *	Called to send a logout to the fabric. Used when a V-Port is
+ *	deleted/stopped.
+ */
+static void
+bfa_fcs_vport_do_logo(struct bfa_fcs_vport_s *vport)
+{
+	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
+
+	vport->vport_stats.logo_sent++;
+	bfa_lps_fdisclogo(vport->lps);
+}
+
+
+/*
+ *     This routine will be called by bfa_timer on timer timeouts.
+ *
+ *	param[in]	vport_arg - pointer to bfa_fcs_vport_s.
+ *
+ *	return
+ *		void
+ */
+static void
+bfa_fcs_vport_timeout(void *vport_arg)
+{
+	struct bfa_fcs_vport_s *vport = (struct bfa_fcs_vport_s *) vport_arg;
+
+	vport->vport_stats.fdisc_timeouts++;
+	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_TIMEOUT);
+}
+
+static void
+bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport)
+{
+	struct bfad_vport_s *vport_drv =
+			(struct bfad_vport_s *)vport->vport_drv;
+
+	bfa_fcs_fabric_delvport(__vport_fabric(vport), vport);
+	bfa_lps_delete(vport->lps);
+
+	if (vport_drv->comp_del) {
+		complete(vport_drv->comp_del);
+		return;
+	}
+
+	/*
+	 * We queue the vport delete work to the IM work_q from here.
+	 * The memory for the bfad_vport_s is freed from the FC function
+	 * template vport_delete entry point.
+	 */
+	bfad_im_port_delete(vport_drv->drv_port.bfad, &vport_drv->drv_port);
+}
+
+/*
+ *  fcs_vport_public FCS virtual port public interfaces
+ */
+
+/*
+ * Online notification from fabric SM.
+ */
+void
+bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport)
+{
+	vport->vport_stats.fab_online++;
+	if (bfa_fcs_fabric_npiv_capable(__vport_fabric(vport)))
+		bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE);
+	else
+		vport->vport_stats.fab_no_npiv++;
+}
+
+/*
+ * Offline notification from fabric SM.
+ */
+void
+bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport)
+{
+	vport->vport_stats.fab_offline++;
+	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_OFFLINE);
+}
+
+/*
+ * Cleanup notification from fabric SM on link timer expiry.
+ */
+void
+bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport)
+{
+	vport->vport_stats.fab_cleanup++;
+}
+
+/*
+ * Stop notification from fabric SM. To be invoked from within FCS.
+ */
+void
+bfa_fcs_vport_fcs_stop(struct bfa_fcs_vport_s *vport)
+{
+	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_STOP);
+}
+
+/*
+ * delete notification from fabric SM. To be invoked from within FCS.
+ */
+void
+bfa_fcs_vport_fcs_delete(struct bfa_fcs_vport_s *vport)
+{
+	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELETE);
+}
+
+/*
+ * Stop completion callback from associated lport
+ */
+void
+bfa_fcs_vport_stop_comp(struct bfa_fcs_vport_s *vport)
+{
+	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_STOPCOMP);
+}
+
+/*
+ * Delete completion callback from associated lport
+ */
+void
+bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport)
+{
+	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELCOMP);
+}
+
+
+
+/*
+ *  fcs_vport_api Virtual port API
+ */
+
+/*
+ *	Use this function to instantiate a new FCS vport object. This
+ *	function will not trigger any HW initialization process (which will be
+ *	done in vport_start() call)
+ *
+ *	param[in] vport	-		pointer to bfa_fcs_vport_t. This space
+ *					needs to be allocated by the driver.
+ *	param[in] fcs		-	FCS instance
+ *	param[in] vport_cfg	-	vport configuration
+ *	param[in] vf_id		-	VF_ID if vport is created within a VF.
+ *					FC_VF_ID_NULL to specify base fabric.
+ *	param[in] vport_drv	-	Opaque handle back to the driver's vport
+ *					structure
+ *
+ *	retval BFA_STATUS_OK - on success.
+ *	retval BFA_STATUS_FAILED - on failure.
+ */
+bfa_status_t
+bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs,
+		u16 vf_id, struct bfa_lport_cfg_s *vport_cfg,
+		struct bfad_vport_s *vport_drv)
+{
+	if (vport_cfg->pwwn == 0)
+		return BFA_STATUS_INVALID_WWN;
+
+	if (bfa_fcs_lport_get_pwwn(&fcs->fabric.bport) == vport_cfg->pwwn)
+		return BFA_STATUS_VPORT_WWN_BP;
+
+	if (bfa_fcs_vport_lookup(fcs, vf_id, vport_cfg->pwwn) != NULL)
+		return BFA_STATUS_VPORT_EXISTS;
+
+	if (fcs->fabric.num_vports ==
+			bfa_lps_get_max_vport(fcs->bfa))
+		return BFA_STATUS_VPORT_MAX;
+
+	vport->lps = bfa_lps_alloc(fcs->bfa);
+	if (!vport->lps)
+		return BFA_STATUS_VPORT_MAX;
+
+	vport->vport_drv = vport_drv;
+	vport_cfg->preboot_vp = BFA_FALSE;
+
+	bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit);
+	bfa_fcs_lport_attach(&vport->lport, fcs, vf_id, vport);
+	bfa_fcs_lport_init(&vport->lport, vport_cfg);
+	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_CREATE);
+
+	return BFA_STATUS_OK;
+}
+
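+/*
+ * Typical driver-side usage (illustrative sketch only):
+ *
+ *	if (bfa_fcs_vport_create(vport, fcs, FC_VF_ID_NULL,
+ *				 &vport_cfg, vport_drv) == BFA_STATUS_OK)
+ *		bfa_fcs_vport_start(vport);
+ */
+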
+/*
+ *	Use this function to instantiate a new FCS PBC vport object. This
+ *	function will not trigger any HW initialization process (which will be
+ *	done in vport_start() call)
+ *
+ *	param[in] vport	-	pointer to bfa_fcs_vport_t. This space
+ *				needs to be allocated by the driver.
+ *	param[in] fcs	-	FCS instance
+ *	param[in] vport_cfg	-	vport configuration
+ *	param[in] vf_id		-	VF_ID if vport is created within a VF.
+ *					FC_VF_ID_NULL to specify base fabric.
+ *	param[in] vport_drv	-	Opaque handle back to the driver's vport
+ *					structure
+ *
+ *	retval BFA_STATUS_OK - on success.
+ *	retval BFA_STATUS_FAILED - on failure.
+ */
+bfa_status_t
+bfa_fcs_pbc_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs,
+			u16 vf_id, struct bfa_lport_cfg_s *vport_cfg,
+			struct bfad_vport_s *vport_drv)
+{
+	bfa_status_t rc;
+
+	rc = bfa_fcs_vport_create(vport, fcs, vf_id, vport_cfg, vport_drv);
+	vport->lport.port_cfg.preboot_vp = BFA_TRUE;
+
+	return rc;
+}
+
+/*
+ *	Use this function to find out if this is a pbc vport or not.
+ *
+ * @param[in] vport - pointer to bfa_fcs_vport_t.
+ *
+ * @returns BFA_TRUE if this is a pbc vport, else BFA_FALSE
+ */
+bfa_boolean_t
+bfa_fcs_is_pbc_vport(struct bfa_fcs_vport_s *vport)
+{
+	if (vport && (vport->lport.port_cfg.preboot_vp == BFA_TRUE))
+		return BFA_TRUE;
+	else
+		return BFA_FALSE;
+}
+
+/*
+ * Use this function to initialize the vport.
+ *
+ * @param[in] vport - pointer to bfa_fcs_vport_t.
+ *
+ * @returns BFA_STATUS_OK on success
+ */
+bfa_status_t
+bfa_fcs_vport_start(struct bfa_fcs_vport_s *vport)
+{
+	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_START);
+
+	return BFA_STATUS_OK;
+}
+
+/*
+ *	Use this function to quiesce the vport object. This function returns
+ *	immediately; when the vport is actually stopped, the
+ *	bfa_drv_vport_stop_cb() will be called.
+ *
+ *	param[in] vport - pointer to bfa_fcs_vport_t.
+ *
+ *	return BFA_STATUS_OK
+ */
+bfa_status_t
+bfa_fcs_vport_stop(struct bfa_fcs_vport_s *vport)
+{
+	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_STOP);
+
+	return BFA_STATUS_OK;
+}
+
+/*
+ *	Use this function to delete a vport object. The fabric object should
+ *	be stopped before this function call.
+ *
+ *	!!!!!!! Do not invoke this from within FCS  !!!!!!!
+ *
+ *	param[in] vport - pointer to bfa_fcs_vport_t.
+ *
+ *	return BFA_STATUS_OK on success, BFA_STATUS_PBC for a preboot vport
+ */
+bfa_status_t
+bfa_fcs_vport_delete(struct bfa_fcs_vport_s *vport)
+{
+
+	if (vport->lport.port_cfg.preboot_vp)
+		return BFA_STATUS_PBC;
+
+	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELETE);
+
+	return BFA_STATUS_OK;
+}
+
+/*
+ *	Use this function to get vport's current status info.
+ *
+ *	param[in] vport		pointer to bfa_fcs_vport_t.
+ *	param[out] attr		pointer to return vport attributes
+ *
+ *	return None
+ */
+void
+bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport,
+			struct bfa_vport_attr_s *attr)
+{
+	if (vport == NULL || attr == NULL)
+		return;
+
+	memset(attr, 0, sizeof(struct bfa_vport_attr_s));
+
+	bfa_fcs_lport_get_attr(&vport->lport, &attr->port_attr);
+	attr->vport_state = bfa_sm_to_state(vport_sm_table, vport->sm);
+}
+
+
+/*
+ *	Lookup a virtual port. Excludes base port from lookup.
+ */
+struct bfa_fcs_vport_s *
+bfa_fcs_vport_lookup(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t vpwwn)
+{
+	struct bfa_fcs_vport_s *vport;
+	struct bfa_fcs_fabric_s *fabric;
+
+	bfa_trc(fcs, vf_id);
+	bfa_trc(fcs, vpwwn);
+
+	fabric = bfa_fcs_vf_lookup(fcs, vf_id);
+	if (!fabric) {
+		bfa_trc(fcs, vf_id);
+		return NULL;
+	}
+
+	vport = bfa_fcs_fabric_vport_lookup(fabric, vpwwn);
+	return vport;
+}
+
+/*
+ * FDISC Response
+ */
+void
+bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status)
+{
+	struct bfa_fcs_vport_s *vport = uarg;
+
+	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
+	bfa_trc(__vport_fcs(vport), status);
+
+	switch (status) {
+	case BFA_STATUS_OK:
+		/*
+		 * Initialize the V-Port fields
+		 */
+		__vport_fcid(vport) = vport->lps->lp_pid;
+		vport->vport_stats.fdisc_accepts++;
+		bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK);
+		break;
+
+	case BFA_STATUS_INVALID_MAC:
+		/* Only for CNA */
+		vport->vport_stats.fdisc_acc_bad++;
+		bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
+
+		break;
+
+	case BFA_STATUS_EPROTOCOL:
+		switch (vport->lps->ext_status) {
+		case BFA_EPROTO_BAD_ACCEPT:
+			vport->vport_stats.fdisc_acc_bad++;
+			break;
+
+		case BFA_EPROTO_UNKNOWN_RSP:
+			vport->vport_stats.fdisc_unknown_rsp++;
+			break;
+
+		default:
+			break;
+		}
+
+		if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES)
+			bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
+		else
+			bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_FAILED);
+
+		break;
+
+	case BFA_STATUS_ETIMER:
+		vport->vport_stats.fdisc_timeouts++;
+		if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES)
+			bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
+		else
+			bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_FAILED);
+		break;
+
+	case BFA_STATUS_FABRIC_RJT:
+		vport->vport_stats.fdisc_rejects++;
+		bfa_fcs_vport_fdisc_rejected(vport);
+		break;
+
+	default:
+		vport->vport_stats.fdisc_rsp_err++;
+		bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
+	}
+}
+
+/*
+ * LOGO response
+ */
+void
+bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg)
+{
+	struct bfa_fcs_vport_s *vport = uarg;
+	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK);
+}
+
+/*
+ * Received clear virtual link (FIP CVL, FCoE): bounce the vport through
+ * OFFLINE and ONLINE to force a fresh FDISC on the new virtual link.
+ */
+void
+bfa_cb_lps_cvl_event(void *bfad, void *uarg)
+{
+	struct bfa_fcs_vport_s *vport = uarg;
+
+	/* Send an Offline followed by an ONLINE */
+	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_OFFLINE);
+	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE);
+}
diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c
new file mode 100644
index 0000000..de50349
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_fcs_rport.c
@@ -0,0 +1,3465 @@
+/*
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014- QLogic Corporation.
+ * All rights reserved
+ * www.qlogic.com
+ *
+ * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+/*
+ *  rport.c Remote port implementation.
+ */
+
+#include "bfad_drv.h"
+#include "bfad_im.h"
+#include "bfa_fcs.h"
+#include "bfa_fcbuild.h"
+
+BFA_TRC_FILE(FCS, RPORT);
+
+static u32 bfa_fcs_rport_del_timeout =
+	BFA_FCS_RPORT_DEF_DEL_TIMEOUT * 1000;	/* in millisecs */
+/*
+ * bfa_fcs_rport_max_logins is max count of bfa_fcs_rports
+ * whereas DEF_CFG_NUM_RPORTS is max count of bfa_rports
+ */
+static u32 bfa_fcs_rport_max_logins = BFA_FCS_MAX_RPORT_LOGINS;
+
+/*
+ * forward declarations
+ */
+static struct bfa_fcs_rport_s *bfa_fcs_rport_alloc(
+		struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid);
+static void	bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport);
+static void	bfa_fcs_rport_hal_online(struct bfa_fcs_rport_s *rport);
+static void	bfa_fcs_rport_fcs_online_action(struct bfa_fcs_rport_s *rport);
+static void	bfa_fcs_rport_hal_online_action(struct bfa_fcs_rport_s *rport);
+static void	bfa_fcs_rport_fcs_offline_action(struct bfa_fcs_rport_s *rport);
+static void	bfa_fcs_rport_hal_offline_action(struct bfa_fcs_rport_s *rport);
+static void	bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport,
+					struct fc_logi_s *plogi);
+static void	bfa_fcs_rport_timeout(void *arg);
+static void	bfa_fcs_rport_send_plogi(void *rport_cbarg,
+					 struct bfa_fcxp_s *fcxp_alloced);
+static void	bfa_fcs_rport_send_plogiacc(void *rport_cbarg,
+					struct bfa_fcxp_s *fcxp_alloced);
+static void	bfa_fcs_rport_plogi_response(void *fcsarg,
+				struct bfa_fcxp_s *fcxp, void *cbarg,
+				bfa_status_t req_status, u32 rsp_len,
+				u32 resid_len, struct fchs_s *rsp_fchs);
+static void	bfa_fcs_rport_send_adisc(void *rport_cbarg,
+					 struct bfa_fcxp_s *fcxp_alloced);
+static void	bfa_fcs_rport_adisc_response(void *fcsarg,
+				struct bfa_fcxp_s *fcxp, void *cbarg,
+				bfa_status_t req_status, u32 rsp_len,
+				u32 resid_len, struct fchs_s *rsp_fchs);
+static void	bfa_fcs_rport_send_nsdisc(void *rport_cbarg,
+					 struct bfa_fcxp_s *fcxp_alloced);
+static void	bfa_fcs_rport_gidpn_response(void *fcsarg,
+				struct bfa_fcxp_s *fcxp, void *cbarg,
+				bfa_status_t req_status, u32 rsp_len,
+				u32 resid_len, struct fchs_s *rsp_fchs);
+static void	bfa_fcs_rport_gpnid_response(void *fcsarg,
+				struct bfa_fcxp_s *fcxp, void *cbarg,
+				bfa_status_t req_status, u32 rsp_len,
+				u32 resid_len, struct fchs_s *rsp_fchs);
+static void	bfa_fcs_rport_send_logo(void *rport_cbarg,
+					struct bfa_fcxp_s *fcxp_alloced);
+static void	bfa_fcs_rport_send_logo_acc(void *rport_cbarg);
+static void	bfa_fcs_rport_process_prli(struct bfa_fcs_rport_s *rport,
+					struct fchs_s *rx_fchs, u16 len);
+static void	bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport,
+				struct fchs_s *rx_fchs, u8 reason_code,
+					  u8 reason_code_expl);
+static void	bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport,
+				struct fchs_s *rx_fchs, u16 len);
+static void bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport);
+static void	bfa_fcs_rport_hal_offline(struct bfa_fcs_rport_s *rport);
+
+static void	bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport,
+					enum rport_event event);
+static void	bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport,
+						enum rport_event event);
+static void	bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
+						  enum rport_event event);
+static void	bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport,
+						enum rport_event event);
+static void	bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport,
+					enum rport_event event);
+static void	bfa_fcs_rport_sm_fc4_fcs_online(struct bfa_fcs_rport_s *rport,
+					enum rport_event event);
+static void	bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
+						enum rport_event event);
+static void	bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport,
+					enum rport_event event);
+static void	bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport,
+						 enum rport_event event);
+static void	bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport,
+					 enum rport_event event);
+static void	bfa_fcs_rport_sm_adisc_online_sending(
+			struct bfa_fcs_rport_s *rport, enum rport_event event);
+static void	bfa_fcs_rport_sm_adisc_online(struct bfa_fcs_rport_s *rport,
+					enum rport_event event);
+static void	bfa_fcs_rport_sm_adisc_offline_sending(struct bfa_fcs_rport_s
+					*rport, enum rport_event event);
+static void	bfa_fcs_rport_sm_adisc_offline(struct bfa_fcs_rport_s *rport,
+					enum rport_event event);
+static void	bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport,
+						enum rport_event event);
+static void	bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport,
+						enum rport_event event);
+static void	bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport,
+						enum rport_event event);
+static void	bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
+						enum rport_event event);
+static void	bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport,
+						enum rport_event event);
+static void	bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport,
+						enum rport_event event);
+static void	bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport,
+						enum rport_event event);
+static void	bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport,
+					 enum rport_event event);
+static void	bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport,
+						enum rport_event event);
+static void	bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport,
+						enum rport_event event);
+static void	bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
+						enum rport_event event);
+static void	bfa_fcs_rport_sm_fc4_off_delete(struct bfa_fcs_rport_s *rport,
+						enum rport_event event);
+static void	bfa_fcs_rport_sm_delete_pending(struct bfa_fcs_rport_s *rport,
+						enum rport_event event);
+
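+/*
+ * Maps each state-machine handler to the externally visible rport state
+ * (used with bfa_sm_to_state()).
+ */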
+static struct bfa_sm_table_s rport_sm_table[] = {
+	{BFA_SM(bfa_fcs_rport_sm_uninit), BFA_RPORT_UNINIT},
+	{BFA_SM(bfa_fcs_rport_sm_plogi_sending), BFA_RPORT_PLOGI},
+	{BFA_SM(bfa_fcs_rport_sm_plogiacc_sending), BFA_RPORT_ONLINE},
+	{BFA_SM(bfa_fcs_rport_sm_plogi_retry), BFA_RPORT_PLOGI_RETRY},
+	{BFA_SM(bfa_fcs_rport_sm_plogi), BFA_RPORT_PLOGI},
+	{BFA_SM(bfa_fcs_rport_sm_fc4_fcs_online), BFA_RPORT_ONLINE},
+	{BFA_SM(bfa_fcs_rport_sm_hal_online), BFA_RPORT_ONLINE},
+	{BFA_SM(bfa_fcs_rport_sm_online), BFA_RPORT_ONLINE},
+	{BFA_SM(bfa_fcs_rport_sm_nsquery_sending), BFA_RPORT_NSQUERY},
+	{BFA_SM(bfa_fcs_rport_sm_nsquery), BFA_RPORT_NSQUERY},
+	{BFA_SM(bfa_fcs_rport_sm_adisc_online_sending), BFA_RPORT_ADISC},
+	{BFA_SM(bfa_fcs_rport_sm_adisc_online), BFA_RPORT_ADISC},
+	{BFA_SM(bfa_fcs_rport_sm_adisc_offline_sending), BFA_RPORT_ADISC},
+	{BFA_SM(bfa_fcs_rport_sm_adisc_offline), BFA_RPORT_ADISC},
+	{BFA_SM(bfa_fcs_rport_sm_fc4_logorcv), BFA_RPORT_LOGORCV},
+	{BFA_SM(bfa_fcs_rport_sm_fc4_logosend), BFA_RPORT_LOGO},
+	{BFA_SM(bfa_fcs_rport_sm_fc4_offline), BFA_RPORT_OFFLINE},
+	{BFA_SM(bfa_fcs_rport_sm_hcb_offline), BFA_RPORT_OFFLINE},
+	{BFA_SM(bfa_fcs_rport_sm_hcb_logorcv), BFA_RPORT_LOGORCV},
+	{BFA_SM(bfa_fcs_rport_sm_hcb_logosend), BFA_RPORT_LOGO},
+	{BFA_SM(bfa_fcs_rport_sm_logo_sending), BFA_RPORT_LOGO},
+	{BFA_SM(bfa_fcs_rport_sm_offline), BFA_RPORT_OFFLINE},
+	{BFA_SM(bfa_fcs_rport_sm_nsdisc_sending), BFA_RPORT_NSDISC},
+	{BFA_SM(bfa_fcs_rport_sm_nsdisc_retry), BFA_RPORT_NSDISC},
+	{BFA_SM(bfa_fcs_rport_sm_nsdisc_sent), BFA_RPORT_NSDISC},
+};
+
+/*
+ *		Beginning state.
+ */
+static void
+bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport, enum rport_event event)
+{
+	bfa_trc(rport->fcs, rport->pwwn);
+	bfa_trc(rport->fcs, rport->pid);
+	bfa_trc(rport->fcs, event);
+
+	switch (event) {
+	case RPSM_EVENT_PLOGI_SEND:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending);
+		rport->plogi_retries = 0;
+		bfa_fcs_rport_send_plogi(rport, NULL);
+		break;
+
+	case RPSM_EVENT_PLOGI_RCVD:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
+		bfa_fcs_rport_send_plogiacc(rport, NULL);
+		break;
+
+	case RPSM_EVENT_PLOGI_COMP:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
+		bfa_fcs_rport_hal_online(rport);
+		break;
+
+	case RPSM_EVENT_ADDRESS_CHANGE:
+	case RPSM_EVENT_ADDRESS_DISC:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
+		rport->ns_retries = 0;
+		bfa_fcs_rport_send_nsdisc(rport, NULL);
+		break;
+	default:
+		bfa_sm_fault(rport->fcs, event);
+	}
+}
+
+/*
+ *		PLOGI is being sent.
+ */
+static void
+bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport,
+	 enum rport_event event)
+{
+	bfa_trc(rport->fcs, rport->pwwn);
+	bfa_trc(rport->fcs, rport->pid);
+	bfa_trc(rport->fcs, event);
+
+	switch (event) {
+	case RPSM_EVENT_FCXP_SENT:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi);
+		break;
+
+	case RPSM_EVENT_DELETE:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
+		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+		bfa_fcs_rport_free(rport);
+		break;
+
+	case RPSM_EVENT_PLOGI_RCVD:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
+		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+		bfa_fcs_rport_send_plogiacc(rport, NULL);
+		break;
+
+	case RPSM_EVENT_SCN_OFFLINE:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
+		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+		bfa_timer_start(rport->fcs->bfa, &rport->timer,
+				bfa_fcs_rport_timeout, rport,
+				bfa_fcs_rport_del_timeout);
+		break;
+	case RPSM_EVENT_ADDRESS_CHANGE:
+	case RPSM_EVENT_FAB_SCN:
+		/* query the NS */
+		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+		WARN_ON(bfa_fcport_get_topology(rport->port->fcs->bfa) ==
+					BFA_PORT_TOPOLOGY_LOOP);
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
+		rport->ns_retries = 0;
+		bfa_fcs_rport_send_nsdisc(rport, NULL);
+		break;
+
+	case RPSM_EVENT_LOGO_IMP:
+		rport->pid = 0;
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
+		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+		bfa_timer_start(rport->fcs->bfa, &rport->timer,
+				bfa_fcs_rport_timeout, rport,
+				bfa_fcs_rport_del_timeout);
+		break;
+
+	default:
+		bfa_sm_fault(rport->fcs, event);
+	}
+}
+
+/*
+ *		PLOGI accept is being sent.
+ */
+static void
+bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
+	 enum rport_event event)
+{
+	bfa_trc(rport->fcs, rport->pwwn);
+	bfa_trc(rport->fcs, rport->pid);
+	bfa_trc(rport->fcs, event);
+
+	switch (event) {
+	case RPSM_EVENT_FCXP_SENT:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
+		bfa_fcs_rport_fcs_online_action(rport);
+		break;
+
+	case RPSM_EVENT_DELETE:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
+		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+		bfa_fcs_rport_free(rport);
+		break;
+
+	case RPSM_EVENT_PLOGI_RCVD:
+	case RPSM_EVENT_PLOGI_COMP:
+	case RPSM_EVENT_FAB_SCN:
+		/*
+		 * Ignore, SCN is possibly online notification.
+		 */
+		break;
+
+	case RPSM_EVENT_SCN_OFFLINE:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
+		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+		bfa_timer_start(rport->fcs->bfa, &rport->timer,
+				bfa_fcs_rport_timeout, rport,
+				bfa_fcs_rport_del_timeout);
+		break;
+
+	case RPSM_EVENT_ADDRESS_CHANGE:
+		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
+		rport->ns_retries = 0;
+		bfa_fcs_rport_send_nsdisc(rport, NULL);
+		break;
+
+	case RPSM_EVENT_LOGO_IMP:
+		rport->pid = 0;
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
+		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+		bfa_timer_start(rport->fcs->bfa, &rport->timer,
+				bfa_fcs_rport_timeout, rport,
+				bfa_fcs_rport_del_timeout);
+		break;
+
+	case RPSM_EVENT_HCB_OFFLINE:
+		/*
+		 * Ignore BFA callback, on a PLOGI receive we call bfa offline.
+		 */
+		break;
+
+	default:
+		bfa_sm_fault(rport->fcs, event);
+	}
+}
+
+/*
+ *		PLOGI failed; retry timer is running, awaiting timeout to
+ *		resend PLOGI.
+ */
+static void
+bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport,
+			enum rport_event event)
+{
+	bfa_trc(rport->fcs, rport->pwwn);
+	bfa_trc(rport->fcs, rport->pid);
+	bfa_trc(rport->fcs, event);
+
+	switch (event) {
+	case RPSM_EVENT_TIMEOUT:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending);
+		bfa_fcs_rport_send_plogi(rport, NULL);
+		break;
+
+	case RPSM_EVENT_DELETE:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
+		bfa_timer_stop(&rport->timer);
+		bfa_fcs_rport_free(rport);
+		break;
+
+	case RPSM_EVENT_PRLO_RCVD:
+	case RPSM_EVENT_LOGO_RCVD:
+		break;
+
+	case RPSM_EVENT_PLOGI_RCVD:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
+		bfa_timer_stop(&rport->timer);
+		bfa_fcs_rport_send_plogiacc(rport, NULL);
+		break;
+
+	case RPSM_EVENT_SCN_OFFLINE:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
+		bfa_timer_stop(&rport->timer);
+		bfa_timer_start(rport->fcs->bfa, &rport->timer,
+				bfa_fcs_rport_timeout, rport,
+				bfa_fcs_rport_del_timeout);
+		break;
+
+	case RPSM_EVENT_ADDRESS_CHANGE:
+	case RPSM_EVENT_FAB_SCN:
+		bfa_timer_stop(&rport->timer);
+		WARN_ON(bfa_fcport_get_topology(rport->port->fcs->bfa) ==
+					BFA_PORT_TOPOLOGY_LOOP);
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
+		rport->ns_retries = 0;
+		bfa_fcs_rport_send_nsdisc(rport, NULL);
+		break;
+
+	case RPSM_EVENT_LOGO_IMP:
+		rport->pid = 0;
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
+		bfa_timer_stop(&rport->timer);
+		bfa_timer_start(rport->fcs->bfa, &rport->timer,
+				bfa_fcs_rport_timeout, rport,
+				bfa_fcs_rport_del_timeout);
+		break;
+
+	case RPSM_EVENT_PLOGI_COMP:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
+		bfa_timer_stop(&rport->timer);
+		bfa_fcs_rport_fcs_online_action(rport);
+		break;
+
+	default:
+		bfa_sm_fault(rport->fcs, event);
+	}
+}
+
+/*
+ *		PLOGI is sent.
+ */
+static void
+bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
+{
+	bfa_trc(rport->fcs, rport->pwwn);
+	bfa_trc(rport->fcs, rport->pid);
+	bfa_trc(rport->fcs, event);
+
+	switch (event) {
+	case RPSM_EVENT_ACCEPTED:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
+		rport->plogi_retries = 0;
+		bfa_fcs_rport_fcs_online_action(rport);
+		break;
+
+	case RPSM_EVENT_LOGO_RCVD:
+		bfa_fcs_rport_send_logo_acc(rport);
+		/*
+		 * !! fall through !!
+		 */
+	case RPSM_EVENT_PRLO_RCVD:
+		if (rport->prlo == BFA_TRUE)
+			bfa_fcs_rport_send_prlo_acc(rport);
+
+		bfa_fcxp_discard(rport->fcxp);
+		/*
+		 * !! fall through !!
+		 */
+	case RPSM_EVENT_FAILED:
+		if (rport->plogi_retries < BFA_FCS_RPORT_MAX_RETRIES) {
+			rport->plogi_retries++;
+			bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_retry);
+			bfa_timer_start(rport->fcs->bfa, &rport->timer,
+					bfa_fcs_rport_timeout, rport,
+					BFA_FCS_RETRY_TIMEOUT);
+		} else {
+			bfa_stats(rport->port, rport_del_max_plogi_retry);
+			rport->old_pid = rport->pid;
+			rport->pid = 0;
+			bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
+			bfa_timer_start(rport->fcs->bfa, &rport->timer,
+					bfa_fcs_rport_timeout, rport,
+					bfa_fcs_rport_del_timeout);
+		}
+		break;
+
+	case RPSM_EVENT_SCN_ONLINE:
+		break;
+
+	case RPSM_EVENT_SCN_OFFLINE:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
+		bfa_fcxp_discard(rport->fcxp);
+		bfa_timer_start(rport->fcs->bfa, &rport->timer,
+				bfa_fcs_rport_timeout, rport,
+				bfa_fcs_rport_del_timeout);
+		break;
+
+	case RPSM_EVENT_PLOGI_RETRY:
+		rport->plogi_retries = 0;
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_retry);
+		bfa_timer_start(rport->fcs->bfa, &rport->timer,
+				bfa_fcs_rport_timeout, rport,
+				(FC_RA_TOV * 1000));
+		break;
+
+	case RPSM_EVENT_LOGO_IMP:
+		rport->pid = 0;
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
+		bfa_fcxp_discard(rport->fcxp);
+		bfa_timer_start(rport->fcs->bfa, &rport->timer,
+				bfa_fcs_rport_timeout, rport,
+				bfa_fcs_rport_del_timeout);
+		break;
+
+	case RPSM_EVENT_ADDRESS_CHANGE:
+	case RPSM_EVENT_FAB_SCN:
+		bfa_fcxp_discard(rport->fcxp);
+		WARN_ON(bfa_fcport_get_topology(rport->port->fcs->bfa) ==
+					BFA_PORT_TOPOLOGY_LOOP);
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
+		rport->ns_retries = 0;
+		bfa_fcs_rport_send_nsdisc(rport, NULL);
+		break;
+
+	case RPSM_EVENT_PLOGI_RCVD:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
+		bfa_fcxp_discard(rport->fcxp);
+		bfa_fcs_rport_send_plogiacc(rport, NULL);
+		break;
+
+	case RPSM_EVENT_DELETE:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
+		bfa_fcxp_discard(rport->fcxp);
+		bfa_fcs_rport_free(rport);
+		break;
+
+	case RPSM_EVENT_PLOGI_COMP:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
+		bfa_fcxp_discard(rport->fcxp);
+		bfa_fcs_rport_fcs_online_action(rport);
+		break;
+
+	default:
+		bfa_sm_fault(rport->fcs, event);
+	}
+}
+
+/*
+ * PLOGI is done. Await bfa_fcs_itnim to ascertain the SCSI function.
+ */
+static void
+bfa_fcs_rport_sm_fc4_fcs_online(struct bfa_fcs_rport_s *rport,
+				enum rport_event event)
+{
+	bfa_trc(rport->fcs, rport->pwwn);
+	bfa_trc(rport->fcs, rport->pid);
+	bfa_trc(rport->fcs, event);
+
+	switch (event) {
+	case RPSM_EVENT_FC4_FCS_ONLINE:
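+		/*
+		 * A pure initiator rport needs no HAL rport; go online
+		 * directly. Otherwise create the bfa_rport first.
+		 */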
+		if (rport->scsi_function == BFA_RPORT_INITIATOR) {
+			if (!BFA_FCS_PID_IS_WKA(rport->pid))
+				bfa_fcs_rpf_rport_online(rport);
+			bfa_sm_set_state(rport, bfa_fcs_rport_sm_online);
+			break;
+		}
+
+		if (!rport->bfa_rport)
+			rport->bfa_rport =
+				bfa_rport_create(rport->fcs->bfa, rport);
+
+		if (rport->bfa_rport) {
+			bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
+			bfa_fcs_rport_hal_online(rport);
+		} else {
+			bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
+			bfa_fcs_rport_fcs_offline_action(rport);
+		}
+		break;
+
+	case RPSM_EVENT_PLOGI_RCVD:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
+		rport->plogi_pending = BFA_TRUE;
+		bfa_fcs_rport_fcs_offline_action(rport);
+		break;
+
+	case RPSM_EVENT_PLOGI_COMP:
+	case RPSM_EVENT_LOGO_IMP:
+	case RPSM_EVENT_ADDRESS_CHANGE:
+	case RPSM_EVENT_FAB_SCN:
+	case RPSM_EVENT_SCN_OFFLINE:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
+		bfa_fcs_rport_fcs_offline_action(rport);
+		break;
+
+	case RPSM_EVENT_LOGO_RCVD:
+	case RPSM_EVENT_PRLO_RCVD:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
+		bfa_fcs_rport_fcs_offline_action(rport);
+		break;
+
+	case RPSM_EVENT_DELETE:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
+		bfa_fcs_rport_fcs_offline_action(rport);
+		break;
+
+	default:
+		bfa_sm_fault(rport->fcs, event);
+		break;
+	}
+}
+
+/*
+ *		PLOGI is complete. Awaiting BFA rport online callback. FC-4s
+ *		are offline.
+ */
+static void
+bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
+			enum rport_event event)
+{
+	bfa_trc(rport->fcs, rport->pwwn);
+	bfa_trc(rport->fcs, rport->pid);
+	bfa_trc(rport->fcs, event);
+
+	switch (event) {
+	case RPSM_EVENT_HCB_ONLINE:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_online);
+		bfa_fcs_rport_hal_online_action(rport);
+		break;
+
+	case RPSM_EVENT_PLOGI_COMP:
+		break;
+
+	case RPSM_EVENT_PRLO_RCVD:
+	case RPSM_EVENT_LOGO_RCVD:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
+		bfa_fcs_rport_fcs_offline_action(rport);
+		break;
+
+	case RPSM_EVENT_FAB_SCN:
+	case RPSM_EVENT_LOGO_IMP:
+	case RPSM_EVENT_ADDRESS_CHANGE:
+	case RPSM_EVENT_SCN_OFFLINE:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
+		bfa_fcs_rport_fcs_offline_action(rport);
+		break;
+
+	case RPSM_EVENT_PLOGI_RCVD:
+		rport->plogi_pending = BFA_TRUE;
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
+		bfa_fcs_rport_fcs_offline_action(rport);
+		break;
+
+	case RPSM_EVENT_DELETE:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
+		bfa_fcs_rport_fcs_offline_action(rport);
+		break;
+
+	default:
+		bfa_sm_fault(rport->fcs, event);
+	}
+}
+
+/*
+ *		Rport is ONLINE. FC-4s active.
+ */
+static void
+bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event)
+{
+	bfa_trc(rport->fcs, rport->pwwn);
+	bfa_trc(rport->fcs, rport->pid);
+	bfa_trc(rport->fcs, event);
+
+	switch (event) {
+	case RPSM_EVENT_FAB_SCN:
+		if (bfa_fcs_fabric_is_switched(rport->port->fabric)) {
+			bfa_sm_set_state(rport,
+					 bfa_fcs_rport_sm_nsquery_sending);
+			rport->ns_retries = 0;
+			bfa_fcs_rport_send_nsdisc(rport, NULL);
+		} else {
+			bfa_sm_set_state(rport,
+				bfa_fcs_rport_sm_adisc_online_sending);
+			bfa_fcs_rport_send_adisc(rport, NULL);
+		}
+		break;
+
+	case RPSM_EVENT_PLOGI_RCVD:
+	case RPSM_EVENT_LOGO_IMP:
+	case RPSM_EVENT_ADDRESS_CHANGE:
+	case RPSM_EVENT_SCN_OFFLINE:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
+		bfa_fcs_rport_hal_offline_action(rport);
+		break;
+
+	case RPSM_EVENT_DELETE:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
+		bfa_fcs_rport_hal_offline_action(rport);
+		break;
+
+	case RPSM_EVENT_LOGO_RCVD:
+	case RPSM_EVENT_PRLO_RCVD:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
+		bfa_fcs_rport_hal_offline_action(rport);
+		break;
+
+	case RPSM_EVENT_SCN_ONLINE:
+	case RPSM_EVENT_PLOGI_COMP:
+		break;
+
+	default:
+		bfa_sm_fault(rport->fcs, event);
+	}
+}
+
+/*
+ *		An SCN event is received in ONLINE state. NS query is being sent
+ *		prior to ADISC authentication with rport. FC-4s are paused.
+ */
+static void
+bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport,
+	 enum rport_event event)
+{
+	bfa_trc(rport->fcs, rport->pwwn);
+	bfa_trc(rport->fcs, rport->pid);
+	bfa_trc(rport->fcs, event);
+
+	switch (event) {
+	case RPSM_EVENT_FCXP_SENT:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsquery);
+		break;
+
+	case RPSM_EVENT_DELETE:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
+		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+		bfa_fcs_rport_hal_offline_action(rport);
+		break;
+
+	case RPSM_EVENT_FAB_SCN:
+		/*
+		 * ignore SCN, wait for response to query itself
+		 */
+		break;
+
+	case RPSM_EVENT_LOGO_RCVD:
+	case RPSM_EVENT_PRLO_RCVD:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
+		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+		bfa_fcs_rport_hal_offline_action(rport);
+		break;
+
+	case RPSM_EVENT_LOGO_IMP:
+	case RPSM_EVENT_PLOGI_RCVD:
+	case RPSM_EVENT_ADDRESS_CHANGE:
+	case RPSM_EVENT_PLOGI_COMP:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
+		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+		bfa_fcs_rport_hal_offline_action(rport);
+		break;
+
+	default:
+		bfa_sm_fault(rport->fcs, event);
+	}
+}
+
+/*
+ *	An SCN event is received in ONLINE state. NS query is sent to rport.
+ *	FC-4s are paused.
+ */
+static void
+bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event)
+{
+	bfa_trc(rport->fcs, rport->pwwn);
+	bfa_trc(rport->fcs, rport->pid);
+	bfa_trc(rport->fcs, event);
+
+	switch (event) {
+	case RPSM_EVENT_ACCEPTED:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_online_sending);
+		bfa_fcs_rport_send_adisc(rport, NULL);
+		break;
+
+	case RPSM_EVENT_FAILED:
+		rport->ns_retries++;
+		if (rport->ns_retries < BFA_FCS_RPORT_MAX_RETRIES) {
+			bfa_sm_set_state(rport,
+					 bfa_fcs_rport_sm_nsquery_sending);
+			bfa_fcs_rport_send_nsdisc(rport, NULL);
+		} else {
+			bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
+			bfa_fcs_rport_hal_offline_action(rport);
+		}
+		break;
+
+	case RPSM_EVENT_DELETE:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
+		bfa_fcxp_discard(rport->fcxp);
+		bfa_fcs_rport_hal_offline_action(rport);
+		break;
+
+	case RPSM_EVENT_FAB_SCN:
+		break;
+
+	case RPSM_EVENT_LOGO_RCVD:
+	case RPSM_EVENT_PRLO_RCVD:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
+		bfa_fcxp_discard(rport->fcxp);
+		bfa_fcs_rport_hal_offline_action(rport);
+		break;
+
+	case RPSM_EVENT_PLOGI_COMP:
+	case RPSM_EVENT_ADDRESS_CHANGE:
+	case RPSM_EVENT_PLOGI_RCVD:
+	case RPSM_EVENT_LOGO_IMP:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
+		bfa_fcxp_discard(rport->fcxp);
+		bfa_fcs_rport_hal_offline_action(rport);
+		break;
+
+	default:
+		bfa_sm_fault(rport->fcs, event);
+	}
+}
+
+/*
+ *	An SCN event is received in ONLINE state. ADISC is being sent for
+ *	authenticating with rport. FC-4s are paused.
+ */
+static void
+bfa_fcs_rport_sm_adisc_online_sending(struct bfa_fcs_rport_s *rport,
+	 enum rport_event event)
+{
+	bfa_trc(rport->fcs, rport->pwwn);
+	bfa_trc(rport->fcs, rport->pid);
+	bfa_trc(rport->fcs, event);
+
+	switch (event) {
+	case RPSM_EVENT_FCXP_SENT:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_online);
+		break;
+
+	case RPSM_EVENT_DELETE:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
+		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+		bfa_fcs_rport_hal_offline_action(rport);
+		break;
+
+	case RPSM_EVENT_LOGO_IMP:
+	case RPSM_EVENT_ADDRESS_CHANGE:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
+		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+		bfa_fcs_rport_hal_offline_action(rport);
+		break;
+
+	case RPSM_EVENT_LOGO_RCVD:
+	case RPSM_EVENT_PRLO_RCVD:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
+		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+		bfa_fcs_rport_hal_offline_action(rport);
+		break;
+
+	case RPSM_EVENT_FAB_SCN:
+		break;
+
+	case RPSM_EVENT_PLOGI_RCVD:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
+		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+		bfa_fcs_rport_hal_offline_action(rport);
+		break;
+
+	default:
+		bfa_sm_fault(rport->fcs, event);
+	}
+}
+
+/*
+ *		An SCN event is received in ONLINE state. ADISC is sent to rport.
+ *		FC-4s are paused.
+ */
+static void
+bfa_fcs_rport_sm_adisc_online(struct bfa_fcs_rport_s *rport,
+				enum rport_event event)
+{
+	bfa_trc(rport->fcs, rport->pwwn);
+	bfa_trc(rport->fcs, rport->pid);
+	bfa_trc(rport->fcs, event);
+
+	switch (event) {
+	case RPSM_EVENT_ACCEPTED:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_online);
+		break;
+
+	case RPSM_EVENT_PLOGI_RCVD:
+		/*
+		 * Too complex to clean up FC-4 & rport state and then accept
+		 * the PLOGI; at least go offline when a PLOGI is received.
+		 */
+		bfa_fcxp_discard(rport->fcxp);
+		/*
+		 * !!! fall through !!!
+		 */
+
+	case RPSM_EVENT_FAILED:
+	case RPSM_EVENT_ADDRESS_CHANGE:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
+		bfa_fcs_rport_hal_offline_action(rport);
+		break;
+
+	case RPSM_EVENT_DELETE:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
+		bfa_fcxp_discard(rport->fcxp);
+		bfa_fcs_rport_hal_offline_action(rport);
+		break;
+
+	case RPSM_EVENT_FAB_SCN:
+		/*
+		 * already processing RSCN
+		 */
+		break;
+
+	case RPSM_EVENT_LOGO_IMP:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
+		bfa_fcxp_discard(rport->fcxp);
+		bfa_fcs_rport_hal_offline_action(rport);
+		break;
+
+	case RPSM_EVENT_LOGO_RCVD:
+	case RPSM_EVENT_PRLO_RCVD:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
+		bfa_fcxp_discard(rport->fcxp);
+		bfa_fcs_rport_hal_offline_action(rport);
+		break;
+
+	default:
+		bfa_sm_fault(rport->fcs, event);
+	}
+}
+
+/*
+ * ADISC is being sent to authenticate with the rport.
+ * Offline actions have already been taken.
+ */
+static void
+bfa_fcs_rport_sm_adisc_offline_sending(struct bfa_fcs_rport_s *rport,
+	enum rport_event event)
+{
+	bfa_trc(rport->fcs, rport->pwwn);
+	bfa_trc(rport->fcs, rport->pid);
+	bfa_trc(rport->fcs, event);
+
+	switch (event) {
+	case RPSM_EVENT_FCXP_SENT:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_offline);
+		break;
+
+	case RPSM_EVENT_DELETE:
+	case RPSM_EVENT_SCN_OFFLINE:
+	case RPSM_EVENT_LOGO_IMP:
+	case RPSM_EVENT_LOGO_RCVD:
+	case RPSM_EVENT_PRLO_RCVD:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
+		bfa_fcxp_walloc_cancel(rport->fcs->bfa,
+			&rport->fcxp_wqe);
+		bfa_timer_start(rport->fcs->bfa, &rport->timer,
+			bfa_fcs_rport_timeout, rport,
+			bfa_fcs_rport_del_timeout);
+		break;
+
+	case RPSM_EVENT_PLOGI_RCVD:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
+		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+		bfa_fcs_rport_send_plogiacc(rport, NULL);
+		break;
+
+	default:
+		bfa_sm_fault(rport->fcs, event);
+	}
+}
+
+/*
+ * ADISC has been sent to the rport.
+ * Offline actions have already been taken.
+ */
+static void
+bfa_fcs_rport_sm_adisc_offline(struct bfa_fcs_rport_s *rport,
+			enum rport_event event)
+{
+	bfa_trc(rport->fcs, rport->pwwn);
+	bfa_trc(rport->fcs, rport->pid);
+	bfa_trc(rport->fcs, event);
+
+	switch (event) {
+	case RPSM_EVENT_ACCEPTED:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
+		bfa_fcs_rport_hal_online(rport);
+		break;
+
+	case RPSM_EVENT_PLOGI_RCVD:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
+		bfa_fcxp_discard(rport->fcxp);
+		bfa_fcs_rport_send_plogiacc(rport, NULL);
+		break;
+
+	case RPSM_EVENT_FAILED:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
+		bfa_timer_start(rport->fcs->bfa, &rport->timer,
+			bfa_fcs_rport_timeout, rport,
+			bfa_fcs_rport_del_timeout);
+		break;
+
+	case RPSM_EVENT_DELETE:
+	case RPSM_EVENT_SCN_OFFLINE:
+	case RPSM_EVENT_LOGO_IMP:
+	case RPSM_EVENT_LOGO_RCVD:
+	case RPSM_EVENT_PRLO_RCVD:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
+		bfa_fcxp_discard(rport->fcxp);
+		bfa_timer_start(rport->fcs->bfa, &rport->timer,
+			bfa_fcs_rport_timeout, rport,
+			bfa_fcs_rport_del_timeout);
+		break;
+
+	default:
+		bfa_sm_fault(rport->fcs, event);
+	}
+}
+
+/*
+ * Rport has sent LOGO. Awaiting FC-4 offline completion callback.
+ */
+static void
+bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport,
+			enum rport_event event)
+{
+	bfa_trc(rport->fcs, rport->pwwn);
+	bfa_trc(rport->fcs, rport->pid);
+	bfa_trc(rport->fcs, event);
+
+	switch (event) {
+	case RPSM_EVENT_FC4_OFFLINE:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logorcv);
+		bfa_fcs_rport_hal_offline(rport);
+		break;
+
+	case RPSM_EVENT_DELETE:
+		if (rport->pid && (rport->prlo == BFA_TRUE))
+			bfa_fcs_rport_send_prlo_acc(rport);
+		if (rport->pid && (rport->prlo == BFA_FALSE))
+			bfa_fcs_rport_send_logo_acc(rport);
+
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_off_delete);
+		break;
+
+	case RPSM_EVENT_SCN_ONLINE:
+	case RPSM_EVENT_SCN_OFFLINE:
+	case RPSM_EVENT_HCB_ONLINE:
+	case RPSM_EVENT_LOGO_RCVD:
+	case RPSM_EVENT_PRLO_RCVD:
+	case RPSM_EVENT_ADDRESS_CHANGE:
+		break;
+
+	default:
+		bfa_sm_fault(rport->fcs, event);
+	}
+}
+
+/*
+ *		LOGO needs to be sent to rport. Awaiting FC-4 offline completion
+ *		callback.
+ */
+static void
+bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport,
+	 enum rport_event event)
+{
+	bfa_trc(rport->fcs, rport->pwwn);
+	bfa_trc(rport->fcs, rport->pid);
+	bfa_trc(rport->fcs, event);
+
+	switch (event) {
+	case RPSM_EVENT_FC4_OFFLINE:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logosend);
+		bfa_fcs_rport_hal_offline(rport);
+		break;
+
+	case RPSM_EVENT_LOGO_RCVD:
+		bfa_fcs_rport_send_logo_acc(rport);
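+		/*
+		 * !! fall through !!
+		 */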
+	case RPSM_EVENT_PRLO_RCVD:
+		if (rport->prlo == BFA_TRUE)
+			bfa_fcs_rport_send_prlo_acc(rport);
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_off_delete);
+		break;
+
+	case RPSM_EVENT_HCB_ONLINE:
+	case RPSM_EVENT_DELETE:
+		/* Rport is being deleted */
+		break;
+
+	default:
+		bfa_sm_fault(rport->fcs, event);
+	}
+}
+
+/*
+ *	Rport is going offline. Awaiting FC-4 offline completion callback.
+ */
+static void
+bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport,
+			enum rport_event event)
+{
+	bfa_trc(rport->fcs, rport->pwwn);
+	bfa_trc(rport->fcs, rport->pid);
+	bfa_trc(rport->fcs, event);
+
+	switch (event) {
+	case RPSM_EVENT_FC4_OFFLINE:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline);
+		bfa_fcs_rport_hal_offline(rport);
+		break;
+
+	case RPSM_EVENT_SCN_ONLINE:
+		break;
+	case RPSM_EVENT_LOGO_RCVD:
+		/*
+		 * Rport is going offline. Just ack the LOGO.
+		 */
+		bfa_fcs_rport_send_logo_acc(rport);
+		break;
+
+	case RPSM_EVENT_PRLO_RCVD:
+		bfa_fcs_rport_send_prlo_acc(rport);
+		break;
+
+	case RPSM_EVENT_SCN_OFFLINE:
+	case RPSM_EVENT_HCB_ONLINE:
+	case RPSM_EVENT_FAB_SCN:
+	case RPSM_EVENT_LOGO_IMP:
+	case RPSM_EVENT_ADDRESS_CHANGE:
+		/*
+		 * rport is already going offline.
+		 * SCN - ignore and wait till transitioning to offline state
+		 */
+		break;
+
+	case RPSM_EVENT_DELETE:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
+		break;
+
+	default:
+		bfa_sm_fault(rport->fcs, event);
+	}
+}
+
+/*
+ *		Rport is offline. FC-4s are offline. Awaiting BFA rport offline
+ *		callback.
+ */
+static void
+bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
+				enum rport_event event)
+{
+	bfa_trc(rport->fcs, rport->pwwn);
+	bfa_trc(rport->fcs, rport->pid);
+	bfa_trc(rport->fcs, event);
+
+	switch (event) {
+	case RPSM_EVENT_HCB_OFFLINE:
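+		/*
+		 * If a PLOGI was received while going offline and the
+		 * lport is still online, accept it now.
+		 */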
+		if (bfa_fcs_lport_is_online(rport->port) &&
+		    (rport->plogi_pending)) {
+			rport->plogi_pending = BFA_FALSE;
+			bfa_sm_set_state(rport,
+				bfa_fcs_rport_sm_plogiacc_sending);
+			bfa_fcs_rport_send_plogiacc(rport, NULL);
+			break;
+		}
+		/*
+		 * !! fall through !!
+		 */
+
+	case RPSM_EVENT_ADDRESS_CHANGE:
+		if (!bfa_fcs_lport_is_online(rport->port)) {
+			rport->pid = 0;
+			bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
+			bfa_timer_start(rport->fcs->bfa, &rport->timer,
+					bfa_fcs_rport_timeout, rport,
+					bfa_fcs_rport_del_timeout);
+			break;
+		}
+		if (bfa_fcs_fabric_is_switched(rport->port->fabric)) {
+			bfa_sm_set_state(rport,
+				bfa_fcs_rport_sm_nsdisc_sending);
+			rport->ns_retries = 0;
+			bfa_fcs_rport_send_nsdisc(rport, NULL);
+		} else if (bfa_fcport_get_topology(rport->port->fcs->bfa) ==
+					BFA_PORT_TOPOLOGY_LOOP) {
+			if (rport->scn_online) {
+				bfa_sm_set_state(rport,
+					bfa_fcs_rport_sm_adisc_offline_sending);
+				bfa_fcs_rport_send_adisc(rport, NULL);
+			} else {
+				bfa_sm_set_state(rport,
+					bfa_fcs_rport_sm_offline);
+				bfa_timer_start(rport->fcs->bfa, &rport->timer,
+					bfa_fcs_rport_timeout, rport,
+					bfa_fcs_rport_del_timeout);
+			}
+		} else {
+			bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending);
+			rport->plogi_retries = 0;
+			bfa_fcs_rport_send_plogi(rport, NULL);
+		}
+		break;
+
+	case RPSM_EVENT_DELETE:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
+		bfa_fcs_rport_free(rport);
+		break;
+
+	case RPSM_EVENT_SCN_ONLINE:
+	case RPSM_EVENT_SCN_OFFLINE:
+	case RPSM_EVENT_FAB_SCN:
+	case RPSM_EVENT_LOGO_RCVD:
+	case RPSM_EVENT_PRLO_RCVD:
+	case RPSM_EVENT_PLOGI_RCVD:
+	case RPSM_EVENT_LOGO_IMP:
+		/*
+		 * Ignore, already offline.
+		 */
+		break;
+
+	default:
+		bfa_sm_fault(rport->fcs, event);
+	}
+}
+
+/*
+ *		Rport is offline. FC-4s are offline. Awaiting BFA rport offline
+ *		callback to send LOGO accept.
+ */
+static void
+bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport,
+			enum rport_event event)
+{
+	bfa_trc(rport->fcs, rport->pwwn);
+	bfa_trc(rport->fcs, rport->pid);
+	bfa_trc(rport->fcs, event);
+
+	switch (event) {
+	case RPSM_EVENT_HCB_OFFLINE:
+	case RPSM_EVENT_ADDRESS_CHANGE:
+		if (rport->pid && (rport->prlo == BFA_TRUE))
+			bfa_fcs_rport_send_prlo_acc(rport);
+		if (rport->pid && (rport->prlo == BFA_FALSE))
+			bfa_fcs_rport_send_logo_acc(rport);
+		/*
+		 * If the lport is online and the rport is not a well-known
+		 * address port, we try to re-discover the rport.
+		 */
+		if (bfa_fcs_lport_is_online(rport->port) &&
+			(!BFA_FCS_PID_IS_WKA(rport->pid))) {
+			if (bfa_fcs_fabric_is_switched(rport->port->fabric)) {
+				bfa_sm_set_state(rport,
+					bfa_fcs_rport_sm_nsdisc_sending);
+				rport->ns_retries = 0;
+				bfa_fcs_rport_send_nsdisc(rport, NULL);
+			} else {
+				/* For N2N  Direct Attach, try to re-login */
+				bfa_sm_set_state(rport,
+					bfa_fcs_rport_sm_plogi_sending);
+				rport->plogi_retries = 0;
+				bfa_fcs_rport_send_plogi(rport, NULL);
+			}
+		} else {
+			/*
+			 * if it is not a well known address, reset the
+			 * pid to 0.
+			 */
+			if (!BFA_FCS_PID_IS_WKA(rport->pid))
+				rport->pid = 0;
+			bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
+			bfa_timer_start(rport->fcs->bfa, &rport->timer,
+					bfa_fcs_rport_timeout, rport,
+					bfa_fcs_rport_del_timeout);
+		}
+		break;
+
+	case RPSM_EVENT_DELETE:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_delete_pending);
+		if (rport->pid && (rport->prlo == BFA_TRUE))
+			bfa_fcs_rport_send_prlo_acc(rport);
+		if (rport->pid && (rport->prlo == BFA_FALSE))
+			bfa_fcs_rport_send_logo_acc(rport);
+		break;
+
+	case RPSM_EVENT_LOGO_IMP:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline);
+		break;
+
+	case RPSM_EVENT_SCN_ONLINE:
+	case RPSM_EVENT_SCN_OFFLINE:
+	case RPSM_EVENT_LOGO_RCVD:
+	case RPSM_EVENT_PRLO_RCVD:
+		/*
+		 * Ignore - already processing a LOGO.
+		 */
+		break;
+
+	default:
+		bfa_sm_fault(rport->fcs, event);
+	}
+}
+
+/*
+ *		Rport is being deleted. FC-4s are offline. Awaiting BFA rport
+ *		offline callback to send LOGO.
+ */
+static void
+bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport,
+		 enum rport_event event)
+{
+	bfa_trc(rport->fcs, rport->pwwn);
+	bfa_trc(rport->fcs, rport->pid);
+	bfa_trc(rport->fcs, event);
+
+	switch (event) {
+	case RPSM_EVENT_HCB_OFFLINE:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_logo_sending);
+		bfa_fcs_rport_send_logo(rport, NULL);
+		break;
+
+	case RPSM_EVENT_LOGO_RCVD:
+		bfa_fcs_rport_send_logo_acc(rport);
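+		/*
+		 * !! fall through !!
+		 */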
+	case RPSM_EVENT_PRLO_RCVD:
+		if (rport->prlo == BFA_TRUE)
+			bfa_fcs_rport_send_prlo_acc(rport);
+
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_delete_pending);
+		break;
+
+	case RPSM_EVENT_SCN_ONLINE:
+	case RPSM_EVENT_SCN_OFFLINE:
+	case RPSM_EVENT_ADDRESS_CHANGE:
+		break;
+
+	default:
+		bfa_sm_fault(rport->fcs, event);
+	}
+}
+
+/*
+ *		Rport is being deleted. FC-4s are offline. LOGO is being sent.
+ */
+static void
+bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport,
+	 enum rport_event event)
+{
+	bfa_trc(rport->fcs, rport->pwwn);
+	bfa_trc(rport->fcs, rport->pid);
+	bfa_trc(rport->fcs, event);
+
+	switch (event) {
+	case RPSM_EVENT_FCXP_SENT:
+		/* Once LOGO is sent, we do not wait for the response */
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
+		bfa_fcs_rport_free(rport);
+		break;
+
+	case RPSM_EVENT_SCN_ONLINE:
+	case RPSM_EVENT_SCN_OFFLINE:
+	case RPSM_EVENT_FAB_SCN:
+	case RPSM_EVENT_ADDRESS_CHANGE:
+		break;
+
+	case RPSM_EVENT_LOGO_RCVD:
+		bfa_fcs_rport_send_logo_acc(rport);
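+		/*
+		 * !! fall through !!
+		 */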
+	case RPSM_EVENT_PRLO_RCVD:
+		if (rport->prlo == BFA_TRUE)
+			bfa_fcs_rport_send_prlo_acc(rport);
+
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
+		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+		bfa_fcs_rport_free(rport);
+		break;
+
+	default:
+		bfa_sm_fault(rport->fcs, event);
+	}
+}
+
+/*
+ *		Rport is offline. FC-4s are offline. BFA rport is offline.
+ *		Timer active to delete stale rport.
+ */
+static void
+bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event)
+{
+	bfa_trc(rport->fcs, rport->pwwn);
+	bfa_trc(rport->fcs, rport->pid);
+	bfa_trc(rport->fcs, event);
+
+	switch (event) {
+	case RPSM_EVENT_TIMEOUT:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
+		bfa_fcs_rport_free(rport);
+		break;
+
+	case RPSM_EVENT_FAB_SCN:
+	case RPSM_EVENT_ADDRESS_CHANGE:
+		bfa_timer_stop(&rport->timer);
+		WARN_ON(bfa_fcport_get_topology(rport->port->fcs->bfa) ==
+					BFA_PORT_TOPOLOGY_LOOP);
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
+		rport->ns_retries = 0;
+		bfa_fcs_rport_send_nsdisc(rport, NULL);
+		break;
+
+	case RPSM_EVENT_DELETE:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
+		bfa_timer_stop(&rport->timer);
+		bfa_fcs_rport_free(rport);
+		break;
+
+	case RPSM_EVENT_PLOGI_RCVD:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
+		bfa_timer_stop(&rport->timer);
+		bfa_fcs_rport_send_plogiacc(rport, NULL);
+		break;
+
+	case RPSM_EVENT_LOGO_RCVD:
+	case RPSM_EVENT_PRLO_RCVD:
+	case RPSM_EVENT_LOGO_IMP:
+	case RPSM_EVENT_SCN_OFFLINE:
+		break;
+
+	case RPSM_EVENT_PLOGI_COMP:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
+		bfa_timer_stop(&rport->timer);
+		bfa_fcs_rport_fcs_online_action(rport);
+		break;
+
+	case RPSM_EVENT_SCN_ONLINE:
+		bfa_timer_stop(&rport->timer);
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending);
+		bfa_fcs_rport_send_plogi(rport, NULL);
+		break;
+
+	case RPSM_EVENT_PLOGI_SEND:
+		bfa_timer_stop(&rport->timer);
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending);
+		rport->plogi_retries = 0;
+		bfa_fcs_rport_send_plogi(rport, NULL);
+		break;
+
+	default:
+		bfa_sm_fault(rport->fcs, event);
+	}
+}
+
+/*
+ *	Rport address has changed. Nameserver discovery request is being sent.
+ */
+static void
+bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport,
+	 enum rport_event event)
+{
+	bfa_trc(rport->fcs, rport->pwwn);
+	bfa_trc(rport->fcs, rport->pid);
+	bfa_trc(rport->fcs, event);
+
+	switch (event) {
+	case RPSM_EVENT_FCXP_SENT:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sent);
+		break;
+
+	case RPSM_EVENT_DELETE:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
+		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+		bfa_fcs_rport_free(rport);
+		break;
+
+	case RPSM_EVENT_PLOGI_RCVD:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
+		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+		bfa_fcs_rport_send_plogiacc(rport, NULL);
+		break;
+
+	case RPSM_EVENT_FAB_SCN:
+	case RPSM_EVENT_LOGO_RCVD:
+	case RPSM_EVENT_PRLO_RCVD:
+	case RPSM_EVENT_PLOGI_SEND:
+		break;
+
+	case RPSM_EVENT_ADDRESS_CHANGE:
+		rport->ns_retries = 0; /* reset the retry count */
+		break;
+
+	case RPSM_EVENT_LOGO_IMP:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
+		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+		bfa_timer_start(rport->fcs->bfa, &rport->timer,
+				bfa_fcs_rport_timeout, rport,
+				bfa_fcs_rport_del_timeout);
+		break;
+
+	case RPSM_EVENT_PLOGI_COMP:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
+		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
+		bfa_fcs_rport_fcs_online_action(rport);
+		break;
+
+	default:
+		bfa_sm_fault(rport->fcs, event);
+	}
+}
+
+/*
+ *		Nameserver discovery failed. Waiting for timeout to retry.
+ */
+static void
+bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport,
+	 enum rport_event event)
+{
+	bfa_trc(rport->fcs, rport->pwwn);
+	bfa_trc(rport->fcs, rport->pid);
+	bfa_trc(rport->fcs, event);
+
+	switch (event) {
+	case RPSM_EVENT_TIMEOUT:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
+		bfa_fcs_rport_send_nsdisc(rport, NULL);
+		break;
+
+	case RPSM_EVENT_FAB_SCN:
+	case RPSM_EVENT_ADDRESS_CHANGE:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
+		bfa_timer_stop(&rport->timer);
+		rport->ns_retries = 0;
+		bfa_fcs_rport_send_nsdisc(rport, NULL);
+		break;
+
+	case RPSM_EVENT_DELETE:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
+		bfa_timer_stop(&rport->timer);
+		bfa_fcs_rport_free(rport);
+		break;
+
+	case RPSM_EVENT_PLOGI_RCVD:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
+		bfa_timer_stop(&rport->timer);
+		bfa_fcs_rport_send_plogiacc(rport, NULL);
+		break;
+
+	case RPSM_EVENT_LOGO_IMP:
+		rport->pid = 0;
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
+		bfa_timer_stop(&rport->timer);
+		bfa_timer_start(rport->fcs->bfa, &rport->timer,
+				bfa_fcs_rport_timeout, rport,
+				bfa_fcs_rport_del_timeout);
+		break;
+
+	case RPSM_EVENT_LOGO_RCVD:
+		bfa_fcs_rport_send_logo_acc(rport);
+		break;
+	case RPSM_EVENT_PRLO_RCVD:
+		bfa_fcs_rport_send_prlo_acc(rport);
+		break;
+
+	case RPSM_EVENT_PLOGI_COMP:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
+		bfa_timer_stop(&rport->timer);
+		bfa_fcs_rport_fcs_online_action(rport);
+		break;
+
+	default:
+		bfa_sm_fault(rport->fcs, event);
+	}
+}
+
+/*
+ *		Rport address has changed. Nameserver discovery request is sent.
+ */
+static void
+bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
+			enum rport_event event)
+{
+	bfa_trc(rport->fcs, rport->pwwn);
+	bfa_trc(rport->fcs, rport->pid);
+	bfa_trc(rport->fcs, event);
+
+	switch (event) {
+	case RPSM_EVENT_ACCEPTED:
+	case RPSM_EVENT_ADDRESS_CHANGE:
+		if (rport->pid) {
+			bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending);
+			bfa_fcs_rport_send_plogi(rport, NULL);
+		} else {
+			bfa_sm_set_state(rport,
+				 bfa_fcs_rport_sm_nsdisc_sending);
+			rport->ns_retries = 0;
+			bfa_fcs_rport_send_nsdisc(rport, NULL);
+		}
+		break;
+
+	case RPSM_EVENT_FAILED:
+		rport->ns_retries++;
+		if (rport->ns_retries < BFA_FCS_RPORT_MAX_RETRIES) {
+			bfa_sm_set_state(rport,
+				 bfa_fcs_rport_sm_nsdisc_sending);
+			bfa_fcs_rport_send_nsdisc(rport, NULL);
+		} else {
+			rport->old_pid = rport->pid;
+			rport->pid = 0;
+			bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
+			bfa_timer_start(rport->fcs->bfa, &rport->timer,
+					bfa_fcs_rport_timeout, rport,
+					bfa_fcs_rport_del_timeout);
+		}
+		break;
+
+	case RPSM_EVENT_DELETE:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
+		bfa_fcxp_discard(rport->fcxp);
+		bfa_fcs_rport_free(rport);
+		break;
+
+	case RPSM_EVENT_PLOGI_RCVD:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
+		bfa_fcxp_discard(rport->fcxp);
+		bfa_fcs_rport_send_plogiacc(rport, NULL);
+		break;
+
+	case RPSM_EVENT_LOGO_IMP:
+		rport->pid = 0;
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
+		bfa_fcxp_discard(rport->fcxp);
+		bfa_timer_start(rport->fcs->bfa, &rport->timer,
+				bfa_fcs_rport_timeout, rport,
+				bfa_fcs_rport_del_timeout);
+		break;
+
+	case RPSM_EVENT_PRLO_RCVD:
+		bfa_fcs_rport_send_prlo_acc(rport);
+		break;
+	case RPSM_EVENT_FAB_SCN:
+		/*
+		 * ignore, wait for NS query response
+		 */
+		break;
+
+	case RPSM_EVENT_LOGO_RCVD:
+		/*
+		 * Not logged-in yet. Accept LOGO.
+		 */
+		bfa_fcs_rport_send_logo_acc(rport);
+		break;
+
+	case RPSM_EVENT_PLOGI_COMP:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
+		bfa_fcxp_discard(rport->fcxp);
+		bfa_fcs_rport_fcs_online_action(rport);
+		break;
+
+	default:
+		bfa_sm_fault(rport->fcs, event);
+	}
+}
+
+/*
+ * Rport needs to be deleted
+ * waiting for ITNIM clean up to finish
+ */
+static void
+bfa_fcs_rport_sm_fc4_off_delete(struct bfa_fcs_rport_s *rport,
+				enum rport_event event)
+{
+	bfa_trc(rport->fcs, rport->pwwn);
+	bfa_trc(rport->fcs, rport->pid);
+	bfa_trc(rport->fcs, event);
+
+	switch (event) {
+	case RPSM_EVENT_FC4_OFFLINE:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_delete_pending);
+		bfa_fcs_rport_hal_offline(rport);
+		break;
+
+	case RPSM_EVENT_DELETE:
+	case RPSM_EVENT_PLOGI_RCVD:
+		/* Ignore these events */
+		break;
+
+	default:
+		bfa_sm_fault(rport->fcs, event);
+		break;
+	}
+}
+
+/*
+ * RPort needs to be deleted
+ * waiting for BFA/FW to finish current processing
+ */
+static void
+bfa_fcs_rport_sm_delete_pending(struct bfa_fcs_rport_s *rport,
+				enum rport_event event)
+{
+	bfa_trc(rport->fcs, rport->pwwn);
+	bfa_trc(rport->fcs, rport->pid);
+	bfa_trc(rport->fcs, event);
+
+	switch (event) {
+	case RPSM_EVENT_HCB_OFFLINE:
+		bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
+		bfa_fcs_rport_free(rport);
+		break;
+
+	case RPSM_EVENT_DELETE:
+	case RPSM_EVENT_LOGO_IMP:
+	case RPSM_EVENT_PLOGI_RCVD:
+		/* Ignore these events */
+		break;
+
+	default:
+		bfa_sm_fault(rport->fcs, event);
+	}
+}
+
+/*
+ *  fcs_rport_private FCS RPORT private functions
+ */
+
+static void
+bfa_fcs_rport_send_plogi(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+	struct bfa_fcs_rport_s *rport = rport_cbarg;
+	struct bfa_fcs_lport_s *port = rport->port;
+	struct fchs_s	fchs;
+	int		len;
+	struct bfa_fcxp_s *fcxp;
+
+	bfa_trc(rport->fcs, rport->pwwn);
+
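+	/*
+	 * Use the fcxp handed in by the wait-queue callback if there is one;
+	 * otherwise try to allocate, queueing this routine for a retry when
+	 * the fcxp pool is exhausted.
+	 */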
+	fcxp = fcxp_alloced ? fcxp_alloced :
+	       bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
+	if (!fcxp) {
+		bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
+				bfa_fcs_rport_send_plogi, rport, BFA_TRUE);
+		return;
+	}
+	rport->fcxp = fcxp;
+
+	len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid,
+				bfa_fcs_lport_get_fcid(port), 0,
+				port->port_cfg.pwwn, port->port_cfg.nwwn,
+				bfa_fcport_get_maxfrsize(port->fcs->bfa),
+				bfa_fcport_get_rx_bbcredit(port->fcs->bfa));
+
+	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+			FC_CLASS_3, len, &fchs, bfa_fcs_rport_plogi_response,
+			(void *)rport, FC_MAX_PDUSZ, FC_ELS_TOV);
+
+	rport->stats.plogis++;
+	bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT);
+}
+
+static void
+bfa_fcs_rport_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
+				bfa_status_t req_status, u32 rsp_len,
+				u32 resid_len, struct fchs_s *rsp_fchs)
+{
+	struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
+	struct fc_logi_s	*plogi_rsp;
+	struct fc_ls_rjt_s	*ls_rjt;
+	struct bfa_fcs_rport_s *twin;
+	struct list_head	*qe;
+
+	bfa_trc(rport->fcs, rport->pwwn);
+
+	/*
+	 * Sanity Checks
+	 */
+	if (req_status != BFA_STATUS_OK) {
+		bfa_trc(rport->fcs, req_status);
+		rport->stats.plogi_failed++;
+		bfa_sm_send_event(rport, RPSM_EVENT_FAILED);
+		return;
+	}
+
+	plogi_rsp = (struct fc_logi_s *) BFA_FCXP_RSP_PLD(fcxp);
+
+	/*
+	 * Check for failure first.
+	 */
+	if (plogi_rsp->els_cmd.els_code != FC_ELS_ACC) {
+		ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp);
+
+		bfa_trc(rport->fcs, ls_rjt->reason_code);
+		bfa_trc(rport->fcs, ls_rjt->reason_code_expl);
+
+		if ((ls_rjt->reason_code == FC_LS_RJT_RSN_UNABLE_TO_PERF_CMD) &&
+		    (ls_rjt->reason_code_expl == FC_LS_RJT_EXP_INSUFF_RES)) {
+			rport->stats.rjt_insuff_res++;
+			bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RETRY);
+			return;
+		}
+
+		rport->stats.plogi_rejects++;
+		bfa_sm_send_event(rport, RPSM_EVENT_FAILED);
+		return;
+	}
+
+	/*
+	 * PLOGI is complete. Make sure this device is not one of the known
+	 * devices with a new FC port address.
+	 */
+	list_for_each(qe, &rport->port->rport_q) {
+		twin = (struct bfa_fcs_rport_s *) qe;
+		if (twin == rport)
+			continue;
+		if (!rport->pwwn && (plogi_rsp->port_name == twin->pwwn)) {
+			bfa_trc(rport->fcs, twin->pid);
+			bfa_trc(rport->fcs, rport->pid);
+
+			/* Update plogi stats in twin */
+			twin->stats.plogis  += rport->stats.plogis;
+			twin->stats.plogi_rejects  +=
+				 rport->stats.plogi_rejects;
+			twin->stats.plogi_timeouts  +=
+				 rport->stats.plogi_timeouts;
+			twin->stats.plogi_failed +=
+				 rport->stats.plogi_failed;
+			twin->stats.plogi_rcvd	  += rport->stats.plogi_rcvd;
+			twin->stats.plogi_accs++;
+
+			bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
+
+			bfa_fcs_rport_update(twin, plogi_rsp);
+			twin->pid = rsp_fchs->s_id;
+			bfa_sm_send_event(twin, RPSM_EVENT_PLOGI_COMP);
+			return;
+		}
+	}
+
+	/*
+	 * Normal login path -- no evil twins.
+	 */
+	rport->stats.plogi_accs++;
+	bfa_fcs_rport_update(rport, plogi_rsp);
+	bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED);
+}
+
+static void
+bfa_fcs_rport_send_plogiacc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+	struct bfa_fcs_rport_s *rport = rport_cbarg;
+	struct bfa_fcs_lport_s *port = rport->port;
+	struct fchs_s		fchs;
+	int		len;
+	struct bfa_fcxp_s *fcxp;
+
+	bfa_trc(rport->fcs, rport->pwwn);
+	bfa_trc(rport->fcs, rport->reply_oxid);
+
+	fcxp = fcxp_alloced ? fcxp_alloced :
+	       bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
+	if (!fcxp) {
+		bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
+				bfa_fcs_rport_send_plogiacc, rport, BFA_FALSE);
+		return;
+	}
+	rport->fcxp = fcxp;
+
+	len = fc_plogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+				 rport->pid, bfa_fcs_lport_get_fcid(port),
+				 rport->reply_oxid, port->port_cfg.pwwn,
+				 port->port_cfg.nwwn,
+				 bfa_fcport_get_maxfrsize(port->fcs->bfa),
+				 bfa_fcport_get_rx_bbcredit(port->fcs->bfa));
+
+	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+			FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
+
+	bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT);
+}
+
+static void
+bfa_fcs_rport_send_adisc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+	struct bfa_fcs_rport_s *rport = rport_cbarg;
+	struct bfa_fcs_lport_s *port = rport->port;
+	struct fchs_s		fchs;
+	int		len;
+	struct bfa_fcxp_s *fcxp;
+
+	bfa_trc(rport->fcs, rport->pwwn);
+
+	fcxp = fcxp_alloced ? fcxp_alloced :
+	       bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
+	if (!fcxp) {
+		bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
+				bfa_fcs_rport_send_adisc, rport, BFA_TRUE);
+		return;
+	}
+	rport->fcxp = fcxp;
+
+	len = fc_adisc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid,
+				bfa_fcs_lport_get_fcid(port), 0,
+				port->port_cfg.pwwn, port->port_cfg.nwwn);
+
+	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+			FC_CLASS_3, len, &fchs, bfa_fcs_rport_adisc_response,
+			rport, FC_MAX_PDUSZ, FC_ELS_TOV);
+
+	rport->stats.adisc_sent++;
+	bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT);
+}
+
+static void
+bfa_fcs_rport_adisc_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
+				bfa_status_t req_status, u32 rsp_len,
+				u32 resid_len, struct fchs_s *rsp_fchs)
+{
+	struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
+	void		*pld = bfa_fcxp_get_rspbuf(fcxp);
+	struct fc_ls_rjt_s	*ls_rjt;
+
+	if (req_status != BFA_STATUS_OK) {
+		bfa_trc(rport->fcs, req_status);
+		rport->stats.adisc_failed++;
+		bfa_sm_send_event(rport, RPSM_EVENT_FAILED);
+		return;
+	}
+
+	if (fc_adisc_rsp_parse((struct fc_adisc_s *)pld, rsp_len, rport->pwwn,
+				rport->nwwn)  == FC_PARSE_OK) {
+		rport->stats.adisc_accs++;
+		bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED);
+		return;
+	}
+
+	rport->stats.adisc_rejects++;
+	ls_rjt = pld;
+	bfa_trc(rport->fcs, ls_rjt->els_cmd.els_code);
+	bfa_trc(rport->fcs, ls_rjt->reason_code);
+	bfa_trc(rport->fcs, ls_rjt->reason_code_expl);
+	bfa_sm_send_event(rport, RPSM_EVENT_FAILED);
+}
+
+static void
+bfa_fcs_rport_send_nsdisc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+	struct bfa_fcs_rport_s *rport = rport_cbarg;
+	struct bfa_fcs_lport_s *port = rport->port;
+	struct fchs_s	fchs;
+	struct bfa_fcxp_s *fcxp;
+	int		len;
+	bfa_cb_fcxp_send_t cbfn;
+
+	bfa_trc(rport->fcs, rport->pid);
+
+	fcxp = fcxp_alloced ? fcxp_alloced :
+	       bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
+	if (!fcxp) {
+		bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
+				bfa_fcs_rport_send_nsdisc, rport, BFA_TRUE);
+		return;
+	}
+	rport->fcxp = fcxp;
+
+	if (rport->pwwn) {
+		len = fc_gidpn_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+				bfa_fcs_lport_get_fcid(port), 0, rport->pwwn);
+		cbfn = bfa_fcs_rport_gidpn_response;
+	} else {
+		len = fc_gpnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+				bfa_fcs_lport_get_fcid(port), 0, rport->pid);
+		cbfn = bfa_fcs_rport_gpnid_response;
+	}
+
+	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+			FC_CLASS_3, len, &fchs, cbfn,
+			(void *)rport, FC_MAX_PDUSZ, FC_FCCT_TOV);
+
+	bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT);
+}
+
+static void
+bfa_fcs_rport_gidpn_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
+				bfa_status_t req_status, u32 rsp_len,
+				u32 resid_len, struct fchs_s *rsp_fchs)
+{
+	struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
+	struct ct_hdr_s	*cthdr;
+	struct fcgs_gidpn_resp_s	*gidpn_rsp;
+	struct bfa_fcs_rport_s	*twin;
+	struct list_head	*qe;
+
+	bfa_trc(rport->fcs, rport->pwwn);
+
+	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
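+	/* convert the CT response code from wire (big-endian) to CPU order */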
+	cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
+
+	if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
+		/* Check if the pid is the same as before. */
+		gidpn_rsp = (struct fcgs_gidpn_resp_s *) (cthdr + 1);
+
+		if (gidpn_rsp->dap == rport->pid) {
+			/* Device is online  */
+			bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED);
+		} else {
+			/*
+			 * Device's PID has changed. We need to cleanup
+			 * and re-login. If there is another device with
+			 * the newly discovered pid, send an SCN notice
+			 * so that its new pid can be discovered.
+			 */
+			list_for_each(qe, &rport->port->rport_q) {
+				twin = (struct bfa_fcs_rport_s *) qe;
+				if (twin == rport)
+					continue;
+				if (gidpn_rsp->dap == twin->pid) {
+					bfa_trc(rport->fcs, twin->pid);
+					bfa_trc(rport->fcs, rport->pid);
+
+					twin->pid = 0;
+					bfa_sm_send_event(twin,
+					 RPSM_EVENT_ADDRESS_CHANGE);
+				}
+			}
+			rport->pid = gidpn_rsp->dap;
+			bfa_sm_send_event(rport, RPSM_EVENT_ADDRESS_CHANGE);
+		}
+		return;
+	}
+
+	/*
+	 * Reject Response
+	 */
+	switch (cthdr->reason_code) {
+	case CT_RSN_LOGICAL_BUSY:
+		/*
+		 * Need to retry
+		 */
+		bfa_sm_send_event(rport, RPSM_EVENT_TIMEOUT);
+		break;
+
+	case CT_RSN_UNABLE_TO_PERF:
+		/*
+		 * device doesn't exist; start a timer to clean this up later.
+		 */
+		bfa_sm_send_event(rport, RPSM_EVENT_FAILED);
+		break;
+
+	default:
+		bfa_sm_send_event(rport, RPSM_EVENT_FAILED);
+		break;
+	}
+}
+
+static void
+bfa_fcs_rport_gpnid_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
+				bfa_status_t req_status, u32 rsp_len,
+				u32 resid_len, struct fchs_s *rsp_fchs)
+{
+	struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
+	struct ct_hdr_s	*cthdr;
+
+	bfa_trc(rport->fcs, rport->pwwn);
+
+	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
+	cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
+
+	if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
+		bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED);
+		return;
+	}
+
+	/*
+	 * Reject Response
+	 */
+	switch (cthdr->reason_code) {
+	case CT_RSN_LOGICAL_BUSY:
+		/*
+		 * Need to retry
+		 */
+		bfa_sm_send_event(rport, RPSM_EVENT_TIMEOUT);
+		break;
+
+	case CT_RSN_UNABLE_TO_PERF:
+		/*
+		 * Device doesn't exist; start a timer to clean it up later.
+		 */
+		bfa_sm_send_event(rport, RPSM_EVENT_FAILED);
+		break;
+
+	default:
+		bfa_sm_send_event(rport, RPSM_EVENT_FAILED);
+		break;
+	}
+}
+
+/*
+ *	Called to send a logout to the rport.
+ */
+static void
+bfa_fcs_rport_send_logo(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+	struct bfa_fcs_rport_s *rport = rport_cbarg;
+	struct bfa_fcs_lport_s *port;
+	struct fchs_s	fchs;
+	struct bfa_fcxp_s *fcxp;
+	u16	len;
+
+	bfa_trc(rport->fcs, rport->pid);
+
+	port = rport->port;
+
+	fcxp = fcxp_alloced ? fcxp_alloced :
+	       bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
+	if (!fcxp) {
+		bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
+				bfa_fcs_rport_send_logo, rport, BFA_FALSE);
+		return;
+	}
+	rport->fcxp = fcxp;
+
+	len = fc_logo_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid,
+				bfa_fcs_lport_get_fcid(port), 0,
+				bfa_fcs_lport_get_pwwn(port));
+
+	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+			FC_CLASS_3, len, &fchs, NULL,
+			rport, FC_MAX_PDUSZ, FC_ELS_TOV);
+
+	rport->stats.logos++;
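+	/* No response is expected for the LOGO; discard the fcxp right away. */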
+	bfa_fcxp_discard(rport->fcxp);
+	bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT);
+}
+
+/*
+ *	Send ACC for a LOGO received.
+ */
+static void
+bfa_fcs_rport_send_logo_acc(void *rport_cbarg)
+{
+	struct bfa_fcs_rport_s *rport = rport_cbarg;
+	struct bfa_fcs_lport_s *port;
+	struct fchs_s	fchs;
+	struct bfa_fcxp_s *fcxp;
+	u16	len;
+
+	bfa_trc(rport->fcs, rport->pid);
+
+	port = rport->port;
+
+	fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
+	if (!fcxp)
+		return;
+
+	rport->stats.logo_rcvd++;
+	len = fc_logo_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+				rport->pid, bfa_fcs_lport_get_fcid(port),
+				rport->reply_oxid);
+
+	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+			FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
+}
+
+/*
+ *	Called by bfa_timer on PLOGI retry timer expiry.
+ *
+ * @param[in] arg - pointer to the rport (struct bfa_fcs_rport_s)
+ */
+static void
+bfa_fcs_rport_timeout(void *arg)
+{
+	struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) arg;
+
+	rport->stats.plogi_timeouts++;
+	bfa_stats(rport->port, rport_plogi_timeouts);
+	bfa_sm_send_event(rport, RPSM_EVENT_TIMEOUT);
+}
+
+static void
+bfa_fcs_rport_process_prli(struct bfa_fcs_rport_s *rport,
+			struct fchs_s *rx_fchs, u16 len)
+{
+	struct bfa_fcxp_s *fcxp;
+	struct fchs_s	fchs;
+	struct bfa_fcs_lport_s *port = rport->port;
+	struct fc_prli_s	*prli;
+
+	bfa_trc(port->fcs, rx_fchs->s_id);
+	bfa_trc(port->fcs, rx_fchs->d_id);
+
+	rport->stats.prli_rcvd++;
+
+	/*
+	 * We are in Initiator Mode
+	 */
+	prli = (struct fc_prli_s *) (rx_fchs + 1);
+
+	if (prli->parampage.servparams.target) {
+		/*
+		 * PRLI from a target: send the ACC. The PRLI sent by us will
+		 * be used to transition the IT nexus once the response is
+		 * received from the target.
+		 */
+		bfa_trc(port->fcs, rx_fchs->s_id);
+		rport->scsi_function = BFA_RPORT_TARGET;
+	} else {
+		bfa_trc(rport->fcs, prli->parampage.type);
+		rport->scsi_function = BFA_RPORT_INITIATOR;
+		bfa_fcs_itnim_is_initiator(rport->itnim);
+	}
+
+	fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
+	if (!fcxp)
+		return;
+
+	len = fc_prli_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+				rx_fchs->s_id, bfa_fcs_lport_get_fcid(port),
+				rx_fchs->ox_id, port->port_cfg.roles);
+
+	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+			FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
+}
+
+static void
+bfa_fcs_rport_process_rpsc(struct bfa_fcs_rport_s *rport,
+			struct fchs_s *rx_fchs, u16 len)
+{
+	struct bfa_fcxp_s *fcxp;
+	struct fchs_s	fchs;
+	struct bfa_fcs_lport_s *port = rport->port;
+	struct fc_rpsc_speed_info_s speeds;
+	struct bfa_port_attr_s pport_attr;
+
+	bfa_trc(port->fcs, rx_fchs->s_id);
+	bfa_trc(port->fcs, rx_fchs->d_id);
+
+	rport->stats.rpsc_rcvd++;
+	speeds.port_speed_cap =
+		RPSC_SPEED_CAP_1G | RPSC_SPEED_CAP_2G | RPSC_SPEED_CAP_4G |
+		RPSC_SPEED_CAP_8G;
+
+	/*
+	 * Get the current speed from the pport attributes via BFA.
+	 */
+	bfa_fcport_get_attr(port->fcs->bfa, &pport_attr);
+
+	speeds.port_op_speed = fc_bfa_speed_to_rpsc_operspeed(pport_attr.speed);
+
+	fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
+	if (!fcxp)
+		return;
+
+	len = fc_rpsc_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+				rx_fchs->s_id, bfa_fcs_lport_get_fcid(port),
+				rx_fchs->ox_id, &speeds);
+
+	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+			FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
+}
+
+static void
+bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport,
+			struct fchs_s *rx_fchs, u16 len)
+{
+	struct bfa_fcxp_s *fcxp;
+	struct fchs_s	fchs;
+	struct bfa_fcs_lport_s *port = rport->port;
+	struct fc_adisc_s	*adisc;
+
+	bfa_trc(port->fcs, rx_fchs->s_id);
+	bfa_trc(port->fcs, rx_fchs->d_id);
+
+	rport->stats.adisc_rcvd++;
+
+	adisc = (struct fc_adisc_s *) (rx_fchs + 1);
+
+	/*
+	 * Accept if the itnim for this rport is online.
+	 * Else reject the ADISC.
+	 */
+	if (bfa_fcs_itnim_get_online_state(rport->itnim) == BFA_STATUS_OK) {
+
+		fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
+		if (!fcxp)
+			return;
+
+		len = fc_adisc_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+			 rx_fchs->s_id, bfa_fcs_lport_get_fcid(port),
+			 rx_fchs->ox_id, port->port_cfg.pwwn,
+			 port->port_cfg.nwwn);
+
+		bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag,
+				BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL,
+				FC_MAX_PDUSZ, 0);
+	} else {
+		rport->stats.adisc_rejected++;
+		bfa_fcs_rport_send_ls_rjt(rport, rx_fchs,
+					  FC_LS_RJT_RSN_UNABLE_TO_PERF_CMD,
+					  FC_LS_RJT_EXP_LOGIN_REQUIRED);
+	}
+}
+
+static void
+bfa_fcs_rport_hal_online(struct bfa_fcs_rport_s *rport)
+{
+	struct bfa_fcs_lport_s *port = rport->port;
+	struct bfa_rport_info_s rport_info;
+
+	rport_info.pid = rport->pid;
+	rport_info.local_pid = port->pid;
+	rport_info.lp_tag = port->lp_tag;
+	rport_info.vf_id = port->fabric->vf_id;
+	rport_info.vf_en = port->fabric->is_vf;
+	rport_info.fc_class = rport->fc_cos;
+	rport_info.cisc = rport->cisc;
+	rport_info.max_frmsz = rport->maxfrsize;
+	bfa_rport_online(rport->bfa_rport, &rport_info);
+}
+
+static void
+bfa_fcs_rport_hal_offline(struct bfa_fcs_rport_s *rport)
+{
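+	/*
+	 * If a BFA rport exists, let its state machine drive the offline;
+	 * otherwise complete the offline callback directly.
+	 */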
+	if (rport->bfa_rport)
+		bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
+	else
+		bfa_cb_rport_offline(rport);
+}
+
+static struct bfa_fcs_rport_s *
+bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid)
+{
+	struct bfa_fcs_s	*fcs = port->fcs;
+	struct bfa_fcs_rport_s *rport;
+	struct bfad_rport_s	*rport_drv;
+
+	/*
+	 * allocate rport
+	 */
+	if (fcs->num_rport_logins >= bfa_fcs_rport_max_logins) {
+		bfa_trc(fcs, rpid);
+		return NULL;
+	}
+
+	if (bfa_fcb_rport_alloc(fcs->bfad, &rport, &rport_drv)
+		!= BFA_STATUS_OK) {
+		bfa_trc(fcs, rpid);
+		return NULL;
+	}
+
+	/*
+	 * Initialize r-port
+	 */
+	rport->port = port;
+	rport->fcs = fcs;
+	rport->rp_drv = rport_drv;
+	rport->pid = rpid;
+	rport->pwwn = pwwn;
+	rport->old_pid = 0;
+
+	rport->bfa_rport = NULL;
+
+	/*
+	 * allocate FC-4s
+	 */
+	WARN_ON(!bfa_fcs_lport_is_initiator(port));
+
+	if (bfa_fcs_lport_is_initiator(port)) {
+		rport->itnim = bfa_fcs_itnim_create(rport);
+		if (!rport->itnim) {
+			bfa_trc(fcs, rpid);
+			kfree(rport_drv);
+			return NULL;
+		}
+	}
+
+	bfa_fcs_lport_add_rport(port, rport);
+	fcs->num_rport_logins++;
+
+	bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
+
+	/* Initialize the Rport Features(RPF) Sub Module  */
+	if (!BFA_FCS_PID_IS_WKA(rport->pid))
+		bfa_fcs_rpf_init(rport);
+
+	return rport;
+}
+
+static void
+bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport)
+{
+	struct bfa_fcs_lport_s *port = rport->port;
+	struct bfa_fcs_s *fcs = port->fcs;
+
+	/*
+	 * - delete FC-4s
+	 * - delete BFA rport
+	 * - remove from queue of rports
+	 */
+	rport->plogi_pending = BFA_FALSE;
+
+	if (bfa_fcs_lport_is_initiator(port)) {
+		bfa_fcs_itnim_delete(rport->itnim);
+		if (rport->pid != 0 && !BFA_FCS_PID_IS_WKA(rport->pid))
+			bfa_fcs_rpf_rport_offline(rport);
+	}
+
+	if (rport->bfa_rport) {
+		bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_DELETE);
+		rport->bfa_rport = NULL;
+	}
+
+	bfa_fcs_lport_del_rport(port, rport);
+	fcs->num_rport_logins--;
+	kfree(rport->rp_drv);
+}
+
+static void
+bfa_fcs_rport_aen_post(struct bfa_fcs_rport_s *rport,
+			enum bfa_rport_aen_event event,
+			struct bfa_rport_aen_data_s *data)
+{
+	struct bfa_fcs_lport_s *port = rport->port;
+	struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad;
+	struct bfa_aen_entry_s  *aen_entry;
+
+	bfad_get_aen_entry(bfad, aen_entry);
+	if (!aen_entry)
+		return;
+
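+	/* Both QoS events carry their payload in the priv.qos member. */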
+	if (event == BFA_RPORT_AEN_QOS_PRIO)
+		aen_entry->aen_data.rport.priv.qos = data->priv.qos;
+	else if (event == BFA_RPORT_AEN_QOS_FLOWID)
+		aen_entry->aen_data.rport.priv.qos = data->priv.qos;
+
+	aen_entry->aen_data.rport.vf_id = rport->port->fabric->vf_id;
+	aen_entry->aen_data.rport.ppwwn = bfa_fcs_lport_get_pwwn(
+					bfa_fcs_get_base_port(rport->fcs));
+	aen_entry->aen_data.rport.lpwwn = bfa_fcs_lport_get_pwwn(rport->port);
+	aen_entry->aen_data.rport.rpwwn = rport->pwwn;
+
+	/* Send the AEN notification */
+	bfad_im_post_vendor_event(aen_entry, bfad, ++rport->fcs->fcs_aen_seq,
+				  BFA_AEN_CAT_RPORT, event);
+}
+
+static void
+bfa_fcs_rport_fcs_online_action(struct bfa_fcs_rport_s *rport)
+{
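+	/* An rport must have both a PID and a PWWN before going online. */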
+	if ((!rport->pid) || (!rport->pwwn)) {
+		bfa_trc(rport->fcs, rport->pid);
+		bfa_sm_fault(rport->fcs, rport->pid);
+	}
+
+	bfa_sm_send_event(rport->itnim, BFA_FCS_ITNIM_SM_FCS_ONLINE);
+}
+
+static void
+bfa_fcs_rport_hal_online_action(struct bfa_fcs_rport_s *rport)
+{
+	struct bfa_fcs_lport_s *port = rport->port;
+	struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad;
+	char	lpwwn_buf[BFA_STRING_32];
+	char	rpwwn_buf[BFA_STRING_32];
+
+	rport->stats.onlines++;
+
+	if ((!rport->pid) || (!rport->pwwn)) {
+		bfa_trc(rport->fcs, rport->pid);
+		bfa_sm_fault(rport->fcs, rport->pid);
+	}
+
+	if (bfa_fcs_lport_is_initiator(port)) {
+		bfa_fcs_itnim_brp_online(rport->itnim);
+		if (!BFA_FCS_PID_IS_WKA(rport->pid))
+			bfa_fcs_rpf_rport_online(rport);
+	}
+
+	wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
+	wwn2str(rpwwn_buf, rport->pwwn);
+	if (!BFA_FCS_PID_IS_WKA(rport->pid)) {
+		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+		"Remote port (WWN = %s) online for logical port (WWN = %s)\n",
+		rpwwn_buf, lpwwn_buf);
+		bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_ONLINE, NULL);
+	}
+}
+
+static void
+bfa_fcs_rport_fcs_offline_action(struct bfa_fcs_rport_s *rport)
+{
+	if (!BFA_FCS_PID_IS_WKA(rport->pid))
+		bfa_fcs_rpf_rport_offline(rport);
+
+	bfa_fcs_itnim_rport_offline(rport->itnim);
+}
+
+static void
+bfa_fcs_rport_hal_offline_action(struct bfa_fcs_rport_s *rport)
+{
+	struct bfa_fcs_lport_s *port = rport->port;
+	struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad;
+	char	lpwwn_buf[BFA_STRING_32];
+	char	rpwwn_buf[BFA_STRING_32];
+
+	if (!rport->bfa_rport) {
+		bfa_fcs_rport_fcs_offline_action(rport);
+		return;
+	}
+
+	rport->stats.offlines++;
+
+	wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
+	wwn2str(rpwwn_buf, rport->pwwn);
+	if (!BFA_FCS_PID_IS_WKA(rport->pid)) {
+		if (bfa_fcs_lport_is_online(rport->port) == BFA_TRUE) {
+			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
+				"Remote port (WWN = %s) connectivity lost for "
+				"logical port (WWN = %s)\n",
+				rpwwn_buf, lpwwn_buf);
+			bfa_fcs_rport_aen_post(rport,
+				BFA_RPORT_AEN_DISCONNECT, NULL);
+		} else {
+			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+				"Remote port (WWN = %s) offlined by "
+				"logical port (WWN = %s)\n",
+				rpwwn_buf, lpwwn_buf);
+			bfa_fcs_rport_aen_post(rport,
+				BFA_RPORT_AEN_OFFLINE, NULL);
+		}
+	}
+
+	if (bfa_fcs_lport_is_initiator(port)) {
+		bfa_fcs_itnim_rport_offline(rport->itnim);
+		if (!BFA_FCS_PID_IS_WKA(rport->pid))
+			bfa_fcs_rpf_rport_offline(rport);
+	}
+}
+
+/*
+ * Update rport parameters from PLOGI or PLOGI accept.
+ */
+static void
+bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi)
+{
+	bfa_fcs_lport_t *port = rport->port;
+
+	/*
+	 * - port name
+	 * - node name
+	 */
+	rport->pwwn = plogi->port_name;
+	rport->nwwn = plogi->node_name;
+
+	/*
+	 * - class of service
+	 */
+	rport->fc_cos = 0;
+	if (plogi->class3.class_valid)
+		rport->fc_cos = FC_CLASS_3;
+
+	if (plogi->class2.class_valid)
+		rport->fc_cos |= FC_CLASS_2;
+
+	/*
+	 * - CISC
+	 * - MAX receive frame size
+	 */
+	rport->cisc = plogi->csp.cisc;
+	if (be16_to_cpu(plogi->class3.rxsz) < be16_to_cpu(plogi->csp.rxsz))
+		rport->maxfrsize = be16_to_cpu(plogi->class3.rxsz);
+	else
+		rport->maxfrsize = be16_to_cpu(plogi->csp.rxsz);
+
+	bfa_trc(port->fcs, be16_to_cpu(plogi->csp.bbcred));
+	bfa_trc(port->fcs, port->fabric->bb_credit);
+	/*
+	 * Direct Attach P2P mode:
+	 * This handles a bug (233476) in IBM targets in Direct Attach
+	 * mode. In the FLOGI accept, the target erroneously sets the BB
+	 * credit to the value used in the FLOGI sent by the HBA; it uses
+	 * the correct value (its own BB credit) in the PLOGI.
+	 */
+	if ((!bfa_fcs_fabric_is_switched(port->fabric))	 &&
+		(be16_to_cpu(plogi->csp.bbcred) < port->fabric->bb_credit)) {
+
+		bfa_trc(port->fcs, be16_to_cpu(plogi->csp.bbcred));
+		bfa_trc(port->fcs, port->fabric->bb_credit);
+
+		port->fabric->bb_credit = be16_to_cpu(plogi->csp.bbcred);
+		bfa_fcport_set_tx_bbcredit(port->fcs->bfa,
+					  port->fabric->bb_credit);
+	}
+}
+
+/*
+ *	Called to handle LOGO received from an existing remote port.
+ */
+static void
+bfa_fcs_rport_process_logo(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs)
+{
+	rport->reply_oxid = fchs->ox_id;
+	bfa_trc(rport->fcs, rport->reply_oxid);
+
+	rport->prlo = BFA_FALSE;
+	rport->stats.logo_rcvd++;
+	bfa_sm_send_event(rport, RPSM_EVENT_LOGO_RCVD);
+}
+
+
+
+/*
+ *  fcs_rport_public FCS rport public interfaces
+ */
+
+/*
+ *	Called by bport/vport to create a remote port instance for a discovered
+ *	remote device.
+ *
+ * @param[in] port	- base port or vport
+ * @param[in] rpid	- remote port ID
+ *
+ * @return Pointer to the new rport, or NULL on allocation failure
+ */
+struct bfa_fcs_rport_s *
+bfa_fcs_rport_create(struct bfa_fcs_lport_s *port, u32 rpid)
+{
+	struct bfa_fcs_rport_s *rport;
+
+	bfa_trc(port->fcs, rpid);
+	rport = bfa_fcs_rport_alloc(port, WWN_NULL, rpid);
+	if (!rport)
+		return NULL;
+
+	bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_SEND);
+	return rport;
+}
+
+/*
+ * Called to create a rport for which only the wwn is known.
+ *
+ * @param[in] port	- base port
+ * @param[in] rpwwn	- remote port wwn
+ *
+ * @return Pointer to the new rport, or NULL on allocation failure
+ */
+struct bfa_fcs_rport_s *
+bfa_fcs_rport_create_by_wwn(struct bfa_fcs_lport_s *port, wwn_t rpwwn)
+{
+	struct bfa_fcs_rport_s *rport;
+	bfa_trc(port->fcs, rpwwn);
+	rport = bfa_fcs_rport_alloc(port, rpwwn, 0);
+	if (!rport)
+		return NULL;
+
+	bfa_sm_send_event(rport, RPSM_EVENT_ADDRESS_DISC);
+	return rport;
+}
+
+/*
+ * Called by bport in private loop topology to indicate that a
+ * rport has been discovered and PLOGI has been completed.
+ *
+ * @param[in] port	- base port or vport
+ * @param[in] fchs	- FC header of the received PLOGI
+ * @param[in] plogi	- PLOGI payload
+ */
+void
+bfa_fcs_rport_start(struct bfa_fcs_lport_s *port, struct fchs_s *fchs,
+	 struct fc_logi_s *plogi)
+{
+	struct bfa_fcs_rport_s *rport;
+
+	rport = bfa_fcs_rport_alloc(port, WWN_NULL, fchs->s_id);
+	if (!rport)
+		return;
+
+	bfa_fcs_rport_update(rport, plogi);
+
+	bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_COMP);
+}
+
+/*
+ *	Called by bport/vport to handle PLOGI received from a new remote port.
+ *	If an existing rport does a plogi, it will be handled separately.
+ */
+void
+bfa_fcs_rport_plogi_create(struct bfa_fcs_lport_s *port, struct fchs_s *fchs,
+				struct fc_logi_s *plogi)
+{
+	struct bfa_fcs_rport_s *rport;
+
+	rport = bfa_fcs_rport_alloc(port, plogi->port_name, fchs->s_id);
+	if (!rport)
+		return;
+
+	bfa_fcs_rport_update(rport, plogi);
+
+	rport->reply_oxid = fchs->ox_id;
+	bfa_trc(rport->fcs, rport->reply_oxid);
+
+	rport->stats.plogi_rcvd++;
+	bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD);
+}
+
+/*
+ *	Called by bport/vport to handle PLOGI received from an existing
+ *	 remote port.
+ */
+void
+bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
+			struct fc_logi_s *plogi)
+{
+	/*
+	 * @todo Handle P2P and initiator-initiator.
+	 */
+
+	bfa_fcs_rport_update(rport, plogi);
+
+	rport->reply_oxid = rx_fchs->ox_id;
+	bfa_trc(rport->fcs, rport->reply_oxid);
+
+	rport->pid = rx_fchs->s_id;
+	bfa_trc(rport->fcs, rport->pid);
+
+	rport->stats.plogi_rcvd++;
+	bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD);
+}
+
+
+/*
+ *	Called by bport/vport to notify SCN for the remote port
+ */
+void
+bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport)
+{
+	rport->stats.rscns++;
+	bfa_sm_send_event(rport, RPSM_EVENT_FAB_SCN);
+}
+
+/*
+ *	BFA callback for the bfa_rport_online() call.
+ *
+ * @param[in] cbarg - rport structure
+ */
+void
+bfa_cb_rport_online(void *cbarg)
+{
+	struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
+
+	bfa_trc(rport->fcs, rport->pwwn);
+	bfa_sm_send_event(rport, RPSM_EVENT_HCB_ONLINE);
+}
+
+/*
+ *	BFA callback for the bfa_rport_offline() call.
+ *
+ * @param[in] cbarg - rport structure
+ */
+void
+bfa_cb_rport_offline(void *cbarg)
+{
+	struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
+
+	bfa_trc(rport->fcs, rport->pwwn);
+	bfa_sm_send_event(rport, RPSM_EVENT_HCB_OFFLINE);
+}
+
+/*
+ *	BFA callback invoked on a QoS flow_id change notification.
+ *
+ * @param[in] cbarg - rport structure
+ */
+void
+bfa_cb_rport_qos_scn_flowid(void *cbarg,
+		struct bfa_rport_qos_attr_s old_qos_attr,
+		struct bfa_rport_qos_attr_s new_qos_attr)
+{
+	struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
+	struct bfa_rport_aen_data_s aen_data;
+
+	bfa_trc(rport->fcs, rport->pwwn);
+	aen_data.priv.qos = new_qos_attr;
+	bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_QOS_FLOWID, &aen_data);
+}
+
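+/*
+ * Mark all rports on the base port online following a link/loop SCN.
+ */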
+void
+bfa_cb_rport_scn_online(struct bfa_s *bfa)
+{
+	struct bfa_fcs_s *fcs = &((struct bfad_s *)bfa->bfad)->bfa_fcs;
+	struct bfa_fcs_lport_s *port = bfa_fcs_get_base_port(fcs);
+	struct bfa_fcs_rport_s *rp;
+	struct list_head *qe;
+
+	list_for_each(qe, &port->rport_q) {
+		rp = (struct bfa_fcs_rport_s *) qe;
+		bfa_sm_send_event(rp, RPSM_EVENT_SCN_ONLINE);
+		rp->scn_online = BFA_TRUE;
+	}
+
+	if (bfa_fcs_lport_is_online(port))
+		bfa_fcs_lport_lip_scn_online(port);
+}
+
+void
+bfa_cb_rport_scn_no_dev(void *rport)
+{
+	struct bfa_fcs_rport_s *rp = rport;
+
+	bfa_sm_send_event(rp, RPSM_EVENT_SCN_OFFLINE);
+	rp->scn_online = BFA_FALSE;
+}
+
+void
+bfa_cb_rport_scn_offline(struct bfa_s *bfa)
+{
+	struct bfa_fcs_s *fcs = &((struct bfad_s *)bfa->bfad)->bfa_fcs;
+	struct bfa_fcs_lport_s *port = bfa_fcs_get_base_port(fcs);
+	struct bfa_fcs_rport_s *rp;
+	struct list_head *qe;
+
+	list_for_each(qe, &port->rport_q) {
+		rp = (struct bfa_fcs_rport_s *) qe;
+		bfa_sm_send_event(rp, RPSM_EVENT_SCN_OFFLINE);
+		rp->scn_online = BFA_FALSE;
+	}
+}
+
+/*
+ *	BFA callback invoked on a QoS priority change notification.
+ *
+ * @param[in] cbarg - rport structure
+ */
+void
+bfa_cb_rport_qos_scn_prio(void *cbarg,
+		struct bfa_rport_qos_attr_s old_qos_attr,
+		struct bfa_rport_qos_attr_s new_qos_attr)
+{
+	struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
+	struct bfa_rport_aen_data_s aen_data;
+
+	bfa_trc(rport->fcs, rport->pwwn);
+	aen_data.priv.qos = new_qos_attr;
+	bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_QOS_PRIO, &aen_data);
+}
+
+/*
+ *	Called to process any unsolicited frames from this remote port.
+ */
+void
+bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport,
+			struct fchs_s *fchs, u16 len)
+{
+	struct bfa_fcs_lport_s *port = rport->port;
+	struct fc_els_cmd_s	*els_cmd;
+
+	bfa_trc(rport->fcs, fchs->s_id);
+	bfa_trc(rport->fcs, fchs->d_id);
+	bfa_trc(rport->fcs, fchs->type);
+
+	if (fchs->type != FC_TYPE_ELS)
+		return;
+
+	els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
+
+	bfa_trc(rport->fcs, els_cmd->els_code);
+
+	switch (els_cmd->els_code) {
+	case FC_ELS_LOGO:
+		bfa_stats(port, plogi_rcvd);
+		bfa_fcs_rport_process_logo(rport, fchs);
+		break;
+
+	case FC_ELS_ADISC:
+		bfa_stats(port, adisc_rcvd);
+		bfa_fcs_rport_process_adisc(rport, fchs, len);
+		break;
+
+	case FC_ELS_PRLO:
+		bfa_stats(port, prlo_rcvd);
+		if (bfa_fcs_lport_is_initiator(port))
+			bfa_fcs_fcpim_uf_recv(rport->itnim, fchs, len);
+		break;
+
+	case FC_ELS_PRLI:
+		bfa_stats(port, prli_rcvd);
+		bfa_fcs_rport_process_prli(rport, fchs, len);
+		break;
+
+	case FC_ELS_RPSC:
+		bfa_stats(port, rpsc_rcvd);
+		bfa_fcs_rport_process_rpsc(rport, fchs, len);
+		break;
+
+	default:
+		bfa_stats(port, un_handled_els_rcvd);
+		bfa_fcs_rport_send_ls_rjt(rport, fchs,
+					  FC_LS_RJT_RSN_CMD_NOT_SUPP,
+					  FC_LS_RJT_EXP_NO_ADDL_INFO);
+		break;
+	}
+}
+
+/* Send a best-case ACC for a received PRLO */
+static void
+bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport)
+{
+	struct bfa_fcs_lport_s *port = rport->port;
+	struct fchs_s	fchs;
+	struct bfa_fcxp_s *fcxp;
+	int		len;
+
+	bfa_trc(rport->fcs, rport->pid);
+
+	fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
+	if (!fcxp)
+		return;
+	len = fc_prlo_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+			rport->pid, bfa_fcs_lport_get_fcid(port),
+			rport->reply_oxid, 0);
+
+	bfa_fcxp_send(fcxp, rport->bfa_rport, port->fabric->vf_id,
+		port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs,
+		NULL, NULL, FC_MAX_PDUSZ, 0);
+}
+
+/*
+ * Send a LS reject
+ */
+static void
+bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
+			  u8 reason_code, u8 reason_code_expl)
+{
+	struct bfa_fcs_lport_s *port = rport->port;
+	struct fchs_s	fchs;
+	struct bfa_fcxp_s *fcxp;
+	int		len;
+
+	bfa_trc(rport->fcs, rx_fchs->s_id);
+
+	fcxp = bfa_fcs_fcxp_alloc(rport->fcs, BFA_FALSE);
+	if (!fcxp)
+		return;
+
+	len = fc_ls_rjt_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+				rx_fchs->s_id, bfa_fcs_lport_get_fcid(port),
+				rx_fchs->ox_id, reason_code, reason_code_expl);
+
+	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag,
+			BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL,
+			FC_MAX_PDUSZ, 0);
+}
+
+/*
+ * Return state of rport.
+ */
+int
+bfa_fcs_rport_get_state(struct bfa_fcs_rport_s *rport)
+{
+	return bfa_sm_to_state(rport_sm_table, rport->sm);
+}
+
+/*
+ *	Called by the driver to set the rport delete/ageout timeout.
+ *
+ * @param[in] rport_tmo - timeout value in seconds
+ */
+void
+bfa_fcs_rport_set_del_timeout(u8 rport_tmo)
+{
+	/* convert to milliseconds */
+	if (rport_tmo > 0)
+		bfa_fcs_rport_del_timeout = rport_tmo * 1000;
+}
+
+void
+bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, __be16 ox_id)
+{
+	bfa_trc(rport->fcs, rport->pid);
+
+	rport->prlo = BFA_TRUE;
+	rport->reply_oxid = ox_id;
+	bfa_sm_send_event(rport, RPSM_EVENT_PRLO_RCVD);
+}
+
+/*
+ * Called by BFAD to set the max limit on number of bfa_fcs_rport allocation
+ * which limits number of concurrent logins to remote ports
+ */
+void
+bfa_fcs_rport_set_max_logins(u32 max_logins)
+{
+	if (max_logins > 0)
+		bfa_fcs_rport_max_logins = max_logins;
+}
+
+void
+bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
+		struct bfa_rport_attr_s *rport_attr)
+{
+	struct bfa_rport_qos_attr_s qos_attr;
+	struct bfa_fcs_lport_s *port = rport->port;
+	bfa_port_speed_t rport_speed = rport->rpf.rpsc_speed;
+	struct bfa_port_attr_s port_attr;
+
+	bfa_fcport_get_attr(rport->fcs->bfa, &port_attr);
+
+	memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s));
+	memset(&qos_attr, 0, sizeof(struct bfa_rport_qos_attr_s));
+
+	rport_attr->pid = rport->pid;
+	rport_attr->pwwn = rport->pwwn;
+	rport_attr->nwwn = rport->nwwn;
+	rport_attr->cos_supported = rport->fc_cos;
+	rport_attr->df_sz = rport->maxfrsize;
+	rport_attr->state = bfa_fcs_rport_get_state(rport);
+	rport_attr->fc_cos = rport->fc_cos;
+	rport_attr->cisc = rport->cisc;
+	rport_attr->scsi_function = rport->scsi_function;
+	rport_attr->curr_speed  = rport->rpf.rpsc_speed;
+	rport_attr->assigned_speed  = rport->rpf.assigned_speed;
+
+	if (rport->bfa_rport) {
+		qos_attr.qos_priority = rport->bfa_rport->qos_attr.qos_priority;
+		qos_attr.qos_flow_id =
+			cpu_to_be32(rport->bfa_rport->qos_attr.qos_flow_id);
+	}
+	rport_attr->qos_attr = qos_attr;
+
+	rport_attr->trl_enforced = BFA_FALSE;
+	if (bfa_fcport_is_ratelim(port->fcs->bfa) &&
+	    (rport->scsi_function == BFA_RPORT_TARGET)) {
+		if (rport_speed == BFA_PORT_SPEED_UNKNOWN)
+			rport_speed =
+				bfa_fcport_get_ratelim_speed(rport->fcs->bfa);
+
+		if ((bfa_fcs_lport_get_rport_max_speed(port) !=
+		    BFA_PORT_SPEED_UNKNOWN) && (rport_speed < port_attr.speed))
+			rport_attr->trl_enforced = BFA_TRUE;
+	}
+}
+
+/*
+ * Remote port implementation.
+ */
+
+/*
+ *  fcs_rport_api FCS rport API.
+ */
+
+struct bfa_fcs_rport_s *
+bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port, wwn_t rpwwn)
+{
+	struct bfa_fcs_rport_s *rport;
+
+	rport = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
+	if (rport == NULL) {
+		/*
+		 * TBD Error handling
+		 */
+	}
+
+	return rport;
+}
+
+struct bfa_fcs_rport_s *
+bfa_fcs_rport_lookup_by_nwwn(struct bfa_fcs_lport_s *port, wwn_t rnwwn)
+{
+	struct bfa_fcs_rport_s *rport;
+
+	rport = bfa_fcs_lport_get_rport_by_nwwn(port, rnwwn);
+	if (rport == NULL) {
+		/*
+		 * TBD Error handling
+		 */
+	}
+
+	return rport;
+}
+
+/*
+ * Remote port features (RPF) implementation.
+ */
+
+#define BFA_FCS_RPF_RETRIES	(3)
+#define BFA_FCS_RPF_RETRY_TIMEOUT  (1000) /* 1 sec (In millisecs) */
+
+static void     bfa_fcs_rpf_send_rpsc2(void *rport_cbarg,
+				struct bfa_fcxp_s *fcxp_alloced);
+static void     bfa_fcs_rpf_rpsc2_response(void *fcsarg,
+			struct bfa_fcxp_s *fcxp,
+			void *cbarg,
+			bfa_status_t req_status,
+			u32 rsp_len,
+			u32 resid_len,
+			struct fchs_s *rsp_fchs);
+
+static void     bfa_fcs_rpf_timeout(void *arg);
+
+/*
+ *  fcs_rport_ftrs_sm FCS rport state machine events
+ */
+
+enum rpf_event {
+	RPFSM_EVENT_RPORT_OFFLINE  = 1, /* Rport offline		*/
+	RPFSM_EVENT_RPORT_ONLINE   = 2,	/* Rport online			*/
+	RPFSM_EVENT_FCXP_SENT      = 3,	/* Request frame has been sent	*/
+	RPFSM_EVENT_TIMEOUT	   = 4, /* Rport SM timeout event	*/
+	RPFSM_EVENT_RPSC_COMP      = 5,
+	RPFSM_EVENT_RPSC_FAIL      = 6,
+	RPFSM_EVENT_RPSC_ERROR     = 7,
+};
+
+static void	bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf,
+					enum rpf_event event);
+static void     bfa_fcs_rpf_sm_rpsc_sending(struct bfa_fcs_rpf_s *rpf,
+				       enum rpf_event event);
+static void     bfa_fcs_rpf_sm_rpsc(struct bfa_fcs_rpf_s *rpf,
+				       enum rpf_event event);
+static void	bfa_fcs_rpf_sm_rpsc_retry(struct bfa_fcs_rpf_s *rpf,
+					enum rpf_event event);
+static void     bfa_fcs_rpf_sm_offline(struct bfa_fcs_rpf_s *rpf,
+					enum rpf_event event);
+static void     bfa_fcs_rpf_sm_online(struct bfa_fcs_rpf_s *rpf,
+					enum rpf_event event);
+
+static void
+bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
+{
+	struct bfa_fcs_rport_s *rport = rpf->rport;
+	struct bfa_fcs_fabric_s *fabric = &rport->fcs->fabric;
+
+	bfa_trc(rport->fcs, rport->pwwn);
+	bfa_trc(rport->fcs, rport->pid);
+	bfa_trc(rport->fcs, event);
+
+	switch (event) {
+	case RPFSM_EVENT_RPORT_ONLINE:
+		/* Send RPSC2 to a Brocade fabric only. */
+		if ((!BFA_FCS_PID_IS_WKA(rport->pid)) &&
+			((rport->port->fabric->lps->brcd_switch) ||
+			(bfa_fcs_fabric_get_switch_oui(fabric) ==
+						BFA_FCS_BRCD_SWITCH_OUI))) {
+			bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending);
+			rpf->rpsc_retries = 0;
+			bfa_fcs_rpf_send_rpsc2(rpf, NULL);
+		}
+		break;
+
+	case RPFSM_EVENT_RPORT_OFFLINE:
+		break;
+
+	default:
+		bfa_sm_fault(rport->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_rpf_sm_rpsc_sending(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
+{
+	struct bfa_fcs_rport_s *rport = rpf->rport;
+
+	bfa_trc(rport->fcs, event);
+
+	switch (event) {
+	case RPFSM_EVENT_FCXP_SENT:
+		bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc);
+		break;
+
+	case RPFSM_EVENT_RPORT_OFFLINE:
+		bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline);
+		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rpf->fcxp_wqe);
+		rpf->rpsc_retries = 0;
+		break;
+
+	default:
+		bfa_sm_fault(rport->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_rpf_sm_rpsc(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
+{
+	struct bfa_fcs_rport_s *rport = rpf->rport;
+
+	bfa_trc(rport->fcs, rport->pid);
+	bfa_trc(rport->fcs, event);
+
+	switch (event) {
+	case RPFSM_EVENT_RPSC_COMP:
+		bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_online);
+		/* Update speed info in f/w via BFA */
+		if (rpf->rpsc_speed != BFA_PORT_SPEED_UNKNOWN)
+			bfa_rport_speed(rport->bfa_rport, rpf->rpsc_speed);
+		else if (rpf->assigned_speed != BFA_PORT_SPEED_UNKNOWN)
+			bfa_rport_speed(rport->bfa_rport, rpf->assigned_speed);
+		break;
+
+	case RPFSM_EVENT_RPSC_FAIL:
+		/* RPSC not supported by rport */
+		bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_online);
+		break;
+
+	case RPFSM_EVENT_RPSC_ERROR:
+		/* Need to retry after a short delay. */
+		if (rpf->rpsc_retries++ < BFA_FCS_RPF_RETRIES) {
+			bfa_timer_start(rport->fcs->bfa, &rpf->timer,
+				    bfa_fcs_rpf_timeout, rpf,
+				    BFA_FCS_RPF_RETRY_TIMEOUT);
+			bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_retry);
+		} else {
+			bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_online);
+		}
+		break;
+
+	case RPFSM_EVENT_RPORT_OFFLINE:
+		bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline);
+		bfa_fcxp_discard(rpf->fcxp);
+		rpf->rpsc_retries = 0;
+		break;
+
+	default:
+		bfa_sm_fault(rport->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_rpf_sm_rpsc_retry(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
+{
+	struct bfa_fcs_rport_s *rport = rpf->rport;
+
+	bfa_trc(rport->fcs, rport->pid);
+	bfa_trc(rport->fcs, event);
+
+	switch (event) {
+	case RPFSM_EVENT_TIMEOUT:
+		/* re-send the RPSC */
+		bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending);
+		bfa_fcs_rpf_send_rpsc2(rpf, NULL);
+		break;
+
+	case RPFSM_EVENT_RPORT_OFFLINE:
+		bfa_timer_stop(&rpf->timer);
+		bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline);
+		rpf->rpsc_retries = 0;
+		break;
+
+	default:
+		bfa_sm_fault(rport->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_rpf_sm_online(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
+{
+	struct bfa_fcs_rport_s *rport = rpf->rport;
+
+	bfa_trc(rport->fcs, rport->pwwn);
+	bfa_trc(rport->fcs, rport->pid);
+	bfa_trc(rport->fcs, event);
+
+	switch (event) {
+	case RPFSM_EVENT_RPORT_OFFLINE:
+		bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline);
+		rpf->rpsc_retries = 0;
+		break;
+
+	default:
+		bfa_sm_fault(rport->fcs, event);
+	}
+}
+
+static void
+bfa_fcs_rpf_sm_offline(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
+{
+	struct bfa_fcs_rport_s *rport = rpf->rport;
+
+	bfa_trc(rport->fcs, rport->pwwn);
+	bfa_trc(rport->fcs, rport->pid);
+	bfa_trc(rport->fcs, event);
+
+	switch (event) {
+	case RPFSM_EVENT_RPORT_ONLINE:
+		bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending);
+		bfa_fcs_rpf_send_rpsc2(rpf, NULL);
+		break;
+
+	case RPFSM_EVENT_RPORT_OFFLINE:
+		break;
+
+	default:
+		bfa_sm_fault(rport->fcs, event);
+	}
+}
+
+/*
+ * Called when Rport is created.
+ */
+void
+bfa_fcs_rpf_init(struct bfa_fcs_rport_s *rport)
+{
+	struct bfa_fcs_rpf_s *rpf = &rport->rpf;
+
+	bfa_trc(rport->fcs, rport->pid);
+	rpf->rport = rport;
+
+	bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_uninit);
+}
+
+/*
+ * Called when Rport becomes online
+ */
+void
+bfa_fcs_rpf_rport_online(struct bfa_fcs_rport_s *rport)
+{
+	bfa_trc(rport->fcs, rport->pid);
+
+	if (__fcs_min_cfg(rport->port->fcs))
+		return;
+
+	if (bfa_fcs_fabric_is_switched(rport->port->fabric))
+		bfa_sm_send_event(&rport->rpf, RPFSM_EVENT_RPORT_ONLINE);
+}
+
+/*
+ * Called when Rport becomes offline
+ */
+void
+bfa_fcs_rpf_rport_offline(struct bfa_fcs_rport_s *rport)
+{
+	bfa_trc(rport->fcs, rport->pid);
+
+	if (__fcs_min_cfg(rport->port->fcs))
+		return;
+
+	rport->rpf.rpsc_speed = 0;
+	bfa_sm_send_event(&rport->rpf, RPFSM_EVENT_RPORT_OFFLINE);
+}
+
+static void
+bfa_fcs_rpf_timeout(void *arg)
+{
+	struct bfa_fcs_rpf_s *rpf = (struct bfa_fcs_rpf_s *) arg;
+	struct bfa_fcs_rport_s *rport = rpf->rport;
+
+	bfa_trc(rport->fcs, rport->pid);
+	bfa_sm_send_event(rpf, RPFSM_EVENT_TIMEOUT);
+}
+
+static void
+bfa_fcs_rpf_send_rpsc2(void *rpf_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+	struct bfa_fcs_rpf_s *rpf = (struct bfa_fcs_rpf_s *)rpf_cbarg;
+	struct bfa_fcs_rport_s *rport = rpf->rport;
+	struct bfa_fcs_lport_s *port = rport->port;
+	struct fchs_s	fchs;
+	int		len;
+	struct bfa_fcxp_s *fcxp;
+
+	bfa_trc(rport->fcs, rport->pwwn);
+
+	fcxp = fcxp_alloced ? fcxp_alloced :
+	       bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
+	if (!fcxp) {
+		bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rpf->fcxp_wqe,
+				bfa_fcs_rpf_send_rpsc2, rpf, BFA_TRUE);
+		return;
+	}
+	rpf->fcxp = fcxp;
+
+	len = fc_rpsc2_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid,
+			    bfa_fcs_lport_get_fcid(port), &rport->pid, 1);
+
+	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+			  FC_CLASS_3, len, &fchs, bfa_fcs_rpf_rpsc2_response,
+			  rpf, FC_MAX_PDUSZ, FC_ELS_TOV);
+	rport->stats.rpsc_sent++;
+	bfa_sm_send_event(rpf, RPFSM_EVENT_FCXP_SENT);
+}
+
+static void
+bfa_fcs_rpf_rpsc2_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
+			    bfa_status_t req_status, u32 rsp_len,
+			    u32 resid_len, struct fchs_s *rsp_fchs)
+{
+	struct bfa_fcs_rpf_s *rpf = (struct bfa_fcs_rpf_s *) cbarg;
+	struct bfa_fcs_rport_s *rport = rpf->rport;
+	struct fc_ls_rjt_s *ls_rjt;
+	struct fc_rpsc2_acc_s *rpsc2_acc;
+	u16	num_ents;
+
+	bfa_trc(rport->fcs, req_status);
+
+	if (req_status != BFA_STATUS_OK) {
+		bfa_trc(rport->fcs, req_status);
+		if (req_status == BFA_STATUS_ETIMER)
+			rport->stats.rpsc_failed++;
+		bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR);
+		return;
+	}
+
+	rpsc2_acc = (struct fc_rpsc2_acc_s *) BFA_FCXP_RSP_PLD(fcxp);
+	if (rpsc2_acc->els_cmd == FC_ELS_ACC) {
+		rport->stats.rpsc_accs++;
+		num_ents = be16_to_cpu(rpsc2_acc->num_pids);
+		bfa_trc(rport->fcs, num_ents);
+		if (num_ents > 0) {
+			WARN_ON(be32_to_cpu(rpsc2_acc->port_info[0].pid) !=
+						bfa_ntoh3b(rport->pid));
+			bfa_trc(rport->fcs,
+				be32_to_cpu(rpsc2_acc->port_info[0].pid));
+			bfa_trc(rport->fcs,
+				be16_to_cpu(rpsc2_acc->port_info[0].speed));
+			bfa_trc(rport->fcs,
+				be16_to_cpu(rpsc2_acc->port_info[0].index));
+			bfa_trc(rport->fcs,
+				rpsc2_acc->port_info[0].type);
+
+			if (rpsc2_acc->port_info[0].speed == 0) {
+				bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR);
+				return;
+			}
+
+			rpf->rpsc_speed = fc_rpsc_operspeed_to_bfa_speed(
+				be16_to_cpu(rpsc2_acc->port_info[0].speed));
+
+			bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_COMP);
+		}
+	} else {
+		ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp);
+		bfa_trc(rport->fcs, ls_rjt->reason_code);
+		bfa_trc(rport->fcs, ls_rjt->reason_code_expl);
+		rport->stats.rpsc_rejects++;
+		if (ls_rjt->reason_code == FC_LS_RJT_RSN_CMD_NOT_SUPP)
+			bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_FAIL);
+		else
+			bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR);
+	}
+}
diff --git a/drivers/scsi/bfa/bfa_hw_cb.c b/drivers/scsi/bfa/bfa_hw_cb.c
new file mode 100644
index 0000000..c4a0c0e
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_hw_cb.c
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014- QLogic Corporation.
+ * All rights reserved
+ * www.qlogic.com
+ *
+ * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include "bfad_drv.h"
+#include "bfa_modules.h"
+#include "bfi_reg.h"
+
+void
+bfa_hwcb_reginit(struct bfa_s *bfa)
+{
+	struct bfa_iocfc_regs_s	*bfa_regs = &bfa->iocfc.bfa_regs;
+	void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
+	int	fn = bfa_ioc_pcifn(&bfa->ioc);
+
+	if (fn == 0) {
+		bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS);
+		bfa_regs->intr_mask   = (kva + HOSTFN0_INT_MSK);
+	} else {
+		bfa_regs->intr_status = (kva + HOSTFN1_INT_STATUS);
+		bfa_regs->intr_mask   = (kva + HOSTFN1_INT_MSK);
+	}
+}
+
+static void
+bfa_hwcb_reqq_ack_msix(struct bfa_s *bfa, int reqq)
+{
+	writel(__HFN_INT_CPE_Q0 << CPE_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), reqq),
+			bfa->iocfc.bfa_regs.intr_status);
+}
+
+/*
+ * Actions to respond to an RME interrupt for the Crossbow ASIC:
+ * - Write 1 to the Interrupt Status register
+ *              INTX - done in bfa_intx()
+ *              MSIX - done in bfa_hwcb_rspq_ack_msix()
+ * - Update CI (only if there is a new CI)
+ */
+static void
+bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq, u32 ci)
+{
+	writel(__HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq),
+		bfa->iocfc.bfa_regs.intr_status);
+
+	if (bfa_rspq_ci(bfa, rspq) == ci)
+		return;
+
+	bfa_rspq_ci(bfa, rspq) = ci;
+	writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
+	mmiowb();
+}
+
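+/*
+ * INTx variant: the interrupt status write is done in bfa_intx(), so only
+ * the CI update (if new) is needed here.
+ */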
+void
+bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)
+{
+	if (bfa_rspq_ci(bfa, rspq) == ci)
+		return;
+
+	bfa_rspq_ci(bfa, rspq) = ci;
+	writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
+	mmiowb();
+}
+
+void
+bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
+		 u32 *num_vecs, u32 *max_vec_bit)
+{
+#define __HFN_NUMINTS	13
+	if (bfa_ioc_pcifn(&bfa->ioc) == 0) {
+		*msix_vecs_bmap = (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 |
+				   __HFN_INT_CPE_Q2 | __HFN_INT_CPE_Q3 |
+				   __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 |
+				   __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 |
+				   __HFN_INT_MBOX_LPU0);
+		*max_vec_bit = __HFN_INT_MBOX_LPU0;
+	} else {
+		*msix_vecs_bmap = (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 |
+				   __HFN_INT_CPE_Q6 | __HFN_INT_CPE_Q7 |
+				   __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 |
+				   __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 |
+				   __HFN_INT_MBOX_LPU1);
+		*max_vec_bit = __HFN_INT_MBOX_LPU1;
+	}
+
+	*msix_vecs_bmap |= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
+			    __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS);
+	*num_vecs = __HFN_NUMINTS;
+}
+
+/*
+ * Dummy interrupt handler for handling spurious interrupts.
+ */
+static void
+bfa_hwcb_msix_dummy(struct bfa_s *bfa, int vec)
+{
+}
+
+/*
+ * No special setup required for crossbow -- vector assignments are implicit.
+ */
+void
+bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs)
+{
+	WARN_ON((nvecs != 1) && (nvecs != __HFN_NUMINTS));
+
+	bfa->msix.nvecs = nvecs;
+	bfa_hwcb_msix_uninstall(bfa);
+}
+
+void
+bfa_hwcb_msix_ctrl_install(struct bfa_s *bfa)
+{
+	int i;
+
+	if (bfa->msix.nvecs == 0)
+		return;
+
+	if (bfa->msix.nvecs == 1) {
+		for (i = BFI_MSIX_CPE_QMIN_CB; i < BFI_MSIX_CB_MAX; i++)
+			bfa->msix.handler[i] = bfa_msix_all;
+		return;
+	}
+
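+	/*
+	 * Only the vectors above the queue range (lpu/err) are installed
+	 * here; queue vectors are set up in bfa_hwcb_msix_queue_install().
+	 */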
+	for (i = BFI_MSIX_RME_QMAX_CB+1; i < BFI_MSIX_CB_MAX; i++)
+		bfa->msix.handler[i] = bfa_msix_lpu_err;
+}
+
+void
+bfa_hwcb_msix_queue_install(struct bfa_s *bfa)
+{
+	int i;
+
+	if (bfa->msix.nvecs == 0)
+		return;
+
+	if (bfa->msix.nvecs == 1) {
+		for (i = BFI_MSIX_CPE_QMIN_CB; i <= BFI_MSIX_RME_QMAX_CB; i++)
+			bfa->msix.handler[i] = bfa_msix_all;
+		return;
+	}
+
+	for (i = BFI_MSIX_CPE_QMIN_CB; i <= BFI_MSIX_CPE_QMAX_CB; i++)
+		bfa->msix.handler[i] = bfa_msix_reqq;
+
+	for (i = BFI_MSIX_RME_QMIN_CB; i <= BFI_MSIX_RME_QMAX_CB; i++)
+		bfa->msix.handler[i] = bfa_msix_rspq;
+}
+
+void
+bfa_hwcb_msix_uninstall(struct bfa_s *bfa)
+{
+	int i;
+
+	for (i = 0; i < BFI_MSIX_CB_MAX; i++)
+		bfa->msix.handler[i] = bfa_hwcb_msix_dummy;
+}
+
+/*
+ * No special enable/disable -- vector assignments are implicit.
+ */
+void
+bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix)
+{
+	if (msix) {
+		bfa->iocfc.hwif.hw_reqq_ack = bfa_hwcb_reqq_ack_msix;
+		bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack_msix;
+	} else {
+		bfa->iocfc.hwif.hw_reqq_ack = NULL;
+		bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
+	}
+}
+
+void
+bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end)
+{
+	*start = BFI_MSIX_RME_QMIN_CB;
+	*end = BFI_MSIX_RME_QMAX_CB;
+}
diff --git a/drivers/scsi/bfa/bfa_hw_ct.c b/drivers/scsi/bfa/bfa_hw_ct.c
new file mode 100644
index 0000000..b0ff378
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_hw_ct.c
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014- QLogic Corporation.
+ * All rights reserved
+ * www.qlogic.com
+ *
+ * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include "bfad_drv.h"
+#include "bfa_modules.h"
+#include "bfi_reg.h"
+
+BFA_TRC_FILE(HAL, IOCFC_CT);
+
+/*
+ * Dummy interrupt handler for spurious interrupts during chip re-init.
+ */
+static void
+bfa_hwct_msix_dummy(struct bfa_s *bfa, int vec)
+{
+}
+
+void
+bfa_hwct_reginit(struct bfa_s *bfa)
+{
+	struct bfa_iocfc_regs_s	*bfa_regs = &bfa->iocfc.bfa_regs;
+	void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
+	int	fn = bfa_ioc_pcifn(&bfa->ioc);
+
+	if (fn == 0) {
+		bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS);
+		bfa_regs->intr_mask   = (kva + HOSTFN0_INT_MSK);
+	} else {
+		bfa_regs->intr_status = (kva + HOSTFN1_INT_STATUS);
+		bfa_regs->intr_mask   = (kva + HOSTFN1_INT_MSK);
+	}
+}
+
+void
+bfa_hwct2_reginit(struct bfa_s *bfa)
+{
+	struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs;
+	void __iomem	*kva = bfa_ioc_bar0(&bfa->ioc);
+
+	bfa_regs->intr_status = (kva + CT2_HOSTFN_INT_STATUS);
+	bfa_regs->intr_mask   = (kva + CT2_HOSTFN_INTR_MASK);
+}
+
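+/*
+ * Acknowledge a CPE queue interrupt by reading the queue control register
+ * and writing the same value back.
+ */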
+void
+bfa_hwct_reqq_ack(struct bfa_s *bfa, int reqq)
+{
+	u32	r32;
+
+	r32 = readl(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]);
+	writel(r32, bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]);
+}
+
+/*
+ * Actions to respond to an RME interrupt for the Catapult ASIC:
+ * - Write 1 to the Interrupt Status register (INTx only - done in bfa_intx())
+ * - Acknowledge by writing to the RME Queue Control register
+ * - Update CI
+ */
+void
+bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)
+{
+	u32	r32;
+
+	r32 = readl(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);
+	writel(r32, bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);
+
+	bfa_rspq_ci(bfa, rspq) = ci;
+	writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
+	mmiowb();
+}
+
+/*
+ * Actions to respond to an RME interrupt for the Catapult2 ASIC:
+ * - Write 1 to the Interrupt Status register (INTx only - done in bfa_intx())
+ * - Update CI
+ */
+void
+bfa_hwct2_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)
+{
+	bfa_rspq_ci(bfa, rspq) = ci;
+	writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
+	mmiowb();
+}
+
+void
+bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
+		 u32 *num_vecs, u32 *max_vec_bit)
+{
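+	/* Catapult uses a single contiguous range of BFI_MSIX_CT_MAX vectors. */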
+	*msix_vecs_bmap = (1 << BFI_MSIX_CT_MAX) - 1;
+	*max_vec_bit = (1 << (BFI_MSIX_CT_MAX - 1));
+	*num_vecs = BFI_MSIX_CT_MAX;
+}
+
+/*
+ * Setup MSI-X vector for catapult
+ */
+void
+bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs)
+{
+	WARN_ON((nvecs != 1) && (nvecs != BFI_MSIX_CT_MAX));
+	bfa_trc(bfa, nvecs);
+
+	bfa->msix.nvecs = nvecs;
+	bfa_hwct_msix_uninstall(bfa);
+}
+
+void
+bfa_hwct_msix_ctrl_install(struct bfa_s *bfa)
+{
+	if (bfa->msix.nvecs == 0)
+		return;
+
+	if (bfa->msix.nvecs == 1)
+		bfa->msix.handler[BFI_MSIX_LPU_ERR_CT] = bfa_msix_all;
+	else
+		bfa->msix.handler[BFI_MSIX_LPU_ERR_CT] = bfa_msix_lpu_err;
+}
+
+void
+bfa_hwct_msix_queue_install(struct bfa_s *bfa)
+{
+	int i;
+
+	if (bfa->msix.nvecs == 0)
+		return;
+
+	if (bfa->msix.nvecs == 1) {
+		for (i = BFI_MSIX_CPE_QMIN_CT; i < BFI_MSIX_CT_MAX; i++)
+			bfa->msix.handler[i] = bfa_msix_all;
+		return;
+	}
+
+	for (i = BFI_MSIX_CPE_QMIN_CT; i <= BFI_MSIX_CPE_QMAX_CT; i++)
+		bfa->msix.handler[i] = bfa_msix_reqq;
+
+	for (i = BFI_MSIX_RME_QMIN_CT; i <= BFI_MSIX_RME_QMAX_CT; i++)
+		bfa->msix.handler[i] = bfa_msix_rspq;
+}
+
+void
+bfa_hwct_msix_uninstall(struct bfa_s *bfa)
+{
+	int i;
+
+	for (i = 0; i < BFI_MSIX_CT_MAX; i++)
+		bfa->msix.handler[i] = bfa_hwct_msix_dummy;
+}
+
+/*
+ * Enable MSI-X vectors
+ */
+void
+bfa_hwct_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix)
+{
+	bfa_trc(bfa, 0);
+	bfa_ioc_isr_mode_set(&bfa->ioc, msix);
+}
+
+void
+bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end)
+{
+	*start = BFI_MSIX_RME_QMIN_CT;
+	*end = BFI_MSIX_RME_QMAX_CT;
+}
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
new file mode 100644
index 0000000..16d3aeb
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -0,0 +1,7048 @@
+/*
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014- QLogic Corporation.
+ * All rights reserved
+ * www.qlogic.com
+ *
+ * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include "bfad_drv.h"
+#include "bfad_im.h"
+#include "bfa_ioc.h"
+#include "bfi_reg.h"
+#include "bfa_defs.h"
+#include "bfa_defs_svc.h"
+#include "bfi.h"
+
+BFA_TRC_FILE(CNA, IOC);
+
+/*
+ * IOC local definitions
+ */
+#define BFA_IOC_TOV		3000	/* msecs */
+#define BFA_IOC_HWSEM_TOV	500	/* msecs */
+#define BFA_IOC_HB_TOV		500	/* msecs */
+#define BFA_IOC_TOV_RECOVER	 BFA_IOC_HB_TOV
+#define BFA_IOC_POLL_TOV	BFA_TIMER_FREQ
+
+#define bfa_ioc_timer_start(__ioc)					\
+	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
+			bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
+#define bfa_ioc_timer_stop(__ioc)   bfa_timer_stop(&(__ioc)->ioc_timer)
+
+#define bfa_hb_timer_start(__ioc)					\
+	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer,		\
+			bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
+#define bfa_hb_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->hb_timer)
+
+#define BFA_DBG_FWTRC_OFF(_fn)	(BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
+
+#define bfa_ioc_state_disabled(__sm)		\
+	(((__sm) == BFI_IOC_UNINIT) ||		\
+	((__sm) == BFI_IOC_INITING) ||		\
+	((__sm) == BFI_IOC_HWINIT) ||		\
+	((__sm) == BFI_IOC_DISABLED) ||		\
+	((__sm) == BFI_IOC_FAIL) ||		\
+	((__sm) == BFI_IOC_CFG_DISABLED))
+
+/*
+ * ASIC-specific macros: see bfa_hw_cb.c and bfa_hw_ct.c for details.
+ */
+
+#define bfa_ioc_firmware_lock(__ioc)			\
+			((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
+#define bfa_ioc_firmware_unlock(__ioc)			\
+			((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
+#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
+#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
+#define bfa_ioc_notify_fail(__ioc)              \
+			((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
+#define bfa_ioc_sync_start(__ioc)               \
+			((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
+#define bfa_ioc_sync_join(__ioc)                \
+			((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
+#define bfa_ioc_sync_leave(__ioc)               \
+			((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
+#define bfa_ioc_sync_ack(__ioc)                 \
+			((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
+#define bfa_ioc_sync_complete(__ioc)            \
+			((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
+#define bfa_ioc_set_cur_ioc_fwstate(__ioc, __fwstate)		\
+			((__ioc)->ioc_hwif->ioc_set_fwstate(__ioc, __fwstate))
+#define bfa_ioc_get_cur_ioc_fwstate(__ioc)		\
+			((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc))
+#define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate)		\
+		((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate))
+#define bfa_ioc_get_alt_ioc_fwstate(__ioc)		\
+			((__ioc)->ioc_hwif->ioc_get_alt_fwstate(__ioc))
+
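+/*
+ * A mailbox command is pending if the driver command queue is non-empty or
+ * the h/w mailbox register still holds an unconsumed command.
+ */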
+#define bfa_ioc_mbox_cmd_pending(__ioc)		\
+			(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
+			readl((__ioc)->ioc_regs.hfn_mbox_cmd))
+
+bfa_boolean_t bfa_auto_recover = BFA_TRUE;
+
+/*
+ * forward declarations
+ */
+static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
+static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
+static void bfa_ioc_timeout(void *ioc);
+static void bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc);
+static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
+static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
+static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
+static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
+static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
+static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc);
+static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
+static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc,
+				enum bfa_ioc_event_e event);
+static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
+static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
+static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
+static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
+static enum bfi_ioc_img_ver_cmp_e bfa_ioc_fw_ver_patch_cmp(
+				struct bfi_ioc_image_hdr_s *base_fwhdr,
+				struct bfi_ioc_image_hdr_s *fwhdr_to_cmp);
+static enum bfi_ioc_img_ver_cmp_e bfa_ioc_flash_fwver_cmp(
+				struct bfa_ioc_s *ioc,
+				struct bfi_ioc_image_hdr_s *base_fwhdr);
+
+/*
+ * IOC state machine definitions/declarations
+ */
+enum ioc_event {
+	IOC_E_RESET		= 1,	/*  IOC reset request		*/
+	IOC_E_ENABLE		= 2,	/*  IOC enable request		*/
+	IOC_E_DISABLE		= 3,	/*  IOC disable request	*/
+	IOC_E_DETACH		= 4,	/*  driver detach cleanup	*/
+	IOC_E_ENABLED		= 5,	/*  f/w enabled		*/
+	IOC_E_FWRSP_GETATTR	= 6,	/*  IOC get attribute response	*/
+	IOC_E_DISABLED		= 7,	/*  f/w disabled		*/
+	IOC_E_PFFAILED		= 8,	/*  failure notice by iocpf sm	*/
+	IOC_E_HBFAIL		= 9,	/*  heartbeat failure		*/
+	IOC_E_HWERROR		= 10,	/*  hardware error interrupt	*/
+	IOC_E_TIMEOUT		= 11,	/*  timeout			*/
+	IOC_E_HWFAILED		= 12,	/*  PCI mapping failure notice	*/
+};
+
+bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event);
+
+static struct bfa_sm_table_s ioc_sm_table[] = {
+	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
+	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
+	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
+	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
+	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
+	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
+	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
+	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
+	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
+	{BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
+};
+
+/*
+ * IOCPF state machine definitions/declarations
+ */
+
+#define bfa_iocpf_timer_start(__ioc)					\
+	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
+			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
+#define bfa_iocpf_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)
+
+#define bfa_iocpf_poll_timer_start(__ioc)				\
+	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
+			bfa_iocpf_poll_timeout, (__ioc), BFA_IOC_POLL_TOV)
+
+#define bfa_sem_timer_start(__ioc)					\
+	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer,	\
+			bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
+#define bfa_sem_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->sem_timer)
+
+/*
+ * Forward declarations for iocpf state machine
+ */
+static void bfa_iocpf_timeout(void *ioc_arg);
+static void bfa_iocpf_sem_timeout(void *ioc_arg);
+static void bfa_iocpf_poll_timeout(void *ioc_arg);
+
+/*
+ * IOCPF state machine events
+ */
+enum iocpf_event {
+	IOCPF_E_ENABLE		= 1,	/*  IOCPF enable request	*/
+	IOCPF_E_DISABLE		= 2,	/*  IOCPF disable request	*/
+	IOCPF_E_STOP		= 3,	/*  stop on driver detach	*/
+	IOCPF_E_FWREADY		= 4,	/*  f/w initialization done	*/
+	IOCPF_E_FWRSP_ENABLE	= 5,	/*  enable f/w response	*/
+	IOCPF_E_FWRSP_DISABLE	= 6,	/*  disable f/w response	*/
+	IOCPF_E_FAIL		= 7,	/*  failure notice by ioc sm	*/
+	IOCPF_E_INITFAIL	= 8,	/*  init fail notice by ioc sm	*/
+	IOCPF_E_GETATTRFAIL	= 9,	/*  init fail notice by ioc sm	*/
+	IOCPF_E_SEMLOCKED	= 10,	/*  h/w semaphore is locked	*/
+	IOCPF_E_TIMEOUT		= 11,	/*  f/w response timeout	*/
+	IOCPF_E_SEM_ERROR	= 12,	/*  h/w sem mapping error	*/
+};
+
+/*
+ * IOCPF states
+ */
+enum bfa_iocpf_state {
+	BFA_IOCPF_RESET		= 1,	/*  IOC is in reset state */
+	BFA_IOCPF_SEMWAIT	= 2,	/*  Waiting for IOC h/w semaphore */
+	BFA_IOCPF_HWINIT	= 3,	/*  IOC h/w is being initialized */
+	BFA_IOCPF_READY		= 4,	/*  IOCPF is initialized */
+	BFA_IOCPF_INITFAIL	= 5,	/*  IOCPF failed */
+	BFA_IOCPF_FAIL		= 6,	/*  IOCPF failed */
+	BFA_IOCPF_DISABLING	= 7,	/*  IOCPF is being disabled */
+	BFA_IOCPF_DISABLED	= 8,	/*  IOCPF is disabled */
+	BFA_IOCPF_FWMISMATCH	= 9,	/*  IOC f/w different from drivers */
+};
+
+bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf_s,
+						enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s,
+						enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);
+
+static struct bfa_sm_table_s iocpf_sm_table[] = {
+	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
+	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
+	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
+	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
+	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
+	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
+	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
+	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
+	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
+	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
+	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
+	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
+	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
+	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
+};
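+
+/*
+ * Aside (illustrative): iocpf_sm_table[] lets a state-handler function
+ * pointer be mapped back to an enum bfa_iocpf_state for reporting (see
+ * bfa_ioc_get_state() below). The lookup helper bfa_sm_to_state() from
+ * bfa_cs.h is essentially a linear scan; a minimal sketch of the idea,
+ * not the exact implementation, assuming the { handler, state } entry
+ * layout used above:
+ *
+ *	static int
+ *	sm_to_state(struct bfa_sm_table_s *smt, bfa_sm_t sm)
+ *	{
+ *		int i = 0;
+ *
+ *		while (smt[i].sm && smt[i].sm != sm)
+ *			i++;
+ *		return smt[i].state;	// state id paired with the handler
+ *	}
+ *
+ * Note that several handlers deliberately report one state, e.g. both
+ * fwcheck and mismatch map to BFA_IOCPF_FWMISMATCH.
+ */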
+
+/*
+ * IOC State Machine
+ */
+
+/*
+ * Beginning state. IOC uninit state.
+ */
+
+static void
+bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
+{
+}
+
+/*
+ * IOC is in uninit state.
+ */
+static void
+bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
+{
+	bfa_trc(ioc, event);
+
+	switch (event) {
+	case IOC_E_RESET:
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+/*
+ * Reset entry actions -- initialize state machine
+ */
+static void
+bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
+{
+	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
+}
+
+/*
+ * IOC is in reset state.
+ */
+static void
+bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
+{
+	bfa_trc(ioc, event);
+
+	switch (event) {
+	case IOC_E_ENABLE:
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
+		break;
+
+	case IOC_E_DISABLE:
+		bfa_ioc_disable_comp(ioc);
+		break;
+
+	case IOC_E_DETACH:
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+static void
+bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
+{
+	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
+}
+
+/*
+ * Host IOC function is being enabled, awaiting response from firmware.
+ * Semaphore is acquired.
+ */
+static void
+bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
+{
+	bfa_trc(ioc, event);
+
+	switch (event) {
+	case IOC_E_ENABLED:
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
+		break;
+
+	case IOC_E_PFFAILED:
+		/* !!! fall through !!! */
+	case IOC_E_HWERROR:
+		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
+		if (event != IOC_E_PFFAILED)
+			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
+		break;
+
+	case IOC_E_HWFAILED:
+		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
+		break;
+
+	case IOC_E_DISABLE:
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
+		break;
+
+	case IOC_E_DETACH:
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
+		break;
+
+	case IOC_E_ENABLE:
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+static void
+bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
+{
+	bfa_ioc_timer_start(ioc);
+	bfa_ioc_send_getattr(ioc);
+}
+
+/*
+ * IOC configuration in progress. Timer is active.
+ */
+static void
+bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
+{
+	bfa_trc(ioc, event);
+
+	switch (event) {
+	case IOC_E_FWRSP_GETATTR:
+		bfa_ioc_timer_stop(ioc);
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
+		break;
+
+	case IOC_E_PFFAILED:
+	case IOC_E_HWERROR:
+		bfa_ioc_timer_stop(ioc);
+		/* !!! fall through !!! */
+	case IOC_E_TIMEOUT:
+		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
+		if (event != IOC_E_PFFAILED)
+			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
+		break;
+
+	case IOC_E_DISABLE:
+		bfa_ioc_timer_stop(ioc);
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
+		break;
+
+	case IOC_E_ENABLE:
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+static void
+bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
+{
+	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
+
+	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
+	bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
+	bfa_ioc_hb_monitor(ioc);
+	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
+	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
+}
+
+static void
+bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
+{
+	bfa_trc(ioc, event);
+
+	switch (event) {
+	case IOC_E_ENABLE:
+		break;
+
+	case IOC_E_DISABLE:
+		bfa_hb_timer_stop(ioc);
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
+		break;
+
+	case IOC_E_PFFAILED:
+	case IOC_E_HWERROR:
+		bfa_hb_timer_stop(ioc);
+		/* !!! fall through !!! */
+	case IOC_E_HBFAIL:
+		if (ioc->iocpf.auto_recover)
+			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
+		else
+			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
+
+		bfa_ioc_fail_notify(ioc);
+
+		if (event != IOC_E_PFFAILED)
+			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+static void
+bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
+{
+	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
+	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
+	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
+	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
+}
+
+/*
+ * IOC is being disabled
+ */
+static void
+bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
+{
+	bfa_trc(ioc, event);
+
+	switch (event) {
+	case IOC_E_DISABLED:
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+		break;
+
+	case IOC_E_HWERROR:
+		/*
+		 * No state change.  Will move to disabled state
+		 * after iocpf sm completes failure processing and
+		 * moves to disabled state.
+		 */
+		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
+		break;
+
+	case IOC_E_HWFAILED:
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
+		bfa_ioc_disable_comp(ioc);
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+/*
+ * IOC disable completion entry.
+ */
+static void
+bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
+{
+	bfa_ioc_disable_comp(ioc);
+}
+
+static void
+bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
+{
+	bfa_trc(ioc, event);
+
+	switch (event) {
+	case IOC_E_ENABLE:
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
+		break;
+
+	case IOC_E_DISABLE:
+		ioc->cbfn->disable_cbfn(ioc->bfa);
+		break;
+
+	case IOC_E_DETACH:
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+static void
+bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc)
+{
+	bfa_trc(ioc, 0);
+}
+
+/*
+ * Hardware initialization retry.
+ */
+static void
+bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
+{
+	bfa_trc(ioc, event);
+
+	switch (event) {
+	case IOC_E_ENABLED:
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
+		break;
+
+	case IOC_E_PFFAILED:
+	case IOC_E_HWERROR:
+		/*
+		 * Initialization retry failed.
+		 */
+		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
+		if (event != IOC_E_PFFAILED)
+			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
+		break;
+
+	case IOC_E_HWFAILED:
+		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
+		break;
+
+	case IOC_E_ENABLE:
+		break;
+
+	case IOC_E_DISABLE:
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
+		break;
+
+	case IOC_E_DETACH:
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+static void
+bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
+{
+	bfa_trc(ioc, 0);
+}
+
+/*
+ * IOC failure.
+ */
+static void
+bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
+{
+	bfa_trc(ioc, event);
+
+	switch (event) {
+	case IOC_E_ENABLE:
+		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+		break;
+
+	case IOC_E_DISABLE:
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
+		break;
+
+	case IOC_E_DETACH:
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
+		break;
+
+	case IOC_E_HWERROR:
+	case IOC_E_HWFAILED:
+		/*
+		 * HB failure / HW error notification, ignore.
+		 */
+		break;
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+static void
+bfa_ioc_sm_hwfail_entry(struct bfa_ioc_s *ioc)
+{
+	bfa_trc(ioc, 0);
+}
+
+static void
+bfa_ioc_sm_hwfail(struct bfa_ioc_s *ioc, enum ioc_event event)
+{
+	bfa_trc(ioc, event);
+
+	switch (event) {
+	case IOC_E_ENABLE:
+		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+		break;
+
+	case IOC_E_DISABLE:
+		ioc->cbfn->disable_cbfn(ioc->bfa);
+		break;
+
+	case IOC_E_DETACH:
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+		break;
+
+	case IOC_E_HWERROR:
+		/* Ignore - already in hwfail state */
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+/*
+ * IOCPF State Machine
+ */
+
+/*
+ * Reset entry actions -- initialize state machine
+ */
+static void
+bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
+{
+	iocpf->fw_mismatch_notified = BFA_FALSE;
+	iocpf->auto_recover = bfa_auto_recover;
+}
+
+/*
+ * Beginning state. IOC is in reset state.
+ */
+static void
+bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+	struct bfa_ioc_s *ioc = iocpf->ioc;
+
+	bfa_trc(ioc, event);
+
+	switch (event) {
+	case IOCPF_E_ENABLE:
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
+		break;
+
+	case IOCPF_E_STOP:
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+/*
+ * Semaphore should be acquired for version check.
+ */
+static void
+bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
+{
+	struct bfi_ioc_image_hdr_s	fwhdr;
+	u32	r32, fwstate, pgnum, pgoff, loff = 0;
+	int	i;
+
+	/*
+	 * Spin on init semaphore to serialize.
+	 */
+	r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
+	while (r32 & 0x1) {
+		udelay(20);
+		r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
+	}
+
+	/* h/w sem init */
+	fwstate = bfa_ioc_get_cur_ioc_fwstate(iocpf->ioc);
+	if (fwstate == BFI_IOC_UNINIT) {
+		writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
+		goto sem_get;
+	}
+
+	bfa_ioc_fwver_get(iocpf->ioc, &fwhdr);
+
+	if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
+		writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
+		goto sem_get;
+	}
+
+	/*
+	 * Clear fwver hdr
+	 */
+	pgnum = PSS_SMEM_PGNUM(iocpf->ioc->ioc_regs.smem_pg0, loff);
+	pgoff = PSS_SMEM_PGOFF(loff);
+	writel(pgnum, iocpf->ioc->ioc_regs.host_page_num_fn);
+
+	for (i = 0; i < sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32); i++) {
+		bfa_mem_write(iocpf->ioc->ioc_regs.smem_page_start, loff, 0);
+		loff += sizeof(u32);
+	}
+
+	bfa_trc(iocpf->ioc, fwstate);
+	bfa_trc(iocpf->ioc, swab32(fwhdr.exec));
+	bfa_ioc_set_cur_ioc_fwstate(iocpf->ioc, BFI_IOC_UNINIT);
+	bfa_ioc_set_alt_ioc_fwstate(iocpf->ioc, BFI_IOC_UNINIT);
+
+	/*
+	 * Unlock the hw semaphore. Should be here only once per boot.
+	 */
+	bfa_ioc_ownership_reset(iocpf->ioc);
+
+	/*
+	 * unlock init semaphore.
+	 */
+	writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
+
+sem_get:
+	bfa_ioc_hw_sem_get(iocpf->ioc);
+}
+
+/*
+ * Awaiting h/w semaphore to continue with version check.
+ */
+static void
+bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+	struct bfa_ioc_s *ioc = iocpf->ioc;
+
+	bfa_trc(ioc, event);
+
+	switch (event) {
+	case IOCPF_E_SEMLOCKED:
+		if (bfa_ioc_firmware_lock(ioc)) {
+			if (bfa_ioc_sync_start(ioc)) {
+				bfa_ioc_sync_join(ioc);
+				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
+			} else {
+				bfa_ioc_firmware_unlock(ioc);
+				writel(1, ioc->ioc_regs.ioc_sem_reg);
+				bfa_sem_timer_start(ioc);
+			}
+		} else {
+			writel(1, ioc->ioc_regs.ioc_sem_reg);
+			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
+		}
+		break;
+
+	case IOCPF_E_SEM_ERROR:
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
+		break;
+
+	case IOCPF_E_DISABLE:
+		bfa_sem_timer_stop(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
+		break;
+
+	case IOCPF_E_STOP:
+		bfa_sem_timer_stop(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+/*
+ * Notify enable completion callback.
+ */
+static void
+bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
+{
+	/*
+	 * Call only the first time sm enters fwmismatch state.
+	 */
+	if (iocpf->fw_mismatch_notified == BFA_FALSE)
+		bfa_ioc_pf_fwmismatch(iocpf->ioc);
+
+	iocpf->fw_mismatch_notified = BFA_TRUE;
+	bfa_iocpf_timer_start(iocpf->ioc);
+}
+
+/*
+ * Awaiting firmware version match.
+ */
+static void
+bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+	struct bfa_ioc_s *ioc = iocpf->ioc;
+
+	bfa_trc(ioc, event);
+
+	switch (event) {
+	case IOCPF_E_TIMEOUT:
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
+		break;
+
+	case IOCPF_E_DISABLE:
+		bfa_iocpf_timer_stop(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
+		break;
+
+	case IOCPF_E_STOP:
+		bfa_iocpf_timer_stop(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+/*
+ * Request for semaphore.
+ */
+static void
+bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
+{
+	bfa_ioc_hw_sem_get(iocpf->ioc);
+}
+
+/*
+ * Awaiting semaphore for h/w initialization.
+ */
+static void
+bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+	struct bfa_ioc_s *ioc = iocpf->ioc;
+
+	bfa_trc(ioc, event);
+
+	switch (event) {
+	case IOCPF_E_SEMLOCKED:
+		if (bfa_ioc_sync_complete(ioc)) {
+			bfa_ioc_sync_join(ioc);
+			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
+		} else {
+			writel(1, ioc->ioc_regs.ioc_sem_reg);
+			bfa_sem_timer_start(ioc);
+		}
+		break;
+
+	case IOCPF_E_SEM_ERROR:
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
+		break;
+
+	case IOCPF_E_DISABLE:
+		bfa_sem_timer_stop(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+static void
+bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
+{
+	iocpf->poll_time = 0;
+	bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
+}
+
+/*
+ * Hardware is being initialized. Interrupts are enabled.
+ * Holding hardware semaphore lock.
+ */
+static void
+bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+	struct bfa_ioc_s *ioc = iocpf->ioc;
+
+	bfa_trc(ioc, event);
+
+	switch (event) {
+	case IOCPF_E_FWREADY:
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
+		break;
+
+	case IOCPF_E_TIMEOUT:
+		writel(1, ioc->ioc_regs.ioc_sem_reg);
+		bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
+		break;
+
+	case IOCPF_E_DISABLE:
+		bfa_iocpf_timer_stop(ioc);
+		bfa_ioc_sync_leave(ioc);
+		writel(1, ioc->ioc_regs.ioc_sem_reg);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+static void
+bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
+{
+	bfa_iocpf_timer_start(iocpf->ioc);
+	/*
+	 * Enable Interrupts before sending fw IOC ENABLE cmd.
+	 */
+	iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
+	bfa_ioc_send_enable(iocpf->ioc);
+}
+
+/*
+ * Host IOC function is being enabled, awaiting response from firmware.
+ * Semaphore is acquired.
+ */
+static void
+bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+	struct bfa_ioc_s *ioc = iocpf->ioc;
+
+	bfa_trc(ioc, event);
+
+	switch (event) {
+	case IOCPF_E_FWRSP_ENABLE:
+		bfa_iocpf_timer_stop(ioc);
+		writel(1, ioc->ioc_regs.ioc_sem_reg);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
+		break;
+
+	case IOCPF_E_INITFAIL:
+		bfa_iocpf_timer_stop(ioc);
+		/*
+		 * !!! fall through !!!
+		 */
+
+	case IOCPF_E_TIMEOUT:
+		writel(1, ioc->ioc_regs.ioc_sem_reg);
+		if (event == IOCPF_E_TIMEOUT)
+			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
+		break;
+
+	case IOCPF_E_DISABLE:
+		bfa_iocpf_timer_stop(ioc);
+		writel(1, ioc->ioc_regs.ioc_sem_reg);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+static void
+bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
+{
+	bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED);
+}
+
+static void
+bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+	struct bfa_ioc_s *ioc = iocpf->ioc;
+
+	bfa_trc(ioc, event);
+
+	switch (event) {
+	case IOCPF_E_DISABLE:
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
+		break;
+
+	case IOCPF_E_GETATTRFAIL:
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
+		break;
+
+	case IOCPF_E_FAIL:
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+static void
+bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
+{
+	bfa_iocpf_timer_start(iocpf->ioc);
+	bfa_ioc_send_disable(iocpf->ioc);
+}
+
+/*
+ * IOC is being disabled
+ */
+static void
+bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+	struct bfa_ioc_s *ioc = iocpf->ioc;
+
+	bfa_trc(ioc, event);
+
+	switch (event) {
+	case IOCPF_E_FWRSP_DISABLE:
+		bfa_iocpf_timer_stop(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
+		break;
+
+	case IOCPF_E_FAIL:
+		bfa_iocpf_timer_stop(ioc);
+		/*
+		 * !!! fall through !!!
+		 */
+
+	case IOCPF_E_TIMEOUT:
+		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
+		break;
+
+	case IOCPF_E_FWRSP_ENABLE:
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+static void
+bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf)
+{
+	bfa_ioc_hw_sem_get(iocpf->ioc);
+}
+
+/*
+ * IOC hb ack is being removed; awaiting the h/w semaphore to leave sync.
+ */
+static void
+bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+	struct bfa_ioc_s *ioc = iocpf->ioc;
+
+	bfa_trc(ioc, event);
+
+	switch (event) {
+	case IOCPF_E_SEMLOCKED:
+		bfa_ioc_sync_leave(ioc);
+		writel(1, ioc->ioc_regs.ioc_sem_reg);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
+		break;
+
+	case IOCPF_E_SEM_ERROR:
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
+		break;
+
+	case IOCPF_E_FAIL:
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+/*
+ * IOC disable completion entry.
+ */
+static void
+bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
+{
+	bfa_ioc_mbox_flush(iocpf->ioc);
+	bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
+}
+
+static void
+bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+	struct bfa_ioc_s *ioc = iocpf->ioc;
+
+	bfa_trc(ioc, event);
+
+	switch (event) {
+	case IOCPF_E_ENABLE:
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
+		break;
+
+	case IOCPF_E_STOP:
+		bfa_ioc_firmware_unlock(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+static void
+bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
+{
+	bfa_ioc_debug_save_ftrc(iocpf->ioc);
+	bfa_ioc_hw_sem_get(iocpf->ioc);
+}
+
+/*
+ * Hardware initialization failed.
+ */
+static void
+bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+	struct bfa_ioc_s *ioc = iocpf->ioc;
+
+	bfa_trc(ioc, event);
+
+	switch (event) {
+	case IOCPF_E_SEMLOCKED:
+		bfa_ioc_notify_fail(ioc);
+		bfa_ioc_sync_leave(ioc);
+		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
+		writel(1, ioc->ioc_regs.ioc_sem_reg);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
+		break;
+
+	case IOCPF_E_SEM_ERROR:
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
+		break;
+
+	case IOCPF_E_DISABLE:
+		bfa_sem_timer_stop(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
+		break;
+
+	case IOCPF_E_STOP:
+		bfa_sem_timer_stop(ioc);
+		bfa_ioc_firmware_unlock(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+		break;
+
+	case IOCPF_E_FAIL:
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+static void
+bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
+{
+	bfa_trc(iocpf->ioc, 0);
+}
+
+/*
+ * Hardware initialization failed.
+ */
+static void
+bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+	struct bfa_ioc_s *ioc = iocpf->ioc;
+
+	bfa_trc(ioc, event);
+
+	switch (event) {
+	case IOCPF_E_DISABLE:
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
+		break;
+
+	case IOCPF_E_STOP:
+		bfa_ioc_firmware_unlock(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+static void
+bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
+{
+	/*
+	 * Mark IOC as failed in hardware and stop firmware.
+	 */
+	bfa_ioc_lpu_stop(iocpf->ioc);
+
+	/*
+	 * Flush any queued up mailbox requests.
+	 */
+	bfa_ioc_mbox_flush(iocpf->ioc);
+
+	bfa_ioc_hw_sem_get(iocpf->ioc);
+}
+
+static void
+bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+	struct bfa_ioc_s *ioc = iocpf->ioc;
+
+	bfa_trc(ioc, event);
+
+	switch (event) {
+	case IOCPF_E_SEMLOCKED:
+		bfa_ioc_sync_ack(ioc);
+		bfa_ioc_notify_fail(ioc);
+		if (!iocpf->auto_recover) {
+			bfa_ioc_sync_leave(ioc);
+			bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
+			writel(1, ioc->ioc_regs.ioc_sem_reg);
+			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+		} else {
+			if (bfa_ioc_sync_complete(ioc))
+				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
+			else {
+				writel(1, ioc->ioc_regs.ioc_sem_reg);
+				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
+			}
+		}
+		break;
+
+	case IOCPF_E_SEM_ERROR:
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
+		break;
+
+	case IOCPF_E_DISABLE:
+		bfa_sem_timer_stop(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
+		break;
+
+	case IOCPF_E_FAIL:
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+static void
+bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
+{
+	bfa_trc(iocpf->ioc, 0);
+}
+
+/*
+ * IOC is in failed state.
+ */
+static void
+bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+	struct bfa_ioc_s *ioc = iocpf->ioc;
+
+	bfa_trc(ioc, event);
+
+	switch (event) {
+	case IOCPF_E_DISABLE:
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+/*
+ *  BFA IOC private functions
+ */
+
+/*
+ * Notify common modules registered for notification.
+ */
+static void
+bfa_ioc_event_notify(struct bfa_ioc_s *ioc, enum bfa_ioc_event_e event)
+{
+	struct bfa_ioc_notify_s	*notify;
+	struct list_head	*qe;
+
+	list_for_each(qe, &ioc->notify_q) {
+		notify = (struct bfa_ioc_notify_s *)qe;
+		notify->cbfn(notify->cbarg, event);
+	}
+}
+
+static void
+bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
+{
+	ioc->cbfn->disable_cbfn(ioc->bfa);
+	bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
+}
+
+bfa_boolean_t
+bfa_ioc_sem_get(void __iomem *sem_reg)
+{
+	u32 r32;
+	int cnt = 0;
+#define BFA_SEM_SPINCNT	3000
+
+	r32 = readl(sem_reg);
+
+	while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
+		cnt++;
+		udelay(2);
+		r32 = readl(sem_reg);
+	}
+
+	if (!(r32 & 1))
+		return BFA_TRUE;
+
+	return BFA_FALSE;
+}
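+
+/*
+ * Worked bound for the spin above: BFA_SEM_SPINCNT (3000) iterations at
+ * 2 usec of udelay() each gives a worst-case busy-wait of about
+ *
+ *	3000 * 2 usec = 6 msec
+ *
+ * before the function gives up and returns BFA_FALSE.
+ */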
+
+static void
+bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
+{
+	u32	r32;
+
+	/*
+	 * First read to the semaphore register will return 0, subsequent reads
+	 * will return 1. Semaphore is released by writing 1 to the register
+	 */
+	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
+	if (r32 == ~0) {
+		WARN_ON(r32 == ~0);
+		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
+		return;
+	}
+	if (!(r32 & 1)) {
+		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
+		return;
+	}
+
+	bfa_sem_timer_start(ioc);
+}
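+
+/*
+ * Illustrative note: acquiring the semaphore is the read itself, so the
+ * release seen throughout the state machines above is a plain register
+ * write:
+ *
+ *	writel(1, ioc->ioc_regs.ioc_sem_reg);
+ *
+ * The ~0 check catches an all-ones PCI read (dead device), which is
+ * reported as IOCPF_E_SEM_ERROR instead of being retried.
+ */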
+
+/*
+ * Initialize LPU local memory (aka secondary memory / SRAM)
+ */
+static void
+bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
+{
+	u32	pss_ctl;
+	int		i;
+#define PSS_LMEM_INIT_TIME  10000
+
+	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
+	pss_ctl &= ~__PSS_LMEM_RESET;
+	pss_ctl |= __PSS_LMEM_INIT_EN;
+
+	/*
+	 * I2C workaround: 12.5 kHz clock
+	 */
+	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
+	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
+
+	/*
+	 * wait for memory initialization to be complete
+	 */
+	i = 0;
+	do {
+		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
+		i++;
+	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
+
+	/*
+	 * If memory initialization is not successful, IOC timeout will catch
+	 * such failures.
+	 */
+	WARN_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
+	bfa_trc(ioc, pss_ctl);
+
+	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
+	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
+}
+
+static void
+bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
+{
+	u32	pss_ctl;
+
+	/*
+	 * Take processor out of reset.
+	 */
+	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
+	pss_ctl &= ~__PSS_LPU0_RESET;
+
+	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
+}
+
+static void
+bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
+{
+	u32	pss_ctl;
+
+	/*
+	 * Put processors in reset.
+	 */
+	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
+	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
+
+	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
+}
+
+/*
+ * Get driver and firmware versions.
+ */
+void
+bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
+{
+	u32	pgnum, pgoff;
+	u32	loff = 0;
+	int		i;
+	u32	*fwsig = (u32 *) fwhdr;
+
+	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
+	pgoff = PSS_SMEM_PGOFF(loff);
+	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+
+	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
+	     i++) {
+		fwsig[i] =
+			bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
+		loff += sizeof(u32);
+	}
+}
+
+/*
+ * Returns TRUE if driver is willing to work with current smem f/w version.
+ */
+bfa_boolean_t
+bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc,
+		struct bfi_ioc_image_hdr_s *smem_fwhdr)
+{
+	struct bfi_ioc_image_hdr_s *drv_fwhdr;
+	enum bfi_ioc_img_ver_cmp_e smem_flash_cmp, drv_smem_cmp;
+
+	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
+		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
+
+	/*
+	 * If smem is incompatible or old, driver should not work with it.
+	 */
+	drv_smem_cmp = bfa_ioc_fw_ver_patch_cmp(drv_fwhdr, smem_fwhdr);
+	if (drv_smem_cmp == BFI_IOC_IMG_VER_INCOMP ||
+		drv_smem_cmp == BFI_IOC_IMG_VER_OLD) {
+		return BFA_FALSE;
+	}
+
+	/*
+	 * If Flash has a better f/w than smem do not work with smem.
+	 * If smem f/w == flash f/w, work with it (smem f/w is known not to
+	 * be old or incompatible). If Flash is old or incompatible, work
+	 * with smem iff smem f/w == drv f/w.
+	 */
+	smem_flash_cmp = bfa_ioc_flash_fwver_cmp(ioc, smem_fwhdr);
+
+	if (smem_flash_cmp == BFI_IOC_IMG_VER_BETTER) {
+		return BFA_FALSE;
+	} else if (smem_flash_cmp == BFI_IOC_IMG_VER_SAME) {
+		return BFA_TRUE;
+	} else {
+		return (drv_smem_cmp == BFI_IOC_IMG_VER_SAME) ?
+			BFA_TRUE : BFA_FALSE;
+	}
+}
+
+/*
+ * Return true if current running version is valid. Firmware signature and
+ * execution context (driver/bios) must match.
+ */
+static bfa_boolean_t
+bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
+{
+	struct bfi_ioc_image_hdr_s fwhdr;
+
+	bfa_ioc_fwver_get(ioc, &fwhdr);
+
+	if (swab32(fwhdr.bootenv) != boot_env) {
+		bfa_trc(ioc, fwhdr.bootenv);
+		bfa_trc(ioc, boot_env);
+		return BFA_FALSE;
+	}
+
+	return bfa_ioc_fwver_cmp(ioc, &fwhdr);
+}
+
+static bfa_boolean_t
+bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr_s *fwhdr_1,
+				struct bfi_ioc_image_hdr_s *fwhdr_2)
+{
+	int i;
+
+	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++)
+		if (fwhdr_1->md5sum[i] != fwhdr_2->md5sum[i])
+			return BFA_FALSE;
+
+	return BFA_TRUE;
+}
+
+/*
+ * Returns TRUE if major, minor and maintenance versions are the same.
+ * If patch versions are also the same, the MD5 checksums must match too.
+ */
+static bfa_boolean_t
+bfa_ioc_fw_ver_compatible(struct bfi_ioc_image_hdr_s *drv_fwhdr,
+				struct bfi_ioc_image_hdr_s *fwhdr_to_cmp)
+{
+	if (drv_fwhdr->signature != fwhdr_to_cmp->signature)
+		return BFA_FALSE;
+
+	if (drv_fwhdr->fwver.major != fwhdr_to_cmp->fwver.major)
+		return BFA_FALSE;
+
+	if (drv_fwhdr->fwver.minor != fwhdr_to_cmp->fwver.minor)
+		return BFA_FALSE;
+
+	if (drv_fwhdr->fwver.maint != fwhdr_to_cmp->fwver.maint)
+		return BFA_FALSE;
+
+	if (drv_fwhdr->fwver.patch == fwhdr_to_cmp->fwver.patch &&
+		drv_fwhdr->fwver.phase == fwhdr_to_cmp->fwver.phase &&
+		drv_fwhdr->fwver.build == fwhdr_to_cmp->fwver.build) {
+		return bfa_ioc_fwver_md5_check(drv_fwhdr, fwhdr_to_cmp);
+	}
+
+	return BFA_TRUE;
+}
+
+static bfa_boolean_t
+bfa_ioc_flash_fwver_valid(struct bfi_ioc_image_hdr_s *flash_fwhdr)
+{
+	if (flash_fwhdr->fwver.major == 0 || flash_fwhdr->fwver.major == 0xFF)
+		return BFA_FALSE;
+
+	return BFA_TRUE;
+}
+
+static bfa_boolean_t fwhdr_is_ga(struct bfi_ioc_image_hdr_s *fwhdr)
+{
+	if (fwhdr->fwver.phase == 0 &&
+		fwhdr->fwver.build == 0)
+		return BFA_TRUE;
+
+	return BFA_FALSE;
+}
+
+/*
+ * Compare fwhdr_to_cmp against base_fwhdr and return whether it is
+ * incompatible, older, the same or better.
+ */
+static enum bfi_ioc_img_ver_cmp_e
+bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr_s *base_fwhdr,
+				struct bfi_ioc_image_hdr_s *fwhdr_to_cmp)
+{
+	if (bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp) == BFA_FALSE)
+		return BFI_IOC_IMG_VER_INCOMP;
+
+	if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch)
+		return BFI_IOC_IMG_VER_BETTER;
+
+	else if (fwhdr_to_cmp->fwver.patch < base_fwhdr->fwver.patch)
+		return BFI_IOC_IMG_VER_OLD;
+
+	/*
+	 * GA takes priority over internal builds of the same patch stream.
+	 * At this point major minor maint and patch numbers are same.
+	 */
+
+	if (fwhdr_is_ga(base_fwhdr) == BFA_TRUE) {
+		if (fwhdr_is_ga(fwhdr_to_cmp))
+			return BFI_IOC_IMG_VER_SAME;
+		else
+			return BFI_IOC_IMG_VER_OLD;
+	} else {
+		if (fwhdr_is_ga(fwhdr_to_cmp))
+			return BFI_IOC_IMG_VER_BETTER;
+	}
+
+	if (fwhdr_to_cmp->fwver.phase > base_fwhdr->fwver.phase)
+		return BFI_IOC_IMG_VER_BETTER;
+	else if (fwhdr_to_cmp->fwver.phase < base_fwhdr->fwver.phase)
+		return BFI_IOC_IMG_VER_OLD;
+
+	if (fwhdr_to_cmp->fwver.build > base_fwhdr->fwver.build)
+		return BFI_IOC_IMG_VER_BETTER;
+	else if (fwhdr_to_cmp->fwver.build < base_fwhdr->fwver.build)
+		return BFI_IOC_IMG_VER_OLD;
+
+	/*
+	 * All Version Numbers are equal.
+	 * Md5 check to be done as a part of compatibility check.
+	 */
+	return BFI_IOC_IMG_VER_SAME;
+}
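+
+/*
+ * Worked example (hypothetical version numbers): take a base image at
+ * major.minor.maint 3.2.1, patch 0, phase 0, build 0 -- GA, per
+ * fwhdr_is_ga() -- and compared images with matching signature and
+ * major/minor/maint:
+ *
+ *	patch 1, any phase/build  -> BFI_IOC_IMG_VER_BETTER (higher patch)
+ *	patch 0, phase 2, build 7 -> BFI_IOC_IMG_VER_OLD (GA base wins)
+ *	patch 0, phase 0, build 0 -> BFI_IOC_IMG_VER_SAME, given matching
+ *	    MD5 sums; differing sums fail the compatibility check and
+ *	    yield BFI_IOC_IMG_VER_INCOMP instead
+ */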
+
+#define BFA_FLASH_PART_FWIMG_ADDR	0x100000 /* fw image address */
+
+bfa_status_t
+bfa_ioc_flash_img_get_chnk(struct bfa_ioc_s *ioc, u32 off,
+				u32 *fwimg)
+{
+	return bfa_flash_raw_read(ioc->pcidev.pci_bar_kva,
+			BFA_FLASH_PART_FWIMG_ADDR + (off * sizeof(u32)),
+			(char *)fwimg, BFI_FLASH_CHUNK_SZ);
+}
+
+static enum bfi_ioc_img_ver_cmp_e
+bfa_ioc_flash_fwver_cmp(struct bfa_ioc_s *ioc,
+			struct bfi_ioc_image_hdr_s *base_fwhdr)
+{
+	struct bfi_ioc_image_hdr_s *flash_fwhdr;
+	bfa_status_t status;
+	u32 fwimg[BFI_FLASH_CHUNK_SZ_WORDS];
+
+	status = bfa_ioc_flash_img_get_chnk(ioc, 0, fwimg);
+	if (status != BFA_STATUS_OK)
+		return BFI_IOC_IMG_VER_INCOMP;
+
+	flash_fwhdr = (struct bfi_ioc_image_hdr_s *) fwimg;
+	if (bfa_ioc_flash_fwver_valid(flash_fwhdr) == BFA_TRUE)
+		return bfa_ioc_fw_ver_patch_cmp(base_fwhdr, flash_fwhdr);
+	else
+		return BFI_IOC_IMG_VER_INCOMP;
+}
+
+
+/*
+ * Invalidate fwver signature
+ */
+bfa_status_t
+bfa_ioc_fwsig_invalidate(struct bfa_ioc_s *ioc)
+{
+	u32	pgnum, pgoff;
+	u32	loff = 0;
+	enum bfi_ioc_state ioc_fwstate;
+
+	ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
+	if (!bfa_ioc_state_disabled(ioc_fwstate))
+		return BFA_STATUS_ADAPTER_ENABLED;
+
+	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
+	pgoff = PSS_SMEM_PGOFF(loff);
+	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+	bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, BFA_IOC_FW_INV_SIGN);
+
+	return BFA_STATUS_OK;
+}
+
+/*
+ * Conditionally flush any pending message from firmware at start.
+ */
+static void
+bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
+{
+	u32	r32;
+
+	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
+	if (r32)
+		writel(1, ioc->ioc_regs.lpu_mbox_cmd);
+}
+
+static void
+bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
+{
+	enum bfi_ioc_state ioc_fwstate;
+	bfa_boolean_t fwvalid;
+	u32 boot_type;
+	u32 boot_env;
+
+	ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
+
+	if (force)
+		ioc_fwstate = BFI_IOC_UNINIT;
+
+	bfa_trc(ioc, ioc_fwstate);
+
+	boot_type = BFI_FWBOOT_TYPE_NORMAL;
+	boot_env = BFI_FWBOOT_ENV_OS;
+
+	/*
+	 * check if firmware is valid
+	 */
+	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
+		BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);
+
+	if (!fwvalid) {
+		if (bfa_ioc_boot(ioc, boot_type, boot_env) == BFA_STATUS_OK)
+			bfa_ioc_poll_fwinit(ioc);
+		return;
+	}
+
+	/*
+	 * If hardware initialization is in progress (initialized by other IOC),
+	 * just wait for an initialization completion interrupt.
+	 */
+	if (ioc_fwstate == BFI_IOC_INITING) {
+		bfa_ioc_poll_fwinit(ioc);
+		return;
+	}
+
+	/*
+	 * If IOC function is disabled and firmware version is same,
+	 * just re-enable IOC.
+	 *
+	 * If option rom, IOC must not be in operational state. With
+	 * convergence, IOC will be in operational state when 2nd driver
+	 * is loaded.
+	 */
+	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
+
+		/*
+		 * When using MSI-X any pending firmware ready event should
+		 * be flushed. Otherwise MSI-X interrupts are not delivered.
+		 */
+		bfa_ioc_msgflush(ioc);
+		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
+		return;
+	}
+
+	/*
+	 * Initialize the h/w for any other states.
+	 */
+	if (bfa_ioc_boot(ioc, boot_type, boot_env) == BFA_STATUS_OK)
+		bfa_ioc_poll_fwinit(ioc);
+}
+
+static void
+bfa_ioc_timeout(void *ioc_arg)
+{
+	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;
+
+	bfa_trc(ioc, 0);
+	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
+}
+
+void
+bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
+{
+	u32 *msgp = (u32 *) ioc_msg;
+	u32 i;
+
+	bfa_trc(ioc, msgp[0]);
+	bfa_trc(ioc, len);
+
+	WARN_ON(len > BFI_IOC_MSGLEN_MAX);
+
+	/*
+	 * first write msg to mailbox registers
+	 */
+	for (i = 0; i < len / sizeof(u32); i++)
+		writel(cpu_to_le32(msgp[i]),
+			ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
+
+	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
+		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
+
+	/*
+	 * write 1 to mailbox CMD to trigger LPU event
+	 */
+	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
+	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
+}
+
+static void
+bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
+{
+	struct bfi_ioc_ctrl_req_s enable_req;
+
+	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
+		    bfa_ioc_portid(ioc));
+	enable_req.clscode = cpu_to_be16(ioc->clscode);
+	/* unsigned 32-bit time_t overflow in y2106 */
+	enable_req.tv_sec = cpu_to_be32(ktime_get_real_seconds());
+	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
+}
+
+static void
+bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
+{
+	struct bfi_ioc_ctrl_req_s disable_req;
+
+	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
+		    bfa_ioc_portid(ioc));
+	disable_req.clscode = cpu_to_be16(ioc->clscode);
+	/* unsigned 32-bit time_t overflow in y2106 */
+	disable_req.tv_sec = cpu_to_be32(ktime_get_real_seconds());
+	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
+}
+
+static void
+bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
+{
+	struct bfi_ioc_getattr_req_s	attr_req;
+
+	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
+		    bfa_ioc_portid(ioc));
+	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
+	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
+}
+
+static void
+bfa_ioc_hb_check(void *cbarg)
+{
+	struct bfa_ioc_s  *ioc = cbarg;
+	u32	hb_count;
+
+	hb_count = readl(ioc->ioc_regs.heartbeat);
+	if (ioc->hb_count == hb_count) {
+		bfa_ioc_recover(ioc);
+		return;
+	}
+	ioc->hb_count = hb_count;
+
+	bfa_ioc_mbox_poll(ioc);
+	bfa_hb_timer_start(ioc);
+}
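+
+/*
+ * Note on the check above: running firmware advances the heartbeat
+ * register continuously, so two consecutive timer callbacks reading the
+ * same count imply stalled firmware and trigger bfa_ioc_recover(). The
+ * sampling interval is the heartbeat timer period armed by
+ * bfa_hb_timer_start().
+ */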
+
+static void
+bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
+{
+	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
+	bfa_hb_timer_start(ioc);
+}
+
+/*
+ *	Initiate a full firmware download.
+ */
+static bfa_status_t
+bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
+		    u32 boot_env)
+{
+	u32 *fwimg;
+	u32 pgnum, pgoff;
+	u32 loff = 0;
+	u32 chunkno = 0;
+	u32 i;
+	u32 asicmode;
+	u32 fwimg_size;
+	u32 fwimg_buf[BFI_FLASH_CHUNK_SZ_WORDS];
+	bfa_status_t status;
+
+	if (boot_env == BFI_FWBOOT_ENV_OS &&
+		boot_type == BFI_FWBOOT_TYPE_FLASH) {
+		fwimg_size = BFI_FLASH_IMAGE_SZ/sizeof(u32);
+
+		status = bfa_ioc_flash_img_get_chnk(ioc,
+			BFA_IOC_FLASH_CHUNK_ADDR(chunkno), fwimg_buf);
+		if (status != BFA_STATUS_OK)
+			return status;
+
+		fwimg = fwimg_buf;
+	} else {
+		fwimg_size = bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc));
+		fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
+					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
+	}
+
+	bfa_trc(ioc, fwimg_size);
+
+	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
+	pgoff = PSS_SMEM_PGOFF(loff);
+
+	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+
+	for (i = 0; i < fwimg_size; i++) {
+
+		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
+			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
+
+			if (boot_env == BFI_FWBOOT_ENV_OS &&
+				boot_type == BFI_FWBOOT_TYPE_FLASH) {
+				status = bfa_ioc_flash_img_get_chnk(ioc,
+					BFA_IOC_FLASH_CHUNK_ADDR(chunkno),
+					fwimg_buf);
+				if (status != BFA_STATUS_OK)
+					return status;
+
+				fwimg = fwimg_buf;
+			} else {
+				fwimg = bfa_cb_image_get_chunk(
+					bfa_ioc_asic_gen(ioc),
+					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
+			}
+		}
+
+		/*
+		 * write smem
+		 */
+		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
+			      fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);
+
+		loff += sizeof(u32);
+
+		/*
+		 * handle page offset wrap around
+		 */
+		loff = PSS_SMEM_PGOFF(loff);
+		if (loff == 0) {
+			pgnum++;
+			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+		}
+	}
+
+	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
+			ioc->ioc_regs.host_page_num_fn);
+
+	/*
+	 * Set boot type, env and device mode at the end.
+	 */
+	if (boot_env == BFI_FWBOOT_ENV_OS &&
+		boot_type == BFI_FWBOOT_TYPE_FLASH) {
+		boot_type = BFI_FWBOOT_TYPE_NORMAL;
+	}
+	asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
+				ioc->port0_mode, ioc->port1_mode);
+	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF,
+			swab32(asicmode));
+	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_TYPE_OFF,
+			swab32(boot_type));
+	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF,
+			swab32(boot_env));
+	return BFA_STATUS_OK;
+}
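+
+/*
+ * Worked example of the chunking above, assuming the usual 256-byte
+ * BFI_FLASH_CHUNK_SZ (64 words per chunk): word index i = 70 falls in
+ * chunk 70 / 64 = 1 at offset 70 % 64 = 6, i.e.
+ *
+ *	BFA_IOC_FLASH_CHUNK_NO(70)        == 1
+ *	BFA_IOC_FLASH_OFFSET_IN_CHUNK(70) == 6
+ *
+ * so the download loop refetches fwimg whenever i crosses a 64-word
+ * chunk boundary.
+ */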
+
+/*
+ * Update BFA configuration from firmware configuration.
+ */
+static void
+bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
+{
+	struct bfi_ioc_attr_s	*attr = ioc->attr;
+
+	attr->adapter_prop  = be32_to_cpu(attr->adapter_prop);
+	attr->card_type     = be32_to_cpu(attr->card_type);
+	attr->maxfrsize	    = be16_to_cpu(attr->maxfrsize);
+	ioc->fcmode	= (attr->port_mode == BFI_PORT_MODE_FC);
+	attr->mfg_year	= be16_to_cpu(attr->mfg_year);
+
+	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
+}
+
+/*
+ * Attach time initialization of mbox logic.
+ */
+static void
+bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
+{
+	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
+	int	mc;
+
+	INIT_LIST_HEAD(&mod->cmd_q);
+	for (mc = 0; mc < BFI_MC_MAX; mc++) {
+		mod->mbhdlr[mc].cbfn = NULL;
+		mod->mbhdlr[mc].cbarg = ioc->bfa;
+	}
+}
+
+/*
+ * Mbox poll timer -- restarts any pending mailbox requests.
+ */
+static void
+bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
+{
+	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
+	struct bfa_mbox_cmd_s		*cmd;
+	u32			stat;
+
+	/*
+	 * If no command pending, do nothing
+	 */
+	if (list_empty(&mod->cmd_q))
+		return;
+
+	/*
+	 * If previous command is not yet fetched by firmware, do nothing
+	 */
+	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
+	if (stat)
+		return;
+
+	/*
+	 * Dequeue the pending command and hand it to firmware.
+	 */
+	bfa_q_deq(&mod->cmd_q, &cmd);
+	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
+}
+
+/*
+ * Cleanup any pending requests.
+ */
+static void
+bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc)
+{
+	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
+	struct bfa_mbox_cmd_s		*cmd;
+
+	while (!list_empty(&mod->cmd_q))
+		bfa_q_deq(&mod->cmd_q, &cmd);
+}
+
+/*
+ * Read data from SMEM to host through PCI memmap
+ *
+ * @param[in]	ioc	memory for IOC
+ * @param[in]	tbuf	app memory to store data from smem
+ * @param[in]	soff	smem offset
+ * @param[in]	sz	size of smem in bytes
+ */
+static bfa_status_t
+bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
+{
+	u32 pgnum, loff;
+	__be32 r32;
+	int i, len;
+	u32 *buf = tbuf;
+
+	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
+	loff = PSS_SMEM_PGOFF(soff);
+	bfa_trc(ioc, pgnum);
+	bfa_trc(ioc, loff);
+	bfa_trc(ioc, sz);
+
+	/*
+	 *  Hold semaphore to serialize pll init and fwtrc.
+	 */
+	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
+		bfa_trc(ioc, 0);
+		return BFA_STATUS_FAILED;
+	}
+
+	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+
+	len = sz/sizeof(u32);
+	bfa_trc(ioc, len);
+	for (i = 0; i < len; i++) {
+		r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
+		buf[i] = swab32(r32);
+		loff += sizeof(u32);
+
+		/*
+		 * handle page offset wrap around
+		 */
+		loff = PSS_SMEM_PGOFF(loff);
+		if (loff == 0) {
+			pgnum++;
+			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+		}
+	}
+	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
+			ioc->ioc_regs.host_page_num_fn);
+	/*
+	 *  release semaphore.
+	 */
+	readl(ioc->ioc_regs.ioc_init_sem_reg);
+	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
+
+	bfa_trc(ioc, pgnum);
+	return BFA_STATUS_OK;
+}
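+
+/*
+ * Worked example of the page-wrap handling above, assuming the 32 KB
+ * smem page implied by PSS_SMEM_PGOFF()/PSS_SMEM_PGNUM(): a read at
+ * soff = 0x7ffc starts with pgnum = smem_pg0 and loff = 0x7ffc; after
+ * one word loff reaches 0x8000, PSS_SMEM_PGOFF() wraps it to 0, and
+ * pgnum is bumped and rewritten to host_page_num_fn before the next
+ * access. bfa_ioc_smem_clr() below uses the same pattern.
+ */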
+
+/*
+ * Clear SMEM data from host through PCI memmap
+ *
+ * @param[in]	ioc	memory for IOC
+ * @param[in]	soff	smem offset
+ * @param[in]	sz	size of smem in bytes
+ */
+static bfa_status_t
+bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
+{
+	int i, len;
+	u32 pgnum, loff;
+
+	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
+	loff = PSS_SMEM_PGOFF(soff);
+	bfa_trc(ioc, pgnum);
+	bfa_trc(ioc, loff);
+	bfa_trc(ioc, sz);
+
+	/*
+	 *  Hold semaphore to serialize pll init and fwtrc.
+	 */
+	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
+		bfa_trc(ioc, 0);
+		return BFA_STATUS_FAILED;
+	}
+
+	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+
+	len = sz/sizeof(u32); /* len in words */
+	bfa_trc(ioc, len);
+	for (i = 0; i < len; i++) {
+		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
+		loff += sizeof(u32);
+
+		/*
+		 * handle page offset wrap around
+		 */
+		loff = PSS_SMEM_PGOFF(loff);
+		if (loff == 0) {
+			pgnum++;
+			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+		}
+	}
+	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
+			ioc->ioc_regs.host_page_num_fn);
+
+	/*
+	 *  release semaphore.
+	 */
+	readl(ioc->ioc_regs.ioc_init_sem_reg);
+	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
+	bfa_trc(ioc, pgnum);
+	return BFA_STATUS_OK;
+}
+
+static void
+bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
+{
+	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
+
+	/*
+	 * Notify driver and common modules registered for notification.
+	 */
+	ioc->cbfn->hbfail_cbfn(ioc->bfa);
+	bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
+
+	bfa_ioc_debug_save_ftrc(ioc);
+
+	BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
+		"Heart Beat of IOC has failed\n");
+	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);
+}
+
+static void
+bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
+{
+	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
+	/*
+	 * Provide enable completion callback.
+	 */
+	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+	BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
+		"Running firmware version is incompatible "
+		"with the driver version\n");
+	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
+}
+
+bfa_status_t
+bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
+{
+	/*
+	 *  Hold semaphore so that nobody can access the chip during init.
+	 */
+	bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
+
+	bfa_ioc_pll_init_asic(ioc);
+
+	ioc->pllinit = BFA_TRUE;
+
+	/*
+	 * Initialize LMEM
+	 */
+	bfa_ioc_lmem_init(ioc);
+
+	/*
+	 *  release semaphore.
+	 */
+	readl(ioc->ioc_regs.ioc_init_sem_reg);
+	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
+
+	return BFA_STATUS_OK;
+}
+
+/*
+ * Interface used by diag module to do firmware boot with memory test
+ * as the entry vector.
+ */
+bfa_status_t
+bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
+{
+	struct bfi_ioc_image_hdr_s *drv_fwhdr;
+	bfa_status_t status;
+	bfa_ioc_stats(ioc, ioc_boots);
+
+	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
+		return BFA_STATUS_FAILED;
+
+	if (boot_env == BFI_FWBOOT_ENV_OS &&
+		boot_type == BFI_FWBOOT_TYPE_NORMAL) {
+
+		drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
+			bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
+
+		/*
+		 * Work with Flash iff flash f/w is better than driver f/w.
+		 * Otherwise push the driver's firmware.
+		 */
+		if (bfa_ioc_flash_fwver_cmp(ioc, drv_fwhdr) ==
+						BFI_IOC_IMG_VER_BETTER)
+			boot_type = BFI_FWBOOT_TYPE_FLASH;
+	}
+
+	/*
+	 * Initialize IOC state of all functions on a chip reset.
+	 */
+	if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
+		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
+		bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
+	} else {
+		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_INITING);
+		bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_INITING);
+	}
+
+	bfa_ioc_msgflush(ioc);
+	status = bfa_ioc_download_fw(ioc, boot_type, boot_env);
+	if (status == BFA_STATUS_OK)
+		bfa_ioc_lpu_start(ioc);
+	else {
+		WARN_ON(boot_type == BFI_FWBOOT_TYPE_MEMTEST);
+		bfa_iocpf_timeout(ioc);
+	}
+	return status;
+}
+
+/*
+ * Enable/disable IOC failure auto recovery.
+ */
+void
+bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
+{
+	bfa_auto_recover = auto_recover;
+}
+
+bfa_boolean_t
+bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
+{
+	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
+}
+
+bfa_boolean_t
+bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
+{
+	u32 r32 = bfa_ioc_get_cur_ioc_fwstate(ioc);
+
+	return ((r32 != BFI_IOC_UNINIT) &&
+		(r32 != BFI_IOC_INITING) &&
+		(r32 != BFI_IOC_MEMTEST));
+}
+
+bfa_boolean_t
+bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
+{
+	__be32	*msgp = mbmsg;
+	u32	r32;
+	int		i;
+
+	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
+	if ((r32 & 1) == 0)
+		return BFA_FALSE;
+
+	/*
+	 * read the MBOX msg
+	 */
+	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
+	     i++) {
+		r32 = readl(ioc->ioc_regs.lpu_mbox +
+				   i * sizeof(u32));
+		msgp[i] = cpu_to_be32(r32);
+	}
+
+	/*
+	 * turn off mailbox interrupt by clearing mailbox status
+	 */
+	writel(1, ioc->ioc_regs.lpu_mbox_cmd);
+	readl(ioc->ioc_regs.lpu_mbox_cmd);
+
+	return BFA_TRUE;
+}
+
+void
+bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
+{
+	union bfi_ioc_i2h_msg_u	*msg;
+	struct bfa_iocpf_s *iocpf = &ioc->iocpf;
+
+	msg = (union bfi_ioc_i2h_msg_u *) m;
+
+	bfa_ioc_stats(ioc, ioc_isrs);
+
+	switch (msg->mh.msg_id) {
+	case BFI_IOC_I2H_HBEAT:
+		break;
+
+	case BFI_IOC_I2H_ENABLE_REPLY:
+		ioc->port_mode = ioc->port_mode_cfg =
+				(enum bfa_mode_s)msg->fw_event.port_mode;
+		ioc->ad_cap_bm = msg->fw_event.cap_bm;
+		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
+		break;
+
+	case BFI_IOC_I2H_DISABLE_REPLY:
+		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
+		break;
+
+	case BFI_IOC_I2H_GETATTR_REPLY:
+		bfa_ioc_getattr_reply(ioc);
+		break;
+
+	default:
+		bfa_trc(ioc, msg->mh.msg_id);
+		WARN_ON(1);
+	}
+}
+
+/*
+ * IOC attach time initialization and setup.
+ *
+ * @param[in]	ioc	memory for IOC
+ * @param[in]	bfa	driver instance structure
+ */
+void
+bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
+	       struct bfa_timer_mod_s *timer_mod)
+{
+	ioc->bfa	= bfa;
+	ioc->cbfn	= cbfn;
+	ioc->timer_mod	= timer_mod;
+	ioc->fcmode	= BFA_FALSE;
+	ioc->pllinit	= BFA_FALSE;
+	ioc->dbg_fwsave_once = BFA_TRUE;
+	ioc->iocpf.ioc	= ioc;
+
+	bfa_ioc_mbox_attach(ioc);
+	INIT_LIST_HEAD(&ioc->notify_q);
+
+	bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+	bfa_fsm_send_event(ioc, IOC_E_RESET);
+}
+
+/*
+ * Driver detach time IOC cleanup.
+ */
+void
+bfa_ioc_detach(struct bfa_ioc_s *ioc)
+{
+	bfa_fsm_send_event(ioc, IOC_E_DETACH);
+	INIT_LIST_HEAD(&ioc->notify_q);
+}
+
+/*
+ * Setup IOC PCI properties.
+ *
+ * @param[in]	pcidev	PCI device information for this IOC
+ */
+void
+bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
+		enum bfi_pcifn_class clscode)
+{
+	ioc->clscode	= clscode;
+	ioc->pcidev	= *pcidev;
+
+	/*
+	 * Initialize IOC and device personality
+	 */
+	ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
+	ioc->asic_mode  = BFI_ASIC_MODE_FC;
+
+	switch (pcidev->device_id) {
+	case BFA_PCI_DEVICE_ID_FC_8G1P:
+	case BFA_PCI_DEVICE_ID_FC_8G2P:
+		ioc->asic_gen = BFI_ASIC_GEN_CB;
+		ioc->fcmode = BFA_TRUE;
+		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
+		ioc->ad_cap_bm = BFA_CM_HBA;
+		break;
+
+	case BFA_PCI_DEVICE_ID_CT:
+		ioc->asic_gen = BFI_ASIC_GEN_CT;
+		ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
+		ioc->asic_mode  = BFI_ASIC_MODE_ETH;
+		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
+		ioc->ad_cap_bm = BFA_CM_CNA;
+		break;
+
+	case BFA_PCI_DEVICE_ID_CT_FC:
+		ioc->asic_gen = BFI_ASIC_GEN_CT;
+		ioc->fcmode = BFA_TRUE;
+		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
+		ioc->ad_cap_bm = BFA_CM_HBA;
+		break;
+
+	case BFA_PCI_DEVICE_ID_CT2:
+	case BFA_PCI_DEVICE_ID_CT2_QUAD:
+		ioc->asic_gen = BFI_ASIC_GEN_CT2;
+		if (clscode == BFI_PCIFN_CLASS_FC &&
+		    pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
+			ioc->asic_mode  = BFI_ASIC_MODE_FC16;
+			ioc->fcmode = BFA_TRUE;
+			ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
+			ioc->ad_cap_bm = BFA_CM_HBA;
+		} else {
+			ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
+			ioc->asic_mode  = BFI_ASIC_MODE_ETH;
+			if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
+				ioc->port_mode =
+				ioc->port_mode_cfg = BFA_MODE_CNA;
+				ioc->ad_cap_bm = BFA_CM_CNA;
+			} else {
+				ioc->port_mode =
+				ioc->port_mode_cfg = BFA_MODE_NIC;
+				ioc->ad_cap_bm = BFA_CM_NIC;
+			}
+		}
+		break;
+
+	default:
+		WARN_ON(1);
+	}
+
+	/*
+	 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
+	 */
+	if (ioc->asic_gen == BFI_ASIC_GEN_CB)
+		bfa_ioc_set_cb_hwif(ioc);
+	else if (ioc->asic_gen == BFI_ASIC_GEN_CT)
+		bfa_ioc_set_ct_hwif(ioc);
+	else {
+		WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
+		bfa_ioc_set_ct2_hwif(ioc);
+		bfa_ioc_ct2_poweron(ioc);
+	}
+
+	bfa_ioc_map_port(ioc);
+	bfa_ioc_reg_init(ioc);
+}
+
+/*
+ * Initialize IOC dma memory
+ *
+ * @param[in]	dm_kva	kernel virtual address of IOC dma memory
+ * @param[in]	dm_pa	physical address of IOC dma memory
+ */
+void
+bfa_ioc_mem_claim(struct bfa_ioc_s *ioc,  u8 *dm_kva, u64 dm_pa)
+{
+	/*
+	 * dma memory for firmware attribute
+	 */
+	ioc->attr_dma.kva = dm_kva;
+	ioc->attr_dma.pa = dm_pa;
+	ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
+}
+
+void
+bfa_ioc_enable(struct bfa_ioc_s *ioc)
+{
+	bfa_ioc_stats(ioc, ioc_enables);
+	ioc->dbg_fwsave_once = BFA_TRUE;
+
+	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
+}
+
+void
+bfa_ioc_disable(struct bfa_ioc_s *ioc)
+{
+	bfa_ioc_stats(ioc, ioc_disables);
+	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
+}
+
+void
+bfa_ioc_suspend(struct bfa_ioc_s *ioc)
+{
+	ioc->dbg_fwsave_once = BFA_TRUE;
+	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
+}
+
+/*
+ * Initialize memory for saving firmware trace. Driver must initialize
+ * trace memory before calling bfa_ioc_enable().
+ */
+void
+bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
+{
+	ioc->dbg_fwsave	    = dbg_fwsave;
+	ioc->dbg_fwsave_len = BFA_DBG_FWTRC_LEN;
+}
+
+/*
+ * Register mailbox message handler functions
+ *
+ * @param[in]	ioc		IOC instance
+ * @param[in]	mcfuncs		message class handler functions
+ */
+void
+bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
+{
+	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
+	int				mc;
+
+	for (mc = 0; mc < BFI_MC_MAX; mc++)
+		mod->mbhdlr[mc].cbfn = mcfuncs[mc];
+}
+
+/*
+ * Register mailbox message handler function, to be called by common modules
+ */
+void
+bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
+		    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
+{
+	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
+
+	mod->mbhdlr[mc].cbfn	= cbfn;
+	mod->mbhdlr[mc].cbarg	= cbarg;
+}
+
+/*
+ * Queue a mailbox command request to firmware. The request is queued
+ * (not sent) if the mailbox is busy; it is the caller's responsibility
+ * to serialize requests.
+ *
+ * @param[in]	ioc	IOC instance
+ * @param[in]	cmd	Mailbox command
+ */
+void
+bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
+{
+	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
+	u32			stat;
+
+	/*
+	 * If a previous command is pending, queue new command
+	 */
+	if (!list_empty(&mod->cmd_q)) {
+		list_add_tail(&cmd->qe, &mod->cmd_q);
+		return;
+	}
+
+	/*
+	 * If mailbox is busy, queue command for poll timer
+	 */
+	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
+	if (stat) {
+		list_add_tail(&cmd->qe, &mod->cmd_q);
+		return;
+	}
+
+	/*
+	 * mailbox is free -- queue command to firmware
+	 */
+	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
+}
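+
+/*
+ * Illustrative usage (hypothetical caller, not taken from this file):
+ * a client module fills cmd->msg with its BFI request and queues it.
+ * Because the command may sit on cmd_q until the poll timer drains it,
+ * the struct must remain valid until it is sent -- so it is embedded in
+ * long-lived per-module state, never placed on the stack:
+ *
+ *	struct bfa_mbox_cmd_s *cmd = &my_mod->mbcmd; // hypothetical owner
+ *
+ *	memcpy(cmd->msg, &my_req, sizeof(my_req));   // my_req: a BFI req
+ *	bfa_ioc_mbox_queue(ioc, cmd);
+ */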
+
+/*
+ * Handle mailbox interrupts
+ */
+void
+bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
+{
+	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
+	struct bfi_mbmsg_s		m;
+	int				mc;
+
+	if (bfa_ioc_msgget(ioc, &m)) {
+		/*
+		 * Treat IOC message class as special.
+		 */
+		mc = m.mh.msg_class;
+		if (mc == BFI_MC_IOC) {
+			bfa_ioc_isr(ioc, &m);
+			return;
+		}
+
+		if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
+			return;
+
+		mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
+	}
+
+	bfa_ioc_lpu_read_stat(ioc);
+
+	/*
+	 * Try to send pending mailbox commands
+	 */
+	bfa_ioc_mbox_poll(ioc);
+}
+
+void
+bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
+{
+	bfa_ioc_stats(ioc, ioc_hbfails);
+	ioc->stats.hb_count = ioc->hb_count;
+	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
+}
+
+/*
+ * return true if IOC is disabled
+ */
+bfa_boolean_t
+bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
+{
+	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
+		bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
+}
+
+/*
+ * return true if IOC firmware is different.
+ */
+bfa_boolean_t
+bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
+{
+	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
+		bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
+		bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
+}
+
+/*
+ * Check if adapter is disabled -- both IOCs should be in a disabled
+ * state.
+ */
+bfa_boolean_t
+bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
+{
+	u32	ioc_state;
+
+	if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
+		return BFA_FALSE;
+
+	ioc_state = bfa_ioc_get_cur_ioc_fwstate(ioc);
+	if (!bfa_ioc_state_disabled(ioc_state))
+		return BFA_FALSE;
+
+	if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
+		ioc_state = bfa_ioc_get_alt_ioc_fwstate(ioc);
+		if (!bfa_ioc_state_disabled(ioc_state))
+			return BFA_FALSE;
+	}
+
+	return BFA_TRUE;
+}
+
+/*
+ * Reset IOC fwstate registers.
+ */
+void
+bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc)
+{
+	bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_UNINIT);
+	bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_UNINIT);
+}
+
+#define BFA_MFG_NAME "QLogic"
+void
+bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
+			 struct bfa_adapter_attr_s *ad_attr)
+{
+	struct bfi_ioc_attr_s	*ioc_attr;
+
+	ioc_attr = ioc->attr;
+
+	bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
+	bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
+	bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
+	bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
+	memcpy(&ad_attr->vpd, &ioc_attr->vpd,
+		      sizeof(struct bfa_mfg_vpd_s));
+
+	ad_attr->nports = bfa_ioc_get_nports(ioc);
+	ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
+
+	bfa_ioc_get_adapter_model(ioc, ad_attr->model);
+	/* For now, model descr uses same model string */
+	bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
+
+	ad_attr->card_type = ioc_attr->card_type;
+	ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
+
+	if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
+		ad_attr->prototype = 1;
+	else
+		ad_attr->prototype = 0;
+
+	ad_attr->pwwn = ioc->attr->pwwn;
+	ad_attr->mac  = bfa_ioc_get_mac(ioc);
+
+	ad_attr->pcie_gen = ioc_attr->pcie_gen;
+	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
+	ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
+	ad_attr->asic_rev = ioc_attr->asic_rev;
+
+	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
+
+	ad_attr->cna_capable = bfa_ioc_is_cna(ioc);
+	ad_attr->trunk_capable = (ad_attr->nports > 1) &&
+				  !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz;
+	ad_attr->mfg_day = ioc_attr->mfg_day;
+	ad_attr->mfg_month = ioc_attr->mfg_month;
+	ad_attr->mfg_year = ioc_attr->mfg_year;
+	memcpy(ad_attr->uuid, ioc_attr->uuid, BFA_ADAPTER_UUID_LEN);
+}
+
+enum bfa_ioc_type_e
+bfa_ioc_get_type(struct bfa_ioc_s *ioc)
+{
+	if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
+		return BFA_IOC_TYPE_LL;
+
+	WARN_ON(ioc->clscode != BFI_PCIFN_CLASS_FC);
+
+	return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
+		? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
+}
+
+void
+bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
+{
+	memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
+	memcpy((void *)serial_num,
+			(void *)ioc->attr->brcd_serialnum,
+			BFA_ADAPTER_SERIAL_NUM_LEN);
+}
+
+void
+bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
+{
+	memset((void *)fw_ver, 0, BFA_VERSION_LEN);
+	memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
+}
+
+void
+bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
+{
+	WARN_ON(!chip_rev);
+
+	memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
+
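+	/* build the fixed "Rev-<asic_rev>" string byte by byte */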
+	chip_rev[0] = 'R';
+	chip_rev[1] = 'e';
+	chip_rev[2] = 'v';
+	chip_rev[3] = '-';
+	chip_rev[4] = ioc->attr->asic_rev;
+	chip_rev[5] = '\0';
+}
+
+void
+bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
+{
+	memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
+	memcpy(optrom_ver, ioc->attr->optrom_version,
+		      BFA_VERSION_LEN);
+}
+
+void
+bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
+{
+	memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
+	strlcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
+}
+
+void
+bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
+{
+	struct bfi_ioc_attr_s	*ioc_attr;
+	u8 nports = bfa_ioc_get_nports(ioc);
+
+	WARN_ON(!model);
+	memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
+
+	ioc_attr = ioc->attr;
+
+	if (bfa_asic_id_ct2(ioc->pcidev.device_id) &&
+		(!bfa_mfg_is_mezz(ioc_attr->card_type)))
+		snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u-%u%s",
+			BFA_MFG_NAME, ioc_attr->card_type, nports, "p");
+	else
+		snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
+			BFA_MFG_NAME, ioc_attr->card_type);
+}
+
+enum bfa_ioc_state
+bfa_ioc_get_state(struct bfa_ioc_s *ioc)
+{
+	enum bfa_iocpf_state iocpf_st;
+	enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
+
+	if (ioc_st == BFA_IOC_ENABLING ||
+		ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
+
+		iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
+
+		switch (iocpf_st) {
+		case BFA_IOCPF_SEMWAIT:
+			ioc_st = BFA_IOC_SEMWAIT;
+			break;
+
+		case BFA_IOCPF_HWINIT:
+			ioc_st = BFA_IOC_HWINIT;
+			break;
+
+		case BFA_IOCPF_FWMISMATCH:
+			ioc_st = BFA_IOC_FWMISMATCH;
+			break;
+
+		case BFA_IOCPF_FAIL:
+			ioc_st = BFA_IOC_FAIL;
+			break;
+
+		case BFA_IOCPF_INITFAIL:
+			ioc_st = BFA_IOC_INITFAIL;
+			break;
+
+		default:
+			break;
+		}
+	}
+
+	return ioc_st;
+}
+
+void
+bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
+{
+	memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
+
+	ioc_attr->state = bfa_ioc_get_state(ioc);
+	ioc_attr->port_id = bfa_ioc_portid(ioc);
+	ioc_attr->port_mode = ioc->port_mode;
+	ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
+	ioc_attr->cap_bm = ioc->ad_cap_bm;
+
+	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
+
+	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
+
+	ioc_attr->pci_attr.device_id = bfa_ioc_devid(ioc);
+	ioc_attr->pci_attr.pcifn = bfa_ioc_pcifn(ioc);
+	ioc_attr->def_fn = (bfa_ioc_pcifn(ioc) == bfa_ioc_portid(ioc));
+	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
+}
+
+mac_t
+bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
+{
+	/*
+	 * Check the IOC type and return the appropriate MAC
+	 */
+	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
+		return ioc->attr->fcoe_mac;
+	else
+		return ioc->attr->mac;
+}
+
+mac_t
+bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
+{
+	mac_t	m;
+
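+	/* derive a per-PCI-function MAC by offsetting the factory (mfg) base MAC */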
+	m = ioc->attr->mfg_mac;
+	if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
+		m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
+	else
+		bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
+			bfa_ioc_pcifn(ioc));
+
+	return m;
+}
+
+/*
+ * Send AEN notification
+ */
+void
+bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
+{
+	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
+	struct bfa_aen_entry_s	*aen_entry;
+	enum bfa_ioc_type_e ioc_type;
+
+	bfad_get_aen_entry(bfad, aen_entry);
+	if (!aen_entry)
+		return;
+
+	ioc_type = bfa_ioc_get_type(ioc);
+	switch (ioc_type) {
+	case BFA_IOC_TYPE_FC:
+		aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
+		break;
+	case BFA_IOC_TYPE_FCoE:
+		aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
+		aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
+		break;
+	case BFA_IOC_TYPE_LL:
+		aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
+		break;
+	default:
+		WARN_ON(1);
+		break;
+	}
+
+	/* Send the AEN notification */
+	aen_entry->aen_data.ioc.ioc_type = ioc_type;
+	bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
+				  BFA_AEN_CAT_IOC, event);
+}
+
+/*
+ * Retrieve saved firmware trace from a prior IOC failure.
+ */
+bfa_status_t
+bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
+{
+	int	tlen;
+
+	if (ioc->dbg_fwsave_len == 0)
+		return BFA_STATUS_ENOFSAVE;
+
+	tlen = *trclen;
+	if (tlen > ioc->dbg_fwsave_len)
+		tlen = ioc->dbg_fwsave_len;
+
+	memcpy(trcdata, ioc->dbg_fwsave, tlen);
+	*trclen = tlen;
+	return BFA_STATUS_OK;
+}
+
+/*
+ * Retrieve the current firmware trace from IOC shared memory.
+ */
+bfa_status_t
+bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
+{
+	u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
+	int tlen;
+	bfa_status_t status;
+
+	bfa_trc(ioc, *trclen);
+
+	tlen = *trclen;
+	if (tlen > BFA_DBG_FWTRC_LEN)
+		tlen = BFA_DBG_FWTRC_LEN;
+
+	status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
+	*trclen = tlen;
+	return status;
+}
+
+static void
+bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
+{
+	struct bfa_mbox_cmd_s cmd;
+	struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;
+
+	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
+		    bfa_ioc_portid(ioc));
+	req->clscode = cpu_to_be16(ioc->clscode);
+	bfa_ioc_mbox_queue(ioc, &cmd);
+}
+
+static void
+bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
+{
+	u32 fwsync_iter = 1000;
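+	/* iteration cap for the pure-spin wait below */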
+
+	bfa_ioc_send_fwsync(ioc);
+
+	/*
+	 * After sending a fw sync mbox command wait for it to
+	 * take effect.  We will not wait for a response because
+	 *    1. fw_sync mbox cmd doesn't have a response.
+	 *    2. Even if we implement that, interrupts might not
+	 *	 be enabled when we call this function.
+	 * So, just keep checking if any mbox cmd is pending, and
+	 * after waiting for a reasonable amount of time, go ahead.
+	 * It is possible that fw has crashed and the mbox command
+	 * is never acknowledged.
+	 */
+	while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
+		fwsync_iter--;
+}
+
+/*
+ * Dump firmware smem
+ */
+bfa_status_t
+bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
+				u32 *offset, int *buflen)
+{
+	u32 loff;
+	int dlen;
+	bfa_status_t status;
+	u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);
+
+	if (*offset >= smem_len) {
+		*offset = *buflen = 0;
+		return BFA_STATUS_EINVAL;
+	}
+
+	loff = *offset;
+	dlen = *buflen;
+
+	/*
+	 * First smem read, sync smem before proceeding
+	 * No need to sync before reading every chunk.
+	 */
+	if (loff == 0)
+		bfa_ioc_fwsync(ioc);
+
+	if ((loff + dlen) >= smem_len)
+		dlen = smem_len - loff;
+
+	status = bfa_ioc_smem_read(ioc, buf, loff, dlen);
+
+	if (status != BFA_STATUS_OK) {
+		*offset = *buflen = 0;
+		return status;
+	}
+
+	*offset += dlen;
+
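+	/* wrap the read cookie so the next call restarts from the beginning */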
+	if (*offset >= smem_len)
+		*offset = 0;
+
+	*buflen = dlen;
+
+	return status;
+}
+
+/*
+ * Firmware statistics
+ */
+bfa_status_t
+bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
+{
+	u32 loff = BFI_IOC_FWSTATS_OFF +
+		BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
+	int tlen;
+	bfa_status_t status;
+
+	if (ioc->stats_busy) {
+		bfa_trc(ioc, ioc->stats_busy);
+		return BFA_STATUS_DEVBUSY;
+	}
+	ioc->stats_busy = BFA_TRUE;
+
+	tlen = sizeof(struct bfa_fw_stats_s);
+	status = bfa_ioc_smem_read(ioc, stats, loff, tlen);
+
+	ioc->stats_busy = BFA_FALSE;
+	return status;
+}
+
+bfa_status_t
+bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
+{
+	u32 loff = BFI_IOC_FWSTATS_OFF +
+		BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
+	int tlen;
+	bfa_status_t status;
+
+	if (ioc->stats_busy) {
+		bfa_trc(ioc, ioc->stats_busy);
+		return BFA_STATUS_DEVBUSY;
+	}
+	ioc->stats_busy = BFA_TRUE;
+
+	tlen = sizeof(struct bfa_fw_stats_s);
+	status = bfa_ioc_smem_clr(ioc, loff, tlen);
+
+	ioc->stats_busy = BFA_FALSE;
+	return status;
+}
+
+/*
+ * Save firmware trace if configured.
+ */
+void
+bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
+{
+	int		tlen;
+
+	if (ioc->dbg_fwsave_once) {
+		ioc->dbg_fwsave_once = BFA_FALSE;
+		if (ioc->dbg_fwsave_len) {
+			tlen = ioc->dbg_fwsave_len;
+			bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
+		}
+	}
+}
+
+/*
+ * Firmware failure detected. Start recovery actions.
+ */
+static void
+bfa_ioc_recover(struct bfa_ioc_s *ioc)
+{
+	bfa_ioc_stats(ioc, ioc_hbfails);
+	ioc->stats.hb_count = ioc->hb_count;
+	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
+}
+
+/*
+ *  BFA IOC PF private functions
+ */
+static void
+bfa_iocpf_timeout(void *ioc_arg)
+{
+	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;
+
+	bfa_trc(ioc, 0);
+	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
+}
+
+static void
+bfa_iocpf_sem_timeout(void *ioc_arg)
+{
+	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;
+
+	bfa_ioc_hw_sem_get(ioc);
+}
+
+static void
+bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
+{
+	u32 fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
+
+	bfa_trc(ioc, fwstate);
+
+	if (fwstate == BFI_IOC_DISABLED) {
+		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
+		return;
+	}
+
+	if (ioc->iocpf.poll_time >= (3 * BFA_IOC_TOV))
+		bfa_iocpf_timeout(ioc);
+	else {
+		ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
+		bfa_iocpf_poll_timer_start(ioc);
+	}
+}
+
+static void
+bfa_iocpf_poll_timeout(void *ioc_arg)
+{
+	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
+
+	bfa_ioc_poll_fwinit(ioc);
+}
+
+/*
+ *  bfa timer function
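+ *  Expected to be called every BFA_TIMER_FREQ msec; each beat ages all
+ *  armed timers and invokes the callbacks of those that have expired.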
+ */
+void
+bfa_timer_beat(struct bfa_timer_mod_s *mod)
+{
+	struct list_head *qh = &mod->timer_q;
+	struct list_head *qe, *qe_next;
+	struct bfa_timer_s *elem;
+	struct list_head timedout_q;
+
+	INIT_LIST_HEAD(&timedout_q);
+
+	qe = bfa_q_next(qh);
+
+	while (qe != qh) {
+		qe_next = bfa_q_next(qe);
+
+		elem = (struct bfa_timer_s *) qe;
+		if (elem->timeout <= BFA_TIMER_FREQ) {
+			elem->timeout = 0;
+			list_del(&elem->qe);
+			list_add_tail(&elem->qe, &timedout_q);
+		} else {
+			elem->timeout -= BFA_TIMER_FREQ;
+		}
+
+		qe = qe_next;	/* go to next elem */
+	}
+
+	/*
+	 * Pop all the timeout entries
+	 */
+	while (!list_empty(&timedout_q)) {
+		bfa_q_deq(&timedout_q, &elem);
+		elem->timercb(elem->arg);
+	}
+}
+
+/*
+ * Should be called with lock protection
+ */
+void
+bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
+		    void (*timercb) (void *), void *arg, unsigned int timeout)
+{
+	WARN_ON(timercb == NULL);
+	WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer));
+
+	timer->timeout = timeout;
+	timer->timercb = timercb;
+	timer->arg = arg;
+
+	list_add_tail(&timer->qe, &mod->timer_q);
+}
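+
+/*
+ * Typical usage (see bfa_diag_memtest below): arm a one-shot timeout
+ *
+ *	bfa_timer_begin(diag->ioc->timer_mod, &diag->timer,
+ *			bfa_diag_memtest_done, diag, memtest_tov);
+ */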
+
+/*
+ * Should be called with lock protection
+ */
+void
+bfa_timer_stop(struct bfa_timer_s *timer)
+{
+	WARN_ON(list_empty(&timer->qe));
+
+	list_del(&timer->qe);
+}
+
+/*
+ *	ASIC block related
+ */
+static void
+bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg)
+{
+	struct bfa_ablk_cfg_inst_s *cfg_inst;
+	int i, j;
+	u16	be16;
+
+	for (i = 0; i < BFA_ABLK_MAX; i++) {
+		cfg_inst = &cfg->inst[i];
+		for (j = 0; j < BFA_ABLK_MAX_PFS; j++) {
+			be16 = cfg_inst->pf_cfg[j].pers;
+			cfg_inst->pf_cfg[j].pers = be16_to_cpu(be16);
+			be16 = cfg_inst->pf_cfg[j].num_qpairs;
+			cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16);
+			be16 = cfg_inst->pf_cfg[j].num_vectors;
+			cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16);
+			be16 = cfg_inst->pf_cfg[j].bw_min;
+			cfg_inst->pf_cfg[j].bw_min = be16_to_cpu(be16);
+			be16 = cfg_inst->pf_cfg[j].bw_max;
+			cfg_inst->pf_cfg[j].bw_max = be16_to_cpu(be16);
+		}
+	}
+}
+
+static void
+bfa_ablk_isr(void *cbarg, struct bfi_mbmsg_s *msg)
+{
+	struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
+	struct bfi_ablk_i2h_rsp_s *rsp = (struct bfi_ablk_i2h_rsp_s *)msg;
+	bfa_ablk_cbfn_t cbfn;
+
+	WARN_ON(msg->mh.msg_class != BFI_MC_ABLK);
+	bfa_trc(ablk->ioc, msg->mh.msg_id);
+
+	switch (msg->mh.msg_id) {
+	case BFI_ABLK_I2H_QUERY:
+		if (rsp->status == BFA_STATUS_OK) {
+			memcpy(ablk->cfg, ablk->dma_addr.kva,
+				sizeof(struct bfa_ablk_cfg_s));
+			bfa_ablk_config_swap(ablk->cfg);
+			ablk->cfg = NULL;
+		}
+		break;
+
+	case BFI_ABLK_I2H_ADPT_CONFIG:
+	case BFI_ABLK_I2H_PORT_CONFIG:
+		/* update config port mode */
+		ablk->ioc->port_mode_cfg = rsp->port_mode;
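+		/* fall through - no further handling needed for these responses */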
+
+	case BFI_ABLK_I2H_PF_DELETE:
+	case BFI_ABLK_I2H_PF_UPDATE:
+	case BFI_ABLK_I2H_OPTROM_ENABLE:
+	case BFI_ABLK_I2H_OPTROM_DISABLE:
+		/* No-op */
+		break;
+
+	case BFI_ABLK_I2H_PF_CREATE:
+		*(ablk->pcifn) = rsp->pcifn;
+		ablk->pcifn = NULL;
+		break;
+
+	default:
+		WARN_ON(1);
+	}
+
+	ablk->busy = BFA_FALSE;
+	if (ablk->cbfn) {
+		cbfn = ablk->cbfn;
+		ablk->cbfn = NULL;
+		cbfn(ablk->cbarg, rsp->status);
+	}
+}
+
+static void
+bfa_ablk_notify(void *cbarg, enum bfa_ioc_event_e event)
+{
+	struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
+
+	bfa_trc(ablk->ioc, event);
+
+	switch (event) {
+	case BFA_IOC_E_ENABLED:
+		WARN_ON(ablk->busy != BFA_FALSE);
+		break;
+
+	case BFA_IOC_E_DISABLED:
+	case BFA_IOC_E_FAILED:
+		/* Fail any pending requests */
+		ablk->pcifn = NULL;
+		if (ablk->busy) {
+			if (ablk->cbfn)
+				ablk->cbfn(ablk->cbarg, BFA_STATUS_FAILED);
+			ablk->cbfn = NULL;
+			ablk->busy = BFA_FALSE;
+		}
+		break;
+
+	default:
+		WARN_ON(1);
+		break;
+	}
+}
+
+u32
+bfa_ablk_meminfo(void)
+{
+	return BFA_ROUNDUP(sizeof(struct bfa_ablk_cfg_s), BFA_DMA_ALIGN_SZ);
+}
+
+void
+bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa)
+{
+	ablk->dma_addr.kva = dma_kva;
+	ablk->dma_addr.pa  = dma_pa;
+}
+
+void
+bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc)
+{
+	ablk->ioc = ioc;
+
+	bfa_ioc_mbox_regisr(ablk->ioc, BFI_MC_ABLK, bfa_ablk_isr, ablk);
+	bfa_q_qe_init(&ablk->ioc_notify);
+	bfa_ioc_notify_init(&ablk->ioc_notify, bfa_ablk_notify, ablk);
+	list_add_tail(&ablk->ioc_notify.qe, &ablk->ioc->notify_q);
+}
+
+bfa_status_t
+bfa_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *ablk_cfg,
+		bfa_ablk_cbfn_t cbfn, void *cbarg)
+{
+	struct bfi_ablk_h2i_query_s *m;
+
+	WARN_ON(!ablk_cfg);
+
+	if (!bfa_ioc_is_operational(ablk->ioc)) {
+		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
+		return BFA_STATUS_IOC_FAILURE;
+	}
+
+	if (ablk->busy) {
+		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
+		return  BFA_STATUS_DEVBUSY;
+	}
+
+	ablk->cfg = ablk_cfg;
+	ablk->cbfn  = cbfn;
+	ablk->cbarg = cbarg;
+	ablk->busy  = BFA_TRUE;
+
+	m = (struct bfi_ablk_h2i_query_s *)ablk->mb.msg;
+	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_QUERY,
+		    bfa_ioc_portid(ablk->ioc));
+	bfa_dma_be_addr_set(m->addr, ablk->dma_addr.pa);
+	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
+
+	return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
+		u8 port, enum bfi_pcifn_class personality,
+		u16 bw_min, u16 bw_max,
+		bfa_ablk_cbfn_t cbfn, void *cbarg)
+{
+	struct bfi_ablk_h2i_pf_req_s *m;
+
+	if (!bfa_ioc_is_operational(ablk->ioc)) {
+		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
+		return BFA_STATUS_IOC_FAILURE;
+	}
+
+	if (ablk->busy) {
+		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
+		return  BFA_STATUS_DEVBUSY;
+	}
+
+	ablk->pcifn = pcifn;
+	ablk->cbfn = cbfn;
+	ablk->cbarg = cbarg;
+	ablk->busy  = BFA_TRUE;
+
+	m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
+	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE,
+		    bfa_ioc_portid(ablk->ioc));
+	m->pers = cpu_to_be16((u16)personality);
+	m->bw_min = cpu_to_be16(bw_min);
+	m->bw_max = cpu_to_be16(bw_max);
+	m->port = port;
+	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
+
+	return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
+		bfa_ablk_cbfn_t cbfn, void *cbarg)
+{
+	struct bfi_ablk_h2i_pf_req_s *m;
+
+	if (!bfa_ioc_is_operational(ablk->ioc)) {
+		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
+		return BFA_STATUS_IOC_FAILURE;
+	}
+
+	if (ablk->busy) {
+		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
+		return  BFA_STATUS_DEVBUSY;
+	}
+
+	ablk->cbfn  = cbfn;
+	ablk->cbarg = cbarg;
+	ablk->busy  = BFA_TRUE;
+
+	m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
+	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_DELETE,
+		    bfa_ioc_portid(ablk->ioc));
+	m->pcifn = (u8)pcifn;
+	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
+
+	return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_ablk_adapter_config(struct bfa_ablk_s *ablk, enum bfa_mode_s mode,
+		int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
+{
+	struct bfi_ablk_h2i_cfg_req_s *m;
+
+	if (!bfa_ioc_is_operational(ablk->ioc)) {
+		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
+		return BFA_STATUS_IOC_FAILURE;
+	}
+
+	if (ablk->busy) {
+		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
+		return  BFA_STATUS_DEVBUSY;
+	}
+
+	ablk->cbfn  = cbfn;
+	ablk->cbarg = cbarg;
+	ablk->busy  = BFA_TRUE;
+
+	m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
+	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_ADPT_CONFIG,
+		    bfa_ioc_portid(ablk->ioc));
+	m->mode = (u8)mode;
+	m->max_pf = (u8)max_pf;
+	m->max_vf = (u8)max_vf;
+	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
+
+	return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode,
+		int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
+{
+	struct bfi_ablk_h2i_cfg_req_s *m;
+
+	if (!bfa_ioc_is_operational(ablk->ioc)) {
+		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
+		return BFA_STATUS_IOC_FAILURE;
+	}
+
+	if (ablk->busy) {
+		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
+		return  BFA_STATUS_DEVBUSY;
+	}
+
+	ablk->cbfn  = cbfn;
+	ablk->cbarg = cbarg;
+	ablk->busy  = BFA_TRUE;
+
+	m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
+	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PORT_CONFIG,
+		bfa_ioc_portid(ablk->ioc));
+	m->port = (u8)port;
+	m->mode = (u8)mode;
+	m->max_pf = (u8)max_pf;
+	m->max_vf = (u8)max_vf;
+	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
+
+	return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, u16 bw_min,
+		   u16 bw_max, bfa_ablk_cbfn_t cbfn, void *cbarg)
+{
+	struct bfi_ablk_h2i_pf_req_s *m;
+
+	if (!bfa_ioc_is_operational(ablk->ioc)) {
+		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
+		return BFA_STATUS_IOC_FAILURE;
+	}
+
+	if (ablk->busy) {
+		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
+		return  BFA_STATUS_DEVBUSY;
+	}
+
+	ablk->cbfn  = cbfn;
+	ablk->cbarg = cbarg;
+	ablk->busy  = BFA_TRUE;
+
+	m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
+	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE,
+		bfa_ioc_portid(ablk->ioc));
+	m->pcifn = (u8)pcifn;
+	m->bw_min = cpu_to_be16(bw_min);
+	m->bw_max = cpu_to_be16(bw_max);
+	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
+
+	return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
+{
+	struct bfi_ablk_h2i_optrom_s *m;
+
+	if (!bfa_ioc_is_operational(ablk->ioc)) {
+		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
+		return BFA_STATUS_IOC_FAILURE;
+	}
+
+	if (ablk->busy) {
+		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
+		return  BFA_STATUS_DEVBUSY;
+	}
+
+	ablk->cbfn  = cbfn;
+	ablk->cbarg = cbarg;
+	ablk->busy  = BFA_TRUE;
+
+	m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
+	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_ENABLE,
+		bfa_ioc_portid(ablk->ioc));
+	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
+
+	return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
+{
+	struct bfi_ablk_h2i_optrom_s *m;
+
+	if (!bfa_ioc_is_operational(ablk->ioc)) {
+		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
+		return BFA_STATUS_IOC_FAILURE;
+	}
+
+	if (ablk->busy) {
+		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
+		return  BFA_STATUS_DEVBUSY;
+	}
+
+	ablk->cbfn  = cbfn;
+	ablk->cbarg = cbarg;
+	ablk->busy  = BFA_TRUE;
+
+	m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
+	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_DISABLE,
+		bfa_ioc_portid(ablk->ioc));
+	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
+
+	return BFA_STATUS_OK;
+}
+
+/*
+ *	SFP module specific
+ */
+
+/* forward declarations */
+static void bfa_sfp_getdata_send(struct bfa_sfp_s *sfp);
+static void bfa_sfp_media_get(struct bfa_sfp_s *sfp);
+static bfa_status_t bfa_sfp_speed_valid(struct bfa_sfp_s *sfp,
+				enum bfa_port_speed portspeed);
+
+static void
+bfa_cb_sfp_show(struct bfa_sfp_s *sfp)
+{
+	bfa_trc(sfp, sfp->lock);
+	if (sfp->cbfn)
+		sfp->cbfn(sfp->cbarg, sfp->status);
+	sfp->lock = 0;
+	sfp->cbfn = NULL;
+}
+
+static void
+bfa_cb_sfp_state_query(struct bfa_sfp_s *sfp)
+{
+	bfa_trc(sfp, sfp->portspeed);
+	if (sfp->media) {
+		bfa_sfp_media_get(sfp);
+		if (sfp->state_query_cbfn)
+			sfp->state_query_cbfn(sfp->state_query_cbarg,
+					sfp->status);
+		sfp->media = NULL;
+	}
+
+	if (sfp->portspeed) {
+		sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed);
+		if (sfp->state_query_cbfn)
+			sfp->state_query_cbfn(sfp->state_query_cbarg,
+					sfp->status);
+		sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
+	}
+
+	sfp->state_query_lock = 0;
+	sfp->state_query_cbfn = NULL;
+}
+
+/*
+ *	IOC event handler.
+ */
+static void
+bfa_sfp_notify(void *sfp_arg, enum bfa_ioc_event_e event)
+{
+	struct bfa_sfp_s *sfp = sfp_arg;
+
+	bfa_trc(sfp, event);
+	bfa_trc(sfp, sfp->lock);
+	bfa_trc(sfp, sfp->state_query_lock);
+
+	switch (event) {
+	case BFA_IOC_E_DISABLED:
+	case BFA_IOC_E_FAILED:
+		if (sfp->lock) {
+			sfp->status = BFA_STATUS_IOC_FAILURE;
+			bfa_cb_sfp_show(sfp);
+		}
+
+		if (sfp->state_query_lock) {
+			sfp->status = BFA_STATUS_IOC_FAILURE;
+			bfa_cb_sfp_state_query(sfp);
+		}
+		break;
+
+	default:
+		break;
+	}
+}
+
+/*
+ * SFP's State Change Notification post to AEN
+ */
+static void
+bfa_sfp_scn_aen_post(struct bfa_sfp_s *sfp, struct bfi_sfp_scn_s *rsp)
+{
+	struct bfad_s *bfad = (struct bfad_s *)sfp->ioc->bfa->bfad;
+	struct bfa_aen_entry_s  *aen_entry;
+	enum bfa_port_aen_event aen_evt = 0;
+
+	bfa_trc(sfp, (((u64)rsp->pomlvl) << 16) | (((u64)rsp->sfpid) << 8) |
+		      ((u64)rsp->event));
+
+	bfad_get_aen_entry(bfad, aen_entry);
+	if (!aen_entry)
+		return;
+
+	aen_entry->aen_data.port.ioc_type = bfa_ioc_get_type(sfp->ioc);
+	aen_entry->aen_data.port.pwwn = sfp->ioc->attr->pwwn;
+	aen_entry->aen_data.port.mac = bfa_ioc_get_mac(sfp->ioc);
+
+	switch (rsp->event) {
+	case BFA_SFP_SCN_INSERTED:
+		aen_evt = BFA_PORT_AEN_SFP_INSERT;
+		break;
+	case BFA_SFP_SCN_REMOVED:
+		aen_evt = BFA_PORT_AEN_SFP_REMOVE;
+		break;
+	case BFA_SFP_SCN_FAILED:
+		aen_evt = BFA_PORT_AEN_SFP_ACCESS_ERROR;
+		break;
+	case BFA_SFP_SCN_UNSUPPORT:
+		aen_evt = BFA_PORT_AEN_SFP_UNSUPPORT;
+		break;
+	case BFA_SFP_SCN_POM:
+		aen_evt = BFA_PORT_AEN_SFP_POM;
+		aen_entry->aen_data.port.level = rsp->pomlvl;
+		break;
+	default:
+		bfa_trc(sfp, rsp->event);
+		WARN_ON(1);
+	}
+
+	/* Send the AEN notification */
+	bfad_im_post_vendor_event(aen_entry, bfad, ++sfp->ioc->ioc_aen_seq,
+				  BFA_AEN_CAT_PORT, aen_evt);
+}
+
+/*
+ *	SFP get data send
+ */
+static void
+bfa_sfp_getdata_send(struct bfa_sfp_s *sfp)
+{
+	struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
+
+	bfa_trc(sfp, req->memtype);
+
+	/* build host command */
+	bfi_h2i_set(req->mh, BFI_MC_SFP, BFI_SFP_H2I_SHOW,
+			bfa_ioc_portid(sfp->ioc));
+
+	/* send mbox cmd */
+	bfa_ioc_mbox_queue(sfp->ioc, &sfp->mbcmd);
+}
+
+/*
+ *	SFP is valid, read sfp data
+ */
+static void
+bfa_sfp_getdata(struct bfa_sfp_s *sfp, enum bfi_sfp_mem_e memtype)
+{
+	struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
+
+	WARN_ON(sfp->lock != 0);
+	bfa_trc(sfp, sfp->state);
+
+	sfp->lock = 1;
+	sfp->memtype = memtype;
+	req->memtype = memtype;
+
+	/* Setup SG list */
+	bfa_alen_set(&req->alen, sizeof(struct sfp_mem_s), sfp->dbuf_pa);
+
+	bfa_sfp_getdata_send(sfp);
+}
+
+/*
+ *	SFP scn handler
+ */
+static void
+bfa_sfp_scn(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
+{
+	struct bfi_sfp_scn_s *rsp = (struct bfi_sfp_scn_s *) msg;
+
+	switch (rsp->event) {
+	case BFA_SFP_SCN_INSERTED:
+		sfp->state = BFA_SFP_STATE_INSERTED;
+		sfp->data_valid = 0;
+		bfa_sfp_scn_aen_post(sfp, rsp);
+		break;
+	case BFA_SFP_SCN_REMOVED:
+		sfp->state = BFA_SFP_STATE_REMOVED;
+		sfp->data_valid = 0;
+		bfa_sfp_scn_aen_post(sfp, rsp);
+		break;
+	case BFA_SFP_SCN_FAILED:
+		sfp->state = BFA_SFP_STATE_FAILED;
+		sfp->data_valid = 0;
+		bfa_sfp_scn_aen_post(sfp, rsp);
+		break;
+	case BFA_SFP_SCN_UNSUPPORT:
+		sfp->state = BFA_SFP_STATE_UNSUPPORT;
+		bfa_sfp_scn_aen_post(sfp, rsp);
+		if (!sfp->lock)
+			bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
+		break;
+	case BFA_SFP_SCN_POM:
+		bfa_sfp_scn_aen_post(sfp, rsp);
+		break;
+	case BFA_SFP_SCN_VALID:
+		sfp->state = BFA_SFP_STATE_VALID;
+		if (!sfp->lock)
+			bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
+		break;
+	default:
+		bfa_trc(sfp, rsp->event);
+		WARN_ON(1);
+	}
+}
+
+/*
+ * SFP show complete
+ */
+static void
+bfa_sfp_show_comp(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
+{
+	struct bfi_sfp_rsp_s *rsp = (struct bfi_sfp_rsp_s *) msg;
+
+	if (!sfp->lock) {
+		/*
+		 * receiving response after ioc failure
+		 */
+		bfa_trc(sfp, sfp->lock);
+		return;
+	}
+
+	bfa_trc(sfp, rsp->status);
+	if (rsp->status == BFA_STATUS_OK) {
+		sfp->data_valid = 1;
+		if (sfp->state == BFA_SFP_STATE_VALID)
+			sfp->status = BFA_STATUS_OK;
+		else if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
+			sfp->status = BFA_STATUS_SFP_UNSUPP;
+		else
+			bfa_trc(sfp, sfp->state);
+	} else {
+		sfp->data_valid = 0;
+		sfp->status = rsp->status;
+		/* sfpshow shouldn't change sfp state */
+	}
+
+	bfa_trc(sfp, sfp->memtype);
+	if (sfp->memtype == BFI_SFP_MEM_DIAGEXT) {
+		bfa_trc(sfp, sfp->data_valid);
+		if (sfp->data_valid) {
+			u32	size = sizeof(struct sfp_mem_s);
+			u8 *des = (u8 *)(sfp->sfpmem);
+			memcpy(des, sfp->dbuf_kva, size);
+		}
+		/*
+		 * Complete the pending show request via its callback.
+		 */
+		bfa_cb_sfp_show(sfp);
+	} else
+		sfp->lock = 0;
+
+	bfa_trc(sfp, sfp->state_query_lock);
+	if (sfp->state_query_lock) {
+		sfp->state = rsp->state;
+		/* Complete callback */
+		bfa_cb_sfp_state_query(sfp);
+	}
+}
+
+/*
+ *	SFP query fw sfp state
+ */
+static void
+bfa_sfp_state_query(struct bfa_sfp_s *sfp)
+{
+	struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
+
+	/* Should not be doing query if not in _INIT state */
+	WARN_ON(sfp->state != BFA_SFP_STATE_INIT);
+	WARN_ON(sfp->state_query_lock != 0);
+	bfa_trc(sfp, sfp->state);
+
+	sfp->state_query_lock = 1;
+	req->memtype = 0;
+
+	if (!sfp->lock)
+		bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
+}
+
+static void
+bfa_sfp_media_get(struct bfa_sfp_s *sfp)
+{
+	enum bfa_defs_sfp_media_e *media = sfp->media;
+
+	*media = BFA_SFP_MEDIA_UNKNOWN;
+
+	if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
+		*media = BFA_SFP_MEDIA_UNSUPPORT;
+	else if (sfp->state == BFA_SFP_STATE_VALID) {
+		union sfp_xcvr_e10g_code_u e10g;
+		struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
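+		/* assemble the 9-bit transmitter-technology field spanning two serial-ID bytes */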
+		u16 xmtr_tech = (sfpmem->srlid_base.xcvr[4] & 0x3) << 7 |
+				(sfpmem->srlid_base.xcvr[5] >> 1);
+
+		e10g.b = sfpmem->srlid_base.xcvr[0];
+		bfa_trc(sfp, e10g.b);
+		bfa_trc(sfp, xmtr_tech);
+		/* check fc transmitter tech */
+		if ((xmtr_tech & SFP_XMTR_TECH_CU) ||
+		    (xmtr_tech & SFP_XMTR_TECH_CP) ||
+		    (xmtr_tech & SFP_XMTR_TECH_CA))
+			*media = BFA_SFP_MEDIA_CU;
+		else if ((xmtr_tech & SFP_XMTR_TECH_EL_INTRA) ||
+			 (xmtr_tech & SFP_XMTR_TECH_EL_INTER))
+			*media = BFA_SFP_MEDIA_EL;
+		else if ((xmtr_tech & SFP_XMTR_TECH_LL) ||
+			 (xmtr_tech & SFP_XMTR_TECH_LC))
+			*media = BFA_SFP_MEDIA_LW;
+		else if ((xmtr_tech & SFP_XMTR_TECH_SL) ||
+			 (xmtr_tech & SFP_XMTR_TECH_SN) ||
+			 (xmtr_tech & SFP_XMTR_TECH_SA))
+			*media = BFA_SFP_MEDIA_SW;
+		/* Check 10G Ethernet Compliance code */
+		else if (e10g.r.e10g_sr)
+			*media = BFA_SFP_MEDIA_SW;
+		else if (e10g.r.e10g_lrm && e10g.r.e10g_lr)
+			*media = BFA_SFP_MEDIA_LW;
+		else if (e10g.r.e10g_unall)
+			*media = BFA_SFP_MEDIA_UNKNOWN;
+		else
+			bfa_trc(sfp, 0);
+	} else
+		bfa_trc(sfp, sfp->state);
+}
+
+static bfa_status_t
+bfa_sfp_speed_valid(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed)
+{
+	struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
+	struct sfp_xcvr_s *xcvr = (struct sfp_xcvr_s *) sfpmem->srlid_base.xcvr;
+	union sfp_xcvr_fc3_code_u fc3 = xcvr->fc3;
+	union sfp_xcvr_e10g_code_u e10g = xcvr->e10g;
+
+	if (portspeed == BFA_PORT_SPEED_10GBPS) {
+		if (e10g.r.e10g_sr || e10g.r.e10g_lr)
+			return BFA_STATUS_OK;
+		else {
+			bfa_trc(sfp, e10g.b);
+			return BFA_STATUS_UNSUPP_SPEED;
+		}
+	}
+	if (((portspeed & BFA_PORT_SPEED_16GBPS) && fc3.r.mb1600) ||
+	    ((portspeed & BFA_PORT_SPEED_8GBPS) && fc3.r.mb800) ||
+	    ((portspeed & BFA_PORT_SPEED_4GBPS) && fc3.r.mb400) ||
+	    ((portspeed & BFA_PORT_SPEED_2GBPS) && fc3.r.mb200) ||
+	    ((portspeed & BFA_PORT_SPEED_1GBPS) && fc3.r.mb100))
+		return BFA_STATUS_OK;
+	else {
+		bfa_trc(sfp, portspeed);
+		bfa_trc(sfp, fc3.b);
+		bfa_trc(sfp, e10g.b);
+		return BFA_STATUS_UNSUPP_SPEED;
+	}
+}
+
+/*
+ *	SFP hmbox handler
+ */
+void
+bfa_sfp_intr(void *sfparg, struct bfi_mbmsg_s *msg)
+{
+	struct bfa_sfp_s *sfp = sfparg;
+
+	switch (msg->mh.msg_id) {
+	case BFI_SFP_I2H_SHOW:
+		bfa_sfp_show_comp(sfp, msg);
+		break;
+
+	case BFI_SFP_I2H_SCN:
+		bfa_sfp_scn(sfp, msg);
+		break;
+
+	default:
+		bfa_trc(sfp, msg->mh.msg_id);
+		WARN_ON(1);
+	}
+}
+
+/*
+ *	Return the DMA memory size needed by the SFP module.
+ */
+u32
+bfa_sfp_meminfo(void)
+{
+	return BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ *	Attach virtual and physical memory for SFP.
+ */
+void
+bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc, void *dev,
+		struct bfa_trc_mod_s *trcmod)
+{
+	sfp->dev = dev;
+	sfp->ioc = ioc;
+	sfp->trcmod = trcmod;
+
+	sfp->cbfn = NULL;
+	sfp->cbarg = NULL;
+	sfp->sfpmem = NULL;
+	sfp->lock = 0;
+	sfp->data_valid = 0;
+	sfp->state = BFA_SFP_STATE_INIT;
+	sfp->state_query_lock = 0;
+	sfp->state_query_cbfn = NULL;
+	sfp->state_query_cbarg = NULL;
+	sfp->media = NULL;
+	sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
+	sfp->is_elb = BFA_FALSE;
+
+	bfa_ioc_mbox_regisr(sfp->ioc, BFI_MC_SFP, bfa_sfp_intr, sfp);
+	bfa_q_qe_init(&sfp->ioc_notify);
+	bfa_ioc_notify_init(&sfp->ioc_notify, bfa_sfp_notify, sfp);
+	list_add_tail(&sfp->ioc_notify.qe, &sfp->ioc->notify_q);
+}
+
+/*
+ *	Claim Memory for SFP
+ */
+void
+bfa_sfp_memclaim(struct bfa_sfp_s *sfp, u8 *dm_kva, u64 dm_pa)
+{
+	sfp->dbuf_kva   = dm_kva;
+	sfp->dbuf_pa    = dm_pa;
+	memset(sfp->dbuf_kva, 0, sizeof(struct sfp_mem_s));
+
+	dm_kva += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
+	dm_pa += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
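+	/* dm_kva and dm_pa are passed by value; the adjustments stay local here */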
+}
+
+/*
+ * Show SFP eeprom content
+ *
+ * @param[in] sfp   - bfa sfp module
+ *
+ * @param[out] sfpmem - sfp eeprom data
+ *
+ */
+bfa_status_t
+bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem,
+		bfa_cb_sfp_t cbfn, void *cbarg)
+{
+	if (!bfa_ioc_is_operational(sfp->ioc)) {
+		bfa_trc(sfp, 0);
+		return BFA_STATUS_IOC_NON_OP;
+	}
+
+	if (sfp->lock) {
+		bfa_trc(sfp, 0);
+		return BFA_STATUS_DEVBUSY;
+	}
+
+	sfp->cbfn = cbfn;
+	sfp->cbarg = cbarg;
+	sfp->sfpmem = sfpmem;
+
+	bfa_sfp_getdata(sfp, BFI_SFP_MEM_DIAGEXT);
+	return BFA_STATUS_OK;
+}
+
+/*
+ * Return SFP Media type
+ *
+ * @param[in] sfp   - bfa sfp module
+ *
+ * @param[out] media - detected SFP media type
+ *
+ */
+bfa_status_t
+bfa_sfp_media(struct bfa_sfp_s *sfp, enum bfa_defs_sfp_media_e *media,
+		bfa_cb_sfp_t cbfn, void *cbarg)
+{
+	if (!bfa_ioc_is_operational(sfp->ioc)) {
+		bfa_trc(sfp, 0);
+		return BFA_STATUS_IOC_NON_OP;
+	}
+
+	sfp->media = media;
+	if (sfp->state == BFA_SFP_STATE_INIT) {
+		if (sfp->state_query_lock) {
+			bfa_trc(sfp, 0);
+			return BFA_STATUS_DEVBUSY;
+		} else {
+			sfp->state_query_cbfn = cbfn;
+			sfp->state_query_cbarg = cbarg;
+			bfa_sfp_state_query(sfp);
+			return BFA_STATUS_SFP_NOT_READY;
+		}
+	}
+
+	bfa_sfp_media_get(sfp);
+	return BFA_STATUS_OK;
+}
+
+/*
+ * Check if the user-configured port speed is allowed by the SFP
+ *
+ * @param[in] sfp   - bfa sfp module
+ * @param[in] portspeed - port speed from user
+ *
+ */
+bfa_status_t
+bfa_sfp_speed(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed,
+		bfa_cb_sfp_t cbfn, void *cbarg)
+{
+	WARN_ON(portspeed == BFA_PORT_SPEED_UNKNOWN);
+
+	if (!bfa_ioc_is_operational(sfp->ioc))
+		return BFA_STATUS_IOC_NON_OP;
+
+	/* For Mezz card, all speed is allowed */
+	if (bfa_mfg_is_mezz(sfp->ioc->attr->card_type))
+		return BFA_STATUS_OK;
+
+	/* Check SFP state */
+	sfp->portspeed = portspeed;
+	if (sfp->state == BFA_SFP_STATE_INIT) {
+		if (sfp->state_query_lock) {
+			bfa_trc(sfp, 0);
+			return BFA_STATUS_DEVBUSY;
+		} else {
+			sfp->state_query_cbfn = cbfn;
+			sfp->state_query_cbarg = cbarg;
+			bfa_sfp_state_query(sfp);
+			return BFA_STATUS_SFP_NOT_READY;
+		}
+	}
+
+	if (sfp->state == BFA_SFP_STATE_REMOVED ||
+	    sfp->state == BFA_SFP_STATE_FAILED) {
+		bfa_trc(sfp, sfp->state);
+		return BFA_STATUS_NO_SFP_DEV;
+	}
+
+	if (sfp->state == BFA_SFP_STATE_INSERTED) {
+		bfa_trc(sfp, sfp->state);
+		return BFA_STATUS_DEVBUSY;  /* sfp is reading data */
+	}
+
+	/* For eloopback, all speed is allowed */
+	if (sfp->is_elb)
+		return BFA_STATUS_OK;
+
+	return bfa_sfp_speed_valid(sfp, portspeed);
+}
+
+/*
+ *	Flash module specific
+ */
+
+/*
+ * The FLASH DMA buffer must be big enough to hold both the MFG block
+ * and the ASIC block (64KB) at the same time, and must be 2KB aligned
+ * so that a write segment does not cross a sector boundary.
+ */
+#define BFA_FLASH_SEG_SZ	2048
+#define BFA_FLASH_DMA_BUF_SZ	\
+	BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ)
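+/* i.e. roundup(64KB ASIC block + MFG block, 2KB write segments) */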
+
+static void
+bfa_flash_aen_audit_post(struct bfa_ioc_s *ioc, enum bfa_audit_aen_event event,
+			int inst, int type)
+{
+	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
+	struct bfa_aen_entry_s  *aen_entry;
+
+	bfad_get_aen_entry(bfad, aen_entry);
+	if (!aen_entry)
+		return;
+
+	aen_entry->aen_data.audit.pwwn = ioc->attr->pwwn;
+	aen_entry->aen_data.audit.partition_inst = inst;
+	aen_entry->aen_data.audit.partition_type = type;
+
+	/* Send the AEN notification */
+	bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
+				  BFA_AEN_CAT_AUDIT, event);
+}
+
+static void
+bfa_flash_cb(struct bfa_flash_s *flash)
+{
+	flash->op_busy = 0;
+	if (flash->cbfn)
+		flash->cbfn(flash->cbarg, flash->status);
+}
+
+static void
+bfa_flash_notify(void *cbarg, enum bfa_ioc_event_e event)
+{
+	struct bfa_flash_s	*flash = cbarg;
+
+	bfa_trc(flash, event);
+	switch (event) {
+	case BFA_IOC_E_DISABLED:
+	case BFA_IOC_E_FAILED:
+		if (flash->op_busy) {
+			flash->status = BFA_STATUS_IOC_FAILURE;
+			flash->cbfn(flash->cbarg, flash->status);
+			flash->op_busy = 0;
+		}
+		break;
+
+	default:
+		break;
+	}
+}
+
+/*
+ * Send flash attribute query request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_flash_query_send(void *cbarg)
+{
+	struct bfa_flash_s *flash = cbarg;
+	struct bfi_flash_query_req_s *msg =
+			(struct bfi_flash_query_req_s *) flash->mb.msg;
+
+	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
+		bfa_ioc_portid(flash->ioc));
+	bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr_s),
+		flash->dbuf_pa);
+	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
+}
+
+/*
+ * Send flash write request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_flash_write_send(struct bfa_flash_s *flash)
+{
+	struct bfi_flash_write_req_s *msg =
+			(struct bfi_flash_write_req_s *) flash->mb.msg;
+	u32	len;
+
+	msg->type = be32_to_cpu(flash->type);
+	msg->instance = flash->instance;
+	msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
+	len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
+		flash->residue : BFA_FLASH_DMA_BUF_SZ;
+	msg->length = be32_to_cpu(len);
+
+	/* indicate if it's the last msg of the whole write operation */
+	msg->last = (len == flash->residue) ? 1 : 0;
+
+	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
+			bfa_ioc_portid(flash->ioc));
+	bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
+	memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
+	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
+
+	flash->residue -= len;
+	flash->offset += len;
+}
+
+/*
+ * Send flash read request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_flash_read_send(void *cbarg)
+{
+	struct bfa_flash_s *flash = cbarg;
+	struct bfi_flash_read_req_s *msg =
+			(struct bfi_flash_read_req_s *) flash->mb.msg;
+	u32	len;
+
+	msg->type = be32_to_cpu(flash->type);
+	msg->instance = flash->instance;
+	msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
+	len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
+			flash->residue : BFA_FLASH_DMA_BUF_SZ;
+	msg->length = be32_to_cpu(len);
+	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
+		bfa_ioc_portid(flash->ioc));
+	bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
+	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
+}
+
+/*
+ * Send flash erase request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_flash_erase_send(void *cbarg)
+{
+	struct bfa_flash_s *flash = cbarg;
+	struct bfi_flash_erase_req_s *msg =
+			(struct bfi_flash_erase_req_s *) flash->mb.msg;
+
+	msg->type = be32_to_cpu(flash->type);
+	msg->instance = flash->instance;
+	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_ERASE_REQ,
+			bfa_ioc_portid(flash->ioc));
+	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
+}
+
+/*
+ * Process flash response messages upon receiving interrupts.
+ *
+ * @param[in] flasharg - flash structure
+ * @param[in] msg - message structure
+ */
+static void
+bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
+{
+	struct bfa_flash_s *flash = flasharg;
+	u32	status;
+
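+	/* view the one raw mailbox message through each possible response layout */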
+	union {
+		struct bfi_flash_query_rsp_s *query;
+		struct bfi_flash_erase_rsp_s *erase;
+		struct bfi_flash_write_rsp_s *write;
+		struct bfi_flash_read_rsp_s *read;
+		struct bfi_flash_event_s *event;
+		struct bfi_mbmsg_s   *msg;
+	} m;
+
+	m.msg = msg;
+	bfa_trc(flash, msg->mh.msg_id);
+
+	if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) {
+		/* receiving response after ioc failure */
+		bfa_trc(flash, 0x9999);
+		return;
+	}
+
+	switch (msg->mh.msg_id) {
+	case BFI_FLASH_I2H_QUERY_RSP:
+		status = be32_to_cpu(m.query->status);
+		bfa_trc(flash, status);
+		if (status == BFA_STATUS_OK) {
+			u32	i;
+			struct bfa_flash_attr_s *attr, *f;
+
+			attr = (struct bfa_flash_attr_s *) flash->ubuf;
+			f = (struct bfa_flash_attr_s *) flash->dbuf_kva;
+			attr->status = be32_to_cpu(f->status);
+			attr->npart = be32_to_cpu(f->npart);
+			bfa_trc(flash, attr->status);
+			bfa_trc(flash, attr->npart);
+			for (i = 0; i < attr->npart; i++) {
+				attr->part[i].part_type =
+					be32_to_cpu(f->part[i].part_type);
+				attr->part[i].part_instance =
+					be32_to_cpu(f->part[i].part_instance);
+				attr->part[i].part_off =
+					be32_to_cpu(f->part[i].part_off);
+				attr->part[i].part_size =
+					be32_to_cpu(f->part[i].part_size);
+				attr->part[i].part_len =
+					be32_to_cpu(f->part[i].part_len);
+				attr->part[i].part_status =
+					be32_to_cpu(f->part[i].part_status);
+			}
+		}
+		flash->status = status;
+		bfa_flash_cb(flash);
+		break;
+	case BFI_FLASH_I2H_ERASE_RSP:
+		status = be32_to_cpu(m.erase->status);
+		bfa_trc(flash, status);
+		flash->status = status;
+		bfa_flash_cb(flash);
+		break;
+	case BFI_FLASH_I2H_WRITE_RSP:
+		status = be32_to_cpu(m.write->status);
+		bfa_trc(flash, status);
+		if (status != BFA_STATUS_OK || flash->residue == 0) {
+			flash->status = status;
+			bfa_flash_cb(flash);
+		} else {
+			bfa_trc(flash, flash->offset);
+			bfa_flash_write_send(flash);
+		}
+		break;
+	case BFI_FLASH_I2H_READ_RSP:
+		status = be32_to_cpu(m.read->status);
+		bfa_trc(flash, status);
+		if (status != BFA_STATUS_OK) {
+			flash->status = status;
+			bfa_flash_cb(flash);
+		} else {
+			u32 len = be32_to_cpu(m.read->length);
+			bfa_trc(flash, flash->offset);
+			bfa_trc(flash, len);
+			memcpy(flash->ubuf + flash->offset,
+				flash->dbuf_kva, len);
+			flash->residue -= len;
+			flash->offset += len;
+			if (flash->residue == 0) {
+				flash->status = status;
+				bfa_flash_cb(flash);
+			} else
+				bfa_flash_read_send(flash);
+		}
+		break;
+	case BFI_FLASH_I2H_BOOT_VER_RSP:
+		break;
+	case BFI_FLASH_I2H_EVENT:
+		status = be32_to_cpu(m.event->status);
+		bfa_trc(flash, status);
+		if (status == BFA_STATUS_BAD_FWCFG)
+			bfa_ioc_aen_post(flash->ioc, BFA_IOC_AEN_FWCFG_ERROR);
+		else if (status == BFA_STATUS_INVALID_VENDOR) {
+			u32 param;
+			param = be32_to_cpu(m.event->param);
+			bfa_trc(flash, param);
+			bfa_ioc_aen_post(flash->ioc,
+				BFA_IOC_AEN_INVALID_VENDOR);
+		}
+		break;
+
+	default:
+		WARN_ON(1);
+	}
+}
+
+/*
+ * Flash memory info API.
+ *
+ * @param[in] mincfg - minimal cfg variable
+ */
+u32
+bfa_flash_meminfo(bfa_boolean_t mincfg)
+{
+	/* min driver doesn't need flash */
+	if (mincfg)
+		return 0;
+	return BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * Flash attach API.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] ioc  - ioc structure
+ * @param[in] dev  - device structure
+ * @param[in] trcmod - trace module
+ * @param[in] mincfg - minimal cfg variable
+ */
+void
+bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc, void *dev,
+		struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
+{
+	flash->ioc = ioc;
+	flash->trcmod = trcmod;
+	flash->cbfn = NULL;
+	flash->cbarg = NULL;
+	flash->op_busy = 0;
+
+	bfa_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
+	bfa_q_qe_init(&flash->ioc_notify);
+	bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
+	list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
+
+	/* min driver doesn't need flash */
+	if (mincfg) {
+		flash->dbuf_kva = NULL;
+		flash->dbuf_pa = 0;
+	}
+}
+
+/*
+ * Claim memory for flash
+ *
+ * @param[in] flash - flash structure
+ * @param[in] dm_kva - pointer to virtual memory address
+ * @param[in] dm_pa - physical memory address
+ * @param[in] mincfg - minimal cfg variable
+ */
+void
+bfa_flash_memclaim(struct bfa_flash_s *flash, u8 *dm_kva, u64 dm_pa,
+		bfa_boolean_t mincfg)
+{
+	if (mincfg)
+		return;
+
+	flash->dbuf_kva = dm_kva;
+	flash->dbuf_pa = dm_pa;
+	memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
+	dm_kva += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+	dm_pa += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * Get flash attribute.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] attr - flash attribute structure
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_flash_get_attr(struct bfa_flash_s *flash, struct bfa_flash_attr_s *attr,
+		bfa_cb_flash_t cbfn, void *cbarg)
+{
+	bfa_trc(flash, BFI_FLASH_H2I_QUERY_REQ);
+
+	if (!bfa_ioc_is_operational(flash->ioc))
+		return BFA_STATUS_IOC_NON_OP;
+
+	if (flash->op_busy) {
+		bfa_trc(flash, flash->op_busy);
+		return BFA_STATUS_DEVBUSY;
+	}
+
+	flash->op_busy = 1;
+	flash->cbfn = cbfn;
+	flash->cbarg = cbarg;
+	flash->ubuf = (u8 *) attr;
+	bfa_flash_query_send(flash);
+
+	return BFA_STATUS_OK;
+}
+
+/*
+ * Erase flash partition.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] type - flash partition type
+ * @param[in] instance - flash partition instance
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_flash_erase_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
+		u8 instance, bfa_cb_flash_t cbfn, void *cbarg)
+{
+	bfa_trc(flash, BFI_FLASH_H2I_ERASE_REQ);
+	bfa_trc(flash, type);
+	bfa_trc(flash, instance);
+
+	if (!bfa_ioc_is_operational(flash->ioc))
+		return BFA_STATUS_IOC_NON_OP;
+
+	if (flash->op_busy) {
+		bfa_trc(flash, flash->op_busy);
+		return BFA_STATUS_DEVBUSY;
+	}
+
+	flash->op_busy = 1;
+	flash->cbfn = cbfn;
+	flash->cbarg = cbarg;
+	flash->type = type;
+	flash->instance = instance;
+
+	bfa_flash_erase_send(flash);
+	bfa_flash_aen_audit_post(flash->ioc, BFA_AUDIT_AEN_FLASH_ERASE,
+				instance, type);
+	return BFA_STATUS_OK;
+}
+
+/*
+ * Update flash partition.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] type - flash partition type
+ * @param[in] instance - flash partition instance
+ * @param[in] buf - update data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to the partition starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_flash_update_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
+		u8 instance, void *buf, u32 len, u32 offset,
+		bfa_cb_flash_t cbfn, void *cbarg)
+{
+	bfa_trc(flash, BFI_FLASH_H2I_WRITE_REQ);
+	bfa_trc(flash, type);
+	bfa_trc(flash, instance);
+	bfa_trc(flash, len);
+	bfa_trc(flash, offset);
+
+	if (!bfa_ioc_is_operational(flash->ioc))
+		return BFA_STATUS_IOC_NON_OP;
+
+	/*
+	 * 'len' must be on a word (4-byte) boundary
+	 * 'offset' must be on a sector (16KB) boundary
+	 */
+	if (!len || (len & 0x03) || (offset & 0x00003FFF))
+		return BFA_STATUS_FLASH_BAD_LEN;
+
+	if (type == BFA_FLASH_PART_MFG)
+		return BFA_STATUS_EINVAL;
+
+	if (flash->op_busy) {
+		bfa_trc(flash, flash->op_busy);
+		return BFA_STATUS_DEVBUSY;
+	}
+
+	flash->op_busy = 1;
+	flash->cbfn = cbfn;
+	flash->cbarg = cbarg;
+	flash->type = type;
+	flash->instance = instance;
+	flash->residue = len;
+	flash->offset = 0;
+	flash->addr_off = offset;
+	flash->ubuf = buf;
+
+	bfa_flash_write_send(flash);
+	return BFA_STATUS_OK;
+}
+
+/*
+ * Read flash partition.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] type - flash partition type
+ * @param[in] instance - flash partition instance
+ * @param[in] buf - read data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to the partition starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_flash_read_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
+		u8 instance, void *buf, u32 len, u32 offset,
+		bfa_cb_flash_t cbfn, void *cbarg)
+{
+	bfa_trc(flash, BFI_FLASH_H2I_READ_REQ);
+	bfa_trc(flash, type);
+	bfa_trc(flash, instance);
+	bfa_trc(flash, len);
+	bfa_trc(flash, offset);
+
+	if (!bfa_ioc_is_operational(flash->ioc))
+		return BFA_STATUS_IOC_NON_OP;
+
+	/*
+	 * 'len' must be on a word (4-byte) boundary
+	 * 'offset' must be on a sector (16KB) boundary
+	 */
+	if (!len || (len & 0x03) || (offset & 0x00003FFF))
+		return BFA_STATUS_FLASH_BAD_LEN;
+
+	if (flash->op_busy) {
+		bfa_trc(flash, flash->op_busy);
+		return BFA_STATUS_DEVBUSY;
+	}
+
+	flash->op_busy = 1;
+	flash->cbfn = cbfn;
+	flash->cbarg = cbarg;
+	flash->type = type;
+	flash->instance = instance;
+	flash->residue = len;
+	flash->offset = 0;
+	flash->addr_off = offset;
+	flash->ubuf = buf;
+	bfa_flash_read_send(flash);
+
+	return BFA_STATUS_OK;
+}
+
+/*
+ *	DIAG module specific
+ */
+
+#define BFA_DIAG_MEMTEST_TOV	50000	/* memtest timeout in msec */
+#define CT2_BFA_DIAG_MEMTEST_TOV	(9*30*1000)  /* 4.5 min */
+
+/* IOC event handler */
+static void
+bfa_diag_notify(void *diag_arg, enum bfa_ioc_event_e event)
+{
+	struct bfa_diag_s *diag = diag_arg;
+
+	bfa_trc(diag, event);
+	bfa_trc(diag, diag->block);
+	bfa_trc(diag, diag->fwping.lock);
+	bfa_trc(diag, diag->tsensor.lock);
+
+	switch (event) {
+	case BFA_IOC_E_DISABLED:
+	case BFA_IOC_E_FAILED:
+		if (diag->fwping.lock) {
+			diag->fwping.status = BFA_STATUS_IOC_FAILURE;
+			diag->fwping.cbfn(diag->fwping.cbarg,
+					diag->fwping.status);
+			diag->fwping.lock = 0;
+		}
+
+		if (diag->tsensor.lock) {
+			diag->tsensor.status = BFA_STATUS_IOC_FAILURE;
+			diag->tsensor.cbfn(diag->tsensor.cbarg,
+					   diag->tsensor.status);
+			diag->tsensor.lock = 0;
+		}
+
+		if (diag->block) {
+			if (diag->timer_active) {
+				bfa_timer_stop(&diag->timer);
+				diag->timer_active = 0;
+			}
+
+			diag->status = BFA_STATUS_IOC_FAILURE;
+			diag->cbfn(diag->cbarg, diag->status);
+			diag->block = 0;
+		}
+		break;
+
+	default:
+		break;
+	}
+}
+
+static void
+bfa_diag_memtest_done(void *cbarg)
+{
+	struct bfa_diag_s *diag = cbarg;
+	struct bfa_ioc_s  *ioc = diag->ioc;
+	struct bfa_diag_memtest_result *res = diag->result;
+	u32	loff = BFI_BOOT_MEMTEST_RES_ADDR;
+	u32	pgnum, pgoff, i;
+
+	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
+	pgoff = PSS_SMEM_PGOFF(loff);
+
+	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+
+	for (i = 0; i < (sizeof(struct bfa_diag_memtest_result) /
+			 sizeof(u32)); i++) {
+		/* read test result from smem */
+		*((u32 *) res + i) =
+			bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
+		loff += sizeof(u32);
+	}
+
+	/* Reset IOC fwstates to BFI_IOC_UNINIT */
+	bfa_ioc_reset_fwstate(ioc);
+
+	res->status = swab32(res->status);
+	bfa_trc(diag, res->status);
+
+	if (res->status == BFI_BOOT_MEMTEST_RES_SIG)
+		diag->status = BFA_STATUS_OK;
+	else {
+		diag->status = BFA_STATUS_MEMTEST_FAILED;
+		res->addr = swab32(res->addr);
+		res->exp = swab32(res->exp);
+		res->act = swab32(res->act);
+		res->err_status = swab32(res->err_status);
+		res->err_status1 = swab32(res->err_status1);
+		res->err_addr = swab32(res->err_addr);
+		bfa_trc(diag, res->addr);
+		bfa_trc(diag, res->exp);
+		bfa_trc(diag, res->act);
+		bfa_trc(diag, res->err_status);
+		bfa_trc(diag, res->err_status1);
+		bfa_trc(diag, res->err_addr);
+	}
+	diag->timer_active = 0;
+	diag->cbfn(diag->cbarg, diag->status);
+	diag->block = 0;
+}
+
+/*
+ * Firmware ping
+ */
+
+/*
+ * Perform DMA test directly
+ */
+static void
+diag_fwping_send(struct bfa_diag_s *diag)
+{
+	struct bfi_diag_fwping_req_s *fwping_req;
+	u32	i;
+
+	bfa_trc(diag, diag->fwping.dbuf_pa);
+
+	/* fill DMA area with pattern */
+	for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++)
+		*((u32 *)diag->fwping.dbuf_kva + i) = diag->fwping.data;
+
+	/* Fill mbox msg */
+	fwping_req = (struct bfi_diag_fwping_req_s *)diag->fwping.mbcmd.msg;
+
+	/* Setup SG list */
+	bfa_alen_set(&fwping_req->alen, BFI_DIAG_DMA_BUF_SZ,
+			diag->fwping.dbuf_pa);
+	/* Set up dma count */
+	fwping_req->count = cpu_to_be32(diag->fwping.count);
+	/* Set up data pattern */
+	fwping_req->data = diag->fwping.data;
+
+	/* build host command */
+	bfi_h2i_set(fwping_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_FWPING,
+		bfa_ioc_portid(diag->ioc));
+
+	/* send mbox cmd */
+	bfa_ioc_mbox_queue(diag->ioc, &diag->fwping.mbcmd);
+}
+
+static void
+diag_fwping_comp(struct bfa_diag_s *diag,
+		 struct bfi_diag_fwping_rsp_s *diag_rsp)
+{
+	u32	rsp_data = diag_rsp->data;
+	u8	rsp_dma_status = diag_rsp->dma_status;
+
+	bfa_trc(diag, rsp_data);
+	bfa_trc(diag, rsp_dma_status);
+
+	if (rsp_dma_status == BFA_STATUS_OK) {
+		u32	i, pat;
+		pat = (diag->fwping.count & 0x1) ? ~(diag->fwping.data) :
+			diag->fwping.data;
+		/* Check mbox data */
+		if (diag->fwping.data != rsp_data) {
+			bfa_trc(diag, rsp_data);
+			diag->fwping.result->dmastatus =
+					BFA_STATUS_DATACORRUPTED;
+			diag->fwping.status = BFA_STATUS_DATACORRUPTED;
+			diag->fwping.cbfn(diag->fwping.cbarg,
+					diag->fwping.status);
+			diag->fwping.lock = 0;
+			return;
+		}
+		/* Check dma pattern */
+		for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++) {
+			if (*((u32 *)diag->fwping.dbuf_kva + i) != pat) {
+				bfa_trc(diag, i);
+				bfa_trc(diag, pat);
+				bfa_trc(diag,
+					*((u32 *)diag->fwping.dbuf_kva + i));
+				diag->fwping.result->dmastatus =
+						BFA_STATUS_DATACORRUPTED;
+				diag->fwping.status = BFA_STATUS_DATACORRUPTED;
+				diag->fwping.cbfn(diag->fwping.cbarg,
+						diag->fwping.status);
+				diag->fwping.lock = 0;
+				return;
+			}
+		}
+		diag->fwping.result->dmastatus = BFA_STATUS_OK;
+		diag->fwping.status = BFA_STATUS_OK;
+		diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
+		diag->fwping.lock = 0;
+	} else {
+		diag->fwping.status = BFA_STATUS_HDMA_FAILED;
+		diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
+		diag->fwping.lock = 0;
+	}
+}
+
+/*
+ * Temperature Sensor
+ */
+
+static void
+diag_tempsensor_send(struct bfa_diag_s *diag)
+{
+	struct bfi_diag_ts_req_s *msg;
+
+	msg = (struct bfi_diag_ts_req_s *)diag->tsensor.mbcmd.msg;
+	bfa_trc(diag, msg->temp);
+	/* build host command */
+	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_TEMPSENSOR,
+		bfa_ioc_portid(diag->ioc));
+	/* send mbox cmd */
+	bfa_ioc_mbox_queue(diag->ioc, &diag->tsensor.mbcmd);
+}
+
+static void
+diag_tempsensor_comp(struct bfa_diag_s *diag, bfi_diag_ts_rsp_t *rsp)
+{
+	if (!diag->tsensor.lock) {
+		/* receiving response after ioc failure */
+		bfa_trc(diag, diag->tsensor.lock);
+		return;
+	}
+
+	/*
+	 * The ASIC junction tempsensor is a register read operation,
+	 * so it always returns OK
+	 */
+	diag->tsensor.temp->temp = be16_to_cpu(rsp->temp);
+	diag->tsensor.temp->ts_junc = rsp->ts_junc;
+	diag->tsensor.temp->ts_brd = rsp->ts_brd;
+
+	if (rsp->ts_brd) {
+		/* tsensor.temp->status is brd_temp status */
+		diag->tsensor.temp->status = rsp->status;
+		if (rsp->status == BFA_STATUS_OK) {
+			diag->tsensor.temp->brd_temp =
+				be16_to_cpu(rsp->brd_temp);
+		} else
+			diag->tsensor.temp->brd_temp = 0;
+	}
+
+	bfa_trc(diag, rsp->status);
+	bfa_trc(diag, rsp->ts_junc);
+	bfa_trc(diag, rsp->temp);
+	bfa_trc(diag, rsp->ts_brd);
+	bfa_trc(diag, rsp->brd_temp);
+
+	/* tsensor status is always good because we always have the junction temp */
+	diag->tsensor.status = BFA_STATUS_OK;
+	diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status);
+	diag->tsensor.lock = 0;
+}
+
+/*
+ *	LED Test command
+ */
+static void
+diag_ledtest_send(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
+{
+	struct bfi_diag_ledtest_req_s  *msg;
+
+	msg = (struct bfi_diag_ledtest_req_s *)diag->ledtest.mbcmd.msg;
+	/* build host command */
+	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LEDTEST,
+			bfa_ioc_portid(diag->ioc));
+
+	/*
+	 * convert the freq from N blinks per 10 sec to the crossbow
+	 * on-time value; the division is done here on the host
+	 */
+	if (ledtest->freq)
+		ledtest->freq = 500 / ledtest->freq;
+
+	if (ledtest->freq == 0)
+		ledtest->freq = 1;
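+	/* e.g. 10 blinks per 10 sec gives 500 / 10 = 50 on-time units; 0 is clamped to 1 */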
+
+	bfa_trc(diag, ledtest->freq);
+	msg->cmd = (u8) ledtest->cmd;
+	msg->color = (u8) ledtest->color;
+	msg->portid = bfa_ioc_portid(diag->ioc);
+	msg->led = ledtest->led;
+	msg->freq = cpu_to_be16(ledtest->freq);
+
+	/* send mbox cmd */
+	bfa_ioc_mbox_queue(diag->ioc, &diag->ledtest.mbcmd);
+}
+
+static void
+diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s *msg)
+{
+	bfa_trc(diag, diag->ledtest.lock);
+	diag->ledtest.lock = BFA_FALSE;
+	/* no bfa_cb_queue is needed because the driver is not waiting */
+}
+
+/*
+ * Port beaconing
+ */
+static void
+diag_portbeacon_send(struct bfa_diag_s *diag, bfa_boolean_t beacon, u32 sec)
+{
+	struct bfi_diag_portbeacon_req_s *msg;
+
+	msg = (struct bfi_diag_portbeacon_req_s *)diag->beacon.mbcmd.msg;
+	/* build host command */
+	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_PORTBEACON,
+		bfa_ioc_portid(diag->ioc));
+	msg->beacon = beacon;
+	msg->period = cpu_to_be32(sec);
+	/* send mbox cmd */
+	bfa_ioc_mbox_queue(diag->ioc, &diag->beacon.mbcmd);
+}
+
+static void
+diag_portbeacon_comp(struct bfa_diag_s *diag)
+{
+	bfa_trc(diag, diag->beacon.state);
+	diag->beacon.state = BFA_FALSE;
+	if (diag->cbfn_beacon)
+		diag->cbfn_beacon(diag->dev, BFA_FALSE, diag->beacon.link_e2e);
+}
+
+/*
+ *	Diag hmbox handler
+ */
+void
+bfa_diag_intr(void *diagarg, struct bfi_mbmsg_s *msg)
+{
+	struct bfa_diag_s *diag = diagarg;
+
+	switch (msg->mh.msg_id) {
+	case BFI_DIAG_I2H_PORTBEACON:
+		diag_portbeacon_comp(diag);
+		break;
+	case BFI_DIAG_I2H_FWPING:
+		diag_fwping_comp(diag, (struct bfi_diag_fwping_rsp_s *) msg);
+		break;
+	case BFI_DIAG_I2H_TEMPSENSOR:
+		diag_tempsensor_comp(diag, (bfi_diag_ts_rsp_t *) msg);
+		break;
+	case BFI_DIAG_I2H_LEDTEST:
+		diag_ledtest_comp(diag, (struct bfi_diag_ledtest_rsp_s *) msg);
+		break;
+	default:
+		bfa_trc(diag, msg->mh.msg_id);
+		WARN_ON(1);
+	}
+}
+
+/*
+ * Gen RAM Test
+ *
+ *   @param[in] *diag           - diag data struct
+ *   @param[in] *memtest        - mem test params input from upper layer,
+ *   @param[in] pattern         - mem test pattern
+ *   @param[in] *result         - mem test result
+ *   @param[in] cbfn            - mem test callback function
+ *   @param[in] cbarg           - callback function arg
+ *
+ *   @param[out]
+ */
+bfa_status_t
+bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest,
+		u32 pattern, struct bfa_diag_memtest_result *result,
+		bfa_cb_diag_t cbfn, void *cbarg)
+{
+	u32	memtest_tov;
+
+	bfa_trc(diag, pattern);
+
+	if (!bfa_ioc_adapter_is_disabled(diag->ioc))
+		return BFA_STATUS_ADAPTER_ENABLED;
+
+	/* check to see if there is another destructive diag cmd running */
+	if (diag->block) {
+		bfa_trc(diag, diag->block);
+		return BFA_STATUS_DEVBUSY;
+	}
+	diag->block = 1;
+
+	diag->result = result;
+	diag->cbfn = cbfn;
+	diag->cbarg = cbarg;
+
+	/* download memtest code and take LPU0 out of reset */
+	bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS);
+
+	memtest_tov = (bfa_ioc_asic_gen(diag->ioc) == BFI_ASIC_GEN_CT2) ?
+		       CT2_BFA_DIAG_MEMTEST_TOV : BFA_DIAG_MEMTEST_TOV;
+	bfa_timer_begin(diag->ioc->timer_mod, &diag->timer,
+			bfa_diag_memtest_done, diag, memtest_tov);
+	diag->timer_active = 1;
+	return BFA_STATUS_OK;
+}
+
+/*
+ * DIAG firmware ping command
+ *
+ *   @param[in] *diag           - diag data struct
+ *   @param[in] cnt             - dma loop count for testing PCIE
+ *   @param[in] data            - data pattern to pass to fw
+ *   @param[in] *result         - pointer to bfa_diag_fwping_result_t data struct
+ *   @param[in] cbfn            - callback function
+ *   @param[in] *cbarg          - callback function arg
+ *
+ *   @param[out]
+ */
+bfa_status_t
+bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt, u32 data,
+		struct bfa_diag_results_fwping *result, bfa_cb_diag_t cbfn,
+		void *cbarg)
+{
+	bfa_trc(diag, cnt);
+	bfa_trc(diag, data);
+
+	if (!bfa_ioc_is_operational(diag->ioc))
+		return BFA_STATUS_IOC_NON_OP;
+
+	if (bfa_asic_id_ct2(bfa_ioc_devid((diag->ioc))) &&
+	    ((diag->ioc)->clscode == BFI_PCIFN_CLASS_ETH))
+		return BFA_STATUS_CMD_NOTSUPP;
+
+	/* check to see if there is another destructive diag cmd running */
+	if (diag->block || diag->fwping.lock) {
+		bfa_trc(diag, diag->block);
+		bfa_trc(diag, diag->fwping.lock);
+		return BFA_STATUS_DEVBUSY;
+	}
+
+	/* Initialization */
+	diag->fwping.lock = 1;
+	diag->fwping.cbfn = cbfn;
+	diag->fwping.cbarg = cbarg;
+	diag->fwping.result = result;
+	diag->fwping.data = data;
+	diag->fwping.count = cnt;
+
+	/* Init test results */
+	diag->fwping.result->data = 0;
+	diag->fwping.result->status = BFA_STATUS_OK;
+
+	/* kick off the first ping */
+	diag_fwping_send(diag);
+	return BFA_STATUS_OK;
+}
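+
+/*
+ * Minimal usage sketch (illustrative only; the callback and result
+ * variable names are hypothetical, not part of this driver):
+ *
+ *	static void my_fwping_done(void *cbarg, bfa_status_t status)
+ *	{
+ *		struct bfa_diag_results_fwping *res = cbarg;
+ *
+ *		if (status == BFA_STATUS_OK && res->status == BFA_STATUS_OK)
+ *			pr_info("fwping ok, data 0x%x\n", res->data);
+ *	}
+ *
+ *	status = bfa_diag_fwping(diag, 10, 0xa5a5a5a5, &res,
+ *				 my_fwping_done, &res);
+ *
+ * The call returns immediately; the callback runs from the diag mbox
+ * handler once all 10 DMA loops complete or one of them fails.
+ */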
+
+/*
+ * Read Temperature Sensor
+ *
+ *   @param[in] *diag           - diag data struct
+ *   @param[in] *result         - pointer to bfa_diag_temp_t data struct
+ *   @param[in] cbfn            - callback function
+ *   @param[in] *cbarg          - callback function arg
+ *
+ *   @param[out]
+ */
+bfa_status_t
+bfa_diag_tsensor_query(struct bfa_diag_s *diag,
+		struct bfa_diag_results_tempsensor_s *result,
+		bfa_cb_diag_t cbfn, void *cbarg)
+{
+	/* check to see if there is a destructive diag cmd running */
+	if (diag->block || diag->tsensor.lock) {
+		bfa_trc(diag, diag->block);
+		bfa_trc(diag, diag->tsensor.lock);
+		return BFA_STATUS_DEVBUSY;
+	}
+
+	if (!bfa_ioc_is_operational(diag->ioc))
+		return BFA_STATUS_IOC_NON_OP;
+
+	/* Init diag mod params */
+	diag->tsensor.lock = 1;
+	diag->tsensor.temp = result;
+	diag->tsensor.cbfn = cbfn;
+	diag->tsensor.cbarg = cbarg;
+	diag->tsensor.status = BFA_STATUS_OK;
+
+	/* Send msg to fw */
+	diag_tempsensor_send(diag);
+
+	return BFA_STATUS_OK;
+}
+
+/*
+ * LED Test command
+ *
+ *   @param[in] *diag           - diag data struct
+ *   @param[in] *ledtest        - pointer to ledtest data structure
+ *
+ *   @param[out]
+ */
+bfa_status_t
+bfa_diag_ledtest(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
+{
+	bfa_trc(diag, ledtest->cmd);
+
+	if (!bfa_ioc_is_operational(diag->ioc))
+		return BFA_STATUS_IOC_NON_OP;
+
+	if (diag->beacon.state)
+		return BFA_STATUS_BEACON_ON;
+
+	if (diag->ledtest.lock)
+		return BFA_STATUS_LEDTEST_OP;
+
+	/* Send msg to fw */
+	diag->ledtest.lock = BFA_TRUE;
+	diag_ledtest_send(diag, ledtest);
+
+	return BFA_STATUS_OK;
+}
+
+/*
+ * Port beaconing command
+ *
+ *   @param[in] *diag           - diag data struct
+ *   @param[in] beacon          - port beaconing 1:ON   0:OFF
+ *   @param[in] link_e2e_beacon - link beaconing 1:ON   0:OFF
+ *   @param[in] sec             - beaconing duration in seconds
+ *
+ *   @param[out]
+ */
+bfa_status_t
+bfa_diag_beacon_port(struct bfa_diag_s *diag, bfa_boolean_t beacon,
+		bfa_boolean_t link_e2e_beacon, uint32_t sec)
+{
+	bfa_trc(diag, beacon);
+	bfa_trc(diag, link_e2e_beacon);
+	bfa_trc(diag, sec);
+
+	if (!bfa_ioc_is_operational(diag->ioc))
+		return BFA_STATUS_IOC_NON_OP;
+
+	if (diag->ledtest.lock)
+		return BFA_STATUS_LEDTEST_OP;
+
+	if (diag->beacon.state && beacon)       /* beacon already on */
+		return BFA_STATUS_BEACON_ON;
+
+	diag->beacon.state	= beacon;
+	diag->beacon.link_e2e	= link_e2e_beacon;
+	if (diag->cbfn_beacon)
+		diag->cbfn_beacon(diag->dev, beacon, link_e2e_beacon);
+
+	/* Send msg to fw */
+	diag_portbeacon_send(diag, beacon, sec);
+
+	return BFA_STATUS_OK;
+}
+
+/*
+ * Return DMA memory needed by diag module.
+ */
+u32
+bfa_diag_meminfo(void)
+{
+	return BFA_ROUNDUP(BFI_DIAG_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ *	Attach virtual and physical memory for Diag.
+ */
+void
+bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev,
+	bfa_cb_diag_beacon_t cbfn_beacon, struct bfa_trc_mod_s *trcmod)
+{
+	diag->dev = dev;
+	diag->ioc = ioc;
+	diag->trcmod = trcmod;
+
+	diag->block = 0;
+	diag->cbfn = NULL;
+	diag->cbarg = NULL;
+	diag->result = NULL;
+	diag->cbfn_beacon = cbfn_beacon;
+
+	bfa_ioc_mbox_regisr(diag->ioc, BFI_MC_DIAG, bfa_diag_intr, diag);
+	bfa_q_qe_init(&diag->ioc_notify);
+	bfa_ioc_notify_init(&diag->ioc_notify, bfa_diag_notify, diag);
+	list_add_tail(&diag->ioc_notify.qe, &diag->ioc->notify_q);
+}
+
+void
+bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa)
+{
+	diag->fwping.dbuf_kva = dm_kva;
+	diag->fwping.dbuf_pa = dm_pa;
+	memset(diag->fwping.dbuf_kva, 0, BFI_DIAG_DMA_BUF_SZ);
+}
+
+/*
+ *	PHY module specific
+ */
+#define BFA_PHY_DMA_BUF_SZ	0x02000         /* 8k dma buffer */
+#define BFA_PHY_LOCK_STATUS	0x018878        /* phy semaphore status reg */
+
+static void
+bfa_phy_ntoh32(u32 *obuf, u32 *ibuf, int sz)
+{
+	int i, m = sz >> 2;
+
+	for (i = 0; i < m; i++)
+		obuf[i] = be32_to_cpu(ibuf[i]);
+}
+
+static bfa_boolean_t
+bfa_phy_present(struct bfa_phy_s *phy)
+{
+	return (phy->ioc->attr->card_type == BFA_MFG_TYPE_LIGHTNING);
+}
+
+static void
+bfa_phy_notify(void *cbarg, enum bfa_ioc_event_e event)
+{
+	struct bfa_phy_s *phy = cbarg;
+
+	bfa_trc(phy, event);
+
+	switch (event) {
+	case BFA_IOC_E_DISABLED:
+	case BFA_IOC_E_FAILED:
+		if (phy->op_busy) {
+			phy->status = BFA_STATUS_IOC_FAILURE;
+			phy->cbfn(phy->cbarg, phy->status);
+			phy->op_busy = 0;
+		}
+		break;
+
+	default:
+		break;
+	}
+}
+
+/*
+ * Send phy attribute query request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_phy_query_send(void *cbarg)
+{
+	struct bfa_phy_s *phy = cbarg;
+	struct bfi_phy_query_req_s *msg =
+			(struct bfi_phy_query_req_s *) phy->mb.msg;
+
+	msg->instance = phy->instance;
+	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_QUERY_REQ,
+		bfa_ioc_portid(phy->ioc));
+	bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_attr_s), phy->dbuf_pa);
+	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
+}
+
+/*
+ * Send phy write request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_phy_write_send(void *cbarg)
+{
+	struct bfa_phy_s *phy = cbarg;
+	struct bfi_phy_write_req_s *msg =
+			(struct bfi_phy_write_req_s *) phy->mb.msg;
+	u32	len;
+	u16	*buf, *dbuf;
+	int	i, sz;
+
+	msg->instance = phy->instance;
+	msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
+	len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
+			phy->residue : BFA_PHY_DMA_BUF_SZ;
+	msg->length = cpu_to_be32(len);
+
+	/* indicate if it's the last msg of the whole write operation */
+	msg->last = (len == phy->residue) ? 1 : 0;
+
+	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_WRITE_REQ,
+		bfa_ioc_portid(phy->ioc));
+	bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
+
+	/* copy this chunk from the caller's buffer into the DMA buffer */
+	buf = (u16 *) (phy->ubuf + phy->offset);
+	dbuf = (u16 *)phy->dbuf_kva;
+	sz = len >> 1;
+	for (i = 0; i < sz; i++)
+		dbuf[i] = cpu_to_be16(buf[i]);
+
+	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
+
+	phy->residue -= len;
+	phy->offset += len;
+}
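+
+/*
+ * Chunking example for the write path above (illustrative): updating a
+ * 20 KB image through the 8 KB DMA buffer takes three mailbox requests
+ * of 8 KB, 8 KB and 4 KB; 'last' is 0, 0 and 1 respectively, while
+ * residue shrinks 20K -> 12K -> 4K -> 0 and offset advances by each
+ * chunk length.
+ */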
+
+/*
+ * Send phy read request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_phy_read_send(void *cbarg)
+{
+	struct bfa_phy_s *phy = cbarg;
+	struct bfi_phy_read_req_s *msg =
+			(struct bfi_phy_read_req_s *) phy->mb.msg;
+	u32	len;
+
+	msg->instance = phy->instance;
+	msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
+	len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
+			phy->residue : BFA_PHY_DMA_BUF_SZ;
+	msg->length = cpu_to_be32(len);
+	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_READ_REQ,
+		bfa_ioc_portid(phy->ioc));
+	bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
+	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
+}
+
+/*
+ * Send phy stats request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_phy_stats_send(void *cbarg)
+{
+	struct bfa_phy_s *phy = cbarg;
+	struct bfi_phy_stats_req_s *msg =
+			(struct bfi_phy_stats_req_s *) phy->mb.msg;
+
+	msg->instance = phy->instance;
+	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_STATS_REQ,
+		bfa_ioc_portid(phy->ioc));
+	bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_stats_s), phy->dbuf_pa);
+	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
+}
+
+/*
+ * PHY memory info API.
+ *
+ * @param[in] mincfg - minimal cfg variable
+ */
+u32
+bfa_phy_meminfo(bfa_boolean_t mincfg)
+{
+	/* min driver doesn't need phy */
+	if (mincfg)
+		return 0;
+
+	return BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * PHY attach API.
+ *
+ * @param[in] phy - phy structure
+ * @param[in] ioc  - ioc structure
+ * @param[in] dev  - device structure
+ * @param[in] trcmod - trace module
+ * @param[in] mincfg - minimal cfg variable
+ */
+void
+bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc, void *dev,
+		struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
+{
+	phy->ioc = ioc;
+	phy->trcmod = trcmod;
+	phy->cbfn = NULL;
+	phy->cbarg = NULL;
+	phy->op_busy = 0;
+
+	bfa_ioc_mbox_regisr(phy->ioc, BFI_MC_PHY, bfa_phy_intr, phy);
+	bfa_q_qe_init(&phy->ioc_notify);
+	bfa_ioc_notify_init(&phy->ioc_notify, bfa_phy_notify, phy);
+	list_add_tail(&phy->ioc_notify.qe, &phy->ioc->notify_q);
+
+	/* min driver doesn't need phy */
+	if (mincfg) {
+		phy->dbuf_kva = NULL;
+		phy->dbuf_pa = 0;
+	}
+}
+
+/*
+ * Claim memory for phy
+ *
+ * @param[in] phy - phy structure
+ * @param[in] dm_kva - pointer to virtual memory address
+ * @param[in] dm_pa - physical memory address
+ * @param[in] mincfg - minimal cfg variable
+ */
+void
+bfa_phy_memclaim(struct bfa_phy_s *phy, u8 *dm_kva, u64 dm_pa,
+		bfa_boolean_t mincfg)
+{
+	if (mincfg)
+		return;
+
+	phy->dbuf_kva = dm_kva;
+	phy->dbuf_pa = dm_pa;
+	memset(phy->dbuf_kva, 0, BFA_PHY_DMA_BUF_SZ);
+	dm_kva += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+	dm_pa += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
+bfa_boolean_t
+bfa_phy_busy(struct bfa_ioc_s *ioc)
+{
+	void __iomem	*rb;
+
+	rb = bfa_ioc_bar0(ioc);
+	return readl(rb + BFA_PHY_LOCK_STATUS);
+}
+
+/*
+ * Get phy attribute.
+ *
+ * @param[in] phy - phy structure
+ * @param[in] attr - phy attribute structure
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance,
+		struct bfa_phy_attr_s *attr, bfa_cb_phy_t cbfn, void *cbarg)
+{
+	bfa_trc(phy, BFI_PHY_H2I_QUERY_REQ);
+	bfa_trc(phy, instance);
+
+	if (!bfa_phy_present(phy))
+		return BFA_STATUS_PHY_NOT_PRESENT;
+
+	if (!bfa_ioc_is_operational(phy->ioc))
+		return BFA_STATUS_IOC_NON_OP;
+
+	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
+		bfa_trc(phy, phy->op_busy);
+		return BFA_STATUS_DEVBUSY;
+	}
+
+	phy->op_busy = 1;
+	phy->cbfn = cbfn;
+	phy->cbarg = cbarg;
+	phy->instance = instance;
+	phy->ubuf = (uint8_t *) attr;
+	bfa_phy_query_send(phy);
+
+	return BFA_STATUS_OK;
+}
+
+/*
+ * Get phy stats.
+ *
+ * @param[in] phy - phy structure
+ * @param[in] instance - phy image instance
+ * @param[in] stats - pointer to phy stats
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance,
+		struct bfa_phy_stats_s *stats,
+		bfa_cb_phy_t cbfn, void *cbarg)
+{
+	bfa_trc(phy, BFI_PHY_H2I_STATS_REQ);
+	bfa_trc(phy, instance);
+
+	if (!bfa_phy_present(phy))
+		return BFA_STATUS_PHY_NOT_PRESENT;
+
+	if (!bfa_ioc_is_operational(phy->ioc))
+		return BFA_STATUS_IOC_NON_OP;
+
+	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
+		bfa_trc(phy, phy->op_busy);
+		return BFA_STATUS_DEVBUSY;
+	}
+
+	phy->op_busy = 1;
+	phy->cbfn = cbfn;
+	phy->cbarg = cbarg;
+	phy->instance = instance;
+	phy->ubuf = (u8 *) stats;
+	bfa_phy_stats_send(phy);
+
+	return BFA_STATUS_OK;
+}
+
+/*
+ * Update phy image.
+ *
+ * @param[in] phy - phy structure
+ * @param[in] instance - phy image instance
+ * @param[in] buf - update data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_phy_update(struct bfa_phy_s *phy, u8 instance,
+		void *buf, u32 len, u32 offset,
+		bfa_cb_phy_t cbfn, void *cbarg)
+{
+	bfa_trc(phy, BFI_PHY_H2I_WRITE_REQ);
+	bfa_trc(phy, instance);
+	bfa_trc(phy, len);
+	bfa_trc(phy, offset);
+
+	if (!bfa_phy_present(phy))
+		return BFA_STATUS_PHY_NOT_PRESENT;
+
+	if (!bfa_ioc_is_operational(phy->ioc))
+		return BFA_STATUS_IOC_NON_OP;
+
+	/* 'len' must be in word (4-byte) boundary */
+	if (!len || (len & 0x03))
+		return BFA_STATUS_FAILED;
+
+	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
+		bfa_trc(phy, phy->op_busy);
+		return BFA_STATUS_DEVBUSY;
+	}
+
+	phy->op_busy = 1;
+	phy->cbfn = cbfn;
+	phy->cbarg = cbarg;
+	phy->instance = instance;
+	phy->residue = len;
+	phy->offset = 0;
+	phy->addr_off = offset;
+	phy->ubuf = buf;
+
+	bfa_phy_write_send(phy);
+	return BFA_STATUS_OK;
+}
+
+/*
+ * Read phy image.
+ *
+ * @param[in] phy - phy structure
+ * @param[in] instance - phy image instance
+ * @param[in] buf - read data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_phy_read(struct bfa_phy_s *phy, u8 instance,
+		void *buf, u32 len, u32 offset,
+		bfa_cb_phy_t cbfn, void *cbarg)
+{
+	bfa_trc(phy, BFI_PHY_H2I_READ_REQ);
+	bfa_trc(phy, instance);
+	bfa_trc(phy, len);
+	bfa_trc(phy, offset);
+
+	if (!bfa_phy_present(phy))
+		return BFA_STATUS_PHY_NOT_PRESENT;
+
+	if (!bfa_ioc_is_operational(phy->ioc))
+		return BFA_STATUS_IOC_NON_OP;
+
+	/* 'len' must be in word (4-byte) boundary */
+	if (!len || (len & 0x03))
+		return BFA_STATUS_FAILED;
+
+	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
+		bfa_trc(phy, phy->op_busy);
+		return BFA_STATUS_DEVBUSY;
+	}
+
+	phy->op_busy = 1;
+	phy->cbfn = cbfn;
+	phy->cbarg = cbarg;
+	phy->instance = instance;
+	phy->residue = len;
+	phy->offset = 0;
+	phy->addr_off = offset;
+	phy->ubuf = buf;
+	bfa_phy_read_send(phy);
+
+	return BFA_STATUS_OK;
+}
+
+/*
+ * Process phy response messages upon receiving interrupts.
+ *
+ * @param[in] phyarg - phy structure
+ * @param[in] msg - message structure
+ */
+void
+bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
+{
+	struct bfa_phy_s *phy = phyarg;
+	u32	status;
+
+	union {
+		struct bfi_phy_query_rsp_s *query;
+		struct bfi_phy_stats_rsp_s *stats;
+		struct bfi_phy_write_rsp_s *write;
+		struct bfi_phy_read_rsp_s *read;
+		struct bfi_mbmsg_s   *msg;
+	} m;
+
+	m.msg = msg;
+	bfa_trc(phy, msg->mh.msg_id);
+
+	if (!phy->op_busy) {
+		/* receiving response after ioc failure */
+		bfa_trc(phy, 0x9999);
+		return;
+	}
+
+	switch (msg->mh.msg_id) {
+	case BFI_PHY_I2H_QUERY_RSP:
+		status = be32_to_cpu(m.query->status);
+		bfa_trc(phy, status);
+
+		if (status == BFA_STATUS_OK) {
+			struct bfa_phy_attr_s *attr =
+				(struct bfa_phy_attr_s *) phy->ubuf;
+			bfa_phy_ntoh32((u32 *)attr, (u32 *)phy->dbuf_kva,
+					sizeof(struct bfa_phy_attr_s));
+			bfa_trc(phy, attr->status);
+			bfa_trc(phy, attr->length);
+		}
+
+		phy->status = status;
+		phy->op_busy = 0;
+		if (phy->cbfn)
+			phy->cbfn(phy->cbarg, phy->status);
+		break;
+	case BFI_PHY_I2H_STATS_RSP:
+		status = be32_to_cpu(m.stats->status);
+		bfa_trc(phy, status);
+
+		if (status == BFA_STATUS_OK) {
+			struct bfa_phy_stats_s *stats =
+				(struct bfa_phy_stats_s *) phy->ubuf;
+			bfa_phy_ntoh32((u32 *)stats, (u32 *)phy->dbuf_kva,
+				sizeof(struct bfa_phy_stats_s));
+			bfa_trc(phy, stats->status);
+		}
+
+		phy->status = status;
+		phy->op_busy = 0;
+		if (phy->cbfn)
+			phy->cbfn(phy->cbarg, phy->status);
+		break;
+	case BFI_PHY_I2H_WRITE_RSP:
+		status = be32_to_cpu(m.write->status);
+		bfa_trc(phy, status);
+
+		if (status != BFA_STATUS_OK || phy->residue == 0) {
+			phy->status = status;
+			phy->op_busy = 0;
+			if (phy->cbfn)
+				phy->cbfn(phy->cbarg, phy->status);
+		} else {
+			bfa_trc(phy, phy->offset);
+			bfa_phy_write_send(phy);
+		}
+		break;
+	case BFI_PHY_I2H_READ_RSP:
+		status = be32_to_cpu(m.read->status);
+		bfa_trc(phy, status);
+
+		if (status != BFA_STATUS_OK) {
+			phy->status = status;
+			phy->op_busy = 0;
+			if (phy->cbfn)
+				phy->cbfn(phy->cbarg, phy->status);
+		} else {
+			u32 len = be32_to_cpu(m.read->length);
+			u16 *buf = (u16 *)(phy->ubuf + phy->offset);
+			u16 *dbuf = (u16 *)phy->dbuf_kva;
+			int i, sz = len >> 1;
+
+			bfa_trc(phy, phy->offset);
+			bfa_trc(phy, len);
+
+			for (i = 0; i < sz; i++)
+				buf[i] = be16_to_cpu(dbuf[i]);
+
+			phy->residue -= len;
+			phy->offset += len;
+
+			if (phy->residue == 0) {
+				phy->status = status;
+				phy->op_busy = 0;
+				if (phy->cbfn)
+					phy->cbfn(phy->cbarg, phy->status);
+			} else
+				bfa_phy_read_send(phy);
+		}
+		break;
+	default:
+		WARN_ON(1);
+	}
+}
+
+/*
+ * DCONF state machine events
+ */
+enum bfa_dconf_event {
+	BFA_DCONF_SM_INIT		= 1,	/* dconf Init */
+	BFA_DCONF_SM_FLASH_COMP		= 2,	/* read/write to flash */
+	BFA_DCONF_SM_WR			= 3,	/* binding change, map */
+	BFA_DCONF_SM_TIMEOUT		= 4,	/* flush timer expiry */
+	BFA_DCONF_SM_EXIT		= 5,	/* exit dconf module */
+	BFA_DCONF_SM_IOCDISABLE		= 6,	/* IOC disable event */
+};
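+
+/*
+ * Typical flow through the state handlers below:
+ *
+ *	uninit --INIT--> flash_read --FLASH_COMP--> ready
+ *	ready --WR--> dirty --TIMEOUT--> sync --FLASH_COMP--> ready
+ *	dirty --EXIT--> final_sync --FLASH_COMP/TIMEOUT--> uninit
+ *	dirty/sync --IOCDISABLE--> iocdown_dirty --INIT--> dirty
+ */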
+
+/* forward declaration of DCONF state machine */
+static void bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf,
+				enum bfa_dconf_event event);
+static void bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
+				enum bfa_dconf_event event);
+static void bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf,
+				enum bfa_dconf_event event);
+static void bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf,
+				enum bfa_dconf_event event);
+static void bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf,
+				enum bfa_dconf_event event);
+static void bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
+				enum bfa_dconf_event event);
+static void bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
+				enum bfa_dconf_event event);
+
+static void bfa_dconf_cbfn(void *dconf, bfa_status_t status);
+static void bfa_dconf_timer(void *cbarg);
+static bfa_status_t bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf);
+static void bfa_dconf_init_cb(void *arg, bfa_status_t status);
+
+/*
+ * Beginning state of dconf module. Waiting for an event to start.
+ */
+static void
+bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
+{
+	bfa_status_t bfa_status;
+	bfa_trc(dconf->bfa, event);
+
+	switch (event) {
+	case BFA_DCONF_SM_INIT:
+		if (dconf->min_cfg) {
+			bfa_trc(dconf->bfa, dconf->min_cfg);
+			bfa_fsm_send_event(&dconf->bfa->iocfc,
+					IOCFC_E_DCONF_DONE);
+			return;
+		}
+		bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read);
+		bfa_timer_start(dconf->bfa, &dconf->timer,
+			bfa_dconf_timer, dconf, 2 * BFA_DCONF_UPDATE_TOV);
+		bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa),
+					BFA_FLASH_PART_DRV, dconf->instance,
+					dconf->dconf,
+					sizeof(struct bfa_dconf_s), 0,
+					bfa_dconf_init_cb, dconf->bfa);
+		if (bfa_status != BFA_STATUS_OK) {
+			bfa_timer_stop(&dconf->timer);
+			bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED);
+			bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+			return;
+		}
+		break;
+	case BFA_DCONF_SM_EXIT:
+		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
+		/* fall through */
+	case BFA_DCONF_SM_IOCDISABLE:
+	case BFA_DCONF_SM_WR:
+	case BFA_DCONF_SM_FLASH_COMP:
+		break;
+	default:
+		bfa_sm_fault(dconf->bfa, event);
+	}
+}
+
+/*
+ * Read flash for dconf entries and make a call back to the driver once done.
+ */
+static void
+bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
+			enum bfa_dconf_event event)
+{
+	bfa_trc(dconf->bfa, event);
+
+	switch (event) {
+	case BFA_DCONF_SM_FLASH_COMP:
+		bfa_timer_stop(&dconf->timer);
+		bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
+		break;
+	case BFA_DCONF_SM_TIMEOUT:
+		bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
+		bfa_ioc_suspend(&dconf->bfa->ioc);
+		break;
+	case BFA_DCONF_SM_EXIT:
+		bfa_timer_stop(&dconf->timer);
+		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
+		break;
+	case BFA_DCONF_SM_IOCDISABLE:
+		bfa_timer_stop(&dconf->timer);
+		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+		break;
+	default:
+		bfa_sm_fault(dconf->bfa, event);
+	}
+}
+
+/*
+ * DCONF Module is in ready state. Has completed the initialization.
+ */
+static void
+bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
+{
+	bfa_trc(dconf->bfa, event);
+
+	switch (event) {
+	case BFA_DCONF_SM_WR:
+		bfa_timer_start(dconf->bfa, &dconf->timer,
+			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
+		bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
+		break;
+	case BFA_DCONF_SM_EXIT:
+		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
+		break;
+	case BFA_DCONF_SM_INIT:
+	case BFA_DCONF_SM_IOCDISABLE:
+		break;
+	default:
+		bfa_sm_fault(dconf->bfa, event);
+	}
+}
+
+/*
+ * Entries are dirty; write them back to the flash.
+ */
+static void
+bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
+{
+	bfa_trc(dconf->bfa, event);
+
+	switch (event) {
+	case BFA_DCONF_SM_TIMEOUT:
+		bfa_sm_set_state(dconf, bfa_dconf_sm_sync);
+		bfa_dconf_flash_write(dconf);
+		break;
+	case BFA_DCONF_SM_WR:
+		bfa_timer_stop(&dconf->timer);
+		bfa_timer_start(dconf->bfa, &dconf->timer,
+			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
+		break;
+	case BFA_DCONF_SM_EXIT:
+		bfa_timer_stop(&dconf->timer);
+		bfa_timer_start(dconf->bfa, &dconf->timer,
+			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
+		bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
+		bfa_dconf_flash_write(dconf);
+		break;
+	case BFA_DCONF_SM_FLASH_COMP:
+		break;
+	case BFA_DCONF_SM_IOCDISABLE:
+		bfa_timer_stop(&dconf->timer);
+		bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
+		break;
+	default:
+		bfa_sm_fault(dconf->bfa, event);
+	}
+}
+
+/*
+ * Sync the dconf entries to the flash.
+ */
+static void
+bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
+			enum bfa_dconf_event event)
+{
+	bfa_trc(dconf->bfa, event);
+
+	switch (event) {
+	case BFA_DCONF_SM_IOCDISABLE:
+	case BFA_DCONF_SM_FLASH_COMP:
+		bfa_timer_stop(&dconf->timer);
+		/* fall through */
+	case BFA_DCONF_SM_TIMEOUT:
+		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
+		break;
+	default:
+		bfa_sm_fault(dconf->bfa, event);
+	}
+}
+
+static void
+bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
+{
+	bfa_trc(dconf->bfa, event);
+
+	switch (event) {
+	case BFA_DCONF_SM_FLASH_COMP:
+		bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
+		break;
+	case BFA_DCONF_SM_WR:
+		bfa_timer_start(dconf->bfa, &dconf->timer,
+			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
+		bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
+		break;
+	case BFA_DCONF_SM_EXIT:
+		bfa_timer_start(dconf->bfa, &dconf->timer,
+			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
+		bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
+		break;
+	case BFA_DCONF_SM_IOCDISABLE:
+		bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
+		break;
+	default:
+		bfa_sm_fault(dconf->bfa, event);
+	}
+}
+
+static void
+bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
+			enum bfa_dconf_event event)
+{
+	bfa_trc(dconf->bfa, event);
+
+	switch (event) {
+	case BFA_DCONF_SM_INIT:
+		bfa_timer_start(dconf->bfa, &dconf->timer,
+			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
+		bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
+		break;
+	case BFA_DCONF_SM_EXIT:
+		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
+		break;
+	case BFA_DCONF_SM_IOCDISABLE:
+		break;
+	default:
+		bfa_sm_fault(dconf->bfa, event);
+	}
+}
+
+/*
+ * Compute memory needed by the dconf module and set up its kva segment.
+ */
+void
+bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
+		  struct bfa_s *bfa)
+{
+	struct bfa_mem_kva_s *dconf_kva = BFA_MEM_DCONF_KVA(bfa);
+
+	if (cfg->drvcfg.min_cfg)
+		bfa_mem_kva_setup(meminfo, dconf_kva,
+				sizeof(struct bfa_dconf_hdr_s));
+	else
+		bfa_mem_kva_setup(meminfo, dconf_kva,
+				sizeof(struct bfa_dconf_s));
+}
+
+void
+bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg)
+{
+	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
+
+	dconf->bfad = bfad;
+	dconf->bfa = bfa;
+	dconf->instance = bfa->ioc.port_id;
+	bfa_trc(bfa, dconf->instance);
+
+	dconf->dconf = (struct bfa_dconf_s *) bfa_mem_kva_curp(dconf);
+	if (cfg->drvcfg.min_cfg) {
+		bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s);
+		dconf->min_cfg = BFA_TRUE;
+	} else {
+		dconf->min_cfg = BFA_FALSE;
+		bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s);
+	}
+
+	bfa_dconf_read_data_valid(bfa) = BFA_FALSE;
+	bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+}
+
+static void
+bfa_dconf_init_cb(void *arg, bfa_status_t status)
+{
+	struct bfa_s *bfa = arg;
+	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
+
+	if (status == BFA_STATUS_OK) {
+		bfa_dconf_read_data_valid(bfa) = BFA_TRUE;
+		if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE)
+			dconf->dconf->hdr.signature = BFI_DCONF_SIGNATURE;
+		if (dconf->dconf->hdr.version != BFI_DCONF_VERSION)
+			dconf->dconf->hdr.version = BFI_DCONF_VERSION;
+	}
+	bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
+	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DCONF_DONE);
+}
+
+void
+bfa_dconf_modinit(struct bfa_s *bfa)
+{
+	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
+	bfa_sm_send_event(dconf, BFA_DCONF_SM_INIT);
+}
+
+static void
+bfa_dconf_timer(void *cbarg)
+{
+	struct bfa_dconf_mod_s *dconf = cbarg;
+	bfa_sm_send_event(dconf, BFA_DCONF_SM_TIMEOUT);
+}
+
+void
+bfa_dconf_iocdisable(struct bfa_s *bfa)
+{
+	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
+	bfa_sm_send_event(dconf, BFA_DCONF_SM_IOCDISABLE);
+}
+
+static bfa_status_t
+bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf)
+{
+	bfa_status_t bfa_status;
+	bfa_trc(dconf->bfa, 0);
+
+	bfa_status = bfa_flash_update_part(BFA_FLASH(dconf->bfa),
+				BFA_FLASH_PART_DRV, dconf->instance,
+				dconf->dconf,  sizeof(struct bfa_dconf_s), 0,
+				bfa_dconf_cbfn, dconf);
+	WARN_ON(bfa_status != BFA_STATUS_OK);
+	bfa_trc(dconf->bfa, bfa_status);
+
+	return bfa_status;
+}
+
+bfa_status_t
+bfa_dconf_update(struct bfa_s *bfa)
+{
+	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
+	bfa_trc(dconf->bfa, 0);
+	if (bfa_sm_cmp_state(dconf, bfa_dconf_sm_iocdown_dirty))
+		return BFA_STATUS_FAILED;
+
+	if (dconf->min_cfg) {
+		bfa_trc(dconf->bfa, dconf->min_cfg);
+		return BFA_STATUS_FAILED;
+	}
+
+	bfa_sm_send_event(dconf, BFA_DCONF_SM_WR);
+	return BFA_STATUS_OK;
+}
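+
+/*
+ * Illustrative flow: the driver calls bfa_dconf_update() after changing
+ * dconf->dconf; the WR event arms a BFA_DCONF_UPDATE_TOV timer in the
+ * dirty state, so a burst of updates is coalesced into one flash write
+ * when the timer expires.
+ */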
+
+static void
+bfa_dconf_cbfn(void *arg, bfa_status_t status)
+{
+	struct bfa_dconf_mod_s *dconf = arg;
+	WARN_ON(status);
+	bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
+}
+
+void
+bfa_dconf_modexit(struct bfa_s *bfa)
+{
+	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
+	bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT);
+}
+
+/*
+ * FRU specific functions
+ */
+
+#define BFA_FRU_DMA_BUF_SZ	0x02000		/* 8k dma buffer */
+#define BFA_FRU_CHINOOK_MAX_SIZE 0x10000
+#define BFA_FRU_LIGHTNING_MAX_SIZE 0x200
+
+static void
+bfa_fru_notify(void *cbarg, enum bfa_ioc_event_e event)
+{
+	struct bfa_fru_s *fru = cbarg;
+
+	bfa_trc(fru, event);
+
+	switch (event) {
+	case BFA_IOC_E_DISABLED:
+	case BFA_IOC_E_FAILED:
+		if (fru->op_busy) {
+			fru->status = BFA_STATUS_IOC_FAILURE;
+			fru->cbfn(fru->cbarg, fru->status);
+			fru->op_busy = 0;
+		}
+		break;
+
+	default:
+		break;
+	}
+}
+
+/*
+ * Send fru write request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_fru_write_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type)
+{
+	struct bfa_fru_s *fru = cbarg;
+	struct bfi_fru_write_req_s *msg =
+			(struct bfi_fru_write_req_s *) fru->mb.msg;
+	u32 len;
+
+	msg->offset = cpu_to_be32(fru->addr_off + fru->offset);
+	len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ?
+				fru->residue : BFA_FRU_DMA_BUF_SZ;
+	msg->length = cpu_to_be32(len);
+
+	/*
+	 * indicate if it's the last msg of the whole write operation
+	 */
+	msg->last = (len == fru->residue) ? 1 : 0;
+
+	msg->trfr_cmpl = (len == fru->residue) ? fru->trfr_cmpl : 0;
+	bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc));
+	bfa_alen_set(&msg->alen, len, fru->dbuf_pa);
+
+	memcpy(fru->dbuf_kva, fru->ubuf + fru->offset, len);
+	bfa_ioc_mbox_queue(fru->ioc, &fru->mb);
+
+	fru->residue -= len;
+	fru->offset += len;
+}
+
+/*
+ * Send fru read request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_fru_read_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type)
+{
+	struct bfa_fru_s *fru = cbarg;
+	struct bfi_fru_read_req_s *msg =
+			(struct bfi_fru_read_req_s *) fru->mb.msg;
+	u32 len;
+
+	msg->offset = cpu_to_be32(fru->addr_off + fru->offset);
+	len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ?
+				fru->residue : BFA_FRU_DMA_BUF_SZ;
+	msg->length = cpu_to_be32(len);
+	bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc));
+	bfa_alen_set(&msg->alen, len, fru->dbuf_pa);
+	bfa_ioc_mbox_queue(fru->ioc, &fru->mb);
+}
+
+/*
+ * FRU memory info API.
+ *
+ * @param[in] mincfg - minimal cfg variable
+ */
+u32
+bfa_fru_meminfo(bfa_boolean_t mincfg)
+{
+	/* min driver doesn't need fru */
+	if (mincfg)
+		return 0;
+
+	return BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * FRU attach API.
+ *
+ * @param[in] fru - fru structure
+ * @param[in] ioc  - ioc structure
+ * @param[in] dev  - device structure
+ * @param[in] trcmod - trace module
+ * @param[in] mincfg - minimal cfg variable
+ */
+void
+bfa_fru_attach(struct bfa_fru_s *fru, struct bfa_ioc_s *ioc, void *dev,
+	struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
+{
+	fru->ioc = ioc;
+	fru->trcmod = trcmod;
+	fru->cbfn = NULL;
+	fru->cbarg = NULL;
+	fru->op_busy = 0;
+
+	bfa_ioc_mbox_regisr(fru->ioc, BFI_MC_FRU, bfa_fru_intr, fru);
+	bfa_q_qe_init(&fru->ioc_notify);
+	bfa_ioc_notify_init(&fru->ioc_notify, bfa_fru_notify, fru);
+	list_add_tail(&fru->ioc_notify.qe, &fru->ioc->notify_q);
+
+	/* min driver doesn't need fru */
+	if (mincfg) {
+		fru->dbuf_kva = NULL;
+		fru->dbuf_pa = 0;
+	}
+}
+
+/*
+ * Claim memory for fru
+ *
+ * @param[in] fru - fru structure
+ * @param[in] dm_kva - pointer to virtual memory address
+ * @param[in] dm_pa - physical memory address
+ * @param[in] mincfg - minimal cfg variable
+ */
+void
+bfa_fru_memclaim(struct bfa_fru_s *fru, u8 *dm_kva, u64 dm_pa,
+	bfa_boolean_t mincfg)
+{
+	if (mincfg)
+		return;
+
+	fru->dbuf_kva = dm_kva;
+	fru->dbuf_pa = dm_pa;
+	memset(fru->dbuf_kva, 0, BFA_FRU_DMA_BUF_SZ);
+	dm_kva += BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+	dm_pa += BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * Update fru vpd image.
+ *
+ * @param[in] fru - fru structure
+ * @param[in] buf - update data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_fruvpd_update(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
+		  bfa_cb_fru_t cbfn, void *cbarg, u8 trfr_cmpl)
+{
+	bfa_trc(fru, BFI_FRUVPD_H2I_WRITE_REQ);
+	bfa_trc(fru, len);
+	bfa_trc(fru, offset);
+
+	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2 &&
+		fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK2)
+		return BFA_STATUS_FRU_NOT_PRESENT;
+
+	if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK)
+		return BFA_STATUS_CMD_NOTSUPP;
+
+	if (!bfa_ioc_is_operational(fru->ioc))
+		return BFA_STATUS_IOC_NON_OP;
+
+	if (fru->op_busy) {
+		bfa_trc(fru, fru->op_busy);
+		return BFA_STATUS_DEVBUSY;
+	}
+
+	fru->op_busy = 1;
+
+	fru->cbfn = cbfn;
+	fru->cbarg = cbarg;
+	fru->residue = len;
+	fru->offset = 0;
+	fru->addr_off = offset;
+	fru->ubuf = buf;
+	fru->trfr_cmpl = trfr_cmpl;
+
+	bfa_fru_write_send(fru, BFI_FRUVPD_H2I_WRITE_REQ);
+
+	return BFA_STATUS_OK;
+}
+
+/*
+ * Read fru vpd image.
+ *
+ * @param[in] fru - fru structure
+ * @param[in] buf - read data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_fruvpd_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
+		bfa_cb_fru_t cbfn, void *cbarg)
+{
+	bfa_trc(fru, BFI_FRUVPD_H2I_READ_REQ);
+	bfa_trc(fru, len);
+	bfa_trc(fru, offset);
+
+	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
+		return BFA_STATUS_FRU_NOT_PRESENT;
+
+	if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK &&
+		fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK2)
+		return BFA_STATUS_CMD_NOTSUPP;
+
+	if (!bfa_ioc_is_operational(fru->ioc))
+		return BFA_STATUS_IOC_NON_OP;
+
+	if (fru->op_busy) {
+		bfa_trc(fru, fru->op_busy);
+		return BFA_STATUS_DEVBUSY;
+	}
+
+	fru->op_busy = 1;
+
+	fru->cbfn = cbfn;
+	fru->cbarg = cbarg;
+	fru->residue = len;
+	fru->offset = 0;
+	fru->addr_off = offset;
+	fru->ubuf = buf;
+	bfa_fru_read_send(fru, BFI_FRUVPD_H2I_READ_REQ);
+
+	return BFA_STATUS_OK;
+}
+
+/*
+ * Get maximum size of fru vpd image.
+ *
+ * @param[in] fru - fru structure
+ * @param[out] max_size - maximum size of fru vpd data
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_fruvpd_get_max_size(struct bfa_fru_s *fru, u32 *max_size)
+{
+	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
+		return BFA_STATUS_FRU_NOT_PRESENT;
+
+	if (!bfa_ioc_is_operational(fru->ioc))
+		return BFA_STATUS_IOC_NON_OP;
+
+	if (fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK ||
+		fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK2)
+		*max_size = BFA_FRU_CHINOOK_MAX_SIZE;
+	else
+		return BFA_STATUS_CMD_NOTSUPP;
+	return BFA_STATUS_OK;
+}
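+
+/*
+ * Usage sketch for the VPD read path (illustrative; the buffer and
+ * callback names are hypothetical):
+ *
+ *	u32 max;
+ *
+ *	if (bfa_fruvpd_get_max_size(fru, &max) == BFA_STATUS_OK)
+ *		status = bfa_fruvpd_read(fru, vpd_buf, max, 0,
+ *					 my_fru_done, fru);
+ *
+ * A 64 KB Chinook image completes in eight BFI_FRUVPD_H2I_READ_REQ
+ * round trips of BFA_FRU_DMA_BUF_SZ (8 KB) each.
+ */
+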
+/*
+ * tfru write.
+ *
+ * @param[in] fru - fru structure
+ * @param[in] buf - update data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_tfru_write(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
+	       bfa_cb_fru_t cbfn, void *cbarg)
+{
+	bfa_trc(fru, BFI_TFRU_H2I_WRITE_REQ);
+	bfa_trc(fru, len);
+	bfa_trc(fru, offset);
+	bfa_trc(fru, *((u8 *) buf));
+
+	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
+		return BFA_STATUS_FRU_NOT_PRESENT;
+
+	if (!bfa_ioc_is_operational(fru->ioc))
+		return BFA_STATUS_IOC_NON_OP;
+
+	if (fru->op_busy) {
+		bfa_trc(fru, fru->op_busy);
+		return BFA_STATUS_DEVBUSY;
+	}
+
+	fru->op_busy = 1;
+
+	fru->cbfn = cbfn;
+	fru->cbarg = cbarg;
+	fru->residue = len;
+	fru->offset = 0;
+	fru->addr_off = offset;
+	fru->ubuf = buf;
+
+	bfa_fru_write_send(fru, BFI_TFRU_H2I_WRITE_REQ);
+
+	return BFA_STATUS_OK;
+}
+
+/*
+ * tfru read.
+ *
+ * @param[in] fru - fru structure
+ * @param[in] buf - read data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_tfru_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
+	      bfa_cb_fru_t cbfn, void *cbarg)
+{
+	bfa_trc(fru, BFI_TFRU_H2I_READ_REQ);
+	bfa_trc(fru, len);
+	bfa_trc(fru, offset);
+
+	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
+		return BFA_STATUS_FRU_NOT_PRESENT;
+
+	if (!bfa_ioc_is_operational(fru->ioc))
+		return BFA_STATUS_IOC_NON_OP;
+
+	if (fru->op_busy) {
+		bfa_trc(fru, fru->op_busy);
+		return BFA_STATUS_DEVBUSY;
+	}
+
+	fru->op_busy = 1;
+
+	fru->cbfn = cbfn;
+	fru->cbarg = cbarg;
+	fru->residue = len;
+	fru->offset = 0;
+	fru->addr_off = offset;
+	fru->ubuf = buf;
+	bfa_fru_read_send(fru, BFI_TFRU_H2I_READ_REQ);
+
+	return BFA_STATUS_OK;
+}
+
+/*
+ * Process fru response messages upon receiving interrupts.
+ *
+ * @param[in] fruarg - fru structure
+ * @param[in] msg - message structure
+ */
+void
+bfa_fru_intr(void *fruarg, struct bfi_mbmsg_s *msg)
+{
+	struct bfa_fru_s *fru = fruarg;
+	struct bfi_fru_rsp_s *rsp = (struct bfi_fru_rsp_s *)msg;
+	u32 status;
+
+	bfa_trc(fru, msg->mh.msg_id);
+
+	if (!fru->op_busy) {
+		/*
+		 * receiving response after ioc failure
+		 */
+		bfa_trc(fru, 0x9999);
+		return;
+	}
+
+	switch (msg->mh.msg_id) {
+	case BFI_FRUVPD_I2H_WRITE_RSP:
+	case BFI_TFRU_I2H_WRITE_RSP:
+		status = be32_to_cpu(rsp->status);
+		bfa_trc(fru, status);
+
+		if (status != BFA_STATUS_OK || fru->residue == 0) {
+			fru->status = status;
+			fru->op_busy = 0;
+			if (fru->cbfn)
+				fru->cbfn(fru->cbarg, fru->status);
+		} else {
+			bfa_trc(fru, fru->offset);
+			if (msg->mh.msg_id == BFI_FRUVPD_I2H_WRITE_RSP)
+				bfa_fru_write_send(fru,
+					BFI_FRUVPD_H2I_WRITE_REQ);
+			else
+				bfa_fru_write_send(fru,
+					BFI_TFRU_H2I_WRITE_REQ);
+		}
+		break;
+	case BFI_FRUVPD_I2H_READ_RSP:
+	case BFI_TFRU_I2H_READ_RSP:
+		status = be32_to_cpu(rsp->status);
+		bfa_trc(fru, status);
+
+		if (status != BFA_STATUS_OK) {
+			fru->status = status;
+			fru->op_busy = 0;
+			if (fru->cbfn)
+				fru->cbfn(fru->cbarg, fru->status);
+		} else {
+			u32 len = be32_to_cpu(rsp->length);
+
+			bfa_trc(fru, fru->offset);
+			bfa_trc(fru, len);
+
+			memcpy(fru->ubuf + fru->offset, fru->dbuf_kva, len);
+			fru->residue -= len;
+			fru->offset += len;
+
+			if (fru->residue == 0) {
+				fru->status = status;
+				fru->op_busy = 0;
+				if (fru->cbfn)
+					fru->cbfn(fru->cbarg, fru->status);
+			} else {
+				if (msg->mh.msg_id == BFI_FRUVPD_I2H_READ_RSP)
+					bfa_fru_read_send(fru,
+						BFI_FRUVPD_H2I_READ_REQ);
+				else
+					bfa_fru_read_send(fru,
+						BFI_TFRU_H2I_READ_REQ);
+			}
+		}
+		break;
+	default:
+		WARN_ON(1);
+	}
+}
+
+/*
+ * register definitions
+ */
+#define FLI_CMD_REG			0x0001d000
+#define FLI_RDDATA_REG			0x0001d010
+#define FLI_ADDR_REG			0x0001d004
+#define FLI_DEV_STATUS_REG		0x0001d014
+
+#define BFA_FLASH_FIFO_SIZE		128	/* fifo size */
+#define BFA_FLASH_CHECK_MAX		10000	/* max # of status check */
+#define BFA_FLASH_BLOCKING_OP_MAX	1000000	/* max # of blocking op check */
+#define BFA_FLASH_WIP_MASK		0x01	/* write in progress bit mask */
+
+enum bfa_flash_cmd {
+	BFA_FLASH_FAST_READ	= 0x0b,	/* fast read */
+	BFA_FLASH_READ_STATUS	= 0x05,	/* read status */
+};
+
+/*
+ * hardware error definition
+ */
+enum bfa_flash_err {
+	BFA_FLASH_NOT_PRESENT	= -1,	/* flash not present */
+	BFA_FLASH_UNINIT	= -2,	/* flash not initialized */
+	BFA_FLASH_BAD		= -3,	/* flash bad */
+	BFA_FLASH_BUSY		= -4,	/* flash busy */
+	BFA_FLASH_ERR_CMD_ACT	= -5,	/* command active never cleared */
+	BFA_FLASH_ERR_FIFO_CNT	= -6,	/* fifo count never cleared */
+	BFA_FLASH_ERR_WIP	= -7,	/* write-in-progress never cleared */
+	BFA_FLASH_ERR_TIMEOUT	= -8,	/* fli timeout */
+	BFA_FLASH_ERR_LEN	= -9,	/* invalid length */
+};
+
+/*
+ * flash command register data structure
+ */
+union bfa_flash_cmd_reg_u {
+	struct {
+#ifdef __BIG_ENDIAN
+		u32	act:1;
+		u32	rsv:1;
+		u32	write_cnt:9;
+		u32	read_cnt:9;
+		u32	addr_cnt:4;
+		u32	cmd:8;
+#else
+		u32	cmd:8;
+		u32	addr_cnt:4;
+		u32	read_cnt:9;
+		u32	write_cnt:9;
+		u32	rsv:1;
+		u32	act:1;
+#endif
+	} r;
+	u32	i;
+};
+
+/*
+ * flash device status register data structure
+ */
+union bfa_flash_dev_status_reg_u {
+	struct {
+#ifdef __BIG_ENDIAN
+		u32	rsv:21;
+		u32	fifo_cnt:6;
+		u32	busy:1;
+		u32	init_status:1;
+		u32	present:1;
+		u32	bad:1;
+		u32	good:1;
+#else
+		u32	good:1;
+		u32	bad:1;
+		u32	present:1;
+		u32	init_status:1;
+		u32	busy:1;
+		u32	fifo_cnt:6;
+		u32	rsv:21;
+#endif
+	} r;
+	u32	i;
+};
+
+/*
+ * flash address register data structure
+ */
+union bfa_flash_addr_reg_u {
+	struct {
+#ifdef __BIG_ENDIAN
+		u32	addr:24;
+		u32	dummy:8;
+#else
+		u32	dummy:8;
+		u32	addr:24;
+#endif
+	} r;
+	u32	i;
+};
+
+/*
+ * Flash raw private functions
+ */
+static void
+bfa_flash_set_cmd(void __iomem *pci_bar, u8 wr_cnt,
+		  u8 rd_cnt, u8 ad_cnt, u8 op)
+{
+	union bfa_flash_cmd_reg_u cmd;
+
+	cmd.i = 0;
+	cmd.r.act = 1;
+	cmd.r.write_cnt = wr_cnt;
+	cmd.r.read_cnt = rd_cnt;
+	cmd.r.addr_cnt = ad_cnt;
+	cmd.r.cmd = op;
+	writel(cmd.i, (pci_bar + FLI_CMD_REG));
+}
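+
+/*
+ * Worked example (illustrative): the fast read issued by
+ * bfa_flash_read_start() below, bfa_flash_set_cmd(bar, 0, len, 4, 0x0b),
+ * packs cmd = 0x0b (BFA_FLASH_FAST_READ) into bits 0-7, addr_cnt = 4
+ * into bits 8-11, read_cnt = len into bits 12-20 and act = 1 into bit
+ * 31 of the little-endian layout before writing the word to FLI_CMD_REG.
+ */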
+
+static void
+bfa_flash_set_addr(void __iomem *pci_bar, u32 address)
+{
+	union bfa_flash_addr_reg_u addr;
+
+	addr.r.addr = address & 0x00ffffff;
+	addr.r.dummy = 0;
+	writel(addr.i, (pci_bar + FLI_ADDR_REG));
+}
+
+static int
+bfa_flash_cmd_act_check(void __iomem *pci_bar)
+{
+	union bfa_flash_cmd_reg_u cmd;
+
+	cmd.i = readl(pci_bar + FLI_CMD_REG);
+
+	if (cmd.r.act)
+		return BFA_FLASH_ERR_CMD_ACT;
+
+	return 0;
+}
+
+/*
+ * Flush FLI data fifo.
+ *
+ * @param[in] pci_bar - pci bar address
+ *
+ * Return 0 on success, negative error number on error.
+ */
+static int
+bfa_flash_fifo_flush(void __iomem *pci_bar)
+{
+	u32 i;
+	u32 t;
+	union bfa_flash_dev_status_reg_u dev_status;
+
+	dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
+
+	if (!dev_status.r.fifo_cnt)
+		return 0;
+
+	/* fifo counter in terms of words */
+	for (i = 0; i < dev_status.r.fifo_cnt; i++)
+		t = readl(pci_bar + FLI_RDDATA_REG);
+
+	/*
+	 * Check the device status. It may take some time.
+	 */
+	for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
+		dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
+		if (!dev_status.r.fifo_cnt)
+			break;
+	}
+
+	if (dev_status.r.fifo_cnt)
+		return BFA_FLASH_ERR_FIFO_CNT;
+
+	return 0;
+}
+
+/*
+ * Read flash status.
+ *
+ * @param[in] pci_bar - pci bar address
+ *
+ * Return the flash status byte (>= 0) on success, negative error
+ * number on error.
+ */
+static int
+bfa_flash_status_read(void __iomem *pci_bar)
+{
+	union bfa_flash_dev_status_reg_u dev_status;
+	int	status;
+	u32	ret_status;
+	int	i;
+
+	status = bfa_flash_fifo_flush(pci_bar);
+	if (status < 0)
+		return status;
+
+	bfa_flash_set_cmd(pci_bar, 0, 4, 0, BFA_FLASH_READ_STATUS);
+
+	for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
+		status = bfa_flash_cmd_act_check(pci_bar);
+		if (!status)
+			break;
+	}
+
+	if (status)
+		return status;
+
+	dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
+	if (!dev_status.r.fifo_cnt)
+		return BFA_FLASH_BUSY;
+
+	ret_status = readl(pci_bar + FLI_RDDATA_REG);
+	ret_status >>= 24;
+
+	status = bfa_flash_fifo_flush(pci_bar);
+	if (status < 0)
+		return status;
+
+	return ret_status;
+}
+
+/*
+ * Start flash read operation.
+ *
+ * @param[in] pci_bar - pci bar address
+ * @param[in] offset - flash address offset
+ * @param[in] len - read data length
+ * @param[in] buf - read data buffer
+ *
+ * Return 0 on success, negative error number on error.
+ */
+static int
+bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
+			 char *buf)
+{
+	int status;
+
+	/*
+	 * len must be a multiple of 4 and must not exceed the fifo size
+	 */
+	if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0)
+		return BFA_FLASH_ERR_LEN;
+
+	/*
+	 * check status
+	 */
+	status = bfa_flash_status_read(pci_bar);
+	if (status == BFA_FLASH_BUSY)
+		status = bfa_flash_status_read(pci_bar);
+
+	if (status < 0)
+		return status;
+
+	/*
+	 * check if write-in-progress bit is cleared
+	 */
+	if (status & BFA_FLASH_WIP_MASK)
+		return BFA_FLASH_ERR_WIP;
+
+	bfa_flash_set_addr(pci_bar, offset);
+
+	bfa_flash_set_cmd(pci_bar, 0, (u8)len, 4, BFA_FLASH_FAST_READ);
+
+	return 0;
+}
+
+/*
+ * Check flash read operation.
+ *
+ * @param[in] pci_bar - pci bar address
+ *
+ * Return flash device status, 1 if busy, 0 if not.
+ */
+static u32
+bfa_flash_read_check(void __iomem *pci_bar)
+{
+	if (bfa_flash_cmd_act_check(pci_bar))
+		return 1;
+
+	return 0;
+}
+
+/*
+ * End flash read operation.
+ *
+ * @param[in] pci_bar - pci bar address
+ * @param[in] len - read data length
+ * @param[in] buf - read data buffer
+ *
+ */
+static void
+bfa_flash_read_end(void __iomem *pci_bar, u32 len, char *buf)
+{
+	u32 i;
+
+	/*
+	 * read data fifo up to 32 words
+	 */
+	for (i = 0; i < len; i += 4) {
+		u32 w = readl(pci_bar + FLI_RDDATA_REG);
+		*((u32 *) (buf + i)) = swab32(w);
+	}
+
+	bfa_flash_fifo_flush(pci_bar);
+}
+
+#define FLASH_BLOCKING_OP_MAX   500
+#define FLASH_SEM_LOCK_REG	0x18820
+
+static int
+bfa_raw_sem_get(void __iomem *bar)
+{
+	int	locked;
+
+	locked = readl(bar + FLASH_SEM_LOCK_REG);
+	return !locked;
+}
+
+bfa_status_t
+bfa_flash_sem_get(void __iomem *bar)
+{
+	u32 n = FLASH_BLOCKING_OP_MAX;
+
+	while (!bfa_raw_sem_get(bar)) {
+		if (--n <= 0)
+			return BFA_STATUS_BADFLASH;
+		mdelay(10);
+	}
+	return BFA_STATUS_OK;
+}
+
+void
+bfa_flash_sem_put(void __iomem *bar)
+{
+	writel(0, (bar + FLASH_SEM_LOCK_REG));
+}
+
+/*
+ * Perform flash raw read.
+ *
+ * @param[in] pci_bar - pci bar address
+ * @param[in] offset - flash partition address offset
+ * @param[in] buf - read data buffer
+ * @param[in] len - read data length
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf,
+		       u32 len)
+{
+	u32 n;
+	int status;
+	u32 off, l, s, residue, fifo_sz;
+
+	residue = len;
+	off = 0;
+	fifo_sz = BFA_FLASH_FIFO_SIZE;
+	status = bfa_flash_sem_get(pci_bar);
+	if (status != BFA_STATUS_OK)
+		return status;
+
+	while (residue) {
+		s = offset + off;
+		n = s / fifo_sz;
+		l = (n + 1) * fifo_sz - s;
+		if (l > residue)
+			l = residue;
+
+		status = bfa_flash_read_start(pci_bar, offset + off, l,
+								&buf[off]);
+		if (status < 0) {
+			bfa_flash_sem_put(pci_bar);
+			return BFA_STATUS_FAILED;
+		}
+
+		n = BFA_FLASH_BLOCKING_OP_MAX;
+		while (bfa_flash_read_check(pci_bar)) {
+			if (--n <= 0) {
+				bfa_flash_sem_put(pci_bar);
+				return BFA_STATUS_FAILED;
+			}
+		}
+
+		bfa_flash_read_end(pci_bar, l, &buf[off]);
+
+		residue -= l;
+		off += l;
+	}
+	bfa_flash_sem_put(pci_bar);
+
+	return BFA_STATUS_OK;
+}
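+
+/*
+ * Worked example of the chunking above (illustrative): reading 300
+ * bytes at flash offset 100 with the 128-byte FIFO issues reads of 28
+ * bytes (up to the 128-byte boundary), then 128, 128 and 16 bytes:
+ * 28 + 128 + 128 + 16 = 300.
+ */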
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
new file mode 100644
index 0000000..0f9fab7
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -0,0 +1,1053 @@
+/*
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014- QLogic Corporation.
+ * All rights reserved
+ * www.qlogic.com
+ *
+ * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __BFA_IOC_H__
+#define __BFA_IOC_H__
+
+#include "bfad_drv.h"
+#include "bfa_cs.h"
+#include "bfi.h"
+
+#define BFA_DBG_FWTRC_ENTS	(BFI_IOC_TRC_ENTS)
+#define BFA_DBG_FWTRC_LEN					\
+	(BFA_DBG_FWTRC_ENTS * sizeof(struct bfa_trc_s) +	\
+	(sizeof(struct bfa_trc_mod_s) -				\
+	BFA_TRC_MAX * sizeof(struct bfa_trc_s)))
+/*
+ * BFA timer declarations
+ */
+typedef void (*bfa_timer_cbfn_t)(void *);
+
+/*
+ * BFA timer data structure
+ */
+struct bfa_timer_s {
+	struct list_head	qe;
+	bfa_timer_cbfn_t timercb;
+	void		*arg;
+	int		timeout;	/* in millisecs */
+};
+
+/*
+ * Timer module structure
+ */
+struct bfa_timer_mod_s {
+	struct list_head timer_q;
+};
+
+#define BFA_TIMER_FREQ 200 /* specified in millisecs */
+
+void bfa_timer_beat(struct bfa_timer_mod_s *mod);
+void bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
+			bfa_timer_cbfn_t timercb, void *arg,
+			unsigned int timeout);
+void bfa_timer_stop(struct bfa_timer_s *timer);
+
+/*
+ * Generic Scatter Gather Element used by driver
+ */
+struct bfa_sge_s {
+	u32	sg_len;
+	void		*sg_addr;
+};
+
+#define bfa_sge_word_swap(__sge) do {					     \
+	((u32 *)(__sge))[0] = swab32(((u32 *)(__sge))[0]);      \
+	((u32 *)(__sge))[1] = swab32(((u32 *)(__sge))[1]);      \
+	((u32 *)(__sge))[2] = swab32(((u32 *)(__sge))[2]);      \
+} while (0)
+
+#define bfa_swap_words(_x)  (	\
+	((u64)(_x) << 32) | ((u64)(_x) >> 32))
+
+#ifdef __BIG_ENDIAN
+#define bfa_sge_to_be(_x)
+#define bfa_sge_to_le(_x)	bfa_sge_word_swap(_x)
+#define bfa_sgaddr_le(_x)	bfa_swap_words(_x)
+#else
+#define	bfa_sge_to_be(_x)	bfa_sge_word_swap(_x)
+#define bfa_sge_to_le(_x)
+#define bfa_sgaddr_le(_x)	(_x)
+#endif
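+
+/*
+ * Example (illustrative): bfa_swap_words(0x1122334455667788ULL) yields
+ * 0x5566778811223344ULL - the two 32-bit halves trade places. Only the
+ * conversions that disagree with the host byte order expand to real
+ * swaps; the other direction compiles away to nothing.
+ */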
+
+/*
+ * BFA memory resources
+ */
+struct bfa_mem_dma_s {
+	struct list_head qe;		/* Queue of DMA elements */
+	u32		mem_len;	/* Total Length in Bytes */
+	u8		*kva;		/* kernel virtual address */
+	u64		dma;		/* dma address if DMA memory */
+	u8		*kva_curp;	/* kva allocation cursor */
+	u64		dma_curp;	/* dma allocation cursor */
+};
+#define bfa_mem_dma_t struct bfa_mem_dma_s
+
+struct bfa_mem_kva_s {
+	struct list_head qe;		/* Queue of KVA elements */
+	u32		mem_len;	/* Total Length in Bytes */
+	u8		*kva;		/* kernel virtual address */
+	u8		*kva_curp;	/* kva allocation cursor */
+};
+#define bfa_mem_kva_t struct bfa_mem_kva_s
+
+struct bfa_meminfo_s {
+	struct bfa_mem_dma_s dma_info;
+	struct bfa_mem_kva_s kva_info;
+};
+
+/* BFA memory segment setup helpers */
+static inline void bfa_mem_dma_setup(struct bfa_meminfo_s *meminfo,
+				     struct bfa_mem_dma_s *dm_ptr,
+				     size_t seg_sz)
+{
+	dm_ptr->mem_len = seg_sz;
+	if (seg_sz)
+		list_add_tail(&dm_ptr->qe, &meminfo->dma_info.qe);
+}
+
+static inline void bfa_mem_kva_setup(struct bfa_meminfo_s *meminfo,
+				     struct bfa_mem_kva_s *kva_ptr,
+				     size_t seg_sz)
+{
+	kva_ptr->mem_len = seg_sz;
+	if (seg_sz)
+		list_add_tail(&kva_ptr->qe, &meminfo->kva_info.qe);
+}
+
+/* BFA dma memory segments iterator */
+#define bfa_mem_dma_sptr(_mod, _i)	(&(_mod)->dma_seg[(_i)])
+#define bfa_mem_dma_seg_iter(_mod, _sptr, _nr, _i)			\
+	for (_i = 0, _sptr = bfa_mem_dma_sptr(_mod, _i); _i < (_nr);	\
+	     _i++, _sptr = bfa_mem_dma_sptr(_mod, _i))
+
+#define bfa_mem_kva_curp(_mod)	((_mod)->kva_seg.kva_curp)
+#define bfa_mem_dma_virt(_sptr)	((_sptr)->kva_curp)
+#define bfa_mem_dma_phys(_sptr)	((_sptr)->dma_curp)
+#define bfa_mem_dma_len(_sptr)	((_sptr)->mem_len)
+
+/* Get the corresponding dma buf kva for a req - from the tag */
+#define bfa_mem_get_dmabuf_kva(_mod, _tag, _rqsz)			      \
+	(((u8 *)(_mod)->dma_seg[BFI_MEM_SEG_FROM_TAG(_tag, _rqsz)].kva_curp) +\
+	 BFI_MEM_SEG_REQ_OFFSET(_tag, _rqsz) * (_rqsz))
+
+/* Get the corresponding dma buf pa for a req - from the tag */
+#define bfa_mem_get_dmabuf_pa(_mod, _tag, _rqsz)			\
+	((_mod)->dma_seg[BFI_MEM_SEG_FROM_TAG(_tag, _rqsz)].dma_curp +	\
+	 BFI_MEM_SEG_REQ_OFFSET(_tag, _rqsz) * (_rqsz))
+
+/*
+ * PCI device information required by IOC
+ */
+struct bfa_pcidev_s {
+	int		pci_slot;
+	u8		pci_func;
+	u16		device_id;
+	u16		ssid;
+	void __iomem	*pci_bar_kva;
+};
+
+/*
+ * Structure used to remember the DMA-able memory block's KVA and Physical
+ * Address
+ */
+struct bfa_dma_s {
+	void		*kva;	/* ! Kernel virtual address	*/
+	u64	pa;	/* ! Physical address		*/
+};
+
+#define BFA_DMA_ALIGN_SZ	256
+#define BFA_ROUNDUP(_l, _s)	(((_l) + ((_s) - 1)) & ~((_s) - 1))
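+
+/*
+ * Example (illustrative): BFA_ROUNDUP(100, 64) = (100 + 63) & ~63 = 128.
+ * The 8 KB DMA buffers claimed by the diag/phy/fru modules are already
+ * multiples of BFA_DMA_ALIGN_SZ, so BFA_ROUNDUP leaves them unchanged.
+ */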
+
+/*
+ * smem size for Crossbow and Catapult
+ */
+#define BFI_SMEM_CB_SIZE	0x200000U	/* ! 2MB for crossbow	*/
+#define BFI_SMEM_CT_SIZE	0x280000U	/* ! 2.5MB for catapult	*/
+
+#define bfa_dma_be_addr_set(dma_addr, pa)	\
+		__bfa_dma_be_addr_set(&dma_addr, (u64)pa)
+static inline void
+__bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa)
+{
+	dma_addr->a32.addr_lo = cpu_to_be32(pa);
+	dma_addr->a32.addr_hi = cpu_to_be32(pa >> 32);
+}
+
+#define bfa_alen_set(__alen, __len, __pa)	\
+	__bfa_alen_set(__alen, __len, (u64)__pa)
+
+static inline void
+__bfa_alen_set(struct bfi_alen_s *alen, u32 len, u64 pa)
+{
+	alen->al_len = cpu_to_be32(len);
+	bfa_dma_be_addr_set(alen->al_addr, pa);
+}
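+
+/*
+ * Example (illustrative): for pa = 0x0000000112345678ULL,
+ * __bfa_dma_be_addr_set() stores cpu_to_be32(0x12345678) in addr_lo and
+ * cpu_to_be32(0x1) in addr_hi, so firmware sees the 64-bit address
+ * big-endian regardless of host byte order.
+ */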
+
+struct bfa_ioc_regs_s {
+	void __iomem *hfn_mbox_cmd;
+	void __iomem *hfn_mbox;
+	void __iomem *lpu_mbox_cmd;
+	void __iomem *lpu_mbox;
+	void __iomem *lpu_read_stat;
+	void __iomem *pss_ctl_reg;
+	void __iomem *pss_err_status_reg;
+	void __iomem *app_pll_fast_ctl_reg;
+	void __iomem *app_pll_slow_ctl_reg;
+	void __iomem *ioc_sem_reg;
+	void __iomem *ioc_usage_sem_reg;
+	void __iomem *ioc_init_sem_reg;
+	void __iomem *ioc_usage_reg;
+	void __iomem *host_page_num_fn;
+	void __iomem *heartbeat;
+	void __iomem *ioc_fwstate;
+	void __iomem *alt_ioc_fwstate;
+	void __iomem *ll_halt;
+	void __iomem *alt_ll_halt;
+	void __iomem *err_set;
+	void __iomem *ioc_fail_sync;
+	void __iomem *shirq_isr_next;
+	void __iomem *shirq_msk_next;
+	void __iomem *smem_page_start;
+	u32	smem_pg0;
+};
+
+#define bfa_mem_read(_raddr, _off)	swab32(readl(((_raddr) + (_off))))
+#define bfa_mem_write(_raddr, _off, _val)	\
+			writel(swab32((_val)), ((_raddr) + (_off)))
+/*
+ * IOC Mailbox structures
+ */
+struct bfa_mbox_cmd_s {
+	struct list_head	qe;
+	u32	msg[BFI_IOC_MSGSZ];
+};
+
+/*
+ * IOC mailbox module
+ */
+typedef void (*bfa_ioc_mbox_mcfunc_t)(void *cbarg, struct bfi_mbmsg_s *m);
+struct bfa_ioc_mbox_mod_s {
+	struct list_head		cmd_q;	/*  pending mbox queue	*/
+	int			nmclass;	/*  number of handlers */
+	struct {
+		bfa_ioc_mbox_mcfunc_t	cbfn;	/*  message handlers	*/
+		void			*cbarg;
+	} mbhdlr[BFI_MC_MAX];
+};
+
+/*
+ * IOC callback function interfaces
+ */
+typedef void (*bfa_ioc_enable_cbfn_t)(void *bfa, enum bfa_status status);
+typedef void (*bfa_ioc_disable_cbfn_t)(void *bfa);
+typedef void (*bfa_ioc_hbfail_cbfn_t)(void *bfa);
+typedef void (*bfa_ioc_reset_cbfn_t)(void *bfa);
+struct bfa_ioc_cbfn_s {
+	bfa_ioc_enable_cbfn_t	enable_cbfn;
+	bfa_ioc_disable_cbfn_t	disable_cbfn;
+	bfa_ioc_hbfail_cbfn_t	hbfail_cbfn;
+	bfa_ioc_reset_cbfn_t	reset_cbfn;
+};
+
+/*
+ * IOC event notification mechanism.
+ */
+enum bfa_ioc_event_e {
+	BFA_IOC_E_ENABLED	= 1,
+	BFA_IOC_E_DISABLED	= 2,
+	BFA_IOC_E_FAILED	= 3,
+};
+
+typedef void (*bfa_ioc_notify_cbfn_t)(void *, enum bfa_ioc_event_e);
+
+struct bfa_ioc_notify_s {
+	struct list_head		qe;
+	bfa_ioc_notify_cbfn_t	cbfn;
+	void			*cbarg;
+};
+
+/*
+ * Initialize a IOC event notification structure
+ */
+#define bfa_ioc_notify_init(__notify, __cbfn, __cbarg) do {	\
+	(__notify)->cbfn = (__cbfn);      \
+	(__notify)->cbarg = (__cbarg);      \
+} while (0)
+
+struct bfa_iocpf_s {
+	bfa_fsm_t		fsm;
+	struct bfa_ioc_s	*ioc;
+	bfa_boolean_t		fw_mismatch_notified;
+	bfa_boolean_t		auto_recover;
+	u32			poll_time;
+};
+
+struct bfa_ioc_s {
+	bfa_fsm_t		fsm;
+	struct bfa_s		*bfa;
+	struct bfa_pcidev_s	pcidev;
+	struct bfa_timer_mod_s	*timer_mod;
+	struct bfa_timer_s	ioc_timer;
+	struct bfa_timer_s	sem_timer;
+	struct bfa_timer_s	hb_timer;
+	u32		hb_count;
+	struct list_head	notify_q;
+	void			*dbg_fwsave;
+	int			dbg_fwsave_len;
+	bfa_boolean_t		dbg_fwsave_once;
+	enum bfi_pcifn_class	clscode;
+	struct bfa_ioc_regs_s	ioc_regs;
+	struct bfa_trc_mod_s	*trcmod;
+	struct bfa_ioc_drv_stats_s	stats;
+	bfa_boolean_t		fcmode;
+	bfa_boolean_t		pllinit;
+	bfa_boolean_t		stats_busy;	/*  outstanding stats */
+	u8			port_id;
+	struct bfa_dma_s	attr_dma;
+	struct bfi_ioc_attr_s	*attr;
+	struct bfa_ioc_cbfn_s	*cbfn;
+	struct bfa_ioc_mbox_mod_s mbox_mod;
+	struct bfa_ioc_hwif_s	*ioc_hwif;
+	struct bfa_iocpf_s	iocpf;
+	enum bfi_asic_gen	asic_gen;
+	enum bfi_asic_mode	asic_mode;
+	enum bfi_port_mode	port0_mode;
+	enum bfi_port_mode	port1_mode;
+	enum bfa_mode_s		port_mode;
+	u8			ad_cap_bm;	/* adapter cap bit mask */
+	u8			port_mode_cfg;	/* config port mode */
+	int			ioc_aen_seq;
+};
+
+struct bfa_ioc_hwif_s {
+	bfa_status_t (*ioc_pll_init) (void __iomem *rb, enum bfi_asic_mode m);
+	bfa_boolean_t	(*ioc_firmware_lock)	(struct bfa_ioc_s *ioc);
+	void		(*ioc_firmware_unlock)	(struct bfa_ioc_s *ioc);
+	void		(*ioc_reg_init)	(struct bfa_ioc_s *ioc);
+	void		(*ioc_map_port)	(struct bfa_ioc_s *ioc);
+	void		(*ioc_isr_mode_set)	(struct bfa_ioc_s *ioc,
+					bfa_boolean_t msix);
+	void		(*ioc_notify_fail)	(struct bfa_ioc_s *ioc);
+	void		(*ioc_ownership_reset)	(struct bfa_ioc_s *ioc);
+	bfa_boolean_t   (*ioc_sync_start)       (struct bfa_ioc_s *ioc);
+	void		(*ioc_sync_join)	(struct bfa_ioc_s *ioc);
+	void		(*ioc_sync_leave)	(struct bfa_ioc_s *ioc);
+	void		(*ioc_sync_ack)		(struct bfa_ioc_s *ioc);
+	bfa_boolean_t	(*ioc_sync_complete)	(struct bfa_ioc_s *ioc);
+	bfa_boolean_t	(*ioc_lpu_read_stat)	(struct bfa_ioc_s *ioc);
+	void		(*ioc_set_fwstate)	(struct bfa_ioc_s *ioc,
+					enum bfi_ioc_state fwstate);
+	enum bfi_ioc_state	(*ioc_get_fwstate)	(struct bfa_ioc_s *ioc);
+	void		(*ioc_set_alt_fwstate)	(struct bfa_ioc_s *ioc,
+					enum bfi_ioc_state fwstate);
+	enum bfi_ioc_state	(*ioc_get_alt_fwstate)	(struct bfa_ioc_s *ioc);
+};
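+
+/*
+ * The hwif structure is a per-ASIC ops table: bfa_ioc_set_cb_hwif(),
+ * bfa_ioc_set_ct_hwif() and bfa_ioc_set_ct2_hwif() populate one and
+ * point ioc->ioc_hwif at it, so common IOC code can dispatch without
+ * knowing the ASIC generation, e.g. (illustrative only):
+ *
+ *	ioc->ioc_hwif->ioc_reg_init(ioc);
+ */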
+
+/*
+ * Queue element to wait for room in the request queue. FIFO order is
+ * maintained when fulfilling requests.
+ */
+struct bfa_reqq_wait_s {
+	struct list_head	qe;
+	void	(*qresume) (void *cbarg);
+	void	*cbarg;
+};
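+
+/*
+ * Illustrative usage (not part of the driver): when the request queue is
+ * full, a module parks one of these elements so its send can be resumed
+ * once space opens up; mod_qresume() here is a hypothetical callback:
+ *
+ *	mod->reqq_wait.qresume = mod_qresume;
+ *	mod->reqq_wait.cbarg = mod;
+ */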
+
+typedef void	(*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete);
+
+/*
+ * Generic BFA callback element.
+ */
+struct bfa_cb_qe_s {
+	struct list_head	qe;
+	bfa_cb_cbfn_t	cbfn;
+	bfa_boolean_t	once;
+	bfa_boolean_t	pre_rmv;	/* set for stack based qe(s) */
+	bfa_status_t	fw_status;	/* to access fw status in comp proc */
+	void		*cbarg;
+};
+
+/*
+ * IOCFC state machine definitions/declarations
+ */
+enum iocfc_event {
+	IOCFC_E_INIT		= 1,	/* IOCFC init request		*/
+	IOCFC_E_START		= 2,	/* IOCFC mod start request	*/
+	IOCFC_E_STOP		= 3,	/* IOCFC stop request		*/
+	IOCFC_E_ENABLE		= 4,	/* IOCFC enable request		*/
+	IOCFC_E_DISABLE		= 5,	/* IOCFC disable request	*/
+	IOCFC_E_IOC_ENABLED	= 6,	/* IOC enabled message		*/
+	IOCFC_E_IOC_DISABLED	= 7,	/* IOC disabled message		*/
+	IOCFC_E_IOC_FAILED	= 8,	/* failure notice by IOC sm	*/
+	IOCFC_E_DCONF_DONE	= 9,	/* dconf read/write done	*/
+	IOCFC_E_CFG_DONE	= 10,	/* IOCFC config complete	*/
+};
+
+/*
+ * ASIC block configuration related
+ */
+
+typedef void (*bfa_ablk_cbfn_t)(void *, enum bfa_status);
+
+struct bfa_ablk_s {
+	struct bfa_ioc_s	*ioc;
+	struct bfa_ablk_cfg_s	*cfg;
+	u16			*pcifn;
+	struct bfa_dma_s	dma_addr;
+	bfa_boolean_t		busy;
+	struct bfa_mbox_cmd_s	mb;
+	bfa_ablk_cbfn_t		cbfn;
+	void			*cbarg;
+	struct bfa_ioc_notify_s	ioc_notify;
+	struct bfa_mem_dma_s	ablk_dma;
+};
+#define BFA_MEM_ABLK_DMA(__bfa)		(&((__bfa)->modules.ablk.ablk_dma))
+
+/*
+ *	SFP module specific
+ */
+typedef void	(*bfa_cb_sfp_t) (void *cbarg, bfa_status_t status);
+
+struct bfa_sfp_s {
+	void	*dev;
+	struct bfa_ioc_s	*ioc;
+	struct bfa_trc_mod_s	*trcmod;
+	struct sfp_mem_s	*sfpmem;
+	bfa_cb_sfp_t		cbfn;
+	void			*cbarg;
+	enum bfi_sfp_mem_e	memtype; /* mem access type   */
+	u32			status;
+	struct bfa_mbox_cmd_s	mbcmd;
+	u8			*dbuf_kva; /* dma buf virtual address */
+	u64			dbuf_pa;   /* dma buf physical address */
+	struct bfa_ioc_notify_s	ioc_notify;
+	enum bfa_defs_sfp_media_e *media;
+	enum bfa_port_speed	portspeed;
+	bfa_cb_sfp_t		state_query_cbfn;
+	void			*state_query_cbarg;
+	u8			lock;
+	u8			data_valid; /* data in dbuf is valid */
+	u8			state;	    /* sfp state  */
+	u8			state_query_lock;
+	struct bfa_mem_dma_s	sfp_dma;
+	u8			is_elb;	    /* eloopback  */
+};
+
+#define BFA_SFP_MOD(__bfa)	(&(__bfa)->modules.sfp)
+#define BFA_MEM_SFP_DMA(__bfa)	(&(BFA_SFP_MOD(__bfa)->sfp_dma))
+
+u32	bfa_sfp_meminfo(void);
+
+void	bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc,
+			void *dev, struct bfa_trc_mod_s *trcmod);
+
+void	bfa_sfp_memclaim(struct bfa_sfp_s *sfp, u8 *dm_kva, u64 dm_pa);
+void	bfa_sfp_intr(void *bfaarg, struct bfi_mbmsg_s *msg);
+
+bfa_status_t	bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem,
+			     bfa_cb_sfp_t cbfn, void *cbarg);
+
+bfa_status_t	bfa_sfp_media(struct bfa_sfp_s *sfp,
+			enum bfa_defs_sfp_media_e *media,
+			bfa_cb_sfp_t cbfn, void *cbarg);
+
+bfa_status_t	bfa_sfp_speed(struct bfa_sfp_s *sfp,
+			enum bfa_port_speed portspeed,
+			bfa_cb_sfp_t cbfn, void *cbarg);
+
+/*
+ *	Flash module specific
+ */
+typedef void	(*bfa_cb_flash_t) (void *cbarg, bfa_status_t status);
+
+struct bfa_flash_s {
+	struct bfa_ioc_s *ioc;		/* back pointer to ioc */
+	struct bfa_trc_mod_s *trcmod;
+	u32		type;           /* partition type */
+	u8		instance;       /* partition instance */
+	u8		rsv[3];
+	u32		op_busy;        /*  operation busy flag */
+	u32		residue;        /*  residual length */
+	u32		offset;         /*  offset */
+	bfa_status_t	status;         /*  status */
+	u8		*dbuf_kva;      /*  dma buf virtual address */
+	u64		dbuf_pa;        /*  dma buf physical address */
+	struct bfa_reqq_wait_s	reqq_wait; /*  to wait for room in reqq */
+	bfa_cb_flash_t	cbfn;           /*  user callback function */
+	void		*cbarg;         /*  user callback arg */
+	u8		*ubuf;          /*  user supplied buffer */
+	struct bfa_cb_qe_s	hcb_qe; /*  comp: BFA callback qelem */
+	u32		addr_off;       /*  partition address offset */
+	struct bfa_mbox_cmd_s	mb;       /*  mailbox */
+	struct bfa_ioc_notify_s	ioc_notify; /*  ioc event notify */
+	struct bfa_mem_dma_s	flash_dma;
+};
+
+#define BFA_FLASH(__bfa)		(&(__bfa)->modules.flash)
+#define BFA_MEM_FLASH_DMA(__bfa)	(&(BFA_FLASH(__bfa)->flash_dma))
+
+bfa_status_t bfa_flash_get_attr(struct bfa_flash_s *flash,
+			struct bfa_flash_attr_s *attr,
+			bfa_cb_flash_t cbfn, void *cbarg);
+bfa_status_t bfa_flash_erase_part(struct bfa_flash_s *flash,
+			enum bfa_flash_part_type type, u8 instance,
+			bfa_cb_flash_t cbfn, void *cbarg);
+bfa_status_t bfa_flash_update_part(struct bfa_flash_s *flash,
+			enum bfa_flash_part_type type, u8 instance,
+			void *buf, u32 len, u32 offset,
+			bfa_cb_flash_t cbfn, void *cbarg);
+bfa_status_t bfa_flash_read_part(struct bfa_flash_s *flash,
+			enum bfa_flash_part_type type, u8 instance, void *buf,
+			u32 len, u32 offset, bfa_cb_flash_t cbfn, void *cbarg);
+u32	bfa_flash_meminfo(bfa_boolean_t mincfg);
+void bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc,
+		void *dev, struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg);
+void bfa_flash_memclaim(struct bfa_flash_s *flash,
+		u8 *dm_kva, u64 dm_pa, bfa_boolean_t mincfg);
+bfa_status_t    bfa_flash_raw_read(void __iomem *pci_bar_kva,
+				u32 offset, char *buf, u32 len);
+
+/*
+ *	DIAG module specific
+ */
+
+typedef void (*bfa_cb_diag_t) (void *cbarg, bfa_status_t status);
+typedef void (*bfa_cb_diag_beacon_t) (void *dev, bfa_boolean_t beacon,
+			bfa_boolean_t link_e2e_beacon);
+
+/*
+ *      Firmware ping test results
+ */
+struct bfa_diag_results_fwping {
+	u32     data;   /* store the corrupted data */
+	u32     status;
+	u32     dmastatus;
+	u8      rsvd[4];
+};
+
+struct bfa_diag_qtest_result_s {
+	u32	status;
+	u16	count;	/* successful queue test count */
+	u8	queue;
+	u8	rsvd;	/* 64-bit align */
+};
+
+/*
+ * Firmware ping test tracking structure
+ */
+struct bfa_diag_fwping_s {
+	struct bfa_diag_results_fwping *result;
+	bfa_cb_diag_t  cbfn;
+	void            *cbarg;
+	u32             data;
+	u8              lock;
+	u8              rsv[3];
+	u32             status;
+	u32             count;
+	struct bfa_mbox_cmd_s   mbcmd;
+	u8              *dbuf_kva;      /* dma buf virtual address */
+	u64             dbuf_pa;        /* dma buf physical address */
+};
+
+/*
+ *      Temperature sensor query results
+ */
+struct bfa_diag_results_tempsensor_s {
+	u32     status;
+	u16     temp;           /* 10-bit A/D value */
+	u16     brd_temp;       /* 9-bit board temp */
+	u8      ts_junc;        /* show junction tempsensor   */
+	u8      ts_brd;         /* show board tempsensor      */
+	u8      rsvd[6];        /* keep 8 bytes alignment     */
+};
+
+struct bfa_diag_tsensor_s {
+	bfa_cb_diag_t   cbfn;
+	void            *cbarg;
+	struct bfa_diag_results_tempsensor_s *temp;
+	u8              lock;
+	u8              rsv[3];
+	u32             status;
+	struct bfa_mbox_cmd_s   mbcmd;
+};
+
+struct bfa_diag_sfpshow_s {
+	struct sfp_mem_s        *sfpmem;
+	bfa_cb_diag_t           cbfn;
+	void                    *cbarg;
+	u8      lock;
+	u8      static_data;
+	u8      rsv[2];
+	u32     status;
+	struct bfa_mbox_cmd_s    mbcmd;
+	u8      *dbuf_kva;      /* dma buf virtual address */
+	u64     dbuf_pa;        /* dma buf physical address */
+};
+
+struct bfa_diag_led_s {
+	struct bfa_mbox_cmd_s   mbcmd;
+	bfa_boolean_t   lock;   /* 1: ledtest is operating */
+};
+
+struct bfa_diag_beacon_s {
+	struct bfa_mbox_cmd_s   mbcmd;
+	bfa_boolean_t   state;          /* port beacon state */
+	bfa_boolean_t   link_e2e;       /* link beacon state */
+};
+
+struct bfa_diag_s {
+	void	*dev;
+	struct bfa_ioc_s		*ioc;
+	struct bfa_trc_mod_s		*trcmod;
+	struct bfa_diag_fwping_s	fwping;
+	struct bfa_diag_tsensor_s	tsensor;
+	struct bfa_diag_sfpshow_s	sfpshow;
+	struct bfa_diag_led_s		ledtest;
+	struct bfa_diag_beacon_s	beacon;
+	void	*result;
+	struct bfa_timer_s timer;
+	bfa_cb_diag_beacon_t  cbfn_beacon;
+	bfa_cb_diag_t  cbfn;
+	void		*cbarg;
+	u8		block;
+	u8		timer_active;
+	u8		rsvd[2];
+	u32		status;
+	struct bfa_ioc_notify_s	ioc_notify;
+	struct bfa_mem_dma_s	diag_dma;
+};
+
+#define BFA_DIAG_MOD(__bfa)     (&(__bfa)->modules.diag_mod)
+#define BFA_MEM_DIAG_DMA(__bfa) (&(BFA_DIAG_MOD(__bfa)->diag_dma))
+
+u32	bfa_diag_meminfo(void);
+void bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa);
+void bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev,
+		     bfa_cb_diag_beacon_t cbfn_beacon,
+		     struct bfa_trc_mod_s *trcmod);
+bfa_status_t	bfa_diag_reg_read(struct bfa_diag_s *diag, u32 offset,
+			u32 len, u32 *buf, u32 force);
+bfa_status_t	bfa_diag_reg_write(struct bfa_diag_s *diag, u32 offset,
+			u32 len, u32 value, u32 force);
+bfa_status_t	bfa_diag_tsensor_query(struct bfa_diag_s *diag,
+			struct bfa_diag_results_tempsensor_s *result,
+			bfa_cb_diag_t cbfn, void *cbarg);
+bfa_status_t	bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt,
+			u32 pattern, struct bfa_diag_results_fwping *result,
+			bfa_cb_diag_t cbfn, void *cbarg);
+bfa_status_t	bfa_diag_sfpshow(struct bfa_diag_s *diag,
+			struct sfp_mem_s *sfpmem, u8 static_data,
+			bfa_cb_diag_t cbfn, void *cbarg);
+bfa_status_t	bfa_diag_memtest(struct bfa_diag_s *diag,
+			struct bfa_diag_memtest_s *memtest, u32 pattern,
+			struct bfa_diag_memtest_result *result,
+			bfa_cb_diag_t cbfn, void *cbarg);
+bfa_status_t	bfa_diag_ledtest(struct bfa_diag_s *diag,
+			struct bfa_diag_ledtest_s *ledtest);
+bfa_status_t	bfa_diag_beacon_port(struct bfa_diag_s *diag,
+			bfa_boolean_t beacon, bfa_boolean_t link_e2e_beacon,
+			u32 sec);
+
+/*
+ *	PHY module specific
+ */
+typedef void (*bfa_cb_phy_t) (void *cbarg, bfa_status_t status);
+
+struct bfa_phy_s {
+	struct bfa_ioc_s *ioc;          /* back pointer to ioc */
+	struct bfa_trc_mod_s *trcmod;   /* trace module */
+	u8	instance;       /* port instance */
+	u8	op_busy;        /* operation busy flag */
+	u8	rsv[2];
+	u32	residue;        /* residual length */
+	u32	offset;         /* offset */
+	bfa_status_t	status;         /* status */
+	u8	*dbuf_kva;      /* dma buf virtual address */
+	u64	dbuf_pa;        /* dma buf physical address */
+	struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
+	bfa_cb_phy_t	cbfn;           /* user callback function */
+	void		*cbarg;         /* user callback arg */
+	u8		*ubuf;          /* user supplied buffer */
+	struct bfa_cb_qe_s	hcb_qe; /* comp: BFA callback qelem */
+	u32	addr_off;       /* phy address offset */
+	struct bfa_mbox_cmd_s	mb;       /* mailbox */
+	struct bfa_ioc_notify_s	ioc_notify; /* ioc event notify */
+	struct bfa_mem_dma_s	phy_dma;
+};
+#define BFA_PHY(__bfa)	(&(__bfa)->modules.phy)
+#define BFA_MEM_PHY_DMA(__bfa)	(&(BFA_PHY(__bfa)->phy_dma))
+
+bfa_boolean_t bfa_phy_busy(struct bfa_ioc_s *ioc);
+bfa_status_t bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance,
+			struct bfa_phy_attr_s *attr,
+			bfa_cb_phy_t cbfn, void *cbarg);
+bfa_status_t bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance,
+			struct bfa_phy_stats_s *stats,
+			bfa_cb_phy_t cbfn, void *cbarg);
+bfa_status_t bfa_phy_update(struct bfa_phy_s *phy, u8 instance,
+			void *buf, u32 len, u32 offset,
+			bfa_cb_phy_t cbfn, void *cbarg);
+bfa_status_t bfa_phy_read(struct bfa_phy_s *phy, u8 instance,
+			void *buf, u32 len, u32 offset,
+			bfa_cb_phy_t cbfn, void *cbarg);
+
+u32	bfa_phy_meminfo(bfa_boolean_t mincfg);
+void bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc,
+		void *dev, struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg);
+void bfa_phy_memclaim(struct bfa_phy_s *phy,
+		u8 *dm_kva, u64 dm_pa, bfa_boolean_t mincfg);
+void bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg);
+
+/*
+ * FRU module specific
+ */
+typedef void (*bfa_cb_fru_t) (void *cbarg, bfa_status_t status);
+
+struct bfa_fru_s {
+	struct bfa_ioc_s *ioc;		/* back pointer to ioc */
+	struct bfa_trc_mod_s *trcmod;	/* trace module */
+	u8		op_busy;	/* operation busy flag */
+	u8		rsv[3];
+	u32		residue;	/* residual length */
+	u32		offset;		/* offset */
+	bfa_status_t	status;		/* status */
+	u8		*dbuf_kva;	/* dma buf virtual address */
+	u64		dbuf_pa;	/* dma buf physical address */
+	struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
+	bfa_cb_fru_t	cbfn;		/* user callback function */
+	void		*cbarg;		/* user callback arg */
+	u8		*ubuf;		/* user supplied buffer */
+	struct bfa_cb_qe_s	hcb_qe;	/* comp: BFA callback qelem */
+	u32		addr_off;	/* fru address offset */
+	struct bfa_mbox_cmd_s mb;	/* mailbox */
+	struct bfa_ioc_notify_s ioc_notify; /* ioc event notify */
+	struct bfa_mem_dma_s	fru_dma;
+	u8		trfr_cmpl;
+};
+
+#define BFA_FRU(__bfa)	(&(__bfa)->modules.fru)
+#define BFA_MEM_FRU_DMA(__bfa)	(&(BFA_FRU(__bfa)->fru_dma))
+
+bfa_status_t bfa_fruvpd_update(struct bfa_fru_s *fru,
+			void *buf, u32 len, u32 offset,
+			bfa_cb_fru_t cbfn, void *cbarg, u8 trfr_cmpl);
+bfa_status_t bfa_fruvpd_read(struct bfa_fru_s *fru,
+			void *buf, u32 len, u32 offset,
+			bfa_cb_fru_t cbfn, void *cbarg);
+bfa_status_t bfa_fruvpd_get_max_size(struct bfa_fru_s *fru, u32 *max_size);
+bfa_status_t bfa_tfru_write(struct bfa_fru_s *fru,
+			void *buf, u32 len, u32 offset,
+			bfa_cb_fru_t cbfn, void *cbarg);
+bfa_status_t bfa_tfru_read(struct bfa_fru_s *fru,
+			void *buf, u32 len, u32 offset,
+			bfa_cb_fru_t cbfn, void *cbarg);
+u32	bfa_fru_meminfo(bfa_boolean_t mincfg);
+void bfa_fru_attach(struct bfa_fru_s *fru, struct bfa_ioc_s *ioc,
+		void *dev, struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg);
+void bfa_fru_memclaim(struct bfa_fru_s *fru,
+		u8 *dm_kva, u64 dm_pa, bfa_boolean_t mincfg);
+void bfa_fru_intr(void *fruarg, struct bfi_mbmsg_s *msg);
+
+/*
+ * Driver Config (dconf) specific
+ */
+#define BFI_DCONF_SIGNATURE	0xabcdabcd
+#define BFI_DCONF_VERSION	1
+
+#pragma pack(1)
+struct bfa_dconf_hdr_s {
+	u32	signature;
+	u32	version;
+};
+
+struct bfa_dconf_s {
+	struct bfa_dconf_hdr_s		hdr;
+	struct bfa_lunmask_cfg_s	lun_mask;
+	struct bfa_throttle_cfg_s	throttle_cfg;
+};
+#pragma pack()
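+
+/*
+ * Illustrative check (not the driver's actual code): a dconf blob read
+ * back from flash is trusted only if its header carries the expected
+ * signature and version, e.g.:
+ *
+ *	if (dconf->hdr.signature != BFI_DCONF_SIGNATURE ||
+ *	    dconf->hdr.version != BFI_DCONF_VERSION)
+ *		... treat the flash contents as uninitialized ...
+ */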
+
+struct bfa_dconf_mod_s {
+	bfa_sm_t		sm;
+	u8			instance;
+	bfa_boolean_t		read_data_valid;
+	bfa_boolean_t		min_cfg;
+	struct bfa_timer_s	timer;
+	struct bfa_s		*bfa;
+	void			*bfad;
+	void			*trcmod;
+	struct bfa_dconf_s	*dconf;
+	struct bfa_mem_kva_s	kva_seg;
+};
+
+#define BFA_DCONF_MOD(__bfa)	\
+	(&(__bfa)->modules.dconf_mod)
+#define BFA_MEM_DCONF_KVA(__bfa)	(&(BFA_DCONF_MOD(__bfa)->kva_seg))
+#define bfa_dconf_read_data_valid(__bfa)	\
+	(BFA_DCONF_MOD(__bfa)->read_data_valid)
+#define BFA_DCONF_UPDATE_TOV	5000	/* dconf update timeout in msec */
+#define bfa_dconf_get_min_cfg(__bfa)	\
+	(BFA_DCONF_MOD(__bfa)->min_cfg)
+
+void	bfa_dconf_modinit(struct bfa_s *bfa);
+void	bfa_dconf_modexit(struct bfa_s *bfa);
+bfa_status_t	bfa_dconf_update(struct bfa_s *bfa);
+
+/*
+ *	IOC specific macros
+ */
+#define bfa_ioc_pcifn(__ioc)		((__ioc)->pcidev.pci_func)
+#define bfa_ioc_devid(__ioc)		((__ioc)->pcidev.device_id)
+#define bfa_ioc_bar0(__ioc)		((__ioc)->pcidev.pci_bar_kva)
+#define bfa_ioc_portid(__ioc)		((__ioc)->port_id)
+#define bfa_ioc_asic_gen(__ioc)		((__ioc)->asic_gen)
+#define bfa_ioc_is_cna(__ioc)	\
+	((bfa_ioc_get_type(__ioc) == BFA_IOC_TYPE_FCoE) ||	\
+	 (bfa_ioc_get_type(__ioc) == BFA_IOC_TYPE_LL))
+#define bfa_ioc_fetch_stats(__ioc, __stats) \
+		(((__stats)->drv_stats) = (__ioc)->stats)
+#define bfa_ioc_clr_stats(__ioc)	\
+		memset(&(__ioc)->stats, 0, sizeof((__ioc)->stats))
+#define bfa_ioc_maxfrsize(__ioc)	((__ioc)->attr->maxfrsize)
+#define bfa_ioc_rx_bbcredit(__ioc)	((__ioc)->attr->rx_bbcredit)
+#define bfa_ioc_speed_sup(__ioc)	\
+	((bfa_ioc_is_cna(__ioc)) ? BFA_PORT_SPEED_10GBPS :	\
+	 BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop))
+#define bfa_ioc_get_nports(__ioc)	\
+	BFI_ADAPTER_GETP(NPORTS, (__ioc)->attr->adapter_prop)
+
+#define bfa_ioc_stats(_ioc, _stats)	((_ioc)->stats._stats++)
+#define BFA_IOC_FWIMG_MINSZ	(16 * 1024)
+#define BFA_IOC_FW_SMEM_SIZE(__ioc)			\
+	((bfa_ioc_asic_gen(__ioc) == BFI_ASIC_GEN_CB)	\
+	 ? BFI_SMEM_CB_SIZE : BFI_SMEM_CT_SIZE)
+#define BFA_IOC_FLASH_CHUNK_NO(off)		((off) / BFI_FLASH_CHUNK_SZ_WORDS)
+#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off)	((off) % BFI_FLASH_CHUNK_SZ_WORDS)
+#define BFA_IOC_FLASH_CHUNK_ADDR(chunkno)  ((chunkno) * BFI_FLASH_CHUNK_SZ_WORDS)
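+
+/*
+ * Worked example (illustrative; assumes BFI_FLASH_CHUNK_SZ_WORDS == 256):
+ * word offset 600 maps to chunk 2 (600 / 256) at word 88 within the chunk
+ * (600 % 256), and BFA_IOC_FLASH_CHUNK_ADDR(2) returns the chunk's
+ * starting word offset, 512.
+ */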
+
+/*
+ * IOC mailbox interface
+ */
+void bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd);
+void bfa_ioc_mbox_register(struct bfa_ioc_s *ioc,
+		bfa_ioc_mbox_mcfunc_t *mcfuncs);
+void bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc);
+void bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len);
+bfa_boolean_t bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg);
+void bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
+		bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg);
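+
+/*
+ * Illustrative usage (not part of the driver): a sub-module registers a
+ * receive handler for its message class and posts commands through the
+ * pending queue; queued commands are drained to the firmware mailbox as
+ * slots free up. This assumes BFI_MC_SFP is the SFP message class
+ * defined in bfi.h:
+ *
+ *	bfa_ioc_mbox_regisr(ioc, BFI_MC_SFP, bfa_sfp_intr, sfp);
+ *	bfa_ioc_mbox_queue(ioc, &sfp->mbcmd);
+ */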
+
+/*
+ * IOC interfaces
+ */
+
+#define bfa_ioc_pll_init_asic(__ioc) \
+	((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \
+			   (__ioc)->asic_mode))
+
+bfa_status_t bfa_ioc_pll_init(struct bfa_ioc_s *ioc);
+bfa_status_t bfa_ioc_cb_pll_init(void __iomem *rb, enum bfi_asic_mode mode);
+bfa_status_t bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode mode);
+bfa_status_t bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode);
+
+#define bfa_ioc_isr_mode_set(__ioc, __msix) do {			\
+	if ((__ioc)->ioc_hwif->ioc_isr_mode_set)			\
+		((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix));	\
+} while (0)
+#define	bfa_ioc_ownership_reset(__ioc)				\
+			((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc))
+#define bfa_ioc_get_fcmode(__ioc)	((__ioc)->fcmode)
+#define bfa_ioc_lpu_read_stat(__ioc) do {			\
+	if ((__ioc)->ioc_hwif->ioc_lpu_read_stat)		\
+		((__ioc)->ioc_hwif->ioc_lpu_read_stat(__ioc));	\
+} while (0)
+
+void bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc);
+void bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc);
+void bfa_ioc_set_ct2_hwif(struct bfa_ioc_s *ioc);
+void bfa_ioc_ct2_poweron(struct bfa_ioc_s *ioc);
+
+void bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa,
+		struct bfa_ioc_cbfn_s *cbfn, struct bfa_timer_mod_s *timer_mod);
+void bfa_ioc_auto_recover(bfa_boolean_t auto_recover);
+void bfa_ioc_detach(struct bfa_ioc_s *ioc);
+void bfa_ioc_suspend(struct bfa_ioc_s *ioc);
+void bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
+		enum bfi_pcifn_class clscode);
+void bfa_ioc_mem_claim(struct bfa_ioc_s *ioc,  u8 *dm_kva, u64 dm_pa);
+void bfa_ioc_enable(struct bfa_ioc_s *ioc);
+void bfa_ioc_disable(struct bfa_ioc_s *ioc);
+bfa_boolean_t bfa_ioc_intx_claim(struct bfa_ioc_s *ioc);
+
+bfa_status_t bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type,
+		u32 boot_env);
+void bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *msg);
+void bfa_ioc_error_isr(struct bfa_ioc_s *ioc);
+bfa_boolean_t bfa_ioc_is_operational(struct bfa_ioc_s *ioc);
+bfa_boolean_t bfa_ioc_is_initialized(struct bfa_ioc_s *ioc);
+bfa_boolean_t bfa_ioc_is_disabled(struct bfa_ioc_s *ioc);
+bfa_boolean_t bfa_ioc_is_acq_addr(struct bfa_ioc_s *ioc);
+bfa_boolean_t bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc);
+bfa_boolean_t bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc);
+void bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc);
+enum bfa_ioc_type_e bfa_ioc_get_type(struct bfa_ioc_s *ioc);
+void bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num);
+void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver);
+void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver);
+void bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model);
+void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc,
+		char *manufacturer);
+void bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev);
+enum bfa_ioc_state bfa_ioc_get_state(struct bfa_ioc_s *ioc);
+
+void bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr);
+void bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
+		struct bfa_adapter_attr_s *ad_attr);
+void bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave);
+bfa_status_t bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata,
+		int *trclen);
+bfa_status_t bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata,
+				 int *trclen);
+bfa_status_t bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
+	u32 *offset, int *buflen);
+bfa_status_t bfa_ioc_fwsig_invalidate(struct bfa_ioc_s *ioc);
+bfa_boolean_t bfa_ioc_sem_get(void __iomem *sem_reg);
+void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc,
+			struct bfi_ioc_image_hdr_s *fwhdr);
+bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc,
+			struct bfi_ioc_image_hdr_s *fwhdr);
+void bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event);
+bfa_status_t bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats);
+bfa_status_t bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc);
+void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc);
+
+/*
+ * asic block configuration related APIs
+ */
+u32	bfa_ablk_meminfo(void);
+void bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa);
+void bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc);
+bfa_status_t bfa_ablk_query(struct bfa_ablk_s *ablk,
+		struct bfa_ablk_cfg_s *ablk_cfg,
+		bfa_ablk_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_ablk_adapter_config(struct bfa_ablk_s *ablk,
+		enum bfa_mode_s mode, int max_pf, int max_vf,
+		bfa_ablk_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port,
+		enum bfa_mode_s mode, int max_pf, int max_vf,
+		bfa_ablk_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
+		u8 port, enum bfi_pcifn_class personality,
+		u16 bw_min, u16 bw_max, bfa_ablk_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
+		bfa_ablk_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn,
+		u16 bw_min, u16 bw_max, bfa_ablk_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_ablk_optrom_en(struct bfa_ablk_s *ablk,
+		bfa_ablk_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk,
+		bfa_ablk_cbfn_t cbfn, void *cbarg);
+
+bfa_status_t bfa_ioc_flash_img_get_chnk(struct bfa_ioc_s *ioc, u32 off,
+				u32 *fwimg);
+/*
+ * bfa mfg wwn API functions
+ */
+mac_t bfa_ioc_get_mac(struct bfa_ioc_s *ioc);
+mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc);
+
+/*
+ * F/W Image Size & Chunk
+ */
+extern u32 bfi_image_cb_size;
+extern u32 bfi_image_ct_size;
+extern u32 bfi_image_ct2_size;
+extern u32 *bfi_image_cb;
+extern u32 *bfi_image_ct;
+extern u32 *bfi_image_ct2;
+
+static inline u32 *
+bfi_image_cb_get_chunk(u32 off)
+{
+	return (u32 *)(bfi_image_cb + off);
+}
+
+static inline u32 *
+bfi_image_ct_get_chunk(u32 off)
+{
+	return (u32 *)(bfi_image_ct + off);
+}
+
+static inline u32 *
+bfi_image_ct2_get_chunk(u32 off)
+{
+	return (u32 *)(bfi_image_ct2 + off);
+}
+
+static inline u32 *
+bfa_cb_image_get_chunk(enum bfi_asic_gen asic_gen, u32 off)
+{
+	switch (asic_gen) {
+	case BFI_ASIC_GEN_CB:
+		return bfi_image_cb_get_chunk(off);
+	case BFI_ASIC_GEN_CT:
+		return bfi_image_ct_get_chunk(off);
+	case BFI_ASIC_GEN_CT2:
+		return bfi_image_ct2_get_chunk(off);
+	default:
+		return NULL;
+	}
+}
+
+static inline u32
+bfa_cb_image_get_size(enum bfi_asic_gen asic_gen)
+{
+	switch (asic_gen) {
+	case BFI_ASIC_GEN_CB:
+		return bfi_image_cb_size;
+	case BFI_ASIC_GEN_CT:
+		return bfi_image_ct_size;
+	case BFI_ASIC_GEN_CT2:
+		return bfi_image_ct2_size;
+	default:
+		return 0;
+	}
+}
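+
+/*
+ * Illustrative usage (not part of the driver): callers size a firmware
+ * download with bfa_cb_image_get_size() and then walk the image in word
+ * offsets through bfa_cb_image_get_chunk():
+ *
+ *	u32 sz = bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc));
+ *	u32 *chunk = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), off);
+ */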
+
+/*
+ * CNA TRCMOD declaration
+ */
+/*
+ * !!! Only append to the enums defined here to avoid any versioning
+ * !!! needed between trace utility and driver version
+ */
+enum {
+	BFA_TRC_CNA_PORT	= 1,
+	BFA_TRC_CNA_IOC		= 2,
+	BFA_TRC_CNA_IOC_CB	= 3,
+	BFA_TRC_CNA_IOC_CT	= 4,
+};
+
+#endif /* __BFA_IOC_H__ */
diff --git a/drivers/scsi/bfa/bfa_ioc_cb.c b/drivers/scsi/bfa/bfa_ioc_cb.c
new file mode 100644
index 0000000..f1b80da
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_ioc_cb.c
@@ -0,0 +1,409 @@
+/*
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014- QLogic Corporation.
+ * All rights reserved
+ * www.qlogic.com
+ *
+ * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include "bfad_drv.h"
+#include "bfa_ioc.h"
+#include "bfi_reg.h"
+#include "bfa_defs.h"
+
+BFA_TRC_FILE(CNA, IOC_CB);
+
+#define bfa_ioc_cb_join_pos(__ioc) ((u32) (1 << BFA_IOC_CB_JOIN_SH))
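+
+/*
+ * Each IOC keeps a "join" flag in its own fwstate register, above the
+ * state value itself: BFA_IOC_CB_JOIN_SH/BFA_IOC_CB_JOIN_MASK select it,
+ * bfa_ioc_cb_sync_join()/_leave() below set and clear it, and the state
+ * accessors mask it out with BFA_IOC_CB_FWSTATE_MASK.
+ */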
+
+/*
+ * forward declarations
+ */
+static bfa_boolean_t bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
+static void bfa_ioc_cb_notify_fail(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc);
+static bfa_boolean_t bfa_ioc_cb_sync_start(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_sync_join(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_sync_leave(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_sync_ack(struct bfa_ioc_s *ioc);
+static bfa_boolean_t bfa_ioc_cb_sync_complete(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_set_cur_ioc_fwstate(
+			struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate);
+static enum bfi_ioc_state bfa_ioc_cb_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_set_alt_ioc_fwstate(
+			struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate);
+static enum bfi_ioc_state bfa_ioc_cb_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc);
+
+static struct bfa_ioc_hwif_s hwif_cb;
+
+/*
+ * Called from bfa_ioc_attach() to map asic specific calls.
+ */
+void
+bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc)
+{
+	hwif_cb.ioc_pll_init = bfa_ioc_cb_pll_init;
+	hwif_cb.ioc_firmware_lock = bfa_ioc_cb_firmware_lock;
+	hwif_cb.ioc_firmware_unlock = bfa_ioc_cb_firmware_unlock;
+	hwif_cb.ioc_reg_init = bfa_ioc_cb_reg_init;
+	hwif_cb.ioc_map_port = bfa_ioc_cb_map_port;
+	hwif_cb.ioc_isr_mode_set = bfa_ioc_cb_isr_mode_set;
+	hwif_cb.ioc_notify_fail = bfa_ioc_cb_notify_fail;
+	hwif_cb.ioc_ownership_reset = bfa_ioc_cb_ownership_reset;
+	hwif_cb.ioc_sync_start = bfa_ioc_cb_sync_start;
+	hwif_cb.ioc_sync_join = bfa_ioc_cb_sync_join;
+	hwif_cb.ioc_sync_leave = bfa_ioc_cb_sync_leave;
+	hwif_cb.ioc_sync_ack = bfa_ioc_cb_sync_ack;
+	hwif_cb.ioc_sync_complete = bfa_ioc_cb_sync_complete;
+	hwif_cb.ioc_set_fwstate = bfa_ioc_cb_set_cur_ioc_fwstate;
+	hwif_cb.ioc_get_fwstate = bfa_ioc_cb_get_cur_ioc_fwstate;
+	hwif_cb.ioc_set_alt_fwstate = bfa_ioc_cb_set_alt_ioc_fwstate;
+	hwif_cb.ioc_get_alt_fwstate = bfa_ioc_cb_get_alt_ioc_fwstate;
+
+	ioc->ioc_hwif = &hwif_cb;
+}
+
+/*
+ * Return true if firmware of current driver matches the running firmware.
+ */
+static bfa_boolean_t
+bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc)
+{
+	enum bfi_ioc_state alt_fwstate, cur_fwstate;
+	struct bfi_ioc_image_hdr_s fwhdr;
+
+	cur_fwstate = bfa_ioc_cb_get_cur_ioc_fwstate(ioc);
+	bfa_trc(ioc, cur_fwstate);
+	alt_fwstate = bfa_ioc_cb_get_alt_ioc_fwstate(ioc);
+	bfa_trc(ioc, alt_fwstate);
+
+	/*
+	 * Uninit implies this is the only driver as of now.
+	 */
+	if (cur_fwstate == BFI_IOC_UNINIT)
+		return BFA_TRUE;
+	/*
+	 * Check if another driver with a different firmware is active
+	 */
+	bfa_ioc_fwver_get(ioc, &fwhdr);
+	if (!bfa_ioc_fwver_cmp(ioc, &fwhdr) &&
+		alt_fwstate != BFI_IOC_DISABLED) {
+		bfa_trc(ioc, alt_fwstate);
+		return BFA_FALSE;
+	}
+
+	return BFA_TRUE;
+}
+
+static void
+bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc)
+{
+}
+
+/*
+ * Notify other functions on HB failure.
+ */
+static void
+bfa_ioc_cb_notify_fail(struct bfa_ioc_s *ioc)
+{
+	writel(~0U, ioc->ioc_regs.err_set);
+	readl(ioc->ioc_regs.err_set);
+}
+
+/*
+ * Host to LPU mailbox message addresses
+ */
+static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
+	{ HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
+	{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 }
+};
+
+/*
+ * Host <-> LPU mailbox command/status registers
+ */
+static struct { u32 hfn, lpu; } iocreg_mbcmd[] = {
+	{ HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
+	{ HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT }
+};
+
+static void
+bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc)
+{
+	void __iomem *rb;
+	int		pcifn = bfa_ioc_pcifn(ioc);
+
+	rb = bfa_ioc_bar0(ioc);
+
+	ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
+	ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
+	ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;
+
+	if (ioc->port_id == 0) {
+		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
+		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
+		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
+	} else {
+		ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
+		ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
+		ioc->ioc_regs.alt_ioc_fwstate = (rb + BFA_IOC0_STATE_REG);
+	}
+
+	/*
+	 * Host <-> LPU mailbox command/status registers
+	 */
+	ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd[pcifn].hfn;
+	ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd[pcifn].lpu;
+
+	/*
+	 * PSS control registers
+	 */
+	ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
+	ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
+	ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_LCLK_CTL_REG);
+	ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_SCLK_CTL_REG);
+
+	/*
+	 * IOC semaphore registers and serialization
+	 */
+	ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
+	ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
+
+	/*
+	 * sram memory access
+	 */
+	ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
+	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CB;
+
+	/*
+	 * err set reg : for notification of hb failure
+	 */
+	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
+}
+
+/*
+ * Initialize IOC to port mapping.
+ */
+static void
+bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc)
+{
+	/*
+	 * For crossbow, port id is same as pci function.
+	 */
+	ioc->port_id = bfa_ioc_pcifn(ioc);
+
+	bfa_trc(ioc, ioc->port_id);
+}
+
+/*
+ * Set interrupt mode for a function: INTX or MSIX
+ */
+static void
+bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
+{
+}
+
+/*
+ * Synchronized IOC failure processing routines
+ */
+static bfa_boolean_t
+bfa_ioc_cb_sync_start(struct bfa_ioc_s *ioc)
+{
+	u32 ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+
+	/*
+	 * Driver load time.  If the join bit is set,
+	 * it is due to an unclean exit by the driver for this
+	 * PCI fn in the previous incarnation. Whoever comes here first
+	 * should clean it up, no matter which PCI fn.
+	 */
+	if (ioc_fwstate & BFA_IOC_CB_JOIN_MASK) {
+		writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
+		writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
+		return BFA_TRUE;
+	}
+
+	return bfa_ioc_cb_sync_complete(ioc);
+}
+
+/*
+ * Cleanup hw semaphore and usecnt registers
+ */
+static void
+bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc)
+{
+	/*
+	 * Read the hw sem reg to make sure that it is locked
+	 * before we clear it. If it is not locked, writing 1
+	 * will lock it instead of clearing it.
+	 */
+	readl(ioc->ioc_regs.ioc_sem_reg);
+	writel(1, ioc->ioc_regs.ioc_sem_reg);
+}
+
+/*
+ * Synchronized IOC failure processing routines
+ */
+static void
+bfa_ioc_cb_sync_join(struct bfa_ioc_s *ioc)
+{
+	u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
+	u32 join_pos = bfa_ioc_cb_join_pos(ioc);
+
+	writel((r32 | join_pos), ioc->ioc_regs.ioc_fwstate);
+}
+
+static void
+bfa_ioc_cb_sync_leave(struct bfa_ioc_s *ioc)
+{
+	u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
+	u32 join_pos = bfa_ioc_cb_join_pos(ioc);
+
+	writel((r32 & ~join_pos), ioc->ioc_regs.ioc_fwstate);
+}
+
+static void
+bfa_ioc_cb_set_cur_ioc_fwstate(struct bfa_ioc_s *ioc,
+			enum bfi_ioc_state fwstate)
+{
+	u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
+
+	writel((fwstate | (r32 & BFA_IOC_CB_JOIN_MASK)),
+				ioc->ioc_regs.ioc_fwstate);
+}
+
+static enum bfi_ioc_state
+bfa_ioc_cb_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc)
+{
+	return (enum bfi_ioc_state)(readl(ioc->ioc_regs.ioc_fwstate) &
+			BFA_IOC_CB_FWSTATE_MASK);
+}
+
+static void
+bfa_ioc_cb_set_alt_ioc_fwstate(struct bfa_ioc_s *ioc,
+			enum bfi_ioc_state fwstate)
+{
+	u32 r32 = readl(ioc->ioc_regs.alt_ioc_fwstate);
+
+	writel((fwstate | (r32 & BFA_IOC_CB_JOIN_MASK)),
+				ioc->ioc_regs.alt_ioc_fwstate);
+}
+
+static enum bfi_ioc_state
+bfa_ioc_cb_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc)
+{
+	return (enum bfi_ioc_state)(readl(ioc->ioc_regs.alt_ioc_fwstate) &
+			BFA_IOC_CB_FWSTATE_MASK);
+}
+
+static void
+bfa_ioc_cb_sync_ack(struct bfa_ioc_s *ioc)
+{
+	bfa_ioc_cb_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
+}
+
+static bfa_boolean_t
+bfa_ioc_cb_sync_complete(struct bfa_ioc_s *ioc)
+{
+	u32 fwstate, alt_fwstate;
+
+	fwstate = bfa_ioc_cb_get_cur_ioc_fwstate(ioc);
+
+	/*
+	 * At this point, this IOC is holding the hw sem in the
+	 * start path (fwcheck) OR in the disable/enable path
+	 * OR to check if the other IOC has acknowledged failure.
+	 *
+	 * So, this IOC can be in UNINIT, INITING, DISABLED, FAIL
+	 * or in MEMTEST states. In a normal scenario, this IOC
+	 * cannot be in OP state when this function is called.
+	 *
+	 * However, this IOC could still be in OP state when
+	 * the OS driver is starting up, if the OptROM code has
+	 * left it in that state.
+	 *
+	 * If we had marked this IOC's fwstate as BFI_IOC_FAIL
+	 * in the failure case and now, if the fwstate is not
+	 * BFI_IOC_FAIL it implies that the other PCI fn has
+	 * reinitialized the ASIC or this IOC got disabled, so
+	 * return TRUE.
+	 */
+	if (fwstate == BFI_IOC_UNINIT ||
+		fwstate == BFI_IOC_INITING ||
+		fwstate == BFI_IOC_DISABLED ||
+		fwstate == BFI_IOC_MEMTEST ||
+		fwstate == BFI_IOC_OP)
+		return BFA_TRUE;
+
+	alt_fwstate = bfa_ioc_cb_get_alt_ioc_fwstate(ioc);
+	if (alt_fwstate == BFI_IOC_FAIL ||
+		alt_fwstate == BFI_IOC_DISABLED ||
+		alt_fwstate == BFI_IOC_UNINIT ||
+		alt_fwstate == BFI_IOC_INITING ||
+		alt_fwstate == BFI_IOC_MEMTEST)
+		return BFA_TRUE;
+
+	return BFA_FALSE;
+}
+
+bfa_status_t
+bfa_ioc_cb_pll_init(void __iomem *rb, enum bfi_asic_mode fcmode)
+{
+	u32	pll_sclk, pll_fclk, join_bits;
+
+	pll_sclk = __APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN |
+		__APP_PLL_SCLK_P0_1(3U) |
+		__APP_PLL_SCLK_JITLMT0_1(3U) |
+		__APP_PLL_SCLK_CNTLMT0_1(3U);
+	pll_fclk = __APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN |
+		__APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
+		__APP_PLL_LCLK_JITLMT0_1(3U) |
+		__APP_PLL_LCLK_CNTLMT0_1(3U);
+	join_bits = readl(rb + BFA_IOC0_STATE_REG) &
+			BFA_IOC_CB_JOIN_MASK;
+	writel((BFI_IOC_UNINIT | join_bits), (rb + BFA_IOC0_STATE_REG));
+	join_bits = readl(rb + BFA_IOC1_STATE_REG) &
+			BFA_IOC_CB_JOIN_MASK;
+	writel((BFI_IOC_UNINIT | join_bits), (rb + BFA_IOC1_STATE_REG));
+	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
+	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
+	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
+	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
+	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
+	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
+	writel(__APP_PLL_SCLK_LOGIC_SOFT_RESET, rb + APP_PLL_SCLK_CTL_REG);
+	writel(__APP_PLL_SCLK_BYPASS | __APP_PLL_SCLK_LOGIC_SOFT_RESET,
+			rb + APP_PLL_SCLK_CTL_REG);
+	writel(__APP_PLL_LCLK_LOGIC_SOFT_RESET, rb + APP_PLL_LCLK_CTL_REG);
+	writel(__APP_PLL_LCLK_BYPASS | __APP_PLL_LCLK_LOGIC_SOFT_RESET,
+			rb + APP_PLL_LCLK_CTL_REG);
+	udelay(2);
+	writel(__APP_PLL_SCLK_LOGIC_SOFT_RESET, rb + APP_PLL_SCLK_CTL_REG);
+	writel(__APP_PLL_LCLK_LOGIC_SOFT_RESET, rb + APP_PLL_LCLK_CTL_REG);
+	writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET,
+			rb + APP_PLL_SCLK_CTL_REG);
+	writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET,
+			rb + APP_PLL_LCLK_CTL_REG);
+	udelay(2000);
+	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
+	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
+	writel(pll_sclk, (rb + APP_PLL_SCLK_CTL_REG));
+	writel(pll_fclk, (rb + APP_PLL_LCLK_CTL_REG));
+
+	return BFA_STATUS_OK;
+}
diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c
new file mode 100644
index 0000000..651a8fb
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_ioc_ct.c
@@ -0,0 +1,998 @@
+/*
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014- QLogic Corporation.
+ * All rights reserved
+ * www.qlogic.com
+ *
+ * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include "bfad_drv.h"
+#include "bfa_ioc.h"
+#include "bfi_reg.h"
+#include "bfa_defs.h"
+
+BFA_TRC_FILE(CNA, IOC_CT);
+
+#define bfa_ioc_ct_sync_pos(__ioc)      \
+		((uint32_t) (1 << bfa_ioc_pcifn(__ioc)))
+#define BFA_IOC_SYNC_REQD_SH    16
+#define bfa_ioc_ct_get_sync_ackd(__val) ((__val) & 0x0000ffff)
+#define bfa_ioc_ct_clear_sync_ackd(__val)	((__val) & 0xffff0000)
+#define bfa_ioc_ct_get_sync_reqd(__val) ((__val) >> BFA_IOC_SYNC_REQD_SH)
+#define bfa_ioc_ct_sync_reqd_pos(__ioc) \
+			(bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)
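+
+/*
+ * Layout implied by the macros above: the low 16 bits of ioc_fail_sync
+ * carry one "sync acked" flag per PCI function and the high 16 bits the
+ * matching "sync required" flags, so a function's two flags sit
+ * BFA_IOC_SYNC_REQD_SH bits apart.
+ */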
+
+/*
+ * forward declarations
+ */
+static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
+static bfa_boolean_t bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc);
+static bfa_boolean_t bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_set_cur_ioc_fwstate(
+			struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate);
+static enum bfi_ioc_state bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_set_alt_ioc_fwstate(
+			struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate);
+static enum bfi_ioc_state bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc);
+
+static struct bfa_ioc_hwif_s hwif_ct;
+static struct bfa_ioc_hwif_s hwif_ct2;
+
+/*
+ * Return true if firmware of current driver matches the running firmware.
+ */
+static bfa_boolean_t
+bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
+{
+	enum bfi_ioc_state ioc_fwstate;
+	u32 usecnt;
+	struct bfi_ioc_image_hdr_s fwhdr;
+
+	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
+	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
+
+	/*
+	 * If usage count is 0, always return TRUE.
+	 */
+	if (usecnt == 0) {
+		writel(1, ioc->ioc_regs.ioc_usage_reg);
+		readl(ioc->ioc_regs.ioc_usage_sem_reg);
+		writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
+		writel(0, ioc->ioc_regs.ioc_fail_sync);
+		bfa_trc(ioc, usecnt);
+		return BFA_TRUE;
+	}
+
+	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+	bfa_trc(ioc, ioc_fwstate);
+
+	/*
+	 * The use count cannot be non-zero while the chip is uninitialized.
+	 */
+	WARN_ON(ioc_fwstate == BFI_IOC_UNINIT);
+
+	/*
+	 * Check if another driver with a different firmware is active
+	 */
+	bfa_ioc_fwver_get(ioc, &fwhdr);
+	if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
+		readl(ioc->ioc_regs.ioc_usage_sem_reg);
+		writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
+		bfa_trc(ioc, usecnt);
+		return BFA_FALSE;
+	}
+
+	/*
+	 * Same firmware version. Increment the reference count.
+	 */
+	usecnt++;
+	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
+	readl(ioc->ioc_regs.ioc_usage_sem_reg);
+	writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
+	bfa_trc(ioc, usecnt);
+	return BFA_TRUE;
+}
+
+static void
+bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
+{
+	u32 usecnt;
+
+	/*
+	 * decrement usage count
+	 */
+	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
+	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
+	WARN_ON(usecnt == 0);
+
+	usecnt--;
+	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
+	bfa_trc(ioc, usecnt);
+
+	readl(ioc->ioc_regs.ioc_usage_sem_reg);
+	writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
+}
+
+/*
+ * Notify other functions on HB failure.
+ */
+static void
+bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc)
+{
+	if (bfa_ioc_is_cna(ioc)) {
+		writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
+		writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
+		/* Wait for halt to take effect */
+		readl(ioc->ioc_regs.ll_halt);
+		readl(ioc->ioc_regs.alt_ll_halt);
+	} else {
+		writel(~0U, ioc->ioc_regs.err_set);
+		readl(ioc->ioc_regs.err_set);
+	}
+}
+
+/*
+ * Host to LPU mailbox message addresses
+ */
+static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } ct_fnreg[] = {
+	{ HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
+	{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
+	{ HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
+	{ HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
+};
+
+/*
+ * Host <-> LPU mailbox command/status registers - port 0
+ */
+static struct { u32 hfn, lpu; } ct_p0reg[] = {
+	{ HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
+	{ HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT },
+	{ HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT },
+	{ HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT }
+};
+
+/*
+ * Host <-> LPU mailbox command/status registers - port 1
+ */
+static struct { u32 hfn, lpu; } ct_p1reg[] = {
+	{ HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT },
+	{ HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT },
+	{ HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT },
+	{ HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT }
+};
+
+static struct { uint32_t hfn_mbox, lpu_mbox, hfn_pgn, hfn, lpu, lpu_read; }
+	ct2_reg[] = {
+	{ CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
+	  CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT,
+	  CT2_HOSTFN_LPU0_READ_STAT},
+	{ CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
+	  CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT,
+	  CT2_HOSTFN_LPU1_READ_STAT},
+};
+
+static void
+bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
+{
+	void __iomem *rb;
+	int		pcifn = bfa_ioc_pcifn(ioc);
+
+	rb = bfa_ioc_bar0(ioc);
+
+	ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
+	ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
+	ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;
+
+	if (ioc->port_id == 0) {
+		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
+		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
+		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
+		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
+		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
+		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
+		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
+	} else {
+		ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
+		ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
+		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
+		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
+		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
+		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
+		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
+	}
+
+	/*
+	 * PSS control registers
+	 */
+	ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
+	ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
+	ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_LCLK_CTL_REG);
+	ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_SCLK_CTL_REG);
+
+	/*
+	 * IOC semaphore registers and serialization
+	 */
+	ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
+	ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
+	ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
+	ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
+	ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC);
+
+	/*
+	 * sram memory access
+	 */
+	ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
+	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
+
+	/*
+	 * err set reg : for notification of hb failure in fcmode
+	 */
+	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
+}
+
+static void
+bfa_ioc_ct2_reg_init(struct bfa_ioc_s *ioc)
+{
+	void __iomem *rb;
+	int	port = bfa_ioc_portid(ioc);
+
+	rb = bfa_ioc_bar0(ioc);
+
+	ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox;
+	ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox;
+	ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn;
+	ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn;
+	ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu;
+	ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read;
+
+	if (port == 0) {
+		ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG;
+		ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
+		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
+		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
+		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
+	} else {
+		ioc->ioc_regs.heartbeat = (rb + CT2_BFA_IOC1_HBEAT_REG);
+		ioc->ioc_regs.ioc_fwstate = (rb + CT2_BFA_IOC1_STATE_REG);
+		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
+		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
+		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
+	}
+
+	/*
+	 * PSS control registers
+	 */
+	ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
+	ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
+	ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + CT2_APP_PLL_LCLK_CTL_REG);
+	ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + CT2_APP_PLL_SCLK_CTL_REG);
+
+	/*
+	 * IOC semaphore registers and serialization
+	 */
+	ioc->ioc_regs.ioc_sem_reg = (rb + CT2_HOST_SEM0_REG);
+	ioc->ioc_regs.ioc_usage_sem_reg = (rb + CT2_HOST_SEM1_REG);
+	ioc->ioc_regs.ioc_init_sem_reg = (rb + CT2_HOST_SEM2_REG);
+	ioc->ioc_regs.ioc_usage_reg = (rb + CT2_BFA_FW_USE_COUNT);
+	ioc->ioc_regs.ioc_fail_sync = (rb + CT2_BFA_IOC_FAIL_SYNC);
+
+	/*
+	 * sram memory access
+	 */
+	ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
+	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
+
+	/*
+	 * err set reg : for notification of hb failure in fcmode
+	 */
+	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
+}
+
+/*
+ * Initialize IOC to port mapping.
+ */
+
+#define FNC_PERS_FN_SHIFT(__fn)	((__fn) * 8)
+static void
+bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc)
+{
+	void __iomem *rb = ioc->pcidev.pci_bar_kva;
+	u32	r32;
+
+	/*
+	 * For catapult, base port id on personality register and IOC type
+	 */
+	r32 = readl(rb + FNC_PERS_REG);
+	r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
+	ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
+
+	bfa_trc(ioc, bfa_ioc_pcifn(ioc));
+	bfa_trc(ioc, ioc->port_id);
+}
+
+static void
+bfa_ioc_ct2_map_port(struct bfa_ioc_s *ioc)
+{
+	void __iomem	*rb = ioc->pcidev.pci_bar_kva;
+	u32	r32;
+
+	r32 = readl(rb + CT2_HOSTFN_PERSONALITY0);
+	ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH);
+
+	bfa_trc(ioc, bfa_ioc_pcifn(ioc));
+	bfa_trc(ioc, ioc->port_id);
+}
+
+/*
+ * Set interrupt mode for a function: INTX or MSIX
+ */
+static void
+bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
+{
+	void __iomem *rb = ioc->pcidev.pci_bar_kva;
+	u32	r32, mode;
+
+	r32 = readl(rb + FNC_PERS_REG);
+	bfa_trc(ioc, r32);
+
+	mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
+		__F0_INTX_STATUS;
+
+	/*
+	 * If already in desired mode, do not change anything
+	 */
+	if ((!msix && mode) || (msix && !mode))
+		return;
+
+	if (msix)
+		mode = __F0_INTX_STATUS_MSIX;
+	else
+		mode = __F0_INTX_STATUS_INTA;
+
+	r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
+	r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
+	bfa_trc(ioc, r32);
+
+	writel(r32, rb + FNC_PERS_REG);
+}
+
+bfa_boolean_t
+bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc_s *ioc)
+{
+	u32	r32;
+
+	r32 = readl(ioc->ioc_regs.lpu_read_stat);
+	if (r32) {
+		writel(1, ioc->ioc_regs.lpu_read_stat);
+		return BFA_TRUE;
+	}
+
+	return BFA_FALSE;
+}
+
+/*
+ * Cleanup hw semaphore and usecnt registers
+ */
+static void
+bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
+{
+	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
+	writel(0, ioc->ioc_regs.ioc_usage_reg);
+	readl(ioc->ioc_regs.ioc_usage_sem_reg);
+	writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
+
+	writel(0, ioc->ioc_regs.ioc_fail_sync);
+	/*
+	 * Read the hw sem reg to make sure that it is locked
+	 * before we clear it. If it is not locked, writing 1
+	 * will lock it instead of clearing it.
+	 */
+	readl(ioc->ioc_regs.ioc_sem_reg);
+	writel(1, ioc->ioc_regs.ioc_sem_reg);
+}
+
+static bfa_boolean_t
+bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc)
+{
+	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+	uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
+
+	/*
+	 * Driver load time.  If the sync required bit for this PCI fn
+	 * is set, it is due to an unclean exit by the driver for this
+	 * PCI fn in the previous incarnation. Whoever comes here first
+	 * should clean it up, no matter which PCI fn.
+	 */
+
+	if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
+		writel(0, ioc->ioc_regs.ioc_fail_sync);
+		writel(1, ioc->ioc_regs.ioc_usage_reg);
+		writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
+		writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
+		return BFA_TRUE;
+	}
+
+	return bfa_ioc_ct_sync_complete(ioc);
+}
+
+/*
+ * Synchronized IOC failure processing routines
+ */
+static void
+bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc)
+{
+	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+	uint32_t sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);
+
+	writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
+}
+
+static void
+bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc)
+{
+	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+	uint32_t sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
+					bfa_ioc_ct_sync_pos(ioc);
+
+	writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
+}
+
+static void
+bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc)
+{
+	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+
+	writel((r32 | bfa_ioc_ct_sync_pos(ioc)),
+		ioc->ioc_regs.ioc_fail_sync);
+}
+
+static bfa_boolean_t
+bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc)
+{
+	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+	uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
+	uint32_t sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
+	uint32_t tmp_ackd;
+
+	if (sync_ackd == 0)
+		return BFA_TRUE;
+
+	/*
+	 * The check below is to see whether any other PCI fn
+	 * has reinitialized the ASIC (reset sync_ackd bits)
+	 * and failed again while this IOC was waiting for hw
+	 * semaphore (in bfa_iocpf_sm_semwait()).
+	 */
+	tmp_ackd = sync_ackd;
+	if ((sync_reqd &  bfa_ioc_ct_sync_pos(ioc)) &&
+		!(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
+		sync_ackd |= bfa_ioc_ct_sync_pos(ioc);
+
+	if (sync_reqd == sync_ackd) {
+		writel(bfa_ioc_ct_clear_sync_ackd(r32),
+			ioc->ioc_regs.ioc_fail_sync);
+		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+		writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
+		return BFA_TRUE;
+	}
+
+	/*
+	 * If another PCI fn reinitialized and failed again while
+	 * this IOC was waiting for hw sem, the sync_ackd bit for
+	 * this IOC needs to be set again to allow reinitialization.
+	 */
+	if (tmp_ackd != sync_ackd)
+		writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);
+
+	return BFA_FALSE;
+}
+
+/*
+ * Called from bfa_ioc_attach() to map asic specific calls.
+ */
+static void
+bfa_ioc_set_ctx_hwif(struct bfa_ioc_s *ioc, struct bfa_ioc_hwif_s *hwif)
+{
+	hwif->ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
+	hwif->ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
+	hwif->ioc_notify_fail = bfa_ioc_ct_notify_fail;
+	hwif->ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
+	hwif->ioc_sync_start = bfa_ioc_ct_sync_start;
+	hwif->ioc_sync_join = bfa_ioc_ct_sync_join;
+	hwif->ioc_sync_leave = bfa_ioc_ct_sync_leave;
+	hwif->ioc_sync_ack = bfa_ioc_ct_sync_ack;
+	hwif->ioc_sync_complete = bfa_ioc_ct_sync_complete;
+	hwif->ioc_set_fwstate = bfa_ioc_ct_set_cur_ioc_fwstate;
+	hwif->ioc_get_fwstate = bfa_ioc_ct_get_cur_ioc_fwstate;
+	hwif->ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate;
+	hwif->ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate;
+}
+
+/*
+ * Called from bfa_ioc_attach() to map asic specific calls.
+ */
+void
+bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
+{
+	bfa_ioc_set_ctx_hwif(ioc, &hwif_ct);
+
+	hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
+	hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
+	hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
+	hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
+	ioc->ioc_hwif = &hwif_ct;
+}
+
+/*
+ * Called from bfa_ioc_attach() to map asic specific calls.
+ */
+void
+bfa_ioc_set_ct2_hwif(struct bfa_ioc_s *ioc)
+{
+	bfa_ioc_set_ctx_hwif(ioc, &hwif_ct2);
+
+	hwif_ct2.ioc_pll_init = bfa_ioc_ct2_pll_init;
+	hwif_ct2.ioc_reg_init = bfa_ioc_ct2_reg_init;
+	hwif_ct2.ioc_map_port = bfa_ioc_ct2_map_port;
+	hwif_ct2.ioc_lpu_read_stat = bfa_ioc_ct2_lpu_read_stat;
+	hwif_ct2.ioc_isr_mode_set = NULL;
+	ioc->ioc_hwif = &hwif_ct2;
+}
+
+/*
+ * Workaround for MSI-X resource allocation for catapult-2 with no asic block
+ */
+#define HOSTFN_MSIX_DEFAULT		64
+#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR	0x30138
+#define HOSTFN_MSIX_VT_OFST_NUMVT	0x3013c
+#define __MSIX_VT_NUMVT__MK		0x003ff800
+#define __MSIX_VT_NUMVT__SH		11
+#define __MSIX_VT_NUMVT_(_v)		((_v) << __MSIX_VT_NUMVT__SH)
+#define __MSIX_VT_OFST_			0x000007ff
+void
+bfa_ioc_ct2_poweron(struct bfa_ioc_s *ioc)
+{
+	void __iomem *rb = ioc->pcidev.pci_bar_kva;
+	u32	r32;
+
+	r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT);
+	if (r32 & __MSIX_VT_NUMVT__MK) {
+		writel(r32 & __MSIX_VT_OFST_,
+			rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
+		return;
+	}
+
+	writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) |
+		HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
+		rb + HOSTFN_MSIX_VT_OFST_NUMVT);
+	writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
+		rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
+}
+
+bfa_status_t
+bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
+{
+	u32	pll_sclk, pll_fclk, r32;
+	bfa_boolean_t fcmode = (mode == BFI_ASIC_MODE_FC);
+
+	pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST |
+		__APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) |
+		__APP_PLL_SCLK_JITLMT0_1(3U) |
+		__APP_PLL_SCLK_CNTLMT0_1(1U);
+	pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST |
+		__APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
+		__APP_PLL_LCLK_JITLMT0_1(3U) |
+		__APP_PLL_LCLK_CNTLMT0_1(1U);
+
+	if (fcmode) {
+		writel(0, (rb + OP_MODE));
+		writel(__APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2 |
+			 __APP_EMS_CHANNEL_SEL, (rb + ETH_MAC_SER_REG));
+	} else {
+		writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
+		writel(__APP_EMS_REFCKBUFEN1, (rb + ETH_MAC_SER_REG));
+	}
+	writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
+	writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
+	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
+	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
+	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
+	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
+	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
+	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
+	writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET,
+			rb + APP_PLL_SCLK_CTL_REG);
+	writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET,
+			rb + APP_PLL_LCLK_CTL_REG);
+	writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET |
+		__APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG);
+	writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET |
+		__APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG);
+	readl(rb + HOSTFN0_INT_MSK);
+	udelay(2000);
+	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
+	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
+	writel(pll_sclk | __APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG);
+	writel(pll_fclk | __APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG);
+
+	if (!fcmode) {
+		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
+		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
+	}
+	r32 = readl((rb + PSS_CTL_REG));
+	r32 &= ~__PSS_LMEM_RESET;
+	writel(r32, (rb + PSS_CTL_REG));
+	udelay(1000);
+	if (!fcmode) {
+		writel(0, (rb + PMM_1T_RESET_REG_P0));
+		writel(0, (rb + PMM_1T_RESET_REG_P1));
+	}
+
+	writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
+	udelay(1000);
+	r32 = readl((rb + MBIST_STAT_REG));
+	writel(0, (rb + MBIST_CTL_REG));
+	return BFA_STATUS_OK;
+}
+
+static void
+bfa_ioc_ct2_sclk_init(void __iomem *rb)
+{
+	u32 r32;
+
+	/*
+	 * put s_clk PLL and PLL FSM in reset
+	 */
+	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+	r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN);
+	r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS |
+		__APP_PLL_SCLK_LOGIC_SOFT_RESET);
+	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));
+
+	/*
+	 * Ignore mode and program for the max clock (which is FC16).
+	 * Firmware/NFC will do the PLL init appropriately.
+	 */
+	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+	r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2);
+	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));
+
+	/*
+	 * while doing PLL init, don't clock gate the ethernet subsystem
+	 */
+	r32 = readl((rb + CT2_CHIP_MISC_PRG));
+	writel(r32 | __ETH_CLK_ENABLE_PORT0, (rb + CT2_CHIP_MISC_PRG));
+
+	r32 = readl((rb + CT2_PCIE_MISC_REG));
+	writel(r32 | __ETH_CLK_ENABLE_PORT1, (rb + CT2_PCIE_MISC_REG));
+
+	/*
+	 * set sclk value
+	 */
+	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+	r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL |
+		__APP_PLL_SCLK_CLK_DIV2);
+	writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG));
+
+	/*
+	 * poll for s_clk lock or delay 1ms
+	 */
+	udelay(1000);
+}
+
+static void
+bfa_ioc_ct2_lclk_init(void __iomem *rb)
+{
+	u32 r32;
+
+	/*
+	 * put l_clk PLL and PLL FSM in reset
+	 */
+	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+	r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN);
+	r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS |
+		__APP_PLL_LCLK_LOGIC_SOFT_RESET);
+	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
+
+	/*
+	 * set LPU speed (set for FC16 which will work for other modes)
+	 */
+	r32 = readl((rb + CT2_CHIP_MISC_PRG));
+	writel(r32, (rb + CT2_CHIP_MISC_PRG));
+
+	/*
+	 * set LPU half speed (set for FC16 which will work for other modes)
+	 */
+	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
+
+	/*
+	 * set lclk for mode (set for FC16)
+	 */
+	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+	r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED);
+	r32 |= 0x20c1731b;
+	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
+
+	/*
+	 * poll for l_clk lock or delay 1ms
+	 */
+	udelay(1000);
+}
+
+static void
+bfa_ioc_ct2_mem_init(void __iomem *rb)
+{
+	u32	r32;
+
+	r32 = readl((rb + PSS_CTL_REG));
+	r32 &= ~__PSS_LMEM_RESET;
+	writel(r32, (rb + PSS_CTL_REG));
+	udelay(1000);
+
+	writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG));
+	udelay(1000);
+	writel(0, (rb + CT2_MBIST_CTL_REG));
+}
+
+void
+bfa_ioc_ct2_mac_reset(void __iomem *rb)
+{
+	/* put port0, port1 MAC & AHB in reset */
+	writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
+		rb + CT2_CSI_MAC_CONTROL_REG(0));
+	writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
+		rb + CT2_CSI_MAC_CONTROL_REG(1));
+}
+
+static void
+bfa_ioc_ct2_enable_flash(void __iomem *rb)
+{
+	u32 r32;
+
+	r32 = readl((rb + PSS_GPIO_OUT_REG));
+	writel(r32 & ~1, (rb + PSS_GPIO_OUT_REG));
+	r32 = readl((rb + PSS_GPIO_OE_REG));
+	writel(r32 | 1, (rb + PSS_GPIO_OE_REG));
+}
+
+#define CT2_NFC_MAX_DELAY	1000
+#define CT2_NFC_PAUSE_MAX_DELAY 4000
+#define CT2_NFC_VER_VALID	0x147
+#define CT2_NFC_STATE_RUNNING   0x20000001
+#define BFA_IOC_PLL_POLL	1000000
+
+static bfa_boolean_t
+bfa_ioc_ct2_nfc_halted(void __iomem *rb)
+{
+	u32	r32;
+
+	r32 = readl(rb + CT2_NFC_CSR_SET_REG);
+	if (r32 & __NFC_CONTROLLER_HALTED)
+		return BFA_TRUE;
+
+	return BFA_FALSE;
+}
+
+static void
+bfa_ioc_ct2_nfc_halt(void __iomem *rb)
+{
+	int	i;
+
+	writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_SET_REG);
+	for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
+		if (bfa_ioc_ct2_nfc_halted(rb))
+			break;
+		udelay(1000);
+	}
+	WARN_ON(!bfa_ioc_ct2_nfc_halted(rb));
+}
+
+static void
+bfa_ioc_ct2_nfc_resume(void __iomem *rb)
+{
+	u32	r32;
+	int i;
+
+	writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG);
+	for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
+		r32 = readl(rb + CT2_NFC_CSR_SET_REG);
+		if (!(r32 & __NFC_CONTROLLER_HALTED))
+			return;
+		udelay(1000);
+	}
+	WARN_ON(1);
+}
+
+static void
+bfa_ioc_ct2_clk_reset(void __iomem *rb)
+{
+	u32 r32;
+
+	bfa_ioc_ct2_sclk_init(rb);
+	bfa_ioc_ct2_lclk_init(rb);
+
+	/*
+	 * release soft reset on s_clk & l_clk
+	 */
+	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+	writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
+			(rb + CT2_APP_PLL_SCLK_CTL_REG));
+
+	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+	writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
+			(rb + CT2_APP_PLL_LCLK_CTL_REG));
+}
+
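+/*
+ * NFC-assisted clock reset: hold both LPUs in reset, ask the NFC
+ * firmware to reset and restart the s_clk/l_clk PLLs, then poll the
+ * flash status register for the PLL reset to start and complete.
+ */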
+static void
+bfa_ioc_ct2_nfc_clk_reset(void __iomem *rb)
+{
+	u32 r32, i;
+
+	r32 = readl((rb + PSS_CTL_REG));
+	r32 |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
+	writel(r32, (rb + PSS_CTL_REG));
+
+	writel(__RESET_AND_START_SCLK_LCLK_PLLS, rb + CT2_CSI_FW_CTL_SET_REG);
+
+	for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
+		r32 = readl(rb + CT2_NFC_FLASH_STS_REG);
+
+		if ((r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS))
+			break;
+	}
+	WARN_ON(!(r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS));
+
+	for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
+		r32 = readl(rb + CT2_NFC_FLASH_STS_REG);
+
+		if (!(r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS))
+			break;
+	}
+	WARN_ON((r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS));
+
+	r32 = readl(rb + CT2_CSI_FW_CTL_REG);
+	WARN_ON((r32 & __RESET_AND_START_SCLK_LCLK_PLLS));
+}
+
+static void
+bfa_ioc_ct2_wait_till_nfc_running(void __iomem *rb)
+{
+	u32 r32;
+	int i;
+
+	if (bfa_ioc_ct2_nfc_halted(rb))
+		bfa_ioc_ct2_nfc_resume(rb);
+	for (i = 0; i < CT2_NFC_PAUSE_MAX_DELAY; i++) {
+		r32 = readl(rb + CT2_NFC_STS_REG);
+		if (r32 == CT2_NFC_STATE_RUNNING)
+			return;
+		udelay(1000);
+	}
+
+	r32 = readl(rb + CT2_NFC_STS_REG);
+	WARN_ON(!(r32 == CT2_NFC_STATE_RUNNING));
+}
+
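+/*
+ * Catapult-2 PLL initialization.  The WGN status and NFC firmware
+ * version select the path: a ready/config-done WGN gets a direct clock
+ * reset (with flash explicitly re-enabled in case it is corrupted); a
+ * recent enough NFC gets an NFC-assisted clock reset; otherwise NFC is
+ * halted and the clocks are reset directly.
+ */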
+bfa_status_t
+bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
+{
+	u32 wgn, r32, nfc_ver;
+
+	wgn = readl(rb + CT2_WGN_STATUS);
+
+	if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
+		/*
+		 * If flash is corrupted, enable flash explicitly
+		 */
+		bfa_ioc_ct2_clk_reset(rb);
+		bfa_ioc_ct2_enable_flash(rb);
+
+		bfa_ioc_ct2_mac_reset(rb);
+
+		bfa_ioc_ct2_clk_reset(rb);
+		bfa_ioc_ct2_enable_flash(rb);
+
+	} else {
+		nfc_ver = readl(rb + CT2_RSC_GPR15_REG);
+
+		if ((nfc_ver >= CT2_NFC_VER_VALID) &&
+		    (wgn == (__A2T_AHB_LOAD | __WGN_READY))) {
+
+			bfa_ioc_ct2_wait_till_nfc_running(rb);
+
+			bfa_ioc_ct2_nfc_clk_reset(rb);
+		} else {
+			bfa_ioc_ct2_nfc_halt(rb);
+
+			bfa_ioc_ct2_clk_reset(rb);
+			bfa_ioc_ct2_mac_reset(rb);
+			bfa_ioc_ct2_clk_reset(rb);
+
+		}
+	}
+	/*
+	 * The very first PCIe DMA Read done by the LPU fails with a fatal
+	 * error when the Address Translation Cache (ATC) has been enabled
+	 * by the system BIOS.
+	 *
+	 * Workaround:
+	 * Disable the Invalidated Tag Match Enable capability by clearing
+	 * bit 26 of CHIP_MISC_PRG (it is set to 1 by default).
+	 */
+	r32 = readl(rb + CT2_CHIP_MISC_PRG);
+	writel((r32 & 0xfbffffff), (rb + CT2_CHIP_MISC_PRG));
+
+	/*
+	 * Mask the interrupts and clear any
+	 * pending interrupts left by BIOS/EFI
+	 */
+
+	writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
+	writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));
+
+	/* For first time initialization, no need to clear interrupts */
+	r32 = readl(rb + HOST_SEM5_REG);
+	if (r32 & 0x1) {
+		r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
+		if (r32 == 1) {
+			writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
+			readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
+		}
+		r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
+		if (r32 == 1) {
+			writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
+			readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
+		}
+	}
+
+	bfa_ioc_ct2_mem_init(rb);
+
+	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG));
+	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG));
+
+	return BFA_STATUS_OK;
+}
+
+static void
+bfa_ioc_ct_set_cur_ioc_fwstate(struct bfa_ioc_s *ioc,
+		enum bfi_ioc_state fwstate)
+{
+	writel(fwstate, ioc->ioc_regs.ioc_fwstate);
+}
+
+static enum bfi_ioc_state
+bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc)
+{
+	return (enum bfi_ioc_state)readl(ioc->ioc_regs.ioc_fwstate);
+}
+
+static void
+bfa_ioc_ct_set_alt_ioc_fwstate(struct bfa_ioc_s *ioc,
+		enum bfi_ioc_state fwstate)
+{
+	writel(fwstate, ioc->ioc_regs.alt_ioc_fwstate);
+}
+
+static enum bfi_ioc_state
+bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc)
+{
+	return (enum bfi_ioc_state) readl(ioc->ioc_regs.alt_ioc_fwstate);
+}
diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
new file mode 100644
index 0000000..1c2ab39
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_modules.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014- QLogic Corporation.
+ * All rights reserved
+ * www.qlogic.com
+ *
+ * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+/*
+ *  bfa_modules.h BFA modules
+ */
+
+#ifndef __BFA_MODULES_H__
+#define __BFA_MODULES_H__
+
+#include "bfa_cs.h"
+#include "bfa.h"
+#include "bfa_svc.h"
+#include "bfa_fcpim.h"
+#include "bfa_port.h"
+
+struct bfa_modules_s {
+	struct bfa_fcdiag_s	fcdiag;		/* fcdiag module */
+	struct bfa_fcport_s	fcport;		/*  fc port module	      */
+	struct bfa_fcxp_mod_s	fcxp_mod;	/*  fcxp module	      */
+	struct bfa_lps_mod_s	lps_mod;	/*  lport services module    */
+	struct bfa_uf_mod_s	uf_mod;		/*  unsolicited frame module */
+	struct bfa_rport_mod_s	rport_mod;	/*  remote port module	      */
+	struct bfa_fcp_mod_s	fcp_mod;	/*  FCP initiator module     */
+	struct bfa_sgpg_mod_s	sgpg_mod;	/*  SG page module	      */
+	struct bfa_port_s	port;		/*  Physical port module     */
+	struct bfa_ablk_s	ablk;		/*  ASIC block config module */
+	struct bfa_cee_s	cee;		/*  CEE Module	*/
+	struct bfa_sfp_s	sfp;		/*  SFP module	*/
+	struct bfa_flash_s	flash;		/*  flash module */
+	struct bfa_diag_s	diag_mod;	/*  diagnostics module	*/
+	struct bfa_phy_s	phy;		/*  phy module		*/
+	struct bfa_dconf_mod_s	dconf_mod;	/*  DCONF common module	*/
+	struct bfa_fru_s	fru;		/*  fru module		*/
+};
+
+/*
+ * !!! Only append to the enums defined here to avoid any versioning
+ * !!! needed between trace utility and driver version
+ */
+enum {
+	BFA_TRC_HAL_CORE	= 1,
+	BFA_TRC_HAL_FCXP	= 2,
+	BFA_TRC_HAL_FCPIM	= 3,
+	BFA_TRC_HAL_IOCFC_CT	= 4,
+	BFA_TRC_HAL_IOCFC_CB	= 5,
+};
+
+#define BFA_CACHELINE_SZ	(256)
+
+struct bfa_s {
+	void			*bfad;		/*  BFA driver instance    */
+	struct bfa_plog_s	*plog;		/*  portlog buffer	    */
+	struct bfa_trc_mod_s	*trcmod;	/*  driver tracing	    */
+	struct bfa_ioc_s	ioc;		/*  IOC module		    */
+	struct bfa_iocfc_s	iocfc;		/*  IOCFC module	    */
+	struct bfa_timer_mod_s	timer_mod;	/*  timer module	    */
+	struct bfa_modules_s	modules;	/*  BFA modules	    */
+	struct list_head	comp_q;		/*  pending completions     */
+	bfa_boolean_t		queue_process;	/*  queue processing enabled */
+	struct list_head	reqq_waitq[BFI_IOC_MAX_CQS];
+	bfa_boolean_t		fcs;		/*  FCS is attached to BFA */
+	struct bfa_msix_s	msix;
+	int			bfa_aen_seq;
+	bfa_boolean_t		intr_enabled;	/*  Status of interrupts */
+};
+
+extern bfa_boolean_t bfa_auto_recover;
+
+void bfa_dconf_attach(struct bfa_s *, void *, struct bfa_iocfc_cfg_s *);
+void bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *, struct bfa_meminfo_s *,
+		  struct bfa_s *);
+void bfa_dconf_iocdisable(struct bfa_s *);
+void bfa_fcp_attach(struct bfa_s *, void *, struct bfa_iocfc_cfg_s *,
+		struct bfa_pcidev_s *);
+void bfa_fcp_iocdisable(struct bfa_s *bfa);
+void bfa_fcp_meminfo(struct bfa_iocfc_cfg_s *, struct bfa_meminfo_s *,
+		struct bfa_s *);
+void bfa_fcpim_iocdisable(struct bfa_fcp_mod_s *);
+void bfa_fcport_start(struct bfa_s *);
+void bfa_fcport_iocdisable(struct bfa_s *);
+void bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *, struct bfa_meminfo_s *,
+		   struct bfa_s *);
+void bfa_fcport_attach(struct bfa_s *, void *, struct bfa_iocfc_cfg_s *,
+		struct bfa_pcidev_s *);
+void bfa_fcxp_iocdisable(struct bfa_s *);
+void bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *, struct bfa_meminfo_s *,
+		struct bfa_s *);
+void bfa_fcxp_attach(struct bfa_s *, void *, struct bfa_iocfc_cfg_s *,
+		struct bfa_pcidev_s *);
+void bfa_fcdiag_iocdisable(struct bfa_s *);
+void bfa_fcdiag_attach(struct bfa_s *bfa, void *, struct bfa_iocfc_cfg_s *,
+		struct bfa_pcidev_s *);
+void bfa_ioim_lm_init(struct bfa_s *);
+void bfa_lps_iocdisable(struct bfa_s *bfa);
+void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *, struct bfa_meminfo_s *,
+		struct bfa_s *);
+void bfa_lps_attach(struct bfa_s *, void *, struct bfa_iocfc_cfg_s *,
+	struct bfa_pcidev_s *);
+void bfa_rport_iocdisable(struct bfa_s *bfa);
+void bfa_rport_meminfo(struct bfa_iocfc_cfg_s *, struct bfa_meminfo_s *,
+		struct bfa_s *);
+void bfa_rport_attach(struct bfa_s *, void *, struct bfa_iocfc_cfg_s *,
+		struct bfa_pcidev_s *);
+void bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *, struct bfa_meminfo_s *,
+		struct bfa_s *);
+void bfa_sgpg_attach(struct bfa_s *, void *bfad, struct bfa_iocfc_cfg_s *,
+		struct bfa_pcidev_s *);
+void bfa_uf_iocdisable(struct bfa_s *);
+void bfa_uf_meminfo(struct bfa_iocfc_cfg_s *, struct bfa_meminfo_s *,
+		struct bfa_s *);
+void bfa_uf_attach(struct bfa_s *, void *, struct bfa_iocfc_cfg_s *,
+		struct bfa_pcidev_s *);
+void bfa_uf_start(struct bfa_s *);
+
+#endif /* __BFA_MODULES_H__ */
diff --git a/drivers/scsi/bfa/bfa_plog.h b/drivers/scsi/bfa/bfa_plog.h
new file mode 100644
index 0000000..da570c0
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_plog.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014- QLogic Corporation.
+ * All rights reserved
+ * www.qlogic.com
+ *
+ * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+#ifndef __BFA_PORTLOG_H__
+#define __BFA_PORTLOG_H__
+
+#include "bfa_fc.h"
+#include "bfa_defs.h"
+
+#define BFA_PL_NLOG_ENTS 256
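+/* advance a portlog index, wrapping at BFA_PL_NLOG_ENTS (circular log) */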
+#define BFA_PL_LOG_REC_INCR(_x) ((_x)++, (_x) %= BFA_PL_NLOG_ENTS)
+
+#define BFA_PL_STRING_LOG_SZ   32   /* number of chars in string log */
+#define BFA_PL_INT_LOG_SZ      8    /* number of integers in the integer log */
+
+enum bfa_plog_log_type {
+	BFA_PL_LOG_TYPE_INVALID	= 0,
+	BFA_PL_LOG_TYPE_INT	= 1,
+	BFA_PL_LOG_TYPE_STRING	= 2,
+};
+
+/*
+ * the (fixed size) record format for each entry in the portlog
+ */
+struct bfa_plog_rec_s {
+	u64	tv;	/* timestamp */
+	u8	 port;	/* Source port that logged this entry */
+	u8	 mid;	/* module id */
+	u8	 eid;	/* indicates Rx, Tx, IOCTL, etc.  bfa_plog_eid */
+	u8	 log_type; /* string/integer log, bfa_plog_log_type_t */
+	u8	 log_num_ints;
+	/*
+	 * interpreted only if log_type is BFA_PL_LOG_TYPE_INT; indicates the
+	 * number of integers in int_log[] (0-BFA_PL_INT_LOG_SZ).
+	 */
+	u8	 rsvd;
+	u16	misc;	/* can be used to indicate fc frame length */
+	union {
+		char	    string_log[BFA_PL_STRING_LOG_SZ];
+		u32	int_log[BFA_PL_INT_LOG_SZ];
+	} log_entry;
+
+};
+
+/*
+ * The following enum values will be used by the logging entities to
+ * indicate their module id. BFAL will convert the integer value to
+ * string format.
+ *
+ * Process to be used while changing these values:
+ *  - Always add new entries at the end
+ *  - Define the corresponding string in BFAL
+ *  - Do not remove any entry or rearrange the order
+ */
+enum bfa_plog_mid {
+	BFA_PL_MID_INVALID	= 0,
+	BFA_PL_MID_DEBUG	= 1,
+	BFA_PL_MID_DRVR		= 2,
+	BFA_PL_MID_HAL		= 3,
+	BFA_PL_MID_HAL_FCXP	= 4,
+	BFA_PL_MID_HAL_UF	= 5,
+	BFA_PL_MID_FCS		= 6,
+	BFA_PL_MID_LPS		= 7,
+	BFA_PL_MID_MAX		= 8
+};
+
+#define BFA_PL_MID_STRLEN    8
+struct bfa_plog_mid_strings_s {
+	char	    m_str[BFA_PL_MID_STRLEN];
+};
+
+/*
+ * The following enum values will be used by the logging entities to
+ * indicate their event type. BFAL will convert the integer value to
+ * string format.
+ *
+ * Process to be used while changing these values:
+ *  - Always add new entries at the end
+ *  - Define the corresponding string in BFAL
+ *  - Do not remove any entry or rearrange the order
+ */
+enum bfa_plog_eid {
+	BFA_PL_EID_INVALID		= 0,
+	BFA_PL_EID_IOC_DISABLE		= 1,
+	BFA_PL_EID_IOC_ENABLE		= 2,
+	BFA_PL_EID_PORT_DISABLE		= 3,
+	BFA_PL_EID_PORT_ENABLE		= 4,
+	BFA_PL_EID_PORT_ST_CHANGE	= 5,
+	BFA_PL_EID_TX			= 6,
+	BFA_PL_EID_TX_ACK1		= 7,
+	BFA_PL_EID_TX_RJT		= 8,
+	BFA_PL_EID_TX_BSY		= 9,
+	BFA_PL_EID_RX			= 10,
+	BFA_PL_EID_RX_ACK1		= 11,
+	BFA_PL_EID_RX_RJT		= 12,
+	BFA_PL_EID_RX_BSY		= 13,
+	BFA_PL_EID_CT_IN		= 14,
+	BFA_PL_EID_CT_OUT		= 15,
+	BFA_PL_EID_DRIVER_START		= 16,
+	BFA_PL_EID_RSCN			= 17,
+	BFA_PL_EID_DEBUG		= 18,
+	BFA_PL_EID_MISC			= 19,
+	BFA_PL_EID_FIP_FCF_DISC		= 20,
+	BFA_PL_EID_FIP_FCF_CVL		= 21,
+	BFA_PL_EID_LOGIN		= 22,
+	BFA_PL_EID_LOGO			= 23,
+	BFA_PL_EID_TRUNK_SCN		= 24,
+	BFA_PL_EID_MAX
+};
+
+#define BFA_PL_ENAME_STRLEN	8
+struct bfa_plog_eid_strings_s {
+	char	    e_str[BFA_PL_ENAME_STRLEN];
+};
+
+#define BFA_PL_SIG_LEN	8
+#define BFA_PL_SIG_STR  "12pl123"
+
+/*
+ * per port circular log buffer
+ */
+struct bfa_plog_s {
+	char	    plog_sig[BFA_PL_SIG_LEN];	/* Start signature */
+	u8	 plog_enabled;
+	u8	 rsvd[7];
+	u32	ticks;
+	u16	head;
+	u16	tail;
+	struct bfa_plog_rec_s  plog_recs[BFA_PL_NLOG_ENTS];
+};
+
+void bfa_plog_init(struct bfa_plog_s *plog);
+void bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
+			enum bfa_plog_eid event, u16 misc, char *log_str);
+void bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
+			enum bfa_plog_eid event, u16 misc,
+			u32 *intarr, u32 num_ints);
+void bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
+		enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr);
+void bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
+			enum bfa_plog_eid event, u16 misc,
+			struct fchs_s *fchdr, u32 pld_w0);
+
+#endif /* __BFA_PORTLOG_H__ */
diff --git a/drivers/scsi/bfa/bfa_port.c b/drivers/scsi/bfa/bfa_port.c
new file mode 100644
index 0000000..079bc77
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_port.c
@@ -0,0 +1,872 @@
+/*
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014- QLogic Corporation.
+ * All rights reserved
+ * www.qlogic.com
+ *
+ * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include "bfad_drv.h"
+#include "bfa_defs_svc.h"
+#include "bfa_port.h"
+#include "bfi.h"
+#include "bfa_ioc.h"
+
+
+BFA_TRC_FILE(CNA, PORT);
+
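+/*
+ * Convert the firmware's 64-bit big-endian port stats to host order:
+ * on little-endian hosts each pair of 32-bit words is byte-swapped and
+ * exchanged; on big-endian hosts the buffer is already in host order.
+ */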
+static void
+bfa_port_stats_swap(struct bfa_port_s *port, union bfa_port_stats_u *stats)
+{
+	u32    *dip = (u32 *) stats;
+	__be32    t0, t1;
+	int	    i;
+
+	for (i = 0; i < sizeof(union bfa_port_stats_u)/sizeof(u32);
+		i += 2) {
+		t0 = dip[i];
+		t1 = dip[i + 1];
+#ifdef __BIG_ENDIAN
+		dip[i] = be32_to_cpu(t0);
+		dip[i + 1] = be32_to_cpu(t1);
+#else
+		dip[i] = be32_to_cpu(t1);
+		dip[i + 1] = be32_to_cpu(t0);
+#endif
+	}
+}
+
+/*
+ * bfa_port_enable_isr()
+ *
+ *
+ * @param[in] port - Pointer to the port module
+ *            status - Return status from the f/w
+ *
+ * @return void
+ */
+static void
+bfa_port_enable_isr(struct bfa_port_s *port, bfa_status_t status)
+{
+	bfa_trc(port, status);
+	port->endis_pending = BFA_FALSE;
+	port->endis_cbfn(port->endis_cbarg, status);
+}
+
+/*
+ * bfa_port_disable_isr()
+ *
+ *
+ * @param[in] port - Pointer to the port module
+ *            status - Return status from the f/w
+ *
+ * @return void
+ */
+static void
+bfa_port_disable_isr(struct bfa_port_s *port, bfa_status_t status)
+{
+	bfa_trc(port, status);
+	port->endis_pending = BFA_FALSE;
+	port->endis_cbfn(port->endis_cbarg, status);
+}
+
+/*
+ * bfa_port_get_stats_isr()
+ *
+ *
+ * @param[in] port - Pointer to the Port module
+ *            status - Return status from the f/w
+ *
+ * @return void
+ */
+static void
+bfa_port_get_stats_isr(struct bfa_port_s *port, bfa_status_t status)
+{
+	port->stats_status = status;
+	port->stats_busy = BFA_FALSE;
+
+	if (status == BFA_STATUS_OK) {
+		memcpy(port->stats, port->stats_dma.kva,
+		       sizeof(union bfa_port_stats_u));
+		bfa_port_stats_swap(port, port->stats);
+
+		port->stats->fc.secs_reset = ktime_get_seconds() -
+					port->stats_reset_time;
+	}
+
+	if (port->stats_cbfn) {
+		port->stats_cbfn(port->stats_cbarg, status);
+		port->stats_cbfn = NULL;
+	}
+}
+
+/*
+ * bfa_port_clear_stats_isr()
+ *
+ *
+ * @param[in] port - Pointer to the Port module
+ *            status - Return status from the f/w
+ *
+ * @return void
+ */
+static void
+bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status)
+{
+	port->stats_status = status;
+	port->stats_busy   = BFA_FALSE;
+
+	/*
+	 * re-initialize time stamp for stats reset
+	 */
+	port->stats_reset_time = ktime_get_seconds();
+
+	if (port->stats_cbfn) {
+		port->stats_cbfn(port->stats_cbarg, status);
+		port->stats_cbfn = NULL;
+	}
+}
+
+/*
+ * bfa_port_isr()
+ *
+ *
+ * @param[in] Pointer to the Port module data structure.
+ *
+ * @return void
+ */
+static void
+bfa_port_isr(void *cbarg, struct bfi_mbmsg_s *m)
+{
+	struct bfa_port_s *port = (struct bfa_port_s *) cbarg;
+	union bfi_port_i2h_msg_u *i2hmsg;
+
+	i2hmsg = (union bfi_port_i2h_msg_u *) m;
+	bfa_trc(port, m->mh.msg_id);
+
+	switch (m->mh.msg_id) {
+	case BFI_PORT_I2H_ENABLE_RSP:
+		if (port->endis_pending == BFA_FALSE)
+			break;
+		bfa_port_enable_isr(port, i2hmsg->enable_rsp.status);
+		break;
+
+	case BFI_PORT_I2H_DISABLE_RSP:
+		if (port->endis_pending == BFA_FALSE)
+			break;
+		bfa_port_disable_isr(port, i2hmsg->disable_rsp.status);
+		break;
+
+	case BFI_PORT_I2H_GET_STATS_RSP:
+		/*
+		 * Ignore the response if stats_busy was already cleared
+		 * (the command may have timed out).
+		 */
+		if (port->stats_busy == BFA_FALSE)
+			break;
+		bfa_port_get_stats_isr(port, i2hmsg->getstats_rsp.status);
+		break;
+
+	case BFI_PORT_I2H_CLEAR_STATS_RSP:
+		if (port->stats_busy == BFA_FALSE)
+			break;
+		bfa_port_clear_stats_isr(port, i2hmsg->clearstats_rsp.status);
+		break;
+
+	default:
+		WARN_ON(1);
+	}
+}
+
+/*
+ * bfa_port_meminfo()
+ *
+ *
+ * @param[in] void
+ *
+ * @return Size of DMA region
+ */
+u32
+bfa_port_meminfo(void)
+{
+	return BFA_ROUNDUP(sizeof(union bfa_port_stats_u), BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * bfa_port_mem_claim()
+ *
+ *
+ * @param[in] port Port module pointer
+ *	      dma_kva Kernel Virtual Address of Port DMA Memory
+ *	      dma_pa  Physical Address of Port DMA Memory
+ *
+ * @return void
+ */
+void
+bfa_port_mem_claim(struct bfa_port_s *port, u8 *dma_kva, u64 dma_pa)
+{
+	port->stats_dma.kva = dma_kva;
+	port->stats_dma.pa  = dma_pa;
+}
+
+/*
+ * bfa_port_enable()
+ *
+ *   Send the Port enable request to the f/w
+ *
+ * @param[in] Pointer to the Port module data structure.
+ *
+ * @return Status
+ */
+bfa_status_t
+bfa_port_enable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
+		 void *cbarg)
+{
+	struct bfi_port_generic_req_s *m;
+
+	/* If port is PBC disabled, return error */
+	if (port->pbc_disabled) {
+		bfa_trc(port, BFA_STATUS_PBC);
+		return BFA_STATUS_PBC;
+	}
+
+	if (bfa_ioc_is_disabled(port->ioc)) {
+		bfa_trc(port, BFA_STATUS_IOC_DISABLED);
+		return BFA_STATUS_IOC_DISABLED;
+	}
+
+	if (!bfa_ioc_is_operational(port->ioc)) {
+		bfa_trc(port, BFA_STATUS_IOC_FAILURE);
+		return BFA_STATUS_IOC_FAILURE;
+	}
+
+	/* if port is d-port enabled, return error */
+	if (port->dport_enabled) {
+		bfa_trc(port, BFA_STATUS_DPORT_ERR);
+		return BFA_STATUS_DPORT_ERR;
+	}
+
+	if (port->endis_pending) {
+		bfa_trc(port, BFA_STATUS_DEVBUSY);
+		return BFA_STATUS_DEVBUSY;
+	}
+
+	m = (struct bfi_port_generic_req_s *) port->endis_mb.msg;
+
+	port->msgtag++;
+	port->endis_cbfn    = cbfn;
+	port->endis_cbarg   = cbarg;
+	port->endis_pending = BFA_TRUE;
+
+	bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_ENABLE_REQ,
+		    bfa_ioc_portid(port->ioc));
+	bfa_ioc_mbox_queue(port->ioc, &port->endis_mb);
+
+	return BFA_STATUS_OK;
+}
+
+/*
+ * bfa_port_disable()
+ *
+ *   Send the Port disable request to the f/w
+ *
+ * @param[in] Pointer to the Port module data structure.
+ *
+ * @return Status
+ */
+bfa_status_t
+bfa_port_disable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
+		  void *cbarg)
+{
+	struct bfi_port_generic_req_s *m;
+
+	/* If port is PBC disabled, return error */
+	if (port->pbc_disabled) {
+		bfa_trc(port, BFA_STATUS_PBC);
+		return BFA_STATUS_PBC;
+	}
+
+	if (bfa_ioc_is_disabled(port->ioc)) {
+		bfa_trc(port, BFA_STATUS_IOC_DISABLED);
+		return BFA_STATUS_IOC_DISABLED;
+	}
+
+	if (!bfa_ioc_is_operational(port->ioc)) {
+		bfa_trc(port, BFA_STATUS_IOC_FAILURE);
+		return BFA_STATUS_IOC_FAILURE;
+	}
+
+	/* if port is d-port enabled, return error */
+	if (port->dport_enabled) {
+		bfa_trc(port, BFA_STATUS_DPORT_ERR);
+		return BFA_STATUS_DPORT_ERR;
+	}
+
+	if (port->endis_pending) {
+		bfa_trc(port, BFA_STATUS_DEVBUSY);
+		return BFA_STATUS_DEVBUSY;
+	}
+
+	m = (struct bfi_port_generic_req_s *) port->endis_mb.msg;
+
+	port->msgtag++;
+	port->endis_cbfn    = cbfn;
+	port->endis_cbarg   = cbarg;
+	port->endis_pending = BFA_TRUE;
+
+	bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_DISABLE_REQ,
+		    bfa_ioc_portid(port->ioc));
+	bfa_ioc_mbox_queue(port->ioc, &port->endis_mb);
+
+	return BFA_STATUS_OK;
+}
+
+/*
+ * bfa_port_get_stats()
+ *
+ *   Send the request to the f/w to fetch Port statistics.
+ *
+ * @param[in] Pointer to the Port module data structure.
+ *
+ * @return Status
+ */
+bfa_status_t
+bfa_port_get_stats(struct bfa_port_s *port, union bfa_port_stats_u *stats,
+		    bfa_port_stats_cbfn_t cbfn, void *cbarg)
+{
+	struct bfi_port_get_stats_req_s *m;
+
+	if (!bfa_ioc_is_operational(port->ioc)) {
+		bfa_trc(port, BFA_STATUS_IOC_FAILURE);
+		return BFA_STATUS_IOC_FAILURE;
+	}
+
+	if (port->stats_busy) {
+		bfa_trc(port, BFA_STATUS_DEVBUSY);
+		return BFA_STATUS_DEVBUSY;
+	}
+
+	m = (struct bfi_port_get_stats_req_s *) port->stats_mb.msg;
+
+	port->stats	  = stats;
+	port->stats_cbfn  = cbfn;
+	port->stats_cbarg = cbarg;
+	port->stats_busy  = BFA_TRUE;
+	bfa_dma_be_addr_set(m->dma_addr, port->stats_dma.pa);
+
+	bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_GET_STATS_REQ,
+		    bfa_ioc_portid(port->ioc));
+	bfa_ioc_mbox_queue(port->ioc, &port->stats_mb);
+
+	return BFA_STATUS_OK;
+}
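+
+/*
+ * Minimal usage sketch (hypothetical caller; drv_stats_cbfn and
+ * dp->pstats are assumed names).  The stats buffer must stay valid
+ * until the callback fires, since the firmware response is copied out
+ * of the DMA region asynchronously:
+ *
+ *	static void drv_stats_cbfn(void *cbarg, bfa_status_t status) { ... }
+ *
+ *	if (bfa_port_get_stats(port, &dp->pstats, drv_stats_cbfn, dp) !=
+ *	    BFA_STATUS_OK)
+ *		... retry later: port busy or IOC not operational
+ */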
+
+/*
+ * bfa_port_clear_stats()
+ *
+ *
+ * @param[in] Pointer to the Port module data structure.
+ *
+ * @return Status
+ */
+bfa_status_t
+bfa_port_clear_stats(struct bfa_port_s *port, bfa_port_stats_cbfn_t cbfn,
+		      void *cbarg)
+{
+	struct bfi_port_generic_req_s *m;
+
+	if (!bfa_ioc_is_operational(port->ioc)) {
+		bfa_trc(port, BFA_STATUS_IOC_FAILURE);
+		return BFA_STATUS_IOC_FAILURE;
+	}
+
+	if (port->stats_busy) {
+		bfa_trc(port, BFA_STATUS_DEVBUSY);
+		return BFA_STATUS_DEVBUSY;
+	}
+
+	m = (struct bfi_port_generic_req_s *) port->stats_mb.msg;
+
+	port->stats_cbfn  = cbfn;
+	port->stats_cbarg = cbarg;
+	port->stats_busy  = BFA_TRUE;
+
+	bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_CLEAR_STATS_REQ,
+		    bfa_ioc_portid(port->ioc));
+	bfa_ioc_mbox_queue(port->ioc, &port->stats_mb);
+
+	return BFA_STATUS_OK;
+}
+
+/*
+ * bfa_port_notify()
+ *
+ * Port module IOC event handler
+ *
+ * @param[in] Pointer to the Port module data structure.
+ * @param[in] IOC event structure
+ *
+ * @return void
+ */
+void
+bfa_port_notify(void *arg, enum bfa_ioc_event_e event)
+{
+	struct bfa_port_s *port = (struct bfa_port_s *) arg;
+
+	switch (event) {
+	case BFA_IOC_E_DISABLED:
+	case BFA_IOC_E_FAILED:
+		/* Fail any pending get_stats/clear_stats requests */
+		if (port->stats_busy) {
+			if (port->stats_cbfn)
+				port->stats_cbfn(port->stats_cbarg,
+						BFA_STATUS_FAILED);
+			port->stats_cbfn = NULL;
+			port->stats_busy = BFA_FALSE;
+		}
+
+		/* Fail any pending enable/disable request */
+		if (port->endis_pending) {
+			if (port->endis_cbfn)
+				port->endis_cbfn(port->endis_cbarg,
+						BFA_STATUS_FAILED);
+			port->endis_cbfn = NULL;
+			port->endis_pending = BFA_FALSE;
+		}
+
+		/* clear D-port mode */
+		if (port->dport_enabled)
+			bfa_port_set_dportenabled(port, BFA_FALSE);
+		break;
+	default:
+		break;
+	}
+}
+
+/*
+ * bfa_port_attach()
+ *
+ *
+ * @param[in] port - Pointer to the Port module data structure
+ *            ioc  - Pointer to the ioc module data structure
+ *            dev  - Pointer to the device driver module data structure
+ *                   The device driver specific mbox ISR functions have
+ *                   this pointer as one of the parameters.
+ *            trcmod -
+ *
+ * @return void
+ */
+void
+bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
+		 void *dev, struct bfa_trc_mod_s *trcmod)
+{
+	WARN_ON(!port);
+
+	port->dev    = dev;
+	port->ioc    = ioc;
+	port->trcmod = trcmod;
+
+	port->stats_busy = BFA_FALSE;
+	port->endis_pending = BFA_FALSE;
+	port->stats_cbfn = NULL;
+	port->endis_cbfn = NULL;
+	port->pbc_disabled = BFA_FALSE;
+	port->dport_enabled = BFA_FALSE;
+
+	bfa_ioc_mbox_regisr(port->ioc, BFI_MC_PORT, bfa_port_isr, port);
+	bfa_q_qe_init(&port->ioc_notify);
+	bfa_ioc_notify_init(&port->ioc_notify, bfa_port_notify, port);
+	list_add_tail(&port->ioc_notify.qe, &port->ioc->notify_q);
+
+	/*
+	 * initialize time stamp for stats reset
+	 */
+	port->stats_reset_time = ktime_get_seconds();
+
+	bfa_trc(port, 0);
+}
+
+/*
+ * bfa_port_set_dportenabled()
+ *
+ * Port module - set d-port enabled flag
+ *
+ * @param[in] port - Pointer to the Port module data structure
+ *
+ * @return void
+ */
+void
+bfa_port_set_dportenabled(struct bfa_port_s *port, bfa_boolean_t enabled)
+{
+	port->dport_enabled = enabled;
+}
+
+/*
+ *	CEE module specific definitions
+ */
+
+/*
+ * bfa_cee_get_attr_isr()
+ *
+ * @brief CEE ISR for get-attributes responses from f/w
+ *
+ * @param[in] cee - Pointer to the CEE module
+ *		    status - Return status from the f/w
+ *
+ * @return void
+ */
+static void
+bfa_cee_get_attr_isr(struct bfa_cee_s *cee, bfa_status_t status)
+{
+	struct bfa_cee_lldp_cfg_s *lldp_cfg = &cee->attr->lldp_remote;
+
+	cee->get_attr_status = status;
+	bfa_trc(cee, 0);
+	if (status == BFA_STATUS_OK) {
+		bfa_trc(cee, 0);
+		memcpy(cee->attr, cee->attr_dma.kva,
+			sizeof(struct bfa_cee_attr_s));
+		lldp_cfg->time_to_live = be16_to_cpu(lldp_cfg->time_to_live);
+		lldp_cfg->enabled_system_cap =
+				be16_to_cpu(lldp_cfg->enabled_system_cap);
+	}
+	cee->get_attr_pending = BFA_FALSE;
+	if (cee->cbfn.get_attr_cbfn) {
+		bfa_trc(cee, 0);
+		cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg, status);
+	}
+}
+
+/*
+ * bfa_cee_get_stats_isr()
+ *
+ * @brief CEE ISR for get-stats responses from f/w
+ *
+ * @param[in] cee - Pointer to the CEE module
+ *	      status - Return status from the f/w
+ *
+ * @return void
+ */
+static void
+bfa_cee_get_stats_isr(struct bfa_cee_s *cee, bfa_status_t status)
+{
+	u32 *buffer;
+	int i;
+
+	cee->get_stats_status = status;
+	bfa_trc(cee, 0);
+	if (status == BFA_STATUS_OK) {
+		bfa_trc(cee, 0);
+		memcpy(cee->stats, cee->stats_dma.kva,
+			sizeof(struct bfa_cee_stats_s));
+		/* swap the cee stats */
+		buffer = (u32 *)cee->stats;
+		for (i = 0; i < (sizeof(struct bfa_cee_stats_s) /
+				 sizeof(u32)); i++)
+			buffer[i] = cpu_to_be32(buffer[i]);
+	}
+	cee->get_stats_pending = BFA_FALSE;
+	bfa_trc(cee, 0);
+	if (cee->cbfn.get_stats_cbfn) {
+		bfa_trc(cee, 0);
+		cee->cbfn.get_stats_cbfn(cee->cbfn.get_stats_cbarg, status);
+	}
+}
+
+/*
+ * bfa_cee_reset_stats_isr()
+ *
+ * @brief CEE ISR for reset-stats responses from f/w
+ *
+ * @param[in] cee - Pointer to the CEE module
+ *            status - Return status from the f/w
+ *
+ * @return void
+ */
+static void
+bfa_cee_reset_stats_isr(struct bfa_cee_s *cee, bfa_status_t status)
+{
+	cee->reset_stats_status = status;
+	cee->reset_stats_pending = BFA_FALSE;
+	if (cee->cbfn.reset_stats_cbfn)
+		cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg, status);
+}
+
+/*
+ * bfa_cee_meminfo()
+ *
+ * @brief Returns the size of the DMA memory needed by CEE module
+ *
+ * @param[in] void
+ *
+ * @return Size of DMA region
+ */
+u32
+bfa_cee_meminfo(void)
+{
+	return BFA_ROUNDUP(sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ) +
+		BFA_ROUNDUP(sizeof(struct bfa_cee_stats_s), BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * bfa_cee_mem_claim()
+ *
+ * @brief Initializes CEE DMA memory
+ *
+ * @param[in] cee CEE module pointer
+ *            dma_kva Kernel Virtual Address of CEE DMA Memory
+ *            dma_pa  Physical Address of CEE DMA Memory
+ *
+ * @return void
+ */
+void
+bfa_cee_mem_claim(struct bfa_cee_s *cee, u8 *dma_kva, u64 dma_pa)
+{
+	cee->attr_dma.kva = dma_kva;
+	cee->attr_dma.pa = dma_pa;
+	cee->stats_dma.kva = dma_kva + BFA_ROUNDUP(
+			     sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ);
+	cee->stats_dma.pa = dma_pa + BFA_ROUNDUP(
+			     sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ);
+	cee->attr = (struct bfa_cee_attr_s *) dma_kva;
+	cee->stats = (struct bfa_cee_stats_s *) (dma_kva + BFA_ROUNDUP(
+			sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ));
+}
+
+/*
+ * bfa_cee_get_attr()
+ *
+ * @brief
+ *   Send the request to the f/w to fetch CEE attributes.
+ *
+ * @param[in] Pointer to the CEE module data structure.
+ *
+ * @return Status
+ */
+
+bfa_status_t
+bfa_cee_get_attr(struct bfa_cee_s *cee, struct bfa_cee_attr_s *attr,
+		 bfa_cee_get_attr_cbfn_t cbfn, void *cbarg)
+{
+	struct bfi_cee_get_req_s *cmd;
+
+	WARN_ON((cee == NULL) || (cee->ioc == NULL));
+	bfa_trc(cee, 0);
+	if (!bfa_ioc_is_operational(cee->ioc)) {
+		bfa_trc(cee, 0);
+		return BFA_STATUS_IOC_FAILURE;
+	}
+	if (cee->get_attr_pending == BFA_TRUE) {
+		bfa_trc(cee, 0);
+		return  BFA_STATUS_DEVBUSY;
+	}
+	cee->get_attr_pending = BFA_TRUE;
+	cmd = (struct bfi_cee_get_req_s *) cee->get_cfg_mb.msg;
+	cee->attr = attr;
+	cee->cbfn.get_attr_cbfn = cbfn;
+	cee->cbfn.get_attr_cbarg = cbarg;
+	bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_CFG_REQ,
+		bfa_ioc_portid(cee->ioc));
+	bfa_dma_be_addr_set(cmd->dma_addr, cee->attr_dma.pa);
+	bfa_ioc_mbox_queue(cee->ioc, &cee->get_cfg_mb);
+
+	return BFA_STATUS_OK;
+}
+
+/*
+ * bfa_cee_get_stats()
+ *
+ * @brief
+ *   Send the request to the f/w to fetch CEE statistics.
+ *
+ * @param[in] Pointer to the CEE module data structure.
+ *
+ * @return Status
+ */
+
+bfa_status_t
+bfa_cee_get_stats(struct bfa_cee_s *cee, struct bfa_cee_stats_s *stats,
+		  bfa_cee_get_stats_cbfn_t cbfn, void *cbarg)
+{
+	struct bfi_cee_get_req_s *cmd;
+
+	WARN_ON((cee == NULL) || (cee->ioc == NULL));
+
+	if (!bfa_ioc_is_operational(cee->ioc)) {
+		bfa_trc(cee, 0);
+		return BFA_STATUS_IOC_FAILURE;
+	}
+	if (cee->get_stats_pending == BFA_TRUE) {
+		bfa_trc(cee, 0);
+		return  BFA_STATUS_DEVBUSY;
+	}
+	cee->get_stats_pending = BFA_TRUE;
+	cmd = (struct bfi_cee_get_req_s *) cee->get_stats_mb.msg;
+	cee->stats = stats;
+	cee->cbfn.get_stats_cbfn = cbfn;
+	cee->cbfn.get_stats_cbarg = cbarg;
+	bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_STATS_REQ,
+		bfa_ioc_portid(cee->ioc));
+	bfa_dma_be_addr_set(cmd->dma_addr, cee->stats_dma.pa);
+	bfa_ioc_mbox_queue(cee->ioc, &cee->get_stats_mb);
+
+	return BFA_STATUS_OK;
+}
+
+/*
+ * bfa_cee_reset_stats()
+ *
+ * @brief Clears CEE Stats in the f/w.
+ *
+ * @param[in] Pointer to the CEE module data structure.
+ *
+ * @return Status
+ */
+
+bfa_status_t
+bfa_cee_reset_stats(struct bfa_cee_s *cee,
+		    bfa_cee_reset_stats_cbfn_t cbfn, void *cbarg)
+{
+	struct bfi_cee_reset_stats_s *cmd;
+
+	WARN_ON((cee == NULL) || (cee->ioc == NULL));
+	if (!bfa_ioc_is_operational(cee->ioc)) {
+		bfa_trc(cee, 0);
+		return BFA_STATUS_IOC_FAILURE;
+	}
+	if (cee->reset_stats_pending == BFA_TRUE) {
+		bfa_trc(cee, 0);
+		return  BFA_STATUS_DEVBUSY;
+	}
+	cee->reset_stats_pending = BFA_TRUE;
+	cmd = (struct bfi_cee_reset_stats_s *) cee->reset_stats_mb.msg;
+	cee->cbfn.reset_stats_cbfn = cbfn;
+	cee->cbfn.reset_stats_cbarg = cbarg;
+	bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_RESET_STATS,
+		bfa_ioc_portid(cee->ioc));
+	bfa_ioc_mbox_queue(cee->ioc, &cee->reset_stats_mb);
+
+	return BFA_STATUS_OK;
+}
+
+/*
+ * bfa_cee_isr()
+ *
+ * @brief Handles mailbox interrupts for the CEE module.
+ *
+ * @param[in] Pointer to the CEE module data structure.
+ *
+ * @return void
+ */
+
+void
+bfa_cee_isr(void *cbarg, struct bfi_mbmsg_s *m)
+{
+	union bfi_cee_i2h_msg_u *msg;
+	struct bfi_cee_get_rsp_s *get_rsp;
+	struct bfa_cee_s *cee = (struct bfa_cee_s *) cbarg;
+	msg = (union bfi_cee_i2h_msg_u *) m;
+	get_rsp = (struct bfi_cee_get_rsp_s *) m;
+	bfa_trc(cee, msg->mh.msg_id);
+	switch (msg->mh.msg_id) {
+	case BFI_CEE_I2H_GET_CFG_RSP:
+		bfa_trc(cee, get_rsp->cmd_status);
+		bfa_cee_get_attr_isr(cee, get_rsp->cmd_status);
+		break;
+	case BFI_CEE_I2H_GET_STATS_RSP:
+		bfa_cee_get_stats_isr(cee, get_rsp->cmd_status);
+		break;
+	case BFI_CEE_I2H_RESET_STATS_RSP:
+		bfa_cee_reset_stats_isr(cee, get_rsp->cmd_status);
+		break;
+	default:
+		WARN_ON(1);
+	}
+}
+
+/*
+ * bfa_cee_notify()
+ *
+ * @brief CEE module IOC event handler.
+ *
+ * @param[in] Pointer to the CEE module data structure.
+ * @param[in] IOC event type
+ *
+ * @return void
+ */
+
+void
+bfa_cee_notify(void *arg, enum bfa_ioc_event_e event)
+{
+	struct bfa_cee_s *cee = (struct bfa_cee_s *) arg;
+
+	bfa_trc(cee, event);
+
+	switch (event) {
+	case BFA_IOC_E_DISABLED:
+	case BFA_IOC_E_FAILED:
+		if (cee->get_attr_pending == BFA_TRUE) {
+			cee->get_attr_status = BFA_STATUS_FAILED;
+			cee->get_attr_pending  = BFA_FALSE;
+			if (cee->cbfn.get_attr_cbfn) {
+				cee->cbfn.get_attr_cbfn(
+					cee->cbfn.get_attr_cbarg,
+					BFA_STATUS_FAILED);
+			}
+		}
+		if (cee->get_stats_pending == BFA_TRUE) {
+			cee->get_stats_status = BFA_STATUS_FAILED;
+			cee->get_stats_pending  = BFA_FALSE;
+			if (cee->cbfn.get_stats_cbfn) {
+				cee->cbfn.get_stats_cbfn(
+				cee->cbfn.get_stats_cbarg,
+				BFA_STATUS_FAILED);
+			}
+		}
+		if (cee->reset_stats_pending == BFA_TRUE) {
+			cee->reset_stats_status = BFA_STATUS_FAILED;
+			cee->reset_stats_pending  = BFA_FALSE;
+			if (cee->cbfn.reset_stats_cbfn) {
+				cee->cbfn.reset_stats_cbfn(
+				cee->cbfn.reset_stats_cbarg,
+				BFA_STATUS_FAILED);
+			}
+		}
+		break;
+
+	default:
+		break;
+	}
+}
+
+/*
+ * bfa_cee_attach()
+ *
+ * @brief CEE module-attach API
+ *
+ * @param[in] cee - Pointer to the CEE module data structure
+ *            ioc - Pointer to the ioc module data structure
+ *            dev - Pointer to the device driver module data structure
+ *                  The device driver specific mbox ISR functions have
+ *                  this pointer as one of the parameters.
+ *
+ * @return void
+ */
+void
+bfa_cee_attach(struct bfa_cee_s *cee, struct bfa_ioc_s *ioc,
+		void *dev)
+{
+	WARN_ON(cee == NULL);
+	cee->dev = dev;
+	cee->ioc = ioc;
+
+	bfa_ioc_mbox_regisr(cee->ioc, BFI_MC_CEE, bfa_cee_isr, cee);
+	bfa_q_qe_init(&cee->ioc_notify);
+	bfa_ioc_notify_init(&cee->ioc_notify, bfa_cee_notify, cee);
+	list_add_tail(&cee->ioc_notify.qe, &cee->ioc->notify_q);
+}
diff --git a/drivers/scsi/bfa/bfa_port.h b/drivers/scsi/bfa/bfa_port.h
new file mode 100644
index 0000000..0c3b200
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_port.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014- QLogic Corporation.
+ * All rights reserved
+ * www.qlogic.com
+ *
+ * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __BFA_PORT_H__
+#define __BFA_PORT_H__
+
+#include "bfa_defs_svc.h"
+#include "bfa_ioc.h"
+#include "bfa_cs.h"
+
+typedef void (*bfa_port_stats_cbfn_t) (void *dev, bfa_status_t status);
+typedef void (*bfa_port_endis_cbfn_t) (void *dev, bfa_status_t status);
+
+struct bfa_port_s {
+	void				*dev;
+	struct bfa_ioc_s		*ioc;
+	struct bfa_trc_mod_s		*trcmod;
+	u32			msgtag;
+	bfa_boolean_t			stats_busy;
+	struct bfa_mbox_cmd_s		stats_mb;
+	bfa_port_stats_cbfn_t		stats_cbfn;
+	void				*stats_cbarg;
+	bfa_status_t			stats_status;
+	time64_t			stats_reset_time;
+	union bfa_port_stats_u		*stats;
+	struct bfa_dma_s		stats_dma;
+	bfa_boolean_t			endis_pending;
+	struct bfa_mbox_cmd_s		endis_mb;
+	bfa_port_endis_cbfn_t		endis_cbfn;
+	void				*endis_cbarg;
+	bfa_status_t			endis_status;
+	struct bfa_ioc_notify_s		ioc_notify;
+	bfa_boolean_t			pbc_disabled;
+	bfa_boolean_t			dport_enabled;
+	struct bfa_mem_dma_s		port_dma;
+};
+
+#define BFA_MEM_PORT_DMA(__bfa)		(&((__bfa)->modules.port.port_dma))
+
+void	     bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
+				void *dev, struct bfa_trc_mod_s *trcmod);
+void	bfa_port_notify(void *arg, enum bfa_ioc_event_e event);
+
+bfa_status_t bfa_port_get_stats(struct bfa_port_s *port,
+				 union bfa_port_stats_u *stats,
+				 bfa_port_stats_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_port_clear_stats(struct bfa_port_s *port,
+				   bfa_port_stats_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_port_enable(struct bfa_port_s *port,
+			      bfa_port_endis_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_port_disable(struct bfa_port_s *port,
+			       bfa_port_endis_cbfn_t cbfn, void *cbarg);
+u32     bfa_port_meminfo(void);
+void	     bfa_port_mem_claim(struct bfa_port_s *port,
+				 u8 *dma_kva, u64 dma_pa);
+void	bfa_port_set_dportenabled(struct bfa_port_s *port,
+				  bfa_boolean_t enabled);
+
+/*
+ * CEE declaration
+ */
+typedef void (*bfa_cee_get_attr_cbfn_t) (void *dev, bfa_status_t status);
+typedef void (*bfa_cee_get_stats_cbfn_t) (void *dev, bfa_status_t status);
+typedef void (*bfa_cee_reset_stats_cbfn_t) (void *dev, bfa_status_t status);
+
+struct bfa_cee_cbfn_s {
+	bfa_cee_get_attr_cbfn_t		get_attr_cbfn;
+	void				*get_attr_cbarg;
+	bfa_cee_get_stats_cbfn_t	get_stats_cbfn;
+	void				*get_stats_cbarg;
+	bfa_cee_reset_stats_cbfn_t	reset_stats_cbfn;
+	void				*reset_stats_cbarg;
+};
+
+struct bfa_cee_s {
+	void *dev;
+	bfa_boolean_t		get_attr_pending;
+	bfa_boolean_t		get_stats_pending;
+	bfa_boolean_t		reset_stats_pending;
+	bfa_status_t		get_attr_status;
+	bfa_status_t		get_stats_status;
+	bfa_status_t		reset_stats_status;
+	struct bfa_cee_cbfn_s	cbfn;
+	struct bfa_ioc_notify_s	ioc_notify;
+	struct bfa_trc_mod_s	*trcmod;
+	struct bfa_cee_attr_s	*attr;
+	struct bfa_cee_stats_s	*stats;
+	struct bfa_dma_s	attr_dma;
+	struct bfa_dma_s	stats_dma;
+	struct bfa_ioc_s	*ioc;
+	struct bfa_mbox_cmd_s	get_cfg_mb;
+	struct bfa_mbox_cmd_s	get_stats_mb;
+	struct bfa_mbox_cmd_s	reset_stats_mb;
+	struct bfa_mem_dma_s	cee_dma;
+};
+
+#define BFA_MEM_CEE_DMA(__bfa)	(&((__bfa)->modules.cee.cee_dma))
+
+u32	bfa_cee_meminfo(void);
+void	bfa_cee_mem_claim(struct bfa_cee_s *cee, u8 *dma_kva, u64 dma_pa);
+void	bfa_cee_attach(struct bfa_cee_s *cee,
+			struct bfa_ioc_s *ioc, void *dev);
+bfa_status_t	bfa_cee_get_attr(struct bfa_cee_s *cee,
+				struct bfa_cee_attr_s *attr,
+				bfa_cee_get_attr_cbfn_t cbfn, void *cbarg);
+bfa_status_t	bfa_cee_get_stats(struct bfa_cee_s *cee,
+				struct bfa_cee_stats_s *stats,
+				bfa_cee_get_stats_cbfn_t cbfn, void *cbarg);
+bfa_status_t	bfa_cee_reset_stats(struct bfa_cee_s *cee,
+				bfa_cee_reset_stats_cbfn_t cbfn, void *cbarg);
+
+#endif	/* __BFA_PORT_H__ */
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
new file mode 100644
index 0000000..6fc34fb
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -0,0 +1,6914 @@
+/*
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014- QLogic Corporation.
+ * All rights reserved
+ * www.qlogic.com
+ *
+ * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include "bfad_drv.h"
+#include "bfad_im.h"
+#include "bfa_plog.h"
+#include "bfa_cs.h"
+#include "bfa_modules.h"
+
+BFA_TRC_FILE(HAL, FCXP);
+
+/*
+ * LPS related definitions
+ */
+#define BFA_LPS_MIN_LPORTS      (1)
+#define BFA_LPS_MAX_LPORTS      (256)
+
+/*
+ * Maximum Vports supported per physical port or vf.
+ */
+#define BFA_LPS_MAX_VPORTS_SUPP_CB  255
+#define BFA_LPS_MAX_VPORTS_SUPP_CT  190
+
+/*
+ * FC PORT related definitions
+ */
+/*
+ * The port is considered disabled if corresponding physical port or IOC are
+ * disabled explicitly
+ */
+#define BFA_PORT_IS_DISABLED(bfa) \
+	((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
+	(bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
+
+/*
+ * BFA port state machine events
+ */
+enum bfa_fcport_sm_event {
+	BFA_FCPORT_SM_START	= 1,	/*  start port state machine	*/
+	BFA_FCPORT_SM_STOP	= 2,	/*  stop port state machine	*/
+	BFA_FCPORT_SM_ENABLE	= 3,	/*  enable port		*/
+	BFA_FCPORT_SM_DISABLE	= 4,	/*  disable port state machine */
+	BFA_FCPORT_SM_FWRSP	= 5,	/*  firmware enable/disable rsp */
+	BFA_FCPORT_SM_LINKUP	= 6,	/*  firmware linkup event	*/
+	BFA_FCPORT_SM_LINKDOWN	= 7,	/*  firmware linkdown event	*/
+	BFA_FCPORT_SM_QRESUME	= 8,	/*  CQ space available	*/
+	BFA_FCPORT_SM_HWFAIL	= 9,	/*  IOC h/w failure		*/
+	BFA_FCPORT_SM_DPORTENABLE = 10, /*  enable dport      */
+	BFA_FCPORT_SM_DPORTDISABLE = 11,/*  disable dport     */
+	BFA_FCPORT_SM_FAA_MISCONFIG = 12,	/* FAA misconfiguration */
+	BFA_FCPORT_SM_DDPORTENABLE  = 13,	/* enable ddport	*/
+	BFA_FCPORT_SM_DDPORTDISABLE = 14,	/* disable ddport	*/
+};
+
+/*
+ * BFA port link notification state machine events
+ */
+
+enum bfa_fcport_ln_sm_event {
+	BFA_FCPORT_LN_SM_LINKUP		= 1,	/*  linkup event	*/
+	BFA_FCPORT_LN_SM_LINKDOWN	= 2,	/*  linkdown event	*/
+	BFA_FCPORT_LN_SM_NOTIFICATION	= 3	/*  done notification	*/
+};
+
+/*
+ * RPORT related definitions
+ */
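+/*
+ * Deliver rport online/offline completions: call back synchronously
+ * when FCS is attached to BFA, otherwise queue the completion so it
+ * runs from the driver's completion-processing context.
+ */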
+#define bfa_rport_offline_cb(__rp) do {					\
+	if ((__rp)->bfa->fcs)						\
+		bfa_cb_rport_offline((__rp)->rport_drv);      \
+	else {								\
+		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
+				__bfa_cb_rport_offline, (__rp));      \
+	}								\
+} while (0)
+
+#define bfa_rport_online_cb(__rp) do {					\
+	if ((__rp)->bfa->fcs)						\
+		bfa_cb_rport_online((__rp)->rport_drv);      \
+	else {								\
+		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
+				  __bfa_cb_rport_online, (__rp));      \
+		}							\
+} while (0)
+
+/*
+ * forward declarations FCXP related functions
+ */
+static void	__bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
+static void	hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
+				struct bfi_fcxp_send_rsp_s *fcxp_rsp);
+static void	hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen,
+				struct bfa_fcxp_s *fcxp, struct fchs_s *fchs);
+static void	bfa_fcxp_qresume(void *cbarg);
+static void	bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
+				struct bfi_fcxp_send_req_s *send_req);
+
+/*
+ * forward declarations for LPS functions
+ */
+static void bfa_lps_login_rsp(struct bfa_s *bfa,
+				struct bfi_lps_login_rsp_s *rsp);
+static void bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count);
+static void bfa_lps_logout_rsp(struct bfa_s *bfa,
+				struct bfi_lps_logout_rsp_s *rsp);
+static void bfa_lps_reqq_resume(void *lps_arg);
+static void bfa_lps_free(struct bfa_lps_s *lps);
+static void bfa_lps_send_login(struct bfa_lps_s *lps);
+static void bfa_lps_send_logout(struct bfa_lps_s *lps);
+static void bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps);
+static void bfa_lps_login_comp(struct bfa_lps_s *lps);
+static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
+static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
+
+/*
+ * forward declaration for LPS state machine
+ */
+static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
+static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event);
+static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps,
+					enum bfa_lps_event event);
+static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event);
+static void bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps,
+					enum bfa_lps_event event);
+static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
+static void bfa_lps_sm_logowait(struct bfa_lps_s *lps,
+					enum bfa_lps_event event);
+
+/*
+ * forward declaration for FC Port functions
+ */
+static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
+static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport);
+static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport);
+static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport);
+static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport);
+static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete);
+static void bfa_fcport_scn(struct bfa_fcport_s *fcport,
+			enum bfa_port_linkstate event, bfa_boolean_t trunk);
+static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln,
+				enum bfa_port_linkstate event);
+static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete);
+static void bfa_fcport_stats_get_timeout(void *cbarg);
+static void bfa_fcport_stats_clr_timeout(void *cbarg);
+static void bfa_trunk_iocdisable(struct bfa_s *bfa);
+
+/*
+ * forward declaration for FC PORT state machine
+ */
+static void     bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
+					enum bfa_fcport_sm_event event);
+static void     bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
+					enum bfa_fcport_sm_event event);
+static void     bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
+					enum bfa_fcport_sm_event event);
+static void     bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
+					enum bfa_fcport_sm_event event);
+static void     bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
+					enum bfa_fcport_sm_event event);
+static void     bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
+					enum bfa_fcport_sm_event event);
+static void     bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
+					enum bfa_fcport_sm_event event);
+static void     bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
+					enum bfa_fcport_sm_event event);
+static void     bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
+					enum bfa_fcport_sm_event event);
+static void     bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
+					enum bfa_fcport_sm_event event);
+static void     bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
+					enum bfa_fcport_sm_event event);
+static void     bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
+					enum bfa_fcport_sm_event event);
+static void	bfa_fcport_sm_dport(struct bfa_fcport_s *fcport,
+					enum bfa_fcport_sm_event event);
+static void     bfa_fcport_sm_ddport(struct bfa_fcport_s *fcport,
+					enum bfa_fcport_sm_event event);
+static void	bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport,
+					enum bfa_fcport_sm_event event);
+
+static void     bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
+					enum bfa_fcport_ln_sm_event event);
+static void     bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
+					enum bfa_fcport_ln_sm_event event);
+static void     bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
+					enum bfa_fcport_ln_sm_event event);
+static void     bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
+					enum bfa_fcport_ln_sm_event event);
+static void     bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
+					enum bfa_fcport_ln_sm_event event);
+static void     bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
+					enum bfa_fcport_ln_sm_event event);
+static void     bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
+					enum bfa_fcport_ln_sm_event event);
+
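+/*
+ * Map each fcport state machine handler to the externally reported
+ * BFA_PORT_ST_* state.
+ */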
+static struct bfa_sm_table_s hal_port_sm_table[] = {
+	{BFA_SM(bfa_fcport_sm_uninit), BFA_PORT_ST_UNINIT},
+	{BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PORT_ST_ENABLING_QWAIT},
+	{BFA_SM(bfa_fcport_sm_enabling), BFA_PORT_ST_ENABLING},
+	{BFA_SM(bfa_fcport_sm_linkdown), BFA_PORT_ST_LINKDOWN},
+	{BFA_SM(bfa_fcport_sm_linkup), BFA_PORT_ST_LINKUP},
+	{BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PORT_ST_DISABLING_QWAIT},
+	{BFA_SM(bfa_fcport_sm_toggling_qwait), BFA_PORT_ST_TOGGLING_QWAIT},
+	{BFA_SM(bfa_fcport_sm_disabling), BFA_PORT_ST_DISABLING},
+	{BFA_SM(bfa_fcport_sm_disabled), BFA_PORT_ST_DISABLED},
+	{BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED},
+	{BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN},
+	{BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN},
+	{BFA_SM(bfa_fcport_sm_dport), BFA_PORT_ST_DPORT},
+	{BFA_SM(bfa_fcport_sm_ddport), BFA_PORT_ST_DDPORT},
+	{BFA_SM(bfa_fcport_sm_faa_misconfig), BFA_PORT_ST_FAA_MISCONFIG},
+};
+
+
+/*
+ * forward declaration for RPORT related functions
+ */
+static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
+static void		bfa_rport_free(struct bfa_rport_s *rport);
+static bfa_boolean_t	bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
+static bfa_boolean_t	bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
+static bfa_boolean_t	bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
+static void		__bfa_cb_rport_online(void *cbarg,
+						bfa_boolean_t complete);
+static void		__bfa_cb_rport_offline(void *cbarg,
+						bfa_boolean_t complete);
+
+/*
+ * forward declaration for RPORT state machine
+ */
+static void     bfa_rport_sm_uninit(struct bfa_rport_s *rp,
+					enum bfa_rport_event event);
+static void     bfa_rport_sm_created(struct bfa_rport_s *rp,
+					enum bfa_rport_event event);
+static void     bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
+					enum bfa_rport_event event);
+static void     bfa_rport_sm_online(struct bfa_rport_s *rp,
+					enum bfa_rport_event event);
+static void     bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
+					enum bfa_rport_event event);
+static void     bfa_rport_sm_offline(struct bfa_rport_s *rp,
+					enum bfa_rport_event event);
+static void     bfa_rport_sm_deleting(struct bfa_rport_s *rp,
+					enum bfa_rport_event event);
+static void     bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
+					enum bfa_rport_event event);
+static void     bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
+					enum bfa_rport_event event);
+static void     bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
+					enum bfa_rport_event event);
+static void     bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
+					enum bfa_rport_event event);
+static void     bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
+					enum bfa_rport_event event);
+static void     bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
+					enum bfa_rport_event event);
+
+/*
+ * PLOG related definitions
+ */
+static int
+plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
+{
+	if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
+		(pl_rec->log_type != BFA_PL_LOG_TYPE_STRING))
+		return 1;
+
+	if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
+		(pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ))
+		return 1;
+
+	return 0;
+}
+
+static void
+bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
+{
+	u16 tail;
+	struct bfa_plog_rec_s *pl_recp;
+
+	if (plog->plog_enabled == 0)
+		return;
+
+	if (plkd_validate_logrec(pl_rec)) {
+		WARN_ON(1);
+		return;
+	}
+
+	tail = plog->tail;
+
+	pl_recp = &(plog->plog_recs[tail]);
+
+	memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));
+
+	pl_recp->tv = ktime_get_real_seconds();
+	BFA_PL_LOG_REC_INCR(plog->tail);
+
+	if (plog->head == plog->tail)
+		BFA_PL_LOG_REC_INCR(plog->head);
+}
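+
+/*
+ * The port log is a fixed-size ring: BFA_PL_LOG_REC_INCR() wraps the
+ * tail, and when the tail catches up with the head the head is advanced
+ * too, so the oldest record is silently overwritten rather than the add
+ * failing.
+ */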
+
+void
+bfa_plog_init(struct bfa_plog_s *plog)
+{
+	memset((char *)plog, 0, sizeof(struct bfa_plog_s));
+
+	memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
+	plog->head = plog->tail = 0;
+	plog->plog_enabled = 1;
+}
+
+void
+bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
+		enum bfa_plog_eid event,
+		u16 misc, char *log_str)
+{
+	struct bfa_plog_rec_s  lp;
+
+	if (plog->plog_enabled) {
+		memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
+		lp.mid = mid;
+		lp.eid = event;
+		lp.log_type = BFA_PL_LOG_TYPE_STRING;
+		lp.misc = misc;
+		strlcpy(lp.log_entry.string_log, log_str,
+			BFA_PL_STRING_LOG_SZ);
+		lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
+		bfa_plog_add(plog, &lp);
+	}
+}
+
+void
+bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
+		enum bfa_plog_eid event,
+		u16 misc, u32 *intarr, u32 num_ints)
+{
+	struct bfa_plog_rec_s  lp;
+	u32 i;
+
+	if (num_ints > BFA_PL_INT_LOG_SZ)
+		num_ints = BFA_PL_INT_LOG_SZ;
+
+	if (plog->plog_enabled) {
+		memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
+		lp.mid = mid;
+		lp.eid = event;
+		lp.log_type = BFA_PL_LOG_TYPE_INT;
+		lp.misc = misc;
+
+		for (i = 0; i < num_ints; i++)
+			lp.log_entry.int_log[i] = intarr[i];
+
+		lp.log_num_ints = (u8) num_ints;
+
+		bfa_plog_add(plog, &lp);
+	}
+}
+
+void
+bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
+			enum bfa_plog_eid event,
+			u16 misc, struct fchs_s *fchdr)
+{
+	struct bfa_plog_rec_s  lp;
+	u32	*tmp_int = (u32 *) fchdr;
+	u32	ints[BFA_PL_INT_LOG_SZ];
+
+	if (plog->plog_enabled) {
+		memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
+
+		ints[0] = tmp_int[0];
+		ints[1] = tmp_int[1];
+		ints[2] = tmp_int[4];
+
+		bfa_plog_intarr(plog, mid, event, misc, ints, 3);
+	}
+}
+
+void
+bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
+		      enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr,
+		      u32 pld_w0)
+{
+	struct bfa_plog_rec_s  lp;
+	u32	*tmp_int = (u32 *) fchdr;
+	u32	ints[BFA_PL_INT_LOG_SZ];
+
+	if (plog->plog_enabled) {
+		memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
+
+		ints[0] = tmp_int[0];
+		ints[1] = tmp_int[1];
+		ints[2] = tmp_int[4];
+		ints[3] = pld_w0;
+
+		bfa_plog_intarr(plog, mid, event, misc, ints, 4);
+	}
+}
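+
+/*
+ * Usage sketch for the portlog API above (hypothetical call sites; the
+ * mid/eid values shown are ones used elsewhere in this file):
+ *
+ *	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_PORT_ENABLE,
+ *		     0, "Port Enable");
+ *
+ *	u32 vals[2] = { old_speed, new_speed };
+ *	bfa_plog_intarr(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_PORT_ST_CHANGE,
+ *			0, vals, 2);
+ */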
+
+
+/*
+ *  fcxp_pvt BFA FCXP private functions
+ */
+
+static void
+claim_fcxps_mem(struct bfa_fcxp_mod_s *mod)
+{
+	u16	i;
+	struct bfa_fcxp_s *fcxp;
+
+	fcxp = (struct bfa_fcxp_s *) bfa_mem_kva_curp(mod);
+	memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
+
+	INIT_LIST_HEAD(&mod->fcxp_req_free_q);
+	INIT_LIST_HEAD(&mod->fcxp_rsp_free_q);
+	INIT_LIST_HEAD(&mod->fcxp_active_q);
+	INIT_LIST_HEAD(&mod->fcxp_req_unused_q);
+	INIT_LIST_HEAD(&mod->fcxp_rsp_unused_q);
+
+	mod->fcxp_list = fcxp;
+
+	for (i = 0; i < mod->num_fcxps; i++) {
+		fcxp->fcxp_mod = mod;
+		fcxp->fcxp_tag = i;
+
+		if (i < (mod->num_fcxps / 2)) {
+			list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q);
+			fcxp->req_rsp = BFA_TRUE;
+		} else {
+			list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q);
+			fcxp->req_rsp = BFA_FALSE;
+		}
+
+		bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
+		fcxp->reqq_waiting = BFA_FALSE;
+
+		fcxp = fcxp + 1;
+	}
+
+	bfa_mem_kva_curp(mod) = (void *)fcxp;
+}
+
+void
+bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
+		struct bfa_s *bfa)
+{
+	struct bfa_fcxp_mod_s *fcxp_mod = BFA_FCXP_MOD(bfa);
+	struct bfa_mem_kva_s *fcxp_kva = BFA_MEM_FCXP_KVA(bfa);
+	struct bfa_mem_dma_s *seg_ptr;
+	u16	nsegs, idx, per_seg_fcxp;
+	u16	num_fcxps = cfg->fwcfg.num_fcxp_reqs;
+	u32	per_fcxp_sz;
+
+	if (num_fcxps == 0)
+		return;
+
+	if (cfg->drvcfg.min_cfg)
+		per_fcxp_sz = 2 * BFA_FCXP_MAX_IBUF_SZ;
+	else
+		per_fcxp_sz = BFA_FCXP_MAX_IBUF_SZ + BFA_FCXP_MAX_LBUF_SZ;
+
+	/* dma memory */
+	nsegs = BFI_MEM_DMA_NSEGS(num_fcxps, per_fcxp_sz);
+	per_seg_fcxp = BFI_MEM_NREQS_SEG(per_fcxp_sz);
+
+	bfa_mem_dma_seg_iter(fcxp_mod, seg_ptr, nsegs, idx) {
+		if (num_fcxps >= per_seg_fcxp) {
+			num_fcxps -= per_seg_fcxp;
+			bfa_mem_dma_setup(minfo, seg_ptr,
+				per_seg_fcxp * per_fcxp_sz);
+		} else
+			bfa_mem_dma_setup(minfo, seg_ptr,
+				num_fcxps * per_fcxp_sz);
+	}
+
+	/* kva memory */
+	bfa_mem_kva_setup(minfo, fcxp_kva,
+		cfg->fwcfg.num_fcxp_reqs * sizeof(struct bfa_fcxp_s));
+}
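+
+/*
+ * Worked example of the segmentation above (illustrative numbers only,
+ * assuming a 128 KiB BFI_MEM_DMA_SEG_SZ and a per_fcxp_sz of 6 KiB): each
+ * DMA segment then holds 21 fcxp payloads, so 64 fcxps need
+ * DIV_ROUND_UP(64, 21) = 4 segments -- three sized for 21 payloads each
+ * and a last one sized for the remaining single payload.
+ */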
+
+void
+bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+		struct bfa_pcidev_s *pcidev)
+{
+	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
+
+	mod->bfa = bfa;
+	mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;
+
+	/*
+	 * Initialize FCXP request and response payload sizes.
+	 */
+	mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
+	if (!cfg->drvcfg.min_cfg)
+		mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;
+
+	INIT_LIST_HEAD(&mod->req_wait_q);
+	INIT_LIST_HEAD(&mod->rsp_wait_q);
+
+	claim_fcxps_mem(mod);
+}
+
+void
+bfa_fcxp_iocdisable(struct bfa_s *bfa)
+{
+	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
+	struct bfa_fcxp_s *fcxp;
+	struct list_head	      *qe, *qen;
+
+	/* Enqueue unused fcxp resources to free_q */
+	list_splice_tail_init(&mod->fcxp_req_unused_q, &mod->fcxp_req_free_q);
+	list_splice_tail_init(&mod->fcxp_rsp_unused_q, &mod->fcxp_rsp_free_q);
+
+	list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
+		fcxp = (struct bfa_fcxp_s *) qe;
+		if (fcxp->caller == NULL) {
+			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
+					BFA_STATUS_IOC_FAILURE, 0, 0, NULL);
+			bfa_fcxp_free(fcxp);
+		} else {
+			fcxp->rsp_status = BFA_STATUS_IOC_FAILURE;
+			bfa_cb_queue(bfa, &fcxp->hcb_qe,
+				     __bfa_fcxp_send_cbfn, fcxp);
+		}
+	}
+}
+
+static struct bfa_fcxp_s *
+bfa_fcxp_get(struct bfa_fcxp_mod_s *fm, bfa_boolean_t req)
+{
+	struct bfa_fcxp_s *fcxp;
+
+	if (req)
+		bfa_q_deq(&fm->fcxp_req_free_q, &fcxp);
+	else
+		bfa_q_deq(&fm->fcxp_rsp_free_q, &fcxp);
+
+	if (fcxp)
+		list_add_tail(&fcxp->qe, &fm->fcxp_active_q);
+
+	return fcxp;
+}
+
+static void
+bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp,
+	       struct bfa_s *bfa,
+	       u8 *use_ibuf,
+	       u32 *nr_sgles,
+	       bfa_fcxp_get_sgaddr_t *r_sga_cbfn,
+	       bfa_fcxp_get_sglen_t *r_sglen_cbfn,
+	       struct list_head *r_sgpg_q,
+	       int n_sgles,
+	       bfa_fcxp_get_sgaddr_t sga_cbfn,
+	       bfa_fcxp_get_sglen_t sglen_cbfn)
+{
+
+	WARN_ON(bfa == NULL);
+
+	bfa_trc(bfa, fcxp->fcxp_tag);
+
+	if (n_sgles == 0) {
+		*use_ibuf = 1;
+	} else {
+		WARN_ON(*sga_cbfn == NULL);
+		WARN_ON(*sglen_cbfn == NULL);
+
+		*use_ibuf = 0;
+		*r_sga_cbfn = sga_cbfn;
+		*r_sglen_cbfn = sglen_cbfn;
+
+		*nr_sgles = n_sgles;
+
+		/*
+		 * alloc required sgpgs
+		 */
+		if (n_sgles > BFI_SGE_INLINE)
+			WARN_ON(1);
+	}
+
+}
+
+static void
+bfa_fcxp_init(struct bfa_fcxp_s *fcxp,
+	       void *caller, struct bfa_s *bfa, int nreq_sgles,
+	       int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
+	       bfa_fcxp_get_sglen_t req_sglen_cbfn,
+	       bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
+	       bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
+{
+
+	WARN_ON(bfa == NULL);
+
+	bfa_trc(bfa, fcxp->fcxp_tag);
+
+	fcxp->caller = caller;
+
+	bfa_fcxp_init_reqrsp(fcxp, bfa,
+		&fcxp->use_ireqbuf, &fcxp->nreq_sgles, &fcxp->req_sga_cbfn,
+		&fcxp->req_sglen_cbfn, &fcxp->req_sgpg_q,
+		nreq_sgles, req_sga_cbfn, req_sglen_cbfn);
+
+	bfa_fcxp_init_reqrsp(fcxp, bfa,
+		&fcxp->use_irspbuf, &fcxp->nrsp_sgles, &fcxp->rsp_sga_cbfn,
+		&fcxp->rsp_sglen_cbfn, &fcxp->rsp_sgpg_q,
+		nrsp_sgles, rsp_sga_cbfn, rsp_sglen_cbfn);
+
+}
+
+static void
+bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
+{
+	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
+	struct bfa_fcxp_wqe_s *wqe;
+
+	if (fcxp->req_rsp)
+		bfa_q_deq(&mod->req_wait_q, &wqe);
+	else
+		bfa_q_deq(&mod->rsp_wait_q, &wqe);
+
+	if (wqe) {
+		bfa_trc(mod->bfa, fcxp->fcxp_tag);
+
+		bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles,
+			wqe->nrsp_sgles, wqe->req_sga_cbfn,
+			wqe->req_sglen_cbfn, wqe->rsp_sga_cbfn,
+			wqe->rsp_sglen_cbfn);
+
+		wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);
+		return;
+	}
+
+	WARN_ON(!bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
+	list_del(&fcxp->qe);
+
+	if (fcxp->req_rsp)
+		list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q);
+	else
+		list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q);
+}
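+
+/*
+ * Note the ordering in bfa_fcxp_put() above: a freed fcxp is handed
+ * straight to the oldest waiter on the matching wait queue, if any,
+ * instead of returning to the free list, so waiters queued via
+ * bfa_fcxp_req_rsp_alloc_wait() are never starved by new allocations.
+ */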
+
+static void
+bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
+		   bfa_status_t req_status, u32 rsp_len,
+		   u32 resid_len, struct fchs_s *rsp_fchs)
+{
+	/* discarded fcxp completion */
+}
+
+static void
+__bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete)
+{
+	struct bfa_fcxp_s *fcxp = cbarg;
+
+	if (complete) {
+		fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
+				fcxp->rsp_status, fcxp->rsp_len,
+				fcxp->residue_len, &fcxp->rsp_fchs);
+	} else {
+		bfa_fcxp_free(fcxp);
+	}
+}
+
+static void
+hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
+{
+	struct bfa_fcxp_mod_s	*mod = BFA_FCXP_MOD(bfa);
+	struct bfa_fcxp_s	*fcxp;
+	u16		fcxp_tag = be16_to_cpu(fcxp_rsp->fcxp_tag);
+
+	bfa_trc(bfa, fcxp_tag);
+
+	fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len);
+
+	/*
+	 * @todo f/w should not set residue to non-0 when everything
+	 *	 is received.
+	 */
+	if (fcxp_rsp->req_status == BFA_STATUS_OK)
+		fcxp_rsp->residue_len = 0;
+	else
+		fcxp_rsp->residue_len = be32_to_cpu(fcxp_rsp->residue_len);
+
+	fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);
+
+	WARN_ON(fcxp->send_cbfn == NULL);
+
+	hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);
+
+	if (fcxp->send_cbfn != NULL) {
+		bfa_trc(mod->bfa, (NULL == fcxp->caller));
+		if (fcxp->caller == NULL) {
+			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
+					fcxp_rsp->req_status, fcxp_rsp->rsp_len,
+					fcxp_rsp->residue_len, &fcxp_rsp->fchs);
+			/*
+			 * fcxp automatically freed on return from the callback
+			 */
+			bfa_fcxp_free(fcxp);
+		} else {
+			fcxp->rsp_status = fcxp_rsp->req_status;
+			fcxp->rsp_len = fcxp_rsp->rsp_len;
+			fcxp->residue_len = fcxp_rsp->residue_len;
+			fcxp->rsp_fchs = fcxp_rsp->fchs;
+
+			bfa_cb_queue(bfa, &fcxp->hcb_qe,
+					__bfa_fcxp_send_cbfn, fcxp);
+		}
+	} else {
+		bfa_trc(bfa, (NULL == fcxp->send_cbfn));
+	}
+}
+
+static void
+hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
+		 struct fchs_s *fchs)
+{
+	/*
+	 * TODO: TX ox_id
+	 */
+	if (reqlen > 0) {
+		if (fcxp->use_ireqbuf) {
+			u32	pld_w0 =
+				*((u32 *) BFA_FCXP_REQ_PLD(fcxp));
+
+			bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
+					BFA_PL_EID_TX,
+					reqlen + sizeof(struct fchs_s), fchs,
+					pld_w0);
+		} else {
+			bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
+					BFA_PL_EID_TX,
+					reqlen + sizeof(struct fchs_s),
+					fchs);
+		}
+	} else {
+		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX,
+			       reqlen + sizeof(struct fchs_s), fchs);
+	}
+}
+
+static void
+hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
+		 struct bfi_fcxp_send_rsp_s *fcxp_rsp)
+{
+	if (fcxp_rsp->rsp_len > 0) {
+		if (fcxp->use_irspbuf) {
+			u32	pld_w0 =
+				*((u32 *) BFA_FCXP_RSP_PLD(fcxp));
+
+			bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
+					      BFA_PL_EID_RX,
+					      (u16) fcxp_rsp->rsp_len,
+					      &fcxp_rsp->fchs, pld_w0);
+		} else {
+			bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
+				       BFA_PL_EID_RX,
+				       (u16) fcxp_rsp->rsp_len,
+				       &fcxp_rsp->fchs);
+		}
+	} else {
+		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX,
+			       (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs);
+	}
+}
+
+/*
+ * Handler to resume sending fcxp when space is available in the CPE queue.
+ */
+static void
+bfa_fcxp_qresume(void *cbarg)
+{
+	struct bfa_fcxp_s		*fcxp = cbarg;
+	struct bfa_s			*bfa = fcxp->fcxp_mod->bfa;
+	struct bfi_fcxp_send_req_s	*send_req;
+
+	fcxp->reqq_waiting = BFA_FALSE;
+	send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
+	bfa_fcxp_queue(fcxp, send_req);
+}
+
+/*
+ * Queue fcxp send request to firmware.
+ */
+static void
+bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
+{
+	struct bfa_s			*bfa = fcxp->fcxp_mod->bfa;
+	struct bfa_fcxp_req_info_s	*reqi = &fcxp->req_info;
+	struct bfa_fcxp_rsp_info_s	*rspi = &fcxp->rsp_info;
+	struct bfa_rport_s		*rport = reqi->bfa_rport;
+
+	bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
+		    bfa_fn_lpu(bfa));
+
+	send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag);
+	if (rport) {
+		send_req->rport_fw_hndl = rport->fw_handle;
+		send_req->max_frmsz = cpu_to_be16(rport->rport_info.max_frmsz);
+		if (send_req->max_frmsz == 0)
+			send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
+	} else {
+		send_req->rport_fw_hndl = 0;
+		send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
+	}
+
+	send_req->vf_id = cpu_to_be16(reqi->vf_id);
+	send_req->lp_fwtag = bfa_lps_get_fwtag(bfa, reqi->lp_tag);
+	send_req->class = reqi->class;
+	send_req->rsp_timeout = rspi->rsp_timeout;
+	send_req->cts = reqi->cts;
+	send_req->fchs = reqi->fchs;
+
+	send_req->req_len = cpu_to_be32(reqi->req_tot_len);
+	send_req->rsp_maxlen = cpu_to_be32(rspi->rsp_maxlen);
+
+	/*
+	 * setup req sgles
+	 */
+	if (fcxp->use_ireqbuf == 1) {
+		bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
+					BFA_FCXP_REQ_PLD_PA(fcxp));
+	} else {
+		if (fcxp->nreq_sgles > 0) {
+			WARN_ON(fcxp->nreq_sgles != 1);
+			bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
+				fcxp->req_sga_cbfn(fcxp->caller, 0));
+		} else {
+			WARN_ON(reqi->req_tot_len != 0);
+			bfa_alen_set(&send_req->req_alen, 0, 0);
+		}
+	}
+
+	/*
+	 * setup rsp sgles
+	 */
+	if (fcxp->use_irspbuf == 1) {
+		WARN_ON(rspi->rsp_maxlen > BFA_FCXP_MAX_LBUF_SZ);
+
+		bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
+					BFA_FCXP_RSP_PLD_PA(fcxp));
+	} else {
+		if (fcxp->nrsp_sgles > 0) {
+			WARN_ON(fcxp->nrsp_sgles != 1);
+			bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
+				fcxp->rsp_sga_cbfn(fcxp->caller, 0));
+
+		} else {
+			WARN_ON(rspi->rsp_maxlen != 0);
+			bfa_alen_set(&send_req->rsp_alen, 0, 0);
+		}
+	}
+
+	hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);
+
+	bfa_reqq_produce(bfa, BFA_REQQ_FCXP, send_req->mh);
+
+	bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
+	bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
+}
+
+/*
+ * Allocate an FCXP instance to send a response or to send a request
+ * that has a response. Request/response buffers are allocated by caller.
+ *
+ * @param[in]	caller		caller context, echoed back in SG callbacks
+ * @param[in]	bfa		BFA instance
+ * @param[in]	nreq_sgles	Number of SG elements required for request
+ *				buffer. 0, if fcxp internal buffers are used.
+ *				Use bfa_fcxp_get_reqbuf() to get the
+ *				internal req buffer.
+ * @param[in]	nrsp_sgles	Number of SG elements required for response
+ *				buffer. 0, if fcxp internal buffers are used.
+ *				Use bfa_fcxp_get_rspbuf() to get the
+ *				internal rsp buffer.
+ * @param[in]	req_sga_cbfn	function ptr called to get a request SG
+ *				address (given the sge index).
+ * @param[in]	req_sglen_cbfn	function ptr called to get a request SG
+ *				len (given the sge index).
+ * @param[in]	rsp_sga_cbfn	function ptr called to get a response SG
+ *				address (given the sge index).
+ * @param[in]	rsp_sglen_cbfn	function ptr called to get a response SG
+ *				len (given the sge index).
+ * @param[in]	req		Allocated FCXP is used to send req or rsp?
+ *				request - BFA_TRUE, response - BFA_FALSE
+ *
+ * @return FCXP instance. NULL on failure.
+ */
+struct bfa_fcxp_s *
+bfa_fcxp_req_rsp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
+		int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
+		bfa_fcxp_get_sglen_t req_sglen_cbfn,
+		bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
+		bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req)
+{
+	struct bfa_fcxp_s *fcxp = NULL;
+
+	WARN_ON(bfa == NULL);
+
+	fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa), req);
+	if (fcxp == NULL)
+		return NULL;
+
+	bfa_trc(bfa, fcxp->fcxp_tag);
+
+	bfa_fcxp_init(fcxp, caller, bfa, nreq_sgles, nrsp_sgles, req_sga_cbfn,
+			req_sglen_cbfn, rsp_sga_cbfn, rsp_sglen_cbfn);
+
+	return fcxp;
+}
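+
+/*
+ * Usage sketch (hypothetical caller, not part of this file): allocate a
+ * request fcxp that uses the internal buffers (zero SG elements), build
+ * the payload in place and send it. my_build_els(), my_comp_cbfn and
+ * my_cbarg are assumed caller-side names; FC_CLASS_3 and FC_ELS_TOV are
+ * assumed to come from the driver's FC headers.
+ *
+ *	struct bfa_fcxp_s *fcxp;
+ *	struct fchs_s fchs;
+ *	void *reqbuf;
+ *	u32 reqlen;
+ *
+ *	fcxp = bfa_fcxp_req_rsp_alloc(caller, bfa, 0, 0, NULL, NULL,
+ *				      NULL, NULL, BFA_TRUE);
+ *	if (fcxp == NULL)
+ *		return;		(or queue: see bfa_fcxp_req_rsp_alloc_wait())
+ *
+ *	reqbuf = bfa_fcxp_get_reqbuf(fcxp);
+ *	reqlen = my_build_els(reqbuf, &fchs);
+ *	bfa_fcxp_send(fcxp, rport, vf_id, lp_tag, BFA_FALSE, FC_CLASS_3,
+ *		      reqlen, &fchs, my_comp_cbfn, my_cbarg,
+ *		      bfa_fcxp_get_maxrsp(bfa), FC_ELS_TOV);
+ */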
+
+/*
+ * Get the internal request buffer pointer
+ *
+ * @param[in]	fcxp	BFA fcxp pointer
+ *
+ * @return		pointer to the internal request buffer
+ */
+void *
+bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
+{
+	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
+	void	*reqbuf;
+
+	WARN_ON(fcxp->use_ireqbuf != 1);
+	reqbuf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
+				mod->req_pld_sz + mod->rsp_pld_sz);
+	return reqbuf;
+}
+
+u32
+bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
+{
+	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
+
+	return mod->req_pld_sz;
+}
+
+/*
+ * Get the internal response buffer pointer
+ *
+ * @param[in]	fcxp	BFA fcxp pointer
+ *
+ * @return		pointer to the internal response buffer
+ */
+void *
+bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
+{
+	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
+	void	*fcxp_buf;
+
+	WARN_ON(fcxp->use_irspbuf != 1);
+
+	fcxp_buf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
+				mod->req_pld_sz + mod->rsp_pld_sz);
+
+	/* fcxp_buf = req_buf then rsp_buf; add req_pld_sz to reach rsp_buf */
+	return ((u8 *) fcxp_buf) + mod->req_pld_sz;
+}
+
+/*
+ * Free the BFA FCXP
+ *
+ * @param[in]	fcxp			BFA fcxp pointer
+ *
+ * @return		void
+ */
+void
+bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
+{
+	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
+
+	WARN_ON(fcxp == NULL);
+	bfa_trc(mod->bfa, fcxp->fcxp_tag);
+	bfa_fcxp_put(fcxp);
+}
+
+/*
+ * Send an FCXP request
+ *
+ * @param[in]	fcxp	BFA fcxp pointer
+ * @param[in]	rport	BFA rport pointer. Could be left NULL for WKA rports
+ * @param[in]	vf_id	virtual Fabric ID
+ * @param[in]	lp_tag	lport tag
+ * @param[in]	cts	use Continuous sequence
+ * @param[in]	cos	fc Class of Service
+ * @param[in]	reqlen	request length, does not include FCHS length
+ * @param[in]	fchs	fc Header Pointer. The header content will be copied
+ *			in by BFA.
+ *
+ * @param[in]	cbfn	call back function to be called on receiving
+ *								the response
+ * @param[in]	cbarg	arg for cbfn
+ * @param[in]	rsp_maxlen	maximum response length expected
+ * @param[in]	rsp_timeout	response timeout
+ *
+ * @return		void
+ */
+void
+bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
+	      u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos,
+	      u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn,
+	      void *cbarg, u32 rsp_maxlen, u8 rsp_timeout)
+{
+	struct bfa_s			*bfa  = fcxp->fcxp_mod->bfa;
+	struct bfa_fcxp_req_info_s	*reqi = &fcxp->req_info;
+	struct bfa_fcxp_rsp_info_s	*rspi = &fcxp->rsp_info;
+	struct bfi_fcxp_send_req_s	*send_req;
+
+	bfa_trc(bfa, fcxp->fcxp_tag);
+
+	/*
+	 * setup request/response info
+	 */
+	reqi->bfa_rport = rport;
+	reqi->vf_id = vf_id;
+	reqi->lp_tag = lp_tag;
+	reqi->class = cos;
+	rspi->rsp_timeout = rsp_timeout;
+	reqi->cts = cts;
+	reqi->fchs = *fchs;
+	reqi->req_tot_len = reqlen;
+	rspi->rsp_maxlen = rsp_maxlen;
+	fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
+	fcxp->send_cbarg = cbarg;
+
+	/*
+	 * If no room in CPE queue, wait for space in request queue
+	 */
+	send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
+	if (!send_req) {
+		bfa_trc(bfa, fcxp->fcxp_tag);
+		fcxp->reqq_waiting = BFA_TRUE;
+		bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe);
+		return;
+	}
+
+	bfa_fcxp_queue(fcxp, send_req);
+}
+
+/*
+ * Abort a BFA FCXP. Not implemented yet -- this always warns and
+ * reports success.
+ *
+ * @param[in]	fcxp	BFA fcxp pointer
+ *
+ * @return		bfa_status_t
+bfa_status_t
+bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
+{
+	bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag);
+	WARN_ON(1);
+	return BFA_STATUS_OK;
+}
+
+void
+bfa_fcxp_req_rsp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
+	       bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg,
+	       void *caller, int nreq_sgles,
+	       int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
+	       bfa_fcxp_get_sglen_t req_sglen_cbfn,
+	       bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
+	       bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req)
+{
+	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
+
+	if (req)
+		WARN_ON(!list_empty(&mod->fcxp_req_free_q));
+	else
+		WARN_ON(!list_empty(&mod->fcxp_rsp_free_q));
+
+	wqe->alloc_cbfn = alloc_cbfn;
+	wqe->alloc_cbarg = alloc_cbarg;
+	wqe->caller = caller;
+	wqe->bfa = bfa;
+	wqe->nreq_sgles = nreq_sgles;
+	wqe->nrsp_sgles = nrsp_sgles;
+	wqe->req_sga_cbfn = req_sga_cbfn;
+	wqe->req_sglen_cbfn = req_sglen_cbfn;
+	wqe->rsp_sga_cbfn = rsp_sga_cbfn;
+	wqe->rsp_sglen_cbfn = rsp_sglen_cbfn;
+
+	if (req)
+		list_add_tail(&wqe->qe, &mod->req_wait_q);
+	else
+		list_add_tail(&wqe->qe, &mod->rsp_wait_q);
+}
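+
+/*
+ * Sketch of the wait pattern the function above pairs with (hypothetical
+ * caller names my_wqe/my_alloc_cbfn/my_cbarg):
+ *
+ *	fcxp = bfa_fcxp_req_rsp_alloc(caller, bfa, 0, 0, NULL, NULL,
+ *				      NULL, NULL, BFA_TRUE);
+ *	if (fcxp == NULL) {
+ *		bfa_fcxp_req_rsp_alloc_wait(bfa, &my_wqe, my_alloc_cbfn,
+ *				my_cbarg, caller, 0, 0, NULL, NULL,
+ *				NULL, NULL, BFA_TRUE);
+ *		return;
+ *	}
+ *
+ * my_alloc_cbfn() is invoked from bfa_fcxp_put() as soon as another fcxp
+ * is freed back to the pool.
+ */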
+
+void
+bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
+{
+	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
+
+	/* the wqe must still be on one of the two wait queues */
+	WARN_ON(!bfa_q_is_on_q(&mod->req_wait_q, wqe) &&
+		!bfa_q_is_on_q(&mod->rsp_wait_q, wqe));
+	list_del(&wqe->qe);
+}
+
+void
+bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
+{
+	/*
+	 * If waiting for room in request queue, cancel reqq wait
+	 * and free fcxp.
+	 */
+	if (fcxp->reqq_waiting) {
+		fcxp->reqq_waiting = BFA_FALSE;
+		bfa_reqq_wcancel(&fcxp->reqq_wqe);
+		bfa_fcxp_free(fcxp);
+		return;
+	}
+
+	fcxp->send_cbfn = bfa_fcxp_null_comp;
+}
+
+void
+bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
+{
+	switch (msg->mhdr.msg_id) {
+	case BFI_FCXP_I2H_SEND_RSP:
+		hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg);
+		break;
+
+	default:
+		bfa_trc(bfa, msg->mhdr.msg_id);
+		WARN_ON(1);
+	}
+}
+
+u32
+bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
+{
+	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
+
+	return mod->rsp_pld_sz;
+}
+
+void
+bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw)
+{
+	struct bfa_fcxp_mod_s	*mod = BFA_FCXP_MOD(bfa);
+	struct list_head	*qe;
+	int	i;
+
+	for (i = 0; i < (mod->num_fcxps - num_fcxp_fw); i++) {
+		if (i < ((mod->num_fcxps - num_fcxp_fw) / 2)) {
+			bfa_q_deq_tail(&mod->fcxp_req_free_q, &qe);
+			list_add_tail(qe, &mod->fcxp_req_unused_q);
+		} else {
+			bfa_q_deq_tail(&mod->fcxp_rsp_free_q, &qe);
+			list_add_tail(qe, &mod->fcxp_rsp_unused_q);
+		}
+	}
+}
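+
+/*
+ * The reclaim above mirrors the half/half split done in claim_fcxps_mem():
+ * the first half of the surplus is parked from the request free list and
+ * the remainder from the response free list, so both pools shrink
+ * proportionally when the firmware reports fewer fcxps.
+ */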
+
+/*
+ *  BFA LPS state machine functions
+ */
+
+/*
+ * Init state -- no login
+ */
+static void
+bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
+{
+	bfa_trc(lps->bfa, lps->bfa_tag);
+	bfa_trc(lps->bfa, event);
+
+	switch (event) {
+	case BFA_LPS_SM_LOGIN:
+		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
+			bfa_sm_set_state(lps, bfa_lps_sm_loginwait);
+			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
+		} else {
+			bfa_sm_set_state(lps, bfa_lps_sm_login);
+			bfa_lps_send_login(lps);
+		}
+
+		if (lps->fdisc)
+			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+				BFA_PL_EID_LOGIN, 0, "FDISC Request");
+		else
+			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+				BFA_PL_EID_LOGIN, 0, "FLOGI Request");
+		break;
+
+	case BFA_LPS_SM_LOGOUT:
+		bfa_lps_logout_comp(lps);
+		break;
+
+	case BFA_LPS_SM_DELETE:
+		bfa_lps_free(lps);
+		break;
+
+	case BFA_LPS_SM_RX_CVL:
+	case BFA_LPS_SM_OFFLINE:
+		break;
+
+	case BFA_LPS_SM_FWRSP:
+		/*
+		 * Could happen when the fabric detects a loopback and
+		 * discards the lps request. Firmware will eventually
+		 * send out a timeout. Just ignore it.
+		 */
+		break;
+	case BFA_LPS_SM_SET_N2N_PID:
+		/*
+		 * When topology is set to loop, bfa_lps_set_n2n_pid() sends
+		 * this event. Ignore this event.
+		 */
+		break;
+
+	default:
+		bfa_sm_fault(lps->bfa, event);
+	}
+}
+
+/*
+ * login is in progress -- awaiting response from firmware
+ */
+static void
+bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
+{
+	bfa_trc(lps->bfa, lps->bfa_tag);
+	bfa_trc(lps->bfa, event);
+
+	switch (event) {
+	case BFA_LPS_SM_FWRSP:
+		if (lps->status == BFA_STATUS_OK) {
+			bfa_sm_set_state(lps, bfa_lps_sm_online);
+			if (lps->fdisc)
+				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+					BFA_PL_EID_LOGIN, 0, "FDISC Accept");
+			else
+				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+					BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
+			/* If N2N, send the assigned PID to FW */
+			bfa_trc(lps->bfa, lps->fport);
+			bfa_trc(lps->bfa, lps->lp_pid);
+
+			if (!lps->fport && lps->lp_pid)
+				bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
+		} else {
+			bfa_sm_set_state(lps, bfa_lps_sm_init);
+			if (lps->fdisc)
+				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+					BFA_PL_EID_LOGIN, 0,
+					"FDISC Fail (RJT or timeout)");
+			else
+				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+					BFA_PL_EID_LOGIN, 0,
+					"FLOGI Fail (RJT or timeout)");
+		}
+		bfa_lps_login_comp(lps);
+		break;
+
+	case BFA_LPS_SM_OFFLINE:
+	case BFA_LPS_SM_DELETE:
+		bfa_sm_set_state(lps, bfa_lps_sm_init);
+		break;
+
+	case BFA_LPS_SM_SET_N2N_PID:
+		bfa_trc(lps->bfa, lps->fport);
+		bfa_trc(lps->bfa, lps->lp_pid);
+		break;
+
+	default:
+		bfa_sm_fault(lps->bfa, event);
+	}
+}
+
+/*
+ * login pending - awaiting space in request queue
+ */
+static void
+bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
+{
+	bfa_trc(lps->bfa, lps->bfa_tag);
+	bfa_trc(lps->bfa, event);
+
+	switch (event) {
+	case BFA_LPS_SM_RESUME:
+		bfa_sm_set_state(lps, bfa_lps_sm_login);
+		bfa_lps_send_login(lps);
+		break;
+
+	case BFA_LPS_SM_OFFLINE:
+	case BFA_LPS_SM_DELETE:
+		bfa_sm_set_state(lps, bfa_lps_sm_init);
+		bfa_reqq_wcancel(&lps->wqe);
+		break;
+
+	case BFA_LPS_SM_RX_CVL:
+		/*
+		 * Login was not even sent out; so when getting out
+		 * of this state, it will appear like a login retry
+		 * after Clear virtual link
+		 */
+		break;
+
+	default:
+		bfa_sm_fault(lps->bfa, event);
+	}
+}
+
+/*
+ * login complete
+ */
+static void
+bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
+{
+	bfa_trc(lps->bfa, lps->bfa_tag);
+	bfa_trc(lps->bfa, event);
+
+	switch (event) {
+	case BFA_LPS_SM_LOGOUT:
+		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
+			bfa_sm_set_state(lps, bfa_lps_sm_logowait);
+			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
+		} else {
+			bfa_sm_set_state(lps, bfa_lps_sm_logout);
+			bfa_lps_send_logout(lps);
+		}
+		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+			BFA_PL_EID_LOGO, 0, "Logout");
+		break;
+
+	case BFA_LPS_SM_RX_CVL:
+		bfa_sm_set_state(lps, bfa_lps_sm_init);
+
+		/* Let the vport module know about this event */
+		bfa_lps_cvl_event(lps);
+		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
+		break;
+
+	case BFA_LPS_SM_SET_N2N_PID:
+		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
+			bfa_sm_set_state(lps, bfa_lps_sm_online_n2n_pid_wait);
+			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
+		} else
+			bfa_lps_send_set_n2n_pid(lps);
+		break;
+
+	case BFA_LPS_SM_OFFLINE:
+	case BFA_LPS_SM_DELETE:
+		bfa_sm_set_state(lps, bfa_lps_sm_init);
+		break;
+
+	default:
+		bfa_sm_fault(lps->bfa, event);
+	}
+}
+
+/*
+ * login complete -- awaiting space in request queue to set the N2N PID
+ */
+static void
+bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, enum bfa_lps_event event)
+{
+	bfa_trc(lps->bfa, lps->bfa_tag);
+	bfa_trc(lps->bfa, event);
+
+	switch (event) {
+	case BFA_LPS_SM_RESUME:
+		bfa_sm_set_state(lps, bfa_lps_sm_online);
+		bfa_lps_send_set_n2n_pid(lps);
+		break;
+
+	case BFA_LPS_SM_LOGOUT:
+		bfa_sm_set_state(lps, bfa_lps_sm_logowait);
+		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+			BFA_PL_EID_LOGO, 0, "Logout");
+		break;
+
+	case BFA_LPS_SM_RX_CVL:
+		bfa_sm_set_state(lps, bfa_lps_sm_init);
+		bfa_reqq_wcancel(&lps->wqe);
+
+		/* Let the vport module know about this event */
+		bfa_lps_cvl_event(lps);
+		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
+		break;
+
+	case BFA_LPS_SM_OFFLINE:
+	case BFA_LPS_SM_DELETE:
+		bfa_sm_set_state(lps, bfa_lps_sm_init);
+		bfa_reqq_wcancel(&lps->wqe);
+		break;
+
+	default:
+		bfa_sm_fault(lps->bfa, event);
+	}
+}
+
+/*
+ * logout in progress - awaiting firmware response
+ */
+static void
+bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
+{
+	bfa_trc(lps->bfa, lps->bfa_tag);
+	bfa_trc(lps->bfa, event);
+
+	switch (event) {
+	case BFA_LPS_SM_FWRSP:
+	case BFA_LPS_SM_OFFLINE:
+		bfa_sm_set_state(lps, bfa_lps_sm_init);
+		bfa_lps_logout_comp(lps);
+		break;
+
+	case BFA_LPS_SM_DELETE:
+		bfa_sm_set_state(lps, bfa_lps_sm_init);
+		break;
+
+	default:
+		bfa_sm_fault(lps->bfa, event);
+	}
+}
+
+/*
+ * logout pending -- awaiting space in request queue
+ */
+static void
+bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
+{
+	bfa_trc(lps->bfa, lps->bfa_tag);
+	bfa_trc(lps->bfa, event);
+
+	switch (event) {
+	case BFA_LPS_SM_RESUME:
+		bfa_sm_set_state(lps, bfa_lps_sm_logout);
+		bfa_lps_send_logout(lps);
+		break;
+
+	case BFA_LPS_SM_OFFLINE:
+	case BFA_LPS_SM_DELETE:
+		bfa_sm_set_state(lps, bfa_lps_sm_init);
+		bfa_reqq_wcancel(&lps->wqe);
+		break;
+
+	default:
+		bfa_sm_fault(lps->bfa, event);
+	}
+}
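+
+/*
+ * LPS state machine at a glance (normal paths, summarized from the
+ * handlers above):
+ *
+ *   init --LOGIN--> login (via loginwait if the reqq is full)
+ *   login --FWRSP(OK)--> online, --FWRSP(fail)/OFFLINE/DELETE--> init
+ *   online --LOGOUT--> logout (via logowait), --RX_CVL--> init
+ *   online --SET_N2N_PID--> online_n2n_pid_wait when the reqq is full
+ *   logout --FWRSP/OFFLINE--> init
+ */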
+
+
+
+/*
+ *  lps_pvt BFA LPS private functions
+ */
+
+/*
+ * return memory requirement
+ */
+void
+bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
+		struct bfa_s *bfa)
+{
+	struct bfa_mem_kva_s *lps_kva = BFA_MEM_LPS_KVA(bfa);
+
+	if (cfg->drvcfg.min_cfg)
+		bfa_mem_kva_setup(minfo, lps_kva,
+			sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS);
+	else
+		bfa_mem_kva_setup(minfo, lps_kva,
+			sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS);
+}
+
+/*
+ * bfa module attach at initialization time
+ */
+void
+bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+	struct bfa_pcidev_s *pcidev)
+{
+	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
+	struct bfa_lps_s	*lps;
+	int			i;
+
+	if (cfg->drvcfg.min_cfg)
+		mod->num_lps = BFA_LPS_MIN_LPORTS;
+	else
+		mod->num_lps = BFA_LPS_MAX_LPORTS;
+	mod->lps_arr = lps = (struct bfa_lps_s *) bfa_mem_kva_curp(mod);
+
+	bfa_mem_kva_curp(mod) += mod->num_lps * sizeof(struct bfa_lps_s);
+
+	INIT_LIST_HEAD(&mod->lps_free_q);
+	INIT_LIST_HEAD(&mod->lps_active_q);
+	INIT_LIST_HEAD(&mod->lps_login_q);
+
+	for (i = 0; i < mod->num_lps; i++, lps++) {
+		lps->bfa	= bfa;
+		lps->bfa_tag	= (u8) i;
+		lps->reqq	= BFA_REQQ_LPS;
+		bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
+		list_add_tail(&lps->qe, &mod->lps_free_q);
+	}
+}
+
+/*
+ * IOC in disabled state -- consider all lps offline
+ */
+void
+bfa_lps_iocdisable(struct bfa_s *bfa)
+{
+	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
+	struct bfa_lps_s	*lps;
+	struct list_head		*qe, *qen;
+
+	list_for_each_safe(qe, qen, &mod->lps_active_q) {
+		lps = (struct bfa_lps_s *) qe;
+		bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
+	}
+	list_for_each_safe(qe, qen, &mod->lps_login_q) {
+		lps = (struct bfa_lps_s *) qe;
+		bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
+	}
+	list_splice_tail_init(&mod->lps_login_q, &mod->lps_active_q);
+}
+
+/*
+ * Firmware login response
+ */
+static void
+bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
+{
+	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
+	struct bfa_lps_s	*lps;
+
+	WARN_ON(rsp->bfa_tag >= mod->num_lps);
+	lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);
+
+	lps->status = rsp->status;
+	switch (rsp->status) {
+	case BFA_STATUS_OK:
+		lps->fw_tag	= rsp->fw_tag;
+		lps->fport	= rsp->f_port;
+		if (lps->fport)
+			lps->lp_pid = rsp->lp_pid;
+		lps->npiv_en	= rsp->npiv_en;
+		lps->pr_bbcred	= be16_to_cpu(rsp->bb_credit);
+		lps->pr_pwwn	= rsp->port_name;
+		lps->pr_nwwn	= rsp->node_name;
+		lps->auth_req	= rsp->auth_req;
+		lps->lp_mac	= rsp->lp_mac;
+		lps->brcd_switch = rsp->brcd_switch;
+		lps->fcf_mac	= rsp->fcf_mac;
+
+		break;
+
+	case BFA_STATUS_FABRIC_RJT:
+		lps->lsrjt_rsn = rsp->lsrjt_rsn;
+		lps->lsrjt_expl = rsp->lsrjt_expl;
+
+		break;
+
+	case BFA_STATUS_EPROTOCOL:
+		lps->ext_status = rsp->ext_status;
+
+		break;
+
+	case BFA_STATUS_VPORT_MAX:
+		if (rsp->ext_status)
+			bfa_lps_no_res(lps, rsp->ext_status);
+		break;
+
+	default:
+		/* Nothing to do with other status */
+		break;
+	}
+
+	list_del(&lps->qe);
+	list_add_tail(&lps->qe, &mod->lps_active_q);
+	bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
+}
+
+static void
+bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count)
+{
+	struct bfa_s		*bfa = first_lps->bfa;
+	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
+	struct list_head	*qe, *qe_next;
+	struct bfa_lps_s	*lps;
+
+	bfa_trc(bfa, count);
+
+	qe = bfa_q_next(first_lps);
+
+	while (count && qe) {
+		qe_next = bfa_q_next(qe);
+		lps = (struct bfa_lps_s *)qe;
+		bfa_trc(bfa, lps->bfa_tag);
+		lps->status = first_lps->status;
+		list_del(&lps->qe);
+		list_add_tail(&lps->qe, &mod->lps_active_q);
+		bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
+		qe = qe_next;
+		count--;
+	}
+}
+
+/*
+ * Firmware logout response
+ */
+static void
+bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
+{
+	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
+	struct bfa_lps_s	*lps;
+
+	WARN_ON(rsp->bfa_tag >= mod->num_lps);
+	lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);
+
+	bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
+}
+
+/*
+ * Firmware received a Clear virtual link request (for FCoE)
+ */
+static void
+bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
+{
+	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
+	struct bfa_lps_s	*lps;
+
+	lps = BFA_LPS_FROM_TAG(mod, cvl->bfa_tag);
+
+	bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
+}
+
+/*
+ * Space is available in request queue, resume queueing request to firmware.
+ */
+static void
+bfa_lps_reqq_resume(void *lps_arg)
+{
+	struct bfa_lps_s	*lps = lps_arg;
+
+	bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
+}
+
+/*
+ * lps is freed -- triggered by vport delete
+ */
+static void
+bfa_lps_free(struct bfa_lps_s *lps)
+{
+	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(lps->bfa);
+
+	lps->lp_pid = 0;
+	list_del(&lps->qe);
+	list_add_tail(&lps->qe, &mod->lps_free_q);
+}
+
+/*
+ * send login request to firmware
+ */
+static void
+bfa_lps_send_login(struct bfa_lps_s *lps)
+{
+	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(lps->bfa);
+	struct bfi_lps_login_req_s	*m;
+
+	m = bfa_reqq_next(lps->bfa, lps->reqq);
+	WARN_ON(!m);
+
+	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
+		bfa_fn_lpu(lps->bfa));
+
+	m->bfa_tag	= lps->bfa_tag;
+	m->alpa		= lps->alpa;
+	m->pdu_size	= cpu_to_be16(lps->pdusz);
+	m->pwwn		= lps->pwwn;
+	m->nwwn		= lps->nwwn;
+	m->fdisc	= lps->fdisc;
+	m->auth_en	= lps->auth_en;
+
+	bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
+	list_del(&lps->qe);
+	list_add_tail(&lps->qe, &mod->lps_login_q);
+}
+
+/*
+ * send logout request to firmware
+ */
+static void
+bfa_lps_send_logout(struct bfa_lps_s *lps)
+{
+	struct bfi_lps_logout_req_s *m;
+
+	m = bfa_reqq_next(lps->bfa, lps->reqq);
+	WARN_ON(!m);
+
+	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
+		bfa_fn_lpu(lps->bfa));
+
+	m->fw_tag = lps->fw_tag;
+	m->port_name = lps->pwwn;
+	bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
+}
+
+/*
+ * send n2n pid set request to firmware
+ */
+static void
+bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps)
+{
+	struct bfi_lps_n2n_pid_req_s *m;
+
+	m = bfa_reqq_next(lps->bfa, lps->reqq);
+	WARN_ON(!m);
+
+	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_N2N_PID_REQ,
+		bfa_fn_lpu(lps->bfa));
+
+	m->fw_tag = lps->fw_tag;
+	m->lp_pid = lps->lp_pid;
+	bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
+}
+
+/*
+ * Indirect login completion handler for non-fcs
+ */
+static void
+bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
+{
+	struct bfa_lps_s *lps	= arg;
+
+	if (!complete)
+		return;
+
+	if (lps->fdisc)
+		bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
+	else
+		bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
+}
+
+/*
+ * Login completion handler -- direct call for fcs, queue for others
+ */
+static void
+bfa_lps_login_comp(struct bfa_lps_s *lps)
+{
+	if (!lps->bfa->fcs) {
+		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_login_comp_cb,
+			lps);
+		return;
+	}
+
+	if (lps->fdisc)
+		bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
+	else
+		bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
+}
+
+/*
+ * Indirect logout completion handler for non-fcs
+ */
+static void
+bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
+{
+	struct bfa_lps_s *lps	= arg;
+
+	if (!complete)
+		return;
+
+	if (lps->fdisc)
+		bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
+	else
+		bfa_cb_lps_flogo_comp(lps->bfa->bfad, lps->uarg);
+}
+
+/*
+ * Logout completion handler -- direct call for fcs, queue for others
+ */
+static void
+bfa_lps_logout_comp(struct bfa_lps_s *lps)
+{
+	if (!lps->bfa->fcs) {
+		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_logout_comp_cb,
+			lps);
+		return;
+	}
+	if (lps->fdisc)
+		bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
+}
+
+/*
+ * Clear virtual link completion handler for non-fcs
+ */
+static void
+bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
+{
+	struct bfa_lps_s *lps	= arg;
+
+	if (!complete)
+		return;
+
+	/* Clear virtual link to base port will result in link down */
+	if (lps->fdisc)
+		bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
+}
+
+/*
+ * Received Clear virtual link event -- direct call for fcs,
+ * queue for others
+ */
+static void
+bfa_lps_cvl_event(struct bfa_lps_s *lps)
+{
+	if (!lps->bfa->fcs) {
+		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
+			lps);
+		return;
+	}
+
+	/* Clear virtual link to base port will result in link down */
+	if (lps->fdisc)
+		bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
+}
+
+
+
+/*
+ *  lps_public BFA LPS public functions
+ */
+
+u32
+bfa_lps_get_max_vport(struct bfa_s *bfa)
+{
+	if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
+		return BFA_LPS_MAX_VPORTS_SUPP_CT;
+	else
+		return BFA_LPS_MAX_VPORTS_SUPP_CB;
+}
+
+/*
+ * Allocate a lport service tag.
+ */
+struct bfa_lps_s  *
+bfa_lps_alloc(struct bfa_s *bfa)
+{
+	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
+	struct bfa_lps_s	*lps = NULL;
+
+	bfa_q_deq(&mod->lps_free_q, &lps);
+
+	if (lps == NULL)
+		return NULL;
+
+	list_add_tail(&lps->qe, &mod->lps_active_q);
+
+	bfa_sm_set_state(lps, bfa_lps_sm_init);
+	return lps;
+}
+
+/*
+ * Free lport service tag. This can be called anytime after an alloc.
+ * No need to wait for any pending login/logout completions.
+ */
+void
+bfa_lps_delete(struct bfa_lps_s *lps)
+{
+	bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
+}
+
+/*
+ * Initiate a lport login.
+ */
+void
+bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
+	wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en)
+{
+	lps->uarg	= uarg;
+	lps->alpa	= alpa;
+	lps->pdusz	= pdusz;
+	lps->pwwn	= pwwn;
+	lps->nwwn	= nwwn;
+	lps->fdisc	= BFA_FALSE;
+	lps->auth_en	= auth_en;
+	bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
+}
+
+/*
+ * Initiate a lport fdisc login.
+ */
+void
+bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
+	wwn_t nwwn)
+{
+	lps->uarg	= uarg;
+	lps->alpa	= 0;
+	lps->pdusz	= pdusz;
+	lps->pwwn	= pwwn;
+	lps->nwwn	= nwwn;
+	lps->fdisc	= BFA_TRUE;
+	lps->auth_en	= BFA_FALSE;
+	bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
+}
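+
+/*
+ * Usage sketch for the FDISC path above (hypothetical FCS-side caller):
+ *
+ *	struct bfa_lps_s *lps = bfa_lps_alloc(bfa);
+ *	if (lps == NULL)
+ *		return;		(no free lport service tag)
+ *	bfa_lps_fdisc(lps, vport, pdusz, pwwn, nwwn);
+ *
+ * The result is delivered through bfa_cb_lps_fdisc_comp(bfad, uarg, status)
+ * once the firmware responds -- see bfa_lps_login_comp() above.
+ */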
+
+
+/*
+ * Initiate a lport FDISC logout.
+ */
+void
+bfa_lps_fdisclogo(struct bfa_lps_s *lps)
+{
+	bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
+}
+
+u8
+bfa_lps_get_fwtag(struct bfa_s *bfa, u8 lp_tag)
+{
+	struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
+
+	return BFA_LPS_FROM_TAG(mod, lp_tag)->fw_tag;
+}
+
+/*
+ * Return lport services tag given the pid
+ */
+u8
+bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
+{
+	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
+	struct bfa_lps_s	*lps;
+	int			i;
+
+	for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
+		if (lps->lp_pid == pid)
+			return lps->bfa_tag;
+	}
+
+	/* Return base port tag anyway */
+	return 0;
+}
+
+
+/*
+ * return port id assigned to the base lport
+ */
+u32
+bfa_lps_get_base_pid(struct bfa_s *bfa)
+{
+	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
+
+	return BFA_LPS_FROM_TAG(mod, 0)->lp_pid;
+}
+
+/*
+ * Set PID in case of n2n (which is assigned during PLOGI)
+ */
+void
+bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, uint32_t n2n_pid)
+{
+	bfa_trc(lps->bfa, lps->bfa_tag);
+	bfa_trc(lps->bfa, n2n_pid);
+
+	lps->lp_pid = n2n_pid;
+	bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
+}
+
+/*
+ * LPS firmware message class handler.
+ */
+void
+bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
+{
+	union bfi_lps_i2h_msg_u	msg;
+
+	bfa_trc(bfa, m->mhdr.msg_id);
+	msg.msg = m;
+
+	switch (m->mhdr.msg_id) {
+	case BFI_LPS_I2H_LOGIN_RSP:
+		bfa_lps_login_rsp(bfa, msg.login_rsp);
+		break;
+
+	case BFI_LPS_I2H_LOGOUT_RSP:
+		bfa_lps_logout_rsp(bfa, msg.logout_rsp);
+		break;
+
+	case BFI_LPS_I2H_CVL_EVENT:
+		bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
+		break;
+
+	default:
+		bfa_trc(bfa, m->mhdr.msg_id);
+		WARN_ON(1);
+	}
+}
+
+static void
+bfa_fcport_aen_post(struct bfa_fcport_s *fcport, enum bfa_port_aen_event event)
+{
+	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
+	struct bfa_aen_entry_s  *aen_entry;
+
+	bfad_get_aen_entry(bfad, aen_entry);
+	if (!aen_entry)
+		return;
+
+	aen_entry->aen_data.port.ioc_type = bfa_get_type(fcport->bfa);
+	aen_entry->aen_data.port.pwwn = fcport->pwwn;
+
+	/* Send the AEN notification */
+	bfad_im_post_vendor_event(aen_entry, bfad, ++fcport->bfa->bfa_aen_seq,
+				  BFA_AEN_CAT_PORT, event);
+}
+
+/*
+ * FC PORT state machine functions
+ */
+static void
+bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
+			enum bfa_fcport_sm_event event)
+{
+	bfa_trc(fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_SM_START:
+		/*
+		 * Start event after IOC is configured and BFA is started.
+		 */
+		fcport->use_flash_cfg = BFA_TRUE;
+
+		if (bfa_fcport_send_enable(fcport)) {
+			bfa_trc(fcport->bfa, BFA_TRUE);
+			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
+		} else {
+			bfa_trc(fcport->bfa, BFA_FALSE);
+			bfa_sm_set_state(fcport,
+					bfa_fcport_sm_enabling_qwait);
+		}
+		break;
+
+	case BFA_FCPORT_SM_ENABLE:
+		/*
+		 * Port is persistently configured to be in enabled state. Do
+		 * not change state. Port enabling is done when START event is
+		 * received.
+		 */
+		break;
+
+	case BFA_FCPORT_SM_DISABLE:
+		/*
+		 * If a port is persistently configured to be disabled, the
+		 * first event will be a port disable request.
+		 */
+		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
+		break;
+
+	case BFA_FCPORT_SM_HWFAIL:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
+		break;
+
+	default:
+		bfa_sm_fault(fcport->bfa, event);
+	}
+}
+
+static void
+bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
+				enum bfa_fcport_sm_event event)
+{
+	char pwwn_buf[BFA_STRING_32];
+	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
+	bfa_trc(fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_SM_QRESUME:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
+		bfa_fcport_send_enable(fcport);
+		break;
+
+	case BFA_FCPORT_SM_STOP:
+		bfa_reqq_wcancel(&fcport->reqq_wait);
+		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
+		break;
+
+	case BFA_FCPORT_SM_ENABLE:
+		/*
+		 * Enable is already in progress.
+		 */
+		break;
+
+	case BFA_FCPORT_SM_DISABLE:
+		/*
+		 * The enable request has not reached the firmware yet;
+		 * cancel the queued request and go straight to disabled.
+		 */
+		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
+		bfa_reqq_wcancel(&fcport->reqq_wait);
+		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
+		wwn2str(pwwn_buf, fcport->pwwn);
+		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+			"Base port disabled: WWN = %s\n", pwwn_buf);
+		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
+		break;
+
+	case BFA_FCPORT_SM_LINKUP:
+	case BFA_FCPORT_SM_LINKDOWN:
+		/*
+		 * Possible to get link events when doing back-to-back
+		 * enable/disables.
+		 */
+		break;
+
+	case BFA_FCPORT_SM_HWFAIL:
+		bfa_reqq_wcancel(&fcport->reqq_wait);
+		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
+		break;
+
+	case BFA_FCPORT_SM_FAA_MISCONFIG:
+		bfa_fcport_reset_linkinfo(fcport);
+		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
+		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
+		break;
+
+	default:
+		bfa_sm_fault(fcport->bfa, event);
+	}
+}
+
+static void
+bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
+						enum bfa_fcport_sm_event event)
+{
+	char pwwn_buf[BFA_STRING_32];
+	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
+	bfa_trc(fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_SM_FWRSP:
+	case BFA_FCPORT_SM_LINKDOWN:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
+		break;
+
+	case BFA_FCPORT_SM_LINKUP:
+		bfa_fcport_update_linkinfo(fcport);
+		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
+
+		WARN_ON(!fcport->event_cbfn);
+		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
+		break;
+
+	case BFA_FCPORT_SM_ENABLE:
+		/*
+		 * Already being enabled.
+		 */
+		break;
+
+	case BFA_FCPORT_SM_DISABLE:
+		if (bfa_fcport_send_disable(fcport))
+			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
+		else
+			bfa_sm_set_state(fcport,
+					 bfa_fcport_sm_disabling_qwait);
+
+		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
+		wwn2str(pwwn_buf, fcport->pwwn);
+		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+			"Base port disabled: WWN = %s\n", pwwn_buf);
+		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
+		break;
+
+	case BFA_FCPORT_SM_STOP:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
+		break;
+
+	case BFA_FCPORT_SM_HWFAIL:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
+		break;
+
+	case BFA_FCPORT_SM_FAA_MISCONFIG:
+		bfa_fcport_reset_linkinfo(fcport);
+		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
+		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
+		break;
+
+	default:
+		bfa_sm_fault(fcport->bfa, event);
+	}
+}
+
+static void
+bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
+						enum bfa_fcport_sm_event event)
+{
+	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
+	char pwwn_buf[BFA_STRING_32];
+	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
+
+	bfa_trc(fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_SM_LINKUP:
+		bfa_fcport_update_linkinfo(fcport);
+		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
+		WARN_ON(!fcport->event_cbfn);
+		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+				BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
+		if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
+
+			bfa_trc(fcport->bfa,
+				pevent->link_state.attr.vc_fcf.fcf.fipenabled);
+			bfa_trc(fcport->bfa,
+				pevent->link_state.attr.vc_fcf.fcf.fipfailed);
+
+			if (pevent->link_state.attr.vc_fcf.fcf.fipfailed)
+				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+					BFA_PL_EID_FIP_FCF_DISC, 0,
+					"FIP FCF Discovery Failed");
+			else
+				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+					BFA_PL_EID_FIP_FCF_DISC, 0,
+					"FIP FCF Discovered");
+		}
+
+		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
+		wwn2str(pwwn_buf, fcport->pwwn);
+		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+			"Base port online: WWN = %s\n", pwwn_buf);
+		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ONLINE);
+
+		/* If QoS is enabled and it is not online, send AEN */
+		if (fcport->cfg.qos_enabled &&
+		    fcport->qos_attr.state != BFA_QOS_ONLINE)
+			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_QOS_NEG);
+		break;
+
+	case BFA_FCPORT_SM_LINKDOWN:
+		/*
+		 * Possible to get link down event.
+		 */
+		break;
+
+	case BFA_FCPORT_SM_ENABLE:
+		/*
+		 * Already enabled.
+		 */
+		break;
+
+	case BFA_FCPORT_SM_DISABLE:
+		if (bfa_fcport_send_disable(fcport))
+			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
+		else
+			bfa_sm_set_state(fcport,
+					 bfa_fcport_sm_disabling_qwait);
+
+		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
+		wwn2str(pwwn_buf, fcport->pwwn);
+		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+			"Base port disabled: WWN = %s\n", pwwn_buf);
+		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
+		break;
+
+	case BFA_FCPORT_SM_STOP:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
+		break;
+
+	case BFA_FCPORT_SM_HWFAIL:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
+		break;
+
+	case BFA_FCPORT_SM_FAA_MISCONFIG:
+		bfa_fcport_reset_linkinfo(fcport);
+		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
+		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
+		break;
+
+	default:
+		bfa_sm_fault(fcport->bfa, event);
+	}
+}
+
+static void
+bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
+	enum bfa_fcport_sm_event event)
+{
+	char pwwn_buf[BFA_STRING_32];
+	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
+
+	bfa_trc(fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_SM_ENABLE:
+		/*
+		 * Already enabled.
+		 */
+		break;
+
+	case BFA_FCPORT_SM_DISABLE:
+		if (bfa_fcport_send_disable(fcport))
+			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
+		else
+			bfa_sm_set_state(fcport,
+					 bfa_fcport_sm_disabling_qwait);
+
+		bfa_fcport_reset_linkinfo(fcport);
+		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
+		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
+		wwn2str(pwwn_buf, fcport->pwwn);
+		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+			"Base port offline: WWN = %s\n", pwwn_buf);
+		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
+		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+			"Base port disabled: WWN = %s\n", pwwn_buf);
+		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
+		break;
+
+	case BFA_FCPORT_SM_LINKDOWN:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
+		bfa_fcport_reset_linkinfo(fcport);
+		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
+		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+				BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
+		wwn2str(pwwn_buf, fcport->pwwn);
+		if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
+			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+				"Base port offline: WWN = %s\n", pwwn_buf);
+			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
+		} else {
+			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
+				"Base port (WWN = %s) "
+				"lost fabric connectivity\n", pwwn_buf);
+			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
+		}
+		break;
+
+	case BFA_FCPORT_SM_STOP:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
+		bfa_fcport_reset_linkinfo(fcport);
+		wwn2str(pwwn_buf, fcport->pwwn);
+		if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
+			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+				"Base port offline: WWN = %s\n", pwwn_buf);
+			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
+		} else {
+			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
+				"Base port (WWN = %s) "
+				"lost fabric connectivity\n", pwwn_buf);
+			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
+		}
+		break;
+
+	case BFA_FCPORT_SM_HWFAIL:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
+		bfa_fcport_reset_linkinfo(fcport);
+		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
+		wwn2str(pwwn_buf, fcport->pwwn);
+		if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
+			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+				"Base port offline: WWN = %s\n", pwwn_buf);
+			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
+		} else {
+			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
+				"Base port (WWN = %s) "
+				"lost fabric connectivity\n", pwwn_buf);
+			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
+		}
+		break;
+
+	case BFA_FCPORT_SM_FAA_MISCONFIG:
+		bfa_fcport_reset_linkinfo(fcport);
+		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
+		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
+		break;
+
+	default:
+		bfa_sm_fault(fcport->bfa, event);
+	}
+}
+
+static void
+bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
+				 enum bfa_fcport_sm_event event)
+{
+	bfa_trc(fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_SM_QRESUME:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
+		bfa_fcport_send_disable(fcport);
+		break;
+
+	case BFA_FCPORT_SM_STOP:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
+		bfa_reqq_wcancel(&fcport->reqq_wait);
+		break;
+
+	case BFA_FCPORT_SM_ENABLE:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_toggling_qwait);
+		break;
+
+	case BFA_FCPORT_SM_DISABLE:
+		/*
+		 * Already being disabled.
+		 */
+		break;
+
+	case BFA_FCPORT_SM_LINKUP:
+	case BFA_FCPORT_SM_LINKDOWN:
+		/*
+		 * Possible to get link events when doing back-to-back
+		 * enable/disables.
+		 */
+		break;
+
+	case BFA_FCPORT_SM_HWFAIL:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
+		bfa_reqq_wcancel(&fcport->reqq_wait);
+		break;
+
+	case BFA_FCPORT_SM_FAA_MISCONFIG:
+		bfa_fcport_reset_linkinfo(fcport);
+		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
+		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
+		break;
+
+	default:
+		bfa_sm_fault(fcport->bfa, event);
+	}
+}
+
+static void
+bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
+				 enum bfa_fcport_sm_event event)
+{
+	bfa_trc(fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_SM_QRESUME:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
+		bfa_fcport_send_disable(fcport);
+		if (bfa_fcport_send_enable(fcport))
+			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
+		else
+			bfa_sm_set_state(fcport,
+					 bfa_fcport_sm_enabling_qwait);
+		break;
+
+	case BFA_FCPORT_SM_STOP:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
+		bfa_reqq_wcancel(&fcport->reqq_wait);
+		break;
+
+	case BFA_FCPORT_SM_ENABLE:
+		break;
+
+	case BFA_FCPORT_SM_DISABLE:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
+		break;
+
+	case BFA_FCPORT_SM_LINKUP:
+	case BFA_FCPORT_SM_LINKDOWN:
+		/*
+		 * Possible to get link events when doing back-to-back
+		 * enable/disables.
+		 */
+		break;
+
+	case BFA_FCPORT_SM_HWFAIL:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
+		bfa_reqq_wcancel(&fcport->reqq_wait);
+		break;
+
+	default:
+		bfa_sm_fault(fcport->bfa, event);
+	}
+}
+
+static void
+bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
+						enum bfa_fcport_sm_event event)
+{
+	char pwwn_buf[BFA_STRING_32];
+	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
+	bfa_trc(fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_SM_FWRSP:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
+		break;
+
+	case BFA_FCPORT_SM_DISABLE:
+		/*
+		 * Already being disabled.
+		 */
+		break;
+
+	case BFA_FCPORT_SM_ENABLE:
+		if (bfa_fcport_send_enable(fcport))
+			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
+		else
+			bfa_sm_set_state(fcport,
+					 bfa_fcport_sm_enabling_qwait);
+
+		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+				BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
+		wwn2str(pwwn_buf, fcport->pwwn);
+		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+			"Base port enabled: WWN = %s\n", pwwn_buf);
+		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
+		break;
+
+	case BFA_FCPORT_SM_STOP:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
+		break;
+
+	case BFA_FCPORT_SM_LINKUP:
+	case BFA_FCPORT_SM_LINKDOWN:
+		/*
+		 * Possible to get link events when doing back-to-back
+		 * enable/disables.
+		 */
+		break;
+
+	case BFA_FCPORT_SM_HWFAIL:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
+		break;
+
+	default:
+		bfa_sm_fault(fcport->bfa, event);
+	}
+}
+
+static void
+bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
+						enum bfa_fcport_sm_event event)
+{
+	char pwwn_buf[BFA_STRING_32];
+	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
+	bfa_trc(fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_SM_START:
+		/*
+		 * Ignore start event for a port that is disabled.
+		 */
+		break;
+
+	case BFA_FCPORT_SM_STOP:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
+		break;
+
+	case BFA_FCPORT_SM_ENABLE:
+		if (bfa_fcport_send_enable(fcport))
+			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
+		else
+			bfa_sm_set_state(fcport,
+					 bfa_fcport_sm_enabling_qwait);
+
+		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+				BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
+		wwn2str(pwwn_buf, fcport->pwwn);
+		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+			"Base port enabled: WWN = %s\n", pwwn_buf);
+		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
+		break;
+
+	case BFA_FCPORT_SM_DISABLE:
+		/*
+		 * Already disabled.
+		 */
+		break;
+
+	case BFA_FCPORT_SM_HWFAIL:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
+		break;
+
+	case BFA_FCPORT_SM_DPORTENABLE:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_dport);
+		break;
+
+	case BFA_FCPORT_SM_DDPORTENABLE:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_ddport);
+		break;
+
+	default:
+		bfa_sm_fault(fcport->bfa, event);
+	}
+}
+
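+/*
+ * Port is stopped; only a start event is honored.
+ */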
+static void
+bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
+			 enum bfa_fcport_sm_event event)
+{
+	bfa_trc(fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_SM_START:
+		if (bfa_fcport_send_enable(fcport))
+			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
+		else
+			bfa_sm_set_state(fcport,
+					 bfa_fcport_sm_enabling_qwait);
+		break;
+
+	default:
+		/*
+		 * Ignore all other events.
+		 */
+		;
+	}
+}
+
+/*
+ * Port is enabled. IOC is down/failed.
+ */
+static void
+bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
+			 enum bfa_fcport_sm_event event)
+{
+	bfa_trc(fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_SM_START:
+		if (bfa_fcport_send_enable(fcport))
+			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
+		else
+			bfa_sm_set_state(fcport,
+					 bfa_fcport_sm_enabling_qwait);
+		break;
+
+	default:
+		/*
+		 * Ignore all events.
+		 */
+		;
+	}
+}
+
+/*
+ * Port is disabled. IOC is down/failed.
+ */
+static void
+bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
+			 enum bfa_fcport_sm_event event)
+{
+	bfa_trc(fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_SM_START:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
+		break;
+
+	case BFA_FCPORT_SM_ENABLE:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
+		break;
+
+	default:
+		/*
+		 * Ignore all events.
+		 */
+		;
+	}
+}
+
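+/*
+ * Port is in D-Port (diagnostics) mode.
+ */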
+static void
+bfa_fcport_sm_dport(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event)
+{
+	bfa_trc(fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_SM_DPORTENABLE:
+	case BFA_FCPORT_SM_DISABLE:
+	case BFA_FCPORT_SM_ENABLE:
+	case BFA_FCPORT_SM_START:
+		/*
+		 * Ignore event for a port that is dport
+		 */
+		break;
+
+	case BFA_FCPORT_SM_STOP:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
+		break;
+
+	case BFA_FCPORT_SM_HWFAIL:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
+		break;
+
+	case BFA_FCPORT_SM_DPORTDISABLE:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
+		break;
+
+	default:
+		bfa_sm_fault(fcport->bfa, event);
+	}
+}
+
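+/*
+ * Port is in ddport (dynamic D-Port diagnostics) mode.
+ */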
+static void
+bfa_fcport_sm_ddport(struct bfa_fcport_s *fcport,
+			enum bfa_fcport_sm_event event)
+{
+	bfa_trc(fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_SM_DISABLE:
+	case BFA_FCPORT_SM_DDPORTDISABLE:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
+		break;
+
+	case BFA_FCPORT_SM_DPORTENABLE:
+	case BFA_FCPORT_SM_DPORTDISABLE:
+	case BFA_FCPORT_SM_ENABLE:
+	case BFA_FCPORT_SM_START:
+		/*
+		 * Ignore event for a port that is ddport
+		 */
+		break;
+
+	case BFA_FCPORT_SM_STOP:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
+		break;
+
+	case BFA_FCPORT_SM_HWFAIL:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
+		break;
+
+	default:
+		bfa_sm_fault(fcport->bfa, event);
+	}
+}
+
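+/*
+ * Port has detected an FAA misconfiguration.
+ */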
+static void
+bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport,
+			    enum bfa_fcport_sm_event event)
+{
+	bfa_trc(fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_SM_DPORTENABLE:
+	case BFA_FCPORT_SM_ENABLE:
+	case BFA_FCPORT_SM_START:
+		/*
+		 * Ignore event for a port as there is FAA misconfig
+		 */
+		break;
+
+	case BFA_FCPORT_SM_DISABLE:
+		if (bfa_fcport_send_disable(fcport))
+			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
+		else
+			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
+
+		bfa_fcport_reset_linkinfo(fcport);
+		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
+		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+			     BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
+		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
+		break;
+
+	case BFA_FCPORT_SM_STOP:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
+		break;
+
+	case BFA_FCPORT_SM_HWFAIL:
+		bfa_fcport_reset_linkinfo(fcport);
+		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
+		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
+		break;
+
+	default:
+		bfa_sm_fault(fcport->bfa, event);
+	}
+}
+
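+/*
+ * Link notification (ln) state machine. State names track the last
+ * reported link state ("dn"/"up"), any events that arrived in the
+ * meantime, and whether a notification callback is still outstanding
+ * (the "_nf" suffix, i.e. waiting for notification).
+ */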
+/*
+ * Link state is down
+ */
+static void
+bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
+		enum bfa_fcport_ln_sm_event event)
+{
+	bfa_trc(ln->fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_LN_SM_LINKUP:
+		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
+		bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
+		break;
+
+	default:
+		bfa_sm_fault(ln->fcport->bfa, event);
+	}
+}
+
+/*
+ * Link state is waiting for down notification
+ */
+static void
+bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
+		enum bfa_fcport_ln_sm_event event)
+{
+	bfa_trc(ln->fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_LN_SM_LINKUP:
+		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
+		break;
+
+	case BFA_FCPORT_LN_SM_NOTIFICATION:
+		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
+		break;
+
+	default:
+		bfa_sm_fault(ln->fcport->bfa, event);
+	}
+}
+
+/*
+ * Link state is waiting for down notification and there is a pending up
+ */
+static void
+bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
+		enum bfa_fcport_ln_sm_event event)
+{
+	bfa_trc(ln->fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_LN_SM_LINKDOWN:
+		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
+		break;
+
+	case BFA_FCPORT_LN_SM_NOTIFICATION:
+		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
+		bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
+		break;
+
+	default:
+		bfa_sm_fault(ln->fcport->bfa, event);
+	}
+}
+
+/*
+ * Link state is up
+ */
+static void
+bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
+		enum bfa_fcport_ln_sm_event event)
+{
+	bfa_trc(ln->fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_LN_SM_LINKDOWN:
+		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
+		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
+		break;
+
+	default:
+		bfa_sm_fault(ln->fcport->bfa, event);
+	}
+}
+
+/*
+ * Link state is waiting for up notification
+ */
+static void
+bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
+		enum bfa_fcport_ln_sm_event event)
+{
+	bfa_trc(ln->fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_LN_SM_LINKDOWN:
+		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
+		break;
+
+	case BFA_FCPORT_LN_SM_NOTIFICATION:
+		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
+		break;
+
+	default:
+		bfa_sm_fault(ln->fcport->bfa, event);
+	}
+}
+
+/*
+ * Link state is waiting for up notification and there is a pending down
+ */
+static void
+bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
+		enum bfa_fcport_ln_sm_event event)
+{
+	bfa_trc(ln->fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_LN_SM_LINKUP:
+		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
+		break;
+
+	case BFA_FCPORT_LN_SM_NOTIFICATION:
+		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
+		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
+		break;
+
+	default:
+		bfa_sm_fault(ln->fcport->bfa, event);
+	}
+}
+
+/*
+ * Link state is waiting for up notification and there are pending down and up
+ */
+static void
+bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
+			enum bfa_fcport_ln_sm_event event)
+{
+	bfa_trc(ln->fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_LN_SM_LINKDOWN:
+		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
+		break;
+
+	case BFA_FCPORT_LN_SM_NOTIFICATION:
+		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
+		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
+		break;
+
+	default:
+		bfa_sm_fault(ln->fcport->bfa, event);
+	}
+}
+
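+/*
+ * Completion callback for queued link state events: on completion the
+ * saved event is delivered to the registered callback; on flush only the
+ * ln state machine is unblocked with a NOTIFICATION event.
+ */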
+static void
+__bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
+{
+	struct bfa_fcport_ln_s *ln = cbarg;
+
+	if (complete)
+		ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event);
+	else
+		bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
+}
+
+/*
+ * Send SCN notification to upper layers.
+ * trunk - BFA_FALSE when the caller is fcport itself, so that per-port
+ *	   events are ignored while the port is in trunked mode
+ */
+static void
+bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event,
+	bfa_boolean_t trunk)
+{
+	if (fcport->cfg.trunked && !trunk)
+		return;
+
+	switch (event) {
+	case BFA_PORT_LINKUP:
+		bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP);
+		break;
+	case BFA_PORT_LINKDOWN:
+		bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
+		break;
+	default:
+		WARN_ON(1);
+	}
+}
+
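+/*
+ * Queue a link state notification. When running with FCS the callback is
+ * delivered inline; otherwise it is deferred through the callback queue
+ * and completed in __bfa_cb_fcport_event().
+ */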
+static void
+bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event)
+{
+	struct bfa_fcport_s *fcport = ln->fcport;
+
+	if (fcport->bfa->fcs) {
+		fcport->event_cbfn(fcport->event_cbarg, event);
+		bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
+	} else {
+		ln->ln_event = event;
+		bfa_cb_queue(fcport->bfa, &ln->ln_qe,
+			__bfa_cb_fcport_event, ln);
+	}
+}
+
+#define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
+							BFA_CACHELINE_SZ))
+
+void
+bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
+		   struct bfa_s *bfa)
+{
+	struct bfa_mem_dma_s *fcport_dma = BFA_MEM_FCPORT_DMA(bfa);
+
+	bfa_mem_dma_setup(minfo, fcport_dma, FCPORT_STATS_DMA_SZ);
+}
+
+static void
+bfa_fcport_qresume(void *cbarg)
+{
+	struct bfa_fcport_s *fcport = cbarg;
+
+	bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME);
+}
+
+static void
+bfa_fcport_mem_claim(struct bfa_fcport_s *fcport)
+{
+	struct bfa_mem_dma_s *fcport_dma = &fcport->fcport_dma;
+
+	fcport->stats_kva = bfa_mem_dma_virt(fcport_dma);
+	fcport->stats_pa  = bfa_mem_dma_phys(fcport_dma);
+	fcport->stats = (union bfa_fcport_stats_u *)
+				bfa_mem_dma_virt(fcport_dma);
+}
+
+/*
+ * Memory initialization.
+ */
+void
+bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+		struct bfa_pcidev_s *pcidev)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+	struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
+	struct bfa_fcport_ln_s *ln = &fcport->ln;
+
+	fcport->bfa = bfa;
+	ln->fcport = fcport;
+
+	bfa_fcport_mem_claim(fcport);
+
+	bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
+	bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
+
+	/*
+	 * initialize time stamp for stats reset
+	 */
+	fcport->stats_reset_time = ktime_get_seconds();
+	fcport->stats_dma_ready = BFA_FALSE;
+
+	/*
+	 * initialize and set default configuration
+	 */
+	port_cfg->topology = BFA_PORT_TOPOLOGY_P2P;
+	port_cfg->speed = BFA_PORT_SPEED_AUTO;
+	port_cfg->trunked = BFA_FALSE;
+	port_cfg->maxfrsize = 0;
+
+	port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;
+	port_cfg->qos_bw.high = BFA_QOS_BW_HIGH;
+	port_cfg->qos_bw.med = BFA_QOS_BW_MED;
+	port_cfg->qos_bw.low = BFA_QOS_BW_LOW;
+
+	fcport->fec_state = BFA_FEC_OFFLINE;
+
+	INIT_LIST_HEAD(&fcport->stats_pending_q);
+	INIT_LIST_HEAD(&fcport->statsclr_pending_q);
+
+	bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
+}
+
+void
+bfa_fcport_start(struct bfa_s *bfa)
+{
+	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
+}
+
+/*
+ * Called when IOC failure is detected.
+ */
+void
+bfa_fcport_iocdisable(struct bfa_s *bfa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	bfa_sm_send_event(fcport, BFA_FCPORT_SM_HWFAIL);
+	bfa_trunk_iocdisable(bfa);
+}
+
+/*
+ * Update loop info in fcport for SCN online
+ */
+static void
+bfa_fcport_update_loop_info(struct bfa_fcport_s *fcport,
+			struct bfa_fcport_loop_info_s *loop_info)
+{
+	fcport->myalpa = loop_info->myalpa;
+	fcport->alpabm_valid = loop_info->alpabm_val;
+	memcpy(fcport->alpabm.alpa_bm,
+			loop_info->alpabm.alpa_bm,
+			sizeof(struct fc_alpabm_s));
+}
+
+static void
+bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
+{
+	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
+	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
+
+	fcport->speed = pevent->link_state.speed;
+	fcport->topology = pevent->link_state.topology;
+
+	if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP) {
+		bfa_fcport_update_loop_info(fcport,
+				&pevent->link_state.attr.loop_info);
+		return;
+	}
+
+	/* QoS Details */
+	fcport->qos_attr = pevent->link_state.qos_attr;
+	fcport->qos_vc_attr = pevent->link_state.attr.vc_fcf.qos_vc_attr;
+
+	if (fcport->cfg.bb_cr_enabled)
+		fcport->bbcr_attr = pevent->link_state.attr.bbcr_attr;
+
+	fcport->fec_state = pevent->link_state.fec_state;
+
+	/*
+	 * update trunk state if applicable
+	 */
+	if (!fcport->cfg.trunked)
+		trunk->attr.state = BFA_TRUNK_DISABLED;
+
+	/* update FCoE specific */
+	fcport->fcoe_vlan =
+		be16_to_cpu(pevent->link_state.attr.vc_fcf.fcf.vlan);
+
+	bfa_trc(fcport->bfa, fcport->speed);
+	bfa_trc(fcport->bfa, fcport->topology);
+}
+
+static void
+bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
+{
+	fcport->speed = BFA_PORT_SPEED_UNKNOWN;
+	fcport->topology = BFA_PORT_TOPOLOGY_NONE;
+	fcport->fec_state = BFA_FEC_OFFLINE;
+}
+
+/*
+ * Send port enable message to firmware.
+ */
+static bfa_boolean_t
+bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
+{
+	struct bfi_fcport_enable_req_s *m;
+
+	/*
+	 * Increment message tag before queue check, so that responses to old
+	 * requests are discarded.
+	 */
+	fcport->msgtag++;
+
+	/*
+	 * check for room in queue to send request now
+	 */
+	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
+	if (!m) {
+		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
+							&fcport->reqq_wait);
+		return BFA_FALSE;
+	}
+
+	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
+			bfa_fn_lpu(fcport->bfa));
+	m->nwwn = fcport->nwwn;
+	m->pwwn = fcport->pwwn;
+	m->port_cfg = fcport->cfg;
+	m->msgtag = fcport->msgtag;
+	m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize);
+	m->use_flash_cfg = fcport->use_flash_cfg;
+	bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
+	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
+	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);
+
+	/*
+	 * queue I/O message to firmware
+	 */
+	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
+	return BFA_TRUE;
+}
+
+/*
+ * Send port disable message to firmware.
+ */
+static bfa_boolean_t
+bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
+{
+	struct bfi_fcport_req_s *m;
+
+	/*
+	 * Increment message tag before queue check, so that responses to old
+	 * requests are discarded.
+	 */
+	fcport->msgtag++;
+
+	/*
+	 * check for room in queue to send request now
+	 */
+	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
+	if (!m) {
+		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
+							&fcport->reqq_wait);
+		return BFA_FALSE;
+	}
+
+	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
+			bfa_fn_lpu(fcport->bfa));
+	m->msgtag = fcport->msgtag;
+
+	/*
+	 * queue I/O message to firmware
+	 */
+	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
+
+	return BFA_TRUE;
+}
+
+static void
+bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
+{
+	fcport->pwwn = fcport->bfa->ioc.attr->pwwn;
+	fcport->nwwn = fcport->bfa->ioc.attr->nwwn;
+
+	bfa_trc(fcport->bfa, fcport->pwwn);
+	bfa_trc(fcport->bfa, fcport->nwwn);
+}
+
+static void
+bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
+	struct bfa_qos_stats_s *s)
+{
+	u32	*dip = (u32 *) d;
+	__be32	*sip = (__be32 *) s;
+	int		i;
+
+	/* Swap each 32-bit field to host order */
+	for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
+		dip[i] = be32_to_cpu(sip[i]);
+}
+
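+/*
+ * FCoE statistics are 64-bit counters; swap the two 32-bit halves of each
+ * counter so that both word order and byte order end up in host format.
+ */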
+static void
+bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
+	struct bfa_fcoe_stats_s *s)
+{
+	u32	*dip = (u32 *) d;
+	__be32	*sip = (__be32 *) s;
+	int		i;
+
+	for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
+	     i = i + 2) {
+#ifdef __BIG_ENDIAN
+		dip[i] = be32_to_cpu(sip[i]);
+		dip[i + 1] = be32_to_cpu(sip[i + 1]);
+#else
+		dip[i] = be32_to_cpu(sip[i + 1]);
+		dip[i + 1] = be32_to_cpu(sip[i]);
+#endif
+	}
+}
+
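+/*
+ * Completion callback for a stats fetch: drain the pending request queue,
+ * converting the DMA'ed FC QoS or FCoE statistics to host order for each
+ * waiter; on flush the pending queue is simply reinitialized.
+ */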
+static void
+__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
+{
+	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *)cbarg;
+	struct bfa_cb_pending_q_s *cb;
+	struct list_head *qe, *qen;
+	union bfa_fcport_stats_u *ret;
+
+	if (complete) {
+		time64_t time = ktime_get_seconds();
+
+		list_for_each_safe(qe, qen, &fcport->stats_pending_q) {
+			bfa_q_deq(&fcport->stats_pending_q, &qe);
+			cb = (struct bfa_cb_pending_q_s *)qe;
+			if (fcport->stats_status == BFA_STATUS_OK) {
+				ret = (union bfa_fcport_stats_u *)cb->data;
+				/* Swap FC QoS or FCoE stats */
+				if (bfa_ioc_get_fcmode(&fcport->bfa->ioc))
+					bfa_fcport_qos_stats_swap(&ret->fcqos,
+							&fcport->stats->fcqos);
+				else {
+					bfa_fcport_fcoe_stats_swap(&ret->fcoe,
+							&fcport->stats->fcoe);
+					ret->fcoe.secs_reset =
+						time - fcport->stats_reset_time;
+				}
+			}
+			bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
+					fcport->stats_status);
+		}
+		fcport->stats_status = BFA_STATUS_OK;
+	} else {
+		INIT_LIST_HEAD(&fcport->stats_pending_q);
+		fcport->stats_status = BFA_STATUS_OK;
+	}
+}
+
+static void
+bfa_fcport_stats_get_timeout(void *cbarg)
+{
+	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
+
+	bfa_trc(fcport->bfa, fcport->stats_qfull);
+
+	if (fcport->stats_qfull) {
+		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
+		fcport->stats_qfull = BFA_FALSE;
+	}
+
+	fcport->stats_status = BFA_STATUS_ETIMER;
+	__bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
+}
+
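+/*
+ * Send a stats-get request to firmware, waiting for queue space and
+ * rearming itself if the request queue is currently full.
+ */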
+static void
+bfa_fcport_send_stats_get(void *cbarg)
+{
+	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
+	struct bfi_fcport_req_s *msg;
+
+	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
+
+	if (!msg) {
+		fcport->stats_qfull = BFA_TRUE;
+		bfa_reqq_winit(&fcport->stats_reqq_wait,
+				bfa_fcport_send_stats_get, fcport);
+		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
+				&fcport->stats_reqq_wait);
+		return;
+	}
+	fcport->stats_qfull = BFA_FALSE;
+
+	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
+	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
+			bfa_fn_lpu(fcport->bfa));
+	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
+}
+
+static void
+__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
+{
+	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
+	struct bfa_cb_pending_q_s *cb;
+	struct list_head *qe, *qen;
+
+	if (complete) {
+		/*
+		 * re-initialize time stamp for stats reset
+		 */
+		fcport->stats_reset_time = ktime_get_seconds();
+		list_for_each_safe(qe, qen, &fcport->statsclr_pending_q) {
+			bfa_q_deq(&fcport->statsclr_pending_q, &qe);
+			cb = (struct bfa_cb_pending_q_s *)qe;
+			bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
+						fcport->stats_status);
+		}
+		fcport->stats_status = BFA_STATUS_OK;
+	} else {
+		INIT_LIST_HEAD(&fcport->statsclr_pending_q);
+		fcport->stats_status = BFA_STATUS_OK;
+	}
+}
+
+static void
+bfa_fcport_stats_clr_timeout(void *cbarg)
+{
+	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
+
+	bfa_trc(fcport->bfa, fcport->stats_qfull);
+
+	if (fcport->stats_qfull) {
+		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
+		fcport->stats_qfull = BFA_FALSE;
+	}
+
+	fcport->stats_status = BFA_STATUS_ETIMER;
+	__bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
+}
+
+static void
+bfa_fcport_send_stats_clear(void *cbarg)
+{
+	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
+	struct bfi_fcport_req_s *msg;
+
+	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
+
+	if (!msg) {
+		fcport->stats_qfull = BFA_TRUE;
+		bfa_reqq_winit(&fcport->stats_reqq_wait,
+				bfa_fcport_send_stats_clear, fcport);
+		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
+						&fcport->stats_reqq_wait);
+		return;
+	}
+	fcport->stats_qfull = BFA_FALSE;
+
+	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
+	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
+			bfa_fn_lpu(fcport->bfa));
+	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
+}
+
+/*
+ * Handle trunk SCN event from firmware.
+ */
+static void
+bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
+{
+	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
+	struct bfi_fcport_trunk_link_s *tlink;
+	struct bfa_trunk_link_attr_s *lattr;
+	enum bfa_trunk_state state_prev;
+	int i;
+	int link_bm = 0;
+
+	bfa_trc(fcport->bfa, fcport->cfg.trunked);
+	WARN_ON(scn->trunk_state != BFA_TRUNK_ONLINE &&
+		   scn->trunk_state != BFA_TRUNK_OFFLINE);
+
+	bfa_trc(fcport->bfa, trunk->attr.state);
+	bfa_trc(fcport->bfa, scn->trunk_state);
+	bfa_trc(fcport->bfa, scn->trunk_speed);
+
+	/*
+	 * Save off new state for trunk attribute query
+	 */
+	state_prev = trunk->attr.state;
+	if (fcport->cfg.trunked && (trunk->attr.state != BFA_TRUNK_DISABLED))
+		trunk->attr.state = scn->trunk_state;
+	trunk->attr.speed = scn->trunk_speed;
+	for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
+		lattr = &trunk->attr.link_attr[i];
+		tlink = &scn->tlink[i];
+
+		lattr->link_state = tlink->state;
+		lattr->trunk_wwn  = tlink->trunk_wwn;
+		lattr->fctl	  = tlink->fctl;
+		lattr->speed	  = tlink->speed;
+		lattr->deskew	  = be32_to_cpu(tlink->deskew);
+
+		if (tlink->state == BFA_TRUNK_LINK_STATE_UP) {
+			fcport->speed	 = tlink->speed;
+			fcport->topology = BFA_PORT_TOPOLOGY_P2P;
+			link_bm |= 1 << i;
+		}
+
+		bfa_trc(fcport->bfa, lattr->link_state);
+		bfa_trc(fcport->bfa, lattr->trunk_wwn);
+		bfa_trc(fcport->bfa, lattr->fctl);
+		bfa_trc(fcport->bfa, lattr->speed);
+		bfa_trc(fcport->bfa, lattr->deskew);
+	}
+
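+	/*
+	 * link_bm has bit i set for each trunk link that came up; log the
+	 * resulting trunk membership.
+	 */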
+	switch (link_bm) {
+	case 3:
+		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,1)");
+		break;
+	case 2:
+		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(-,1)");
+		break;
+	case 1:
+		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,-)");
+		break;
+	default:
+		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+			BFA_PL_EID_TRUNK_SCN, 0, "Trunk down");
+	}
+
+	/*
+	 * Notify upper layers if trunk state changed.
+	 */
+	if ((state_prev != trunk->attr.state) ||
+		(scn->trunk_state == BFA_TRUNK_OFFLINE)) {
+		bfa_fcport_scn(fcport, (scn->trunk_state == BFA_TRUNK_ONLINE) ?
+			BFA_PORT_LINKUP : BFA_PORT_LINKDOWN, BFA_TRUE);
+	}
+}
+
+static void
+bfa_trunk_iocdisable(struct bfa_s *bfa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+	int i = 0;
+
+	/*
+	 * In trunked mode, notify upper layers that link is down
+	 */
+	if (fcport->cfg.trunked) {
+		if (fcport->trunk.attr.state == BFA_TRUNK_ONLINE)
+			bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_TRUE);
+
+		fcport->trunk.attr.state = BFA_TRUNK_OFFLINE;
+		fcport->trunk.attr.speed = BFA_PORT_SPEED_UNKNOWN;
+		for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
+			fcport->trunk.attr.link_attr[i].trunk_wwn = 0;
+			fcport->trunk.attr.link_attr[i].fctl =
+						BFA_TRUNK_LINK_FCTL_NORMAL;
+			fcport->trunk.attr.link_attr[i].link_state =
+						BFA_TRUNK_LINK_STATE_DN_LINKDN;
+			fcport->trunk.attr.link_attr[i].speed =
+						BFA_PORT_SPEED_UNKNOWN;
+			fcport->trunk.attr.link_attr[i].deskew = 0;
+		}
+	}
+}
+
+/*
+ * Called to initialize port attributes
+ */
+void
+bfa_fcport_init(struct bfa_s *bfa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	/*
+	 * Initialize port attributes from IOC hardware data.
+	 */
+	bfa_fcport_set_wwns(fcport);
+	if (fcport->cfg.maxfrsize == 0)
+		fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
+	fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
+	fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);
+
+	if (bfa_fcport_is_pbcdisabled(bfa))
+		bfa->modules.port.pbc_disabled = BFA_TRUE;
+
+	WARN_ON(!fcport->cfg.maxfrsize);
+	WARN_ON(!fcport->cfg.rx_bbcredit);
+	WARN_ON(!fcport->speed_sup);
+}
+
+/*
+ * Firmware message handler.
+ */
+void
+bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+	union bfi_fcport_i2h_msg_u i2hmsg;
+
+	i2hmsg.msg = msg;
+	fcport->event_arg.i2hmsg = i2hmsg;
+
+	bfa_trc(bfa, msg->mhdr.msg_id);
+	bfa_trc(bfa, bfa_sm_to_state(hal_port_sm_table, fcport->sm));
+
+	switch (msg->mhdr.msg_id) {
+	case BFI_FCPORT_I2H_ENABLE_RSP:
+		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) {
+
+			fcport->stats_dma_ready = BFA_TRUE;
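+			/*
+			 * First enable response carries the flash-based
+			 * port configuration; swap multi-byte fields to
+			 * host order before latching it.
+			 */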
+			if (fcport->use_flash_cfg) {
+				fcport->cfg = i2hmsg.penable_rsp->port_cfg;
+				fcport->cfg.maxfrsize =
+					cpu_to_be16(fcport->cfg.maxfrsize);
+				fcport->cfg.path_tov =
+					cpu_to_be16(fcport->cfg.path_tov);
+				fcport->cfg.q_depth =
+					cpu_to_be16(fcport->cfg.q_depth);
+
+				if (fcport->cfg.trunked)
+					fcport->trunk.attr.state =
+						BFA_TRUNK_OFFLINE;
+				else
+					fcport->trunk.attr.state =
+						BFA_TRUNK_DISABLED;
+				fcport->qos_attr.qos_bw =
+					i2hmsg.penable_rsp->port_cfg.qos_bw;
+				fcport->use_flash_cfg = BFA_FALSE;
+			}
+
+			if (fcport->cfg.qos_enabled)
+				fcport->qos_attr.state = BFA_QOS_OFFLINE;
+			else
+				fcport->qos_attr.state = BFA_QOS_DISABLED;
+
+			fcport->qos_attr.qos_bw_op =
+					i2hmsg.penable_rsp->port_cfg.qos_bw;
+
+			if (fcport->cfg.bb_cr_enabled)
+				fcport->bbcr_attr.state = BFA_BBCR_OFFLINE;
+			else
+				fcport->bbcr_attr.state = BFA_BBCR_DISABLED;
+
+			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
+		}
+		break;
+
+	case BFI_FCPORT_I2H_DISABLE_RSP:
+		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
+			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
+		break;
+
+	case BFI_FCPORT_I2H_EVENT:
+		if (fcport->cfg.bb_cr_enabled)
+			fcport->bbcr_attr.state = BFA_BBCR_OFFLINE;
+		else
+			fcport->bbcr_attr.state = BFA_BBCR_DISABLED;
+
+		if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP)
+			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
+		else {
+			if (i2hmsg.event->link_state.linkstate_rsn ==
+			    BFA_PORT_LINKSTATE_RSN_FAA_MISCONFIG)
+				bfa_sm_send_event(fcport,
+						  BFA_FCPORT_SM_FAA_MISCONFIG);
+			else
+				bfa_sm_send_event(fcport,
+						  BFA_FCPORT_SM_LINKDOWN);
+		}
+		fcport->qos_attr.qos_bw_op =
+				i2hmsg.event->link_state.qos_attr.qos_bw_op;
+		break;
+
+	case BFI_FCPORT_I2H_TRUNK_SCN:
+		bfa_trunk_scn(fcport, i2hmsg.trunk_scn);
+		break;
+
+	case BFI_FCPORT_I2H_STATS_GET_RSP:
+		/*
+		 * check for timer pop before processing the rsp
+		 */
+		if (list_empty(&fcport->stats_pending_q) ||
+		    (fcport->stats_status == BFA_STATUS_ETIMER))
+			break;
+
+		bfa_timer_stop(&fcport->timer);
+		fcport->stats_status = i2hmsg.pstatsget_rsp->status;
+		__bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
+		break;
+
+	case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
+		/*
+		 * check for timer pop before processing the rsp
+		 */
+		if (list_empty(&fcport->statsclr_pending_q) ||
+		    (fcport->stats_status == BFA_STATUS_ETIMER))
+			break;
+
+		bfa_timer_stop(&fcport->timer);
+		fcport->stats_status = BFA_STATUS_OK;
+		__bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
+		break;
+
+	case BFI_FCPORT_I2H_ENABLE_AEN:
+		bfa_sm_send_event(fcport, BFA_FCPORT_SM_ENABLE);
+		break;
+
+	case BFI_FCPORT_I2H_DISABLE_AEN:
+		bfa_sm_send_event(fcport, BFA_FCPORT_SM_DISABLE);
+		break;
+
+	default:
+		WARN_ON(1);
+		break;
+	}
+}
+
+/*
+ * Registered callback for port events.
+ */
+void
+bfa_fcport_event_register(struct bfa_s *bfa,
+				void (*cbfn) (void *cbarg,
+				enum bfa_port_linkstate event),
+				void *cbarg)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	fcport->event_cbfn = cbfn;
+	fcport->event_cbarg = cbarg;
+}
+
+bfa_status_t
+bfa_fcport_enable(struct bfa_s *bfa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	if (bfa_fcport_is_pbcdisabled(bfa))
+		return BFA_STATUS_PBC;
+
+	if (bfa_ioc_is_disabled(&bfa->ioc))
+		return BFA_STATUS_IOC_DISABLED;
+
+	if (fcport->diag_busy)
+		return BFA_STATUS_DIAG_BUSY;
+
+	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE);
+	return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_fcport_disable(struct bfa_s *bfa)
+{
+	if (bfa_fcport_is_pbcdisabled(bfa))
+		return BFA_STATUS_PBC;
+
+	if (bfa_ioc_is_disabled(&bfa->ioc))
+		return BFA_STATUS_IOC_DISABLED;
+
+	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
+	return BFA_STATUS_OK;
+}
+
+/* If PBC is disabled on port, return error */
+bfa_status_t
+bfa_fcport_is_pbcdisabled(struct bfa_s *bfa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
+	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
+
+	if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED) {
+		bfa_trc(bfa, fcport->pwwn);
+		return BFA_STATUS_PBC;
+	}
+	return BFA_STATUS_OK;
+}
+
+/*
+ * Configure port speed.
+ */
+bfa_status_t
+bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	bfa_trc(bfa, speed);
+
+	if (fcport->cfg.trunked == BFA_TRUE)
+		return BFA_STATUS_TRUNK_ENABLED;
+	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
+			(speed == BFA_PORT_SPEED_16GBPS))
+		return BFA_STATUS_UNSUPP_SPEED;
+	if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
+		bfa_trc(bfa, fcport->speed_sup);
+		return BFA_STATUS_UNSUPP_SPEED;
+	}
+
+	/* Validate the requested speed against the adapter type */
+	if (bfa_ioc_get_type(&fcport->bfa->ioc) == BFA_IOC_TYPE_FC) {
+		/* For CT2, 1G is not supported */
+		if ((speed == BFA_PORT_SPEED_1GBPS) &&
+		    (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
+			return BFA_STATUS_UNSUPP_SPEED;
+
+		/* Already checked for Auto Speed and Max Speed supp */
+		if (!(speed == BFA_PORT_SPEED_1GBPS ||
+		      speed == BFA_PORT_SPEED_2GBPS ||
+		      speed == BFA_PORT_SPEED_4GBPS ||
+		      speed == BFA_PORT_SPEED_8GBPS ||
+		      speed == BFA_PORT_SPEED_16GBPS ||
+		      speed == BFA_PORT_SPEED_AUTO))
+			return BFA_STATUS_UNSUPP_SPEED;
+	} else {
+		if (speed != BFA_PORT_SPEED_10GBPS)
+			return BFA_STATUS_UNSUPP_SPEED;
+	}
+
+	fcport->cfg.speed = speed;
+
+	return BFA_STATUS_OK;
+}
+
+/*
+ * Get current speed.
+ */
+enum bfa_port_speed
+bfa_fcport_get_speed(struct bfa_s *bfa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	return fcport->speed;
+}
+
+/*
+ * Configure port topology.
+ */
+bfa_status_t
+bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	bfa_trc(bfa, topology);
+	bfa_trc(bfa, fcport->cfg.topology);
+
+	switch (topology) {
+	case BFA_PORT_TOPOLOGY_P2P:
+		break;
+
+	case BFA_PORT_TOPOLOGY_LOOP:
+		if ((bfa_fcport_is_qos_enabled(bfa) != BFA_FALSE) ||
+			(fcport->qos_attr.state != BFA_QOS_DISABLED))
+			return BFA_STATUS_ERROR_QOS_ENABLED;
+		if (fcport->cfg.ratelimit != BFA_FALSE)
+			return BFA_STATUS_ERROR_TRL_ENABLED;
+		if ((bfa_fcport_is_trunk_enabled(bfa) != BFA_FALSE) ||
+			(fcport->trunk.attr.state != BFA_TRUNK_DISABLED))
+			return BFA_STATUS_ERROR_TRUNK_ENABLED;
+		if ((bfa_fcport_get_speed(bfa) == BFA_PORT_SPEED_16GBPS) ||
+			(fcport->cfg.speed == BFA_PORT_SPEED_16GBPS))
+			return BFA_STATUS_UNSUPP_SPEED;
+		if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type))
+			return BFA_STATUS_LOOP_UNSUPP_MEZZ;
+		if (bfa_fcport_is_dport(bfa) != BFA_FALSE)
+			return BFA_STATUS_DPORT_ERR;
+		if (bfa_fcport_is_ddport(bfa) != BFA_FALSE)
+			return BFA_STATUS_DPORT_ERR;
+		break;
+
+	case BFA_PORT_TOPOLOGY_AUTO:
+		break;
+
+	default:
+		return BFA_STATUS_EINVAL;
+	}
+
+	fcport->cfg.topology = topology;
+	return BFA_STATUS_OK;
+}
+
+/*
+ * Get current topology.
+ */
+enum bfa_port_topology
+bfa_fcport_get_topology(struct bfa_s *bfa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	return fcport->topology;
+}
+
+/*
+ * Get configured topology.
+ */
+enum bfa_port_topology
+bfa_fcport_get_cfg_topology(struct bfa_s *bfa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	return fcport->cfg.topology;
+}
+
+bfa_status_t
+bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	bfa_trc(bfa, alpa);
+	bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
+	bfa_trc(bfa, fcport->cfg.hardalpa);
+
+	fcport->cfg.cfg_hardalpa = BFA_TRUE;
+	fcport->cfg.hardalpa = alpa;
+
+	return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
+	bfa_trc(bfa, fcport->cfg.hardalpa);
+
+	fcport->cfg.cfg_hardalpa = BFA_FALSE;
+	return BFA_STATUS_OK;
+}
+
+bfa_boolean_t
+bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	*alpa = fcport->cfg.hardalpa;
+	return fcport->cfg.cfg_hardalpa;
+}
+
+u8
+bfa_fcport_get_myalpa(struct bfa_s *bfa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	return fcport->myalpa;
+}
+
+bfa_status_t
+bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	bfa_trc(bfa, maxfrsize);
+	bfa_trc(bfa, fcport->cfg.maxfrsize);
+
+	/* must be within the valid frame size range */
+	if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
+		return BFA_STATUS_INVLD_DFSZ;
+
+	/* power of 2, if not the max frame size of 2112 */
+	if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
+		return BFA_STATUS_INVLD_DFSZ;
+
+	fcport->cfg.maxfrsize = maxfrsize;
+	return BFA_STATUS_OK;
+}
+
+u16
+bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	return fcport->cfg.maxfrsize;
+}
+
+u8
+bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
+{
+	if (bfa_fcport_get_topology(bfa) != BFA_PORT_TOPOLOGY_LOOP)
+		return (BFA_FCPORT_MOD(bfa))->cfg.rx_bbcredit;
+	else
+		return 0;
+}
+
+void
+bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	fcport->cfg.tx_bbcredit = (u8)tx_bbcredit;
+}
+
+/*
+ * Get port attributes.
+ */
+
+wwn_t
+bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	if (node)
+		return fcport->nwwn;
+	else
+		return fcport->pwwn;
+}
+
+void
+bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	memset(attr, 0, sizeof(struct bfa_port_attr_s));
+
+	attr->nwwn = fcport->nwwn;
+	attr->pwwn = fcport->pwwn;
+
+	attr->factorypwwn = bfa->ioc.attr->mfg_pwwn;
+	attr->factorynwwn = bfa->ioc.attr->mfg_nwwn;
+
+	memcpy(&attr->pport_cfg, &fcport->cfg,
+		sizeof(struct bfa_port_cfg_s));
+	/* speed attributes */
+	attr->pport_cfg.speed = fcport->cfg.speed;
+	attr->speed_supported = fcport->speed_sup;
+	attr->speed = fcport->speed;
+	attr->cos_supported = FC_CLASS_3;
+
+	/* topology attributes */
+	attr->pport_cfg.topology = fcport->cfg.topology;
+	attr->topology = fcport->topology;
+	attr->pport_cfg.trunked = fcport->cfg.trunked;
+
+	/* beacon attributes */
+	attr->beacon = fcport->beacon;
+	attr->link_e2e_beacon = fcport->link_e2e_beacon;
+
+	attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa);
+	attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa);
+	attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm);
+
+	attr->fec_state = fcport->fec_state;
+
+	/* PBC Disabled State */
+	if (bfa_fcport_is_pbcdisabled(bfa))
+		attr->port_state = BFA_PORT_ST_PREBOOT_DISABLED;
+	else if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
+		attr->port_state = BFA_PORT_ST_IOCDIS;
+	else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
+		attr->port_state = BFA_PORT_ST_FWMISMATCH;
+
+	/* FCoE vlan */
+	attr->fcoe_vlan = fcport->fcoe_vlan;
+}
+
+#define BFA_FCPORT_STATS_TOV	1000
+
+/*
+ * Fetch port statistics (FCQoS or FCoE).
+ */
+bfa_status_t
+bfa_fcport_get_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	if (!bfa_iocfc_is_operational(bfa) ||
+	    !fcport->stats_dma_ready)
+		return BFA_STATUS_IOC_NON_OP;
+
+	if (!list_empty(&fcport->statsclr_pending_q))
+		return BFA_STATUS_DEVBUSY;
+
+	if (list_empty(&fcport->stats_pending_q)) {
+		list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
+		bfa_fcport_send_stats_get(fcport);
+		bfa_timer_start(bfa, &fcport->timer,
+				bfa_fcport_stats_get_timeout,
+				fcport, BFA_FCPORT_STATS_TOV);
+	} else
+		list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
+
+	return BFA_STATUS_OK;
+}
+
+/*
+ * Reset port statistics (FCQoS or FCoE).
+ */
+bfa_status_t
+bfa_fcport_clear_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	if (!bfa_iocfc_is_operational(bfa) ||
+	    !fcport->stats_dma_ready)
+		return BFA_STATUS_IOC_NON_OP;
+
+	if (!list_empty(&fcport->stats_pending_q))
+		return BFA_STATUS_DEVBUSY;
+
+	if (list_empty(&fcport->statsclr_pending_q)) {
+		list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
+		bfa_fcport_send_stats_clear(fcport);
+		bfa_timer_start(bfa, &fcport->timer,
+				bfa_fcport_stats_clr_timeout,
+				fcport, BFA_FCPORT_STATS_TOV);
+	} else
+		list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
+
+	return BFA_STATUS_OK;
+}
+
+/*
+ * Return whether the port is disabled.
+ */
+bfa_boolean_t
+bfa_fcport_is_disabled(struct bfa_s *bfa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	return bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
+		BFA_PORT_ST_DISABLED;
+}
+
+bfa_boolean_t
+bfa_fcport_is_dport(struct bfa_s *bfa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	return (bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
+		BFA_PORT_ST_DPORT);
+}
+
+bfa_boolean_t
+bfa_fcport_is_ddport(struct bfa_s *bfa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	return (bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
+		BFA_PORT_ST_DDPORT);
+}
+
+bfa_status_t
+bfa_fcport_set_qos_bw(struct bfa_s *bfa, struct bfa_qos_bw_s *qos_bw)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+	enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);
+
+	bfa_trc(bfa, ioc_type);
+
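+	/*
+	 * The QoS bandwidth split must be non-zero per priority, add up to
+	 * 100%, and be ordered high >= med >= low.
+	 */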
+	if ((qos_bw->high == 0) || (qos_bw->med == 0) || (qos_bw->low == 0))
+		return BFA_STATUS_QOS_BW_INVALID;
+
+	if ((qos_bw->high + qos_bw->med + qos_bw->low) != 100)
+		return BFA_STATUS_QOS_BW_INVALID;
+
+	if ((qos_bw->med > qos_bw->high) || (qos_bw->low > qos_bw->med) ||
+	    (qos_bw->low > qos_bw->high))
+		return BFA_STATUS_QOS_BW_INVALID;
+
+	if ((ioc_type == BFA_IOC_TYPE_FC) &&
+	    (fcport->cfg.topology != BFA_PORT_TOPOLOGY_LOOP))
+		fcport->cfg.qos_bw = *qos_bw;
+
+	return BFA_STATUS_OK;
+}
+
+bfa_boolean_t
+bfa_fcport_is_ratelim(struct bfa_s *bfa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
+}
+
+/*
+ *	Enable/Disable FAA feature in port config
+ */
+void
+bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	bfa_trc(bfa, state);
+	fcport->cfg.faa_state = state;
+}
+
+/*
+ * Get default minimum ratelim speed
+ */
+enum bfa_port_speed
+bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	bfa_trc(bfa, fcport->cfg.trl_def_speed);
+	return fcport->cfg.trl_def_speed;
+}
+
+void
+bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
+		  bfa_boolean_t link_e2e_beacon)
+{
+	struct bfa_s *bfa = dev;
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	bfa_trc(bfa, beacon);
+	bfa_trc(bfa, link_e2e_beacon);
+	bfa_trc(bfa, fcport->beacon);
+	bfa_trc(bfa, fcport->link_e2e_beacon);
+
+	fcport->beacon = beacon;
+	fcport->link_e2e_beacon = link_e2e_beacon;
+}
+
+bfa_boolean_t
+bfa_fcport_is_linkup(struct bfa_s *bfa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	return	(!fcport->cfg.trunked &&
+		 bfa_sm_cmp_state(fcport, bfa_fcport_sm_linkup)) ||
+		(fcport->cfg.trunked &&
+		 fcport->trunk.attr.state == BFA_TRUNK_ONLINE);
+}
+
+bfa_boolean_t
+bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	return fcport->cfg.qos_enabled;
+}
+
+bfa_boolean_t
+bfa_fcport_is_trunk_enabled(struct bfa_s *bfa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	return fcport->cfg.trunked;
+}
+
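+/*
+ * Configure BB credit recovery (bb_scn). Supported on FC ports only and
+ * mutually exclusive with loop topology, QoS and trunking.
+ */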
+bfa_status_t
+bfa_fcport_cfg_bbcr(struct bfa_s *bfa, bfa_boolean_t on_off, u8 bb_scn)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	bfa_trc(bfa, on_off);
+
+	if (bfa_ioc_get_type(&fcport->bfa->ioc) != BFA_IOC_TYPE_FC)
+		return BFA_STATUS_BBCR_FC_ONLY;
+
+	if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type) &&
+		(bfa->ioc.attr->card_type != BFA_MFG_TYPE_CHINOOK))
+		return BFA_STATUS_CMD_NOTSUPP_MEZZ;
+
+	if (on_off) {
+		if (fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP)
+			return BFA_STATUS_TOPOLOGY_LOOP;
+
+		if (fcport->cfg.qos_enabled)
+			return BFA_STATUS_ERROR_QOS_ENABLED;
+
+		if (fcport->cfg.trunked)
+			return BFA_STATUS_TRUNK_ENABLED;
+
+		if ((fcport->cfg.speed != BFA_PORT_SPEED_AUTO) &&
+			(fcport->cfg.speed < bfa_ioc_speed_sup(&bfa->ioc)))
+			return BFA_STATUS_ERR_BBCR_SPEED_UNSUPPORT;
+
+		if (bfa_ioc_speed_sup(&bfa->ioc) < BFA_PORT_SPEED_8GBPS)
+			return BFA_STATUS_FEATURE_NOT_SUPPORTED;
+
+		if (fcport->cfg.bb_cr_enabled) {
+			if (bb_scn != fcport->cfg.bb_scn)
+				return BFA_STATUS_BBCR_CFG_NO_CHANGE;
+			else
+				return BFA_STATUS_NO_CHANGE;
+		}
+
+		if ((bb_scn == 0) || (bb_scn > BFA_BB_SCN_MAX))
+			bb_scn = BFA_BB_SCN_DEF;
+
+		fcport->cfg.bb_cr_enabled = on_off;
+		fcport->cfg.bb_scn = bb_scn;
+	} else {
+		if (!fcport->cfg.bb_cr_enabled)
+			return BFA_STATUS_NO_CHANGE;
+
+		fcport->cfg.bb_cr_enabled = on_off;
+		fcport->cfg.bb_scn = 0;
+	}
+
+	return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_fcport_get_bbcr_attr(struct bfa_s *bfa,
+		struct bfa_bbcr_attr_s *bbcr_attr)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	if (bfa_ioc_get_type(&fcport->bfa->ioc) != BFA_IOC_TYPE_FC)
+		return BFA_STATUS_BBCR_FC_ONLY;
+
+	if (fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP)
+		return BFA_STATUS_TOPOLOGY_LOOP;
+
+	*bbcr_attr = fcport->bbcr_attr;
+
+	return BFA_STATUS_OK;
+}
+
+void
+bfa_fcport_dportenable(struct bfa_s *bfa)
+{
+	/*
+	 * Caller is expected to have verified that the port is disabled
+	 */
+	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTENABLE);
+	bfa_port_set_dportenabled(&bfa->modules.port, BFA_TRUE);
+}
+
+void
+bfa_fcport_dportdisable(struct bfa_s *bfa)
+{
+	/*
+	 * Caller is expected to have verified that the port is disabled
+	 */
+	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTDISABLE);
+	bfa_port_set_dportenabled(&bfa->modules.port, BFA_FALSE);
+}
+
+void
+bfa_fcport_ddportenable(struct bfa_s *bfa)
+{
+	/*
+	 * Caller is expected to have verified that the port is disabled
+	 */
+	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DDPORTENABLE);
+}
+
+void
+bfa_fcport_ddportdisable(struct bfa_s *bfa)
+{
+	/*
+	 * Caller is expected to have verified that the port is disabled
+	 */
+	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DDPORTDISABLE);
+}
+
+/*
+ * Rport State machine functions
+ */
+/*
+ * Beginning state, only online event expected.
+ */
+static void
+bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
+{
+	bfa_trc(rp->bfa, rp->rport_tag);
+	bfa_trc(rp->bfa, event);
+
+	switch (event) {
+	case BFA_RPORT_SM_CREATE:
+		bfa_stats(rp, sm_un_cr);
+		bfa_sm_set_state(rp, bfa_rport_sm_created);
+		break;
+
+	default:
+		bfa_stats(rp, sm_un_unexp);
+		bfa_sm_fault(rp->bfa, event);
+	}
+}
+
+static void
+bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
+{
+	bfa_trc(rp->bfa, rp->rport_tag);
+	bfa_trc(rp->bfa, event);
+
+	switch (event) {
+	case BFA_RPORT_SM_ONLINE:
+		bfa_stats(rp, sm_cr_on);
+		if (bfa_rport_send_fwcreate(rp))
+			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
+		else
+			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
+		break;
+
+	case BFA_RPORT_SM_DELETE:
+		bfa_stats(rp, sm_cr_del);
+		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
+		bfa_rport_free(rp);
+		break;
+
+	case BFA_RPORT_SM_HWFAIL:
+		bfa_stats(rp, sm_cr_hwf);
+		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
+		break;
+
+	default:
+		bfa_stats(rp, sm_cr_unexp);
+		bfa_sm_fault(rp->bfa, event);
+	}
+}
+
+/*
+ * Waiting for rport create response from firmware.
+ */
+static void
+bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
+{
+	bfa_trc(rp->bfa, rp->rport_tag);
+	bfa_trc(rp->bfa, event);
+
+	switch (event) {
+	case BFA_RPORT_SM_FWRSP:
+		bfa_stats(rp, sm_fwc_rsp);
+		bfa_sm_set_state(rp, bfa_rport_sm_online);
+		bfa_rport_online_cb(rp);
+		break;
+
+	case BFA_RPORT_SM_DELETE:
+		bfa_stats(rp, sm_fwc_del);
+		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
+		break;
+
+	case BFA_RPORT_SM_OFFLINE:
+		bfa_stats(rp, sm_fwc_off);
+		bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
+		break;
+
+	case BFA_RPORT_SM_HWFAIL:
+		bfa_stats(rp, sm_fwc_hwf);
+		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
+		break;
+
+	default:
+		bfa_stats(rp, sm_fwc_unexp);
+		bfa_sm_fault(rp->bfa, event);
+	}
+}
+
+/*
+ * Request queue is full, awaiting queue resume to send create request.
+ */
+static void
+bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
+{
+	bfa_trc(rp->bfa, rp->rport_tag);
+	bfa_trc(rp->bfa, event);
+
+	switch (event) {
+	case BFA_RPORT_SM_QRESUME:
+		bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
+		bfa_rport_send_fwcreate(rp);
+		break;
+
+	case BFA_RPORT_SM_DELETE:
+		bfa_stats(rp, sm_fwc_del);
+		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
+		bfa_reqq_wcancel(&rp->reqq_wait);
+		bfa_rport_free(rp);
+		break;
+
+	case BFA_RPORT_SM_OFFLINE:
+		bfa_stats(rp, sm_fwc_off);
+		bfa_sm_set_state(rp, bfa_rport_sm_offline);
+		bfa_reqq_wcancel(&rp->reqq_wait);
+		bfa_rport_offline_cb(rp);
+		break;
+
+	case BFA_RPORT_SM_HWFAIL:
+		bfa_stats(rp, sm_fwc_hwf);
+		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
+		bfa_reqq_wcancel(&rp->reqq_wait);
+		break;
+
+	default:
+		bfa_stats(rp, sm_fwc_unexp);
+		bfa_sm_fault(rp->bfa, event);
+	}
+}
+
+/*
+ * Online state - normal parking state.
+ */
+static void
+bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
+{
+	struct bfi_rport_qos_scn_s *qos_scn;
+
+	bfa_trc(rp->bfa, rp->rport_tag);
+	bfa_trc(rp->bfa, event);
+
+	switch (event) {
+	case BFA_RPORT_SM_OFFLINE:
+		bfa_stats(rp, sm_on_off);
+		if (bfa_rport_send_fwdelete(rp))
+			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
+		else
+			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
+		break;
+
+	case BFA_RPORT_SM_DELETE:
+		bfa_stats(rp, sm_on_del);
+		if (bfa_rport_send_fwdelete(rp))
+			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
+		else
+			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
+		break;
+
+	case BFA_RPORT_SM_HWFAIL:
+		bfa_stats(rp, sm_on_hwf);
+		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
+		break;
+
+	case BFA_RPORT_SM_SET_SPEED:
+		bfa_rport_send_fwspeed(rp);
+		break;
+
+	case BFA_RPORT_SM_QOS_SCN:
+		qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
+		rp->qos_attr = qos_scn->new_qos_attr;
+		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
+		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
+		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
+		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);
+
+		qos_scn->old_qos_attr.qos_flow_id  =
+			be32_to_cpu(qos_scn->old_qos_attr.qos_flow_id);
+		qos_scn->new_qos_attr.qos_flow_id  =
+			be32_to_cpu(qos_scn->new_qos_attr.qos_flow_id);
+
+		if (qos_scn->old_qos_attr.qos_flow_id !=
+			qos_scn->new_qos_attr.qos_flow_id)
+			bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
+						    qos_scn->old_qos_attr,
+						    qos_scn->new_qos_attr);
+		if (qos_scn->old_qos_attr.qos_priority !=
+			qos_scn->new_qos_attr.qos_priority)
+			bfa_cb_rport_qos_scn_prio(rp->rport_drv,
+						  qos_scn->old_qos_attr,
+						  qos_scn->new_qos_attr);
+		break;
+
+	default:
+		bfa_stats(rp, sm_on_unexp);
+		bfa_sm_fault(rp->bfa, event);
+	}
+}
+
+/*
+ * Firmware rport is being deleted - awaiting f/w response.
+ */
+static void
+bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
+{
+	bfa_trc(rp->bfa, rp->rport_tag);
+	bfa_trc(rp->bfa, event);
+
+	switch (event) {
+	case BFA_RPORT_SM_FWRSP:
+		bfa_stats(rp, sm_fwd_rsp);
+		bfa_sm_set_state(rp, bfa_rport_sm_offline);
+		bfa_rport_offline_cb(rp);
+		break;
+
+	case BFA_RPORT_SM_DELETE:
+		bfa_stats(rp, sm_fwd_del);
+		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
+		break;
+
+	case BFA_RPORT_SM_HWFAIL:
+		bfa_stats(rp, sm_fwd_hwf);
+		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
+		bfa_rport_offline_cb(rp);
+		break;
+
+	default:
+		bfa_stats(rp, sm_fwd_unexp);
+		bfa_sm_fault(rp->bfa, event);
+	}
+}
+
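+/*
+ * Request queue is full, awaiting queue resume to send delete request.
+ */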
+static void
+bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
+{
+	bfa_trc(rp->bfa, rp->rport_tag);
+	bfa_trc(rp->bfa, event);
+
+	switch (event) {
+	case BFA_RPORT_SM_QRESUME:
+		bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
+		bfa_rport_send_fwdelete(rp);
+		break;
+
+	case BFA_RPORT_SM_DELETE:
+		bfa_stats(rp, sm_fwd_del);
+		bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
+		break;
+
+	case BFA_RPORT_SM_HWFAIL:
+		bfa_stats(rp, sm_fwd_hwf);
+		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
+		bfa_reqq_wcancel(&rp->reqq_wait);
+		bfa_rport_offline_cb(rp);
+		break;
+
+	default:
+		bfa_stats(rp, sm_fwd_unexp);
+		bfa_sm_fault(rp->bfa, event);
+	}
+}
+
+/*
+ * Offline state.
+ */
+static void
+bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
+{
+	bfa_trc(rp->bfa, rp->rport_tag);
+	bfa_trc(rp->bfa, event);
+
+	switch (event) {
+	case BFA_RPORT_SM_DELETE:
+		bfa_stats(rp, sm_off_del);
+		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
+		bfa_rport_free(rp);
+		break;
+
+	case BFA_RPORT_SM_ONLINE:
+		bfa_stats(rp, sm_off_on);
+		if (bfa_rport_send_fwcreate(rp))
+			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
+		else
+			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
+		break;
+
+	case BFA_RPORT_SM_HWFAIL:
+		bfa_stats(rp, sm_off_hwf);
+		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
+		break;
+
+	case BFA_RPORT_SM_OFFLINE:
+		bfa_rport_offline_cb(rp);
+		break;
+
+	default:
+		bfa_stats(rp, sm_off_unexp);
+		bfa_sm_fault(rp->bfa, event);
+	}
+}
+
+/*
+ * Rport is deleted, waiting for firmware response to delete.
+ */
+static void
+bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
+{
+	bfa_trc(rp->bfa, rp->rport_tag);
+	bfa_trc(rp->bfa, event);
+
+	switch (event) {
+	case BFA_RPORT_SM_FWRSP:
+		bfa_stats(rp, sm_del_fwrsp);
+		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
+		bfa_rport_free(rp);
+		break;
+
+	case BFA_RPORT_SM_HWFAIL:
+		bfa_stats(rp, sm_del_hwf);
+		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
+		bfa_rport_free(rp);
+		break;
+
+	default:
+		bfa_sm_fault(rp->bfa, event);
+	}
+}
+
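+/*
+ * Rport is deleted, request queue is full; awaiting queue resume to send
+ * the delete request.
+ */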
+static void
+bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
+{
+	bfa_trc(rp->bfa, rp->rport_tag);
+	bfa_trc(rp->bfa, event);
+
+	switch (event) {
+	case BFA_RPORT_SM_QRESUME:
+		bfa_stats(rp, sm_del_fwrsp);
+		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
+		bfa_rport_send_fwdelete(rp);
+		break;
+
+	case BFA_RPORT_SM_HWFAIL:
+		bfa_stats(rp, sm_del_hwf);
+		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
+		bfa_reqq_wcancel(&rp->reqq_wait);
+		bfa_rport_free(rp);
+		break;
+
+	default:
+		bfa_sm_fault(rp->bfa, event);
+	}
+}
+
+/*
+ * Waiting for rport create response from firmware. A delete is pending.
+ */
+static void
+bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
+				enum bfa_rport_event event)
+{
+	bfa_trc(rp->bfa, rp->rport_tag);
+	bfa_trc(rp->bfa, event);
+
+	switch (event) {
+	case BFA_RPORT_SM_FWRSP:
+		bfa_stats(rp, sm_delp_fwrsp);
+		if (bfa_rport_send_fwdelete(rp))
+			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
+		else
+			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
+		break;
+
+	case BFA_RPORT_SM_HWFAIL:
+		bfa_stats(rp, sm_delp_hwf);
+		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
+		bfa_rport_free(rp);
+		break;
+
+	default:
+		bfa_stats(rp, sm_delp_unexp);
+		bfa_sm_fault(rp->bfa, event);
+	}
+}
+
+/*
+ * Waiting for rport create response from firmware. Rport offline is pending.
+ */
+static void
+bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
+				 enum bfa_rport_event event)
+{
+	bfa_trc(rp->bfa, rp->rport_tag);
+	bfa_trc(rp->bfa, event);
+
+	switch (event) {
+	case BFA_RPORT_SM_FWRSP:
+		bfa_stats(rp, sm_offp_fwrsp);
+		if (bfa_rport_send_fwdelete(rp))
+			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
+		else
+			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
+		break;
+
+	case BFA_RPORT_SM_DELETE:
+		bfa_stats(rp, sm_offp_del);
+		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
+		break;
+
+	case BFA_RPORT_SM_HWFAIL:
+		bfa_stats(rp, sm_offp_hwf);
+		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
+		bfa_rport_offline_cb(rp);
+		break;
+
+	default:
+		bfa_stats(rp, sm_offp_unexp);
+		bfa_sm_fault(rp->bfa, event);
+	}
+}
+
+/*
+ * IOC h/w failed.
+ */
+static void
+bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
+{
+	bfa_trc(rp->bfa, rp->rport_tag);
+	bfa_trc(rp->bfa, event);
+
+	switch (event) {
+	case BFA_RPORT_SM_OFFLINE:
+		bfa_stats(rp, sm_iocd_off);
+		bfa_rport_offline_cb(rp);
+		break;
+
+	case BFA_RPORT_SM_DELETE:
+		bfa_stats(rp, sm_iocd_del);
+		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
+		bfa_rport_free(rp);
+		break;
+
+	case BFA_RPORT_SM_ONLINE:
+		bfa_stats(rp, sm_iocd_on);
+		if (bfa_rport_send_fwcreate(rp))
+			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
+		else
+			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
+		break;
+
+	case BFA_RPORT_SM_HWFAIL:
+		break;
+
+	default:
+		bfa_stats(rp, sm_iocd_unexp);
+		bfa_sm_fault(rp->bfa, event);
+	}
+}
+
+
+
+/*
+ *  bfa_rport_private - BFA rport private functions
+ */
+
+static void
+__bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
+{
+	struct bfa_rport_s *rp = cbarg;
+
+	if (complete)
+		bfa_cb_rport_online(rp->rport_drv);
+}
+
+static void
+__bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
+{
+	struct bfa_rport_s *rp = cbarg;
+
+	if (complete)
+		bfa_cb_rport_offline(rp->rport_drv);
+}
+
+static void
+bfa_rport_qresume(void *cbarg)
+{
+	struct bfa_rport_s	*rp = cbarg;
+
+	bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
+}
+
+void
+bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
+		struct bfa_s *bfa)
+{
+	struct bfa_mem_kva_s *rport_kva = BFA_MEM_RPORT_KVA(bfa);
+
+	if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
+		cfg->fwcfg.num_rports = BFA_RPORT_MIN;
+
+	/* kva memory */
+	bfa_mem_kva_setup(minfo, rport_kva,
+		cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s));
+}
+
+void
+bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+		struct bfa_pcidev_s *pcidev)
+{
+	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
+	struct bfa_rport_s *rp;
+	u16 i;
+
+	INIT_LIST_HEAD(&mod->rp_free_q);
+	INIT_LIST_HEAD(&mod->rp_active_q);
+	INIT_LIST_HEAD(&mod->rp_unused_q);
+
+	rp = (struct bfa_rport_s *) bfa_mem_kva_curp(mod);
+	mod->rps_list = rp;
+	mod->num_rports = cfg->fwcfg.num_rports;
+
+	WARN_ON(!mod->num_rports ||
+		   (mod->num_rports & (mod->num_rports - 1)));
+
+	for (i = 0; i < mod->num_rports; i++, rp++) {
+		memset(rp, 0, sizeof(struct bfa_rport_s));
+		rp->bfa = bfa;
+		rp->rport_tag = i;
+		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
+
+		/*
+		 * rport tag 0 is reserved; keep it off the free list
+		 */
+		if (i)
+			list_add_tail(&rp->qe, &mod->rp_free_q);
+
+		bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
+	}
+
+	/*
+	 * consume memory
+	 */
+	bfa_mem_kva_curp(mod) = (u8 *) rp;
+}
+
+void
+bfa_rport_iocdisable(struct bfa_s *bfa)
+{
+	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
+	struct bfa_rport_s *rport;
+	struct list_head *qe, *qen;
+
+	/* Enqueue unused rport resources to free_q */
+	list_splice_tail_init(&mod->rp_unused_q, &mod->rp_free_q);
+
+	list_for_each_safe(qe, qen, &mod->rp_active_q) {
+		rport = (struct bfa_rport_s *) qe;
+		bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
+	}
+}
+
+static struct bfa_rport_s *
+bfa_rport_alloc(struct bfa_rport_mod_s *mod)
+{
+	struct bfa_rport_s *rport;
+
+	bfa_q_deq(&mod->rp_free_q, &rport);
+	if (rport)
+		list_add_tail(&rport->qe, &mod->rp_active_q);
+
+	return rport;
+}
+
+static void
+bfa_rport_free(struct bfa_rport_s *rport)
+{
+	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);
+
+	WARN_ON(!bfa_q_is_on_q(&mod->rp_active_q, rport));
+	list_del(&rport->qe);
+	list_add_tail(&rport->qe, &mod->rp_free_q);
+}
+
+static bfa_boolean_t
+bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
+{
+	struct bfi_rport_create_req_s *m;
+
+	/*
+	 * check for room in queue to send request now
+	 */
+	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
+	if (!m) {
+		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
+		return BFA_FALSE;
+	}
+
+	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
+			bfa_fn_lpu(rp->bfa));
+	m->bfa_handle = rp->rport_tag;
+	m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz);
+	m->pid = rp->rport_info.pid;
+	m->lp_fwtag = bfa_lps_get_fwtag(rp->bfa, (u8)rp->rport_info.lp_tag);
+	m->local_pid = rp->rport_info.local_pid;
+	m->fc_class = rp->rport_info.fc_class;
+	m->vf_en = rp->rport_info.vf_en;
+	m->vf_id = rp->rport_info.vf_id;
+	m->cisc = rp->rport_info.cisc;
+
+	/*
+	 * queue I/O message to firmware
+	 */
+	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
+	return BFA_TRUE;
+}
+
+static bfa_boolean_t
+bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
+{
+	struct bfi_rport_delete_req_s *m;
+
+	/*
+	 * check for room in queue to send request now
+	 */
+	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
+	if (!m) {
+		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
+		return BFA_FALSE;
+	}
+
+	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
+			bfa_fn_lpu(rp->bfa));
+	m->fw_handle = rp->fw_handle;
+
+	/*
+	 * queue I/O message to firmware
+	 */
+	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
+	return BFA_TRUE;
+}
+
+static bfa_boolean_t
+bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
+{
+	struct bfa_rport_speed_req_s *m;
+
+	/*
+	 * check for room in queue to send request now
+	 */
+	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
+	if (!m) {
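+		/*
+		 * Unlike create/delete, a speed update is best-effort:
+		 * no wait element is queued; the request is simply
+		 * dropped after tracing.
+		 */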
+		bfa_trc(rp->bfa, rp->rport_info.speed);
+		return BFA_FALSE;
+	}
+
+	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
+			bfa_fn_lpu(rp->bfa));
+	m->fw_handle = rp->fw_handle;
+	m->speed = (u8)rp->rport_info.speed;
+
+	/*
+	 * queue I/O message to firmware
+	 */
+	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
+	return BFA_TRUE;
+}
+
+
+
+/*
+ *  bfa_rport_public
+ */
+
+/*
+ * Rport interrupt processing.
+ */
+void
+bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
+{
+	union bfi_rport_i2h_msg_u msg;
+	struct bfa_rport_s *rp;
+
+	bfa_trc(bfa, m->mhdr.msg_id);
+
+	msg.msg = m;
+
+	switch (m->mhdr.msg_id) {
+	case BFI_RPORT_I2H_CREATE_RSP:
+		rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
+		rp->fw_handle = msg.create_rsp->fw_handle;
+		rp->qos_attr = msg.create_rsp->qos_attr;
+		bfa_rport_set_lunmask(bfa, rp);
+		WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
+		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
+		break;
+
+	case BFI_RPORT_I2H_DELETE_RSP:
+		rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
+		WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
+		bfa_rport_unset_lunmask(bfa, rp);
+		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
+		break;
+
+	case BFI_RPORT_I2H_QOS_SCN:
+		rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
+		rp->event_arg.fw_msg = msg.qos_scn_evt;
+		bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
+		break;
+
+	case BFI_RPORT_I2H_LIP_SCN_ONLINE:
+		bfa_fcport_update_loop_info(BFA_FCPORT_MOD(bfa),
+				&msg.lip_scn->loop_info);
+		bfa_cb_rport_scn_online(bfa);
+		break;
+
+	case BFI_RPORT_I2H_LIP_SCN_OFFLINE:
+		bfa_cb_rport_scn_offline(bfa);
+		break;
+
+	case BFI_RPORT_I2H_NO_DEV:
+		rp = BFA_RPORT_FROM_TAG(bfa, msg.lip_scn->bfa_handle);
+		bfa_cb_rport_scn_no_dev(rp->rport_drv);
+		break;
+
+	default:
+		bfa_trc(bfa, m->mhdr.msg_id);
+		WARN_ON(1);
+	}
+}
+
+void
+bfa_rport_res_recfg(struct bfa_s *bfa, u16 num_rport_fw)
+{
+	struct bfa_rport_mod_s	*mod = BFA_RPORT_MOD(bfa);
+	struct list_head	*qe;
+	int	i;
+
+	for (i = 0; i < (mod->num_rports - num_rport_fw); i++) {
+		bfa_q_deq_tail(&mod->rp_free_q, &qe);
+		list_add_tail(qe, &mod->rp_unused_q);
+	}
+}
+
+/*
+ *  bfa_rport_api
+ */
+
+struct bfa_rport_s *
+bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
+{
+	struct bfa_rport_s *rp;
+
+	rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));
+
+	if (rp == NULL)
+		return NULL;
+
+	rp->bfa = bfa;
+	rp->rport_drv = rport_drv;
+	memset(&rp->stats, 0, sizeof(rp->stats));
+
+	WARN_ON(!bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
+	bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);
+
+	return rp;
+}
+
+void
+bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
+{
+	WARN_ON(rport_info->max_frmsz == 0);
+
+	/*
+	 * Some JBODs are seen not to set the PDU size correctly in
+	 * PLOGI responses. Default to the minimum size.
+	 */
+	if (rport_info->max_frmsz == 0) {
+		bfa_trc(rport->bfa, rport->rport_tag);
+		rport_info->max_frmsz = FC_MIN_PDUSZ;
+	}
+
+	rport->rport_info = *rport_info;
+	bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
+}
+
+void
+bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
+{
+	WARN_ON(speed == 0);
+	WARN_ON(speed == BFA_PORT_SPEED_AUTO);
+
+	if (rport) {
+		rport->rport_info.speed = speed;
+		bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
+	}
+}
+
+/* Set Rport LUN Mask */
+void
+bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
+{
+	struct bfa_lps_mod_s	*lps_mod = BFA_LPS_MOD(bfa);
+	wwn_t	lp_wwn, rp_wwn;
+	u8 lp_tag = (u8)rp->rport_info.lp_tag;
+
+	rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
+	lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;
+
+	BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
+					rp->lun_mask = BFA_TRUE;
+	bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn, rp->rport_tag, lp_tag);
+}
+
+/* Unset Rport LUN mask */
+void
+bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
+{
+	struct bfa_lps_mod_s	*lps_mod = BFA_LPS_MOD(bfa);
+	wwn_t	lp_wwn, rp_wwn;
+
+	rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
+	lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;
+
+	BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
+				rp->lun_mask = BFA_FALSE;
+	bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn,
+			BFA_RPORT_TAG_INVALID, BFA_LP_TAG_INVALID);
+}
+
+/*
+ * SGPG related functions
+ */
+
+/*
+ * Compute and return memory needed by the SGPG module.
+ */
+void
+bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
+		struct bfa_s *bfa)
+{
+	struct bfa_sgpg_mod_s *sgpg_mod = BFA_SGPG_MOD(bfa);
+	struct bfa_mem_kva_s *sgpg_kva = BFA_MEM_SGPG_KVA(bfa);
+	struct bfa_mem_dma_s *seg_ptr;
+	u16	nsegs, idx, per_seg_sgpg, num_sgpg;
+	u32	sgpg_sz = sizeof(struct bfi_sgpg_s);
+
+	if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
+		cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
+	else if (cfg->drvcfg.num_sgpgs > BFA_SGPG_MAX)
+		cfg->drvcfg.num_sgpgs = BFA_SGPG_MAX;
+
+	num_sgpg = cfg->drvcfg.num_sgpgs;
+
+	nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);
+	per_seg_sgpg = BFI_MEM_NREQS_SEG(sgpg_sz);
+
+	bfa_mem_dma_seg_iter(sgpg_mod, seg_ptr, nsegs, idx) {
+		if (num_sgpg >= per_seg_sgpg) {
+			num_sgpg -= per_seg_sgpg;
+			bfa_mem_dma_setup(minfo, seg_ptr,
+					per_seg_sgpg * sgpg_sz);
+		} else {
+			bfa_mem_dma_setup(minfo, seg_ptr,
+					num_sgpg * sgpg_sz);
+		}
+	}
+
+	/* kva memory */
+	bfa_mem_kva_setup(minfo, sgpg_kva,
+		cfg->drvcfg.num_sgpgs * sizeof(struct bfa_sgpg_s));
+}
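+
+/*
+ * The segmentation arithmetic above splits the SGPG array across fixed-size
+ * DMA segments. As a worked example, assuming a hypothetical 512-byte
+ * bfi_sgpg_s and a 128 KiB DMA segment size (both values illustrative):
+ *
+ *	per_seg_sgpg = 131072 / 512 = 256 pages per segment
+ *	num_sgpg = 1000  ->  nsegs = roundup(1000 * 512 / 131072) = 4
+ *
+ * so the iterator sets up three full 256-page segments plus one final
+ * segment holding the remaining 232 pages.
+ */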
+
+void
+bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+		struct bfa_pcidev_s *pcidev)
+{
+	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
+	struct bfa_sgpg_s *hsgpg;
+	struct bfi_sgpg_s *sgpg;
+	u64 align_len;
+	struct bfa_mem_dma_s *seg_ptr;
+	u32	sgpg_sz = sizeof(struct bfi_sgpg_s);
+	u16	i, idx, nsegs, per_seg_sgpg, num_sgpg;
+
+	union {
+		u64 pa;
+		union bfi_addr_u addr;
+	} sgpg_pa, sgpg_pa_tmp;
+
+	INIT_LIST_HEAD(&mod->sgpg_q);
+	INIT_LIST_HEAD(&mod->sgpg_wait_q);
+
+	bfa_trc(bfa, cfg->drvcfg.num_sgpgs);
+
+	mod->free_sgpgs = mod->num_sgpgs = cfg->drvcfg.num_sgpgs;
+
+	num_sgpg = cfg->drvcfg.num_sgpgs;
+	nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);
+
+	/* dma/kva mem claim */
+	hsgpg = (struct bfa_sgpg_s *) bfa_mem_kva_curp(mod);
+
+	bfa_mem_dma_seg_iter(mod, seg_ptr, nsegs, idx) {
+
+		if (!bfa_mem_dma_virt(seg_ptr))
+			break;
+
+		align_len = BFA_SGPG_ROUNDUP(bfa_mem_dma_phys(seg_ptr)) -
+					     bfa_mem_dma_phys(seg_ptr);
+
+		sgpg = (struct bfi_sgpg_s *)
+			(((u8 *) bfa_mem_dma_virt(seg_ptr)) + align_len);
+		sgpg_pa.pa = bfa_mem_dma_phys(seg_ptr) + align_len;
+		WARN_ON(sgpg_pa.pa & (sgpg_sz - 1));
+
+		per_seg_sgpg = (seg_ptr->mem_len - (u32)align_len) / sgpg_sz;
+
+		for (i = 0; num_sgpg > 0 && i < per_seg_sgpg; i++, num_sgpg--) {
+			memset(hsgpg, 0, sizeof(*hsgpg));
+			memset(sgpg, 0, sizeof(*sgpg));
+
+			hsgpg->sgpg = sgpg;
+			sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
+			hsgpg->sgpg_pa = sgpg_pa_tmp.addr;
+			list_add_tail(&hsgpg->qe, &mod->sgpg_q);
+
+			sgpg++;
+			hsgpg++;
+			sgpg_pa.pa += sgpg_sz;
+		}
+	}
+
+	bfa_mem_kva_curp(mod) = (u8 *) hsgpg;
+}
+
+bfa_status_t
+bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
+{
+	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
+	struct bfa_sgpg_s *hsgpg;
+	int i;
+
+	if (mod->free_sgpgs < nsgpgs)
+		return BFA_STATUS_ENOMEM;
+
+	for (i = 0; i < nsgpgs; i++) {
+		bfa_q_deq(&mod->sgpg_q, &hsgpg);
+		WARN_ON(!hsgpg);
+		list_add_tail(&hsgpg->qe, sgpg_q);
+	}
+
+	mod->free_sgpgs -= nsgpgs;
+	return BFA_STATUS_OK;
+}
+
+void
+bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
+{
+	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
+	struct bfa_sgpg_wqe_s *wqe;
+
+	mod->free_sgpgs += nsgpg;
+	WARN_ON(mod->free_sgpgs > mod->num_sgpgs);
+
+	list_splice_tail_init(sgpg_q, &mod->sgpg_q);
+
+	if (list_empty(&mod->sgpg_wait_q))
+		return;
+
+	/*
+	 * satisfy as many waiting requests as possible
+	 */
+	do {
+		wqe = bfa_q_first(&mod->sgpg_wait_q);
+		if (mod->free_sgpgs < wqe->nsgpg)
+			nsgpg = mod->free_sgpgs;
+		else
+			nsgpg = wqe->nsgpg;
+		bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg);
+		wqe->nsgpg -= nsgpg;
+		if (wqe->nsgpg == 0) {
+			list_del(&wqe->qe);
+			wqe->cbfn(wqe->cbarg);
+		}
+	} while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q));
+}
+
+void
+bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
+{
+	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
+
+	WARN_ON(nsgpg <= 0);
+	WARN_ON(nsgpg <= mod->free_sgpgs);
+
+	wqe->nsgpg_total = wqe->nsgpg = nsgpg;
+
+	/*
+	 * hand any remaining free pages to this request first
+	 */
+	if (mod->free_sgpgs) {
+		/*
+		 * no one else is waiting for SGPG
+		 */
+		WARN_ON(!list_empty(&mod->sgpg_wait_q));
+		list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q);
+		wqe->nsgpg -= mod->free_sgpgs;
+		mod->free_sgpgs = 0;
+	}
+
+	list_add_tail(&wqe->qe, &mod->sgpg_wait_q);
+}
+
+void
+bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe)
+{
+	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
+
+	WARN_ON(!bfa_q_is_on_q(&mod->sgpg_wait_q, wqe));
+	list_del(&wqe->qe);
+
+	if (wqe->nsgpg_total != wqe->nsgpg)
+		bfa_sgpg_mfree(bfa, &wqe->sgpg_q,
+				   wqe->nsgpg_total - wqe->nsgpg);
+}
+
+void
+bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
+		   void *cbarg)
+{
+	INIT_LIST_HEAD(&wqe->sgpg_q);
+	wqe->cbfn = cbfn;
+	wqe->cbarg = cbarg;
+}
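+
+/*
+ * A typical SGPG consumer first tries bfa_sgpg_malloc() and only then falls
+ * back to the wait queue; bfa_sgpg_mfree() above completes waiters as pages
+ * return. Minimal caller sketch (hypothetical names):
+ *
+ *	if (bfa_sgpg_malloc(bfa, &io->sgpg_q, nsgpg) != BFA_STATUS_OK) {
+ *		bfa_sgpg_winit(&io->sgpg_wqe, io_sgpg_alloced, io);
+ *		bfa_sgpg_wait(bfa, &io->sgpg_wqe, nsgpg);
+ *		return;		(io_sgpg_alloced() resumes the I/O)
+ *	}
+ *	... pages available immediately; build the SG list ...
+ */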
+
+/*
+ *  UF related functions
+ */
+/*
+ *****************************************************************************
+ * Internal functions
+ *****************************************************************************
+ */
+static void
+__bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
+{
+	struct bfa_uf_s   *uf = cbarg;
+	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa);
+
+	if (complete)
+		ufm->ufrecv(ufm->cbarg, uf);
+}
+
+static void
+claim_uf_post_msgs(struct bfa_uf_mod_s *ufm)
+{
+	struct bfi_uf_buf_post_s *uf_bp_msg;
+	u16 i;
+	u16 buf_len;
+
+	ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_mem_kva_curp(ufm);
+	uf_bp_msg = ufm->uf_buf_posts;
+
+	for (i = 0; i < ufm->num_ufs; i++, uf_bp_msg++) {
+		memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
+
+		uf_bp_msg->buf_tag = i;
+		buf_len = sizeof(struct bfa_uf_buf_s);
+		uf_bp_msg->buf_len = cpu_to_be16(buf_len);
+		bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
+			    bfa_fn_lpu(ufm->bfa));
+		bfa_alen_set(&uf_bp_msg->alen, buf_len, ufm_pbs_pa(ufm, i));
+	}
+
+	/*
+	 * advance pointer beyond consumed memory
+	 */
+	bfa_mem_kva_curp(ufm) = (u8 *) uf_bp_msg;
+}
+
+static void
+claim_ufs(struct bfa_uf_mod_s *ufm)
+{
+	u16 i;
+	struct bfa_uf_s   *uf;
+
+	/*
+	 * Claim block of memory for UF list
+	 */
+	ufm->uf_list = (struct bfa_uf_s *) bfa_mem_kva_curp(ufm);
+
+	/*
+	 * Initialize UFs and queue it in UF free queue
+	 */
+	for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
+		memset(uf, 0, sizeof(struct bfa_uf_s));
+		uf->bfa = ufm->bfa;
+		uf->uf_tag = i;
+		uf->pb_len = BFA_PER_UF_DMA_SZ;
+		uf->buf_kva = bfa_mem_get_dmabuf_kva(ufm, i, BFA_PER_UF_DMA_SZ);
+		uf->buf_pa = ufm_pbs_pa(ufm, i);
+		list_add_tail(&uf->qe, &ufm->uf_free_q);
+	}
+
+	/*
+	 * advance memory pointer
+	 */
+	bfa_mem_kva_curp(ufm) = (u8 *) uf;
+}
+
+static void
+uf_mem_claim(struct bfa_uf_mod_s *ufm)
+{
+	claim_ufs(ufm);
+	claim_uf_post_msgs(ufm);
+}
+
+void
+bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
+		struct bfa_s *bfa)
+{
+	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
+	struct bfa_mem_kva_s *uf_kva = BFA_MEM_UF_KVA(bfa);
+	u32	num_ufs = cfg->fwcfg.num_uf_bufs;
+	struct bfa_mem_dma_s *seg_ptr;
+	u16	nsegs, idx, per_seg_uf = 0;
+
+	nsegs = BFI_MEM_DMA_NSEGS(num_ufs, BFA_PER_UF_DMA_SZ);
+	per_seg_uf = BFI_MEM_NREQS_SEG(BFA_PER_UF_DMA_SZ);
+
+	bfa_mem_dma_seg_iter(ufm, seg_ptr, nsegs, idx) {
+		if (num_ufs >= per_seg_uf) {
+			num_ufs -= per_seg_uf;
+			bfa_mem_dma_setup(minfo, seg_ptr,
+				per_seg_uf * BFA_PER_UF_DMA_SZ);
+		} else {
+			bfa_mem_dma_setup(minfo, seg_ptr,
+				num_ufs * BFA_PER_UF_DMA_SZ);
+		}
+	}
+
+	/* kva memory */
+	bfa_mem_kva_setup(minfo, uf_kva, cfg->fwcfg.num_uf_bufs *
+		(sizeof(struct bfa_uf_s) + sizeof(struct bfi_uf_buf_post_s)));
+}
+
+void
+bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+		struct bfa_pcidev_s *pcidev)
+{
+	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
+
+	ufm->bfa = bfa;
+	ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
+	INIT_LIST_HEAD(&ufm->uf_free_q);
+	INIT_LIST_HEAD(&ufm->uf_posted_q);
+	INIT_LIST_HEAD(&ufm->uf_unused_q);
+
+	uf_mem_claim(ufm);
+}
+
+static struct bfa_uf_s *
+bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
+{
+	struct bfa_uf_s   *uf;
+
+	bfa_q_deq(&uf_mod->uf_free_q, &uf);
+	return uf;
+}
+
+static void
+bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf)
+{
+	list_add_tail(&uf->qe, &uf_mod->uf_free_q);
+}
+
+static bfa_status_t
+bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
+{
+	struct bfi_uf_buf_post_s *uf_post_msg;
+
+	uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP);
+	if (!uf_post_msg)
+		return BFA_STATUS_FAILED;
+
+	memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
+		      sizeof(struct bfi_uf_buf_post_s));
+	bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP, uf_post_msg->mh);
+
+	bfa_trc(ufm->bfa, uf->uf_tag);
+
+	list_add_tail(&uf->qe, &ufm->uf_posted_q);
+	return BFA_STATUS_OK;
+}
+
+static void
+bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod)
+{
+	struct bfa_uf_s   *uf;
+
+	while ((uf = bfa_uf_get(uf_mod)) != NULL) {
+		if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK)
+			break;
+	}
+}
+
+static void
+uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
+{
+	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
+	u16 uf_tag = m->buf_tag;
+	struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
+	struct bfa_uf_buf_s *uf_buf;
+	uint8_t *buf;
+	struct fchs_s *fchs;
+
+	uf_buf = (struct bfa_uf_buf_s *)
+			bfa_mem_get_dmabuf_kva(ufm, uf_tag, uf->pb_len);
+	buf = &uf_buf->d[0];
+
+	m->frm_len = be16_to_cpu(m->frm_len);
+	m->xfr_len = be16_to_cpu(m->xfr_len);
+
+	fchs = (struct fchs_s *)uf_buf;
+
+	list_del(&uf->qe);	/* dequeue from posted queue */
+
+	uf->data_ptr = buf;
+	uf->data_len = m->xfr_len;
+
+	WARN_ON(uf->data_len < sizeof(struct fchs_s));
+
+	if (uf->data_len == sizeof(struct fchs_s)) {
+		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX,
+			       uf->data_len, (struct fchs_s *)buf);
+	} else {
+		u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s)));
+		bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF,
+				      BFA_PL_EID_RX, uf->data_len,
+				      (struct fchs_s *)buf, pld_w0);
+	}
+
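+	/*
+	 * With an FCS instance present, deliver the frame inline;
+	 * otherwise queue it as a deferred completion callback.
+	 */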
+	if (bfa->fcs)
+		__bfa_cb_uf_recv(uf, BFA_TRUE);
+	else
+		bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
+}
+
+void
+bfa_uf_iocdisable(struct bfa_s *bfa)
+{
+	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
+	struct bfa_uf_s *uf;
+	struct list_head *qe, *qen;
+
+	/* Enqueue unused uf resources to free_q */
+	list_splice_tail_init(&ufm->uf_unused_q, &ufm->uf_free_q);
+
+	list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
+		uf = (struct bfa_uf_s *) qe;
+		list_del(&uf->qe);
+		bfa_uf_put(ufm, uf);
+	}
+}
+
+void
+bfa_uf_start(struct bfa_s *bfa)
+{
+	bfa_uf_post_all(BFA_UF_MOD(bfa));
+}
+
+/*
+ * Register handler for all unsolicited receive frames.
+ *
+ * @param[in]	bfa		BFA instance
+ * @param[in]	ufrecv	receive handler function
+ * @param[in]	cbarg	receive handler arg
+ */
+void
+bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
+{
+	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
+
+	ufm->ufrecv = ufrecv;
+	ufm->cbarg = cbarg;
+}
+
+/*
+ *	Free an unsolicited frame back to BFA.
+ *
+ * @param[in]		uf		unsolicited frame to be freed
+ *
+ * @return None
+ */
+void
+bfa_uf_free(struct bfa_uf_s *uf)
+{
+	bfa_uf_put(BFA_UF_MOD(uf->bfa), uf);
+	bfa_uf_post_all(BFA_UF_MOD(uf->bfa));
+}
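+
+/*
+ * Putting the UF API together: a client registers one receive handler, is
+ * called with each unsolicited frame, and returns the buffer so it can be
+ * re-posted to firmware. Minimal handler sketch (hypothetical names; the
+ * bfa_uf_get_frmbuf()/bfa_uf_get_frmlen() accessors are assumed from
+ * bfa_svc.h):
+ *
+ *	static void my_ufrecv(void *cbarg, struct bfa_uf_s *uf)
+ *	{
+ *		struct fchs_s *fchs =
+ *			(struct fchs_s *) bfa_uf_get_frmbuf(uf);
+ *		u16 len = bfa_uf_get_frmlen(uf);
+ *
+ *		... parse the frame ...
+ *		bfa_uf_free(uf);	(re-posts the buffer)
+ *	}
+ *
+ *	bfa_uf_recv_register(bfa, my_ufrecv, my_cbarg);
+ */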
+
+
+
+/*
+ *  uf_pub BFA uf module public functions
+ */
+void
+bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
+{
+	bfa_trc(bfa, msg->mhdr.msg_id);
+
+	switch (msg->mhdr.msg_id) {
+	case BFI_UF_I2H_FRM_RCVD:
+		uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg);
+		break;
+
+	default:
+		bfa_trc(bfa, msg->mhdr.msg_id);
+		WARN_ON(1);
+	}
+}
+
+void
+bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw)
+{
+	struct bfa_uf_mod_s	*mod = BFA_UF_MOD(bfa);
+	struct list_head	*qe;
+	int	i;
+
+	for (i = 0; i < (mod->num_ufs - num_uf_fw); i++) {
+		bfa_q_deq_tail(&mod->uf_free_q, &qe);
+		list_add_tail(qe, &mod->uf_unused_q);
+	}
+}
+
+/*
+ *	Dport forward declaration
+ */
+
+enum bfa_dport_test_state_e {
+	BFA_DPORT_ST_DISABLED	= 0,	/*!< dport is disabled */
+	BFA_DPORT_ST_INP	= 1,	/*!< test in progress */
+	BFA_DPORT_ST_COMP	= 2,	/*!< test completed successfully */
+	BFA_DPORT_ST_NO_SFP	= 3,	/*!< sfp is not present */
+	BFA_DPORT_ST_NOTSTART	= 4,	/*!< test not started; dport enabled */
+};
+
+/*
+ * BFA DPORT state machine events
+ */
+enum bfa_dport_sm_event {
+	BFA_DPORT_SM_ENABLE	= 1,	/* dport enable event         */
+	BFA_DPORT_SM_DISABLE    = 2,    /* dport disable event        */
+	BFA_DPORT_SM_FWRSP      = 3,    /* fw enable/disable rsp      */
+	BFA_DPORT_SM_QRESUME    = 4,    /* CQ space available         */
+	BFA_DPORT_SM_HWFAIL     = 5,    /* IOC h/w failure            */
+	BFA_DPORT_SM_START	= 6,	/* re-start dport test        */
+	BFA_DPORT_SM_REQFAIL	= 7,	/* request failure            */
+	BFA_DPORT_SM_SCN	= 8,	/* state change notify frm fw */
+};
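+
+/*
+ * Rough shape of the state machine implemented below (happy path only):
+ *
+ *	disabled --ENABLE--> enabling(_qwait)  --FWRSP--> enabled
+ *	enabled  --START---> starting(_qwait)  --FWRSP--> enabled
+ *	enabled  --DISABLE-> disabling(_qwait) --FWRSP--> disabled
+ *
+ * The *_qwait states cover a full request queue and exit on QRESUME;
+ * HWFAIL drops any state back to disabled.
+ */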
+
+static void bfa_dport_sm_disabled(struct bfa_dport_s *dport,
+				  enum bfa_dport_sm_event event);
+static void bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport,
+				  enum bfa_dport_sm_event event);
+static void bfa_dport_sm_enabling(struct bfa_dport_s *dport,
+				  enum bfa_dport_sm_event event);
+static void bfa_dport_sm_enabled(struct bfa_dport_s *dport,
+				 enum bfa_dport_sm_event event);
+static void bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport,
+				 enum bfa_dport_sm_event event);
+static void bfa_dport_sm_disabling(struct bfa_dport_s *dport,
+				   enum bfa_dport_sm_event event);
+static void bfa_dport_sm_starting_qwait(struct bfa_dport_s *dport,
+					enum bfa_dport_sm_event event);
+static void bfa_dport_sm_starting(struct bfa_dport_s *dport,
+				  enum bfa_dport_sm_event event);
+static void bfa_dport_sm_dynamic_disabling(struct bfa_dport_s *dport,
+				   enum bfa_dport_sm_event event);
+static void bfa_dport_sm_dynamic_disabling_qwait(struct bfa_dport_s *dport,
+				   enum bfa_dport_sm_event event);
+static void bfa_dport_qresume(void *cbarg);
+static void bfa_dport_req_comp(struct bfa_dport_s *dport,
+				struct bfi_diag_dport_rsp_s *msg);
+static void bfa_dport_scn(struct bfa_dport_s *dport,
+				struct bfi_diag_dport_scn_s *msg);
+
+/*
+ *	BFA fcdiag module
+ */
+#define BFA_DIAG_QTEST_TOV	1000    /* msec */
+
+/*
+ *	Update the port's diag busy status from the loopback lock
+ */
+static void
+bfa_fcdiag_set_busy_status(struct bfa_fcdiag_s *fcdiag)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(fcdiag->bfa);
+
+	if (fcdiag->lb.lock)
+		fcport->diag_busy = BFA_TRUE;
+	else
+		fcport->diag_busy = BFA_FALSE;
+}
+
+void
+bfa_fcdiag_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+		struct bfa_pcidev_s *pcidev)
+{
+	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+	struct bfa_dport_s  *dport = &fcdiag->dport;
+
+	fcdiag->bfa             = bfa;
+	fcdiag->trcmod  = bfa->trcmod;
+	/* The common DIAG attach bfa_diag_attach() will do all memory claim */
+	dport->bfa = bfa;
+	bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+	bfa_reqq_winit(&dport->reqq_wait, bfa_dport_qresume, dport);
+	dport->cbfn = NULL;
+	dport->cbarg = NULL;
+	dport->test_state = BFA_DPORT_ST_DISABLED;
+	memset(&dport->result, 0, sizeof(struct bfa_diag_dport_result_s));
+}
+
+void
+bfa_fcdiag_iocdisable(struct bfa_s *bfa)
+{
+	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+	struct bfa_dport_s *dport = &fcdiag->dport;
+
+	bfa_trc(fcdiag, fcdiag->lb.lock);
+	if (fcdiag->lb.lock) {
+		fcdiag->lb.status = BFA_STATUS_IOC_FAILURE;
+		fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
+		fcdiag->lb.lock = 0;
+		bfa_fcdiag_set_busy_status(fcdiag);
+	}
+
+	bfa_sm_send_event(dport, BFA_DPORT_SM_HWFAIL);
+}
+
+static void
+bfa_fcdiag_queuetest_timeout(void *cbarg)
+{
+	struct bfa_fcdiag_s       *fcdiag = cbarg;
+	struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;
+
+	bfa_trc(fcdiag, fcdiag->qtest.all);
+	bfa_trc(fcdiag, fcdiag->qtest.count);
+
+	fcdiag->qtest.timer_active = 0;
+
+	res->status = BFA_STATUS_ETIMER;
+	res->count  = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
+	if (fcdiag->qtest.all)
+		res->queue  = fcdiag->qtest.all;
+
+	bfa_trc(fcdiag, BFA_STATUS_ETIMER);
+	fcdiag->qtest.status = BFA_STATUS_ETIMER;
+	fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
+	fcdiag->qtest.lock = 0;
+}
+
+static bfa_status_t
+bfa_fcdiag_queuetest_send(struct bfa_fcdiag_s *fcdiag)
+{
+	u32	i;
+	struct bfi_diag_qtest_req_s *req;
+
+	req = bfa_reqq_next(fcdiag->bfa, fcdiag->qtest.queue);
+	if (!req)
+		return BFA_STATUS_DEVBUSY;
+
+	/* build host command */
+	bfi_h2i_set(req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_QTEST,
+		bfa_fn_lpu(fcdiag->bfa));
+
+	for (i = 0; i < BFI_LMSG_PL_WSZ; i++)
+		req->data[i] = QTEST_PAT_DEFAULT;
+
+	bfa_trc(fcdiag, fcdiag->qtest.queue);
+	/* ring door bell */
+	bfa_reqq_produce(fcdiag->bfa, fcdiag->qtest.queue, req->mh);
+	return BFA_STATUS_OK;
+}
+
+static void
+bfa_fcdiag_queuetest_comp(struct bfa_fcdiag_s *fcdiag,
+			bfi_diag_qtest_rsp_t *rsp)
+{
+	struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;
+	bfa_status_t status = BFA_STATUS_OK;
+	int i;
+
+	/* Check timer, should still be active   */
+	if (!fcdiag->qtest.timer_active) {
+		bfa_trc(fcdiag, fcdiag->qtest.timer_active);
+		return;
+	}
+
+	/* update count */
+	fcdiag->qtest.count--;
+
+	/* Check result */
+	for (i = 0; i < BFI_LMSG_PL_WSZ; i++) {
+		if (rsp->data[i] != ~(QTEST_PAT_DEFAULT)) {
+			res->status = BFA_STATUS_DATACORRUPTED;
+			break;
+		}
+	}
+
+	if (res->status == BFA_STATUS_OK) {
+		if (fcdiag->qtest.count > 0) {
+			status = bfa_fcdiag_queuetest_send(fcdiag);
+			if (status == BFA_STATUS_OK)
+				return;
+			else
+				res->status = status;
+		} else if (fcdiag->qtest.all > 0 &&
+			fcdiag->qtest.queue < (BFI_IOC_MAX_CQS - 1)) {
+			fcdiag->qtest.count = QTEST_CNT_DEFAULT;
+			fcdiag->qtest.queue++;
+			status = bfa_fcdiag_queuetest_send(fcdiag);
+			if (status == BFA_STATUS_OK)
+				return;
+			else
+				res->status = status;
+		}
+	}
+
+	/* Stop the timer once all queues have completed */
+	if (fcdiag->qtest.timer_active) {
+		bfa_timer_stop(&fcdiag->qtest.timer);
+		fcdiag->qtest.timer_active = 0;
+	}
+	res->queue = fcdiag->qtest.queue;
+	res->count = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
+	bfa_trc(fcdiag, res->count);
+	bfa_trc(fcdiag, res->status);
+	fcdiag->qtest.status = res->status;
+	fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
+	fcdiag->qtest.lock = 0;
+}
+
+static void
+bfa_fcdiag_loopback_comp(struct bfa_fcdiag_s *fcdiag,
+			struct bfi_diag_lb_rsp_s *rsp)
+{
+	struct bfa_diag_loopback_result_s *res = fcdiag->lb.result;
+
+	res->numtxmfrm  = be32_to_cpu(rsp->res.numtxmfrm);
+	res->numosffrm  = be32_to_cpu(rsp->res.numosffrm);
+	res->numrcvfrm  = be32_to_cpu(rsp->res.numrcvfrm);
+	res->badfrminf  = be32_to_cpu(rsp->res.badfrminf);
+	res->badfrmnum  = be32_to_cpu(rsp->res.badfrmnum);
+	res->status     = rsp->res.status;
+	fcdiag->lb.status = rsp->res.status;
+	bfa_trc(fcdiag, fcdiag->lb.status);
+	fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
+	fcdiag->lb.lock = 0;
+	bfa_fcdiag_set_busy_status(fcdiag);
+}
+
+static bfa_status_t
+bfa_fcdiag_loopback_send(struct bfa_fcdiag_s *fcdiag,
+			struct bfa_diag_loopback_s *loopback)
+{
+	struct bfi_diag_lb_req_s *lb_req;
+
+	lb_req = bfa_reqq_next(fcdiag->bfa, BFA_REQQ_DIAG);
+	if (!lb_req)
+		return BFA_STATUS_DEVBUSY;
+
+	/* build host command */
+	bfi_h2i_set(lb_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LOOPBACK,
+		bfa_fn_lpu(fcdiag->bfa));
+
+	lb_req->lb_mode = loopback->lb_mode;
+	lb_req->speed = loopback->speed;
+	lb_req->loopcnt = loopback->loopcnt;
+	lb_req->pattern = loopback->pattern;
+
+	/* ring door bell */
+	bfa_reqq_produce(fcdiag->bfa, BFA_REQQ_DIAG, lb_req->mh);
+
+	bfa_trc(fcdiag, loopback->lb_mode);
+	bfa_trc(fcdiag, loopback->speed);
+	bfa_trc(fcdiag, loopback->loopcnt);
+	bfa_trc(fcdiag, loopback->pattern);
+	return BFA_STATUS_OK;
+}
+
+/*
+ *	cpe/rme intr handler
+ */
+void
+bfa_fcdiag_intr(struct bfa_s *bfa, struct bfi_msg_s *msg)
+{
+	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+
+	switch (msg->mhdr.msg_id) {
+	case BFI_DIAG_I2H_LOOPBACK:
+		bfa_fcdiag_loopback_comp(fcdiag,
+				(struct bfi_diag_lb_rsp_s *) msg);
+		break;
+	case BFI_DIAG_I2H_QTEST:
+		bfa_fcdiag_queuetest_comp(fcdiag, (bfi_diag_qtest_rsp_t *)msg);
+		break;
+	case BFI_DIAG_I2H_DPORT:
+		bfa_dport_req_comp(&fcdiag->dport,
+				(struct bfi_diag_dport_rsp_s *)msg);
+		break;
+	case BFI_DIAG_I2H_DPORT_SCN:
+		bfa_dport_scn(&fcdiag->dport,
+				(struct bfi_diag_dport_scn_s *)msg);
+		break;
+	default:
+		bfa_trc(fcdiag, msg->mhdr.msg_id);
+		WARN_ON(1);
+	}
+}
+
+/*
+ *	Loopback test
+ *
+ *   @param[in] *bfa    - bfa data struct
+ *   @param[in] opmode  - port operation mode
+ *   @param[in] speed   - port speed
+ *   @param[in] lpcnt   - loop count
+ *   @param[in] pat     - pattern used to build the packet
+ *   @param[in] *result - pointer to bfa_diag_loopback_result_s data struct
+ *   @param[in] cbfn    - callback function
+ *   @param[in] cbarg   - callback function arg
+ */
+bfa_status_t
+bfa_fcdiag_loopback(struct bfa_s *bfa, enum bfa_port_opmode opmode,
+		enum bfa_port_speed speed, u32 lpcnt, u32 pat,
+		struct bfa_diag_loopback_result_s *result, bfa_cb_diag_t cbfn,
+		void *cbarg)
+{
+	struct  bfa_diag_loopback_s loopback;
+	struct bfa_port_attr_s attr;
+	bfa_status_t status;
+	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+
+	if (!bfa_iocfc_is_operational(bfa))
+		return BFA_STATUS_IOC_NON_OP;
+
+	/* if port is PBC disabled, return error */
+	if (bfa_fcport_is_pbcdisabled(bfa)) {
+		bfa_trc(fcdiag, BFA_STATUS_PBC);
+		return BFA_STATUS_PBC;
+	}
+
+	if (bfa_fcport_is_disabled(bfa) == BFA_FALSE) {
+		bfa_trc(fcdiag, opmode);
+		return BFA_STATUS_PORT_NOT_DISABLED;
+	}
+
+	/*
+	 * Check if input speed is supported by the port mode
+	 */
+	if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
+		if (!(speed == BFA_PORT_SPEED_1GBPS ||
+		      speed == BFA_PORT_SPEED_2GBPS ||
+		      speed == BFA_PORT_SPEED_4GBPS ||
+		      speed == BFA_PORT_SPEED_8GBPS ||
+		      speed == BFA_PORT_SPEED_16GBPS ||
+		      speed == BFA_PORT_SPEED_AUTO)) {
+			bfa_trc(fcdiag, speed);
+			return BFA_STATUS_UNSUPP_SPEED;
+		}
+		bfa_fcport_get_attr(bfa, &attr);
+		bfa_trc(fcdiag, attr.speed_supported);
+		if (speed > attr.speed_supported)
+			return BFA_STATUS_UNSUPP_SPEED;
+	} else {
+		if (speed != BFA_PORT_SPEED_10GBPS) {
+			bfa_trc(fcdiag, speed);
+			return BFA_STATUS_UNSUPP_SPEED;
+		}
+	}
+
+	/*
+	 * For CT2, 1G is not supported
+	 */
+	if ((speed == BFA_PORT_SPEED_1GBPS) &&
+	    (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id))) {
+		bfa_trc(fcdiag, speed);
+		return BFA_STATUS_UNSUPP_SPEED;
+	}
+
+	/* For Mezz card, port speed entered needs to be checked */
+	if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) {
+		if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
+			if (!(speed == BFA_PORT_SPEED_1GBPS ||
+			      speed == BFA_PORT_SPEED_2GBPS ||
+			      speed == BFA_PORT_SPEED_4GBPS ||
+			      speed == BFA_PORT_SPEED_8GBPS ||
+			      speed == BFA_PORT_SPEED_16GBPS ||
+			      speed == BFA_PORT_SPEED_AUTO))
+				return BFA_STATUS_UNSUPP_SPEED;
+		} else {
+			if (speed != BFA_PORT_SPEED_10GBPS)
+				return BFA_STATUS_UNSUPP_SPEED;
+		}
+	}
+	/* check to see if fcport is dport */
+	if (bfa_fcport_is_dport(bfa)) {
+		bfa_trc(fcdiag, fcdiag->lb.lock);
+		return BFA_STATUS_DPORT_ENABLED;
+	}
+	/* check to see if there is another destructive diag cmd running */
+	if (fcdiag->lb.lock) {
+		bfa_trc(fcdiag, fcdiag->lb.lock);
+		return BFA_STATUS_DEVBUSY;
+	}
+
+	fcdiag->lb.lock = 1;
+	loopback.lb_mode = opmode;
+	loopback.speed = speed;
+	loopback.loopcnt = lpcnt;
+	loopback.pattern = pat;
+	fcdiag->lb.result = result;
+	fcdiag->lb.cbfn = cbfn;
+	fcdiag->lb.cbarg = cbarg;
+	memset(result, 0, sizeof(struct bfa_diag_loopback_result_s));
+	bfa_fcdiag_set_busy_status(fcdiag);
+
+	/* Send msg to fw */
+	status = bfa_fcdiag_loopback_send(fcdiag, &loopback);
+	return status;
+}
+
+/*
+ *	DIAG queue test command
+ *
+ *   @param[in] *bfa    - bfa data struct
+ *   @param[in] force   - 1: skip the IOC operational check
+ *   @param[in] queue   - queue number to test; >= BFI_IOC_MAX_CQS tests all
+ *   @param[in] *result - pointer to bfa_diag_qtest_result_s data struct
+ *   @param[in] cbfn    - callback function
+ *   @param[in] *cbarg  - callback function arg
+ */
+bfa_status_t
+bfa_fcdiag_queuetest(struct bfa_s *bfa, u32 force, u32 queue,
+		struct bfa_diag_qtest_result_s *result, bfa_cb_diag_t cbfn,
+		void *cbarg)
+{
+	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+	bfa_status_t status;
+
+	bfa_trc(fcdiag, force);
+	bfa_trc(fcdiag, queue);
+
+	if (!force && !bfa_iocfc_is_operational(bfa))
+		return BFA_STATUS_IOC_NON_OP;
+
+	/* check to see if there is another destructive diag cmd running */
+	if (fcdiag->qtest.lock) {
+		bfa_trc(fcdiag, fcdiag->qtest.lock);
+		return BFA_STATUS_DEVBUSY;
+	}
+
+	/* Initialization */
+	fcdiag->qtest.lock = 1;
+	fcdiag->qtest.cbfn = cbfn;
+	fcdiag->qtest.cbarg = cbarg;
+	fcdiag->qtest.result = result;
+	fcdiag->qtest.count = QTEST_CNT_DEFAULT;
+
+	/* Init test results */
+	fcdiag->qtest.result->status = BFA_STATUS_OK;
+	fcdiag->qtest.result->count  = 0;
+
+	/* send */
+	if (queue < BFI_IOC_MAX_CQS) {
+		fcdiag->qtest.result->queue  = (u8)queue;
+		fcdiag->qtest.queue = (u8)queue;
+		fcdiag->qtest.all   = 0;
+	} else {
+		fcdiag->qtest.result->queue  = 0;
+		fcdiag->qtest.queue = 0;
+		fcdiag->qtest.all   = 1;
+	}
+	status = bfa_fcdiag_queuetest_send(fcdiag);
+
+	/* Start a timer */
+	if (status == BFA_STATUS_OK) {
+		bfa_timer_start(bfa, &fcdiag->qtest.timer,
+				bfa_fcdiag_queuetest_timeout, fcdiag,
+				BFA_DIAG_QTEST_TOV);
+		fcdiag->qtest.timer_active = 1;
+	}
+	return status;
+}
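+
+/*
+ * Example invocation (illustrative): passing a queue number >=
+ * BFI_IOC_MAX_CQS sets qtest.all and walks every CQ; the verdict arrives
+ * in the completion callback.
+ *
+ *	static void qtest_done(void *cbarg, bfa_status_t status)
+ *	{
+ *		struct bfa_diag_qtest_result_s *res = cbarg;
+ *		... res->status / res->queue / res->count now valid ...
+ *	}
+ *
+ *	bfa_fcdiag_queuetest(bfa, 0, BFI_IOC_MAX_CQS, &res, qtest_done, &res);
+ */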
+
+/*
+ *	Check whether a DIAG loopback test is running
+ *
+ *   @param[in] *bfa    - bfa data struct
+ */
+bfa_status_t
+bfa_fcdiag_lb_is_running(struct bfa_s *bfa)
+{
+	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+
+	return fcdiag->lb.lock ? BFA_STATUS_DIAG_BUSY : BFA_STATUS_OK;
+}
+
+/*
+ *	D-port
+ */
+#define bfa_dport_result_start(__dport, __mode) do {				\
+		(__dport)->result.start_time = ktime_get_real_seconds();	\
+		(__dport)->result.status = DPORT_TEST_ST_INPRG;			\
+		(__dport)->result.mode = (__mode);				\
+		(__dport)->result.rp_pwwn = (__dport)->rp_pwwn;			\
+		(__dport)->result.rp_nwwn = (__dport)->rp_nwwn;			\
+		(__dport)->result.lpcnt = (__dport)->lpcnt;			\
+} while (0)
+
+static bfa_boolean_t bfa_dport_send_req(struct bfa_dport_s *dport,
+					enum bfi_dport_req req);
+static void
+bfa_cb_fcdiag_dport(struct bfa_dport_s *dport, bfa_status_t bfa_status)
+{
+	if (dport->cbfn != NULL) {
+		dport->cbfn(dport->cbarg, bfa_status);
+		dport->cbfn = NULL;
+		dport->cbarg = NULL;
+	}
+}
+
+static void
+bfa_dport_sm_disabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
+{
+	bfa_trc(dport->bfa, event);
+
+	switch (event) {
+	case BFA_DPORT_SM_ENABLE:
+		bfa_fcport_dportenable(dport->bfa);
+		if (bfa_dport_send_req(dport, BFI_DPORT_ENABLE))
+			bfa_sm_set_state(dport, bfa_dport_sm_enabling);
+		else
+			bfa_sm_set_state(dport, bfa_dport_sm_enabling_qwait);
+		break;
+
+	case BFA_DPORT_SM_DISABLE:
+		/* Already disabled */
+		break;
+
+	case BFA_DPORT_SM_HWFAIL:
+		/* ignore */
+		break;
+
+	case BFA_DPORT_SM_SCN:
+		if (dport->i2hmsg.scn.state == BFI_DPORT_SCN_DDPORT_ENABLE) {
+			bfa_fcport_ddportenable(dport->bfa);
+			dport->dynamic = BFA_TRUE;
+			dport->test_state = BFA_DPORT_ST_NOTSTART;
+			bfa_sm_set_state(dport, bfa_dport_sm_enabled);
+		} else {
+			bfa_trc(dport->bfa, dport->i2hmsg.scn.state);
+			WARN_ON(1);
+		}
+		break;
+
+	default:
+		bfa_sm_fault(dport->bfa, event);
+	}
+}
+
+static void
+bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport,
+			    enum bfa_dport_sm_event event)
+{
+	bfa_trc(dport->bfa, event);
+
+	switch (event) {
+	case BFA_DPORT_SM_QRESUME:
+		bfa_sm_set_state(dport, bfa_dport_sm_enabling);
+		bfa_dport_send_req(dport, BFI_DPORT_ENABLE);
+		break;
+
+	case BFA_DPORT_SM_HWFAIL:
+		bfa_reqq_wcancel(&dport->reqq_wait);
+		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+		bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
+		break;
+
+	default:
+		bfa_sm_fault(dport->bfa, event);
+	}
+}
+
+static void
+bfa_dport_sm_enabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
+{
+	bfa_trc(dport->bfa, event);
+
+	switch (event) {
+	case BFA_DPORT_SM_FWRSP:
+		memset(&dport->result, 0,
+				sizeof(struct bfa_diag_dport_result_s));
+		if (dport->i2hmsg.rsp.status == BFA_STATUS_DPORT_INV_SFP) {
+			dport->test_state = BFA_DPORT_ST_NO_SFP;
+		} else {
+			dport->test_state = BFA_DPORT_ST_INP;
+			bfa_dport_result_start(dport, BFA_DPORT_OPMODE_AUTO);
+		}
+		bfa_sm_set_state(dport, bfa_dport_sm_enabled);
+		break;
+
+	case BFA_DPORT_SM_REQFAIL:
+		dport->test_state = BFA_DPORT_ST_DISABLED;
+		bfa_fcport_dportdisable(dport->bfa);
+		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+		break;
+
+	case BFA_DPORT_SM_HWFAIL:
+		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+		bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
+		break;
+
+	default:
+		bfa_sm_fault(dport->bfa, event);
+	}
+}
+
+static void
+bfa_dport_sm_enabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
+{
+	bfa_trc(dport->bfa, event);
+
+	switch (event) {
+	case BFA_DPORT_SM_START:
+		if (bfa_dport_send_req(dport, BFI_DPORT_START))
+			bfa_sm_set_state(dport, bfa_dport_sm_starting);
+		else
+			bfa_sm_set_state(dport, bfa_dport_sm_starting_qwait);
+		break;
+
+	case BFA_DPORT_SM_DISABLE:
+		bfa_fcport_dportdisable(dport->bfa);
+		if (bfa_dport_send_req(dport, BFI_DPORT_DISABLE))
+			bfa_sm_set_state(dport, bfa_dport_sm_disabling);
+		else
+			bfa_sm_set_state(dport, bfa_dport_sm_disabling_qwait);
+		break;
+
+	case BFA_DPORT_SM_HWFAIL:
+		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+		break;
+
+	case BFA_DPORT_SM_SCN:
+		switch (dport->i2hmsg.scn.state) {
+		case BFI_DPORT_SCN_TESTCOMP:
+			dport->test_state = BFA_DPORT_ST_COMP;
+			break;
+
+		case BFI_DPORT_SCN_TESTSTART:
+			dport->test_state = BFA_DPORT_ST_INP;
+			break;
+
+		case BFI_DPORT_SCN_TESTSKIP:
+		case BFI_DPORT_SCN_SUBTESTSTART:
+			/* no state change */
+			break;
+
+		case BFI_DPORT_SCN_SFP_REMOVED:
+			dport->test_state = BFA_DPORT_ST_NO_SFP;
+			break;
+
+		case BFI_DPORT_SCN_DDPORT_DISABLE:
+			bfa_fcport_ddportdisable(dport->bfa);
+
+			if (bfa_dport_send_req(dport, BFI_DPORT_DYN_DISABLE))
+				bfa_sm_set_state(dport,
+					 bfa_dport_sm_dynamic_disabling);
+			else
+				bfa_sm_set_state(dport,
+					 bfa_dport_sm_dynamic_disabling_qwait);
+			break;
+
+		case BFI_DPORT_SCN_FCPORT_DISABLE:
+			bfa_fcport_ddportdisable(dport->bfa);
+
+			bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+			dport->dynamic = BFA_FALSE;
+			break;
+
+		default:
+			bfa_trc(dport->bfa, dport->i2hmsg.scn.state);
+			bfa_sm_fault(dport->bfa, event);
+		}
+		break;
+	default:
+		bfa_sm_fault(dport->bfa, event);
+	}
+}
+
+static void
+bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport,
+			     enum bfa_dport_sm_event event)
+{
+	bfa_trc(dport->bfa, event);
+
+	switch (event) {
+	case BFA_DPORT_SM_QRESUME:
+		bfa_sm_set_state(dport, bfa_dport_sm_disabling);
+		bfa_dport_send_req(dport, BFI_DPORT_DISABLE);
+		break;
+
+	case BFA_DPORT_SM_HWFAIL:
+		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+		bfa_reqq_wcancel(&dport->reqq_wait);
+		bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
+		break;
+
+	case BFA_DPORT_SM_SCN:
+		/* ignore */
+		break;
+
+	default:
+		bfa_sm_fault(dport->bfa, event);
+	}
+}
+
+static void
+bfa_dport_sm_disabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
+{
+	bfa_trc(dport->bfa, event);
+
+	switch (event) {
+	case BFA_DPORT_SM_FWRSP:
+		dport->test_state = BFA_DPORT_ST_DISABLED;
+		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+		break;
+
+	case BFA_DPORT_SM_HWFAIL:
+		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+		bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
+		break;
+
+	case BFA_DPORT_SM_SCN:
+		/* no state change */
+		break;
+
+	default:
+		bfa_sm_fault(dport->bfa, event);
+	}
+}
+
+static void
+bfa_dport_sm_starting_qwait(struct bfa_dport_s *dport,
+			    enum bfa_dport_sm_event event)
+{
+	bfa_trc(dport->bfa, event);
+
+	switch (event) {
+	case BFA_DPORT_SM_QRESUME:
+		bfa_sm_set_state(dport, bfa_dport_sm_starting);
+		bfa_dport_send_req(dport, BFI_DPORT_START);
+		break;
+
+	case BFA_DPORT_SM_HWFAIL:
+		bfa_reqq_wcancel(&dport->reqq_wait);
+		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+		bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
+		break;
+
+	default:
+		bfa_sm_fault(dport->bfa, event);
+	}
+}
+
+static void
+bfa_dport_sm_starting(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
+{
+	bfa_trc(dport->bfa, event);
+
+	switch (event) {
+	case BFA_DPORT_SM_FWRSP:
+		memset(&dport->result, 0,
+				sizeof(struct bfa_diag_dport_result_s));
+		if (dport->i2hmsg.rsp.status == BFA_STATUS_DPORT_INV_SFP) {
+			dport->test_state = BFA_DPORT_ST_NO_SFP;
+		} else {
+			dport->test_state = BFA_DPORT_ST_INP;
+			bfa_dport_result_start(dport, BFA_DPORT_OPMODE_MANU);
+		}
+		/* fall through */
+
+	case BFA_DPORT_SM_REQFAIL:
+		bfa_sm_set_state(dport, bfa_dport_sm_enabled);
+		break;
+
+	case BFA_DPORT_SM_HWFAIL:
+		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+		bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
+		break;
+
+	default:
+		bfa_sm_fault(dport->bfa, event);
+	}
+}
+
+static void
+bfa_dport_sm_dynamic_disabling(struct bfa_dport_s *dport,
+			       enum bfa_dport_sm_event event)
+{
+	bfa_trc(dport->bfa, event);
+
+	switch (event) {
+	case BFA_DPORT_SM_SCN:
+		switch (dport->i2hmsg.scn.state) {
+		case BFI_DPORT_SCN_DDPORT_DISABLED:
+			bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+			dport->dynamic = BFA_FALSE;
+			bfa_fcport_enable(dport->bfa);
+			break;
+
+		default:
+			bfa_trc(dport->bfa, dport->i2hmsg.scn.state);
+			bfa_sm_fault(dport->bfa, event);
+		}
+		break;
+
+	case BFA_DPORT_SM_HWFAIL:
+		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+		bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
+		break;
+
+	default:
+		bfa_sm_fault(dport->bfa, event);
+	}
+}
+
+static void
+bfa_dport_sm_dynamic_disabling_qwait(struct bfa_dport_s *dport,
+			    enum bfa_dport_sm_event event)
+{
+	bfa_trc(dport->bfa, event);
+
+	switch (event) {
+	case BFA_DPORT_SM_QRESUME:
+		bfa_sm_set_state(dport, bfa_dport_sm_dynamic_disabling);
+		bfa_dport_send_req(dport, BFI_DPORT_DYN_DISABLE);
+		break;
+
+	case BFA_DPORT_SM_HWFAIL:
+		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
+		bfa_reqq_wcancel(&dport->reqq_wait);
+		bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
+		break;
+
+	case BFA_DPORT_SM_SCN:
+		/* ignore */
+		break;
+
+	default:
+		bfa_sm_fault(dport->bfa, event);
+	}
+}
+
+static bfa_boolean_t
+bfa_dport_send_req(struct bfa_dport_s *dport, enum bfi_dport_req req)
+{
+	struct bfi_diag_dport_req_s *m;
+
+	/*
+	 * check for room in queue to send request now
+	 */
+	m = bfa_reqq_next(dport->bfa, BFA_REQQ_DIAG);
+	if (!m) {
+		bfa_reqq_wait(dport->bfa, BFA_REQQ_DIAG, &dport->reqq_wait);
+		return BFA_FALSE;
+	}
+
+	bfi_h2i_set(m->mh, BFI_MC_DIAG, BFI_DIAG_H2I_DPORT,
+		    bfa_fn_lpu(dport->bfa));
+	m->req  = req;
+	if ((req == BFI_DPORT_ENABLE) || (req == BFI_DPORT_START)) {
+		m->lpcnt = cpu_to_be32(dport->lpcnt);
+		m->payload = cpu_to_be32(dport->payload);
+	}
+
+	/*
+	 * queue I/O message to firmware
+	 */
+	bfa_reqq_produce(dport->bfa, BFA_REQQ_DIAG, m->mh);
+
+	return BFA_TRUE;
+}
+
+static void
+bfa_dport_qresume(void *cbarg)
+{
+	struct bfa_dport_s *dport = cbarg;
+
+	bfa_sm_send_event(dport, BFA_DPORT_SM_QRESUME);
+}
+
+static void
+bfa_dport_req_comp(struct bfa_dport_s *dport, struct bfi_diag_dport_rsp_s *msg)
+{
+	msg->status = cpu_to_be32(msg->status);
+	dport->i2hmsg.rsp.status = msg->status;
+	dport->rp_pwwn = msg->pwwn;
+	dport->rp_nwwn = msg->nwwn;
+
+	if ((msg->status == BFA_STATUS_OK) ||
+	    (msg->status == BFA_STATUS_DPORT_NO_SFP)) {
+		bfa_trc(dport->bfa, msg->status);
+		bfa_trc(dport->bfa, dport->rp_pwwn);
+		bfa_trc(dport->bfa, dport->rp_nwwn);
+		bfa_sm_send_event(dport, BFA_DPORT_SM_FWRSP);
+	} else {
+		bfa_trc(dport->bfa, msg->status);
+		bfa_sm_send_event(dport, BFA_DPORT_SM_REQFAIL);
+	}
+	bfa_cb_fcdiag_dport(dport, msg->status);
+}
+
+static bfa_boolean_t
+bfa_dport_is_sending_req(struct bfa_dport_s *dport)
+{
+	if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling)	||
+	    bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait) ||
+	    bfa_sm_cmp_state(dport, bfa_dport_sm_disabling)	||
+	    bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait) ||
+	    bfa_sm_cmp_state(dport, bfa_dport_sm_starting)	||
+	    bfa_sm_cmp_state(dport, bfa_dport_sm_starting_qwait)) {
+		return BFA_TRUE;
+	} else {
+		return BFA_FALSE;
+	}
+}
+
+static void
+bfa_dport_scn(struct bfa_dport_s *dport, struct bfi_diag_dport_scn_s *msg)
+{
+	int i;
+	uint8_t subtesttype;
+
+	bfa_trc(dport->bfa, msg->state);
+	dport->i2hmsg.scn.state = msg->state;
+
+	switch (dport->i2hmsg.scn.state) {
+	case BFI_DPORT_SCN_TESTCOMP:
+		dport->result.end_time = ktime_get_real_seconds();
+		bfa_trc(dport->bfa, dport->result.end_time);
+
+		dport->result.status = msg->info.testcomp.status;
+		bfa_trc(dport->bfa, dport->result.status);
+
+		dport->result.roundtrip_latency =
+			cpu_to_be32(msg->info.testcomp.latency);
+		dport->result.est_cable_distance =
+			cpu_to_be32(msg->info.testcomp.distance);
+		dport->result.buffer_required =
+			be16_to_cpu(msg->info.testcomp.numbuffer);
+
+		dport->result.frmsz = be16_to_cpu(msg->info.testcomp.frm_sz);
+		dport->result.speed = msg->info.testcomp.speed;
+
+		bfa_trc(dport->bfa, dport->result.roundtrip_latency);
+		bfa_trc(dport->bfa, dport->result.est_cable_distance);
+		bfa_trc(dport->bfa, dport->result.buffer_required);
+		bfa_trc(dport->bfa, dport->result.frmsz);
+		bfa_trc(dport->bfa, dport->result.speed);
+
+		for (i = DPORT_TEST_ELOOP; i < DPORT_TEST_MAX; i++) {
+			dport->result.subtest[i].status =
+				msg->info.testcomp.subtest_status[i];
+			bfa_trc(dport->bfa, dport->result.subtest[i].status);
+		}
+		break;
+
+	case BFI_DPORT_SCN_TESTSKIP:
+	case BFI_DPORT_SCN_DDPORT_ENABLE:
+		memset(&dport->result, 0,
+				sizeof(struct bfa_diag_dport_result_s));
+		break;
+
+	case BFI_DPORT_SCN_TESTSTART:
+		memset(&dport->result, 0,
+				sizeof(struct bfa_diag_dport_result_s));
+		dport->rp_pwwn = msg->info.teststart.pwwn;
+		dport->rp_nwwn = msg->info.teststart.nwwn;
+		dport->lpcnt = cpu_to_be32(msg->info.teststart.numfrm);
+		bfa_dport_result_start(dport, msg->info.teststart.mode);
+		break;
+
+	case BFI_DPORT_SCN_SUBTESTSTART:
+		subtesttype = msg->info.teststart.type;
+		dport->result.subtest[subtesttype].start_time =
+			ktime_get_real_seconds();
+		dport->result.subtest[subtesttype].status =
+			DPORT_TEST_ST_INPRG;
+
+		bfa_trc(dport->bfa, subtesttype);
+		bfa_trc(dport->bfa,
+			dport->result.subtest[subtesttype].start_time);
+		break;
+
+	case BFI_DPORT_SCN_SFP_REMOVED:
+	case BFI_DPORT_SCN_DDPORT_DISABLED:
+	case BFI_DPORT_SCN_DDPORT_DISABLE:
+	case BFI_DPORT_SCN_FCPORT_DISABLE:
+		dport->result.status = DPORT_TEST_ST_IDLE;
+		break;
+
+	default:
+		bfa_sm_fault(dport->bfa, msg->state);
+	}
+
+	bfa_sm_send_event(dport, BFA_DPORT_SM_SCN);
+}
+
+/*
+ * Dport enable
+ *
+ * @param[in] *bfa   - bfa data struct
+ * @param[in] lpcnt  - loop count (0 selects the default)
+ * @param[in] pat    - frame pattern (0 selects the default)
+ * @param[in] cbfn   - completion callback
+ * @param[in] cbarg  - callback arg
+ */
+bfa_status_t
+bfa_dport_enable(struct bfa_s *bfa, u32 lpcnt, u32 pat,
+				bfa_cb_diag_t cbfn, void *cbarg)
+{
+	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+	struct bfa_dport_s  *dport = &fcdiag->dport;
+
+	/*
+	 * Dport is not supported on Mezz cards
+	 */
+	if (bfa_mfg_is_mezz(dport->bfa->ioc.attr->card_type)) {
+		bfa_trc(dport->bfa, BFA_STATUS_PBC);
+		return BFA_STATUS_CMD_NOTSUPP_MEZZ;
+	}
+
+	/*
+	 * Dport is only supported on CT2 and later ASICs
+	 */
+	if (!(bfa_asic_id_ct2(dport->bfa->ioc.pcidev.device_id))) {
+		bfa_trc(dport->bfa, dport->bfa->ioc.pcidev.device_id);
+		return BFA_STATUS_FEATURE_NOT_SUPPORTED;
+	}
+
+	/*
+	 * Check to see if IOC is down
+	 */
+	if (!bfa_iocfc_is_operational(bfa))
+		return BFA_STATUS_IOC_NON_OP;
+
+	/* if port is PBC disabled, return error */
+	if (bfa_fcport_is_pbcdisabled(bfa)) {
+		bfa_trc(dport->bfa, BFA_STATUS_PBC);
+		return BFA_STATUS_PBC;
+	}
+
+	/*
+	 * Check if port mode is FC port
+	 */
+	if (bfa_ioc_get_type(&bfa->ioc) != BFA_IOC_TYPE_FC) {
+		bfa_trc(dport->bfa, bfa_ioc_get_type(&bfa->ioc));
+		return BFA_STATUS_CMD_NOTSUPP_CNA;
+	}
+
+	/*
+	 * Check if port is in LOOP mode
+	 */
+	if ((bfa_fcport_get_cfg_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) ||
+	    (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP)) {
+		bfa_trc(dport->bfa, 0);
+		return BFA_STATUS_TOPOLOGY_LOOP;
+	}
+
+	/*
+	 * Check if port is TRUNK mode
+	 */
+	if (bfa_fcport_is_trunk_enabled(bfa)) {
+		bfa_trc(dport->bfa, 0);
+		return BFA_STATUS_ERROR_TRUNK_ENABLED;
+	}
+
+	/*
+	 * Check if diag loopback is running
+	 */
+	if (bfa_fcdiag_lb_is_running(bfa)) {
+		bfa_trc(dport->bfa, 0);
+		return BFA_STATUS_DIAG_BUSY;
+	}
+
+	/*
+	 * Check to see if port is disable or in dport state
+	 */
+	if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) &&
+	    (bfa_fcport_is_dport(bfa) == BFA_FALSE)) {
+		bfa_trc(dport->bfa, 0);
+		return BFA_STATUS_PORT_NOT_DISABLED;
+	}
+
+	/*
+	 * Check if dport is in dynamic mode
+	 */
+	if (dport->dynamic)
+		return BFA_STATUS_DDPORT_ERR;
+
+	/*
+	 * Check if dport is busy
+	 */
+	if (bfa_dport_is_sending_req(dport))
+		return BFA_STATUS_DEVBUSY;
+
+	/*
+	 * Check if dport is already enabled
+	 */
+	if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) {
+		bfa_trc(dport->bfa, 0);
+		return BFA_STATUS_DPORT_ENABLED;
+	}
+
+	bfa_trc(dport->bfa, lpcnt);
+	bfa_trc(dport->bfa, pat);
+	dport->lpcnt = (lpcnt) ? lpcnt : DPORT_ENABLE_LOOPCNT_DEFAULT;
+	dport->payload = (pat) ? pat : LB_PATTERN_DEFAULT;
+	dport->cbfn = cbfn;
+	dport->cbarg = cbarg;
+
+	bfa_sm_send_event(dport, BFA_DPORT_SM_ENABLE);
+	return BFA_STATUS_OK;
+}
+
+/*
+ *	Dport disable
+ *
+ *	@param[in] *bfa            - bfa data struct
+ */
+bfa_status_t
+bfa_dport_disable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg)
+{
+	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+	struct bfa_dport_s *dport = &fcdiag->dport;
+
+	if (bfa_ioc_is_disabled(&bfa->ioc))
+		return BFA_STATUS_IOC_DISABLED;
+
+	/* if port is PBC disabled, return error */
+	if (bfa_fcport_is_pbcdisabled(bfa)) {
+		bfa_trc(dport->bfa, BFA_STATUS_PBC);
+		return BFA_STATUS_PBC;
+	}
+
+	/*
+	 * Check if dport is in dynamic mode
+	 */
+	if (dport->dynamic)
+		return BFA_STATUS_DDPORT_ERR;
+
+	/*
+	 * Check to see if port is disable or in dport state
+	 */
+	if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) &&
+	    (bfa_fcport_is_dport(bfa) == BFA_FALSE)) {
+		bfa_trc(dport->bfa, 0);
+		return BFA_STATUS_PORT_NOT_DISABLED;
+	}
+
+	/*
+	 * Check if dport is busy
+	 */
+	if (bfa_dport_is_sending_req(dport))
+		return BFA_STATUS_DEVBUSY;
+
+	/*
+	 * Check if dport is already disabled
+	 */
+	if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabled)) {
+		bfa_trc(dport->bfa, 0);
+		return BFA_STATUS_DPORT_DISABLED;
+	}
+
+	dport->cbfn = cbfn;
+	dport->cbarg = cbarg;
+
+	bfa_sm_send_event(dport, BFA_DPORT_SM_DISABLE);
+	return BFA_STATUS_OK;
+}
+
+/*
+ * Dport start -- restart dport test
+ *
+ *   @param[in] *bfa	- bfa data struct
+ *   @param[in] lpcnt	- loop count (0 selects the default)
+ *   @param[in] pat	- frame pattern (0 selects the default)
+ *   @param[in] cbfn	- completion callback
+ *   @param[in] cbarg	- callback arg
+ */
+bfa_status_t
+bfa_dport_start(struct bfa_s *bfa, u32 lpcnt, u32 pat,
+			bfa_cb_diag_t cbfn, void *cbarg)
+{
+	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+	struct bfa_dport_s *dport = &fcdiag->dport;
+
+	/*
+	 * Check to see if IOC is down
+	 */
+	if (!bfa_iocfc_is_operational(bfa))
+		return BFA_STATUS_IOC_NON_OP;
+
+	/*
+	 * Check if dport is in dynamic mode
+	 */
+	if (dport->dynamic)
+		return BFA_STATUS_DDPORT_ERR;
+
+	/*
+	 * Check if dport is busy
+	 */
+	if (bfa_dport_is_sending_req(dport))
+		return BFA_STATUS_DEVBUSY;
+
+	/*
+	 * Check if dport is in enabled state.
+	 * The test can only be restarted once the previous run has completed.
+	 */
+	if (!bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) {
+		bfa_trc(dport->bfa, 0);
+		return BFA_STATUS_DPORT_DISABLED;
+	} else {
+		if (dport->test_state == BFA_DPORT_ST_NO_SFP)
+			return BFA_STATUS_DPORT_INV_SFP;
+
+		if (dport->test_state == BFA_DPORT_ST_INP)
+			return BFA_STATUS_DEVBUSY;
+
+		WARN_ON(dport->test_state != BFA_DPORT_ST_COMP);
+	}
+
+	bfa_trc(dport->bfa, lpcnt);
+	bfa_trc(dport->bfa, pat);
+
+	dport->lpcnt = (lpcnt) ? lpcnt : DPORT_ENABLE_LOOPCNT_DEFAULT;
+	dport->payload = (pat) ? pat : LB_PATTERN_DEFAULT;
+
+	dport->cbfn = cbfn;
+	dport->cbarg = cbarg;
+
+	bfa_sm_send_event(dport, BFA_DPORT_SM_START);
+	return BFA_STATUS_OK;
+}
+
+/*
+ * Dport show -- return dport test result
+ *
+ *   @param[in] *bfa		- bfa data struct
+ */
+bfa_status_t
+bfa_dport_show(struct bfa_s *bfa, struct bfa_diag_dport_result_s *result)
+{
+	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+	struct bfa_dport_s *dport = &fcdiag->dport;
+
+	/*
+	 * Check to see if IOC is down
+	 */
+	if (!bfa_iocfc_is_operational(bfa))
+		return BFA_STATUS_IOC_NON_OP;
+
+	/*
+	 * Check if dport is busy
+	 */
+	if (bfa_dport_is_sending_req(dport))
+		return BFA_STATUS_DEVBUSY;
+
+	/*
+	 * Check if dport is in enabled state.
+	 */
+	if (!bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) {
+		bfa_trc(dport->bfa, 0);
+		return BFA_STATUS_DPORT_DISABLED;
+	}
+
+	/*
+	 * Check if there is SFP
+	 */
+	if (dport->test_state == BFA_DPORT_ST_NO_SFP)
+		return BFA_STATUS_DPORT_INV_SFP;
+
+	memcpy(result, &dport->result, sizeof(struct bfa_diag_dport_result_s));
+
+	return BFA_STATUS_OK;
+}
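+
+/*
+ * The entry points above compose into the usual D-port test sequence
+ * (illustrative; error handling elided):
+ *
+ *	bfa_dport_enable(bfa, 0, 0, dport_cb, arg);	(0s pick defaults)
+ *	... wait for dport_cb, then for the TESTCOMP SCN ...
+ *	bfa_dport_show(bfa, &result);			(fetch results)
+ *	bfa_dport_start(bfa, lpcnt, pat, dport_cb, arg); (optional re-run)
+ *	bfa_dport_disable(bfa, dport_cb, arg);
+ */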
diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h
new file mode 100644
index 0000000..7e8fb62
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_svc.h
@@ -0,0 +1,764 @@
+/*
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014- QLogic Corporation.
+ * All rights reserved
+ * www.qlogic.com
+ *
+ * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __BFA_SVC_H__
+#define __BFA_SVC_H__
+
+#include "bfa_cs.h"
+#include "bfi_ms.h"
+
+
+/*
+ * Scatter-gather DMA related defines
+ */
+#define BFA_SGPG_MIN	(16)
+#define BFA_SGPG_MAX	(8192)
+
+/*
+ * Alignment macro for SG page allocation
+ */
+#define BFA_SGPG_ROUNDUP(_l) (((_l) + (sizeof(struct bfi_sgpg_s) - 1))	\
+			      & ~(sizeof(struct bfi_sgpg_s) - 1))
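+/*
+ * e.g. assuming a hypothetical 512-byte bfi_sgpg_s:
+ *	BFA_SGPG_ROUNDUP(513)  = 1024
+ *	BFA_SGPG_ROUNDUP(1024) = 1024
+ */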
+
+struct bfa_sgpg_wqe_s {
+	struct list_head qe;	/*  queue sg page element	*/
+	int	nsgpg;		/*  pages to be allocated	*/
+	int	nsgpg_total;	/*  total pages required	*/
+	void	(*cbfn) (void *cbarg);	/*  callback function	*/
+	void	*cbarg;		/*  callback arg		*/
+	struct list_head sgpg_q;	/*  queue of alloced sgpgs	*/
+};
+
+struct bfa_sgpg_s {
+	struct list_head  qe;	/*  queue sg page element	*/
+	struct bfi_sgpg_s *sgpg;	/*  va of SG page		*/
+	union bfi_addr_u sgpg_pa;	/*  pa of SG page		*/
+};
+
+/*
+ * Given number of SG elements, BFA_SGPG_NPAGE() returns the number of
+ * SG pages required.
+ */
+#define BFA_SGPG_NPAGE(_nsges)  (((_nsges) / BFI_SGPG_DATA_SGES) + 1)
+
+/* Max SGPG dma segs required */
+#define BFA_SGPG_DMA_SEGS	\
+	BFI_MEM_DMA_NSEGS(BFA_SGPG_MAX, (uint32_t)sizeof(struct bfi_sgpg_s))
+
+struct bfa_sgpg_mod_s {
+	struct bfa_s *bfa;
+	int		num_sgpgs;	/*  number of SG pages		*/
+	int		free_sgpgs;	/*  number of free SG pages	*/
+	struct list_head	sgpg_q;		/*  queue of free SG pages */
+	struct list_head	sgpg_wait_q;	/*  wait queue for SG pages */
+	struct bfa_mem_dma_s	dma_seg[BFA_SGPG_DMA_SEGS];
+	struct bfa_mem_kva_s	kva_seg;
+};
+#define BFA_SGPG_MOD(__bfa)	(&(__bfa)->modules.sgpg_mod)
+#define BFA_MEM_SGPG_KVA(__bfa) (&(BFA_SGPG_MOD(__bfa)->kva_seg))
+
+bfa_status_t bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q,
+			     int nsgpgs);
+void bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs);
+void bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe,
+		    void (*cbfn) (void *cbarg), void *cbarg);
+void bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpgs);
+void bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe);
+
+
+/*
+ * FCXP related defines
+ */
+#define BFA_FCXP_MIN		(1)
+#define BFA_FCXP_MAX		(256)
+#define BFA_FCXP_MAX_IBUF_SZ	(2 * 1024 + 256)
+#define BFA_FCXP_MAX_LBUF_SZ	(4 * 1024 + 256)
+
+/* Max FCXP dma segs required */
+#define BFA_FCXP_DMA_SEGS						\
+	BFI_MEM_DMA_NSEGS(BFA_FCXP_MAX,					\
+		(u32)BFA_FCXP_MAX_IBUF_SZ + BFA_FCXP_MAX_LBUF_SZ)
+
+struct bfa_fcxp_mod_s {
+	struct bfa_s      *bfa;		/* backpointer to BFA */
+	struct bfa_fcxp_s *fcxp_list;	/* array of FCXPs */
+	u16	num_fcxps;	/* max num FCXP requests */
+	struct list_head fcxp_req_free_q; /* free FCXPs used for sending req */
+	struct list_head fcxp_rsp_free_q; /* free FCXPs used for sending rsp */
+	struct list_head fcxp_active_q;	/* active FCXPs */
+	struct list_head req_wait_q;	/* wait queue for free req_fcxp */
+	struct list_head rsp_wait_q;	/* wait queue for free rsp_fcxp */
+	struct list_head fcxp_req_unused_q;	/* unused req_fcxps */
+	struct list_head fcxp_rsp_unused_q;	/* unused rsp_fcxps */
+	u32	req_pld_sz;
+	u32	rsp_pld_sz;
+	struct bfa_mem_dma_s dma_seg[BFA_FCXP_DMA_SEGS];
+	struct bfa_mem_kva_s kva_seg;
+};
+
+#define BFA_FCXP_MOD(__bfa)		(&(__bfa)->modules.fcxp_mod)
+#define BFA_FCXP_FROM_TAG(__mod, __tag)	(&(__mod)->fcxp_list[__tag])
+#define BFA_MEM_FCXP_KVA(__bfa) (&(BFA_FCXP_MOD(__bfa)->kva_seg))
+
+typedef void    (*fcxp_send_cb_t) (struct bfa_s *ioc, struct bfa_fcxp_s *fcxp,
+				   void *cb_arg, bfa_status_t req_status,
+				   u32 rsp_len, u32 resid_len,
+				   struct fchs_s *rsp_fchs);
+
+typedef u64 (*bfa_fcxp_get_sgaddr_t) (void *bfad_fcxp, int sgeid);
+typedef u32 (*bfa_fcxp_get_sglen_t) (void *bfad_fcxp, int sgeid);
+typedef void (*bfa_cb_fcxp_send_t) (void *bfad_fcxp, struct bfa_fcxp_s *fcxp,
+				    void *cbarg, enum bfa_status req_status,
+				    u32 rsp_len, u32 resid_len,
+				    struct fchs_s *rsp_fchs);
+typedef void (*bfa_fcxp_alloc_cbfn_t) (void *cbarg, struct bfa_fcxp_s *fcxp);
+
+
+
+/*
+ * Information needed for a FCXP request
+ */
+struct bfa_fcxp_req_info_s {
+	struct bfa_rport_s *bfa_rport;
+					/* Pointer to the bfa rport that was
+					 * returned from bfa_rport_create().
+					 * This could be left NULL for WKA or
+					 * for FCXP interactions before the
+					 * rport nexus is established
+					 */
+	struct fchs_s	fchs;	/*  request FC header structure */
+	u8		cts;	/*  continuous sequence */
+	u8		class;	/*  FC class for the request/response */
+	u16	max_frmsz;	/*  max send frame size */
+	u16	vf_id;	/*  vsan tag if applicable */
+	u8		lp_tag;	/*  lport tag */
+	u32	req_tot_len;	/*  request payload total length */
+};
+
+struct bfa_fcxp_rsp_info_s {
+	struct fchs_s	rsp_fchs;
+				/* Response frame's FC header will
+				 * be sent back in this field */
+	u8		rsp_timeout;
+				/* timeout in seconds, 0-no response */
+	u8		rsvd2[3];
+	u32	rsp_maxlen;	/*  max response length expected */
+};
+
+struct bfa_fcxp_s {
+	struct list_head	qe;		/*  fcxp queue element */
+	bfa_sm_t	sm;		/*  state machine */
+	void		*caller;	/*  driver or fcs */
+	struct bfa_fcxp_mod_s *fcxp_mod;
+	/*  back pointer to fcxp mod */
+	u16	fcxp_tag;	/*  internal tag */
+	struct bfa_fcxp_req_info_s req_info;
+	/*  request info */
+	struct bfa_fcxp_rsp_info_s rsp_info;
+	/*  response info */
+	u8	use_ireqbuf;	/*  use internal req buf */
+	u8		use_irspbuf;	/*  use internal rsp buf */
+	u32	nreq_sgles;	/*  num request SGLEs */
+	u32	nrsp_sgles;	/*  num response SGLEs */
+	struct list_head req_sgpg_q;	/*  SG pages for request buf */
+	struct list_head req_sgpg_wqe;	/*  wait queue for req SG page */
+	struct list_head rsp_sgpg_q;	/*  SG pages for response buf */
+	struct list_head rsp_sgpg_wqe;	/*  wait queue for rsp SG page */
+
+	bfa_fcxp_get_sgaddr_t req_sga_cbfn;
+	/*  SG elem addr user function */
+	bfa_fcxp_get_sglen_t req_sglen_cbfn;
+	/*  SG elem len user function */
+	bfa_fcxp_get_sgaddr_t rsp_sga_cbfn;
+	/*  SG elem addr user function */
+	bfa_fcxp_get_sglen_t rsp_sglen_cbfn;
+	/*  SG elem len user function */
+	bfa_cb_fcxp_send_t send_cbfn;   /*  send completion callback */
+	void		*send_cbarg;	/*  callback arg */
+	struct bfa_sge_s   req_sge[BFA_FCXP_MAX_SGES];
+	/*  req SG elems */
+	struct bfa_sge_s   rsp_sge[BFA_FCXP_MAX_SGES];
+	/*  rsp SG elems */
+	u8		rsp_status;	/*  comp: rsp status */
+	u32	rsp_len;	/*  comp: actual response len */
+	u32	residue_len;	/*  comp: residual rsp length */
+	struct fchs_s	rsp_fchs;	/*  comp: response fchs */
+	struct bfa_cb_qe_s    hcb_qe;	/*  comp: callback qelem */
+	struct bfa_reqq_wait_s	reqq_wqe;
+	bfa_boolean_t	reqq_waiting;
+	bfa_boolean_t	req_rsp;	/* Used to track req/rsp fcxp */
+};
+
+struct bfa_fcxp_wqe_s {
+	struct list_head		qe;
+	bfa_fcxp_alloc_cbfn_t	alloc_cbfn;
+	void		*alloc_cbarg;
+	void		*caller;
+	struct bfa_s	*bfa;
+	int		nreq_sgles;
+	int		nrsp_sgles;
+	bfa_fcxp_get_sgaddr_t	req_sga_cbfn;
+	bfa_fcxp_get_sglen_t	req_sglen_cbfn;
+	bfa_fcxp_get_sgaddr_t	rsp_sga_cbfn;
+	bfa_fcxp_get_sglen_t	rsp_sglen_cbfn;
+};
+
+#define BFA_FCXP_REQ_PLD(_fcxp)		(bfa_fcxp_get_reqbuf(_fcxp))
+#define BFA_FCXP_RSP_FCHS(_fcxp)	(&((_fcxp)->rsp_info.fchs))
+#define BFA_FCXP_RSP_PLD(_fcxp)		(bfa_fcxp_get_rspbuf(_fcxp))
+
+#define BFA_FCXP_REQ_PLD_PA(_fcxp)					      \
+	bfa_mem_get_dmabuf_pa((_fcxp)->fcxp_mod, (_fcxp)->fcxp_tag,	      \
+		(_fcxp)->fcxp_mod->req_pld_sz + (_fcxp)->fcxp_mod->rsp_pld_sz)
+
+/* fcxp_buf = req_buf + rsp_buf :- add req_buf_sz to get to rsp_buf */
+#define BFA_FCXP_RSP_PLD_PA(_fcxp)					       \
+	(bfa_mem_get_dmabuf_pa((_fcxp)->fcxp_mod, (_fcxp)->fcxp_tag,	       \
+	      (_fcxp)->fcxp_mod->req_pld_sz + (_fcxp)->fcxp_mod->rsp_pld_sz) + \
+	      (_fcxp)->fcxp_mod->req_pld_sz)
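+
+/*
+ * Per-FCXP payload layout implied by the two macros above (sketch):
+ *
+ *	pa                          pa + req_pld_sz
+ *	|<------ req_pld_sz ------>|<------ rsp_pld_sz ------>|
+ *	[     request payload      |     response payload     ]
+ */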
+
+void	bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
+
+
+/*
+ * RPORT related defines
+ */
+enum bfa_rport_event {
+	BFA_RPORT_SM_CREATE	= 1,	/*  rport create event          */
+	BFA_RPORT_SM_DELETE	= 2,	/*  deleting an existing rport  */
+	BFA_RPORT_SM_ONLINE	= 3,	/*  rport is online             */
+	BFA_RPORT_SM_OFFLINE	= 4,	/*  rport is offline            */
+	BFA_RPORT_SM_FWRSP	= 5,	/*  firmware response           */
+	BFA_RPORT_SM_HWFAIL	= 6,	/*  IOC h/w failure             */
+	BFA_RPORT_SM_QOS_SCN	= 7,	/*  QoS SCN from firmware       */
+	BFA_RPORT_SM_SET_SPEED	= 8,	/*  Set Rport Speed             */
+	BFA_RPORT_SM_QRESUME	= 9,	/*  space in request queue      */
+};
+
+#define BFA_RPORT_MIN	4
+
+struct bfa_rport_mod_s {
+	struct bfa_rport_s *rps_list;	/*  list of rports	*/
+	struct list_head	rp_free_q;	/*  free bfa_rports	*/
+	struct list_head	rp_active_q;	/*  active bfa_rports	*/
+	struct list_head	rp_unused_q;	/*  unused bfa rports  */
+	u16	num_rports;	/*  number of rports	*/
+	struct bfa_mem_kva_s	kva_seg;
+};
+
+#define BFA_RPORT_MOD(__bfa)	(&(__bfa)->modules.rport_mod)
+#define BFA_MEM_RPORT_KVA(__bfa) (&(BFA_RPORT_MOD(__bfa)->kva_seg))
+
+/*
+ * Convert rport tag to RPORT
+ */
+#define BFA_RPORT_FROM_TAG(__bfa, _tag)				\
+	(BFA_RPORT_MOD(__bfa)->rps_list +			\
+	 ((_tag) & (BFA_RPORT_MOD(__bfa)->num_rports - 1)))
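+
+/*
+ * Note: the mask above relies on num_rports being a power of two.
+ * Worked example: with num_rports == 256, tag 0x0103 selects
+ * rps_list[0x0103 & 0xff] == rps_list[3].
+ */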
+
+/*
+ * protected functions
+ */
+void	bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
+void	bfa_rport_res_recfg(struct bfa_s *bfa, u16 num_rport_fw);
+
+/*
+ *	BFA rport information.
+ */
+struct bfa_rport_info_s {
+	u16	max_frmsz;	/*  max rcv pdu size		    */
+	u32	pid:24,	/*  remote port ID		    */
+		lp_tag:8;	/*  lport tag		    */
+	u32	local_pid:24,	/*  local port ID		    */
+		cisc:8;	/*  CIRO supported		    */
+	u8	fc_class;	/*  supported FC classes. enum fc_cos */
+	u8	vf_en;		/*  virtual fabric enable	    */
+	u16	vf_id;		/*  virtual fabric ID		    */
+	enum bfa_port_speed speed;	/*  Rport's current speed	    */
+};
+
+/*
+ * BFA rport data structure
+ */
+struct bfa_rport_s {
+	struct list_head	qe;	/*  queue element		    */
+	bfa_sm_t	sm;		/*  state machine		    */
+	struct bfa_s	*bfa;		/*  backpointer to BFA		    */
+	void		*rport_drv;	/*  fcs/driver rport object	    */
+	u16	fw_handle;	/*  firmware rport handle	    */
+	u16	rport_tag;	/*  BFA rport tag		    */
+	u8	lun_mask;	/*  LUN mask flag		    */
+	struct bfa_rport_info_s rport_info; /*  rport info from fcs/driver */
+	struct bfa_reqq_wait_s reqq_wait; /*  to wait for room in reqq     */
+	struct bfa_cb_qe_s hcb_qe;	/*  BFA callback qelem		    */
+	struct bfa_rport_hal_stats_s stats; /*  BFA rport statistics	    */
+	struct bfa_rport_qos_attr_s qos_attr;
+	union a {
+		bfa_status_t	status;	/*  f/w status */
+		void		*fw_msg; /*  QoS scn event		    */
+	} event_arg;
+};
+#define BFA_RPORT_FC_COS(_rport)	((_rport)->rport_info.fc_class)
+
+
+/*
+ * UF - unsolicited receive related defines
+ */
+
+#define BFA_UF_MIN	(4)
+#define BFA_UF_MAX	(256)
+
+struct bfa_uf_s {
+	struct list_head	qe;	/*  queue element		*/
+	struct bfa_s		*bfa;	/*  bfa instance		*/
+	u16	uf_tag;		/*  identifying tag for fw msgs	*/
+	u16	vf_id;
+	u16	src_rport_handle;
+	u16	rsvd;
+	u8		*data_ptr;
+	u16	data_len;	/*  actual receive length	*/
+	u16	pb_len;		/*  posted buffer length	*/
+	void		*buf_kva;	/*  buffer virtual address	*/
+	u64	buf_pa;		/*  buffer physical address	*/
+	struct bfa_cb_qe_s hcb_qe;	/*  comp: BFA comp qelem	*/
+	struct bfa_sge_s sges[BFI_SGE_INLINE_MAX];
+};
+
+/*
+ *      Callback prototype for unsolicited frame receive handler.
+ *
+ * @param[in]           cbarg           callback arg for receive handler
+ * @param[in]           uf              unsolicited frame descriptor
+ *
+ * @return None
+ */
+typedef void (*bfa_cb_uf_recv_t) (void *cbarg, struct bfa_uf_s *uf);
+
+#define BFA_UF_BUFSZ	(2 * 1024 + 256)
+
+struct bfa_uf_buf_s {
+	u8	d[BFA_UF_BUFSZ];
+};
+
+#define BFA_PER_UF_DMA_SZ	\
+	(u32)BFA_ROUNDUP(sizeof(struct bfa_uf_buf_s), BFA_DMA_ALIGN_SZ)
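+
+/*
+ * Worked example (assuming BFA_DMA_ALIGN_SZ is 256; it is not shown
+ * here): sizeof(struct bfa_uf_buf_s) == 2304, already a multiple of
+ * 256, so BFA_PER_UF_DMA_SZ == 2304.
+ */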
+
+/* Max UF dma segs required */
+#define BFA_UF_DMA_SEGS BFI_MEM_DMA_NSEGS(BFA_UF_MAX, BFA_PER_UF_DMA_SZ)
+
+struct bfa_uf_mod_s {
+	struct bfa_s *bfa;		/*  back pointer to BFA */
+	struct bfa_uf_s *uf_list;	/*  array of UFs */
+	u16	num_ufs;	/*  num unsolicited rx frames */
+	struct list_head	uf_free_q;	/*  free UFs */
+	struct list_head	uf_posted_q;	/*  UFs posted to IOC */
+	struct list_head	uf_unused_q;	/*  unused UFs */
+	struct bfi_uf_buf_post_s *uf_buf_posts;
+	/*  pre-built UF post msgs */
+	bfa_cb_uf_recv_t ufrecv;	/*  uf recv handler function */
+	void		*cbarg;		/*  uf receive handler arg */
+	struct bfa_mem_dma_s	dma_seg[BFA_UF_DMA_SEGS];
+	struct bfa_mem_kva_s	kva_seg;
+};
+
+#define BFA_UF_MOD(__bfa)	(&(__bfa)->modules.uf_mod)
+#define BFA_MEM_UF_KVA(__bfa)	(&(BFA_UF_MOD(__bfa)->kva_seg))
+
+#define ufm_pbs_pa(_ufmod, _uftag)					\
+	bfa_mem_get_dmabuf_pa(_ufmod, _uftag, BFA_PER_UF_DMA_SZ)
+
+void	bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
+void	bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw);
+
+/*
+ * LPS - bfa lport login/logout service interface
+ */
+struct bfa_lps_s {
+	struct list_head	qe;	/*  queue element		*/
+	struct bfa_s	*bfa;		/*  parent bfa instance	*/
+	bfa_sm_t	sm;		/*  finite state machine	*/
+	u8		bfa_tag;	/*  lport tag		*/
+	u8		fw_tag;		/*  lport fw tag                */
+	u8		reqq;		/*  lport request queue	*/
+	u8		alpa;		/*  ALPA for loop topologies	*/
+	u32	lp_pid;		/*  lport port ID		*/
+	bfa_boolean_t	fdisc;		/*  snd FDISC instead of FLOGI	*/
+	bfa_boolean_t	auth_en;	/*  enable authentication	*/
+	bfa_boolean_t	auth_req;	/*  authentication required	*/
+	bfa_boolean_t	npiv_en;	/*  NPIV is allowed by peer	*/
+	bfa_boolean_t	fport;		/*  attached peer is F_PORT	*/
+	bfa_boolean_t	brcd_switch;	/*  attached peer is brcd sw	*/
+	bfa_status_t	status;		/*  login status		*/
+	u16		pdusz;		/*  max receive PDU size	*/
+	u16		pr_bbcred;	/*  BB_CREDIT from peer		*/
+	u8		lsrjt_rsn;	/*  LSRJT reason		*/
+	u8		lsrjt_expl;	/*  LSRJT explanation		*/
+	u8		lun_mask;	/*  LUN mask flag		*/
+	wwn_t		pwwn;		/*  port wwn of lport		*/
+	wwn_t		nwwn;		/*  node wwn of lport		*/
+	wwn_t		pr_pwwn;	/*  port wwn of lport peer	*/
+	wwn_t		pr_nwwn;	/*  node wwn of lport peer	*/
+	mac_t		lp_mac;		/*  fpma/spma MAC for lport	*/
+	mac_t		fcf_mac;	/*  FCF MAC of lport		*/
+	struct bfa_reqq_wait_s	wqe;	/*  request wait queue element	*/
+	void		*uarg;		/*  user callback arg		*/
+	struct bfa_cb_qe_s hcb_qe;	/*  comp: callback qelem	*/
+	struct bfi_lps_login_rsp_s *loginrsp;
+	bfa_eproto_status_t ext_status;
+};
+
+struct bfa_lps_mod_s {
+	struct list_head		lps_free_q;
+	struct list_head		lps_active_q;
+	struct list_head		lps_login_q;
+	struct bfa_lps_s	*lps_arr;
+	int			num_lps;
+	struct bfa_mem_kva_s	kva_seg;
+};
+
+#define BFA_LPS_MOD(__bfa)		(&(__bfa)->modules.lps_mod)
+#define BFA_LPS_FROM_TAG(__mod, __tag)	(&(__mod)->lps_arr[__tag])
+#define BFA_MEM_LPS_KVA(__bfa)	(&(BFA_LPS_MOD(__bfa)->kva_seg))
+
+/*
+ * external functions
+ */
+void	bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
+
+
+/*
+ * FCPORT related defines
+ */
+
+#define BFA_FCPORT(_bfa)	(&((_bfa)->modules.port))
+
+/*
+ * Link notification data structure
+ */
+struct bfa_fcport_ln_s {
+	struct bfa_fcport_s	*fcport;
+	bfa_sm_t		sm;
+	struct bfa_cb_qe_s	ln_qe;	/*  BFA callback queue elem for ln */
+	enum bfa_port_linkstate ln_event; /*  ln event for callback */
+};
+
+struct bfa_fcport_trunk_s {
+	struct bfa_trunk_attr_s	attr;
+};
+
+/*
+ * BFA FC port data structure
+ */
+struct bfa_fcport_s {
+	struct bfa_s		*bfa;	/*  parent BFA instance */
+	bfa_sm_t		sm;	/*  port state machine */
+	wwn_t			nwwn;	/*  node wwn of physical port */
+	wwn_t			pwwn;	/*  port wwn of physical port */
+	enum bfa_port_speed speed_sup;
+	/*  supported speeds */
+	enum bfa_port_speed speed;	/*  current speed */
+	enum bfa_port_topology topology;	/*  current topology */
+	u8			rsvd[3];
+	u8			myalpa;	/*  my ALPA in LOOP topology */
+	u8			alpabm_valid; /* alpa bitmap valid or not */
+	struct fc_alpabm_s	alpabm;	/* alpa bitmap */
+	struct bfa_port_cfg_s	cfg;	/*  current port configuration */
+	bfa_boolean_t		use_flash_cfg; /* get port cfg from flash */
+	struct bfa_qos_attr_s  qos_attr;   /* QoS Attributes */
+	struct bfa_qos_vc_attr_s qos_vc_attr;  /*  VC info from ELP */
+	struct bfa_reqq_wait_s	reqq_wait;
+	/*  to wait for room in reqq */
+	struct bfa_reqq_wait_s	svcreq_wait;
+	/*  to wait for room in reqq */
+	struct bfa_reqq_wait_s	stats_reqq_wait;
+	/*  to wait for room in reqq (stats) */
+	void			*event_cbarg;
+	void			(*event_cbfn) (void *cbarg,
+					       enum bfa_port_linkstate event);
+	union {
+		union bfi_fcport_i2h_msg_u i2hmsg;
+	} event_arg;
+	void			*bfad;	/*  BFA driver handle */
+	struct bfa_fcport_ln_s	ln; /*  Link Notification */
+	struct bfa_cb_qe_s	hcb_qe;	/*  BFA callback queue elem */
+	struct bfa_timer_s	timer;	/*  timer */
+	u32		msgtag;	/*  firmware msg tag for reply */
+	u8			*stats_kva;
+	u64		stats_pa;
+	union bfa_fcport_stats_u *stats;
+	bfa_status_t		stats_status; /*  stats/statsclr status */
+	struct list_head	stats_pending_q;
+	struct list_head	statsclr_pending_q;
+	bfa_boolean_t		stats_qfull;
+	time64_t		stats_reset_time; /*  stats reset time stamp */
+	bfa_boolean_t		diag_busy; /*  diag busy status */
+	bfa_boolean_t		beacon; /*  port beacon status */
+	bfa_boolean_t		link_e2e_beacon; /*  link beacon status */
+	struct bfa_fcport_trunk_s trunk;
+	u16		fcoe_vlan;
+	struct bfa_mem_dma_s	fcport_dma;
+	bfa_boolean_t		stats_dma_ready;
+	struct bfa_bbcr_attr_s	bbcr_attr;
+	enum bfa_fec_state_s	fec_state;
+};
+
+#define BFA_FCPORT_MOD(__bfa)	(&(__bfa)->modules.fcport)
+#define BFA_MEM_FCPORT_DMA(__bfa) (&(BFA_FCPORT_MOD(__bfa)->fcport_dma))
+
+/*
+ * protected functions
+ */
+void bfa_fcport_init(struct bfa_s *bfa);
+void bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
+
+/*
+ * bfa fcport API functions
+ */
+bfa_status_t bfa_fcport_enable(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_disable(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_cfg_speed(struct bfa_s *bfa,
+				  enum bfa_port_speed speed);
+enum bfa_port_speed bfa_fcport_get_speed(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_cfg_topology(struct bfa_s *bfa,
+				     enum bfa_port_topology topo);
+enum bfa_port_topology bfa_fcport_get_topology(struct bfa_s *bfa);
+enum bfa_port_topology bfa_fcport_get_cfg_topology(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa);
+bfa_boolean_t bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa);
+u8 bfa_fcport_get_myalpa(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_clr_hardalpa(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxsize);
+u16 bfa_fcport_get_maxfrsize(struct bfa_s *bfa);
+u8 bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa);
+void bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr);
+wwn_t bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node);
+void bfa_fcport_event_register(struct bfa_s *bfa,
+			void (*event_cbfn) (void *cbarg,
+			enum bfa_port_linkstate event), void *event_cbarg);
+bfa_boolean_t bfa_fcport_is_disabled(struct bfa_s *bfa);
+bfa_boolean_t bfa_fcport_is_dport(struct bfa_s *bfa);
+bfa_boolean_t bfa_fcport_is_ddport(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_set_qos_bw(struct bfa_s *bfa,
+				   struct bfa_qos_bw_s *qos_bw);
+enum bfa_port_speed bfa_fcport_get_ratelim_speed(struct bfa_s *bfa);
+
+void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit);
+bfa_boolean_t     bfa_fcport_is_ratelim(struct bfa_s *bfa);
+void bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
+			bfa_boolean_t link_e2e_beacon);
+bfa_boolean_t	bfa_fcport_is_linkup(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_get_stats(struct bfa_s *bfa,
+			struct bfa_cb_pending_q_s *cb);
+bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa,
+			struct bfa_cb_pending_q_s *cb);
+bfa_boolean_t bfa_fcport_is_qos_enabled(struct bfa_s *bfa);
+bfa_boolean_t bfa_fcport_is_trunk_enabled(struct bfa_s *bfa);
+void bfa_fcport_dportenable(struct bfa_s *bfa);
+void bfa_fcport_dportdisable(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_is_pbcdisabled(struct bfa_s *bfa);
+void bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state);
+bfa_status_t bfa_fcport_cfg_bbcr(struct bfa_s *bfa,
+			bfa_boolean_t on_off, u8 bb_scn);
+bfa_status_t bfa_fcport_get_bbcr_attr(struct bfa_s *bfa,
+			struct bfa_bbcr_attr_s *bbcr_attr);
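+
+/*
+ * Usage sketch (illustrative only; the my_* names are hypothetical):
+ * register for link state notifications delivered by the fcport module.
+ *
+ *	static void my_link_event(void *cbarg, enum bfa_port_linkstate event)
+ *	{
+ *		// react to linkup/linkdown here
+ *	}
+ *
+ *	bfa_fcport_event_register(bfa, my_link_event, my_drv);
+ */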
+
+/*
+ * bfa rport API functions
+ */
+struct bfa_rport_s *bfa_rport_create(struct bfa_s *bfa, void *rport_drv);
+void bfa_rport_online(struct bfa_rport_s *rport,
+		      struct bfa_rport_info_s *rport_info);
+void bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed);
+void bfa_cb_rport_online(void *rport);
+void bfa_cb_rport_offline(void *rport);
+void bfa_cb_rport_qos_scn_flowid(void *rport,
+				 struct bfa_rport_qos_attr_s old_qos_attr,
+				 struct bfa_rport_qos_attr_s new_qos_attr);
+void bfa_cb_rport_scn_online(struct bfa_s *bfa);
+void bfa_cb_rport_scn_offline(struct bfa_s *bfa);
+void bfa_cb_rport_scn_no_dev(void *rp);
+void bfa_cb_rport_qos_scn_prio(void *rport,
+			       struct bfa_rport_qos_attr_s old_qos_attr,
+			       struct bfa_rport_qos_attr_s new_qos_attr);
+
+/*
+ *	Rport LUN masking related
+ */
+#define BFA_RPORT_TAG_INVALID	0xffff
+#define BFA_LP_TAG_INVALID	0xff
+void	bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
+void	bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
+
+/*
+ * bfa fcxp API functions
+ */
+struct bfa_fcxp_s *bfa_fcxp_req_rsp_alloc(void *bfad_fcxp, struct bfa_s *bfa,
+				  int nreq_sgles, int nrsp_sgles,
+				  bfa_fcxp_get_sgaddr_t get_req_sga,
+				  bfa_fcxp_get_sglen_t get_req_sglen,
+				  bfa_fcxp_get_sgaddr_t get_rsp_sga,
+				  bfa_fcxp_get_sglen_t get_rsp_sglen,
+				  bfa_boolean_t req);
+void bfa_fcxp_req_rsp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
+				bfa_fcxp_alloc_cbfn_t alloc_cbfn,
+				void *cbarg, void *bfad_fcxp,
+				int nreq_sgles, int nrsp_sgles,
+				bfa_fcxp_get_sgaddr_t get_req_sga,
+				bfa_fcxp_get_sglen_t get_req_sglen,
+				bfa_fcxp_get_sgaddr_t get_rsp_sga,
+				bfa_fcxp_get_sglen_t get_rsp_sglen,
+				bfa_boolean_t req);
+void bfa_fcxp_walloc_cancel(struct bfa_s *bfa,
+			    struct bfa_fcxp_wqe_s *wqe);
+void bfa_fcxp_discard(struct bfa_fcxp_s *fcxp);
+
+void *bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp);
+void *bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp);
+
+void bfa_fcxp_free(struct bfa_fcxp_s *fcxp);
+
+void bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
+		   u16 vf_id, u8 lp_tag,
+		   bfa_boolean_t cts, enum fc_cos cos,
+		   u32 reqlen, struct fchs_s *fchs,
+		   bfa_cb_fcxp_send_t cbfn,
+		   void *cbarg,
+		   u32 rsp_maxlen, u8 rsp_timeout);
+bfa_status_t bfa_fcxp_abort(struct bfa_fcxp_s *fcxp);
+u32 bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp);
+u32 bfa_fcxp_get_maxrsp(struct bfa_s *bfa);
+void bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw);
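+
+/*
+ * Usage sketch (illustrative only; my_* names are hypothetical and
+ * FC_CLASS_3 is assumed to be the class-3 value of enum fc_cos):
+ * allocate an FCXP that uses the internal buffers (no SGEs, NULL SG
+ * callbacks), build the request, and send it; completion arrives in a
+ * bfa_cb_fcxp_send_t callback.
+ *
+ *	fcxp = bfa_fcxp_req_rsp_alloc(my_ctx, bfa, 0, 0, NULL, NULL,
+ *				      NULL, NULL, BFA_TRUE);
+ *	if (fcxp == NULL)
+ *		return;	// or queue via bfa_fcxp_req_rsp_alloc_wait()
+ *	// build the request in bfa_fcxp_get_reqbuf(fcxp), then:
+ *	bfa_fcxp_send(fcxp, rport, vf_id, lp_tag, BFA_FALSE, FC_CLASS_3,
+ *		      reqlen, &fchs, my_send_done, my_ctx,
+ *		      rsp_maxlen, rsp_timeout_secs);
+ */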
+
+static inline void *
+bfa_uf_get_frmbuf(struct bfa_uf_s *uf)
+{
+	return uf->data_ptr;
+}
+
+static inline   u16
+bfa_uf_get_frmlen(struct bfa_uf_s *uf)
+{
+	return uf->data_len;
+}
+
+/*
+ * bfa uf API functions
+ */
+void bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv,
+			  void *cbarg);
+void bfa_uf_free(struct bfa_uf_s *uf);
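+
+/*
+ * Usage sketch (illustrative only; my_* names are hypothetical): a
+ * consumer registers a receive handler, inspects the frame through the
+ * inline accessors above, and returns the UF to the free pool when done.
+ *
+ *	static void my_uf_recv(void *cbarg, struct bfa_uf_s *uf)
+ *	{
+ *		u8 *frame = bfa_uf_get_frmbuf(uf);
+ *		u16 len = bfa_uf_get_frmlen(uf);
+ *
+ *		// ... process the unsolicited frame ...
+ *		bfa_uf_free(uf);
+ *	}
+ *
+ *	bfa_uf_recv_register(bfa, my_uf_recv, my_cbarg);
+ */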
+
+/*
+ * bfa lport service api
+ */
+
+u32 bfa_lps_get_max_vport(struct bfa_s *bfa);
+struct bfa_lps_s *bfa_lps_alloc(struct bfa_s *bfa);
+void bfa_lps_delete(struct bfa_lps_s *lps);
+void bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa,
+		   u16 pdusz, wwn_t pwwn, wwn_t nwwn,
+		   bfa_boolean_t auth_en);
+void bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz,
+		   wwn_t pwwn, wwn_t nwwn);
+void bfa_lps_fdisclogo(struct bfa_lps_s *lps);
+void bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, u32 n2n_pid);
+u8 bfa_lps_get_fwtag(struct bfa_s *bfa, u8 lp_tag);
+u32 bfa_lps_get_base_pid(struct bfa_s *bfa);
+u8 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid);
+void bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status);
+void bfa_cb_lps_flogo_comp(void *bfad, void *uarg);
+void bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status);
+void bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg);
+void bfa_cb_lps_cvl_event(void *bfad, void *uarg);
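+
+/*
+ * Usage sketch (illustrative only; my_uarg and the scalar arguments are
+ * hypothetical): FLOGI through the lport service.  The result arrives
+ * asynchronously in bfa_cb_lps_flogi_comp(), implemented by the driver.
+ *
+ *	lps = bfa_lps_alloc(bfa);
+ *	if (lps != NULL)
+ *		bfa_lps_flogi(lps, my_uarg, alpa, pdusz, pwwn, nwwn,
+ *			      BFA_FALSE);	// auth_en off
+ */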
+
+/* FAA specific APIs */
+bfa_status_t bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
+			bfa_cb_iocfc_t cbfn, void *cbarg);
+
+/*
+ *	FC DIAG data structure
+ */
+struct bfa_fcdiag_qtest_s {
+	struct bfa_diag_qtest_result_s *result;
+	bfa_cb_diag_t	cbfn;
+	void		*cbarg;
+	struct bfa_timer_s	timer;
+	u32	status;
+	u32	count;
+	u8	lock;
+	u8	queue;
+	u8	all;
+	u8	timer_active;
+};
+
+struct bfa_fcdiag_lb_s {
+	bfa_cb_diag_t   cbfn;
+	void            *cbarg;
+	void            *result;
+	bfa_boolean_t   lock;
+	u32        status;
+};
+
+struct bfa_dport_s {
+	struct bfa_s	*bfa;		/* Back pointer to BFA	*/
+	bfa_sm_t	sm;		/* finite state machine */
+	struct bfa_reqq_wait_s reqq_wait;
+	bfa_cb_diag_t	cbfn;
+	void		*cbarg;
+	union bfi_diag_dport_msg_u i2hmsg;
+	u8		test_state;	/* enum dport_test_state  */
+	u8		dynamic;	/* boolean_t  */
+	u8		rsvd[2];
+	u32		lpcnt;
+	u32		payload;	/* user defined payload pattern */
+	wwn_t		rp_pwwn;
+	wwn_t		rp_nwwn;
+	struct bfa_diag_dport_result_s result;
+};
+
+struct bfa_fcdiag_s {
+	struct bfa_s    *bfa;           /* Back pointer to BFA */
+	struct bfa_trc_mod_s   *trcmod;
+	struct bfa_fcdiag_lb_s lb;
+	struct bfa_fcdiag_qtest_s qtest;
+	struct bfa_dport_s	dport;
+};
+
+#define BFA_FCDIAG_MOD(__bfa)	(&(__bfa)->modules.fcdiag)
+
+void	bfa_fcdiag_intr(struct bfa_s *bfa, struct bfi_msg_s *msg);
+
+bfa_status_t	bfa_fcdiag_loopback(struct bfa_s *bfa,
+				enum bfa_port_opmode opmode,
+				enum bfa_port_speed speed, u32 lpcnt, u32 pat,
+				struct bfa_diag_loopback_result_s *result,
+				bfa_cb_diag_t cbfn, void *cbarg);
+bfa_status_t	bfa_fcdiag_queuetest(struct bfa_s *bfa, u32 ignore,
+			u32 queue, struct bfa_diag_qtest_result_s *result,
+			bfa_cb_diag_t cbfn, void *cbarg);
+bfa_status_t	bfa_fcdiag_lb_is_running(struct bfa_s *bfa);
+bfa_status_t	bfa_dport_enable(struct bfa_s *bfa, u32 lpcnt, u32 pat,
+					bfa_cb_diag_t cbfn, void *cbarg);
+bfa_status_t	bfa_dport_disable(struct bfa_s *bfa, bfa_cb_diag_t cbfn,
+				  void *cbarg);
+bfa_status_t	bfa_dport_start(struct bfa_s *bfa, u32 lpcnt, u32 pat,
+				bfa_cb_diag_t cbfn, void *cbarg);
+bfa_status_t	bfa_dport_show(struct bfa_s *bfa,
+				struct bfa_diag_dport_result_s *result);
+
+#endif /* __BFA_SVC_H__ */
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
new file mode 100644
index 0000000..bd7e6a6
--- /dev/null
+++ b/drivers/scsi/bfa/bfad.c
@@ -0,0 +1,1815 @@
+/*
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014- QLogic Corporation.
+ * All rights reserved
+ * www.qlogic.com
+ *
+ * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+/*
+ *  bfad.c Linux driver PCI interface module.
+ */
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/pci.h>
+#include <linux/firmware.h>
+#include <linux/uaccess.h>
+#include <asm/fcntl.h>
+
+#include "bfad_drv.h"
+#include "bfad_im.h"
+#include "bfa_fcs.h"
+#include "bfa_defs.h"
+#include "bfa.h"
+
+BFA_TRC_FILE(LDRV, BFAD);
+DEFINE_MUTEX(bfad_mutex);
+LIST_HEAD(bfad_list);
+
+static int	bfad_inst;
+static int      num_sgpgs_parm;
+int		supported_fc4s;
+char		*host_name, *os_name, *os_patch;
+int		num_rports, num_ios, num_tms;
+int		num_fcxps, num_ufbufs;
+int		reqq_size, rspq_size, num_sgpgs;
+int		rport_del_timeout = BFA_FCS_RPORT_DEF_DEL_TIMEOUT;
+int		bfa_lun_queue_depth = BFAD_LUN_QUEUE_DEPTH;
+int		bfa_io_max_sge = BFAD_IO_MAX_SGE;
+int		bfa_log_level = 3; /* WARNING log level */
+int		ioc_auto_recover = BFA_TRUE;
+int		bfa_linkup_delay = -1;
+int		fdmi_enable = BFA_TRUE;
+int		pcie_max_read_reqsz;
+int		bfa_debugfs_enable = 1;
+int		msix_disable_cb = 0, msix_disable_ct = 0;
+int		max_xfer_size = BFAD_MAX_SECTORS >> 1;
+int		max_rport_logins = BFA_FCS_MAX_RPORT_LOGINS;
+
+/* Firmware related */
+u32	bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size;
+u32	*bfi_image_cb, *bfi_image_ct, *bfi_image_ct2;
+
+#define BFAD_FW_FILE_CB		"cbfw-3.2.5.1.bin"
+#define BFAD_FW_FILE_CT		"ctfw-3.2.5.1.bin"
+#define BFAD_FW_FILE_CT2	"ct2fw-3.2.5.1.bin"
+
+static u32 *bfad_load_fwimg(struct pci_dev *pdev);
+static void bfad_free_fwimg(void);
+static void bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
+		u32 *bfi_image_size, char *fw_name);
+
+static const char *msix_name_ct[] = {
+	"ctrl",
+	"cpe0", "cpe1", "cpe2", "cpe3",
+	"rme0", "rme1", "rme2", "rme3" };
+
+static const char *msix_name_cb[] = {
+	"cpe0", "cpe1", "cpe2", "cpe3",
+	"rme0", "rme1", "rme2", "rme3",
+	"eemc", "elpu0", "elpu1", "epss", "mlpu" };
+
+MODULE_FIRMWARE(BFAD_FW_FILE_CB);
+MODULE_FIRMWARE(BFAD_FW_FILE_CT);
+MODULE_FIRMWARE(BFAD_FW_FILE_CT2);
+
+module_param(os_name, charp, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(os_name, "OS name of the hba host machine");
+module_param(os_patch, charp, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(os_patch, "OS patch level of the hba host machine");
+module_param(host_name, charp, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(host_name, "Hostname of the hba host machine");
+module_param(num_rports, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(num_rports, "Max number of rports supported per port "
+				"(physical/logical), default=1024");
+module_param(num_ios, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(num_ios, "Max number of ioim requests, default=2000");
+module_param(num_tms, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(num_tms, "Max number of task im requests, default=128");
+module_param(num_fcxps, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(num_fcxps, "Max number of fcxp requests, default=64");
+module_param(num_ufbufs, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(num_ufbufs, "Max number of unsolicited frame "
+				"buffers, default=64");
+module_param(reqq_size, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(reqq_size, "Max number of request queue elements, "
+				"default=256");
+module_param(rspq_size, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(rspq_size, "Max number of response queue elements, "
+				"default=64");
+module_param(num_sgpgs, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(num_sgpgs, "Number of scatter/gather pages, default=2048");
+module_param(rport_del_timeout, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(rport_del_timeout, "Rport delete timeout, default=90 secs, "
+					"Range[>0]");
+module_param(bfa_lun_queue_depth, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(bfa_lun_queue_depth, "Lun queue depth, default=32, Range[>0]");
+module_param(bfa_io_max_sge, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(bfa_io_max_sge, "Max io scatter/gather elements, default=255");
+module_param(bfa_log_level, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(bfa_log_level, "Driver log level, default=3, "
+				"Range[Critical:1|Error:2|Warning:3|Info:4]");
+module_param(ioc_auto_recover, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ioc_auto_recover, "IOC auto recovery, default=1, "
+				"Range[off:0|on:1]");
+module_param(bfa_linkup_delay, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(bfa_linkup_delay, "Link up delay, default=30 secs for "
+			"boot port. Otherwise 10 secs in RHEL4 & 0 for "
+			"[RHEL5, SLES10, ESX40] Range[>0]");
+module_param(msix_disable_cb, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(msix_disable_cb, "Disable Message Signaled Interrupts for QLogic-415/425/815/825 cards, default=0 Range[false:0|true:1]");
+module_param(msix_disable_ct, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(msix_disable_ct, "Disable Message Signaled Interrupts if possible for QLogic-1010/1020/804/1007/902/1741 cards, default=0, Range[false:0|true:1]");
+module_param(fdmi_enable, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(fdmi_enable, "Enables fdmi registration, default=1, "
+				"Range[false:0|true:1]");
+module_param(pcie_max_read_reqsz, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(pcie_max_read_reqsz, "PCIe max read request size, default=0 "
+		"(use system setting), Range[128|256|512|1024|2048|4096]");
+module_param(bfa_debugfs_enable, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(bfa_debugfs_enable, "Enables debugfs feature, default=1,"
+		" Range[false:0|true:1]");
+module_param(max_xfer_size, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(max_xfer_size, "Maximum transfer size, default=32MB,"
+		" Range[64k|128k|256k|512k|1024k|2048k]");
+module_param(max_rport_logins, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(max_rport_logins, "Max number of logins to initiator and target rports on a port (physical/logical), default=1024");
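+
+/*
+ * Example (hypothetical values): the parameters above are set at module
+ * load time, e.g.
+ *
+ *	modprobe bfa num_ios=4000 bfa_log_level=4 fdmi_enable=0
+ *
+ * Parameters declared with S_IWUSR can also be changed later through
+ * /sys/module/bfa/parameters/<name>.
+ */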
+
+static void
+bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event);
+static void
+bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event);
+static void
+bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event);
+static void
+bfad_sm_operational(struct bfad_s *bfad, enum bfad_sm_event event);
+static void
+bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event);
+static void
+bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event);
+static void
+bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event);
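+
+/*
+ * Driver instance state machine (sketch, derived from the handlers
+ * below; error paths abbreviated):
+ *
+ *	uninit --CREATE--> created --INIT--> initializing
+ *	initializing --INIT_SUCCESS--> operational (failed on start error)
+ *	initializing --INIT_FAILED--> uninit
+ *	initializing --HAL_INIT_FAILED--> failed
+ *	operational/failed --STOP--> fcs_exit --FCS_EXIT_COMP--> stopping
+ *	stopping --EXIT_COMP--> uninit
+ */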
+
+/*
+ * Beginning state for the driver instance, awaiting the pci_probe event
+ */
+static void
+bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event)
+{
+	bfa_trc(bfad, event);
+
+	switch (event) {
+	case BFAD_E_CREATE:
+		bfa_sm_set_state(bfad, bfad_sm_created);
+		bfad->bfad_tsk = kthread_create(bfad_worker, (void *) bfad,
+						"%s", "bfad_worker");
+		if (IS_ERR(bfad->bfad_tsk)) {
+			printk(KERN_INFO "bfad[%d]: Kernel thread "
+				"creation failed!\n", bfad->inst_no);
+			bfa_sm_send_event(bfad, BFAD_E_KTHREAD_CREATE_FAILED);
+		}
+		bfa_sm_send_event(bfad, BFAD_E_INIT);
+		break;
+
+	case BFAD_E_STOP:
+		/* Ignore stop; already in uninit */
+		break;
+
+	default:
+		bfa_sm_fault(bfad, event);
+	}
+}
+
+/*
+ * Driver Instance is created, awaiting event INIT to initialize the bfad
+ */
+static void
+bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event)
+{
+	unsigned long flags;
+	bfa_status_t ret;
+
+	bfa_trc(bfad, event);
+
+	switch (event) {
+	case BFAD_E_INIT:
+		bfa_sm_set_state(bfad, bfad_sm_initializing);
+
+		init_completion(&bfad->comp);
+
+		/* Enable interrupts and wait for bfa_init completion */
+		if (bfad_setup_intr(bfad)) {
+			printk(KERN_WARNING "bfad%d: bfad_setup_intr failed\n",
+					bfad->inst_no);
+			bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED);
+			break;
+		}
+
+		spin_lock_irqsave(&bfad->bfad_lock, flags);
+		bfa_iocfc_init(&bfad->bfa);
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+		/* Set up an interrupt handler for each vector */
+		if ((bfad->bfad_flags & BFAD_MSIX_ON) &&
+			bfad_install_msix_handler(bfad)) {
+			printk(KERN_WARNING "%s: install_msix failed, bfad%d\n",
+				__func__, bfad->inst_no);
+		}
+
+		bfad_init_timer(bfad);
+
+		wait_for_completion(&bfad->comp);
+
+		if ((bfad->bfad_flags & BFAD_HAL_INIT_DONE)) {
+			bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS);
+		} else {
+			printk(KERN_WARNING
+				"bfa %s: bfa init failed\n",
+				bfad->pci_name);
+			spin_lock_irqsave(&bfad->bfad_lock, flags);
+			bfa_fcs_init(&bfad->bfa_fcs);
+			spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+			ret = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM);
+			if (ret != BFA_STATUS_OK) {
+				init_completion(&bfad->comp);
+
+				spin_lock_irqsave(&bfad->bfad_lock, flags);
+				bfad->pport.flags |= BFAD_PORT_DELETE;
+				bfa_fcs_exit(&bfad->bfa_fcs);
+				spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+				wait_for_completion(&bfad->comp);
+
+				bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED);
+				break;
+			}
+			bfad->bfad_flags |= BFAD_HAL_INIT_FAIL;
+			bfa_sm_send_event(bfad, BFAD_E_HAL_INIT_FAILED);
+		}
+
+		break;
+
+	case BFAD_E_KTHREAD_CREATE_FAILED:
+		bfa_sm_set_state(bfad, bfad_sm_uninit);
+		break;
+
+	default:
+		bfa_sm_fault(bfad, event);
+	}
+}
+
+static void
+bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event)
+{
+	int	retval;
+	unsigned long	flags;
+
+	bfa_trc(bfad, event);
+
+	switch (event) {
+	case BFAD_E_INIT_SUCCESS:
+		kthread_stop(bfad->bfad_tsk);
+		spin_lock_irqsave(&bfad->bfad_lock, flags);
+		bfad->bfad_tsk = NULL;
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+		retval = bfad_start_ops(bfad);
+		if (retval != BFA_STATUS_OK) {
+			bfa_sm_set_state(bfad, bfad_sm_failed);
+			break;
+		}
+		bfa_sm_set_state(bfad, bfad_sm_operational);
+		break;
+
+	case BFAD_E_INIT_FAILED:
+		bfa_sm_set_state(bfad, bfad_sm_uninit);
+		kthread_stop(bfad->bfad_tsk);
+		spin_lock_irqsave(&bfad->bfad_lock, flags);
+		bfad->bfad_tsk = NULL;
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		break;
+
+	case BFAD_E_HAL_INIT_FAILED:
+		bfa_sm_set_state(bfad, bfad_sm_failed);
+		break;
+	default:
+		bfa_sm_fault(bfad, event);
+	}
+}
+
+static void
+bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event)
+{
+	int	retval;
+
+	bfa_trc(bfad, event);
+
+	switch (event) {
+	case BFAD_E_INIT_SUCCESS:
+		retval = bfad_start_ops(bfad);
+		if (retval != BFA_STATUS_OK)
+			break;
+		bfa_sm_set_state(bfad, bfad_sm_operational);
+		break;
+
+	case BFAD_E_STOP:
+		bfa_sm_set_state(bfad, bfad_sm_fcs_exit);
+		bfa_sm_send_event(bfad, BFAD_E_FCS_EXIT_COMP);
+		break;
+
+	case BFAD_E_EXIT_COMP:
+		bfa_sm_set_state(bfad, bfad_sm_uninit);
+		bfad_remove_intr(bfad);
+		del_timer_sync(&bfad->hal_tmo);
+		break;
+
+	default:
+		bfa_sm_fault(bfad, event);
+	}
+}
+
+static void
+bfad_sm_operational(struct bfad_s *bfad, enum bfad_sm_event event)
+{
+	bfa_trc(bfad, event);
+
+	switch (event) {
+	case BFAD_E_STOP:
+		bfa_sm_set_state(bfad, bfad_sm_fcs_exit);
+		bfad_fcs_stop(bfad);
+		break;
+
+	default:
+		bfa_sm_fault(bfad, event);
+	}
+}
+
+static void
+bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event)
+{
+	bfa_trc(bfad, event);
+
+	switch (event) {
+	case BFAD_E_FCS_EXIT_COMP:
+		bfa_sm_set_state(bfad, bfad_sm_stopping);
+		bfad_stop(bfad);
+		break;
+
+	default:
+		bfa_sm_fault(bfad, event);
+	}
+}
+
+static void
+bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event)
+{
+	bfa_trc(bfad, event);
+
+	switch (event) {
+	case BFAD_E_EXIT_COMP:
+		bfa_sm_set_state(bfad, bfad_sm_uninit);
+		bfad_remove_intr(bfad);
+		del_timer_sync(&bfad->hal_tmo);
+		bfad_im_probe_undo(bfad);
+		bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
+		bfad_uncfg_pport(bfad);
+		break;
+
+	default:
+		bfa_sm_fault(bfad, event);
+		break;
+	}
+}
+
+/*
+ *  BFA callbacks
+ */
+void
+bfad_hcb_comp(void *arg, bfa_status_t status)
+{
+	struct bfad_hal_comp *fcomp = (struct bfad_hal_comp *)arg;
+
+	fcomp->status = status;
+	complete(&fcomp->comp);
+}
+
+/*
+ * bfa_init callback
+ */
+void
+bfa_cb_init(void *drv, bfa_status_t init_status)
+{
+	struct bfad_s	      *bfad = drv;
+
+	if (init_status == BFA_STATUS_OK) {
+		bfad->bfad_flags |= BFAD_HAL_INIT_DONE;
+
+		/*
+		 * If the BFAD_HAL_INIT_FAIL flag is set, wake up the
+		 * kernel thread so it can start bfad operations now
+		 * that HAL init is done.
+		 */
+		if ((bfad->bfad_flags & BFAD_HAL_INIT_FAIL)) {
+			bfad->bfad_flags &= ~BFAD_HAL_INIT_FAIL;
+			wake_up_process(bfad->bfad_tsk);
+		}
+	}
+
+	complete(&bfad->comp);
+}
+
+/*
+ *  BFA_FCS callbacks
+ */
+struct bfad_port_s *
+bfa_fcb_lport_new(struct bfad_s *bfad, struct bfa_fcs_lport_s *port,
+		 enum bfa_lport_role roles, struct bfad_vf_s *vf_drv,
+		 struct bfad_vport_s *vp_drv)
+{
+	bfa_status_t	rc;
+	struct bfad_port_s    *port_drv;
+
+	if (!vp_drv && !vf_drv) {
+		port_drv = &bfad->pport;
+		port_drv->pvb_type = BFAD_PORT_PHYS_BASE;
+	} else if (!vp_drv && vf_drv) {
+		port_drv = &vf_drv->base_port;
+		port_drv->pvb_type = BFAD_PORT_VF_BASE;
+	} else if (vp_drv && !vf_drv) {
+		port_drv = &vp_drv->drv_port;
+		port_drv->pvb_type = BFAD_PORT_PHYS_VPORT;
+	} else {
+		port_drv = &vp_drv->drv_port;
+		port_drv->pvb_type = BFAD_PORT_VF_VPORT;
+	}
+
+	port_drv->fcs_port = port;
+	port_drv->roles = roles;
+
+	if (roles & BFA_LPORT_ROLE_FCP_IM) {
+		rc = bfad_im_port_new(bfad, port_drv);
+		if (rc != BFA_STATUS_OK) {
+			bfad_im_port_delete(bfad, port_drv);
+			port_drv = NULL;
+		}
+	}
+
+	return port_drv;
+}
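+
+/*
+ * Summary of the port type selection above:
+ *
+ *	vf_drv	vp_drv	pvb_type
+ *	NULL	NULL	BFAD_PORT_PHYS_BASE	(physical base port)
+ *	set	NULL	BFAD_PORT_VF_BASE	(base port of a vf)
+ *	NULL	set	BFAD_PORT_PHYS_VPORT	(vport on the physical port)
+ *	set	set	BFAD_PORT_VF_VPORT	(vport on a vf)
+ */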
+
+/*
+ * FCS RPORT alloc callback, after successful PLOGI by FCS
+ */
+bfa_status_t
+bfa_fcb_rport_alloc(struct bfad_s *bfad, struct bfa_fcs_rport_s **rport,
+		    struct bfad_rport_s **rport_drv)
+{
+	bfa_status_t	rc = BFA_STATUS_OK;
+
+	*rport_drv = kzalloc(sizeof(struct bfad_rport_s), GFP_ATOMIC);
+	if (*rport_drv == NULL) {
+		rc = BFA_STATUS_ENOMEM;
+		goto ext;
+	}
+
+	*rport = &(*rport_drv)->fcs_rport;
+
+ext:
+	return rc;
+}
+
+/*
+ * FCS PBC VPORT Create
+ */
+void
+bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s pbc_vport)
+{
+	struct bfa_lport_cfg_s port_cfg = {0};
+	struct bfad_vport_s   *vport;
+	int rc;
+
+	vport = kzalloc(sizeof(struct bfad_vport_s), GFP_ATOMIC);
+	if (!vport) {
+		bfa_trc(bfad, 0);
+		return;
+	}
+
+	vport->drv_port.bfad = bfad;
+	port_cfg.roles = BFA_LPORT_ROLE_FCP_IM;
+	port_cfg.pwwn = pbc_vport.vp_pwwn;
+	port_cfg.nwwn = pbc_vport.vp_nwwn;
+	port_cfg.preboot_vp  = BFA_TRUE;
+
+	rc = bfa_fcs_pbc_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, 0,
+				  &port_cfg, vport);
+
+	if (rc != BFA_STATUS_OK) {
+		bfa_trc(bfad, 0);
+		return;
+	}
+
+	list_add_tail(&vport->list_entry, &bfad->pbc_vport_list);
+}
+
+void
+bfad_hal_mem_release(struct bfad_s *bfad)
+{
+	struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
+	struct bfa_mem_dma_s *dma_info, *dma_elem;
+	struct bfa_mem_kva_s *kva_info, *kva_elem;
+	struct list_head *dm_qe, *km_qe;
+
+	dma_info = &hal_meminfo->dma_info;
+	kva_info = &hal_meminfo->kva_info;
+
+	/* Iterate through the KVA meminfo queue */
+	list_for_each(km_qe, &kva_info->qe) {
+		kva_elem = (struct bfa_mem_kva_s *) km_qe;
+		vfree(kva_elem->kva);
+	}
+
+	/* Iterate through the DMA meminfo queue */
+	list_for_each(dm_qe, &dma_info->qe) {
+		dma_elem = (struct bfa_mem_dma_s *) dm_qe;
+		dma_free_coherent(&bfad->pcidev->dev,
+				dma_elem->mem_len, dma_elem->kva,
+				(dma_addr_t) dma_elem->dma);
+	}
+
+	memset(hal_meminfo, 0, sizeof(struct bfa_meminfo_s));
+}
+
+void
+bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg)
+{
+	if (num_rports > 0)
+		bfa_cfg->fwcfg.num_rports = num_rports;
+	if (num_ios > 0)
+		bfa_cfg->fwcfg.num_ioim_reqs = num_ios;
+	if (num_tms > 0)
+		bfa_cfg->fwcfg.num_tskim_reqs = num_tms;
+	if (num_fcxps > 0 && num_fcxps <= BFA_FCXP_MAX)
+		bfa_cfg->fwcfg.num_fcxp_reqs = num_fcxps;
+	if (num_ufbufs > 0 && num_ufbufs <= BFA_UF_MAX)
+		bfa_cfg->fwcfg.num_uf_bufs = num_ufbufs;
+	if (reqq_size > 0)
+		bfa_cfg->drvcfg.num_reqq_elems = reqq_size;
+	if (rspq_size > 0)
+		bfa_cfg->drvcfg.num_rspq_elems = rspq_size;
+	if (num_sgpgs > 0 && num_sgpgs <= BFA_SGPG_MAX)
+		bfa_cfg->drvcfg.num_sgpgs = num_sgpgs;
+
+	/*
+	 * Populate the HAL values back into the driver for sysfs use;
+	 * otherwise the defaults would show up as 0 in sysfs.
+	 */
+	num_rports = bfa_cfg->fwcfg.num_rports;
+	num_ios = bfa_cfg->fwcfg.num_ioim_reqs;
+	num_tms = bfa_cfg->fwcfg.num_tskim_reqs;
+	num_fcxps = bfa_cfg->fwcfg.num_fcxp_reqs;
+	num_ufbufs = bfa_cfg->fwcfg.num_uf_bufs;
+	reqq_size = bfa_cfg->drvcfg.num_reqq_elems;
+	rspq_size = bfa_cfg->drvcfg.num_rspq_elems;
+	num_sgpgs = bfa_cfg->drvcfg.num_sgpgs;
+}
+
+bfa_status_t
+bfad_hal_mem_alloc(struct bfad_s *bfad)
+{
+	struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
+	struct bfa_mem_dma_s *dma_info, *dma_elem;
+	struct bfa_mem_kva_s *kva_info, *kva_elem;
+	struct list_head *dm_qe, *km_qe;
+	bfa_status_t	rc = BFA_STATUS_OK;
+	dma_addr_t	phys_addr;
+
+	bfa_cfg_get_default(&bfad->ioc_cfg);
+	bfad_update_hal_cfg(&bfad->ioc_cfg);
+	bfad->cfg_data.ioc_queue_depth = bfad->ioc_cfg.fwcfg.num_ioim_reqs;
+	bfa_cfg_get_meminfo(&bfad->ioc_cfg, hal_meminfo, &bfad->bfa);
+
+	dma_info = &hal_meminfo->dma_info;
+	kva_info = &hal_meminfo->kva_info;
+
+	/* Iterate through the KVA meminfo queue */
+	list_for_each(km_qe, &kva_info->qe) {
+		kva_elem = (struct bfa_mem_kva_s *) km_qe;
+		kva_elem->kva = vzalloc(kva_elem->mem_len);
+		if (kva_elem->kva == NULL) {
+			bfad_hal_mem_release(bfad);
+			rc = BFA_STATUS_ENOMEM;
+			goto ext;
+		}
+	}
+
+	/* Iterate through the DMA meminfo queue */
+	list_for_each(dm_qe, &dma_info->qe) {
+		dma_elem = (struct bfa_mem_dma_s *) dm_qe;
+		dma_elem->kva = dma_alloc_coherent(&bfad->pcidev->dev,
+						dma_elem->mem_len,
+						&phys_addr, GFP_KERNEL);
+		if (dma_elem->kva == NULL) {
+			bfad_hal_mem_release(bfad);
+			rc = BFA_STATUS_ENOMEM;
+			goto ext;
+		}
+		dma_elem->dma = phys_addr;
+		memset(dma_elem->kva, 0, dma_elem->mem_len);
+	}
+ext:
+	return rc;
+}
+
+/*
+ * Create a vport under a vf.
+ */
+bfa_status_t
+bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
+		  struct bfa_lport_cfg_s *port_cfg, struct device *dev)
+{
+	struct bfad_vport_s   *vport;
+	int		rc = BFA_STATUS_OK;
+	unsigned long	flags;
+	struct completion fcomp;
+
+	vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL);
+	if (!vport) {
+		rc = BFA_STATUS_ENOMEM;
+		goto ext;
+	}
+
+	vport->drv_port.bfad = bfad;
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	rc = bfa_fcs_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, vf_id,
+				  port_cfg, vport);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	if (rc != BFA_STATUS_OK)
+		goto ext_free_vport;
+
+	if (port_cfg->roles & BFA_LPORT_ROLE_FCP_IM) {
+		rc = bfad_im_scsi_host_alloc(bfad, vport->drv_port.im_port,
+							dev);
+		if (rc != BFA_STATUS_OK)
+			goto ext_free_fcs_vport;
+	}
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	bfa_fcs_vport_start(&vport->fcs_vport);
+	list_add_tail(&vport->list_entry, &bfad->vport_list);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	return BFA_STATUS_OK;
+
+ext_free_fcs_vport:
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	vport->comp_del = &fcomp;
+	init_completion(vport->comp_del);
+	bfa_fcs_vport_delete(&vport->fcs_vport);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	wait_for_completion(vport->comp_del);
+ext_free_vport:
+	kfree(vport);
+ext:
+	return rc;
+}
+
+void
+bfad_bfa_tmo(struct timer_list *t)
+{
+	struct bfad_s	      *bfad = from_timer(bfad, t, hal_tmo);
+	unsigned long	flags;
+	struct list_head	       doneq;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+
+	bfa_timer_beat(&bfad->bfa.timer_mod);
+
+	bfa_comp_deq(&bfad->bfa, &doneq);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	if (!list_empty(&doneq)) {
+		bfa_comp_process(&bfad->bfa, &doneq);
+		spin_lock_irqsave(&bfad->bfad_lock, flags);
+		bfa_comp_free(&bfad->bfa, &doneq);
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	}
+
+	mod_timer(&bfad->hal_tmo,
+		  jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
+}
+
+void
+bfad_init_timer(struct bfad_s *bfad)
+{
+	timer_setup(&bfad->hal_tmo, bfad_bfa_tmo, 0);
+
+	mod_timer(&bfad->hal_tmo,
+		  jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
+}
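+
+/*
+ * Note: bfad_bfa_tmo() re-arms itself with mod_timer(), so the HAL
+ * timer beats every BFA_TIMER_FREQ milliseconds until del_timer_sync()
+ * is called during teardown.
+ */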
+
+int
+bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
+{
+	int		rc = -ENODEV;
+
+	if (pci_enable_device(pdev)) {
+		printk(KERN_ERR "pci_enable_device fail %p\n", pdev);
+		goto out;
+	}
+
+	if (pci_request_regions(pdev, BFAD_DRIVER_NAME))
+		goto out_disable_device;
+
+	pci_set_master(pdev);
+
+	if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) ||
+	    (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)) {
+		if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) ||
+		   (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) {
+			printk(KERN_ERR "pci_set_dma_mask fail %p\n", pdev);
+			goto out_release_region;
+		}
+	}
+
+	/* Enable PCIe Advanced Error Reporting (AER) if the kernel supports it */
+	pci_enable_pcie_error_reporting(pdev);
+
+	bfad->pci_bar0_kva = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
+	bfad->pci_bar2_kva = pci_iomap(pdev, 2, pci_resource_len(pdev, 2));
+
+	if (bfad->pci_bar0_kva == NULL) {
+		printk(KERN_ERR "Fail to map bar0\n");
+		goto out_release_region;
+	}
+
+	bfad->hal_pcidev.pci_slot = PCI_SLOT(pdev->devfn);
+	bfad->hal_pcidev.pci_func = PCI_FUNC(pdev->devfn);
+	bfad->hal_pcidev.pci_bar_kva = bfad->pci_bar0_kva;
+	bfad->hal_pcidev.device_id = pdev->device;
+	bfad->hal_pcidev.ssid = pdev->subsystem_device;
+	bfad->pci_name = pci_name(pdev);
+
+	bfad->pci_attr.vendor_id = pdev->vendor;
+	bfad->pci_attr.device_id = pdev->device;
+	bfad->pci_attr.ssid = pdev->subsystem_device;
+	bfad->pci_attr.ssvid = pdev->subsystem_vendor;
+	bfad->pci_attr.pcifn = PCI_FUNC(pdev->devfn);
+
+	bfad->pcidev = pdev;
+
+	/* Adjust PCIe Maximum Read Request Size */
+	if (pci_is_pcie(pdev) && pcie_max_read_reqsz) {
+		if (pcie_max_read_reqsz >= 128 &&
+		    pcie_max_read_reqsz <= 4096 &&
+		    is_power_of_2(pcie_max_read_reqsz)) {
+			int max_rq = pcie_get_readrq(pdev);
+			printk(KERN_WARNING "BFA[%s]: "
+				"pcie_max_read_request_size is %d, "
+				"reset to %d\n", bfad->pci_name, max_rq,
+				pcie_max_read_reqsz);
+			pcie_set_readrq(pdev, pcie_max_read_reqsz);
+		} else {
+			printk(KERN_WARNING "BFA[%s]: invalid "
+			       "pcie_max_read_request_size %d ignored\n",
+			       bfad->pci_name, pcie_max_read_reqsz);
+		}
+	}
+
+	pci_save_state(pdev);
+
+	return 0;
+
+out_release_region:
+	pci_release_regions(pdev);
+out_disable_device:
+	pci_disable_device(pdev);
+out:
+	return rc;
+}
+
+void
+bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad)
+{
+	pci_iounmap(pdev, bfad->pci_bar0_kva);
+	pci_iounmap(pdev, bfad->pci_bar2_kva);
+	pci_release_regions(pdev);
+	/* Disable PCIe Advanced Error Reporting (AER) */
+	pci_disable_pcie_error_reporting(pdev);
+	pci_disable_device(pdev);
+}
+
+bfa_status_t
+bfad_drv_init(struct bfad_s *bfad)
+{
+	bfa_status_t	rc;
+	unsigned long	flags;
+
+	bfad->cfg_data.rport_del_timeout = rport_del_timeout;
+	bfad->cfg_data.lun_queue_depth = bfa_lun_queue_depth;
+	bfad->cfg_data.io_max_sge = bfa_io_max_sge;
+	bfad->cfg_data.binding_method = FCP_PWWN_BINDING;
+
+	rc = bfad_hal_mem_alloc(bfad);
+	if (rc != BFA_STATUS_OK) {
+		printk(KERN_WARNING "bfad%d bfad_hal_mem_alloc failure\n",
+		       bfad->inst_no);
+		printk(KERN_WARNING
+			"Not enough memory to attach all QLogic BR-series HBA ports. System may need more memory.\n");
+		return BFA_STATUS_FAILED;
+	}
+
+	bfad->bfa.trcmod = bfad->trcmod;
+	bfad->bfa.plog = &bfad->plog_buf;
+	bfa_plog_init(&bfad->plog_buf);
+	bfa_plog_str(&bfad->plog_buf, BFA_PL_MID_DRVR, BFA_PL_EID_DRIVER_START,
+		     0, "Driver Attach");
+
+	bfa_attach(&bfad->bfa, bfad, &bfad->ioc_cfg, &bfad->meminfo,
+		   &bfad->hal_pcidev);
+
+	/* FCS INIT */
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	bfad->bfa_fcs.trcmod = bfad->trcmod;
+	bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE);
+	bfad->bfa_fcs.fdmi_enabled = fdmi_enable;
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	bfad->bfad_flags |= BFAD_DRV_INIT_DONE;
+
+	return BFA_STATUS_OK;
+}
+
+void
+bfad_drv_uninit(struct bfad_s *bfad)
+{
+	unsigned long   flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	init_completion(&bfad->comp);
+	bfa_iocfc_stop(&bfad->bfa);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	wait_for_completion(&bfad->comp);
+
+	del_timer_sync(&bfad->hal_tmo);
+	bfa_isr_disable(&bfad->bfa);
+	bfa_detach(&bfad->bfa);
+	bfad_remove_intr(bfad);
+	bfad_hal_mem_release(bfad);
+
+	bfad->bfad_flags &= ~BFAD_DRV_INIT_DONE;
+}
+
+void
+bfad_drv_start(struct bfad_s *bfad)
+{
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	bfa_iocfc_start(&bfad->bfa);
+	bfa_fcs_pbc_vport_init(&bfad->bfa_fcs);
+	bfa_fcs_fabric_modstart(&bfad->bfa_fcs);
+	bfad->bfad_flags |= BFAD_HAL_START_DONE;
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	if (bfad->im)
+		flush_workqueue(bfad->im->drv_workq);
+}
+
+void
+bfad_fcs_stop(struct bfad_s *bfad)
+{
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	init_completion(&bfad->comp);
+	bfad->pport.flags |= BFAD_PORT_DELETE;
+	bfa_fcs_exit(&bfad->bfa_fcs);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	wait_for_completion(&bfad->comp);
+
+	bfa_sm_send_event(bfad, BFAD_E_FCS_EXIT_COMP);
+}
+
+void
+bfad_stop(struct bfad_s *bfad)
+{
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	init_completion(&bfad->comp);
+	bfa_iocfc_stop(&bfad->bfa);
+	bfad->bfad_flags &= ~BFAD_HAL_START_DONE;
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	wait_for_completion(&bfad->comp);
+
+	bfa_sm_send_event(bfad, BFAD_E_EXIT_COMP);
+}
+
+bfa_status_t
+bfad_cfg_pport(struct bfad_s *bfad, enum bfa_lport_role role)
+{
+	int		rc = BFA_STATUS_OK;
+
+	/* Allocate scsi_host for the physical port */
+	if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) &&
+	    (role & BFA_LPORT_ROLE_FCP_IM)) {
+		if (bfad->pport.im_port == NULL) {
+			rc = BFA_STATUS_FAILED;
+			goto out;
+		}
+
+		rc = bfad_im_scsi_host_alloc(bfad, bfad->pport.im_port,
+						&bfad->pcidev->dev);
+		if (rc != BFA_STATUS_OK)
+			goto out;
+
+		bfad->pport.roles |= BFA_LPORT_ROLE_FCP_IM;
+	}
+
+	bfad->bfad_flags |= BFAD_CFG_PPORT_DONE;
+
+out:
+	return rc;
+}
+
+void
+bfad_uncfg_pport(struct bfad_s *bfad)
+{
+	if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) &&
+	    (bfad->pport.roles & BFA_LPORT_ROLE_FCP_IM)) {
+		bfad_im_scsi_host_free(bfad, bfad->pport.im_port);
+		bfad_im_port_clean(bfad->pport.im_port);
+		kfree(bfad->pport.im_port);
+		bfad->pport.roles &= ~BFA_LPORT_ROLE_FCP_IM;
+	}
+
+	bfad->bfad_flags &= ~BFAD_CFG_PPORT_DONE;
+}
+
+bfa_status_t
+bfad_start_ops(struct bfad_s *bfad)
+{
+	int	retval;
+	unsigned long	flags;
+	struct bfad_vport_s *vport, *vport_new;
+	struct bfa_fcs_driver_info_s driver_info;
+
+	/* Clamp max_xfer_size to the supported range [64KB, 32MB] */
+	if (max_xfer_size < BFAD_MIN_SECTORS >> 1)
+		max_xfer_size = BFAD_MIN_SECTORS >> 1;
+	if (max_xfer_size > BFAD_MAX_SECTORS >> 1)
+		max_xfer_size = BFAD_MAX_SECTORS >> 1;
+
+	/* Fill in the driver_info for the FCS */
+	memset(&driver_info, 0, sizeof(driver_info));
+	strlcpy(driver_info.version, BFAD_DRIVER_VERSION,
+		sizeof(driver_info.version));
+	if (host_name)
+		strlcpy(driver_info.host_machine_name, host_name,
+			sizeof(driver_info.host_machine_name));
+	if (os_name)
+		strlcpy(driver_info.host_os_name, os_name,
+			sizeof(driver_info.host_os_name));
+	if (os_patch)
+		strlcpy(driver_info.host_os_patch, os_patch,
+			sizeof(driver_info.host_os_patch));
+
+	strlcpy(driver_info.os_device_name, bfad->pci_name,
+		sizeof(driver_info.os_device_name));
+
+	/* FCS driver info init */
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info);
+
+	if (bfad->bfad_flags & BFAD_CFG_PPORT_DONE)
+		bfa_fcs_update_cfg(&bfad->bfa_fcs);
+	else
+		bfa_fcs_init(&bfad->bfa_fcs);
+
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	if (!(bfad->bfad_flags & BFAD_CFG_PPORT_DONE)) {
+		retval = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM);
+		if (retval != BFA_STATUS_OK)
+			return BFA_STATUS_FAILED;
+	}
+
+	/* Set up fixed fc_host attributes if the kernel supports them */
+	bfad_fc_host_init(bfad->pport.im_port);
+
+	/* BFAD level FC4 IM specific resource allocation */
+	retval = bfad_im_probe(bfad);
+	if (retval != BFA_STATUS_OK) {
+		printk(KERN_WARNING "bfad_im_probe failed\n");
+		if (bfa_sm_cmp_state(bfad, bfad_sm_initializing))
+			bfa_sm_set_state(bfad, bfad_sm_failed);
+		return BFA_STATUS_FAILED;
+	} else
+		bfad->bfad_flags |= BFAD_FC4_PROBE_DONE;
+
+	bfad_drv_start(bfad);
+
+	/* Complete pbc vport create */
+	list_for_each_entry_safe(vport, vport_new, &bfad->pbc_vport_list,
+				list_entry) {
+		struct fc_vport_identifiers vid;
+		struct fc_vport *fc_vport;
+		char pwwn_buf[BFA_STRING_32];
+
+		memset(&vid, 0, sizeof(vid));
+		vid.roles = FC_PORT_ROLE_FCP_INITIATOR;
+		vid.vport_type = FC_PORTTYPE_NPIV;
+		vid.disable = false;
+		vid.node_name = wwn_to_u64((u8 *)
+				(&((vport->fcs_vport).lport.port_cfg.nwwn)));
+		vid.port_name = wwn_to_u64((u8 *)
+				(&((vport->fcs_vport).lport.port_cfg.pwwn)));
+		fc_vport = fc_vport_create(bfad->pport.im_port->shost, 0, &vid);
+		if (!fc_vport) {
+			wwn2str(pwwn_buf, vid.port_name);
+			printk(KERN_WARNING "bfad%d: failed to create pbc vport"
+				" %s\n", bfad->inst_no, pwwn_buf);
+		}
+		list_del(&vport->list_entry);
+		kfree(vport);
+	}
+
+	/*
+	 * If bfa_linkup_delay is at its default of -1, derive the
+	 * value using bfad_get_linkup_delay(); otherwise use the
+	 * module parameter value as the bfa_linkup_delay.
+	 */
+	if (bfa_linkup_delay < 0) {
+		bfa_linkup_delay = bfad_get_linkup_delay(bfad);
+		bfad_rport_online_wait(bfad);
+		bfa_linkup_delay = -1;
+	} else
+		bfad_rport_online_wait(bfad);
+
+	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "bfa device claimed\n");
+
+	return BFA_STATUS_OK;
+}
+
+int
+bfad_worker(void *ptr)
+{
+	struct bfad_s *bfad = ptr;
+	unsigned long flags;
+
+	if (kthread_should_stop())
+		return 0;
+
+	/* Send event BFAD_E_INIT_SUCCESS */
+	bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS);
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	bfad->bfad_tsk = NULL;
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	return 0;
+}
+
+/*
+ *  BFA driver interrupt functions
+ */
+irqreturn_t
+bfad_intx(int irq, void *dev_id)
+{
+	struct bfad_s	*bfad = dev_id;
+	struct list_head	doneq;
+	unsigned long	flags;
+	bfa_boolean_t rc;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	rc = bfa_intx(&bfad->bfa);
+	if (!rc) {
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		return IRQ_NONE;
+	}
+
+	bfa_comp_deq(&bfad->bfa, &doneq);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	if (!list_empty(&doneq)) {
+		bfa_comp_process(&bfad->bfa, &doneq);
+
+		spin_lock_irqsave(&bfad->bfad_lock, flags);
+		bfa_comp_free(&bfad->bfa, &doneq);
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t
+bfad_msix(int irq, void *dev_id)
+{
+	struct bfad_msix_s *vec = dev_id;
+	struct bfad_s *bfad = vec->bfad;
+	struct list_head doneq;
+	unsigned long   flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+
+	bfa_msix(&bfad->bfa, vec->msix.entry);
+	bfa_comp_deq(&bfad->bfa, &doneq);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	if (!list_empty(&doneq)) {
+		bfa_comp_process(&bfad->bfa, &doneq);
+
+		spin_lock_irqsave(&bfad->bfad_lock, flags);
+		bfa_comp_free(&bfad->bfa, &doneq);
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Initialize the MSIX entry table.
+ */
+static void
+bfad_init_msix_entry(struct bfad_s *bfad, struct msix_entry *msix_entries,
+			 int mask, int max_bit)
+{
+	int	i;
+	int	match = 0x00000001;
+
+	for (i = 0, bfad->nvec = 0; i < MAX_MSIX_ENTRY; i++) {
+		if (mask & match) {
+			bfad->msix_tab[bfad->nvec].msix.entry = i;
+			bfad->msix_tab[bfad->nvec].bfad = bfad;
+			msix_entries[bfad->nvec].entry = i;
+			bfad->nvec++;
+		}
+
+		match <<= 1;
+	}
+}
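+
+/*
+ * Worked example: with mask == 0x0000001b (bits 0, 1, 3 and 4 set), the
+ * loop above fills msix_tab[0..3] with MSI-X entry numbers 0, 1, 3 and
+ * 4, leaving bfad->nvec == 4.
+ */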
+
+int
+bfad_install_msix_handler(struct bfad_s *bfad)
+{
+	int i, error = 0;
+
+	for (i = 0; i < bfad->nvec; i++) {
+		sprintf(bfad->msix_tab[i].name, "bfa-%s-%s",
+				bfad->pci_name,
+				((bfa_asic_id_cb(bfad->hal_pcidev.device_id)) ?
+				msix_name_cb[i] : msix_name_ct[i]));
+
+		error = request_irq(bfad->msix_tab[i].msix.vector,
+				    (irq_handler_t) bfad_msix, 0,
+				    bfad->msix_tab[i].name, &bfad->msix_tab[i]);
+		bfa_trc(bfad, i);
+		bfa_trc(bfad, bfad->msix_tab[i].msix.vector);
+		if (error) {
+			int	j;
+
+			for (j = 0; j < i; j++)
+				free_irq(bfad->msix_tab[j].msix.vector,
+						&bfad->msix_tab[j]);
+
+			bfad->bfad_flags &= ~BFAD_MSIX_ON;
+			pci_disable_msix(bfad->pcidev);
+
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Setup MSIX based interrupt.
+ */
+int
+bfad_setup_intr(struct bfad_s *bfad)
+{
+	int error;
+	u32 mask = 0, i, num_bit = 0, max_bit = 0;
+	struct msix_entry msix_entries[MAX_MSIX_ENTRY];
+	struct pci_dev *pdev = bfad->pcidev;
+	u16	reg;
+
+	/* Call BFA to get the msix map for this PCI function.  */
+	bfa_msix_getvecs(&bfad->bfa, &mask, &num_bit, &max_bit);
+
+	/* Set up the msix entry table */
+	bfad_init_msix_entry(bfad, msix_entries, mask, max_bit);
+
+	if ((bfa_asic_id_ctc(pdev->device) && !msix_disable_ct) ||
+	   (bfa_asic_id_cb(pdev->device) && !msix_disable_cb)) {
+
+		error = pci_enable_msix_exact(bfad->pcidev,
+					      msix_entries, bfad->nvec);
+		/* In CT1 & CT2, try to allocate just one vector */
+		if (error == -ENOSPC && bfa_asic_id_ctc(pdev->device)) {
+			printk(KERN_WARNING "bfa %s: trying one msix "
+			       "vector failed to allocate %d[%d]\n",
+			       bfad->pci_name, bfad->nvec, error);
+			bfad->nvec = 1;
+			error = pci_enable_msix_exact(bfad->pcidev,
+						      msix_entries, 1);
+		}
+
+		if (error) {
+			printk(KERN_WARNING "bfad%d: "
+			       "pci_enable_msix_exact failed (%d), "
+			       "use line based.\n",
+				bfad->inst_no, error);
+			goto line_based;
+		}
+
+		/* Disable INTX in MSI-X mode */
+		pci_read_config_word(pdev, PCI_COMMAND, &reg);
+
+		if (!(reg & PCI_COMMAND_INTX_DISABLE))
+			pci_write_config_word(pdev, PCI_COMMAND,
+				reg | PCI_COMMAND_INTX_DISABLE);
+
+		/* Save the vectors */
+		for (i = 0; i < bfad->nvec; i++) {
+			bfa_trc(bfad, msix_entries[i].vector);
+			bfad->msix_tab[i].msix.vector = msix_entries[i].vector;
+		}
+
+		bfa_msix_init(&bfad->bfa, bfad->nvec);
+
+		bfad->bfad_flags |= BFAD_MSIX_ON;
+
+		return 0;
+	}
+
+line_based:
+	error = request_irq(bfad->pcidev->irq, (irq_handler_t)bfad_intx,
+			    BFAD_IRQ_FLAGS, BFAD_DRIVER_NAME, bfad);
+	if (error)
+		return error;
+
+	bfad->bfad_flags |= BFAD_INTX_ON;
+
+	return 0;
+}
+
+void
+bfad_remove_intr(struct bfad_s *bfad)
+{
+	int	i;
+
+	if (bfad->bfad_flags & BFAD_MSIX_ON) {
+		for (i = 0; i < bfad->nvec; i++)
+			free_irq(bfad->msix_tab[i].msix.vector,
+					&bfad->msix_tab[i]);
+
+		pci_disable_msix(bfad->pcidev);
+		bfad->bfad_flags &= ~BFAD_MSIX_ON;
+	} else if (bfad->bfad_flags & BFAD_INTX_ON) {
+		free_irq(bfad->pcidev->irq, bfad);
+	}
+}
+
+/*
+ * PCI probe entry.
+ */
+int
+bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
+{
+	struct bfad_s	*bfad;
+	int		error = -ENODEV, retval, i;
+
+	/* For single port cards - only claim function 0 */
+	if ((pdev->device == BFA_PCI_DEVICE_ID_FC_8G1P) &&
+		(PCI_FUNC(pdev->devfn) != 0))
+		return -ENODEV;
+
+	bfad = kzalloc(sizeof(struct bfad_s), GFP_KERNEL);
+	if (!bfad) {
+		error = -ENOMEM;
+		goto out;
+	}
+
+	bfad->trcmod = kzalloc(sizeof(struct bfa_trc_mod_s), GFP_KERNEL);
+	if (!bfad->trcmod) {
+		printk(KERN_WARNING "Error alloc trace buffer!\n");
+		error = -ENOMEM;
+		goto out_alloc_trace_failure;
+	}
+
+	/* TRACE INIT */
+	bfa_trc_init(bfad->trcmod);
+	bfa_trc(bfad, bfad_inst);
+
+	/* AEN INIT */
+	INIT_LIST_HEAD(&bfad->free_aen_q);
+	INIT_LIST_HEAD(&bfad->active_aen_q);
+	for (i = 0; i < BFA_AEN_MAX_ENTRY; i++)
+		list_add_tail(&bfad->aen_list[i].qe, &bfad->free_aen_q);
+
+	if (!(bfad_load_fwimg(pdev))) {
+		kfree(bfad->trcmod);
+		goto out_alloc_trace_failure;
+	}
+
+	retval = bfad_pci_init(pdev, bfad);
+	if (retval) {
+		printk(KERN_WARNING "bfad_pci_init failure!\n");
+		error = retval;
+		goto out_pci_init_failure;
+	}
+
+	mutex_lock(&bfad_mutex);
+	bfad->inst_no = bfad_inst++;
+	list_add_tail(&bfad->list_entry, &bfad_list);
+	mutex_unlock(&bfad_mutex);
+
+	/* Initializing the state machine: State set to uninit */
+	bfa_sm_set_state(bfad, bfad_sm_uninit);
+
+	spin_lock_init(&bfad->bfad_lock);
+	spin_lock_init(&bfad->bfad_aen_spinlock);
+
+	pci_set_drvdata(pdev, bfad);
+
+	bfad->ref_count = 0;
+	bfad->pport.bfad = bfad;
+	INIT_LIST_HEAD(&bfad->pbc_vport_list);
+	INIT_LIST_HEAD(&bfad->vport_list);
+
+	/* Setup the debugfs node for this bfad */
+	if (bfa_debugfs_enable)
+		bfad_debugfs_init(&bfad->pport);
+
+	retval = bfad_drv_init(bfad);
+	if (retval != BFA_STATUS_OK)
+		goto out_drv_init_failure;
+
+	bfa_sm_send_event(bfad, BFAD_E_CREATE);
+
+	if (bfa_sm_cmp_state(bfad, bfad_sm_uninit))
+		goto out_bfad_sm_failure;
+
+	return 0;
+
+out_bfad_sm_failure:
+	bfad_hal_mem_release(bfad);
+out_drv_init_failure:
+	/* Remove the debugfs node for this bfad */
+	kfree(bfad->regdata);
+	bfad_debugfs_exit(&bfad->pport);
+	mutex_lock(&bfad_mutex);
+	bfad_inst--;
+	list_del(&bfad->list_entry);
+	mutex_unlock(&bfad_mutex);
+	bfad_pci_uninit(pdev, bfad);
+out_pci_init_failure:
+	kfree(bfad->trcmod);
+out_alloc_trace_failure:
+	kfree(bfad);
+out:
+	return error;
+}
+
+/*
+ * PCI remove entry.
+ */
+void
+bfad_pci_remove(struct pci_dev *pdev)
+{
+	struct bfad_s	      *bfad = pci_get_drvdata(pdev);
+	unsigned long	flags;
+
+	bfa_trc(bfad, bfad->inst_no);
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	if (bfad->bfad_tsk != NULL) {
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		kthread_stop(bfad->bfad_tsk);
+	} else {
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	}
+
+	/* Send Event BFAD_E_STOP */
+	bfa_sm_send_event(bfad, BFAD_E_STOP);
+
+	/* Driver detach and dealloc mem */
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	bfa_detach(&bfad->bfa);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	bfad_hal_mem_release(bfad);
+
+	/* Remove the debugfs node for this bfad */
+	kfree(bfad->regdata);
+	bfad_debugfs_exit(&bfad->pport);
+
+	/* Cleaning the BFAD instance */
+	mutex_lock(&bfad_mutex);
+	bfad_inst--;
+	list_del(&bfad->list_entry);
+	mutex_unlock(&bfad_mutex);
+	bfad_pci_uninit(pdev, bfad);
+
+	kfree(bfad->trcmod);
+	kfree(bfad);
+}
+
+/*
+ * PCI Error Recovery entry, error detected.
+ */
+static pci_ers_result_t
+bfad_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+	struct bfad_s *bfad = pci_get_drvdata(pdev);
+	unsigned long	flags;
+	pci_ers_result_t ret = PCI_ERS_RESULT_NONE;
+
+	dev_printk(KERN_ERR, &pdev->dev,
+		   "error detected state: %d - flags: 0x%x\n",
+		   state, bfad->bfad_flags);
+
+	switch (state) {
+	case pci_channel_io_normal: /* non-fatal error */
+		spin_lock_irqsave(&bfad->bfad_lock, flags);
+		bfad->bfad_flags &= ~BFAD_EEH_BUSY;
+		/* Suspend/fail all bfa operations */
+		bfa_ioc_suspend(&bfad->bfa.ioc);
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		del_timer_sync(&bfad->hal_tmo);
+		ret = PCI_ERS_RESULT_CAN_RECOVER;
+		break;
+	case pci_channel_io_frozen: /* fatal error */
+		init_completion(&bfad->comp);
+		spin_lock_irqsave(&bfad->bfad_lock, flags);
+		bfad->bfad_flags |= BFAD_EEH_BUSY;
+		/* Suspend/fail all bfa operations */
+		bfa_ioc_suspend(&bfad->bfa.ioc);
+		bfa_fcs_stop(&bfad->bfa_fcs);
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		wait_for_completion(&bfad->comp);
+
+		bfad_remove_intr(bfad);
+		del_timer_sync(&bfad->hal_tmo);
+		pci_disable_device(pdev);
+		ret = PCI_ERS_RESULT_NEED_RESET;
+		break;
+	case pci_channel_io_perm_failure: /* PCI Card is DEAD */
+		spin_lock_irqsave(&bfad->bfad_lock, flags);
+		bfad->bfad_flags |= BFAD_EEH_BUSY |
+				    BFAD_EEH_PCI_CHANNEL_IO_PERM_FAILURE;
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+		/* If the error_detected handler is called with the reason
+		 * pci_channel_io_perm_failure, the PCI core will subsequently
+		 * call the driver's remove() entry point to take the device
+		 * out of the system. So defer the cleanup to remove();
+		 * cleaning up here causes inconsistent state during remove().
+		 */
+		ret = PCI_ERS_RESULT_DISCONNECT;
+		break;
+	default:
+		WARN_ON(1);
+	}
+
+	return ret;
+}
+
+int
+restart_bfa(struct bfad_s *bfad)
+{
+	unsigned long flags;
+	struct pci_dev *pdev = bfad->pcidev;
+
+	bfa_attach(&bfad->bfa, bfad, &bfad->ioc_cfg,
+		   &bfad->meminfo, &bfad->hal_pcidev);
+
+	/* Enable interrupts and wait for bfa_init completion */
+	if (bfad_setup_intr(bfad)) {
+		dev_printk(KERN_WARNING, &pdev->dev,
+			   "%s: bfad_setup_intr failed\n", bfad->pci_name);
+		bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED);
+		return -1;
+	}
+
+	init_completion(&bfad->comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	bfa_iocfc_init(&bfad->bfa);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	/* Set up an interrupt handler for each vector */
+	if ((bfad->bfad_flags & BFAD_MSIX_ON) &&
+	    bfad_install_msix_handler(bfad))
+		dev_printk(KERN_WARNING, &pdev->dev,
+			   "%s: install_msix failed.\n", bfad->pci_name);
+
+	bfad_init_timer(bfad);
+	wait_for_completion(&bfad->comp);
+	bfad_drv_start(bfad);
+
+	return 0;
+}
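+
+/*
+ * restart_bfa() re-runs the normal bring-up sequence after a slot
+ * reset: attach the HAL, set up interrupts, kick off IOC init, start
+ * the driver timer and wait for init completion before restarting the
+ * driver. A setup failure posts BFAD_E_INIT_FAILED to the state machine.
+ */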
+
+/*
+ * PCI Error Recovery entry, re-initialize the chip.
+ */
+static pci_ers_result_t
+bfad_pci_slot_reset(struct pci_dev *pdev)
+{
+	struct bfad_s *bfad = pci_get_drvdata(pdev);
+	u8 byte;
+
+	dev_printk(KERN_ERR, &pdev->dev,
+		   "bfad_pci_slot_reset flags: 0x%x\n", bfad->bfad_flags);
+
+	if (pci_enable_device(pdev)) {
+		dev_printk(KERN_ERR, &pdev->dev, "Cannot re-enable "
+			   "PCI device after reset.\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	pci_restore_state(pdev);
+
+	/*
+	 * Read a config byte that can never legitimately be 0xff (e.g. the
+	 * DMA max. payload size register) to make sure we did not hit
+	 * another PCI error in the middle of recovery. If we did, declare
+	 * permanent failure.
+	 */
+	pci_read_config_byte(pdev, 0x68, &byte);
+	if (byte == 0xff) {
+		dev_printk(KERN_ERR, &pdev->dev,
+			   "slot_reset failed ... got another PCI error !\n");
+		goto out_disable_device;
+	}
+
+	pci_save_state(pdev);
+	pci_set_master(pdev);
+
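+	/* Prefer 64-bit DMA addressing; fall back to 32-bit if unsupported */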
+	if (pci_set_dma_mask(bfad->pcidev, DMA_BIT_MASK(64)) != 0)
+		if (pci_set_dma_mask(bfad->pcidev, DMA_BIT_MASK(32)) != 0)
+			goto out_disable_device;
+
+	pci_cleanup_aer_uncorrect_error_status(pdev);
+
+	if (restart_bfa(bfad) == -1)
+		goto out_disable_device;
+
+	pci_enable_pcie_error_reporting(pdev);
+	dev_printk(KERN_WARNING, &pdev->dev,
+		   "slot_reset completed  flags: 0x%x!\n", bfad->bfad_flags);
+
+	return PCI_ERS_RESULT_RECOVERED;
+
+out_disable_device:
+	pci_disable_device(pdev);
+	return PCI_ERS_RESULT_DISCONNECT;
+}
+
+static pci_ers_result_t
+bfad_pci_mmio_enabled(struct pci_dev *pdev)
+{
+	unsigned long	flags;
+	struct bfad_s *bfad = pci_get_drvdata(pdev);
+
+	dev_printk(KERN_INFO, &pdev->dev, "mmio_enabled\n");
+
+	/* Fetch FW diagnostic information */
+	bfa_ioc_debug_save_ftrc(&bfad->bfa.ioc);
+
+	/* Cancel all pending IOs */
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	init_completion(&bfad->comp);
+	bfa_fcs_stop(&bfad->bfa_fcs);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	wait_for_completion(&bfad->comp);
+
+	bfad_remove_intr(bfad);
+	del_timer_sync(&bfad->hal_tmo);
+	pci_disable_device(pdev);
+
+	return PCI_ERS_RESULT_NEED_RESET;
+}
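+
+/*
+ * Recovery flow note: for a non-fatal error, bfad_pci_error_detected()
+ * returns PCI_ERS_RESULT_CAN_RECOVER and the PCI core then invokes
+ * bfad_pci_mmio_enabled() above, which saves firmware diagnostics,
+ * quiesces the driver and asks for a reset; bfad_pci_slot_reset() and
+ * finally bfad_pci_resume() complete the recovery.
+ */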
+
+static void
+bfad_pci_resume(struct pci_dev *pdev)
+{
+	unsigned long	flags;
+	struct bfad_s *bfad = pci_get_drvdata(pdev);
+
+	dev_printk(KERN_WARNING, &pdev->dev, "resume\n");
+
+	/* wait until the link is online */
+	bfad_rport_online_wait(bfad);
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	bfad->bfad_flags &= ~BFAD_EEH_BUSY;
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+}
+
+struct pci_device_id bfad_id_table[] = {
+	{
+		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
+		.device = BFA_PCI_DEVICE_ID_FC_8G2P,
+		.subvendor = PCI_ANY_ID,
+		.subdevice = PCI_ANY_ID,
+	},
+	{
+		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
+		.device = BFA_PCI_DEVICE_ID_FC_8G1P,
+		.subvendor = PCI_ANY_ID,
+		.subdevice = PCI_ANY_ID,
+	},
+	{
+		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
+		.device = BFA_PCI_DEVICE_ID_CT,
+		.subvendor = PCI_ANY_ID,
+		.subdevice = PCI_ANY_ID,
+		.class = (PCI_CLASS_SERIAL_FIBER << 8),
+		.class_mask = ~0,
+	},
+	{
+		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
+		.device = BFA_PCI_DEVICE_ID_CT_FC,
+		.subvendor = PCI_ANY_ID,
+		.subdevice = PCI_ANY_ID,
+		.class = (PCI_CLASS_SERIAL_FIBER << 8),
+		.class_mask = ~0,
+	},
+	{
+		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
+		.device = BFA_PCI_DEVICE_ID_CT2,
+		.subvendor = PCI_ANY_ID,
+		.subdevice = PCI_ANY_ID,
+		.class = (PCI_CLASS_SERIAL_FIBER << 8),
+		.class_mask = ~0,
+	},
+	{
+		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
+		.device = BFA_PCI_DEVICE_ID_CT2_QUAD,
+		.subvendor = PCI_ANY_ID,
+		.subdevice = PCI_ANY_ID,
+		.class = (PCI_CLASS_SERIAL_FIBER << 8),
+		.class_mask = ~0,
+	},
+	{0, 0},
+};
+
+MODULE_DEVICE_TABLE(pci, bfad_id_table);
+
+/*
+ * PCI error recovery handlers.
+ */
+static struct pci_error_handlers bfad_err_handler = {
+	.error_detected = bfad_pci_error_detected,
+	.slot_reset = bfad_pci_slot_reset,
+	.mmio_enabled = bfad_pci_mmio_enabled,
+	.resume = bfad_pci_resume,
+};
+
+static struct pci_driver bfad_pci_driver = {
+	.name = BFAD_DRIVER_NAME,
+	.id_table = bfad_id_table,
+	.probe = bfad_pci_probe,
+	.remove = bfad_pci_remove,
+	.err_handler = &bfad_err_handler,
+};
+
+/*
+ * Driver module init.
+ */
+static int __init
+bfad_init(void)
+{
+	int		error = 0;
+
+	pr_info("QLogic BR-series BFA FC/FCOE SCSI driver - version: %s\n",
+			BFAD_DRIVER_VERSION);
+
+	if (num_sgpgs > 0)
+		num_sgpgs_parm = num_sgpgs;
+
+	error = bfad_im_module_init();
+	if (error) {
+		error = -ENOMEM;
+		printk(KERN_WARNING "bfad_im_module_init failure\n");
+		goto ext;
+	}
+
+	if (strcmp(FCPI_NAME, " fcpim") == 0)
+		supported_fc4s |= BFA_LPORT_ROLE_FCP_IM;
+
+	bfa_auto_recover = ioc_auto_recover;
+	bfa_fcs_rport_set_del_timeout(rport_del_timeout);
+	bfa_fcs_rport_set_max_logins(max_rport_logins);
+
+	error = pci_register_driver(&bfad_pci_driver);
+	if (error) {
+		printk(KERN_WARNING "pci_register_driver failure\n");
+		goto ext;
+	}
+
+	return 0;
+
+ext:
+	bfad_im_module_exit();
+	return error;
+}
+
+/*
+ * Driver module exit.
+ */
+static void __exit
+bfad_exit(void)
+{
+	pci_unregister_driver(&bfad_pci_driver);
+	bfad_im_module_exit();
+	bfad_free_fwimg();
+}
+
+/* Firmware handling */
+static void
+bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
+		u32 *bfi_image_size, char *fw_name)
+{
+	const struct firmware *fw;
+
+	if (request_firmware(&fw, fw_name, &pdev->dev)) {
+		printk(KERN_ALERT "Can't locate firmware %s\n", fw_name);
+		*bfi_image = NULL;
+		goto out;
+	}
+
+	*bfi_image = vmalloc(fw->size);
+	if (*bfi_image == NULL) {
+		printk(KERN_ALERT "Failed to allocate buffer for fw image "
+			"size=%x!\n", (u32) fw->size);
+		goto out;
+	}
+
+	memcpy(*bfi_image, fw->data, fw->size);
+	*bfi_image_size = fw->size/sizeof(u32);
+out:
+	release_firmware(fw);
+}
+
+static u32 *
+bfad_load_fwimg(struct pci_dev *pdev)
+{
+	if (bfa_asic_id_ct2(pdev->device)) {
+		if (bfi_image_ct2_size == 0)
+			bfad_read_firmware(pdev, &bfi_image_ct2,
+				&bfi_image_ct2_size, BFAD_FW_FILE_CT2);
+		return bfi_image_ct2;
+	} else if (bfa_asic_id_ct(pdev->device)) {
+		if (bfi_image_ct_size == 0)
+			bfad_read_firmware(pdev, &bfi_image_ct,
+				&bfi_image_ct_size, BFAD_FW_FILE_CT);
+		return bfi_image_ct;
+	} else if (bfa_asic_id_cb(pdev->device)) {
+		if (bfi_image_cb_size == 0)
+			bfad_read_firmware(pdev, &bfi_image_cb,
+				&bfi_image_cb_size, BFAD_FW_FILE_CB);
+		return bfi_image_cb;
+	}
+
+	return NULL;
+}
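+
+/*
+ * The bfi_image_* buffers are cached module-wide: each ASIC family's
+ * firmware file is read at most once and shared by all matching
+ * adapters until bfad_free_fwimg() releases the buffers at module exit.
+ */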
+
+static void
+bfad_free_fwimg(void)
+{
+	if (bfi_image_ct2_size && bfi_image_ct2)
+		vfree(bfi_image_ct2);
+	if (bfi_image_ct_size && bfi_image_ct)
+		vfree(bfi_image_ct);
+	if (bfi_image_cb_size && bfi_image_cb)
+		vfree(bfi_image_cb);
+}
+
+module_init(bfad_init);
+module_exit(bfad_exit);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("QLogic BR-series Fibre Channel HBA Driver" BFAD_PROTO_NAME);
+MODULE_AUTHOR("QLogic Corporation");
+MODULE_VERSION(BFAD_DRIVER_VERSION);
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
new file mode 100644
index 0000000..26b0fa4
--- /dev/null
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -0,0 +1,997 @@
+/*
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014- QLogic Corporation.
+ * All rights reserved
+ * www.qlogic.com
+ *
+ * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+/*
+ *  bfad_attr.c Linux driver configuration interface module.
+ */
+
+#include "bfad_drv.h"
+#include "bfad_im.h"
+
+/*
+ * FC transport template entry, get SCSI target port ID.
+ */
+static void
+bfad_im_get_starget_port_id(struct scsi_target *starget)
+{
+	struct Scsi_Host *shost;
+	struct bfad_im_port_s *im_port;
+	struct bfad_s         *bfad;
+	struct bfad_itnim_s   *itnim = NULL;
+	u32        fc_id = -1;
+	unsigned long   flags;
+
+	shost = dev_to_shost(starget->dev.parent);
+	im_port = (struct bfad_im_port_s *) shost->hostdata[0];
+	bfad = im_port->bfad;
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+
+	itnim = bfad_get_itnim(im_port, starget->id);
+	if (itnim)
+		fc_id = bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim);
+
+	fc_starget_port_id(starget) = fc_id;
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+}
+
+/*
+ * FC transport template entry, get SCSI target nwwn.
+ */
+static void
+bfad_im_get_starget_node_name(struct scsi_target *starget)
+{
+	struct Scsi_Host *shost;
+	struct bfad_im_port_s *im_port;
+	struct bfad_s         *bfad;
+	struct bfad_itnim_s   *itnim = NULL;
+	u64             node_name = 0;
+	unsigned long   flags;
+
+	shost = dev_to_shost(starget->dev.parent);
+	im_port = (struct bfad_im_port_s *) shost->hostdata[0];
+	bfad = im_port->bfad;
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+
+	itnim = bfad_get_itnim(im_port, starget->id);
+	if (itnim)
+		node_name = bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim);
+
+	fc_starget_node_name(starget) = cpu_to_be64(node_name);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+}
+
+/*
+ * FC transport template entry, get SCSI target pwwn.
+ */
+static void
+bfad_im_get_starget_port_name(struct scsi_target *starget)
+{
+	struct Scsi_Host *shost;
+	struct bfad_im_port_s *im_port;
+	struct bfad_s         *bfad;
+	struct bfad_itnim_s   *itnim = NULL;
+	u64             port_name = 0;
+	unsigned long   flags;
+
+	shost = dev_to_shost(starget->dev.parent);
+	im_port = (struct bfad_im_port_s *) shost->hostdata[0];
+	bfad = im_port->bfad;
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+
+	itnim = bfad_get_itnim(im_port, starget->id);
+	if (itnim)
+		port_name = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim);
+
+	fc_starget_port_name(starget) = cpu_to_be64(port_name);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+}
+
+/*
+ * FC transport template entry, get SCSI host port ID.
+ */
+static void
+bfad_im_get_host_port_id(struct Scsi_Host *shost)
+{
+	struct bfad_im_port_s *im_port =
+			(struct bfad_im_port_s *) shost->hostdata[0];
+	struct bfad_port_s    *port = im_port->port;
+
+	fc_host_port_id(shost) =
+			bfa_hton3b(bfa_fcs_lport_get_fcid(port->fcs_port));
+}
+
+/*
+ * FC transport template entry, get SCSI host port type.
+ */
+static void
+bfad_im_get_host_port_type(struct Scsi_Host *shost)
+{
+	struct bfad_im_port_s *im_port =
+			(struct bfad_im_port_s *) shost->hostdata[0];
+	struct bfad_s         *bfad = im_port->bfad;
+	struct bfa_lport_attr_s port_attr;
+
+	bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
+
+	switch (port_attr.port_type) {
+	case BFA_PORT_TYPE_NPORT:
+		fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
+		break;
+	case BFA_PORT_TYPE_NLPORT:
+		fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
+		break;
+	case BFA_PORT_TYPE_P2P:
+		fc_host_port_type(shost) = FC_PORTTYPE_PTP;
+		break;
+	case BFA_PORT_TYPE_LPORT:
+		fc_host_port_type(shost) = FC_PORTTYPE_LPORT;
+		break;
+	default:
+		fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
+		break;
+	}
+}
+
+/*
+ * FC transport template entry, get SCSI host port state.
+ */
+static void
+bfad_im_get_host_port_state(struct Scsi_Host *shost)
+{
+	struct bfad_im_port_s *im_port =
+			(struct bfad_im_port_s *) shost->hostdata[0];
+	struct bfad_s         *bfad = im_port->bfad;
+	struct bfa_port_attr_s attr;
+
+	bfa_fcport_get_attr(&bfad->bfa, &attr);
+
+	switch (attr.port_state) {
+	case BFA_PORT_ST_LINKDOWN:
+		fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+		break;
+	case BFA_PORT_ST_LINKUP:
+		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+		break;
+	case BFA_PORT_ST_DISABLED:
+	case BFA_PORT_ST_STOPPED:
+	case BFA_PORT_ST_IOCDOWN:
+	case BFA_PORT_ST_IOCDIS:
+		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
+		break;
+	case BFA_PORT_ST_UNINIT:
+	case BFA_PORT_ST_ENABLING_QWAIT:
+	case BFA_PORT_ST_ENABLING:
+	case BFA_PORT_ST_DISABLING_QWAIT:
+	case BFA_PORT_ST_DISABLING:
+	default:
+		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
+		break;
+	}
+}
+
+/*
+ * FC transport template entry, get SCSI host active fc4s.
+ */
+static void
+bfad_im_get_host_active_fc4s(struct Scsi_Host *shost)
+{
+	struct bfad_im_port_s *im_port =
+			(struct bfad_im_port_s *) shost->hostdata[0];
+	struct bfad_port_s    *port = im_port->port;
+
+	memset(fc_host_active_fc4s(shost), 0,
+	       sizeof(fc_host_active_fc4s(shost)));
+
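+	/* For FCP type 0x08 */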
+	if (port->supported_fc4s & BFA_LPORT_ROLE_FCP_IM)
+		fc_host_active_fc4s(shost)[2] = 1;
+
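+	/* For fibre channel services type 0x20 */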
+	fc_host_active_fc4s(shost)[7] = 1;
+}
+
+/*
+ * FC transport template entry, get SCSI host link speed.
+ */
+static void
+bfad_im_get_host_speed(struct Scsi_Host *shost)
+{
+	struct bfad_im_port_s *im_port =
+			(struct bfad_im_port_s *) shost->hostdata[0];
+	struct bfad_s         *bfad = im_port->bfad;
+	struct bfa_port_attr_s attr;
+
+	bfa_fcport_get_attr(&bfad->bfa, &attr);
+	switch (attr.speed) {
+	case BFA_PORT_SPEED_10GBPS:
+		fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
+		break;
+	case BFA_PORT_SPEED_16GBPS:
+		fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
+		break;
+	case BFA_PORT_SPEED_8GBPS:
+		fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
+		break;
+	case BFA_PORT_SPEED_4GBPS:
+		fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
+		break;
+	case BFA_PORT_SPEED_2GBPS:
+		fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
+		break;
+	case BFA_PORT_SPEED_1GBPS:
+		fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
+		break;
+	default:
+		fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
+		break;
+	}
+}
+
+/*
+ * FC transport template entry, get SCSI host fabric name.
+ */
+static void
+bfad_im_get_host_fabric_name(struct Scsi_Host *shost)
+{
+	struct bfad_im_port_s *im_port =
+			(struct bfad_im_port_s *) shost->hostdata[0];
+	struct bfad_port_s    *port = im_port->port;
+	wwn_t           fabric_nwwn = 0;
+
+	fabric_nwwn = bfa_fcs_lport_get_fabric_name(port->fcs_port);
+
+	fc_host_fabric_name(shost) = cpu_to_be64(fabric_nwwn);
+}
+
+/*
+ * FC transport template entry, get BFAD statistics.
+ */
+static struct fc_host_statistics *
+bfad_im_get_stats(struct Scsi_Host *shost)
+{
+	struct bfad_im_port_s *im_port =
+			(struct bfad_im_port_s *) shost->hostdata[0];
+	struct bfad_s         *bfad = im_port->bfad;
+	struct bfad_hal_comp fcomp;
+	union bfa_port_stats_u *fcstats;
+	struct fc_host_statistics *hstats;
+	bfa_status_t    rc;
+	unsigned long   flags;
+
+	fcstats = kzalloc(sizeof(union bfa_port_stats_u), GFP_KERNEL);
+	if (fcstats == NULL)
+		return NULL;
+
+	hstats = &bfad->link_stats;
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	memset(hstats, 0, sizeof(struct fc_host_statistics));
+	rc = bfa_port_get_stats(BFA_FCPORT(&bfad->bfa),
+				fcstats, bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (rc != BFA_STATUS_OK) {
+		kfree(fcstats);
+		return NULL;
+	}
+
+	wait_for_completion(&fcomp.comp);
+
+	/* Fill the fc_host_statistics structure */
+	hstats->seconds_since_last_reset = fcstats->fc.secs_reset;
+	hstats->tx_frames = fcstats->fc.tx_frames;
+	hstats->tx_words  = fcstats->fc.tx_words;
+	hstats->rx_frames = fcstats->fc.rx_frames;
+	hstats->rx_words  = fcstats->fc.rx_words;
+	hstats->lip_count = fcstats->fc.lip_count;
+	hstats->nos_count = fcstats->fc.nos_count;
+	hstats->error_frames = fcstats->fc.error_frames;
+	hstats->dumped_frames = fcstats->fc.dropped_frames;
+	hstats->link_failure_count = fcstats->fc.link_failures;
+	hstats->loss_of_sync_count = fcstats->fc.loss_of_syncs;
+	hstats->loss_of_signal_count = fcstats->fc.loss_of_signals;
+	hstats->prim_seq_protocol_err_count = fcstats->fc.primseq_errs;
+	hstats->invalid_crc_count = fcstats->fc.invalid_crcs;
+
+	kfree(fcstats);
+	return hstats;
+}
+
+/*
+ * FC transport template entry, reset BFAD statistics.
+ */
+static void
+bfad_im_reset_stats(struct Scsi_Host *shost)
+{
+	struct bfad_im_port_s *im_port =
+			(struct bfad_im_port_s *) shost->hostdata[0];
+	struct bfad_s         *bfad = im_port->bfad;
+	struct bfad_hal_comp fcomp;
+	unsigned long   flags;
+	bfa_status_t    rc;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	rc = bfa_port_clear_stats(BFA_FCPORT(&bfad->bfa), bfad_hcb_comp,
+					&fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	if (rc != BFA_STATUS_OK)
+		return;
+
+	wait_for_completion(&fcomp.comp);
+}
+
+/*
+ * FC transport template entry, set rport loss timeout.
+ * Update dev_loss_tmo based on the value pushed down by the stack.
+ * If it is less than the driver's path_tov, set it to path_tov + 1
+ * so that the driver times out before the application does (e.g. with
+ * path_tov 30 and a requested timeout of 20, dev_loss_tmo becomes 31).
+ */
+static void
+bfad_im_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
+{
+	struct bfad_itnim_data_s *itnim_data = rport->dd_data;
+	struct bfad_itnim_s   *itnim = itnim_data->itnim;
+	struct bfad_s         *bfad = itnim->im->bfad;
+	uint16_t path_tov = bfa_fcpim_path_tov_get(&bfad->bfa);
+
+	rport->dev_loss_tmo = timeout;
+	if (timeout < path_tov)
+		rport->dev_loss_tmo = path_tov + 1;
+}
+
+static int
+bfad_im_vport_create(struct fc_vport *fc_vport, bool disable)
+{
+	char *vname = fc_vport->symbolic_name;
+	struct Scsi_Host *shost = fc_vport->shost;
+	struct bfad_im_port_s *im_port =
+		(struct bfad_im_port_s *) shost->hostdata[0];
+	struct bfad_s *bfad = im_port->bfad;
+	struct bfa_lport_cfg_s port_cfg;
+	struct bfad_vport_s *vp;
+	int status = 0, rc;
+	unsigned long flags;
+
+	memset(&port_cfg, 0, sizeof(port_cfg));
+	u64_to_wwn(fc_vport->node_name, (u8 *)&port_cfg.nwwn);
+	u64_to_wwn(fc_vport->port_name, (u8 *)&port_cfg.pwwn);
+	if (strlen(vname) > 0)
+		strcpy((char *)&port_cfg.sym_name, vname);
+	port_cfg.roles = BFA_LPORT_ROLE_FCP_IM;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	list_for_each_entry(vp, &bfad->pbc_vport_list, list_entry) {
+		if (port_cfg.pwwn ==
+				vp->fcs_vport.lport.port_cfg.pwwn) {
+			port_cfg.preboot_vp =
+				vp->fcs_vport.lport.port_cfg.preboot_vp;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	rc = bfad_vport_create(bfad, 0, &port_cfg, &fc_vport->dev);
+	if (rc == BFA_STATUS_OK) {
+		struct bfad_vport_s *vport;
+		struct bfa_fcs_vport_s *fcs_vport;
+		struct Scsi_Host *vshost;
+
+		spin_lock_irqsave(&bfad->bfad_lock, flags);
+		fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0,
+					port_cfg.pwwn);
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		if (fcs_vport == NULL)
+			return VPCERR_BAD_WWN;
+
+		fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
+		if (disable) {
+			spin_lock_irqsave(&bfad->bfad_lock, flags);
+			bfa_fcs_vport_stop(fcs_vport);
+			spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+			fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
+		}
+
+		vport = fcs_vport->vport_drv;
+		vshost = vport->drv_port.im_port->shost;
+		fc_host_node_name(vshost) = wwn_to_u64((u8 *)&port_cfg.nwwn);
+		fc_host_port_name(vshost) = wwn_to_u64((u8 *)&port_cfg.pwwn);
+		fc_host_supported_classes(vshost) = FC_COS_CLASS3;
+
+		memset(fc_host_supported_fc4s(vshost), 0,
+			sizeof(fc_host_supported_fc4s(vshost)));
+
+		/* For FCP type 0x08 */
+		if (supported_fc4s & BFA_LPORT_ROLE_FCP_IM)
+			fc_host_supported_fc4s(vshost)[2] = 1;
+
+		/* For fibre channel services type 0x20 */
+		fc_host_supported_fc4s(vshost)[7] = 1;
+
+		fc_host_supported_speeds(vshost) =
+				bfad_im_supported_speeds(&bfad->bfa);
+		fc_host_maxframe_size(vshost) =
+				bfa_fcport_get_maxfrsize(&bfad->bfa);
+
+		fc_vport->dd_data = vport;
+		vport->drv_port.im_port->fc_vport = fc_vport;
+	} else if (rc == BFA_STATUS_INVALID_WWN)
+		return VPCERR_BAD_WWN;
+	else if (rc == BFA_STATUS_VPORT_EXISTS)
+		return VPCERR_BAD_WWN;
+	else if (rc == BFA_STATUS_VPORT_MAX)
+		return VPCERR_NO_FABRIC_SUPP;
+	else if (rc == BFA_STATUS_VPORT_WWN_BP)
+		return VPCERR_BAD_WWN;
+	else
+		return FC_VPORT_FAILED;
+
+	return status;
+}
+
+int
+bfad_im_issue_fc_host_lip(struct Scsi_Host *shost)
+{
+	struct bfad_im_port_s *im_port =
+			(struct bfad_im_port_s *) shost->hostdata[0];
+	struct bfad_s *bfad = im_port->bfad;
+	struct bfad_hal_comp fcomp;
+	unsigned long flags;
+	uint32_t status;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	status = bfa_port_disable(&bfad->bfa.modules.port,
+					bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	if (status != BFA_STATUS_OK)
+		return -EIO;
+
+	wait_for_completion(&fcomp.comp);
+	if (fcomp.status != BFA_STATUS_OK)
+		return -EIO;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	status = bfa_port_enable(&bfad->bfa.modules.port,
+					bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (status != BFA_STATUS_OK)
+		return -EIO;
+
+	wait_for_completion(&fcomp.comp);
+	if (fcomp.status != BFA_STATUS_OK)
+		return -EIO;
+
+	return 0;
+}
+
+static int
+bfad_im_vport_delete(struct fc_vport *fc_vport)
+{
+	struct bfad_vport_s *vport = (struct bfad_vport_s *)fc_vport->dd_data;
+	struct bfad_im_port_s *im_port =
+			(struct bfad_im_port_s *) vport->drv_port.im_port;
+	struct bfad_s *bfad = im_port->bfad;
+	struct bfa_fcs_vport_s *fcs_vport;
+	struct Scsi_Host *vshost;
+	wwn_t   pwwn;
+	int rc;
+	unsigned long flags;
+	struct completion fcomp;
+
+	if (im_port->flags & BFAD_PORT_DELETE) {
+		bfad_scsi_host_free(bfad, im_port);
+		list_del(&vport->list_entry);
+		kfree(vport);
+		return 0;
+	}
+
+	vshost = vport->drv_port.im_port->shost;
+	u64_to_wwn(fc_host_port_name(vshost), (u8 *)&pwwn);
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, pwwn);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	if (fcs_vport == NULL)
+		return VPCERR_BAD_WWN;
+
+	vport->drv_port.flags |= BFAD_PORT_DELETE;
+
+	vport->comp_del = &fcomp;
+	init_completion(vport->comp_del);
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	rc = bfa_fcs_vport_delete(&vport->fcs_vport);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	if (rc == BFA_STATUS_PBC) {
+		vport->drv_port.flags &= ~BFAD_PORT_DELETE;
+		vport->comp_del = NULL;
+		return -1;
+	}
+
+	wait_for_completion(vport->comp_del);
+
+	bfad_scsi_host_free(bfad, im_port);
+	list_del(&vport->list_entry);
+	kfree(vport);
+
+	return 0;
+}
+
+static int
+bfad_im_vport_disable(struct fc_vport *fc_vport, bool disable)
+{
+	struct bfad_vport_s *vport;
+	struct bfad_s *bfad;
+	struct bfa_fcs_vport_s *fcs_vport;
+	struct Scsi_Host *vshost;
+	wwn_t   pwwn;
+	unsigned long flags;
+
+	vport = (struct bfad_vport_s *)fc_vport->dd_data;
+	bfad = vport->drv_port.bfad;
+	vshost = vport->drv_port.im_port->shost;
+	u64_to_wwn(fc_host_port_name(vshost), (u8 *)&pwwn);
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, pwwn);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	if (fcs_vport == NULL)
+		return VPCERR_BAD_WWN;
+
+	if (disable) {
+		bfa_fcs_vport_stop(fcs_vport);
+		fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
+	} else {
+		bfa_fcs_vport_start(fcs_vport);
+		fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
+	}
+
+	return 0;
+}
+
+void
+bfad_im_vport_set_symbolic_name(struct fc_vport *fc_vport)
+{
+	struct bfad_vport_s *vport = (struct bfad_vport_s *)fc_vport->dd_data;
+	struct bfad_im_port_s *im_port =
+			(struct bfad_im_port_s *)vport->drv_port.im_port;
+	struct bfad_s *bfad = im_port->bfad;
+	struct Scsi_Host *vshost = vport->drv_port.im_port->shost;
+	char *sym_name = fc_vport->symbolic_name;
+	struct bfa_fcs_vport_s *fcs_vport;
+	wwn_t	pwwn;
+	unsigned long flags;
+
+	u64_to_wwn(fc_host_port_name(vshost), (u8 *)&pwwn);
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, pwwn);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	if (fcs_vport == NULL)
+		return;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	if (strlen(sym_name) > 0)
+		bfa_fcs_lport_set_symname(&fcs_vport->lport, sym_name);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+}
+
+struct fc_function_template bfad_im_fc_function_template = {
+
+	/* Target dynamic attributes */
+	.get_starget_port_id = bfad_im_get_starget_port_id,
+	.show_starget_port_id = 1,
+	.get_starget_node_name = bfad_im_get_starget_node_name,
+	.show_starget_node_name = 1,
+	.get_starget_port_name = bfad_im_get_starget_port_name,
+	.show_starget_port_name = 1,
+
+	/* Host dynamic attribute */
+	.get_host_port_id = bfad_im_get_host_port_id,
+	.show_host_port_id = 1,
+
+	/* Host fixed attributes */
+	.show_host_node_name = 1,
+	.show_host_port_name = 1,
+	.show_host_supported_classes = 1,
+	.show_host_supported_fc4s = 1,
+	.show_host_supported_speeds = 1,
+	.show_host_maxframe_size = 1,
+
+	/* More host dynamic attributes */
+	.show_host_port_type = 1,
+	.get_host_port_type = bfad_im_get_host_port_type,
+	.show_host_port_state = 1,
+	.get_host_port_state = bfad_im_get_host_port_state,
+	.show_host_active_fc4s = 1,
+	.get_host_active_fc4s = bfad_im_get_host_active_fc4s,
+	.show_host_speed = 1,
+	.get_host_speed = bfad_im_get_host_speed,
+	.show_host_fabric_name = 1,
+	.get_host_fabric_name = bfad_im_get_host_fabric_name,
+
+	.show_host_symbolic_name = 1,
+
+	/* Statistics */
+	.get_fc_host_stats = bfad_im_get_stats,
+	.reset_fc_host_stats = bfad_im_reset_stats,
+
+	/* Allocation length for host specific data */
+	.dd_fcrport_size = sizeof(struct bfad_itnim_data_s *),
+
+	/* Remote port fixed attributes */
+	.show_rport_maxframe_size = 1,
+	.show_rport_supported_classes = 1,
+	.show_rport_dev_loss_tmo = 1,
+	.set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo,
+	.issue_fc_host_lip = bfad_im_issue_fc_host_lip,
+	.vport_create = bfad_im_vport_create,
+	.vport_delete = bfad_im_vport_delete,
+	.vport_disable = bfad_im_vport_disable,
+	.set_vport_symbolic_name = bfad_im_vport_set_symbolic_name,
+	.bsg_request = bfad_im_bsg_request,
+	.bsg_timeout = bfad_im_bsg_timeout,
+};
+
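+/*
+ * The vport template below mirrors the physical-port template minus the
+ * LIP, vport-management and BSG entry points, which operate only on the
+ * physical port's Scsi_Host.
+ */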
+struct fc_function_template bfad_im_vport_fc_function_template = {
+
+	/* Target dynamic attributes */
+	.get_starget_port_id = bfad_im_get_starget_port_id,
+	.show_starget_port_id = 1,
+	.get_starget_node_name = bfad_im_get_starget_node_name,
+	.show_starget_node_name = 1,
+	.get_starget_port_name = bfad_im_get_starget_port_name,
+	.show_starget_port_name = 1,
+
+	/* Host dynamic attribute */
+	.get_host_port_id = bfad_im_get_host_port_id,
+	.show_host_port_id = 1,
+
+	/* Host fixed attributes */
+	.show_host_node_name = 1,
+	.show_host_port_name = 1,
+	.show_host_supported_classes = 1,
+	.show_host_supported_fc4s = 1,
+	.show_host_supported_speeds = 1,
+	.show_host_maxframe_size = 1,
+
+	/* More host dynamic attributes */
+	.show_host_port_type = 1,
+	.get_host_port_type = bfad_im_get_host_port_type,
+	.show_host_port_state = 1,
+	.get_host_port_state = bfad_im_get_host_port_state,
+	.show_host_active_fc4s = 1,
+	.get_host_active_fc4s = bfad_im_get_host_active_fc4s,
+	.show_host_speed = 1,
+	.get_host_speed = bfad_im_get_host_speed,
+	.show_host_fabric_name = 1,
+	.get_host_fabric_name = bfad_im_get_host_fabric_name,
+
+	.show_host_symbolic_name = 1,
+
+	/* Statistics */
+	.get_fc_host_stats = bfad_im_get_stats,
+	.reset_fc_host_stats = bfad_im_reset_stats,
+
+	/* Allocation length for host specific data */
+	.dd_fcrport_size = sizeof(struct bfad_itnim_data_s *),
+
+	/* Remote port fixed attributes */
+	.show_rport_maxframe_size = 1,
+	.show_rport_supported_classes = 1,
+	.show_rport_dev_loss_tmo = 1,
+	.set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo,
+};
+
+/*
+ *  Scsi_Host_attrs SCSI host attributes
+ */
+static ssize_t
+bfad_im_serial_num_show(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct bfad_im_port_s *im_port =
+			(struct bfad_im_port_s *) shost->hostdata[0];
+	struct bfad_s *bfad = im_port->bfad;
+	char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
+
+	bfa_get_adapter_serial_num(&bfad->bfa, serial_num);
+	return snprintf(buf, PAGE_SIZE, "%s\n", serial_num);
+}
+
+static ssize_t
+bfad_im_model_show(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct bfad_im_port_s *im_port =
+			(struct bfad_im_port_s *) shost->hostdata[0];
+	struct bfad_s *bfad = im_port->bfad;
+	char model[BFA_ADAPTER_MODEL_NAME_LEN];
+
+	bfa_get_adapter_model(&bfad->bfa, model);
+	return snprintf(buf, PAGE_SIZE, "%s\n", model);
+}
+
+static ssize_t
+bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr,
+				 char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct bfad_im_port_s *im_port =
+			(struct bfad_im_port_s *) shost->hostdata[0];
+	struct bfad_s *bfad = im_port->bfad;
+	char model[BFA_ADAPTER_MODEL_NAME_LEN];
+	char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN];
+	int nports = 0;
+
+	bfa_get_adapter_model(&bfad->bfa, model);
+	nports = bfa_get_nports(&bfad->bfa);
+	if (!strcmp(model, "QLogic-425"))
+		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+			"QLogic BR-series 4Gbps PCIe dual port FC HBA");
+	else if (!strcmp(model, "QLogic-825"))
+		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+			"QLogic BR-series 8Gbps PCIe dual port FC HBA");
+	else if (!strcmp(model, "QLogic-42B"))
+		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+			"QLogic BR-series 4Gbps PCIe dual port FC HBA for HP");
+	else if (!strcmp(model, "QLogic-82B"))
+		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+			"QLogic BR-series 8Gbps PCIe dual port FC HBA for HP");
+	else if (!strcmp(model, "QLogic-1010"))
+		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+			"QLogic BR-series 10Gbps single port CNA");
+	else if (!strcmp(model, "QLogic-1020"))
+		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+			"QLogic BR-series 10Gbps dual port CNA");
+	else if (!strcmp(model, "QLogic-1007"))
+		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+			"QLogic BR-series 10Gbps CNA for IBM Blade Center");
+	else if (!strcmp(model, "QLogic-415"))
+		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+			"QLogic BR-series 4Gbps PCIe single port FC HBA");
+	else if (!strcmp(model, "QLogic-815"))
+		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+			"QLogic BR-series 8Gbps PCIe single port FC HBA");
+	else if (!strcmp(model, "QLogic-41B"))
+		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+			"QLogic BR-series 4Gbps PCIe single port FC HBA for HP");
+	else if (!strcmp(model, "QLogic-81B"))
+		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+			"QLogic BR-series 8Gbps PCIe single port FC HBA for HP");
+	else if (!strcmp(model, "QLogic-804"))
+		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+			"QLogic BR-series 8Gbps FC HBA for HP Bladesystem C-class");
+	else if (!strcmp(model, "QLogic-1741"))
+		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+			"QLogic BR-series 10Gbps CNA for Dell M-Series Blade Servers");
+	else if (strstr(model, "QLogic-1860")) {
+		if (nports == 1 && bfa_ioc_is_cna(&bfad->bfa.ioc))
+			snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+				"QLogic BR-series 10Gbps single port CNA");
+		else if (nports == 1 && !bfa_ioc_is_cna(&bfad->bfa.ioc))
+			snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+				"QLogic BR-series 16Gbps PCIe single port FC HBA");
+		else if (nports == 2 && bfa_ioc_is_cna(&bfad->bfa.ioc))
+			snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+				"QLogic BR-series 10Gbps dual port CNA");
+		else if (nports == 2 && !bfa_ioc_is_cna(&bfad->bfa.ioc))
+			snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+				"QLogic BR-series 16Gbps PCIe dual port FC HBA");
+	} else if (!strcmp(model, "QLogic-1867")) {
+		if (nports == 1 && !bfa_ioc_is_cna(&bfad->bfa.ioc))
+			snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+				"QLogic BR-series 16Gbps PCIe single port FC HBA for IBM");
+		else if (nports == 2 && !bfa_ioc_is_cna(&bfad->bfa.ioc))
+			snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+				"QLogic BR-series 16Gbps PCIe dual port FC HBA for IBM");
+	} else
+		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+			"Invalid Model");
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", model_descr);
+}
+
+static ssize_t
+bfad_im_node_name_show(struct device *dev, struct device_attribute *attr,
+				 char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct bfad_im_port_s *im_port =
+			(struct bfad_im_port_s *) shost->hostdata[0];
+	struct bfad_port_s    *port = im_port->port;
+	u64        nwwn;
+
+	nwwn = bfa_fcs_lport_get_nwwn(port->fcs_port);
+	return snprintf(buf, PAGE_SIZE, "0x%llx\n", cpu_to_be64(nwwn));
+}
+
+static ssize_t
+bfad_im_symbolic_name_show(struct device *dev, struct device_attribute *attr,
+				 char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct bfad_im_port_s *im_port =
+			(struct bfad_im_port_s *) shost->hostdata[0];
+	struct bfad_s *bfad = im_port->bfad;
+	struct bfa_lport_attr_s port_attr;
+	char symname[BFA_SYMNAME_MAXLEN];
+
+	bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
+	strlcpy(symname, port_attr.port_cfg.sym_name.symname,
+			BFA_SYMNAME_MAXLEN);
+	return snprintf(buf, PAGE_SIZE, "%s\n", symname);
+}
+
+static ssize_t
+bfad_im_hw_version_show(struct device *dev, struct device_attribute *attr,
+				char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct bfad_im_port_s *im_port =
+			(struct bfad_im_port_s *) shost->hostdata[0];
+	struct bfad_s *bfad = im_port->bfad;
+	char hw_ver[BFA_VERSION_LEN];
+
+	bfa_get_pci_chip_rev(&bfad->bfa, hw_ver);
+	return snprintf(buf, PAGE_SIZE, "%s\n", hw_ver);
+}
+
+static ssize_t
+bfad_im_drv_version_show(struct device *dev, struct device_attribute *attr,
+				char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%s\n", BFAD_DRIVER_VERSION);
+}
+
+static ssize_t
+bfad_im_optionrom_version_show(struct device *dev,
+			 struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct bfad_im_port_s *im_port =
+			(struct bfad_im_port_s *) shost->hostdata[0];
+	struct bfad_s *bfad = im_port->bfad;
+	char optrom_ver[BFA_VERSION_LEN];
+
+	bfa_get_adapter_optrom_ver(&bfad->bfa, optrom_ver);
+	return snprintf(buf, PAGE_SIZE, "%s\n", optrom_ver);
+}
+
+static ssize_t
+bfad_im_fw_version_show(struct device *dev, struct device_attribute *attr,
+				 char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct bfad_im_port_s *im_port =
+			(struct bfad_im_port_s *) shost->hostdata[0];
+	struct bfad_s *bfad = im_port->bfad;
+	char fw_ver[BFA_VERSION_LEN];
+
+	bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver);
+	return snprintf(buf, PAGE_SIZE, "%s\n", fw_ver);
+}
+
+static ssize_t
+bfad_im_num_of_ports_show(struct device *dev, struct device_attribute *attr,
+				char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct bfad_im_port_s *im_port =
+			(struct bfad_im_port_s *) shost->hostdata[0];
+	struct bfad_s *bfad = im_port->bfad;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			bfa_get_nports(&bfad->bfa));
+}
+
+static ssize_t
+bfad_im_drv_name_show(struct device *dev, struct device_attribute *attr,
+				char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%s\n", BFAD_DRIVER_NAME);
+}
+
+static ssize_t
+bfad_im_num_of_discovered_ports_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct bfad_im_port_s *im_port =
+			(struct bfad_im_port_s *) shost->hostdata[0];
+	struct bfad_port_s    *port = im_port->port;
+	struct bfad_s         *bfad = im_port->bfad;
+	int        nrports = 2048;
+	struct bfa_rport_qualifier_s *rports = NULL;
+	unsigned long   flags;
+
+	rports = kcalloc(nrports, sizeof(struct bfa_rport_qualifier_s),
+			 GFP_ATOMIC);
+	if (rports == NULL)
+		return snprintf(buf, PAGE_SIZE, "Failed\n");
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	bfa_fcs_lport_get_rport_quals(port->fcs_port, rports, &nrports);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	kfree(rports);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", nrports);
+}
+
+static          DEVICE_ATTR(serial_number, S_IRUGO,
+				bfad_im_serial_num_show, NULL);
+static          DEVICE_ATTR(model, S_IRUGO, bfad_im_model_show, NULL);
+static          DEVICE_ATTR(model_description, S_IRUGO,
+				bfad_im_model_desc_show, NULL);
+static          DEVICE_ATTR(node_name, S_IRUGO, bfad_im_node_name_show, NULL);
+static          DEVICE_ATTR(symbolic_name, S_IRUGO,
+				bfad_im_symbolic_name_show, NULL);
+static          DEVICE_ATTR(hardware_version, S_IRUGO,
+				bfad_im_hw_version_show, NULL);
+static          DEVICE_ATTR(driver_version, S_IRUGO,
+				bfad_im_drv_version_show, NULL);
+static          DEVICE_ATTR(option_rom_version, S_IRUGO,
+				bfad_im_optionrom_version_show, NULL);
+static          DEVICE_ATTR(firmware_version, S_IRUGO,
+				bfad_im_fw_version_show, NULL);
+static          DEVICE_ATTR(number_of_ports, S_IRUGO,
+				bfad_im_num_of_ports_show, NULL);
+static          DEVICE_ATTR(driver_name, S_IRUGO, bfad_im_drv_name_show, NULL);
+static          DEVICE_ATTR(number_of_discovered_ports, S_IRUGO,
+				bfad_im_num_of_discovered_ports_show, NULL);
+
+struct device_attribute *bfad_im_host_attrs[] = {
+	&dev_attr_serial_number,
+	&dev_attr_model,
+	&dev_attr_model_description,
+	&dev_attr_node_name,
+	&dev_attr_symbolic_name,
+	&dev_attr_hardware_version,
+	&dev_attr_driver_version,
+	&dev_attr_option_rom_version,
+	&dev_attr_firmware_version,
+	&dev_attr_number_of_ports,
+	&dev_attr_driver_name,
+	&dev_attr_number_of_discovered_ports,
+	NULL,
+};
+
+struct device_attribute *bfad_im_vport_attrs[] = {
+	&dev_attr_serial_number,
+	&dev_attr_model,
+	&dev_attr_model_description,
+	&dev_attr_node_name,
+	&dev_attr_symbolic_name,
+	&dev_attr_hardware_version,
+	&dev_attr_driver_version,
+	&dev_attr_option_rom_version,
+	&dev_attr_firmware_version,
+	&dev_attr_number_of_ports,
+	&dev_attr_driver_name,
+	&dev_attr_number_of_discovered_ports,
+	NULL,
+};
+
+
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
new file mode 100644
index 0000000..5d163ca
--- /dev/null
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -0,0 +1,3594 @@
+/*
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014- QLogic Corporation.
+ * All rights reserved
+ * www.qlogic.com
+ *
+ * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/uaccess.h>
+#include "bfad_drv.h"
+#include "bfad_im.h"
+#include "bfad_bsg.h"
+
+BFA_TRC_FILE(LDRV, BSG);
+
+int
+bfad_iocmd_ioc_enable(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	/* If the IOC is not in the disabled state, return */
+	if (!bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		iocmd->status = BFA_STATUS_OK;
+		return 0;
+	}
+
+	init_completion(&bfad->enable_comp);
+	bfa_iocfc_enable(&bfad->bfa);
+	iocmd->status = BFA_STATUS_OK;
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	wait_for_completion(&bfad->enable_comp);
+
+	return 0;
+}
+
+int
+bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	if (bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		iocmd->status = BFA_STATUS_OK;
+		return 0;
+	}
+
+	if (bfad->disable_active) {
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		return -EBUSY;
+	}
+
+	bfad->disable_active = BFA_TRUE;
+	init_completion(&bfad->disable_comp);
+	bfa_iocfc_disable(&bfad->bfa);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	wait_for_completion(&bfad->disable_comp);
+	bfad->disable_active = BFA_FALSE;
+	iocmd->status = BFA_STATUS_OK;
+
+	return 0;
+}
+
+static int
+bfad_iocmd_ioc_get_info(struct bfad_s *bfad, void *cmd)
+{
+	int	i;
+	struct bfa_bsg_ioc_info_s *iocmd = (struct bfa_bsg_ioc_info_s *)cmd;
+	struct bfad_im_port_s	*im_port;
+	struct bfa_port_attr_s	pattr;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	bfa_fcport_get_attr(&bfad->bfa, &pattr);
+	iocmd->nwwn = pattr.nwwn;
+	iocmd->pwwn = pattr.pwwn;
+	iocmd->ioc_type = bfa_get_type(&bfad->bfa);
+	iocmd->mac = bfa_get_mac(&bfad->bfa);
+	iocmd->factory_mac = bfa_get_mfg_mac(&bfad->bfa);
+	bfa_get_adapter_serial_num(&bfad->bfa, iocmd->serialnum);
+	iocmd->factorynwwn = pattr.factorynwwn;
+	iocmd->factorypwwn = pattr.factorypwwn;
+	iocmd->bfad_num = bfad->inst_no;
+	im_port = bfad->pport.im_port;
+	iocmd->host = im_port->shost->host_no;
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	strcpy(iocmd->name, bfad->adapter_name);
+	strcpy(iocmd->port_name, bfad->port_name);
+	strcpy(iocmd->hwpath, bfad->pci_name);
+
+	/* set adapter hw path: truncate pci_name at the second ':',
+	 * e.g. "0000:02:00.0" becomes "0000:02"
+	 */
+	strcpy(iocmd->adapter_hwpath, bfad->pci_name);
+	for (i = 0; i < BFA_STRING_32 && iocmd->adapter_hwpath[i] != ':'; i++)
+		;
+	for (i++; i < BFA_STRING_32 && iocmd->adapter_hwpath[i] != ':'; i++)
+		;
+	iocmd->adapter_hwpath[i] = '\0';
+	iocmd->status = BFA_STATUS_OK;
+	return 0;
+}
+
+static int
+bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_ioc_attr_s *iocmd = (struct bfa_bsg_ioc_attr_s *)cmd;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	bfa_ioc_get_attr(&bfad->bfa.ioc, &iocmd->ioc_attr);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	/* fill in driver attr info */
+	strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME);
+	strlcpy(iocmd->ioc_attr.driver_attr.driver_ver,
+		BFAD_DRIVER_VERSION, BFA_VERSION_LEN);
+	strcpy(iocmd->ioc_attr.driver_attr.fw_ver,
+		iocmd->ioc_attr.adapter_attr.fw_ver);
+	strcpy(iocmd->ioc_attr.driver_attr.bios_ver,
+		iocmd->ioc_attr.adapter_attr.optrom_ver);
+
+	/* copy chip rev info first, otherwise it will be overwritten */
+	memcpy(bfad->pci_attr.chip_rev, iocmd->ioc_attr.pci_attr.chip_rev,
+		sizeof(bfad->pci_attr.chip_rev));
+	memcpy(&iocmd->ioc_attr.pci_attr, &bfad->pci_attr,
+		sizeof(struct bfa_ioc_pci_attr_s));
+
+	iocmd->status = BFA_STATUS_OK;
+	return 0;
+}
+
+int
+bfad_iocmd_ioc_get_stats(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_ioc_stats_s *iocmd = (struct bfa_bsg_ioc_stats_s *)cmd;
+
+	bfa_ioc_get_stats(&bfad->bfa, &iocmd->ioc_stats);
+	iocmd->status = BFA_STATUS_OK;
+	return 0;
+}
+
+int
+bfad_iocmd_ioc_get_fwstats(struct bfad_s *bfad, void *cmd,
+			unsigned int payload_len)
+{
+	struct bfa_bsg_ioc_fwstats_s *iocmd =
+			(struct bfa_bsg_ioc_fwstats_s *)cmd;
+	void	*iocmd_bufptr;
+	unsigned long	flags;
+
+	if (bfad_chk_iocmd_sz(payload_len,
+			sizeof(struct bfa_bsg_ioc_fwstats_s),
+			sizeof(struct bfa_fw_stats_s)) != BFA_STATUS_OK) {
+		iocmd->status = BFA_STATUS_VERSION_FAIL;
+		goto out;
+	}
+
+	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_ioc_fwstats_s);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_ioc_fw_stats_get(&bfad->bfa.ioc, iocmd_bufptr);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	if (iocmd->status != BFA_STATUS_OK)
+		bfa_trc(bfad, iocmd->status);
+out:
+	bfa_trc(bfad, 0x6666);
+	return 0;
+}
+
+int
+bfad_iocmd_ioc_reset_stats(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+	unsigned long	flags;
+
+	if (v_cmd == IOCMD_IOC_RESET_STATS) {
+		bfa_ioc_clear_stats(&bfad->bfa);
+		iocmd->status = BFA_STATUS_OK;
+	} else if (v_cmd == IOCMD_IOC_RESET_FWSTATS) {
+		spin_lock_irqsave(&bfad->bfad_lock, flags);
+		iocmd->status = bfa_ioc_fw_stats_clear(&bfad->bfa.ioc);
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	}
+
+	return 0;
+}
+
+int
+bfad_iocmd_ioc_set_name(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+	struct bfa_bsg_ioc_name_s *iocmd = (struct bfa_bsg_ioc_name_s *) cmd;
+
+	if (v_cmd == IOCMD_IOC_SET_ADAPTER_NAME)
+		strcpy(bfad->adapter_name, iocmd->name);
+	else if (v_cmd == IOCMD_IOC_SET_PORT_NAME)
+		strcpy(bfad->port_name, iocmd->name);
+
+	iocmd->status = BFA_STATUS_OK;
+	return 0;
+}
+
+int
+bfad_iocmd_iocfc_get_attr(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_iocfc_attr_s *iocmd = (struct bfa_bsg_iocfc_attr_s *)cmd;
+
+	iocmd->status = BFA_STATUS_OK;
+	bfa_iocfc_get_attr(&bfad->bfa, &iocmd->iocfc_attr);
+
+	return 0;
+}
+
+int
+bfad_iocmd_ioc_fw_sig_inv(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+	unsigned long flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_ioc_fwsig_invalidate(&bfad->bfa.ioc);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	return 0;
+}
+
+int
+bfad_iocmd_iocfc_set_intr(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_iocfc_intr_s *iocmd = (struct bfa_bsg_iocfc_intr_s *)cmd;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_iocfc_israttr_set(&bfad->bfa, &iocmd->attr);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	return 0;
+}
+
+int
+bfad_iocmd_port_enable(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	unsigned long flags;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_port_enable(&bfad->bfa.modules.port,
+					bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status != BFA_STATUS_OK) {
+		bfa_trc(bfad, iocmd->status);
+		return 0;
+	}
+	wait_for_completion(&fcomp.comp);
+	iocmd->status = fcomp.status;
+	return 0;
+}
+
+int
+bfad_iocmd_port_disable(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	unsigned long flags;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_port_disable(&bfad->bfa.modules.port,
+				bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	if (iocmd->status != BFA_STATUS_OK) {
+		bfa_trc(bfad, iocmd->status);
+		return 0;
+	}
+	wait_for_completion(&fcomp.comp);
+	iocmd->status = fcomp.status;
+	return 0;
+}
+
+static int
+bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_port_attr_s *iocmd = (struct bfa_bsg_port_attr_s *)cmd;
+	struct bfa_lport_attr_s	port_attr;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	bfa_fcport_get_attr(&bfad->bfa, &iocmd->attr);
+	bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	if (iocmd->attr.topology != BFA_PORT_TOPOLOGY_NONE)
+		iocmd->attr.pid = port_attr.pid;
+	else
+		iocmd->attr.pid = 0;
+
+	iocmd->attr.port_type = port_attr.port_type;
+	iocmd->attr.loopback = port_attr.loopback;
+	iocmd->attr.authfail = port_attr.authfail;
+	strlcpy(iocmd->attr.port_symname.symname,
+		port_attr.port_cfg.sym_name.symname,
+		sizeof(iocmd->attr.port_symname.symname));
+
+	iocmd->status = BFA_STATUS_OK;
+	return 0;
+}
+
+int
+bfad_iocmd_port_get_stats(struct bfad_s *bfad, void *cmd,
+			unsigned int payload_len)
+{
+	struct bfa_bsg_port_stats_s *iocmd = (struct bfa_bsg_port_stats_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	void	*iocmd_bufptr;
+	unsigned long	flags;
+
+	if (bfad_chk_iocmd_sz(payload_len,
+			sizeof(struct bfa_bsg_port_stats_s),
+			sizeof(union bfa_port_stats_u)) != BFA_STATUS_OK) {
+		iocmd->status = BFA_STATUS_VERSION_FAIL;
+		return 0;
+	}
+
+	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_port_stats_s);
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_port_get_stats(&bfad->bfa.modules.port,
+				iocmd_bufptr, bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status != BFA_STATUS_OK) {
+		bfa_trc(bfad, iocmd->status);
+		goto out;
+	}
+
+	wait_for_completion(&fcomp.comp);
+	iocmd->status = fcomp.status;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_port_reset_stats(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	unsigned long	flags;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_port_clear_stats(&bfad->bfa.modules.port,
+					bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status != BFA_STATUS_OK) {
+		bfa_trc(bfad, iocmd->status);
+		return 0;
+	}
+	wait_for_completion(&fcomp.comp);
+	iocmd->status = fcomp.status;
+	return 0;
+}
+
+int
+bfad_iocmd_set_port_cfg(struct bfad_s *bfad, void *iocmd, unsigned int v_cmd)
+{
+	struct bfa_bsg_port_cfg_s *cmd = (struct bfa_bsg_port_cfg_s *)iocmd;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	if (v_cmd == IOCMD_PORT_CFG_TOPO)
+		cmd->status = bfa_fcport_cfg_topology(&bfad->bfa, cmd->param);
+	else if (v_cmd == IOCMD_PORT_CFG_SPEED)
+		cmd->status = bfa_fcport_cfg_speed(&bfad->bfa, cmd->param);
+	else if (v_cmd == IOCMD_PORT_CFG_ALPA)
+		cmd->status = bfa_fcport_cfg_hardalpa(&bfad->bfa, cmd->param);
+	else if (v_cmd == IOCMD_PORT_CLR_ALPA)
+		cmd->status = bfa_fcport_clr_hardalpa(&bfad->bfa);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	return 0;
+}
+
+int
+bfad_iocmd_port_cfg_maxfrsize(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_port_cfg_maxfrsize_s *iocmd =
+				(struct bfa_bsg_port_cfg_maxfrsize_s *)cmd;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_fcport_cfg_maxfrsize(&bfad->bfa, iocmd->maxfrsize);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	return 0;
+}
+
+int
+bfad_iocmd_port_cfg_bbcr(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
+{
+	struct bfa_bsg_bbcr_enable_s *iocmd =
+			(struct bfa_bsg_bbcr_enable_s *)pcmd;
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	if (cmd == IOCMD_PORT_BBCR_ENABLE)
+		rc = bfa_fcport_cfg_bbcr(&bfad->bfa, BFA_TRUE, iocmd->bb_scn);
+	else if (cmd == IOCMD_PORT_BBCR_DISABLE)
+		rc = bfa_fcport_cfg_bbcr(&bfad->bfa, BFA_FALSE, 0);
+	else {
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		return -EINVAL;
+	}
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	iocmd->status = rc;
+	return 0;
+}
+
+int
+bfad_iocmd_port_get_bbcr_attr(struct bfad_s *bfad, void *pcmd)
+{
+	struct bfa_bsg_bbcr_attr_s *iocmd = (struct bfa_bsg_bbcr_attr_s *) pcmd;
+	unsigned long flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status =
+		bfa_fcport_get_bbcr_attr(&bfad->bfa, &iocmd->attr);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	return 0;
+}
+
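+/*
+ * Look up a logical port by vf_id/pwwn and return its attributes.
+ */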
+static int
+bfad_iocmd_lport_get_attr(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_fcs_lport_s	*fcs_port;
+	struct bfa_bsg_lport_attr_s *iocmd = (struct bfa_bsg_lport_attr_s *)cmd;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+				iocmd->vf_id, iocmd->pwwn);
+	if (fcs_port == NULL) {
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+		goto out;
+	}
+
+	bfa_fcs_lport_get_attr(fcs_port, &iocmd->port_attr);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	iocmd->status = BFA_STATUS_OK;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_lport_get_stats(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_fcs_lport_s *fcs_port;
+	struct bfa_bsg_lport_stats_s *iocmd =
+			(struct bfa_bsg_lport_stats_s *)cmd;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+				iocmd->vf_id, iocmd->pwwn);
+	if (fcs_port == NULL) {
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+		goto out;
+	}
+
+	bfa_fcs_lport_get_stats(fcs_port, &iocmd->port_stats);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	iocmd->status = BFA_STATUS_OK;
+out:
+	return 0;
+}
+
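+/*
+ * Clear a logical port's statistics, along with the IO statistics of
+ * every active itnim bound to that port's lp_tag.
+ */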
+int
+bfad_iocmd_lport_reset_stats(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_fcs_lport_s *fcs_port;
+	struct bfa_bsg_reset_stats_s *iocmd =
+			(struct bfa_bsg_reset_stats_s *)cmd;
+	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
+	struct list_head *qe, *qen;
+	struct bfa_itnim_s *itnim;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+				iocmd->vf_id, iocmd->vpwwn);
+	if (fcs_port == NULL) {
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+		goto out;
+	}
+
+	bfa_fcs_lport_clear_stats(fcs_port);
+	/* clear IO stats from all active itnims */
+	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
+		itnim = (struct bfa_itnim_s *) qe;
+		if (itnim->rport->rport_info.lp_tag != fcs_port->lp_tag)
+			continue;
+		bfa_itnim_clear_stats(itnim);
+	}
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	iocmd->status = BFA_STATUS_OK;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_lport_get_iostats(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_fcs_lport_s *fcs_port;
+	struct bfa_bsg_lport_iostats_s *iocmd =
+			(struct bfa_bsg_lport_iostats_s *)cmd;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+				iocmd->vf_id, iocmd->pwwn);
+	if (fcs_port == NULL) {
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+		goto out;
+	}
+
+	bfa_fcpim_port_iostats(&bfad->bfa, &iocmd->iostats,
+			fcs_port->lp_tag);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	iocmd->status = BFA_STATUS_OK;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_lport_get_rports(struct bfad_s *bfad, void *cmd,
+			unsigned int payload_len)
+{
+	struct bfa_bsg_lport_get_rports_s *iocmd =
+			(struct bfa_bsg_lport_get_rports_s *)cmd;
+	struct bfa_fcs_lport_s *fcs_port;
+	unsigned long	flags;
+	void	*iocmd_bufptr;
+
+	if (iocmd->nrports == 0)
+		return -EINVAL;
+
+	if (bfad_chk_iocmd_sz(payload_len,
+			sizeof(struct bfa_bsg_lport_get_rports_s),
+			sizeof(struct bfa_rport_qualifier_s) * iocmd->nrports)
+			!= BFA_STATUS_OK) {
+		iocmd->status = BFA_STATUS_VERSION_FAIL;
+		return 0;
+	}
+
+	iocmd_bufptr = (char *)iocmd +
+			sizeof(struct bfa_bsg_lport_get_rports_s);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+				iocmd->vf_id, iocmd->pwwn);
+	if (fcs_port == NULL) {
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		bfa_trc(bfad, 0);
+		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+		goto out;
+	}
+
+	bfa_fcs_lport_get_rport_quals(fcs_port,
+			(struct bfa_rport_qualifier_s *)iocmd_bufptr,
+			&iocmd->nrports);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	iocmd->status = BFA_STATUS_OK;
+out:
+	return 0;
+}
+
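+/*
+ * Return remote port attributes. When a PID is supplied the rport is
+ * matched by both WWN and PID, otherwise by WWN alone.
+ */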
+int
+bfad_iocmd_rport_get_attr(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_rport_attr_s *iocmd = (struct bfa_bsg_rport_attr_s *)cmd;
+	struct bfa_fcs_lport_s *fcs_port;
+	struct bfa_fcs_rport_s *fcs_rport;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+				iocmd->vf_id, iocmd->pwwn);
+	if (fcs_port == NULL) {
+		bfa_trc(bfad, 0);
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+		goto out;
+	}
+
+	if (iocmd->pid)
+		fcs_rport = bfa_fcs_lport_get_rport_by_qualifier(fcs_port,
+						iocmd->rpwwn, iocmd->pid);
+	else
+		fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
+	if (fcs_rport == NULL) {
+		bfa_trc(bfad, 0);
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+		goto out;
+	}
+
+	bfa_fcs_rport_get_attr(fcs_rport, &iocmd->attr);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	iocmd->status = BFA_STATUS_OK;
+out:
+	return 0;
+}
+
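+/*
+ * Map a remote port WWN to its SCSI address (host number and target
+ * id); bus and lun are always reported as zero.
+ */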
+static int
+bfad_iocmd_rport_get_addr(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_rport_scsi_addr_s *iocmd =
+			(struct bfa_bsg_rport_scsi_addr_s *)cmd;
+	struct bfa_fcs_lport_s	*fcs_port;
+	struct bfa_fcs_itnim_s	*fcs_itnim;
+	struct bfad_itnim_s	*drv_itnim;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+				iocmd->vf_id, iocmd->pwwn);
+	if (fcs_port == NULL) {
+		bfa_trc(bfad, 0);
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+		goto out;
+	}
+
+	fcs_itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
+	if (fcs_itnim == NULL) {
+		bfa_trc(bfad, 0);
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+		goto out;
+	}
+
+	drv_itnim = fcs_itnim->itnim_drv;
+
+	if (drv_itnim && drv_itnim->im_port)
+		iocmd->host = drv_itnim->im_port->shost->host_no;
+	else {
+		bfa_trc(bfad, 0);
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+		goto out;
+	}
+
+	iocmd->target = drv_itnim->scsi_tgt_id;
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	iocmd->bus = 0;
+	iocmd->lun = 0;
+	iocmd->status = BFA_STATUS_OK;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_rport_get_stats(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_rport_stats_s *iocmd =
+			(struct bfa_bsg_rport_stats_s *)cmd;
+	struct bfa_fcs_lport_s *fcs_port;
+	struct bfa_fcs_rport_s *fcs_rport;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+				iocmd->vf_id, iocmd->pwwn);
+	if (fcs_port == NULL) {
+		bfa_trc(bfad, 0);
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+		goto out;
+	}
+
+	fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
+	if (fcs_rport == NULL) {
+		bfa_trc(bfad, 0);
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+		goto out;
+	}
+
+	memcpy((void *)&iocmd->stats, (void *)&fcs_rport->stats,
+		sizeof(struct bfa_rport_stats_s));
+	if (bfa_fcs_rport_get_halrport(fcs_rport)) {
+		memcpy((void *)&iocmd->stats.hal_stats,
+		       (void *)&(bfa_fcs_rport_get_halrport(fcs_rport)->stats),
+			sizeof(struct bfa_rport_hal_stats_s));
+	}
+
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	iocmd->status = BFA_STATUS_OK;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_rport_clr_stats(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_rport_reset_stats_s *iocmd =
+				(struct bfa_bsg_rport_reset_stats_s *)cmd;
+	struct bfa_fcs_lport_s *fcs_port;
+	struct bfa_fcs_rport_s *fcs_rport;
+	struct bfa_rport_s *rport;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+				iocmd->vf_id, iocmd->pwwn);
+	if (fcs_port == NULL) {
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+		goto out;
+	}
+
+	fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
+	if (fcs_rport == NULL) {
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+		goto out;
+	}
+
+	memset((char *)&fcs_rport->stats, 0, sizeof(struct bfa_rport_stats_s));
+	rport = bfa_fcs_rport_get_halrport(fcs_rport);
+	if (rport)
+		memset(&rport->stats, 0, sizeof(rport->stats));
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	iocmd->status = BFA_STATUS_OK;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_rport_set_speed(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_rport_set_speed_s *iocmd =
+				(struct bfa_bsg_rport_set_speed_s *)cmd;
+	struct bfa_fcs_lport_s *fcs_port;
+	struct bfa_fcs_rport_s *fcs_rport;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+				iocmd->vf_id, iocmd->pwwn);
+	if (fcs_port == NULL) {
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+		goto out;
+	}
+
+	fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
+	if (fcs_rport == NULL) {
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+		goto out;
+	}
+
+	fcs_rport->rpf.assigned_speed  = iocmd->speed;
+	/* Set this speed in f/w only if the RPSC speed is not available */
+	if (fcs_rport->rpf.rpsc_speed == BFA_PORT_SPEED_UNKNOWN)
+		if (fcs_rport->bfa_rport)
+			bfa_rport_speed(fcs_rport->bfa_rport, iocmd->speed);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	iocmd->status = BFA_STATUS_OK;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_vport_get_attr(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_fcs_vport_s *fcs_vport;
+	struct bfa_bsg_vport_attr_s *iocmd = (struct bfa_bsg_vport_attr_s *)cmd;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
+				iocmd->vf_id, iocmd->vpwwn);
+	if (fcs_vport == NULL) {
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
+		goto out;
+	}
+
+	bfa_fcs_vport_get_attr(fcs_vport, &iocmd->vport_attr);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	iocmd->status = BFA_STATUS_OK;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_vport_get_stats(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_fcs_vport_s *fcs_vport;
+	struct bfa_bsg_vport_stats_s *iocmd =
+				(struct bfa_bsg_vport_stats_s *)cmd;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
+				iocmd->vf_id, iocmd->vpwwn);
+	if (fcs_vport == NULL) {
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
+		goto out;
+	}
+
+	memcpy((void *)&iocmd->vport_stats, (void *)&fcs_vport->vport_stats,
+		sizeof(struct bfa_vport_stats_s));
+	memcpy((void *)&iocmd->vport_stats.port_stats,
+	       (void *)&fcs_vport->lport.stats,
+		sizeof(struct bfa_lport_stats_s));
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	iocmd->status = BFA_STATUS_OK;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_vport_clr_stats(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_fcs_vport_s *fcs_vport;
+	struct bfa_bsg_reset_stats_s *iocmd =
+				(struct bfa_bsg_reset_stats_s *)cmd;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
+				iocmd->vf_id, iocmd->vpwwn);
+	if (fcs_vport == NULL) {
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
+		goto out;
+	}
+
+	memset(&fcs_vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s));
+	memset(&fcs_vport->lport.stats, 0, sizeof(struct bfa_lport_stats_s));
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	iocmd->status = BFA_STATUS_OK;
+out:
+	return 0;
+}
+
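+/*
+ * Return the port WWNs of all logical ports on a fabric (vf). The
+ * trailing buffer must be sized for iocmd->nports entries.
+ */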
+static int
+bfad_iocmd_fabric_get_lports(struct bfad_s *bfad, void *cmd,
+			unsigned int payload_len)
+{
+	struct bfa_bsg_fabric_get_lports_s *iocmd =
+			(struct bfa_bsg_fabric_get_lports_s *)cmd;
+	bfa_fcs_vf_t	*fcs_vf;
+	uint32_t	nports = iocmd->nports;
+	unsigned long	flags;
+	void	*iocmd_bufptr;
+
+	if (nports == 0) {
+		iocmd->status = BFA_STATUS_EINVAL;
+		goto out;
+	}
+
+	if (bfad_chk_iocmd_sz(payload_len,
+		sizeof(struct bfa_bsg_fabric_get_lports_s),
+		sizeof(wwn_t) * iocmd->nports) != BFA_STATUS_OK) {
+		iocmd->status = BFA_STATUS_VERSION_FAIL;
+		goto out;
+	}
+
+	iocmd_bufptr = (char *)iocmd +
+			sizeof(struct bfa_bsg_fabric_get_lports_s);
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
+	if (fcs_vf == NULL) {
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		iocmd->status = BFA_STATUS_UNKNOWN_VFID;
+		goto out;
+	}
+	bfa_fcs_vf_get_ports(fcs_vf, (wwn_t *)iocmd_bufptr, &nports);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	iocmd->nports = nports;
+	iocmd->status = BFA_STATUS_OK;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_qos_set_bw(struct bfad_s *bfad, void *pcmd)
+{
+	struct bfa_bsg_qos_bw_s *iocmd = (struct bfa_bsg_qos_bw_s *)pcmd;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_fcport_set_qos_bw(&bfad->bfa, &iocmd->qos_bw);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	return 0;
+}
+
+int
+bfad_iocmd_ratelim(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
+{
+	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+
+	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
+		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
+		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
+	else {
+		if (cmd == IOCMD_RATELIM_ENABLE)
+			fcport->cfg.ratelimit = BFA_TRUE;
+		else if (cmd == IOCMD_RATELIM_DISABLE)
+			fcport->cfg.ratelimit = BFA_FALSE;
+
+		if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN)
+			fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS;
+
+		iocmd->status = BFA_STATUS_OK;
+	}
+
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	return 0;
+}
+
+int
+bfad_iocmd_ratelim_speed(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
+{
+	struct bfa_bsg_trl_speed_s *iocmd = (struct bfa_bsg_trl_speed_s *)pcmd;
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+
+	/* Auto and speeds greater than the supported speed are invalid */
+	if ((iocmd->speed == BFA_PORT_SPEED_AUTO) ||
+	    (iocmd->speed > fcport->speed_sup)) {
+		iocmd->status = BFA_STATUS_UNSUPP_SPEED;
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		return 0;
+	}
+
+	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
+		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
+		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
+	else {
+		fcport->cfg.trl_def_speed = iocmd->speed;
+		iocmd->status = BFA_STATUS_OK;
+	}
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	return 0;
+}
+
+int
+bfad_iocmd_cfg_fcpim(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_fcpim_s *iocmd = (struct bfa_bsg_fcpim_s *)cmd;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	bfa_fcpim_path_tov_set(&bfad->bfa, iocmd->param);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	iocmd->status = BFA_STATUS_OK;
+	return 0;
+}
+
+int
+bfad_iocmd_fcpim_get_modstats(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_fcpim_modstats_s *iocmd =
+			(struct bfa_bsg_fcpim_modstats_s *)cmd;
+	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
+	struct list_head *qe, *qen;
+	struct bfa_itnim_s *itnim;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	/* accumulate IO stats from itnim */
+	memset((void *)&iocmd->modstats, 0, sizeof(struct bfa_itnim_iostats_s));
+	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
+		itnim = (struct bfa_itnim_s *) qe;
+		bfa_fcpim_add_stats(&iocmd->modstats, &(itnim->stats));
+	}
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	iocmd->status = BFA_STATUS_OK;
+	return 0;
+}
+
+int
+bfad_iocmd_fcpim_clr_modstats(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_fcpim_modstatsclr_s *iocmd =
+				(struct bfa_bsg_fcpim_modstatsclr_s *)cmd;
+	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
+	struct list_head *qe, *qen;
+	struct bfa_itnim_s *itnim;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
+		itnim = (struct bfa_itnim_s *) qe;
+		bfa_itnim_clear_stats(itnim);
+	}
+	memset(&fcpim->del_itn_stats, 0,
+		sizeof(struct bfa_fcpim_del_itn_stats_s));
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	iocmd->status = BFA_STATUS_OK;
+	return 0;
+}
+
+int
+bfad_iocmd_fcpim_get_del_itn_stats(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_fcpim_del_itn_stats_s *iocmd =
+			(struct bfa_bsg_fcpim_del_itn_stats_s *)cmd;
+	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	memcpy((void *)&iocmd->modstats, (void *)&fcpim->del_itn_stats,
+		sizeof(struct bfa_fcpim_del_itn_stats_s));
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	iocmd->status = BFA_STATUS_OK;
+	return 0;
+}
+
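+/*
+ * Return the FCS itnim attributes of the rport identified by rpwwn on
+ * the given logical port.
+ */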
+static int
+bfad_iocmd_itnim_get_attr(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_itnim_attr_s *iocmd = (struct bfa_bsg_itnim_attr_s *)cmd;
+	struct bfa_fcs_lport_s	*fcs_port;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+				iocmd->vf_id, iocmd->lpwwn);
+	if (!fcs_port)
+		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+	else
+		iocmd->status = bfa_fcs_itnim_attr_get(fcs_port,
+					iocmd->rpwwn, &iocmd->attr);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	return 0;
+}
+
+static int
+bfad_iocmd_itnim_get_iostats(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_itnim_iostats_s *iocmd =
+			(struct bfa_bsg_itnim_iostats_s *)cmd;
+	struct bfa_fcs_lport_s *fcs_port;
+	struct bfa_fcs_itnim_s *itnim;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+				iocmd->vf_id, iocmd->lpwwn);
+	if (!fcs_port) {
+		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+		bfa_trc(bfad, 0);
+	} else {
+		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
+		if (itnim == NULL)
+			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+		else {
+			iocmd->status = BFA_STATUS_OK;
+			if (bfa_fcs_itnim_get_halitn(itnim))
+				memcpy((void *)&iocmd->iostats, (void *)
+				&(bfa_fcs_itnim_get_halitn(itnim)->stats),
+				       sizeof(struct bfa_itnim_iostats_s));
+		}
+	}
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	return 0;
+}
+
+static int
+bfad_iocmd_itnim_reset_stats(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_rport_reset_stats_s *iocmd =
+			(struct bfa_bsg_rport_reset_stats_s *)cmd;
+	struct bfa_fcs_lport_s	*fcs_port;
+	struct bfa_fcs_itnim_s	*itnim;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+				iocmd->vf_id, iocmd->pwwn);
+	if (!fcs_port)
+		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+	else {
+		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
+		if (itnim == NULL)
+			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+		else {
+			iocmd->status = BFA_STATUS_OK;
+			bfa_fcs_itnim_stats_clear(fcs_port, iocmd->rpwwn);
+			bfa_itnim_clear_stats(bfa_fcs_itnim_get_halitn(itnim));
+		}
+	}
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	return 0;
+}
+
+static int
+bfad_iocmd_itnim_get_itnstats(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_itnim_itnstats_s *iocmd =
+			(struct bfa_bsg_itnim_itnstats_s *)cmd;
+	struct bfa_fcs_lport_s *fcs_port;
+	struct bfa_fcs_itnim_s *itnim;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+				iocmd->vf_id, iocmd->lpwwn);
+	if (!fcs_port) {
+		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+		bfa_trc(bfad, 0);
+	} else {
+		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
+		if (itnim == NULL)
+			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+		else {
+			iocmd->status = BFA_STATUS_OK;
+			bfa_fcs_itnim_stats_get(fcs_port, iocmd->rpwwn,
+					&iocmd->itnstats);
+		}
+	}
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	return 0;
+}
+
+int
+bfad_iocmd_fcport_enable(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+	unsigned long flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_fcport_enable(&bfad->bfa);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	return 0;
+}
+
+int
+bfad_iocmd_fcport_disable(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+	unsigned long flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_fcport_disable(&bfad->bfa);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	return 0;
+}
+
+int
+bfad_iocmd_ioc_get_pcifn_cfg(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_pcifn_cfg_s *iocmd = (struct bfa_bsg_pcifn_cfg_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	unsigned long flags;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_ablk_query(&bfad->bfa.modules.ablk,
+				&iocmd->pcifn_cfg,
+				bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status != BFA_STATUS_OK)
+		goto out;
+
+	wait_for_completion(&fcomp.comp);
+	iocmd->status = fcomp.status;
+out:
+	return 0;
+}
+
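+/*
+ * Create a PCI function with the requested class and bandwidth range;
+ * the assigned pcifn_id is written back into the command.
+ */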
+int
+bfad_iocmd_pcifn_create(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	unsigned long flags;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_ablk_pf_create(&bfad->bfa.modules.ablk,
+				&iocmd->pcifn_id, iocmd->port,
+				iocmd->pcifn_class, iocmd->bw_min,
+				iocmd->bw_max, bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status != BFA_STATUS_OK)
+		goto out;
+
+	wait_for_completion(&fcomp.comp);
+	iocmd->status = fcomp.status;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_pcifn_delete(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	unsigned long flags;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_ablk_pf_delete(&bfad->bfa.modules.ablk,
+				iocmd->pcifn_id,
+				bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status != BFA_STATUS_OK)
+		goto out;
+
+	wait_for_completion(&fcomp.comp);
+	iocmd->status = fcomp.status;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_pcifn_bw(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	unsigned long flags;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_ablk_pf_update(&bfad->bfa.modules.ablk,
+				iocmd->pcifn_id, iocmd->bw_min,
+				iocmd->bw_max, bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	bfa_trc(bfad, iocmd->status);
+	if (iocmd->status != BFA_STATUS_OK)
+		goto out;
+
+	wait_for_completion(&fcomp.comp);
+	iocmd->status = fcomp.status;
+	bfa_trc(bfad, iocmd->status);
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_adapter_cfg_mode(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_adapter_cfg_mode_s *iocmd =
+			(struct bfa_bsg_adapter_cfg_mode_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	unsigned long flags;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_ablk_adapter_config(&bfad->bfa.modules.ablk,
+				iocmd->cfg.mode, iocmd->cfg.max_pf,
+				iocmd->cfg.max_vf, bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status != BFA_STATUS_OK)
+		goto out;
+
+	wait_for_completion(&fcomp.comp);
+	iocmd->status = fcomp.status;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_port_cfg_mode(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_port_cfg_mode_s *iocmd =
+			(struct bfa_bsg_port_cfg_mode_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	unsigned long flags;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_ablk_port_config(&bfad->bfa.modules.ablk,
+				iocmd->instance, iocmd->cfg.mode,
+				iocmd->cfg.max_pf, iocmd->cfg.max_vf,
+				bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status != BFA_STATUS_OK)
+		goto out;
+
+	wait_for_completion(&fcomp.comp);
+	iocmd->status = fcomp.status;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_ablk_optrom(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
+{
+	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
+	struct bfad_hal_comp fcomp;
+	unsigned long   flags;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	if (cmd == IOCMD_FLASH_ENABLE_OPTROM)
+		iocmd->status = bfa_ablk_optrom_en(&bfad->bfa.modules.ablk,
+					bfad_hcb_comp, &fcomp);
+	else
+		iocmd->status = bfa_ablk_optrom_dis(&bfad->bfa.modules.ablk,
+					bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	if (iocmd->status != BFA_STATUS_OK)
+		goto out;
+
+	wait_for_completion(&fcomp.comp);
+	iocmd->status = fcomp.status;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_faa_query(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_faa_attr_s *iocmd = (struct bfa_bsg_faa_attr_s *)cmd;
+	struct bfad_hal_comp    fcomp;
+	unsigned long   flags;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_faa_query(&bfad->bfa, &iocmd->faa_attr,
+				bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	if (iocmd->status != BFA_STATUS_OK)
+		goto out;
+
+	wait_for_completion(&fcomp.comp);
+	iocmd->status = fcomp.status;
+out:
+	return 0;
+}
+
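+/*
+ * Read the CEE attributes into the trailing buffer; bfad_mutex is held
+ * across the firmware request in addition to the driver spinlock.
+ */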
+int
+bfad_iocmd_cee_attr(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
+{
+	struct bfa_bsg_cee_attr_s *iocmd =
+				(struct bfa_bsg_cee_attr_s *)cmd;
+	void	*iocmd_bufptr;
+	struct bfad_hal_comp	cee_comp;
+	unsigned long	flags;
+
+	if (bfad_chk_iocmd_sz(payload_len,
+			sizeof(struct bfa_bsg_cee_attr_s),
+			sizeof(struct bfa_cee_attr_s)) != BFA_STATUS_OK) {
+		iocmd->status = BFA_STATUS_VERSION_FAIL;
+		return 0;
+	}
+
+	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_attr_s);
+
+	cee_comp.status = 0;
+	init_completion(&cee_comp.comp);
+	mutex_lock(&bfad_mutex);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_cee_get_attr(&bfad->bfa.modules.cee, iocmd_bufptr,
+					 bfad_hcb_comp, &cee_comp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status != BFA_STATUS_OK) {
+		mutex_unlock(&bfad_mutex);
+		bfa_trc(bfad, 0x5555);
+		goto out;
+	}
+	wait_for_completion(&cee_comp.comp);
+	mutex_unlock(&bfad_mutex);
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_cee_get_stats(struct bfad_s *bfad, void *cmd,
+			unsigned int payload_len)
+{
+	struct bfa_bsg_cee_stats_s *iocmd =
+				(struct bfa_bsg_cee_stats_s *)cmd;
+	void	*iocmd_bufptr;
+	struct bfad_hal_comp	cee_comp;
+	unsigned long	flags;
+
+	if (bfad_chk_iocmd_sz(payload_len,
+			sizeof(struct bfa_bsg_cee_stats_s),
+			sizeof(struct bfa_cee_stats_s)) != BFA_STATUS_OK) {
+		iocmd->status = BFA_STATUS_VERSION_FAIL;
+		return 0;
+	}
+
+	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_stats_s);
+
+	cee_comp.status = 0;
+	init_completion(&cee_comp.comp);
+	mutex_lock(&bfad_mutex);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_cee_get_stats(&bfad->bfa.modules.cee, iocmd_bufptr,
+					bfad_hcb_comp, &cee_comp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status != BFA_STATUS_OK) {
+		mutex_unlock(&bfad_mutex);
+		bfa_trc(bfad, 0x5555);
+		goto out;
+	}
+	wait_for_completion(&cee_comp.comp);
+	mutex_unlock(&bfad_mutex);
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_cee_reset_stats(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_cee_reset_stats(&bfad->bfa.modules.cee, NULL, NULL);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status != BFA_STATUS_OK)
+		bfa_trc(bfad, 0x5555);
+	return 0;
+}
+
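+/*
+ * Query the SFP media type. BFA_STATUS_SFP_NOT_READY means the query
+ * was forwarded to the firmware and is the only case that waits for
+ * completion; any other status is final.
+ */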
+int
+bfad_iocmd_sfp_media(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_sfp_media_s *iocmd = (struct bfa_bsg_sfp_media_s *)cmd;
+	struct bfad_hal_comp	fcomp;
+	unsigned long	flags;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_sfp_media(BFA_SFP_MOD(&bfad->bfa), &iocmd->media,
+				bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	bfa_trc(bfad, iocmd->status);
+	if (iocmd->status != BFA_STATUS_SFP_NOT_READY)
+		goto out;
+
+	wait_for_completion(&fcomp.comp);
+	iocmd->status = fcomp.status;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_sfp_speed(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_sfp_speed_s *iocmd = (struct bfa_bsg_sfp_speed_s *)cmd;
+	struct bfad_hal_comp	fcomp;
+	unsigned long	flags;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_sfp_speed(BFA_SFP_MOD(&bfad->bfa), iocmd->speed,
+				bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	bfa_trc(bfad, iocmd->status);
+	if (iocmd->status != BFA_STATUS_SFP_NOT_READY)
+		goto out;
+	wait_for_completion(&fcomp.comp);
+	iocmd->status = fcomp.status;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_flash_get_attr(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_flash_attr_s *iocmd =
+			(struct bfa_bsg_flash_attr_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	unsigned long	flags;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_flash_get_attr(BFA_FLASH(&bfad->bfa), &iocmd->attr,
+				bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status != BFA_STATUS_OK)
+		goto out;
+	wait_for_completion(&fcomp.comp);
+	iocmd->status = fcomp.status;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_flash_erase_part(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	unsigned long	flags;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_flash_erase_part(BFA_FLASH(&bfad->bfa), iocmd->type,
+				iocmd->instance, bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status != BFA_STATUS_OK)
+		goto out;
+	wait_for_completion(&fcomp.comp);
+	iocmd->status = fcomp.status;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_flash_update_part(struct bfad_s *bfad, void *cmd,
+			unsigned int payload_len)
+{
+	struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
+	void	*iocmd_bufptr;
+	struct bfad_hal_comp fcomp;
+	unsigned long	flags;
+
+	if (bfad_chk_iocmd_sz(payload_len,
+			sizeof(struct bfa_bsg_flash_s),
+			iocmd->bufsz) != BFA_STATUS_OK) {
+		iocmd->status = BFA_STATUS_VERSION_FAIL;
+		return 0;
+	}
+
+	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
+				iocmd->type, iocmd->instance, iocmd_bufptr,
+				iocmd->bufsz, 0, bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status != BFA_STATUS_OK)
+		goto out;
+	wait_for_completion(&fcomp.comp);
+	iocmd->status = fcomp.status;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_flash_read_part(struct bfad_s *bfad, void *cmd,
+			unsigned int payload_len)
+{
+	struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	void	*iocmd_bufptr;
+	unsigned long	flags;
+
+	if (bfad_chk_iocmd_sz(payload_len,
+			sizeof(struct bfa_bsg_flash_s),
+			iocmd->bufsz) != BFA_STATUS_OK) {
+		iocmd->status = BFA_STATUS_VERSION_FAIL;
+		return 0;
+	}
+
+	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa), iocmd->type,
+				iocmd->instance, iocmd_bufptr, iocmd->bufsz, 0,
+				bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status != BFA_STATUS_OK)
+		goto out;
+	wait_for_completion(&fcomp.comp);
+	iocmd->status = fcomp.status;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_diag_temp(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_diag_get_temp_s *iocmd =
+			(struct bfa_bsg_diag_get_temp_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	unsigned long	flags;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_diag_tsensor_query(BFA_DIAG_MOD(&bfad->bfa),
+				&iocmd->result, bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	bfa_trc(bfad, iocmd->status);
+	if (iocmd->status != BFA_STATUS_OK)
+		goto out;
+	wait_for_completion(&fcomp.comp);
+	iocmd->status = fcomp.status;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_diag_memtest(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_diag_memtest_s *iocmd =
+			(struct bfa_bsg_diag_memtest_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	unsigned long   flags;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_diag_memtest(BFA_DIAG_MOD(&bfad->bfa),
+				&iocmd->memtest, iocmd->pat,
+				&iocmd->result, bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	bfa_trc(bfad, iocmd->status);
+	if (iocmd->status != BFA_STATUS_OK)
+		goto out;
+	wait_for_completion(&fcomp.comp);
+	iocmd->status = fcomp.status;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_diag_loopback(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_diag_loopback_s *iocmd =
+			(struct bfa_bsg_diag_loopback_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	unsigned long   flags;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_fcdiag_loopback(&bfad->bfa, iocmd->opmode,
+				iocmd->speed, iocmd->lpcnt, iocmd->pat,
+				&iocmd->result, bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	bfa_trc(bfad, iocmd->status);
+	if (iocmd->status != BFA_STATUS_OK)
+		goto out;
+	wait_for_completion(&fcomp.comp);
+	iocmd->status = fcomp.status;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_diag_fwping(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_diag_fwping_s *iocmd =
+			(struct bfa_bsg_diag_fwping_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	unsigned long   flags;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_diag_fwping(BFA_DIAG_MOD(&bfad->bfa), iocmd->cnt,
+				iocmd->pattern, &iocmd->result,
+				bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	bfa_trc(bfad, iocmd->status);
+	if (iocmd->status != BFA_STATUS_OK)
+		goto out;
+	bfa_trc(bfad, 0x77771);
+	wait_for_completion(&fcomp.comp);
+	iocmd->status = fcomp.status;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_diag_queuetest(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_diag_qtest_s *iocmd = (struct bfa_bsg_diag_qtest_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	unsigned long   flags;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_fcdiag_queuetest(&bfad->bfa, iocmd->force,
+				iocmd->queue, &iocmd->result,
+				bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status != BFA_STATUS_OK)
+		goto out;
+	wait_for_completion(&fcomp.comp);
+	iocmd->status = fcomp.status;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_diag_sfp(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_sfp_show_s *iocmd =
+			(struct bfa_bsg_sfp_show_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	unsigned long   flags;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_sfp_show(BFA_SFP_MOD(&bfad->bfa), &iocmd->sfp,
+				bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	bfa_trc(bfad, iocmd->status);
+	if (iocmd->status != BFA_STATUS_OK)
+		goto out;
+	wait_for_completion(&fcomp.comp);
+	iocmd->status = fcomp.status;
+	bfa_trc(bfad, iocmd->status);
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_diag_led(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_diag_led_s *iocmd = (struct bfa_bsg_diag_led_s *)cmd;
+	unsigned long   flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_diag_ledtest(BFA_DIAG_MOD(&bfad->bfa),
+				&iocmd->ledtest);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	return 0;
+}
+
+int
+bfad_iocmd_diag_beacon_lport(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_diag_beacon_s *iocmd =
+			(struct bfa_bsg_diag_beacon_s *)cmd;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_diag_beacon_port(BFA_DIAG_MOD(&bfad->bfa),
+				iocmd->beacon, iocmd->link_e2e_beacon,
+				iocmd->second);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	return 0;
+}
+
+int
+bfad_iocmd_diag_lb_stat(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_diag_lb_stat_s *iocmd =
+			(struct bfa_bsg_diag_lb_stat_s *)cmd;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_fcdiag_lb_is_running(&bfad->bfa);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	bfa_trc(bfad, iocmd->status);
+
+	return 0;
+}
+
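+/*
+ * Enable D-Port (diagnostic) mode with the given loop count and test
+ * pattern and wait for the firmware response.
+ */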
+int
+bfad_iocmd_diag_dport_enable(struct bfad_s *bfad, void *pcmd)
+{
+	struct bfa_bsg_dport_enable_s *iocmd =
+				(struct bfa_bsg_dport_enable_s *)pcmd;
+	unsigned long	flags;
+	struct bfad_hal_comp fcomp;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_dport_enable(&bfad->bfa, iocmd->lpcnt,
+					iocmd->pat, bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status != BFA_STATUS_OK)
+		bfa_trc(bfad, iocmd->status);
+	else {
+		wait_for_completion(&fcomp.comp);
+		iocmd->status = fcomp.status;
+	}
+	return 0;
+}
+
+int
+bfad_iocmd_diag_dport_disable(struct bfad_s *bfad, void *pcmd)
+{
+	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
+	unsigned long	flags;
+	struct bfad_hal_comp fcomp;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_dport_disable(&bfad->bfa, bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status != BFA_STATUS_OK)
+		bfa_trc(bfad, iocmd->status);
+	else {
+		wait_for_completion(&fcomp.comp);
+		iocmd->status = fcomp.status;
+	}
+	return 0;
+}
+
+int
+bfad_iocmd_diag_dport_start(struct bfad_s *bfad, void *pcmd)
+{
+	struct bfa_bsg_dport_enable_s *iocmd =
+				(struct bfa_bsg_dport_enable_s *)pcmd;
+	unsigned long   flags;
+	struct bfad_hal_comp fcomp;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_dport_start(&bfad->bfa, iocmd->lpcnt,
+					iocmd->pat, bfad_hcb_comp,
+					&fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	if (iocmd->status != BFA_STATUS_OK) {
+		bfa_trc(bfad, iocmd->status);
+	} else {
+		wait_for_completion(&fcomp.comp);
+		iocmd->status = fcomp.status;
+	}
+
+	return 0;
+}
+
+int
+bfad_iocmd_diag_dport_show(struct bfad_s *bfad, void *pcmd)
+{
+	struct bfa_bsg_diag_dport_show_s *iocmd =
+				(struct bfa_bsg_diag_dport_show_s *)pcmd;
+	unsigned long   flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_dport_show(&bfad->bfa, &iocmd->result);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	return 0;
+}
+
+int
+bfad_iocmd_phy_get_attr(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_phy_attr_s *iocmd =
+			(struct bfa_bsg_phy_attr_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	unsigned long	flags;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_phy_get_attr(BFA_PHY(&bfad->bfa), iocmd->instance,
+				&iocmd->attr, bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status != BFA_STATUS_OK)
+		goto out;
+	wait_for_completion(&fcomp.comp);
+	iocmd->status = fcomp.status;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_phy_get_stats(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_phy_stats_s *iocmd =
+			(struct bfa_bsg_phy_stats_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	unsigned long	flags;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_phy_get_stats(BFA_PHY(&bfad->bfa), iocmd->instance,
+				&iocmd->stats, bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status != BFA_STATUS_OK)
+		goto out;
+	wait_for_completion(&fcomp.comp);
+	iocmd->status = fcomp.status;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_phy_read(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
+{
+	struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	void	*iocmd_bufptr;
+	unsigned long	flags;
+
+	if (bfad_chk_iocmd_sz(payload_len,
+			sizeof(struct bfa_bsg_phy_s),
+			iocmd->bufsz) != BFA_STATUS_OK) {
+		iocmd->status = BFA_STATUS_VERSION_FAIL;
+		return 0;
+	}
+
+	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_phy_read(BFA_PHY(&bfad->bfa),
+				iocmd->instance, iocmd_bufptr, iocmd->bufsz,
+				0, bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status != BFA_STATUS_OK)
+		goto out;
+	wait_for_completion(&fcomp.comp);
+	iocmd->status = fcomp.status;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_vhba_query(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_vhba_attr_s *iocmd =
+			(struct bfa_bsg_vhba_attr_s *)cmd;
+	struct bfa_vhba_attr_s *attr = &iocmd->attr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	attr->pwwn =  bfad->bfa.ioc.attr->pwwn;
+	attr->nwwn =  bfad->bfa.ioc.attr->nwwn;
+	attr->plog_enabled = (bfa_boolean_t)bfad->bfa.plog->plog_enabled;
+	attr->io_profile = bfa_fcpim_get_io_profile(&bfad->bfa);
+	attr->path_tov  = bfa_fcpim_path_tov_get(&bfad->bfa);
+	iocmd->status = BFA_STATUS_OK;
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	return 0;
+}
+
+int
+bfad_iocmd_phy_update(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
+{
+	struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
+	void	*iocmd_bufptr;
+	struct bfad_hal_comp fcomp;
+	unsigned long	flags;
+
+	if (bfad_chk_iocmd_sz(payload_len,
+			sizeof(struct bfa_bsg_phy_s),
+			iocmd->bufsz) != BFA_STATUS_OK) {
+		iocmd->status = BFA_STATUS_VERSION_FAIL;
+		return 0;
+	}
+
+	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_phy_update(BFA_PHY(&bfad->bfa),
+				iocmd->instance, iocmd_bufptr, iocmd->bufsz,
+				0, bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status != BFA_STATUS_OK)
+		goto out;
+	wait_for_completion(&fcomp.comp);
+	iocmd->status = fcomp.status;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_porglog_get(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
+	void *iocmd_bufptr;
+
+	if (iocmd->bufsz < sizeof(struct bfa_plog_s)) {
+		bfa_trc(bfad, sizeof(struct bfa_plog_s));
+		iocmd->status = BFA_STATUS_EINVAL;
+		goto out;
+	}
+
+	iocmd->status = BFA_STATUS_OK;
+	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
+	memcpy(iocmd_bufptr, (u8 *) &bfad->plog_buf, sizeof(struct bfa_plog_s));
+out:
+	return 0;
+}
+
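+/*
+ * Firmware core dumps are read out in fixed 16K chunks; the caller
+ * passes its position in iocmd->offset and gets the updated offset
+ * back for the next call.
+ */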
+#define BFA_DEBUG_FW_CORE_CHUNK_SZ	0x4000U /* 16K chunks for FW dump */
+int
+bfad_iocmd_debug_fw_core(struct bfad_s *bfad, void *cmd,
+			unsigned int payload_len)
+{
+	struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
+	void	*iocmd_bufptr;
+	unsigned long	flags;
+	u32 offset;
+
+	if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_debug_s),
+			BFA_DEBUG_FW_CORE_CHUNK_SZ) != BFA_STATUS_OK) {
+		iocmd->status = BFA_STATUS_VERSION_FAIL;
+		return 0;
+	}
+
+	if (iocmd->bufsz < BFA_DEBUG_FW_CORE_CHUNK_SZ ||
+			!IS_ALIGNED(iocmd->bufsz, sizeof(u16)) ||
+			!IS_ALIGNED(iocmd->offset, sizeof(u32))) {
+		bfa_trc(bfad, BFA_DEBUG_FW_CORE_CHUNK_SZ);
+		iocmd->status = BFA_STATUS_EINVAL;
+		goto out;
+	}
+
+	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	offset = iocmd->offset;
+	iocmd->status = bfa_ioc_debug_fwcore(&bfad->bfa.ioc, iocmd_bufptr,
+				&offset, &iocmd->bufsz);
+	iocmd->offset = offset;
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_debug_ctl(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+	unsigned long	flags;
+
+	if (v_cmd == IOCMD_DEBUG_FW_STATE_CLR) {
+		spin_lock_irqsave(&bfad->bfad_lock, flags);
+		bfad->bfa.ioc.dbg_fwsave_once = BFA_TRUE;
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	} else if (v_cmd == IOCMD_DEBUG_PORTLOG_CLR)
+		bfad->plog_buf.head = bfad->plog_buf.tail = 0;
+	else if (v_cmd == IOCMD_DEBUG_START_DTRC)
+		bfa_trc_init(bfad->trcmod);
+	else if (v_cmd == IOCMD_DEBUG_STOP_DTRC)
+		bfa_trc_stop(bfad->trcmod);
+
+	iocmd->status = BFA_STATUS_OK;
+	return 0;
+}
+
+int
+bfad_iocmd_porglog_ctl(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_portlogctl_s *iocmd = (struct bfa_bsg_portlogctl_s *)cmd;
+
+	if (iocmd->ctl == BFA_TRUE)
+		bfad->plog_buf.plog_enabled = 1;
+	else
+		bfad->plog_buf.plog_enabled = 0;
+
+	iocmd->status = BFA_STATUS_OK;
+	return 0;
+}
+
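+/*
+ * Turn FCP IO profiling on or off; the profiling start time is taken
+ * from the wall clock when enabling.
+ */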
+int
+bfad_iocmd_fcpim_cfg_profile(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+	struct bfa_bsg_fcpim_profile_s *iocmd =
+				(struct bfa_bsg_fcpim_profile_s *)cmd;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	if (v_cmd == IOCMD_FCPIM_PROFILE_ON)
+		iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, ktime_get_real_seconds());
+	else if (v_cmd == IOCMD_FCPIM_PROFILE_OFF)
+		iocmd->status = bfa_fcpim_profile_off(&bfad->bfa);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	return 0;
+}
+
+static int
+bfad_iocmd_itnim_get_ioprofile(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_itnim_ioprofile_s *iocmd =
+				(struct bfa_bsg_itnim_ioprofile_s *)cmd;
+	struct bfa_fcs_lport_s *fcs_port;
+	struct bfa_fcs_itnim_s *itnim;
+	unsigned long   flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+				iocmd->vf_id, iocmd->lpwwn);
+	if (!fcs_port)
+		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+	else {
+		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
+		if (itnim == NULL)
+			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+		else
+			iocmd->status = bfa_itnim_get_ioprofile(
+						bfa_fcs_itnim_get_halitn(itnim),
+						&iocmd->ioprofile);
+	}
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	return 0;
+}
+
+int
+bfad_iocmd_fcport_get_stats(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_fcport_stats_s *iocmd =
+				(struct bfa_bsg_fcport_stats_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	unsigned long	flags;
+	struct bfa_cb_pending_q_s cb_qe;
+
+	init_completion(&fcomp.comp);
+	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
+			   &fcomp, &iocmd->stats);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status != BFA_STATUS_OK) {
+		bfa_trc(bfad, iocmd->status);
+		goto out;
+	}
+	wait_for_completion(&fcomp.comp);
+	iocmd->status = fcomp.status;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_fcport_reset_stats(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	unsigned long	flags;
+	struct bfa_cb_pending_q_s cb_qe;
+
+	init_completion(&fcomp.comp);
+	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, NULL);
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status != BFA_STATUS_OK) {
+		bfa_trc(bfad, iocmd->status);
+		goto out;
+	}
+	wait_for_completion(&fcomp.comp);
+	iocmd->status = fcomp.status;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_boot_cfg(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	unsigned long	flags;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
+			BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id,
+			&iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
+			bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status != BFA_STATUS_OK)
+		goto out;
+	wait_for_completion(&fcomp.comp);
+	iocmd->status = fcomp.status;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_boot_query(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	unsigned long	flags;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
+			BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id,
+			&iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
+			bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status != BFA_STATUS_OK)
+		goto out;
+	wait_for_completion(&fcomp.comp);
+	iocmd->status = fcomp.status;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_preboot_query(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_preboot_s *iocmd = (struct bfa_bsg_preboot_s *)cmd;
+	struct bfi_iocfc_cfgrsp_s *cfgrsp = bfad->bfa.iocfc.cfgrsp;
+	struct bfa_boot_pbc_s *pbcfg = &iocmd->cfg;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
+	pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
+	pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
+	memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
+	iocmd->status = BFA_STATUS_OK;
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	return 0;
+}
+
+int
+bfad_iocmd_ethboot_cfg(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	unsigned long	flags;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
+				BFA_FLASH_PART_PXECFG,
+				bfad->bfa.ioc.port_id, &iocmd->cfg,
+				sizeof(struct bfa_ethboot_cfg_s), 0,
+				bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status != BFA_STATUS_OK)
+		goto out;
+	wait_for_completion(&fcomp.comp);
+	iocmd->status = fcomp.status;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_ethboot_query(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	unsigned long	flags;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
+				BFA_FLASH_PART_PXECFG,
+				bfad->bfa.ioc.port_id, &iocmd->cfg,
+				sizeof(struct bfa_ethboot_cfg_s), 0,
+				bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status != BFA_STATUS_OK)
+		goto out;
+	wait_for_completion(&fcomp.comp);
+	iocmd->status = fcomp.status;
+out:
+	return 0;
+}
+
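+/*
+ * Enable or disable trunking: the port is taken down, the trunked
+ * flag updated, and the port brought back up. The request is rejected
+ * in D-Port mode and in loop topology.
+ */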
+int
+bfad_iocmd_cfg_trunk(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+
+	if (bfa_fcport_is_dport(&bfad->bfa)) {
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		/*
+		 * Report the conflict through iocmd->status, as every
+		 * other handler does; a raw bfa_status_t is not a valid
+		 * return value for the bsg dispatcher.
+		 */
+		iocmd->status = BFA_STATUS_DPORT_ERR;
+		return 0;
+	}
+
+	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
+		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
+		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
+	else {
+		if (v_cmd == IOCMD_TRUNK_ENABLE) {
+			trunk->attr.state = BFA_TRUNK_OFFLINE;
+			bfa_fcport_disable(&bfad->bfa);
+			fcport->cfg.trunked = BFA_TRUE;
+		} else if (v_cmd == IOCMD_TRUNK_DISABLE) {
+			trunk->attr.state = BFA_TRUNK_DISABLED;
+			bfa_fcport_disable(&bfad->bfa);
+			fcport->cfg.trunked = BFA_FALSE;
+		}
+
+		if (!bfa_fcport_is_disabled(&bfad->bfa))
+			bfa_fcport_enable(&bfad->bfa);
+
+		iocmd->status = BFA_STATUS_OK;
+	}
+
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	return 0;
+}
+
+int
+bfad_iocmd_trunk_get_attr(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_trunk_attr_s *iocmd = (struct bfa_bsg_trunk_attr_s *)cmd;
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
+		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
+		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
+	else {
+		memcpy((void *)&iocmd->attr, (void *)&trunk->attr,
+			sizeof(struct bfa_trunk_attr_s));
+		iocmd->attr.port_id = bfa_lps_get_base_pid(&bfad->bfa);
+		iocmd->status = BFA_STATUS_OK;
+	}
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	return 0;
+}
+
+int
+bfad_iocmd_qos(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) {
+		if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
+		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
+			iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
+		else {
+			if (v_cmd == IOCMD_QOS_ENABLE)
+				fcport->cfg.qos_enabled = BFA_TRUE;
+			else if (v_cmd == IOCMD_QOS_DISABLE) {
+				fcport->cfg.qos_enabled = BFA_FALSE;
+				fcport->cfg.qos_bw.high = BFA_QOS_BW_HIGH;
+				fcport->cfg.qos_bw.med = BFA_QOS_BW_MED;
+				fcport->cfg.qos_bw.low = BFA_QOS_BW_LOW;
+			}
+		}
+	}
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	return 0;
+}
+
+int
+bfad_iocmd_qos_get_attr(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_qos_attr_s *iocmd = (struct bfa_bsg_qos_attr_s *)cmd;
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
+		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
+		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
+	else {
+		iocmd->attr.state = fcport->qos_attr.state;
+		iocmd->attr.total_bb_cr =
+			be32_to_cpu(fcport->qos_attr.total_bb_cr);
+		iocmd->attr.qos_bw.high = fcport->cfg.qos_bw.high;
+		iocmd->attr.qos_bw.med = fcport->cfg.qos_bw.med;
+		iocmd->attr.qos_bw.low = fcport->cfg.qos_bw.low;
+		iocmd->attr.qos_bw_op = fcport->qos_attr.qos_bw_op;
+		iocmd->status = BFA_STATUS_OK;
+	}
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	return 0;
+}
+
+int
+bfad_iocmd_qos_get_vc_attr(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_qos_vc_attr_s *iocmd =
+				(struct bfa_bsg_qos_vc_attr_s *)cmd;
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+	struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
+	unsigned long	flags;
+	u32	i;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->attr.total_vc_count = be16_to_cpu(bfa_vc_attr->total_vc_count);
+	iocmd->attr.shared_credit  = be16_to_cpu(bfa_vc_attr->shared_credit);
+	iocmd->attr.elp_opmode_flags  =
+				be32_to_cpu(bfa_vc_attr->elp_opmode_flags);
+
+	/* Individual VC info */
+	for (i = 0; i < iocmd->attr.total_vc_count; i++) {
+		iocmd->attr.vc_info[i].vc_credit =
+				bfa_vc_attr->vc_info[i].vc_credit;
+		iocmd->attr.vc_info[i].borrow_credit =
+				bfa_vc_attr->vc_info[i].borrow_credit;
+		iocmd->attr.vc_info[i].priority =
+				bfa_vc_attr->vc_info[i].priority;
+	}
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	iocmd->status = BFA_STATUS_OK;
+	return 0;
+}
+
+int
+bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_fcport_stats_s *iocmd =
+				(struct bfa_bsg_fcport_stats_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	unsigned long	flags;
+	struct bfa_cb_pending_q_s cb_qe;
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+
+	init_completion(&fcomp.comp);
+	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
+			   &fcomp, &iocmd->stats);
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
+	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
+		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
+		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
+	else
+		iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status != BFA_STATUS_OK) {
+		bfa_trc(bfad, iocmd->status);
+		goto out;
+	}
+	wait_for_completion(&fcomp.comp);
+	iocmd->status = fcomp.status;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	unsigned long	flags;
+	struct bfa_cb_pending_q_s cb_qe;
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+
+	init_completion(&fcomp.comp);
+	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
+			   &fcomp, NULL);
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
+	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
+		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
+		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
+	else
+		iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status != BFA_STATUS_OK) {
+		bfa_trc(bfad, iocmd->status);
+		goto out;
+	}
+	wait_for_completion(&fcomp.comp);
+	iocmd->status = fcomp.status;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_vf_get_stats(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_vf_stats_s *iocmd =
+			(struct bfa_bsg_vf_stats_s *)cmd;
+	struct bfa_fcs_fabric_s	*fcs_vf;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
+	if (fcs_vf == NULL) {
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		iocmd->status = BFA_STATUS_UNKNOWN_VFID;
+		goto out;
+	}
+	memcpy((void *)&iocmd->stats, (void *)&fcs_vf->stats,
+		sizeof(struct bfa_vf_stats_s));
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	iocmd->status = BFA_STATUS_OK;
+out:
+	return 0;
+}
+
+int
+bfad_iocmd_vf_clr_stats(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_vf_reset_stats_s *iocmd =
+			(struct bfa_bsg_vf_reset_stats_s *)cmd;
+	struct bfa_fcs_fabric_s	*fcs_vf;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
+	if (fcs_vf == NULL) {
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		iocmd->status = BFA_STATUS_UNKNOWN_VFID;
+		goto out;
+	}
+	memset((void *)&fcs_vf->stats, 0, sizeof(struct bfa_vf_stats_s));
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	iocmd->status = BFA_STATUS_OK;
+out:
+	return 0;
+}
+
+/* Function to reset the LUN SCAN mode */
+static void
+bfad_iocmd_lunmask_reset_lunscan_mode(struct bfad_s *bfad, int lunmask_cfg)
+{
+	struct bfad_im_port_s *pport_im = bfad->pport.im_port;
+	struct bfad_vport_s *vport = NULL;
+
+	/* Set the scsi device LUN SCAN flags for base port */
+	bfad_reset_sdev_bflags(pport_im, lunmask_cfg);
+
+	/* Set the scsi device LUN SCAN flags for the vports */
+	list_for_each_entry(vport, &bfad->vport_list, list_entry)
+		bfad_reset_sdev_bflags(vport->drv_port.im_port, lunmask_cfg);
+}
+
+int
+bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
+{
+	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE) {
+		iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_TRUE);
+		/* Set the LUN Scanning mode to be Sequential scan */
+		if (iocmd->status == BFA_STATUS_OK)
+			bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_TRUE);
+	} else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE) {
+		iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_FALSE);
+		/* Set the LUN Scanning mode to default REPORT_LUNS scan */
+		if (iocmd->status == BFA_STATUS_OK)
+			bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_FALSE);
+	} else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
+		iocmd->status = bfa_fcpim_lunmask_clear(&bfad->bfa);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	return 0;
+}
+
+int
+bfad_iocmd_fcpim_lunmask_query(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_fcpim_lunmask_query_s *iocmd =
+			(struct bfa_bsg_fcpim_lunmask_query_s *)cmd;
+	struct bfa_lunmask_cfg_s *lun_mask = &iocmd->lun_mask;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_fcpim_lunmask_query(&bfad->bfa, lun_mask);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	return 0;
+}
+
+int
+bfad_iocmd_fcpim_cfg_lunmask(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+	struct bfa_bsg_fcpim_lunmask_s *iocmd =
+				(struct bfa_bsg_fcpim_lunmask_s *)cmd;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	if (v_cmd == IOCMD_FCPIM_LUNMASK_ADD)
+		iocmd->status = bfa_fcpim_lunmask_add(&bfad->bfa, iocmd->vf_id,
+					&iocmd->pwwn, iocmd->rpwwn, iocmd->lun);
+	else if (v_cmd == IOCMD_FCPIM_LUNMASK_DELETE)
+		iocmd->status = bfa_fcpim_lunmask_delete(&bfad->bfa,
+					iocmd->vf_id, &iocmd->pwwn,
+					iocmd->rpwwn, iocmd->lun);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	return 0;
+}
+
+int
+bfad_iocmd_fcpim_throttle_query(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_fcpim_throttle_s *iocmd =
+			(struct bfa_bsg_fcpim_throttle_s *)cmd;
+	unsigned long   flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_fcpim_throttle_get(&bfad->bfa,
+				(void *)&iocmd->throttle);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	return 0;
+}
+
+int
+bfad_iocmd_fcpim_throttle_set(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_fcpim_throttle_s *iocmd =
+			(struct bfa_bsg_fcpim_throttle_s *)cmd;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_fcpim_throttle_set(&bfad->bfa,
+				iocmd->throttle.cfg_value);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	return 0;
+}
+
+int
+bfad_iocmd_tfru_read(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_tfru_s *iocmd =
+			(struct bfa_bsg_tfru_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	unsigned long flags = 0;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_tfru_read(BFA_FRU(&bfad->bfa),
+				&iocmd->data, iocmd->len, iocmd->offset,
+				bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status == BFA_STATUS_OK) {
+		wait_for_completion(&fcomp.comp);
+		iocmd->status = fcomp.status;
+	}
+
+	return 0;
+}
+
+int
+bfad_iocmd_tfru_write(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_tfru_s *iocmd =
+			(struct bfa_bsg_tfru_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	unsigned long flags = 0;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_tfru_write(BFA_FRU(&bfad->bfa),
+				&iocmd->data, iocmd->len, iocmd->offset,
+				bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status == BFA_STATUS_OK) {
+		wait_for_completion(&fcomp.comp);
+		iocmd->status = fcomp.status;
+	}
+
+	return 0;
+}
+
+int
+bfad_iocmd_fruvpd_read(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_fruvpd_s *iocmd =
+			(struct bfa_bsg_fruvpd_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	unsigned long flags = 0;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_fruvpd_read(BFA_FRU(&bfad->bfa),
+				&iocmd->data, iocmd->len, iocmd->offset,
+				bfad_hcb_comp, &fcomp);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status == BFA_STATUS_OK) {
+		wait_for_completion(&fcomp.comp);
+		iocmd->status = fcomp.status;
+	}
+
+	return 0;
+}
+
+int
+bfad_iocmd_fruvpd_update(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_fruvpd_s *iocmd =
+			(struct bfa_bsg_fruvpd_s *)cmd;
+	struct bfad_hal_comp fcomp;
+	unsigned long flags = 0;
+
+	init_completion(&fcomp.comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_fruvpd_update(BFA_FRU(&bfad->bfa),
+				&iocmd->data, iocmd->len, iocmd->offset,
+				bfad_hcb_comp, &fcomp, iocmd->trfr_cmpl);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (iocmd->status == BFA_STATUS_OK) {
+		wait_for_completion(&fcomp.comp);
+		iocmd->status = fcomp.status;
+	}
+
+	return 0;
+}
+
+int
+bfad_iocmd_fruvpd_get_max_size(struct bfad_s *bfad, void *cmd)
+{
+	struct bfa_bsg_fruvpd_max_size_s *iocmd =
+			(struct bfa_bsg_fruvpd_max_size_s *)cmd;
+	unsigned long flags = 0;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	iocmd->status = bfa_fruvpd_get_max_size(BFA_FRU(&bfad->bfa),
+						&iocmd->max_size);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	return 0;
+}
+
+static int
+bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
+		unsigned int payload_len)
+{
+	int rc = -EINVAL;
+
+	switch (cmd) {
+	case IOCMD_IOC_ENABLE:
+		rc = bfad_iocmd_ioc_enable(bfad, iocmd);
+		break;
+	case IOCMD_IOC_DISABLE:
+		rc = bfad_iocmd_ioc_disable(bfad, iocmd);
+		break;
+	case IOCMD_IOC_GET_INFO:
+		rc = bfad_iocmd_ioc_get_info(bfad, iocmd);
+		break;
+	case IOCMD_IOC_GET_ATTR:
+		rc = bfad_iocmd_ioc_get_attr(bfad, iocmd);
+		break;
+	case IOCMD_IOC_GET_STATS:
+		rc = bfad_iocmd_ioc_get_stats(bfad, iocmd);
+		break;
+	case IOCMD_IOC_GET_FWSTATS:
+		rc = bfad_iocmd_ioc_get_fwstats(bfad, iocmd, payload_len);
+		break;
+	case IOCMD_IOC_RESET_STATS:
+	case IOCMD_IOC_RESET_FWSTATS:
+		rc = bfad_iocmd_ioc_reset_stats(bfad, iocmd, cmd);
+		break;
+	case IOCMD_IOC_SET_ADAPTER_NAME:
+	case IOCMD_IOC_SET_PORT_NAME:
+		rc = bfad_iocmd_ioc_set_name(bfad, iocmd, cmd);
+		break;
+	case IOCMD_IOCFC_GET_ATTR:
+		rc = bfad_iocmd_iocfc_get_attr(bfad, iocmd);
+		break;
+	case IOCMD_IOCFC_SET_INTR:
+		rc = bfad_iocmd_iocfc_set_intr(bfad, iocmd);
+		break;
+	case IOCMD_PORT_ENABLE:
+		rc = bfad_iocmd_port_enable(bfad, iocmd);
+		break;
+	case IOCMD_PORT_DISABLE:
+		rc = bfad_iocmd_port_disable(bfad, iocmd);
+		break;
+	case IOCMD_PORT_GET_ATTR:
+		rc = bfad_iocmd_port_get_attr(bfad, iocmd);
+		break;
+	case IOCMD_PORT_GET_STATS:
+		rc = bfad_iocmd_port_get_stats(bfad, iocmd, payload_len);
+		break;
+	case IOCMD_PORT_RESET_STATS:
+		rc = bfad_iocmd_port_reset_stats(bfad, iocmd);
+		break;
+	case IOCMD_PORT_CFG_TOPO:
+	case IOCMD_PORT_CFG_SPEED:
+	case IOCMD_PORT_CFG_ALPA:
+	case IOCMD_PORT_CLR_ALPA:
+		rc = bfad_iocmd_set_port_cfg(bfad, iocmd, cmd);
+		break;
+	case IOCMD_PORT_CFG_MAXFRSZ:
+		rc = bfad_iocmd_port_cfg_maxfrsize(bfad, iocmd);
+		break;
+	case IOCMD_PORT_BBCR_ENABLE:
+	case IOCMD_PORT_BBCR_DISABLE:
+		rc = bfad_iocmd_port_cfg_bbcr(bfad, cmd, iocmd);
+		break;
+	case IOCMD_PORT_BBCR_GET_ATTR:
+		rc = bfad_iocmd_port_get_bbcr_attr(bfad, iocmd);
+		break;
+	case IOCMD_LPORT_GET_ATTR:
+		rc = bfad_iocmd_lport_get_attr(bfad, iocmd);
+		break;
+	case IOCMD_LPORT_GET_STATS:
+		rc = bfad_iocmd_lport_get_stats(bfad, iocmd);
+		break;
+	case IOCMD_LPORT_RESET_STATS:
+		rc = bfad_iocmd_lport_reset_stats(bfad, iocmd);
+		break;
+	case IOCMD_LPORT_GET_IOSTATS:
+		rc = bfad_iocmd_lport_get_iostats(bfad, iocmd);
+		break;
+	case IOCMD_LPORT_GET_RPORTS:
+		rc = bfad_iocmd_lport_get_rports(bfad, iocmd, payload_len);
+		break;
+	case IOCMD_RPORT_GET_ATTR:
+		rc = bfad_iocmd_rport_get_attr(bfad, iocmd);
+		break;
+	case IOCMD_RPORT_GET_ADDR:
+		rc = bfad_iocmd_rport_get_addr(bfad, iocmd);
+		break;
+	case IOCMD_RPORT_GET_STATS:
+		rc = bfad_iocmd_rport_get_stats(bfad, iocmd);
+		break;
+	case IOCMD_RPORT_RESET_STATS:
+		rc = bfad_iocmd_rport_clr_stats(bfad, iocmd);
+		break;
+	case IOCMD_RPORT_SET_SPEED:
+		rc = bfad_iocmd_rport_set_speed(bfad, iocmd);
+		break;
+	case IOCMD_VPORT_GET_ATTR:
+		rc = bfad_iocmd_vport_get_attr(bfad, iocmd);
+		break;
+	case IOCMD_VPORT_GET_STATS:
+		rc = bfad_iocmd_vport_get_stats(bfad, iocmd);
+		break;
+	case IOCMD_VPORT_RESET_STATS:
+		rc = bfad_iocmd_vport_clr_stats(bfad, iocmd);
+		break;
+	case IOCMD_FABRIC_GET_LPORTS:
+		rc = bfad_iocmd_fabric_get_lports(bfad, iocmd, payload_len);
+		break;
+	case IOCMD_RATELIM_ENABLE:
+	case IOCMD_RATELIM_DISABLE:
+		rc = bfad_iocmd_ratelim(bfad, cmd, iocmd);
+		break;
+	case IOCMD_RATELIM_DEF_SPEED:
+		rc = bfad_iocmd_ratelim_speed(bfad, cmd, iocmd);
+		break;
+	case IOCMD_FCPIM_FAILOVER:
+		rc = bfad_iocmd_cfg_fcpim(bfad, iocmd);
+		break;
+	case IOCMD_FCPIM_MODSTATS:
+		rc = bfad_iocmd_fcpim_get_modstats(bfad, iocmd);
+		break;
+	case IOCMD_FCPIM_MODSTATSCLR:
+		rc = bfad_iocmd_fcpim_clr_modstats(bfad, iocmd);
+		break;
+	case IOCMD_FCPIM_DEL_ITN_STATS:
+		rc = bfad_iocmd_fcpim_get_del_itn_stats(bfad, iocmd);
+		break;
+	case IOCMD_ITNIM_GET_ATTR:
+		rc = bfad_iocmd_itnim_get_attr(bfad, iocmd);
+		break;
+	case IOCMD_ITNIM_GET_IOSTATS:
+		rc = bfad_iocmd_itnim_get_iostats(bfad, iocmd);
+		break;
+	case IOCMD_ITNIM_RESET_STATS:
+		rc = bfad_iocmd_itnim_reset_stats(bfad, iocmd);
+		break;
+	case IOCMD_ITNIM_GET_ITNSTATS:
+		rc = bfad_iocmd_itnim_get_itnstats(bfad, iocmd);
+		break;
+	case IOCMD_FCPORT_ENABLE:
+		rc = bfad_iocmd_fcport_enable(bfad, iocmd);
+		break;
+	case IOCMD_FCPORT_DISABLE:
+		rc = bfad_iocmd_fcport_disable(bfad, iocmd);
+		break;
+	case IOCMD_IOC_PCIFN_CFG:
+		rc = bfad_iocmd_ioc_get_pcifn_cfg(bfad, iocmd);
+		break;
+	case IOCMD_IOC_FW_SIG_INV:
+		rc = bfad_iocmd_ioc_fw_sig_inv(bfad, iocmd);
+		break;
+	case IOCMD_PCIFN_CREATE:
+		rc = bfad_iocmd_pcifn_create(bfad, iocmd);
+		break;
+	case IOCMD_PCIFN_DELETE:
+		rc = bfad_iocmd_pcifn_delete(bfad, iocmd);
+		break;
+	case IOCMD_PCIFN_BW:
+		rc = bfad_iocmd_pcifn_bw(bfad, iocmd);
+		break;
+	case IOCMD_ADAPTER_CFG_MODE:
+		rc = bfad_iocmd_adapter_cfg_mode(bfad, iocmd);
+		break;
+	case IOCMD_PORT_CFG_MODE:
+		rc = bfad_iocmd_port_cfg_mode(bfad, iocmd);
+		break;
+	case IOCMD_FLASH_ENABLE_OPTROM:
+	case IOCMD_FLASH_DISABLE_OPTROM:
+		rc = bfad_iocmd_ablk_optrom(bfad, cmd, iocmd);
+		break;
+	case IOCMD_FAA_QUERY:
+		rc = bfad_iocmd_faa_query(bfad, iocmd);
+		break;
+	case IOCMD_CEE_GET_ATTR:
+		rc = bfad_iocmd_cee_attr(bfad, iocmd, payload_len);
+		break;
+	case IOCMD_CEE_GET_STATS:
+		rc = bfad_iocmd_cee_get_stats(bfad, iocmd, payload_len);
+		break;
+	case IOCMD_CEE_RESET_STATS:
+		rc = bfad_iocmd_cee_reset_stats(bfad, iocmd);
+		break;
+	case IOCMD_SFP_MEDIA:
+		rc = bfad_iocmd_sfp_media(bfad, iocmd);
+		break;
+	case IOCMD_SFP_SPEED:
+		rc = bfad_iocmd_sfp_speed(bfad, iocmd);
+		break;
+	case IOCMD_FLASH_GET_ATTR:
+		rc = bfad_iocmd_flash_get_attr(bfad, iocmd);
+		break;
+	case IOCMD_FLASH_ERASE_PART:
+		rc = bfad_iocmd_flash_erase_part(bfad, iocmd);
+		break;
+	case IOCMD_FLASH_UPDATE_PART:
+		rc = bfad_iocmd_flash_update_part(bfad, iocmd, payload_len);
+		break;
+	case IOCMD_FLASH_READ_PART:
+		rc = bfad_iocmd_flash_read_part(bfad, iocmd, payload_len);
+		break;
+	case IOCMD_DIAG_TEMP:
+		rc = bfad_iocmd_diag_temp(bfad, iocmd);
+		break;
+	case IOCMD_DIAG_MEMTEST:
+		rc = bfad_iocmd_diag_memtest(bfad, iocmd);
+		break;
+	case IOCMD_DIAG_LOOPBACK:
+		rc = bfad_iocmd_diag_loopback(bfad, iocmd);
+		break;
+	case IOCMD_DIAG_FWPING:
+		rc = bfad_iocmd_diag_fwping(bfad, iocmd);
+		break;
+	case IOCMD_DIAG_QUEUETEST:
+		rc = bfad_iocmd_diag_queuetest(bfad, iocmd);
+		break;
+	case IOCMD_DIAG_SFP:
+		rc = bfad_iocmd_diag_sfp(bfad, iocmd);
+		break;
+	case IOCMD_DIAG_LED:
+		rc = bfad_iocmd_diag_led(bfad, iocmd);
+		break;
+	case IOCMD_DIAG_BEACON_LPORT:
+		rc = bfad_iocmd_diag_beacon_lport(bfad, iocmd);
+		break;
+	case IOCMD_DIAG_LB_STAT:
+		rc = bfad_iocmd_diag_lb_stat(bfad, iocmd);
+		break;
+	case IOCMD_DIAG_DPORT_ENABLE:
+		rc = bfad_iocmd_diag_dport_enable(bfad, iocmd);
+		break;
+	case IOCMD_DIAG_DPORT_DISABLE:
+		rc = bfad_iocmd_diag_dport_disable(bfad, iocmd);
+		break;
+	case IOCMD_DIAG_DPORT_SHOW:
+		rc = bfad_iocmd_diag_dport_show(bfad, iocmd);
+		break;
+	case IOCMD_DIAG_DPORT_START:
+		rc = bfad_iocmd_diag_dport_start(bfad, iocmd);
+		break;
+	case IOCMD_PHY_GET_ATTR:
+		rc = bfad_iocmd_phy_get_attr(bfad, iocmd);
+		break;
+	case IOCMD_PHY_GET_STATS:
+		rc = bfad_iocmd_phy_get_stats(bfad, iocmd);
+		break;
+	case IOCMD_PHY_UPDATE_FW:
+		rc = bfad_iocmd_phy_update(bfad, iocmd, payload_len);
+		break;
+	case IOCMD_PHY_READ_FW:
+		rc = bfad_iocmd_phy_read(bfad, iocmd, payload_len);
+		break;
+	case IOCMD_VHBA_QUERY:
+		rc = bfad_iocmd_vhba_query(bfad, iocmd);
+		break;
+	case IOCMD_DEBUG_PORTLOG:
+		rc = bfad_iocmd_porglog_get(bfad, iocmd);
+		break;
+	case IOCMD_DEBUG_FW_CORE:
+		rc = bfad_iocmd_debug_fw_core(bfad, iocmd, payload_len);
+		break;
+	case IOCMD_DEBUG_FW_STATE_CLR:
+	case IOCMD_DEBUG_PORTLOG_CLR:
+	case IOCMD_DEBUG_START_DTRC:
+	case IOCMD_DEBUG_STOP_DTRC:
+		rc = bfad_iocmd_debug_ctl(bfad, iocmd, cmd);
+		break;
+	case IOCMD_DEBUG_PORTLOG_CTL:
+		rc = bfad_iocmd_porglog_ctl(bfad, iocmd);
+		break;
+	case IOCMD_FCPIM_PROFILE_ON:
+	case IOCMD_FCPIM_PROFILE_OFF:
+		rc = bfad_iocmd_fcpim_cfg_profile(bfad, iocmd, cmd);
+		break;
+	case IOCMD_ITNIM_GET_IOPROFILE:
+		rc = bfad_iocmd_itnim_get_ioprofile(bfad, iocmd);
+		break;
+	case IOCMD_FCPORT_GET_STATS:
+		rc = bfad_iocmd_fcport_get_stats(bfad, iocmd);
+		break;
+	case IOCMD_FCPORT_RESET_STATS:
+		rc = bfad_iocmd_fcport_reset_stats(bfad, iocmd);
+		break;
+	case IOCMD_BOOT_CFG:
+		rc = bfad_iocmd_boot_cfg(bfad, iocmd);
+		break;
+	case IOCMD_BOOT_QUERY:
+		rc = bfad_iocmd_boot_query(bfad, iocmd);
+		break;
+	case IOCMD_PREBOOT_QUERY:
+		rc = bfad_iocmd_preboot_query(bfad, iocmd);
+		break;
+	case IOCMD_ETHBOOT_CFG:
+		rc = bfad_iocmd_ethboot_cfg(bfad, iocmd);
+		break;
+	case IOCMD_ETHBOOT_QUERY:
+		rc = bfad_iocmd_ethboot_query(bfad, iocmd);
+		break;
+	case IOCMD_TRUNK_ENABLE:
+	case IOCMD_TRUNK_DISABLE:
+		rc = bfad_iocmd_cfg_trunk(bfad, iocmd, cmd);
+		break;
+	case IOCMD_TRUNK_GET_ATTR:
+		rc = bfad_iocmd_trunk_get_attr(bfad, iocmd);
+		break;
+	case IOCMD_QOS_ENABLE:
+	case IOCMD_QOS_DISABLE:
+		rc = bfad_iocmd_qos(bfad, iocmd, cmd);
+		break;
+	case IOCMD_QOS_GET_ATTR:
+		rc = bfad_iocmd_qos_get_attr(bfad, iocmd);
+		break;
+	case IOCMD_QOS_GET_VC_ATTR:
+		rc = bfad_iocmd_qos_get_vc_attr(bfad, iocmd);
+		break;
+	case IOCMD_QOS_GET_STATS:
+		rc = bfad_iocmd_qos_get_stats(bfad, iocmd);
+		break;
+	case IOCMD_QOS_RESET_STATS:
+		rc = bfad_iocmd_qos_reset_stats(bfad, iocmd);
+		break;
+	case IOCMD_QOS_SET_BW:
+		rc = bfad_iocmd_qos_set_bw(bfad, iocmd);
+		break;
+	case IOCMD_VF_GET_STATS:
+		rc = bfad_iocmd_vf_get_stats(bfad, iocmd);
+		break;
+	case IOCMD_VF_RESET_STATS:
+		rc = bfad_iocmd_vf_clr_stats(bfad, iocmd);
+		break;
+	case IOCMD_FCPIM_LUNMASK_ENABLE:
+	case IOCMD_FCPIM_LUNMASK_DISABLE:
+	case IOCMD_FCPIM_LUNMASK_CLEAR:
+		rc = bfad_iocmd_lunmask(bfad, iocmd, cmd);
+		break;
+	case IOCMD_FCPIM_LUNMASK_QUERY:
+		rc = bfad_iocmd_fcpim_lunmask_query(bfad, iocmd);
+		break;
+	case IOCMD_FCPIM_LUNMASK_ADD:
+	case IOCMD_FCPIM_LUNMASK_DELETE:
+		rc = bfad_iocmd_fcpim_cfg_lunmask(bfad, iocmd, cmd);
+		break;
+	case IOCMD_FCPIM_THROTTLE_QUERY:
+		rc = bfad_iocmd_fcpim_throttle_query(bfad, iocmd);
+		break;
+	case IOCMD_FCPIM_THROTTLE_SET:
+		rc = bfad_iocmd_fcpim_throttle_set(bfad, iocmd);
+		break;
+	/* TFRU */
+	case IOCMD_TFRU_READ:
+		rc = bfad_iocmd_tfru_read(bfad, iocmd);
+		break;
+	case IOCMD_TFRU_WRITE:
+		rc = bfad_iocmd_tfru_write(bfad, iocmd);
+		break;
+	/* FRU */
+	case IOCMD_FRUVPD_READ:
+		rc = bfad_iocmd_fruvpd_read(bfad, iocmd);
+		break;
+	case IOCMD_FRUVPD_UPDATE:
+		rc = bfad_iocmd_fruvpd_update(bfad, iocmd);
+		break;
+	case IOCMD_FRUVPD_GET_MAX_SIZE:
+		rc = bfad_iocmd_fruvpd_get_max_size(bfad, iocmd);
+		break;
+	default:
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+
+static int
+bfad_im_bsg_vendor_request(struct bsg_job *job)
+{
+	struct fc_bsg_request *bsg_request = job->request;
+	struct fc_bsg_reply *bsg_reply = job->reply;
+	uint32_t vendor_cmd = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
+	struct Scsi_Host *shost = fc_bsg_to_shost(job);
+	struct bfad_im_port_s *im_port = bfad_get_im_port(shost);
+	struct bfad_s *bfad = im_port->bfad;
+	void *payload_kbuf;
+	int rc = -EINVAL;
+
+	/* Allocate a temp buffer to hold the passed-in user-space command */
+	payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
+	if (!payload_kbuf) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	/* Copy the passed-in sg_list into a linear buffer holding the cmnd data */
+	sg_copy_to_buffer(job->request_payload.sg_list,
+			  job->request_payload.sg_cnt, payload_kbuf,
+			  job->request_payload.payload_len);
+
+	/* Invoke the IOCMD handler to process the vendor command request */
+	rc = bfad_iocmd_handler(bfad, vendor_cmd, payload_kbuf,
+				job->request_payload.payload_len);
+	if (rc != BFA_STATUS_OK)
+		goto error;
+
+	/* Copy the response data to the job->reply_payload sg_list */
+	sg_copy_from_buffer(job->reply_payload.sg_list,
+			    job->reply_payload.sg_cnt,
+			    payload_kbuf,
+			    job->reply_payload.payload_len);
+
+	/* free the command buffer */
+	kfree(payload_kbuf);
+
+	/* Fill the BSG job reply data */
+	job->reply_len = job->reply_payload.payload_len;
+	bsg_reply->reply_payload_rcv_len = job->reply_payload.payload_len;
+	bsg_reply->result = rc;
+
+	bsg_job_done(job, bsg_reply->result,
+		       bsg_reply->reply_payload_rcv_len);
+	return rc;
+error:
+	/* free the command buffer */
+	kfree(payload_kbuf);
+out:
+	bsg_reply->result = rc;
+	job->reply_len = sizeof(uint32_t);
+	bsg_reply->reply_payload_rcv_len = 0;
+	return rc;
+}
+
+/* FC passthru callbacks */
+u64
+bfad_fcxp_get_req_sgaddr_cb(void *bfad_fcxp, int sgeid)
+{
+	struct bfad_fcxp	*drv_fcxp = bfad_fcxp;
+	struct bfa_sge_s  *sge;
+	u64	addr;
+
+	sge = drv_fcxp->req_sge + sgeid;
+	addr = (u64)(size_t) sge->sg_addr;
+	return addr;
+}
+
+u32
+bfad_fcxp_get_req_sglen_cb(void *bfad_fcxp, int sgeid)
+{
+	struct bfad_fcxp	*drv_fcxp = bfad_fcxp;
+	struct bfa_sge_s	*sge;
+
+	sge = drv_fcxp->req_sge + sgeid;
+	return sge->sg_len;
+}
+
+u64
+bfad_fcxp_get_rsp_sgaddr_cb(void *bfad_fcxp, int sgeid)
+{
+	struct bfad_fcxp	*drv_fcxp = bfad_fcxp;
+	struct bfa_sge_s	*sge;
+	u64	addr;
+
+	sge = drv_fcxp->rsp_sge + sgeid;
+	addr = (u64)(size_t) sge->sg_addr;
+	return addr;
+}
+
+u32
+bfad_fcxp_get_rsp_sglen_cb(void *bfad_fcxp, int sgeid)
+{
+	struct bfad_fcxp	*drv_fcxp = bfad_fcxp;
+	struct bfa_sge_s	*sge;
+
+	sge = drv_fcxp->rsp_sge + sgeid;
+	return sge->sg_len;
+}
+
+void
+bfad_send_fcpt_cb(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
+		bfa_status_t req_status, u32 rsp_len, u32 resid_len,
+		struct fchs_s *rsp_fchs)
+{
+	struct bfad_fcxp *drv_fcxp = bfad_fcxp;
+
+	drv_fcxp->req_status = req_status;
+	drv_fcxp->rsp_len = rsp_len;
+
+	/* bfa_fcxp will be automatically freed by BFA */
+	drv_fcxp->bfa_fcxp = NULL;
+	complete(&drv_fcxp->comp);
+}
+
+struct bfad_buf_info *
+bfad_fcxp_map_sg(struct bfad_s *bfad, void *payload_kbuf,
+		 uint32_t payload_len, uint32_t *num_sgles)
+{
+	struct bfad_buf_info	*buf_base, *buf_info;
+	struct bfa_sge_s	*sg_table;
+	int sge_num = 1;
+
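+	/*
+	 * Single allocation laid out as
+	 * [bfad_buf_info x sge_num][bfa_sge_s x sge_num];
+	 * sg_table below points just past the buf_info array.
+	 */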
+	buf_base = kcalloc(sge_num, sizeof(struct bfad_buf_info) +
+				    sizeof(struct bfa_sge_s),
+			   GFP_KERNEL);
+	if (!buf_base)
+		return NULL;
+
+	sg_table = (struct bfa_sge_s *) (((uint8_t *)buf_base) +
+			(sizeof(struct bfad_buf_info) * sge_num));
+
+	/* Allocate dma coherent memory */
+	buf_info = buf_base;
+	buf_info->size = payload_len;
+	buf_info->virt = dma_zalloc_coherent(&bfad->pcidev->dev,
+					     buf_info->size, &buf_info->phys,
+					     GFP_KERNEL);
+	if (!buf_info->virt)
+		goto out_free_mem;
+
+	/* copy the linear bsg buffer to buf_info */
+	memcpy(buf_info->virt, payload_kbuf, buf_info->size);
+
+	/*
+	 * Setup SG table
+	 */
+	sg_table->sg_len = buf_info->size;
+	sg_table->sg_addr = (void *)(size_t) buf_info->phys;
+
+	*num_sgles = sge_num;
+
+	return buf_base;
+
+out_free_mem:
+	kfree(buf_base);
+	return NULL;
+}
+
+void
+bfad_fcxp_free_mem(struct bfad_s *bfad, struct bfad_buf_info *buf_base,
+		   uint32_t num_sgles)
+{
+	int i;
+	struct bfad_buf_info *buf_info = buf_base;
+
+	if (buf_base) {
+		for (i = 0; i < num_sgles; buf_info++, i++) {
+			if (buf_info->virt != NULL)
+				dma_free_coherent(&bfad->pcidev->dev,
+					buf_info->size, buf_info->virt,
+					buf_info->phys);
+		}
+		kfree(buf_base);
+	}
+}
+
+int
+bfad_fcxp_bsg_send(struct bsg_job *job, struct bfad_fcxp *drv_fcxp,
+		   bfa_bsg_fcpt_t *bsg_fcpt)
+{
+	struct bfa_fcxp_s *hal_fcxp;
+	struct bfad_s	*bfad = drv_fcxp->port->bfad;
+	unsigned long	flags;
+	uint8_t	lp_tag;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+
+	/* Allocate bfa_fcxp structure */
+	hal_fcxp = bfa_fcxp_req_rsp_alloc(drv_fcxp, &bfad->bfa,
+				  drv_fcxp->num_req_sgles,
+				  drv_fcxp->num_rsp_sgles,
+				  bfad_fcxp_get_req_sgaddr_cb,
+				  bfad_fcxp_get_req_sglen_cb,
+				  bfad_fcxp_get_rsp_sgaddr_cb,
+				  bfad_fcxp_get_rsp_sglen_cb, BFA_TRUE);
+	if (!hal_fcxp) {
+		bfa_trc(bfad, 0);
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		return BFA_STATUS_ENOMEM;
+	}
+
+	drv_fcxp->bfa_fcxp = hal_fcxp;
+
+	lp_tag = bfa_lps_get_tag_from_pid(&bfad->bfa, bsg_fcpt->fchs.s_id);
+
+	bfa_fcxp_send(hal_fcxp, drv_fcxp->bfa_rport, bsg_fcpt->vf_id, lp_tag,
+		      bsg_fcpt->cts, bsg_fcpt->cos,
+		      job->request_payload.payload_len,
+		      &bsg_fcpt->fchs, bfad_send_fcpt_cb, bfad,
+		      job->reply_payload.payload_len, bsg_fcpt->tsecs);
+
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	return BFA_STATUS_OK;
+}
+
+int
+bfad_im_bsg_els_ct_request(struct bsg_job *job)
+{
+	struct bfa_bsg_data *bsg_data;
+	struct Scsi_Host *shost = fc_bsg_to_shost(job);
+	struct bfad_im_port_s *im_port = bfad_get_im_port(shost);
+	struct bfad_s *bfad = im_port->bfad;
+	bfa_bsg_fcpt_t *bsg_fcpt;
+	struct bfad_fcxp    *drv_fcxp;
+	struct bfa_fcs_lport_s *fcs_port;
+	struct bfa_fcs_rport_s *fcs_rport;
+	struct fc_bsg_request *bsg_request = job->request;
+	struct fc_bsg_reply *bsg_reply = job->reply;
+	uint32_t command_type = bsg_request->msgcode;
+	unsigned long flags;
+	struct bfad_buf_info *rsp_buf_info;
+	void *req_kbuf = NULL, *rsp_kbuf = NULL;
+	int rc = -EINVAL;
+
+	job->reply_len = sizeof(uint32_t);	/* At least a uint32_t reply_len */
+	bsg_reply->reply_payload_rcv_len = 0;
+
+	/* Get the payload passed in from userspace */
+	bsg_data = (struct bfa_bsg_data *) (((char *)bsg_request) +
+					    sizeof(struct fc_bsg_request));
+	if (bsg_data == NULL)
+		goto out;
+
+	/*
+	 * Allocate a buffer for bsg_fcpt and copy in the user payload
+	 * of bsg_data->payload_len bytes.
+	 */
+	bsg_fcpt = kzalloc(bsg_data->payload_len, GFP_KERNEL);
+	if (!bsg_fcpt) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	if (copy_from_user((uint8_t *)bsg_fcpt,
+				(void *)(unsigned long)bsg_data->payload,
+				bsg_data->payload_len)) {
+		kfree(bsg_fcpt);
+		rc = -EIO;
+		goto out;
+	}
+
+	drv_fcxp = kzalloc(sizeof(struct bfad_fcxp), GFP_KERNEL);
+	if (drv_fcxp == NULL) {
+		kfree(bsg_fcpt);
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, bsg_fcpt->vf_id,
+					bsg_fcpt->lpwwn);
+	if (fcs_port == NULL) {
+		bsg_fcpt->status = BFA_STATUS_UNKNOWN_LWWN;
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		goto out_free_mem;
+	}
+
+	/* Check if the port is online before sending FC Passthru cmd */
+	if (!bfa_fcs_lport_is_online(fcs_port)) {
+		bsg_fcpt->status = BFA_STATUS_PORT_OFFLINE;
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		goto out_free_mem;
+	}
+
+	drv_fcxp->port = fcs_port->bfad_port;
+
+	if (drv_fcxp->port->bfad == NULL)
+		drv_fcxp->port->bfad = bfad;
+
+	/* Fetch the bfa_rport - if nexus needed */
+	if (command_type == FC_BSG_HST_ELS_NOLOGIN ||
+	    command_type == FC_BSG_HST_CT) {
+		/* BSG HST commands: no nexus needed */
+		drv_fcxp->bfa_rport = NULL;
+
+	} else if (command_type == FC_BSG_RPT_ELS ||
+		   command_type == FC_BSG_RPT_CT) {
+		/* BSG RPT commands: nexus needed */
+		fcs_rport = bfa_fcs_lport_get_rport_by_pwwn(fcs_port,
+							    bsg_fcpt->dpwwn);
+		if (fcs_rport == NULL) {
+			bsg_fcpt->status = BFA_STATUS_UNKNOWN_RWWN;
+			spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+			goto out_free_mem;
+		}
+
+		drv_fcxp->bfa_rport = fcs_rport->bfa_rport;
+
+	} else { /* Unknown BSG msgcode; return -EINVAL */
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		goto out_free_mem;
+	}
+
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	/* allocate memory for req / rsp buffers */
+	req_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
+	if (!req_kbuf) {
+		printk(KERN_INFO "bfa %s: fcpt request buffer alloc failed\n",
+				bfad->pci_name);
+		rc = -ENOMEM;
+		goto out_free_mem;
+	}
+
+	rsp_kbuf = kzalloc(job->reply_payload.payload_len, GFP_KERNEL);
+	if (!rsp_kbuf) {
+		printk(KERN_INFO "bfa %s: fcpt response buffer alloc failed\n",
+				bfad->pci_name);
+		rc = -ENOMEM;
+		goto out_free_mem;
+	}
+
+	/* map req sg - copy the sg_list passed in to the linear buffer */
+	sg_copy_to_buffer(job->request_payload.sg_list,
+			  job->request_payload.sg_cnt, req_kbuf,
+			  job->request_payload.payload_len);
+
+	drv_fcxp->reqbuf_info = bfad_fcxp_map_sg(bfad, req_kbuf,
+					job->request_payload.payload_len,
+					&drv_fcxp->num_req_sgles);
+	if (!drv_fcxp->reqbuf_info) {
+		printk(KERN_INFO "bfa %s: fcpt request fcxp_map_sg failed\n",
+				bfad->pci_name);
+		rc = -ENOMEM;
+		goto out_free_mem;
+	}
+
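+	/*
+	 * bfad_fcxp_map_sg() returns one block laid out as
+	 * [bfad_buf_info x n][bfa_sge_s x n]; point req_sge at the
+	 * SG table just past the buf_info array.
+	 */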
+	drv_fcxp->req_sge = (struct bfa_sge_s *)
+			    (((uint8_t *)drv_fcxp->reqbuf_info) +
+			    (sizeof(struct bfad_buf_info) *
+					drv_fcxp->num_req_sgles));
+
+	/* map rsp sg */
+	drv_fcxp->rspbuf_info = bfad_fcxp_map_sg(bfad, rsp_kbuf,
+					job->reply_payload.payload_len,
+					&drv_fcxp->num_rsp_sgles);
+	if (!drv_fcxp->rspbuf_info) {
+		printk(KERN_INFO "bfa %s: fcpt response fcxp_map_sg failed\n",
+				bfad->pci_name);
+		rc = -ENOMEM;
+		goto out_free_mem;
+	}
+
+	rsp_buf_info = (struct bfad_buf_info *)drv_fcxp->rspbuf_info;
+	drv_fcxp->rsp_sge = (struct bfa_sge_s  *)
+			    (((uint8_t *)drv_fcxp->rspbuf_info) +
+			    (sizeof(struct bfad_buf_info) *
+					drv_fcxp->num_rsp_sgles));
+
+	/* fcxp send */
+	init_completion(&drv_fcxp->comp);
+	rc = bfad_fcxp_bsg_send(job, drv_fcxp, bsg_fcpt);
+	if (rc == BFA_STATUS_OK) {
+		wait_for_completion(&drv_fcxp->comp);
+		bsg_fcpt->status = drv_fcxp->req_status;
+	} else {
+		bsg_fcpt->status = rc;
+		goto out_free_mem;
+	}
+
+	/* fill the job->reply data */
+	if (drv_fcxp->req_status == BFA_STATUS_OK) {
+		job->reply_len = drv_fcxp->rsp_len;
+		bsg_reply->reply_payload_rcv_len = drv_fcxp->rsp_len;
+		bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
+	} else {
+		bsg_reply->reply_payload_rcv_len =
+					sizeof(struct fc_bsg_ctels_reply);
+		job->reply_len = sizeof(uint32_t);
+		bsg_reply->reply_data.ctels_reply.status =
+						FC_CTELS_STATUS_REJECT;
+	}
+
+	/* Copy the response data to the reply_payload sg list */
+	sg_copy_from_buffer(job->reply_payload.sg_list,
+			    job->reply_payload.sg_cnt,
+			    (uint8_t *)rsp_buf_info->virt,
+			    job->reply_payload.payload_len);
+
+out_free_mem:
+	bfad_fcxp_free_mem(bfad, drv_fcxp->rspbuf_info,
+			   drv_fcxp->num_rsp_sgles);
+	bfad_fcxp_free_mem(bfad, drv_fcxp->reqbuf_info,
+			   drv_fcxp->num_req_sgles);
+	kfree(req_kbuf);
+	kfree(rsp_kbuf);
+
+	/* Copy the response back to the user buffer */
+	if (copy_to_user((void *)(unsigned long)bsg_data->payload,
+			(void *)bsg_fcpt, bsg_data->payload_len))
+		rc = -EIO;
+
+	kfree(bsg_fcpt);
+	kfree(drv_fcxp);
+out:
+	bsg_reply->result = rc;
+
+	if (rc == BFA_STATUS_OK)
+		bsg_job_done(job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
+
+	return rc;
+}
+
+int
+bfad_im_bsg_request(struct bsg_job *job)
+{
+	struct fc_bsg_request *bsg_request = job->request;
+	struct fc_bsg_reply *bsg_reply = job->reply;
+	int rc = BFA_STATUS_OK;
+
+	switch (bsg_request->msgcode) {
+	case FC_BSG_HST_VENDOR:
+		/* Process BSG HST Vendor requests */
+		rc = bfad_im_bsg_vendor_request(job);
+		break;
+	case FC_BSG_HST_ELS_NOLOGIN:
+	case FC_BSG_RPT_ELS:
+	case FC_BSG_HST_CT:
+	case FC_BSG_RPT_CT:
+		/* Process BSG ELS/CT commands */
+		rc = bfad_im_bsg_els_ct_request(job);
+		break;
+	default:
+		bsg_reply->result = rc = -EINVAL;
+		bsg_reply->reply_payload_rcv_len = 0;
+		break;
+	}
+
+	return rc;
+}
+
+int
+bfad_im_bsg_timeout(struct bsg_job *job)
+{
+	/* Don't complete the BSG job request - return -EAGAIN
+	 * to reset the bsg job timeout; for ELS/CT passthru we
+	 * already have a timer to track the request.
+	 */
+	return -EAGAIN;
+}
diff --git a/drivers/scsi/bfa/bfad_bsg.h b/drivers/scsi/bfa/bfad_bsg.h
new file mode 100644
index 0000000..917e140
--- /dev/null
+++ b/drivers/scsi/bfa/bfad_bsg.h
@@ -0,0 +1,837 @@
+/*
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014- QLogic Corporation.
+ * All rights reserved
+ * www.qlogic.com
+ *
+ * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+#ifndef BFAD_BSG_H
+#define BFAD_BSG_H
+
+#include "bfa_defs.h"
+#include "bfa_defs_fcs.h"
+
+/* Definitions of vendor unique structures and command codes passed in
+ * using FC_BSG_HST_VENDOR message code.
+ */
+enum {
+	IOCMD_IOC_ENABLE = 0x1,
+	IOCMD_IOC_DISABLE,
+	IOCMD_IOC_GET_ATTR,
+	IOCMD_IOC_GET_INFO,
+	IOCMD_IOC_GET_STATS,
+	IOCMD_IOC_GET_FWSTATS,
+	IOCMD_IOC_RESET_STATS,
+	IOCMD_IOC_RESET_FWSTATS,
+	IOCMD_IOC_SET_ADAPTER_NAME,
+	IOCMD_IOC_SET_PORT_NAME,
+	IOCMD_IOC_FW_SIG_INV,
+	IOCMD_IOCFC_GET_ATTR,
+	IOCMD_IOCFC_SET_INTR,
+	IOCMD_PORT_ENABLE,
+	IOCMD_PORT_DISABLE,
+	IOCMD_PORT_GET_ATTR,
+	IOCMD_PORT_GET_STATS,
+	IOCMD_PORT_RESET_STATS,
+	IOCMD_PORT_CFG_TOPO,
+	IOCMD_PORT_CFG_SPEED,
+	IOCMD_PORT_CFG_ALPA,
+	IOCMD_PORT_CFG_MAXFRSZ,
+	IOCMD_PORT_CLR_ALPA,
+	IOCMD_PORT_BBCR_ENABLE,
+	IOCMD_PORT_BBCR_DISABLE,
+	IOCMD_PORT_BBCR_GET_ATTR,
+	IOCMD_LPORT_GET_ATTR,
+	IOCMD_LPORT_GET_RPORTS,
+	IOCMD_LPORT_GET_STATS,
+	IOCMD_LPORT_RESET_STATS,
+	IOCMD_LPORT_GET_IOSTATS,
+	IOCMD_RPORT_GET_ATTR,
+	IOCMD_RPORT_GET_ADDR,
+	IOCMD_RPORT_GET_STATS,
+	IOCMD_RPORT_RESET_STATS,
+	IOCMD_RPORT_SET_SPEED,
+	IOCMD_VPORT_GET_ATTR,
+	IOCMD_VPORT_GET_STATS,
+	IOCMD_VPORT_RESET_STATS,
+	IOCMD_FABRIC_GET_LPORTS,
+	IOCMD_RATELIM_ENABLE,
+	IOCMD_RATELIM_DISABLE,
+	IOCMD_RATELIM_DEF_SPEED,
+	IOCMD_FCPIM_FAILOVER,
+	IOCMD_FCPIM_MODSTATS,
+	IOCMD_FCPIM_MODSTATSCLR,
+	IOCMD_FCPIM_DEL_ITN_STATS,
+	IOCMD_ITNIM_GET_ATTR,
+	IOCMD_ITNIM_GET_IOSTATS,
+	IOCMD_ITNIM_RESET_STATS,
+	IOCMD_ITNIM_GET_ITNSTATS,
+	IOCMD_IOC_PCIFN_CFG,
+	IOCMD_FCPORT_ENABLE,
+	IOCMD_FCPORT_DISABLE,
+	IOCMD_PCIFN_CREATE,
+	IOCMD_PCIFN_DELETE,
+	IOCMD_PCIFN_BW,
+	IOCMD_ADAPTER_CFG_MODE,
+	IOCMD_PORT_CFG_MODE,
+	IOCMD_FLASH_ENABLE_OPTROM,
+	IOCMD_FLASH_DISABLE_OPTROM,
+	IOCMD_FAA_QUERY,
+	IOCMD_CEE_GET_ATTR,
+	IOCMD_CEE_GET_STATS,
+	IOCMD_CEE_RESET_STATS,
+	IOCMD_SFP_MEDIA,
+	IOCMD_SFP_SPEED,
+	IOCMD_FLASH_GET_ATTR,
+	IOCMD_FLASH_ERASE_PART,
+	IOCMD_FLASH_UPDATE_PART,
+	IOCMD_FLASH_READ_PART,
+	IOCMD_DIAG_TEMP,
+	IOCMD_DIAG_MEMTEST,
+	IOCMD_DIAG_LOOPBACK,
+	IOCMD_DIAG_FWPING,
+	IOCMD_DIAG_QUEUETEST,
+	IOCMD_DIAG_SFP,
+	IOCMD_DIAG_LED,
+	IOCMD_DIAG_BEACON_LPORT,
+	IOCMD_DIAG_LB_STAT,
+	IOCMD_PHY_GET_ATTR,
+	IOCMD_PHY_GET_STATS,
+	IOCMD_PHY_UPDATE_FW,
+	IOCMD_PHY_READ_FW,
+	IOCMD_VHBA_QUERY,
+	IOCMD_DEBUG_PORTLOG,
+	IOCMD_DEBUG_FW_CORE,
+	IOCMD_DEBUG_FW_STATE_CLR,
+	IOCMD_DEBUG_PORTLOG_CLR,
+	IOCMD_DEBUG_START_DTRC,
+	IOCMD_DEBUG_STOP_DTRC,
+	IOCMD_DEBUG_PORTLOG_CTL,
+	IOCMD_FCPIM_PROFILE_ON,
+	IOCMD_FCPIM_PROFILE_OFF,
+	IOCMD_ITNIM_GET_IOPROFILE,
+	IOCMD_FCPORT_GET_STATS,
+	IOCMD_FCPORT_RESET_STATS,
+	IOCMD_BOOT_CFG,
+	IOCMD_BOOT_QUERY,
+	IOCMD_PREBOOT_QUERY,
+	IOCMD_ETHBOOT_CFG,
+	IOCMD_ETHBOOT_QUERY,
+	IOCMD_TRUNK_ENABLE,
+	IOCMD_TRUNK_DISABLE,
+	IOCMD_TRUNK_GET_ATTR,
+	IOCMD_QOS_ENABLE,
+	IOCMD_QOS_DISABLE,
+	IOCMD_QOS_GET_ATTR,
+	IOCMD_QOS_GET_VC_ATTR,
+	IOCMD_QOS_GET_STATS,
+	IOCMD_QOS_RESET_STATS,
+	IOCMD_VF_GET_STATS,
+	IOCMD_VF_RESET_STATS,
+	IOCMD_FCPIM_LUNMASK_ENABLE,
+	IOCMD_FCPIM_LUNMASK_DISABLE,
+	IOCMD_FCPIM_LUNMASK_CLEAR,
+	IOCMD_FCPIM_LUNMASK_QUERY,
+	IOCMD_FCPIM_LUNMASK_ADD,
+	IOCMD_FCPIM_LUNMASK_DELETE,
+	IOCMD_DIAG_DPORT_ENABLE,
+	IOCMD_DIAG_DPORT_DISABLE,
+	IOCMD_QOS_SET_BW,
+	IOCMD_FCPIM_THROTTLE_QUERY,
+	IOCMD_FCPIM_THROTTLE_SET,
+	IOCMD_TFRU_READ,
+	IOCMD_TFRU_WRITE,
+	IOCMD_FRUVPD_READ,
+	IOCMD_FRUVPD_UPDATE,
+	IOCMD_FRUVPD_GET_MAX_SIZE,
+	IOCMD_DIAG_DPORT_SHOW,
+	IOCMD_DIAG_DPORT_START,
+};
+
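+/*
+ * Illustrative framing (a sketch, not driver-mandated documentation):
+ * userspace places one IOCMD_* code in vendor_cmd[0] of an
+ * FC_BSG_HST_VENDOR request and passes the matching bfa_bsg_*_s
+ * structure as the request payload, e.g. for IOCMD_IOC_GET_ATTR:
+ *
+ *	struct bfa_bsg_ioc_attr_s ioc_attr_cmd = { .bfad_num = 0 };
+ *
+ * The driver returns the same structure through the reply payload
+ * with its status member (and any output fields) filled in.
+ */
+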
+struct bfa_bsg_gen_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+};
+
+struct bfa_bsg_portlogctl_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	bfa_boolean_t	ctl;
+	int		inst_no;
+};
+
+struct bfa_bsg_fcpim_profile_s {
+	bfa_status_t    status;
+	u16		bfad_num;
+	u16		rsvd;
+};
+
+struct bfa_bsg_itnim_ioprofile_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		vf_id;
+	wwn_t		lpwwn;
+	wwn_t		rpwwn;
+	struct bfa_itnim_ioprofile_s ioprofile;
+};
+
+struct bfa_bsg_fcport_stats_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	union bfa_fcport_stats_u stats;
+};
+
+struct bfa_bsg_ioc_name_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	char		name[BFA_ADAPTER_SYM_NAME_LEN];
+};
+
+struct bfa_bsg_ioc_info_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	char		serialnum[64];
+	char		hwpath[BFA_STRING_32];
+	char		adapter_hwpath[BFA_STRING_32];
+	char		guid[BFA_ADAPTER_SYM_NAME_LEN*2];
+	char		name[BFA_ADAPTER_SYM_NAME_LEN];
+	char		port_name[BFA_ADAPTER_SYM_NAME_LEN];
+	char		eth_name[BFA_ADAPTER_SYM_NAME_LEN];
+	wwn_t		pwwn;
+	wwn_t		nwwn;
+	wwn_t		factorypwwn;
+	wwn_t		factorynwwn;
+	mac_t		mac;
+	mac_t		factory_mac; /* Factory mac address */
+	mac_t		current_mac; /* Currently assigned mac address */
+	enum bfa_ioc_type_e	ioc_type;
+	u16		pvid; /* Port vlan id */
+	u16		rsvd1;
+	u32		host;
+	u32		bandwidth; /* For PF support */
+	u32		rsvd2;
+};
+
+struct bfa_bsg_ioc_attr_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	struct bfa_ioc_attr_s  ioc_attr;
+};
+
+struct bfa_bsg_ioc_stats_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	struct bfa_ioc_stats_s ioc_stats;
+};
+
+struct bfa_bsg_ioc_fwstats_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	u32		buf_size;
+	u32		rsvd1;
+	u64		buf_ptr;
+};
+
+struct bfa_bsg_iocfc_attr_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	struct bfa_iocfc_attr_s	iocfc_attr;
+};
+
+struct bfa_bsg_iocfc_intr_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	struct bfa_iocfc_intr_attr_s attr;
+};
+
+struct bfa_bsg_port_attr_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	struct bfa_port_attr_s	attr;
+};
+
+struct bfa_bsg_port_cfg_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	u32		param;
+	u32		rsvd1;
+};
+
+struct bfa_bsg_port_cfg_maxfrsize_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		maxfrsize;
+};
+
+struct bfa_bsg_port_stats_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	u32		buf_size;
+	u32		rsvd1;
+	u64		buf_ptr;
+};
+
+struct bfa_bsg_lport_attr_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		vf_id;
+	wwn_t		pwwn;
+	struct bfa_lport_attr_s port_attr;
+};
+
+struct bfa_bsg_lport_stats_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		vf_id;
+	wwn_t		pwwn;
+	struct bfa_lport_stats_s port_stats;
+};
+
+struct bfa_bsg_lport_iostats_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		vf_id;
+	wwn_t		pwwn;
+	struct bfa_itnim_iostats_s iostats;
+};
+
+struct bfa_bsg_lport_get_rports_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		vf_id;
+	wwn_t		pwwn;
+	u64		rbuf_ptr;
+	u32		nrports;
+	u32		rsvd;
+};
+
+struct bfa_bsg_rport_attr_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		vf_id;
+	wwn_t		pwwn;
+	wwn_t		rpwwn;
+	u32		pid;
+	u32		rsvd;
+	struct bfa_rport_attr_s attr;
+};
+
+struct bfa_bsg_rport_stats_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		vf_id;
+	wwn_t		pwwn;
+	wwn_t		rpwwn;
+	struct bfa_rport_stats_s stats;
+};
+
+struct bfa_bsg_rport_scsi_addr_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		vf_id;
+	wwn_t		pwwn;
+	wwn_t		rpwwn;
+	u32		host;
+	u32		bus;
+	u32		target;
+	u32		lun;
+};
+
+struct bfa_bsg_rport_reset_stats_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		vf_id;
+	wwn_t		pwwn;
+	wwn_t		rpwwn;
+};
+
+struct bfa_bsg_rport_set_speed_s {
+	bfa_status_t		status;
+	u16			bfad_num;
+	u16			vf_id;
+	enum bfa_port_speed	speed;
+	u32			rsvd;
+	wwn_t			pwwn;
+	wwn_t			rpwwn;
+};
+
+struct bfa_bsg_vport_attr_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		vf_id;
+	wwn_t		vpwwn;
+	struct bfa_vport_attr_s vport_attr;
+};
+
+struct bfa_bsg_vport_stats_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		vf_id;
+	wwn_t		vpwwn;
+	struct bfa_vport_stats_s vport_stats;
+};
+
+struct bfa_bsg_reset_stats_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		vf_id;
+	wwn_t		vpwwn;
+};
+
+struct bfa_bsg_fabric_get_lports_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		vf_id;
+	u64		buf_ptr;
+	u32		nports;
+	u32		rsvd;
+};
+
+struct bfa_bsg_trl_speed_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	enum bfa_port_speed speed;
+};
+
+struct bfa_bsg_fcpim_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		param;
+};
+
+struct bfa_bsg_fcpim_modstats_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	struct bfa_itnim_iostats_s modstats;
+};
+
+struct bfa_bsg_fcpim_del_itn_stats_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	struct bfa_fcpim_del_itn_stats_s modstats;
+};
+
+struct bfa_bsg_fcpim_modstatsclr_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+};
+
+struct bfa_bsg_itnim_attr_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		vf_id;
+	wwn_t		lpwwn;
+	wwn_t		rpwwn;
+	struct bfa_itnim_attr_s	attr;
+};
+
+struct bfa_bsg_itnim_iostats_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		vf_id;
+	wwn_t		lpwwn;
+	wwn_t		rpwwn;
+	struct bfa_itnim_iostats_s iostats;
+};
+
+struct bfa_bsg_itnim_itnstats_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		vf_id;
+	wwn_t		lpwwn;
+	wwn_t		rpwwn;
+	struct bfa_itnim_stats_s itnstats;
+};
+
+struct bfa_bsg_pcifn_cfg_s {
+	bfa_status_t		status;
+	u16			bfad_num;
+	u16			rsvd;
+	struct bfa_ablk_cfg_s	pcifn_cfg;
+};
+
+struct bfa_bsg_pcifn_s {
+	bfa_status_t		status;
+	u16			bfad_num;
+	u16			pcifn_id;
+	u16			bw_min;
+	u16			bw_max;
+	u8			port;
+	enum bfi_pcifn_class	pcifn_class;
+	u8			rsvd[1];
+};
+
+struct bfa_bsg_adapter_cfg_mode_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	struct bfa_adapter_cfg_mode_s	cfg;
+};
+
+struct bfa_bsg_port_cfg_mode_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		instance;
+	struct bfa_port_cfg_mode_s cfg;
+};
+
+struct bfa_bsg_bbcr_enable_s {
+	bfa_status_t    status;
+	u16		bfad_num;
+	u8		bb_scn;
+	u8		rsvd;
+};
+
+struct bfa_bsg_bbcr_attr_s {
+	bfa_status_t    status;
+	u16		bfad_num;
+	u16		rsvd;
+	struct bfa_bbcr_attr_s attr;
+};
+
+struct bfa_bsg_faa_attr_s {
+	bfa_status_t		status;
+	u16			bfad_num;
+	u16			rsvd;
+	struct bfa_faa_attr_s	faa_attr;
+};
+
+struct bfa_bsg_cee_attr_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	u32		buf_size;
+	u32		rsvd1;
+	u64		buf_ptr;
+};
+
+struct bfa_bsg_cee_stats_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	u32		buf_size;
+	u32		rsvd1;
+	u64		buf_ptr;
+};
+
+struct bfa_bsg_sfp_media_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	enum bfa_defs_sfp_media_e media;
+};
+
+struct bfa_bsg_sfp_speed_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	enum bfa_port_speed speed;
+};
+
+struct bfa_bsg_flash_attr_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	struct bfa_flash_attr_s attr;
+};
+
+struct bfa_bsg_flash_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u8		instance;
+	u8		rsvd;
+	enum  bfa_flash_part_type type;
+	int		bufsz;
+	u64		buf_ptr;
+};
+
+struct bfa_bsg_diag_get_temp_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	struct bfa_diag_results_tempsensor_s result;
+};
+
+struct bfa_bsg_diag_memtest_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd[3];
+	u32		pat;
+	struct bfa_diag_memtest_result result;
+	struct bfa_diag_memtest_s memtest;
+};
+
+struct bfa_bsg_diag_loopback_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	enum bfa_port_opmode opmode;
+	enum bfa_port_speed speed;
+	u32		lpcnt;
+	u32		pat;
+	struct bfa_diag_loopback_result_s result;
+};
+
+struct bfa_bsg_diag_dport_show_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	struct bfa_diag_dport_result_s result;
+};
+
+struct bfa_bsg_dport_enable_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	u16		lpcnt;
+	u16		pat;
+};
+
+struct bfa_bsg_diag_fwping_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	u32		cnt;
+	u32		pattern;
+	struct bfa_diag_results_fwping result;
+};
+
+struct bfa_bsg_diag_qtest_s {
+	bfa_status_t	status;
+	u16	bfad_num;
+	u16	rsvd;
+	u32	force;
+	u32	queue;
+	struct bfa_diag_qtest_result_s result;
+};
+
+struct bfa_bsg_sfp_show_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	struct sfp_mem_s sfp;
+};
+
+struct bfa_bsg_diag_led_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	struct bfa_diag_ledtest_s ledtest;
+};
+
+struct bfa_bsg_diag_beacon_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	bfa_boolean_t   beacon;
+	bfa_boolean_t   link_e2e_beacon;
+	u32		second;
+};
+
+struct bfa_bsg_diag_lb_stat_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+};
+
+struct bfa_bsg_phy_attr_s {
+	bfa_status_t	status;
+	u16	bfad_num;
+	u16	instance;
+	struct bfa_phy_attr_s	attr;
+};
+
+struct bfa_bsg_phy_s {
+	bfa_status_t	status;
+	u16	bfad_num;
+	u16	instance;
+	u64	bufsz;
+	u64	buf_ptr;
+};
+
+struct bfa_bsg_debug_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	u32		bufsz;
+	int		inst_no;
+	u64		buf_ptr;
+	u64		offset;
+};
+
+struct bfa_bsg_phy_stats_s {
+	bfa_status_t	status;
+	u16	bfad_num;
+	u16	instance;
+	struct bfa_phy_stats_s	stats;
+};
+
+struct bfa_bsg_vhba_attr_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		pcifn_id;
+	struct bfa_vhba_attr_s	attr;
+};
+
+struct bfa_bsg_boot_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	struct bfa_boot_cfg_s	cfg;
+};
+
+struct bfa_bsg_preboot_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	struct bfa_boot_pbc_s	cfg;
+};
+
+struct bfa_bsg_ethboot_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	struct  bfa_ethboot_cfg_s  cfg;
+};
+
+struct bfa_bsg_trunk_attr_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	struct bfa_trunk_attr_s attr;
+};
+
+struct bfa_bsg_qos_attr_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	struct bfa_qos_attr_s	attr;
+};
+
+struct bfa_bsg_qos_vc_attr_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	struct bfa_qos_vc_attr_s attr;
+};
+
+struct bfa_bsg_qos_bw_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	struct bfa_qos_bw_s qos_bw;
+};
+
+struct bfa_bsg_vf_stats_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		vf_id;
+	struct bfa_vf_stats_s	stats;
+};
+
+struct bfa_bsg_vf_reset_stats_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		vf_id;
+};
+
+struct bfa_bsg_fcpim_lunmask_query_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	struct bfa_lunmask_cfg_s lun_mask;
+};
+
+struct bfa_bsg_fcpim_lunmask_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		vf_id;
+	wwn_t		pwwn;
+	wwn_t		rpwwn;
+	struct scsi_lun	lun;
+};
+
+struct bfa_bsg_fcpim_throttle_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		vf_id;
+	struct bfa_defs_fcpim_throttle_s throttle;
+};
+
+#define BFA_TFRU_DATA_SIZE		64
+#define BFA_MAX_FRUVPD_TRANSFER_SIZE	0x1000
+
+struct bfa_bsg_tfru_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	u32		offset;
+	u32		len;
+	u8		data[BFA_TFRU_DATA_SIZE];
+};
+
+struct bfa_bsg_fruvpd_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd1;
+	u32		offset;
+	u32		len;
+	u8		data[BFA_MAX_FRUVPD_TRANSFER_SIZE];
+	u8		trfr_cmpl;
+	u8		rsvd2[3];
+};
+
+struct bfa_bsg_fruvpd_max_size_s {
+	bfa_status_t	status;
+	u16		bfad_num;
+	u16		rsvd;
+	u32		max_size;
+};
+
+struct bfa_bsg_fcpt_s {
+	bfa_status_t    status;
+	u16		vf_id;
+	wwn_t		lpwwn;
+	wwn_t		dpwwn;
+	u32		tsecs;
+	int		cts;
+	enum fc_cos	cos;
+	struct fchs_s	fchs;
+};
+#define bfa_bsg_fcpt_t struct bfa_bsg_fcpt_s
+
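+/*
+ * ELS/CT passthru wrapper: payload_len bytes of bfa_bsg_fcpt_s data
+ * live at the user-space address carried in payload (see the
+ * copy_from_user()/copy_to_user() pair in bfad_im_bsg_els_ct_request()).
+ */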
+#pragma pack(1)
+struct bfa_bsg_data {
+	int payload_len;
+	u64 payload;
+};
+#pragma pack()
+
+#define bfad_chk_iocmd_sz(__payload_len, __hdrsz, __bufsz)	\
+	(((__payload_len) != ((__hdrsz) + (__bufsz))) ?		\
+	 BFA_STATUS_FAILED : BFA_STATUS_OK)
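+
+/*
+ * Typical use in a handler that carries a variable-size buffer
+ * (a sketch; the fwstats structure is defined above):
+ *
+ *	if (bfad_chk_iocmd_sz(payload_len,
+ *			sizeof(struct bfa_bsg_ioc_fwstats_s),
+ *			iocmd->buf_size) != BFA_STATUS_OK) {
+ *		iocmd->status = BFA_STATUS_VERSION_FAIL;
+ *		return 0;
+ *	}
+ */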
+
+#endif /* BFAD_BSG_H */
diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
new file mode 100644
index 0000000..349cfe7
--- /dev/null
+++ b/drivers/scsi/bfa/bfad_debugfs.c
@@ -0,0 +1,530 @@
+/*
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014- QLogic Corporation.
+ * All rights reserved
+ * www.qlogic.com
+ *
+ * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/export.h>
+
+#include "bfad_drv.h"
+#include "bfad_im.h"
+
+/*
+ * BFA debugfs interface
+ *
+ * To access the interface, the debugfs file system should be mounted,
+ * if not already mounted, using:
+ * mount -t debugfs none /sys/kernel/debug
+ *
+ * BFA Hierarchy:
+ *	- bfa/pci_dev:<pci_name>
+ * where pci_name corresponds to the one under /sys/bus/pci/drivers/bfa
+ *
+ * Debugging services available per pci_dev:
+ * fwtrc:  To collect the current firmware trace.
+ * drvtrc: To collect the current driver trace.
+ * fwsave: To collect the last saved fw trace after a firmware crash.
+ * regwr:  To write one word to a chip register.
+ * regrd:  To read one or more words from a chip register.
+ */
+
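+/*
+ * Example session (illustrative; the PCI name below is hypothetical):
+ *	mount -t debugfs none /sys/kernel/debug
+ *	cd /sys/kernel/debug/bfa/pci_dev:0000:03:00.0
+ *	cat fwtrc > /tmp/fwtrc.bin
+ *	echo "140:4" > regrd && cat regrd	# read 4 words at offset 0x140
+ *	echo "140:1" > regwr			# write 0x1 at offset 0x140
+ */
+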
+struct bfad_debug_info {
+	char *debug_buffer;
+	void *i_private;
+	int buffer_len;
+};
+
+static int
+bfad_debugfs_open_drvtrc(struct inode *inode, struct file *file)
+{
+	struct bfad_port_s *port = inode->i_private;
+	struct bfad_s *bfad = port->bfad;
+	struct bfad_debug_info *debug;
+
+	debug = kzalloc(sizeof(struct bfad_debug_info), GFP_KERNEL);
+	if (!debug)
+		return -ENOMEM;
+
+	debug->debug_buffer = (void *) bfad->trcmod;
+	debug->buffer_len = sizeof(struct bfa_trc_mod_s);
+
+	file->private_data = debug;
+
+	return 0;
+}
+
+static int
+bfad_debugfs_open_fwtrc(struct inode *inode, struct file *file)
+{
+	struct bfad_port_s *port = inode->i_private;
+	struct bfad_s *bfad = port->bfad;
+	struct bfad_debug_info *fw_debug;
+	unsigned long flags;
+	int rc;
+
+	fw_debug = kzalloc(sizeof(struct bfad_debug_info), GFP_KERNEL);
+	if (!fw_debug)
+		return -ENOMEM;
+
+	fw_debug->buffer_len = sizeof(struct bfa_trc_mod_s);
+
+	fw_debug->debug_buffer = vzalloc(fw_debug->buffer_len);
+	if (!fw_debug->debug_buffer) {
+		kfree(fw_debug);
+		printk(KERN_INFO "bfad[%d]: Failed to allocate fwtrc buffer\n",
+				bfad->inst_no);
+		return -ENOMEM;
+	}
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	rc = bfa_ioc_debug_fwtrc(&bfad->bfa.ioc,
+			fw_debug->debug_buffer,
+			&fw_debug->buffer_len);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (rc != BFA_STATUS_OK) {
+		vfree(fw_debug->debug_buffer);
+		fw_debug->debug_buffer = NULL;
+		kfree(fw_debug);
+		printk(KERN_INFO "bfad[%d]: Failed to collect fwtrc\n",
+				bfad->inst_no);
+		return -ENOMEM;
+	}
+
+	file->private_data = fw_debug;
+
+	return 0;
+}
+
+static int
+bfad_debugfs_open_fwsave(struct inode *inode, struct file *file)
+{
+	struct bfad_port_s *port = inode->i_private;
+	struct bfad_s *bfad = port->bfad;
+	struct bfad_debug_info *fw_debug;
+	unsigned long flags;
+	int rc;
+
+	fw_debug = kzalloc(sizeof(struct bfad_debug_info), GFP_KERNEL);
+	if (!fw_debug)
+		return -ENOMEM;
+
+	fw_debug->buffer_len = sizeof(struct bfa_trc_mod_s);
+
+	fw_debug->debug_buffer = vzalloc(fw_debug->buffer_len);
+	if (!fw_debug->debug_buffer) {
+		kfree(fw_debug);
+		printk(KERN_INFO "bfad[%d]: Failed to allocate fwsave buffer\n",
+				bfad->inst_no);
+		return -ENOMEM;
+	}
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	rc = bfa_ioc_debug_fwsave(&bfad->bfa.ioc,
+			fw_debug->debug_buffer,
+			&fw_debug->buffer_len);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	if (rc != BFA_STATUS_OK) {
+		vfree(fw_debug->debug_buffer);
+		fw_debug->debug_buffer = NULL;
+		kfree(fw_debug);
+		printk(KERN_INFO "bfad[%d]: Failed to collect fwsave\n",
+				bfad->inst_no);
+		return -ENOMEM;
+	}
+
+	file->private_data = fw_debug;
+
+	return 0;
+}
+
+static int
+bfad_debugfs_open_reg(struct inode *inode, struct file *file)
+{
+	struct bfad_debug_info *reg_debug;
+
+	reg_debug = kzalloc(sizeof(struct bfad_debug_info), GFP_KERNEL);
+	if (!reg_debug)
+		return -ENOMEM;
+
+	reg_debug->i_private = inode->i_private;
+
+	file->private_data = reg_debug;
+
+	return 0;
+}
+
+/* Changes the current file position */
+static loff_t
+bfad_debugfs_lseek(struct file *file, loff_t offset, int orig)
+{
+	struct bfad_debug_info *debug = file->private_data;
+	return fixed_size_llseek(file, offset, orig,
+				debug->buffer_len);
+}
+
+static ssize_t
+bfad_debugfs_read(struct file *file, char __user *buf,
+			size_t nbytes, loff_t *pos)
+{
+	struct bfad_debug_info *debug = file->private_data;
+
+	if (!debug || !debug->debug_buffer)
+		return 0;
+
+	return simple_read_from_buffer(buf, nbytes, pos,
+				debug->debug_buffer, debug->buffer_len);
+}
+
+#define BFA_REG_CT_ADDRSZ	(0x40000)
+#define BFA_REG_CB_ADDRSZ	(0x20000)
+#define BFA_REG_ADDRSZ(__ioc)	\
+	((u32)(bfa_asic_id_ctc(bfa_ioc_devid(__ioc)) ?	\
+	 BFA_REG_CT_ADDRSZ : BFA_REG_CB_ADDRSZ))
+#define BFA_REG_ADDRMSK(__ioc)	(BFA_REG_ADDRSZ(__ioc) - 1)
+
+static bfa_status_t
+bfad_reg_offset_check(struct bfa_s *bfa, u32 offset, u32 len)
+{
+	u8	area;
+
+	/* decode the register area from offset bits [17:15] */
+	area = (offset >> 15) & 0x7;
+	if (area == 0) {
+		/* PCIe core register */
+		if ((offset + (len<<2)) > 0x8000)    /* 8k dwords or 32KB */
+			return BFA_STATUS_EINVAL;
+	} else if (area == 0x1) {
+		/* CB 32 KB memory page */
+		if ((offset + (len<<2)) > 0x10000)    /* 16k dwords or 64KB */
+			return BFA_STATUS_EINVAL;
+	} else {
+		/* CB register space 64KB */
+		if ((offset + (len<<2)) > BFA_REG_ADDRMSK(&bfa->ioc))
+			return BFA_STATUS_EINVAL;
+	}
+	return BFA_STATUS_OK;
+}
+
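+/*
+ * Worked example (illustrative): offset 0x100 with len 4 decodes to
+ * area 0 (PCIe core), and 0x100 + (4 << 2) = 0x110 <= 0x8000, so the
+ * access is accepted.
+ */
+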
+static ssize_t
+bfad_debugfs_read_regrd(struct file *file, char __user *buf,
+		size_t nbytes, loff_t *pos)
+{
+	struct bfad_debug_info *regrd_debug = file->private_data;
+	struct bfad_port_s *port = (struct bfad_port_s *)regrd_debug->i_private;
+	struct bfad_s *bfad = port->bfad;
+	ssize_t rc;
+
+	if (!bfad->regdata)
+		return 0;
+
+	rc = simple_read_from_buffer(buf, nbytes, pos,
+			bfad->regdata, bfad->reglen);
+
+	if ((*pos + nbytes) >= bfad->reglen) {
+		kfree(bfad->regdata);
+		bfad->regdata = NULL;
+		bfad->reglen = 0;
+	}
+
+	return rc;
+}
+
+static ssize_t
+bfad_debugfs_write_regrd(struct file *file, const char __user *buf,
+		size_t nbytes, loff_t *ppos)
+{
+	struct bfad_debug_info *regrd_debug = file->private_data;
+	struct bfad_port_s *port = (struct bfad_port_s *)regrd_debug->i_private;
+	struct bfad_s *bfad = port->bfad;
+	struct bfa_s *bfa = &bfad->bfa;
+	struct bfa_ioc_s *ioc = &bfa->ioc;
+	int addr, rc, i;
+	u32 len;
+	u32 *regbuf;
+	void __iomem *rb, *reg_addr;
+	unsigned long flags;
+	void *kern_buf;
+
+	kern_buf = memdup_user_nul(buf, nbytes);	/* NUL-terminate for sscanf */
+	if (IS_ERR(kern_buf))
+		return PTR_ERR(kern_buf);
+
+	rc = sscanf(kern_buf, "%x:%x", &addr, &len);
+	if (rc < 2 || len > (UINT_MAX >> 2)) {
+		printk(KERN_INFO
+			"bfad[%d]: %s failed to read user buf\n",
+			bfad->inst_no, __func__);
+		kfree(kern_buf);
+		return -EINVAL;
+	}
+
+	kfree(kern_buf);
+	kfree(bfad->regdata);
+	bfad->regdata = NULL;
+	bfad->reglen = 0;
+
+	bfad->regdata = kzalloc(len << 2, GFP_KERNEL);
+	if (!bfad->regdata) {
+		printk(KERN_INFO "bfad[%d]: Failed to allocate regrd buffer\n",
+				bfad->inst_no);
+		return -ENOMEM;
+	}
+
+	bfad->reglen = len << 2;
+	rb = bfa_ioc_bar0(ioc);
+	addr &= BFA_REG_ADDRMSK(ioc);
+
+	/* offset and len sanity check */
+	rc = bfad_reg_offset_check(bfa, addr, len);
+	if (rc) {
+		printk(KERN_INFO "bfad[%d]: Failed reg offset check\n",
+				bfad->inst_no);
+		kfree(bfad->regdata);
+		bfad->regdata = NULL;
+		bfad->reglen = 0;
+		return -EINVAL;
+	}
+
+	reg_addr = rb + addr;
+	regbuf =  (u32 *)bfad->regdata;
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	for (i = 0; i < len; i++) {
+		*regbuf = readl(reg_addr);
+		regbuf++;
+		reg_addr += sizeof(u32);
+	}
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	return nbytes;
+}
+
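+/*
+ * Illustrative regwr usage from user space, assuming debugfs is mounted
+ * at /sys/kernel/debug: writing "<offset>:<value>" in hex, e.g.
+ *
+ *   echo "104:deadbeef" > /sys/kernel/debug/bfa/pci_dev:<pci name>/regwr
+ *
+ * writes one 32-bit value to the given register offset.
+ */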
+static ssize_t
+bfad_debugfs_write_regwr(struct file *file, const char __user *buf,
+		size_t nbytes, loff_t *ppos)
+{
+	struct bfad_debug_info *debug = file->private_data;
+	struct bfad_port_s *port = (struct bfad_port_s *)debug->i_private;
+	struct bfad_s *bfad = port->bfad;
+	struct bfa_s *bfa = &bfad->bfa;
+	struct bfa_ioc_s *ioc = &bfa->ioc;
+	int addr, val, rc;
+	void __iomem *reg_addr;
+	unsigned long flags;
+	void *kern_buf;
+
+	/* memdup_user_nul() NUL-terminates the copy for sscanf() below */
+	kern_buf = memdup_user_nul(buf, nbytes);
+	if (IS_ERR(kern_buf))
+		return PTR_ERR(kern_buf);
+
+	rc = sscanf(kern_buf, "%x:%x", &addr, &val);
+	if (rc < 2) {
+		printk(KERN_INFO
+			"bfad[%d]: %s failed to read user buf\n",
+			bfad->inst_no, __func__);
+		kfree(kern_buf);
+		return -EINVAL;
+	}
+	kfree(kern_buf);
+
+	addr &= BFA_REG_ADDRMSK(ioc); /* mask offset into the register space */
+
+	/* offset and len sanity check */
+	rc = bfad_reg_offset_check(bfa, addr, 1);
+	if (rc) {
+		printk(KERN_INFO
+			"bfad[%d]: Failed reg offset check\n",
+			bfad->inst_no);
+		return -EINVAL;
+	}
+
+	reg_addr = (bfa_ioc_bar0(ioc)) + addr;
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	writel(val, reg_addr);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	return nbytes;
+}
+
+static int
+bfad_debugfs_release(struct inode *inode, struct file *file)
+{
+	struct bfad_debug_info *debug = file->private_data;
+
+	if (!debug)
+		return 0;
+
+	file->private_data = NULL;
+	kfree(debug);
+	return 0;
+}
+
+static int
+bfad_debugfs_release_fwtrc(struct inode *inode, struct file *file)
+{
+	struct bfad_debug_info *fw_debug = file->private_data;
+
+	if (!fw_debug)
+		return 0;
+
+	if (fw_debug->debug_buffer)
+		vfree(fw_debug->debug_buffer);
+
+	file->private_data = NULL;
+	kfree(fw_debug);
+	return 0;
+}
+
+static const struct file_operations bfad_debugfs_op_drvtrc = {
+	.owner		=	THIS_MODULE,
+	.open		=	bfad_debugfs_open_drvtrc,
+	.llseek		=	bfad_debugfs_lseek,
+	.read		=	bfad_debugfs_read,
+	.release	=	bfad_debugfs_release,
+};
+
+static const struct file_operations bfad_debugfs_op_fwtrc = {
+	.owner		=	THIS_MODULE,
+	.open		=	bfad_debugfs_open_fwtrc,
+	.llseek		=	bfad_debugfs_lseek,
+	.read		=	bfad_debugfs_read,
+	.release	=	bfad_debugfs_release_fwtrc,
+};
+
+static const struct file_operations bfad_debugfs_op_fwsave = {
+	.owner		=	THIS_MODULE,
+	.open		=	bfad_debugfs_open_fwsave,
+	.llseek		=	bfad_debugfs_lseek,
+	.read		=	bfad_debugfs_read,
+	.release	=	bfad_debugfs_release_fwtrc,
+};
+
+static const struct file_operations bfad_debugfs_op_regrd = {
+	.owner		=	THIS_MODULE,
+	.open		=	bfad_debugfs_open_reg,
+	.llseek		=	bfad_debugfs_lseek,
+	.read		=	bfad_debugfs_read_regrd,
+	.write		=	bfad_debugfs_write_regrd,
+	.release	=	bfad_debugfs_release,
+};
+
+static const struct file_operations bfad_debugfs_op_regwr = {
+	.owner		=	THIS_MODULE,
+	.open		=	bfad_debugfs_open_reg,
+	.llseek		=	bfad_debugfs_lseek,
+	.write		=	bfad_debugfs_write_regwr,
+	.release	=	bfad_debugfs_release,
+};
+
+struct bfad_debugfs_entry {
+	const char *name;
+	umode_t	mode;
+	const struct file_operations *fops;
+};
+
+static const struct bfad_debugfs_entry bfad_debugfs_files[] = {
+	{ "drvtrc", S_IFREG|S_IRUGO, &bfad_debugfs_op_drvtrc, },
+	{ "fwtrc",  S_IFREG|S_IRUGO, &bfad_debugfs_op_fwtrc,  },
+	{ "fwsave", S_IFREG|S_IRUGO, &bfad_debugfs_op_fwsave, },
+	{ "regrd",  S_IFREG|S_IRUGO|S_IWUSR, &bfad_debugfs_op_regrd,  },
+	{ "regwr",  S_IFREG|S_IWUSR, &bfad_debugfs_op_regwr,  },
+};
+
+static struct dentry *bfa_debugfs_root;
+static atomic_t bfa_debugfs_port_count;
+
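+/*
+ * Resulting debugfs layout, assuming debugfs is mounted at
+ * /sys/kernel/debug:
+ *
+ *   bfa/pci_dev:<pci name>/{drvtrc,fwtrc,fwsave,regrd,regwr}
+ */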
+inline void
+bfad_debugfs_init(struct bfad_port_s *port)
+{
+	struct bfad_s *bfad = port->bfad;
+	const struct bfad_debugfs_entry *file;
+	char name[64];
+	int i;
+
+	if (!bfa_debugfs_enable)
+		return;
+
+	/* Setup the BFA debugfs root directory */
+	if (!bfa_debugfs_root) {
+		bfa_debugfs_root = debugfs_create_dir("bfa", NULL);
+		atomic_set(&bfa_debugfs_port_count, 0);
+		if (!bfa_debugfs_root) {
+			printk(KERN_WARNING
+				"BFA debugfs root dir creation failed\n");
+			goto err;
+		}
+	}
+
+	/* Setup the pci_dev debugfs directory for the port */
+	snprintf(name, sizeof(name), "pci_dev:%s", bfad->pci_name);
+	if (!port->port_debugfs_root) {
+		port->port_debugfs_root =
+			debugfs_create_dir(name, bfa_debugfs_root);
+		if (!port->port_debugfs_root) {
+			printk(KERN_WARNING
+				"bfa %s: debugfs root creation failed\n",
+				bfad->pci_name);
+			goto err;
+		}
+
+		atomic_inc(&bfa_debugfs_port_count);
+
+		for (i = 0; i < ARRAY_SIZE(bfad_debugfs_files); i++) {
+			file = &bfad_debugfs_files[i];
+			bfad->bfad_dentry_files[i] =
+					debugfs_create_file(file->name,
+							file->mode,
+							port->port_debugfs_root,
+							port,
+							file->fops);
+			if (!bfad->bfad_dentry_files[i]) {
+				printk(KERN_WARNING
+					"bfa %s: debugfs %s creation failed\n",
+					bfad->pci_name, file->name);
+				goto err;
+			}
+		}
+	}
+
+err:
+	return;
+}
+
+inline void
+bfad_debugfs_exit(struct bfad_port_s *port)
+{
+	struct bfad_s *bfad = port->bfad;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(bfad_debugfs_files); i++) {
+		if (bfad->bfad_dentry_files[i]) {
+			debugfs_remove(bfad->bfad_dentry_files[i]);
+			bfad->bfad_dentry_files[i] = NULL;
+		}
+	}
+
+	/* Remove the pci_dev debugfs directory for the port */
+	if (port->port_debugfs_root) {
+		debugfs_remove(port->port_debugfs_root);
+		port->port_debugfs_root = NULL;
+		atomic_dec(&bfa_debugfs_port_count);
+	}
+
+	/* Remove the BFA debugfs root directory */
+	if (atomic_read(&bfa_debugfs_port_count) == 0) {
+		debugfs_remove(bfa_debugfs_root);
+		bfa_debugfs_root = NULL;
+	}
+}
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
new file mode 100644
index 0000000..4fe980a
--- /dev/null
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -0,0 +1,361 @@
+/*
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014- QLogic Corporation.
+ * All rights reserved
+ * www.qlogic.com
+ *
+ * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+/*
+ * bfad_drv.h - Contains base driver definitions and Linux driver
+ * data structures.
+ */
+
+#ifndef __BFAD_DRV_H__
+#define __BFAD_DRV_H__
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+#include <linux/aer.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_bsg_fc.h>
+#include <scsi/scsi_devinfo.h>
+
+#include "bfa_modules.h"
+#include "bfa_fcs.h"
+#include "bfa_defs_fcs.h"
+
+#include "bfa_plog.h"
+#include "bfa_cs.h"
+
+#define BFAD_DRIVER_NAME	"bfa"
+#ifdef BFA_DRIVER_VERSION
+#define BFAD_DRIVER_VERSION    BFA_DRIVER_VERSION
+#else
+#define BFAD_DRIVER_VERSION    "3.2.25.1"
+#endif
+
+#define BFAD_PROTO_NAME FCPI_NAME
+#define BFAD_IRQ_FLAGS IRQF_SHARED
+
+#ifndef FC_PORTSPEED_8GBIT
+#define FC_PORTSPEED_8GBIT 0x10
+#endif
+
+/*
+ * BFAD flags
+ */
+#define BFAD_MSIX_ON				0x00000001
+#define BFAD_HAL_INIT_DONE			0x00000002
+#define BFAD_DRV_INIT_DONE			0x00000004
+#define BFAD_CFG_PPORT_DONE			0x00000008
+#define BFAD_HAL_START_DONE			0x00000010
+#define BFAD_PORT_ONLINE			0x00000020
+#define BFAD_RPORT_ONLINE			0x00000040
+#define BFAD_FCS_INIT_DONE			0x00000080
+#define BFAD_HAL_INIT_FAIL			0x00000100
+#define BFAD_FC4_PROBE_DONE			0x00000200
+#define BFAD_PORT_DELETE			0x00000001
+#define BFAD_INTX_ON				0x00000400
+#define BFAD_EEH_BUSY				0x00000800
+#define BFAD_EEH_PCI_CHANNEL_IO_PERM_FAILURE	0x00001000
+/*
+ * BFAD related definition
+ */
+#define SCSI_SCAN_DELAY		HZ
+#define BFAD_STOP_TIMEOUT	30
+#define BFAD_SUSPEND_TIMEOUT	BFAD_STOP_TIMEOUT
+
+/*
+ * BFAD configuration parameter default values
+ */
+#define BFAD_LUN_QUEUE_DEPTH	32
+#define BFAD_IO_MAX_SGE		SG_ALL
+#define BFAD_MIN_SECTORS	128 /* 64k   */
+#define BFAD_MAX_SECTORS	0xFFFF  /* 32 MB */
+
+#define bfad_isr_t irq_handler_t
+
+#define MAX_MSIX_ENTRY 22
+
+struct bfad_msix_s {
+	struct bfad_s *bfad;
+	struct msix_entry msix;
+	char name[32];
+};
+
+/*
+ * Only append to the enums defined here to avoid any versioning
+ * needed between trace utility and driver version
+ */
+enum {
+	BFA_TRC_LDRV_BFAD		= 1,
+	BFA_TRC_LDRV_IM			= 2,
+	BFA_TRC_LDRV_BSG		= 3,
+};
+
+enum bfad_port_pvb_type {
+	BFAD_PORT_PHYS_BASE = 0,
+	BFAD_PORT_PHYS_VPORT = 1,
+	BFAD_PORT_VF_BASE = 2,
+	BFAD_PORT_VF_VPORT = 3,
+};
+
+/*
+ * PORT data structure
+ */
+struct bfad_port_s {
+	struct list_head list_entry;
+	struct bfad_s	*bfad;
+	struct bfa_fcs_lport_s *fcs_port;
+	u32	roles;
+	s32		flags;
+	u32	supported_fc4s;
+	enum bfad_port_pvb_type pvb_type;
+	struct bfad_im_port_s *im_port;	/* IM specific data */
+	/* port debugfs specific data */
+	struct dentry *port_debugfs_root;
+};
+
+/*
+ * VPORT data structure
+ */
+struct bfad_vport_s {
+	struct bfad_port_s     drv_port;
+	struct bfa_fcs_vport_s fcs_vport;
+	struct completion *comp_del;
+	struct list_head list_entry;
+};
+
+/*
+ * VF data structure
+ */
+struct bfad_vf_s {
+	bfa_fcs_vf_t    fcs_vf;
+	struct bfad_port_s    base_port;	/* base port for vf */
+	struct bfad_s   *bfad;
+};
+
+struct bfad_cfg_param_s {
+	u32	rport_del_timeout;
+	u32	ioc_queue_depth;
+	u32	lun_queue_depth;
+	u32	io_max_sge;
+	u32	binding_method;
+};
+
+union bfad_tmp_buf {
+	/* From struct bfa_adapter_attr_s */
+	char		manufacturer[BFA_ADAPTER_MFG_NAME_LEN];
+	char		serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
+	char		model[BFA_ADAPTER_MODEL_NAME_LEN];
+	char		fw_ver[BFA_VERSION_LEN];
+	char		optrom_ver[BFA_VERSION_LEN];
+
+	/* From struct bfa_ioc_pci_attr_s */
+	u8		chip_rev[BFA_IOC_CHIP_REV_LEN];  /*  chip revision */
+
+	wwn_t		wwn[BFA_FCS_MAX_LPORTS];
+};
+
+/*
+ * BFAD (PCI function) data structure
+ */
+struct bfad_s {
+	bfa_sm_t	sm;	/* state machine */
+	struct list_head list_entry;
+	struct bfa_s	bfa;
+	struct bfa_fcs_s bfa_fcs;
+	struct pci_dev *pcidev;
+	const char *pci_name;
+	struct bfa_pcidev_s hal_pcidev;
+	struct bfa_ioc_pci_attr_s pci_attr;
+	void __iomem   *pci_bar0_kva;
+	void __iomem   *pci_bar2_kva;
+	struct completion comp;
+	struct completion suspend;
+	struct completion enable_comp;
+	struct completion disable_comp;
+	bfa_boolean_t   disable_active;
+	struct bfad_port_s     pport;	/* physical port of the BFAD */
+	struct bfa_meminfo_s meminfo;
+	struct bfa_iocfc_cfg_s   ioc_cfg;
+	u32	inst_no;	/* BFAD instance number */
+	u32	bfad_flags;
+	spinlock_t      bfad_lock;
+	struct task_struct *bfad_tsk;
+	struct bfad_cfg_param_s cfg_data;
+	struct bfad_msix_s msix_tab[MAX_MSIX_ENTRY];
+	int		nvec;
+	char	adapter_name[BFA_ADAPTER_SYM_NAME_LEN];
+	char	port_name[BFA_ADAPTER_SYM_NAME_LEN];
+	struct timer_list hal_tmo;
+	unsigned long   hs_start;
+	struct bfad_im_s *im;		/* IM specific data */
+	struct bfa_trc_mod_s  *trcmod;
+	struct bfa_plog_s      plog_buf;
+	int		ref_count;
+	union bfad_tmp_buf tmp_buf;
+	struct fc_host_statistics link_stats;
+	struct list_head pbc_vport_list;
+	/* debugfs specific data */
+	char *regdata;
+	u32 reglen;
+	struct dentry *bfad_dentry_files[5];
+	struct list_head	free_aen_q;
+	struct list_head	active_aen_q;
+	struct bfa_aen_entry_s	aen_list[BFA_AEN_MAX_ENTRY];
+	spinlock_t		bfad_aen_spinlock;
+	struct list_head	vport_list;
+};
+
+/* BFAD state machine events */
+enum bfad_sm_event {
+	BFAD_E_CREATE			= 1,
+	BFAD_E_KTHREAD_CREATE_FAILED	= 2,
+	BFAD_E_INIT			= 3,
+	BFAD_E_INIT_SUCCESS		= 4,
+	BFAD_E_HAL_INIT_FAILED		= 5,
+	BFAD_E_INIT_FAILED		= 6,
+	BFAD_E_FCS_EXIT_COMP		= 7,
+	BFAD_E_EXIT_COMP		= 8,
+	BFAD_E_STOP			= 9
+};
+
+/*
+ * RPORT data structure
+ */
+struct bfad_rport_s {
+	struct bfa_fcs_rport_s fcs_rport;
+};
+
+struct bfad_buf_info {
+	void		*virt;
+	dma_addr_t      phys;
+	u32	size;
+};
+
+struct bfad_fcxp {
+	struct bfad_port_s    *port;
+	struct bfa_rport_s *bfa_rport;
+	bfa_status_t    req_status;
+	u16	tag;
+	u16	rsp_len;
+	u16	rsp_maxlen;
+	u8		use_ireqbuf;
+	u8		use_irspbuf;
+	u32	num_req_sgles;
+	u32	num_rsp_sgles;
+	struct fchs_s	fchs;
+	void		*reqbuf_info;
+	void		*rspbuf_info;
+	struct bfa_sge_s  *req_sge;
+	struct bfa_sge_s  *rsp_sge;
+	fcxp_send_cb_t  send_cbfn;
+	void		*send_cbarg;
+	void		*bfa_fcxp;
+	struct completion comp;
+};
+
+struct bfad_hal_comp {
+	bfa_status_t    status;
+	struct completion comp;
+};
+
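+/*
+ * 'level' is a KERN_* prefix string (KERN_SOH followed by an ASCII
+ * severity digit), so level[1] <= '4' matches KERN_WARNING and more
+ * severe messages; less severe messages are logged only when the
+ * driver log level mask is 4.
+ */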
+#define BFA_LOG(level, bfad, mask, fmt, arg...)				\
+do {									\
+	if (((mask) == 4) || (level[1] <= '4'))				\
+		dev_printk(level, &((bfad)->pcidev)->dev, fmt, ##arg);	\
+} while (0)
+
+bfa_status_t	bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
+				  struct bfa_lport_cfg_s *port_cfg,
+				  struct device *dev);
+bfa_status_t	bfad_vf_create(struct bfad_s *bfad, u16 vf_id,
+			       struct bfa_lport_cfg_s *port_cfg);
+bfa_status_t	bfad_cfg_pport(struct bfad_s *bfad, enum bfa_lport_role role);
+bfa_status_t	bfad_drv_init(struct bfad_s *bfad);
+bfa_status_t	bfad_start_ops(struct bfad_s *bfad);
+void		bfad_drv_start(struct bfad_s *bfad);
+void		bfad_uncfg_pport(struct bfad_s *bfad);
+void		bfad_stop(struct bfad_s *bfad);
+void		bfad_fcs_stop(struct bfad_s *bfad);
+void		bfad_remove_intr(struct bfad_s *bfad);
+void		bfad_hal_mem_release(struct bfad_s *bfad);
+void		bfad_hcb_comp(void *arg, bfa_status_t status);
+
+int		bfad_setup_intr(struct bfad_s *bfad);
+void		bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg);
+bfa_status_t	bfad_hal_mem_alloc(struct bfad_s *bfad);
+void		bfad_bfa_tmo(struct timer_list *t);
+void		bfad_init_timer(struct bfad_s *bfad);
+int		bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad);
+void		bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad);
+void		bfad_drv_uninit(struct bfad_s *bfad);
+int		bfad_worker(void *ptr);
+void		bfad_debugfs_init(struct bfad_port_s *port);
+void		bfad_debugfs_exit(struct bfad_port_s *port);
+
+void bfad_pci_remove(struct pci_dev *pdev);
+int bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid);
+void bfad_rport_online_wait(struct bfad_s *bfad);
+int bfad_get_linkup_delay(struct bfad_s *bfad);
+int bfad_install_msix_handler(struct bfad_s *bfad);
+
+extern struct idr bfad_im_port_index;
+extern struct pci_device_id bfad_id_table[];
+extern struct list_head bfad_list;
+extern char	*os_name;
+extern char	*os_patch;
+extern char	*host_name;
+extern int	num_rports;
+extern int	num_ios;
+extern int	num_tms;
+extern int	num_fcxps;
+extern int	num_ufbufs;
+extern int	reqq_size;
+extern int	rspq_size;
+extern int	num_sgpgs;
+extern int      rport_del_timeout;
+extern int      bfa_lun_queue_depth;
+extern int      bfa_io_max_sge;
+extern int      bfa_log_level;
+extern int      ioc_auto_recover;
+extern int      bfa_linkup_delay;
+extern int      msix_disable_cb;
+extern int      msix_disable_ct;
+extern int      fdmi_enable;
+extern int      supported_fc4s;
+extern int	pcie_max_read_reqsz;
+extern int	max_xfer_size;
+extern int bfa_debugfs_enable;
+extern struct mutex bfad_mutex;
+
+#endif /* __BFAD_DRV_H__ */
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
new file mode 100644
index 0000000..c4a3331
--- /dev/null
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -0,0 +1,1342 @@
+/*
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014- QLogic Corporation.
+ * All rights reserved
+ * www.qlogic.com
+ *
+ * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+/*
+ *  bfad_im.c Linux driver IM module.
+ */
+
+#include <linux/export.h>
+
+#include "bfad_drv.h"
+#include "bfad_im.h"
+#include "bfa_fcs.h"
+
+BFA_TRC_FILE(LDRV, IM);
+
+DEFINE_IDR(bfad_im_port_index);
+struct scsi_transport_template *bfad_im_scsi_transport_template;
+struct scsi_transport_template *bfad_im_scsi_vport_transport_template;
+static void bfad_im_itnim_work_handler(struct work_struct *work);
+static int bfad_im_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmnd);
+static int bfad_im_slave_alloc(struct scsi_device *sdev);
+static void bfad_im_fc_rport_add(struct bfad_im_port_s  *im_port,
+				struct bfad_itnim_s *itnim);
+
+void
+bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio,
+			enum bfi_ioim_status io_status, u8 scsi_status,
+			int sns_len, u8 *sns_info, s32 residue)
+{
+	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
+	struct bfad_s         *bfad = drv;
+	struct bfad_itnim_data_s *itnim_data;
+	struct bfad_itnim_s *itnim;
+	u8         host_status = DID_OK;
+
+	switch (io_status) {
+	case BFI_IOIM_STS_OK:
+		bfa_trc(bfad, scsi_status);
+		scsi_set_resid(cmnd, 0);
+
+		if (sns_len > 0) {
+			bfa_trc(bfad, sns_len);
+			if (sns_len > SCSI_SENSE_BUFFERSIZE)
+				sns_len = SCSI_SENSE_BUFFERSIZE;
+			memcpy(cmnd->sense_buffer, sns_info, sns_len);
+		}
+
+		if (residue > 0) {
+			bfa_trc(bfad, residue);
+			scsi_set_resid(cmnd, residue);
+			if (!sns_len && (scsi_status == SAM_STAT_GOOD) &&
+				(scsi_bufflen(cmnd) - residue) <
+					cmnd->underflow) {
+				bfa_trc(bfad, 0);
+				host_status = DID_ERROR;
+			}
+		}
+		cmnd->result = host_status << 16 | scsi_status;
+
+		break;
+
+	case BFI_IOIM_STS_TIMEDOUT:
+		cmnd->result = DID_TIME_OUT << 16;
+		break;
+	case BFI_IOIM_STS_PATHTOV:
+		cmnd->result = DID_TRANSPORT_DISRUPTED << 16;
+		break;
+	default:
+		cmnd->result = DID_ERROR << 16;
+	}
+
+	/* Unmap DMA; a NULL host means a SCSI passthru cmd */
+	if (cmnd->device->host != NULL)
+		scsi_dma_unmap(cmnd);
+
+	cmnd->host_scribble = NULL;
+	bfa_trc(bfad, cmnd->result);
+
+	itnim_data = cmnd->device->hostdata;
+	if (itnim_data) {
+		itnim = itnim_data->itnim;
+		if (!cmnd->result && itnim &&
+			 (bfa_lun_queue_depth > cmnd->device->queue_depth)) {
+			/* Queue depth adjustment for good status completion */
+			bfad_ramp_up_qdepth(itnim, cmnd->device);
+		} else if (cmnd->result == SAM_STAT_TASK_SET_FULL && itnim) {
+			/* qfull handling */
+			bfad_handle_qfull(itnim, cmnd->device);
+		}
+	}
+
+	cmnd->scsi_done(cmnd);
+}
+
+void
+bfa_cb_ioim_good_comp(void *drv, struct bfad_ioim_s *dio)
+{
+	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
+	struct bfad_itnim_data_s *itnim_data;
+	struct bfad_itnim_s *itnim;
+
+	cmnd->result = DID_OK << 16 | SCSI_STATUS_GOOD;
+
+	/* Unmap DMA; a NULL host means a SCSI passthru cmd */
+	if (cmnd->device->host != NULL)
+		scsi_dma_unmap(cmnd);
+
+	cmnd->host_scribble = NULL;
+
+	/* Queue depth adjustment */
+	if (bfa_lun_queue_depth > cmnd->device->queue_depth) {
+		itnim_data = cmnd->device->hostdata;
+		if (itnim_data) {
+			itnim = itnim_data->itnim;
+			if (itnim)
+				bfad_ramp_up_qdepth(itnim, cmnd->device);
+		}
+	}
+
+	cmnd->scsi_done(cmnd);
+}
+
+void
+bfa_cb_ioim_abort(void *drv, struct bfad_ioim_s *dio)
+{
+	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
+	struct bfad_s         *bfad = drv;
+
+	cmnd->result = DID_ERROR << 16;
+
+	/* Unmap DMA; a NULL host means a SCSI passthru cmd */
+	if (cmnd->device->host != NULL)
+		scsi_dma_unmap(cmnd);
+
+	bfa_trc(bfad, cmnd->result);
+	cmnd->host_scribble = NULL;
+}
+
+void
+bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
+		   enum bfi_tskim_status tsk_status)
+{
+	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dtsk;
+	wait_queue_head_t *wq;
+
+	cmnd->SCp.Status |= tsk_status << 1;
+	set_bit(IO_DONE_BIT, (unsigned long *)&cmnd->SCp.Status);
+	wq = (wait_queue_head_t *) cmnd->SCp.ptr;
+	cmnd->SCp.ptr = NULL;
+
+	if (wq)
+		wake_up(wq);
+}
+
+/*
+ *  Scsi_Host_template SCSI host template
+ */
+/*
+ * SCSI host template entry points
+ */
+static const char *
+bfad_im_info(struct Scsi_Host *shost)
+{
+	static char     bfa_buf[256];
+	struct bfad_im_port_s *im_port =
+			(struct bfad_im_port_s *) shost->hostdata[0];
+	struct bfad_s *bfad = im_port->bfad;
+
+	memset(bfa_buf, 0, sizeof(bfa_buf));
+	snprintf(bfa_buf, sizeof(bfa_buf),
+		"QLogic BR-series FC/FCOE Adapter, hwpath: %s driver: %s",
+		bfad->pci_name, BFAD_DRIVER_VERSION);
+
+	return bfa_buf;
+}
+
+/*
+ * Scsi_Host template entry, aborts the specified SCSI command.
+ *
+ * Returns: SUCCESS or FAILED.
+ */
+static int
+bfad_im_abort_handler(struct scsi_cmnd *cmnd)
+{
+	struct Scsi_Host *shost = cmnd->device->host;
+	struct bfad_im_port_s *im_port =
+			(struct bfad_im_port_s *) shost->hostdata[0];
+	struct bfad_s         *bfad = im_port->bfad;
+	struct bfa_ioim_s *hal_io;
+	unsigned long   flags;
+	u32        timeout;
+	int             rc = FAILED;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	hal_io = (struct bfa_ioim_s *) cmnd->host_scribble;
+	if (!hal_io) {
+		/* IO has been completed, return success */
+		rc = SUCCESS;
+		goto out;
+	}
+	if (hal_io->dio != (struct bfad_ioim_s *) cmnd) {
+		rc = FAILED;
+		goto out;
+	}
+
+	bfa_trc(bfad, hal_io->iotag);
+	BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+		"scsi%d: abort cmnd %p iotag %x\n",
+		im_port->shost->host_no, cmnd, hal_io->iotag);
+	(void) bfa_ioim_abort(hal_io);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	/*
+	 * Wait until the command gets aborted, backing off exponentially
+	 * (up to 4*HZ jiffies) between polls.
+	 */
+	timeout = 10;
+	while ((struct bfa_ioim_s *) cmnd->host_scribble == hal_io) {
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(timeout);
+		if (timeout < 4 * HZ)
+			timeout *= 2;
+	}
+
+	cmnd->scsi_done(cmnd);
+	bfa_trc(bfad, hal_io->iotag);
+	BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+		"scsi%d: complete abort 0x%p iotag 0x%x\n",
+		im_port->shost->host_no, cmnd, hal_io->iotag);
+	return SUCCESS;
+out:
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	return rc;
+}
+
+static bfa_status_t
+bfad_im_target_reset_send(struct bfad_s *bfad, struct scsi_cmnd *cmnd,
+		     struct bfad_itnim_s *itnim)
+{
+	struct bfa_tskim_s *tskim;
+	struct bfa_itnim_s *bfa_itnim;
+	bfa_status_t    rc = BFA_STATUS_OK;
+	struct scsi_lun scsilun;
+
+	tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd);
+	if (!tskim) {
+		BFA_LOG(KERN_ERR, bfad, bfa_log_level,
+			"target reset, fail to allocate tskim\n");
+		rc = BFA_STATUS_FAILED;
+		goto out;
+	}
+
+	/*
+	 * Set host_scribble to NULL so that this task management command
+	 * cannot itself be aborted.
+	 */
+	cmnd->host_scribble = NULL;
+	cmnd->SCp.Status = 0;
+	bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim);
+	/*
+	 * bfa_itnim can be NULL if the port gets disconnected and the bfa
+	 * and fcs layers have cleaned up their nexus with the targets and
+	 * the same has not been cleaned up by the shim
+	 */
+	if (bfa_itnim == NULL) {
+		bfa_tskim_free(tskim);
+		BFA_LOG(KERN_ERR, bfad, bfa_log_level,
+			"target reset, bfa_itnim is NULL\n");
+		rc = BFA_STATUS_FAILED;
+		goto out;
+	}
+
+	memset(&scsilun, 0, sizeof(scsilun));
+	bfa_tskim_start(tskim, bfa_itnim, scsilun,
+			    FCP_TM_TARGET_RESET, BFAD_TARGET_RESET_TMO);
+out:
+	return rc;
+}
+
+/*
+ * Scsi_Host template entry, resets a LUN and aborts all of its commands.
+ *
+ * Returns: SUCCESS or FAILED.
+ */
+static int
+bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd)
+{
+	struct Scsi_Host *shost = cmnd->device->host;
+	struct bfad_im_port_s *im_port =
+			(struct bfad_im_port_s *) shost->hostdata[0];
+	struct bfad_itnim_data_s *itnim_data = cmnd->device->hostdata;
+	struct bfad_s         *bfad = im_port->bfad;
+	struct bfa_tskim_s *tskim;
+	struct bfad_itnim_s   *itnim;
+	struct bfa_itnim_s *bfa_itnim;
+	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+	int             rc = SUCCESS;
+	unsigned long   flags;
+	enum bfi_tskim_status task_status;
+	struct scsi_lun scsilun;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	itnim = itnim_data->itnim;
+	if (!itnim) {
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		rc = FAILED;
+		goto out;
+	}
+
+	tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd);
+	if (!tskim) {
+		BFA_LOG(KERN_ERR, bfad, bfa_log_level,
+				"LUN reset, fail to allocate tskim");
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		rc = FAILED;
+		goto out;
+	}
+
+	/*
+	 * Set host_scribble to NULL so that this task management command
+	 * cannot itself be aborted.
+	 */
+	cmnd->host_scribble = NULL;
+	cmnd->SCp.ptr = (char *)&wq;
+	cmnd->SCp.Status = 0;
+	bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim);
+	/*
+	 * bfa_itnim can be NULL if the port gets disconnected and the bfa
+	 * and fcs layers have cleaned up their nexus with the targets and
+	 * the same has not been cleaned up by the shim
+	 */
+	if (bfa_itnim == NULL) {
+		bfa_tskim_free(tskim);
+		BFA_LOG(KERN_ERR, bfad, bfa_log_level,
+			"lun reset, bfa_itnim is NULL\n");
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		rc = FAILED;
+		goto out;
+	}
+	int_to_scsilun(cmnd->device->lun, &scsilun);
+	bfa_tskim_start(tskim, bfa_itnim, scsilun,
+			    FCP_TM_LUN_RESET, BFAD_LUN_RESET_TMO);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	wait_event(wq, test_bit(IO_DONE_BIT,
+			(unsigned long *)&cmnd->SCp.Status));
+
+	task_status = cmnd->SCp.Status >> 1;
+	if (task_status != BFI_TSKIM_STS_OK) {
+		BFA_LOG(KERN_ERR, bfad, bfa_log_level,
+			"LUN reset failure, status: %d\n", task_status);
+		rc = FAILED;
+	}
+
+out:
+	return rc;
+}
+
+/*
+ * Scsi_Host template entry, resets the target and aborts all of its commands.
+ */
+static int
+bfad_im_reset_target_handler(struct scsi_cmnd *cmnd)
+{
+	struct Scsi_Host *shost = cmnd->device->host;
+	struct scsi_target *starget = scsi_target(cmnd->device);
+	struct bfad_im_port_s *im_port =
+				(struct bfad_im_port_s *) shost->hostdata[0];
+	struct bfad_s         *bfad = im_port->bfad;
+	struct bfad_itnim_s   *itnim;
+	unsigned long   flags;
+	u32        rc, rtn = FAILED;
+	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+	enum bfi_tskim_status task_status;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	itnim = bfad_get_itnim(im_port, starget->id);
+	if (itnim) {
+		cmnd->SCp.ptr = (char *)&wq;
+		rc = bfad_im_target_reset_send(bfad, cmnd, itnim);
+		if (rc == BFA_STATUS_OK) {
+			/* wait target reset to complete */
+			spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+			wait_event(wq, test_bit(IO_DONE_BIT,
+					(unsigned long *)&cmnd->SCp.Status));
+			spin_lock_irqsave(&bfad->bfad_lock, flags);
+
+			task_status = cmnd->SCp.Status >> 1;
+			if (task_status != BFI_TSKIM_STS_OK)
+				BFA_LOG(KERN_ERR, bfad, bfa_log_level,
+					"target reset failure,"
+					" status: %d\n", task_status);
+			else
+				rtn = SUCCESS;
+		}
+	}
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	return rtn;
+}
+
+/*
+ * Scsi_Host template entry slave_destroy.
+ */
+static void
+bfad_im_slave_destroy(struct scsi_device *sdev)
+{
+	sdev->hostdata = NULL;
+	return;
+}
+
+/*
+ *  BFA FCS itnim callbacks
+ */
+
+/*
+ * BFA FCS itnim alloc callback, after successful PRLI
+ * Context: Interrupt
+ */
+int
+bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim,
+		    struct bfad_itnim_s **itnim_drv)
+{
+	*itnim_drv = kzalloc(sizeof(struct bfad_itnim_s), GFP_ATOMIC);
+	if (*itnim_drv == NULL)
+		return -ENOMEM;
+
+	(*itnim_drv)->im = bfad->im;
+	*itnim = &(*itnim_drv)->fcs_itnim;
+	(*itnim_drv)->state = ITNIM_STATE_NONE;
+
+	/*
+	 * Initialize the itnim_work
+	 */
+	INIT_WORK(&(*itnim_drv)->itnim_work, bfad_im_itnim_work_handler);
+	bfad->bfad_flags |= BFAD_RPORT_ONLINE;
+	return 0;
+}
+
+/*
+ * BFA FCS itnim free callback.
+ * Context: Interrupt. bfad_lock is held
+ */
+void
+bfa_fcb_itnim_free(struct bfad_s *bfad, struct bfad_itnim_s *itnim_drv)
+{
+	struct bfad_port_s    *port;
+	wwn_t wwpn;
+	u32 fcid;
+	char wwpn_str[32], fcid_str[16];
+	struct bfad_im_s	*im = itnim_drv->im;
+
+	/* an online to free state transition should not happen */
+	WARN_ON(itnim_drv->state == ITNIM_STATE_ONLINE);
+
+	itnim_drv->queue_work = 1;
+	/* offline request is not yet done, use the same request to free */
+	if (itnim_drv->state == ITNIM_STATE_OFFLINE_PENDING)
+		itnim_drv->queue_work = 0;
+
+	itnim_drv->state = ITNIM_STATE_FREE;
+	port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim);
+	itnim_drv->im_port = port->im_port;
+	wwpn = bfa_fcs_itnim_get_pwwn(&itnim_drv->fcs_itnim);
+	fcid = bfa_fcs_itnim_get_fcid(&itnim_drv->fcs_itnim);
+	wwn2str(wwpn_str, wwpn);
+	fcid2str(fcid_str, fcid);
+	BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+		"ITNIM FREE scsi%d: FCID: %s WWPN: %s\n",
+		port->im_port->shost->host_no,
+		fcid_str, wwpn_str);
+
+	/* ITNIM processing */
+	if (itnim_drv->queue_work)
+		queue_work(im->drv_workq, &itnim_drv->itnim_work);
+}
+
+/*
+ * BFA FCS itnim online callback.
+ * Context: Interrupt. bfad_lock is held
+ */
+void
+bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv)
+{
+	struct bfad_port_s    *port;
+	struct bfad_im_s	*im = itnim_drv->im;
+
+	itnim_drv->bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim_drv->fcs_itnim);
+	port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim);
+	itnim_drv->state = ITNIM_STATE_ONLINE;
+	itnim_drv->queue_work = 1;
+	itnim_drv->im_port = port->im_port;
+
+	/* ITNIM processing */
+	if (itnim_drv->queue_work)
+		queue_work(im->drv_workq, &itnim_drv->itnim_work);
+}
+
+/*
+ * BFA FCS itnim offline callback.
+ * Context: Interrupt. bfad_lock is held
+ */
+void
+bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv)
+{
+	struct bfad_port_s    *port;
+	struct bfad_s *bfad;
+	struct bfad_im_s	*im = itnim_drv->im;
+
+	port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim);
+	bfad = port->bfad;
+	if ((bfad->pport.flags & BFAD_PORT_DELETE) ||
+		 (port->flags & BFAD_PORT_DELETE)) {
+		itnim_drv->state = ITNIM_STATE_OFFLINE;
+		return;
+	}
+	itnim_drv->im_port = port->im_port;
+	itnim_drv->state = ITNIM_STATE_OFFLINE_PENDING;
+	itnim_drv->queue_work = 1;
+
+	/* ITNIM processing */
+	if (itnim_drv->queue_work)
+		queue_work(im->drv_workq, &itnim_drv->itnim_work);
+}
+
+/*
+ * Allocate a Scsi_Host for a port.
+ */
+int
+bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
+			struct device *dev)
+{
+	struct bfad_im_port_pointer *im_portp;
+	int error = 1;
+
+	mutex_lock(&bfad_mutex);
+	error = idr_alloc(&bfad_im_port_index, im_port, 0, 0, GFP_KERNEL);
+	if (error < 0) {
+		mutex_unlock(&bfad_mutex);
+		printk(KERN_WARNING "idr_alloc failure\n");
+		goto out;
+	}
+	im_port->idr_id = error;
+	mutex_unlock(&bfad_mutex);
+
+	im_port->shost = bfad_scsi_host_alloc(im_port, bfad);
+	if (!im_port->shost) {
+		error = 1;
+		goto out_free_idr;
+	}
+
+	im_portp = shost_priv(im_port->shost);
+	im_portp->p = im_port;
+	im_port->shost->unique_id = im_port->idr_id;
+	im_port->shost->this_id = -1;
+	im_port->shost->max_id = MAX_FCP_TARGET;
+	im_port->shost->max_lun = MAX_FCP_LUN;
+	im_port->shost->max_cmd_len = 16;
+	im_port->shost->can_queue = bfad->cfg_data.ioc_queue_depth;
+	if (im_port->port->pvb_type == BFAD_PORT_PHYS_BASE)
+		im_port->shost->transportt = bfad_im_scsi_transport_template;
+	else
+		im_port->shost->transportt =
+				bfad_im_scsi_vport_transport_template;
+
+	error = scsi_add_host_with_dma(im_port->shost, dev, &bfad->pcidev->dev);
+	if (error) {
+		printk(KERN_WARNING "scsi_add_host failure %d\n", error);
+		goto out_fc_rel;
+	}
+
+	return 0;
+
+out_fc_rel:
+	scsi_host_put(im_port->shost);
+	im_port->shost = NULL;
+out_free_idr:
+	mutex_lock(&bfad_mutex);
+	idr_remove(&bfad_im_port_index, im_port->idr_id);
+	mutex_unlock(&bfad_mutex);
+out:
+	return error;
+}
+
+void
+bfad_im_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
+{
+	bfa_trc(bfad, bfad->inst_no);
+	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Free scsi%d\n",
+			im_port->shost->host_no);
+
+	fc_remove_host(im_port->shost);
+
+	scsi_remove_host(im_port->shost);
+	scsi_host_put(im_port->shost);
+
+	mutex_lock(&bfad_mutex);
+	idr_remove(&bfad_im_port_index, im_port->idr_id);
+	mutex_unlock(&bfad_mutex);
+}
+
+static void
+bfad_im_port_delete_handler(struct work_struct *work)
+{
+	struct bfad_im_port_s *im_port =
+		container_of(work, struct bfad_im_port_s, port_delete_work);
+
+	if (im_port->port->pvb_type != BFAD_PORT_PHYS_BASE) {
+		im_port->flags |= BFAD_PORT_DELETE;
+		fc_vport_terminate(im_port->fc_vport);
+	}
+}
+
+bfa_status_t
+bfad_im_port_new(struct bfad_s *bfad, struct bfad_port_s *port)
+{
+	int             rc = BFA_STATUS_OK;
+	struct bfad_im_port_s *im_port;
+
+	im_port = kzalloc(sizeof(struct bfad_im_port_s), GFP_ATOMIC);
+	if (im_port == NULL) {
+		rc = BFA_STATUS_ENOMEM;
+		goto ext;
+	}
+	port->im_port = im_port;
+	im_port->port = port;
+	im_port->bfad = bfad;
+
+	INIT_WORK(&im_port->port_delete_work, bfad_im_port_delete_handler);
+	INIT_LIST_HEAD(&im_port->itnim_mapped_list);
+	INIT_LIST_HEAD(&im_port->binding_list);
+
+ext:
+	return rc;
+}
+
+void
+bfad_im_port_delete(struct bfad_s *bfad, struct bfad_port_s *port)
+{
+	struct bfad_im_port_s *im_port = port->im_port;
+
+	queue_work(bfad->im->drv_workq,
+				&im_port->port_delete_work);
+}
+
+void
+bfad_im_port_clean(struct bfad_im_port_s *im_port)
+{
+	struct bfad_fcp_binding *bp, *bp_new;
+	unsigned long flags;
+	struct bfad_s *bfad =  im_port->bfad;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	list_for_each_entry_safe(bp, bp_new, &im_port->binding_list,
+					list_entry) {
+		list_del(&bp->list_entry);
+		kfree(bp);
+	}
+
+	/* the itnim_mapped_list must be empty at this time */
+	WARN_ON(!list_empty(&im_port->itnim_mapped_list));
+
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+}
+
+static void bfad_aen_im_notify_handler(struct work_struct *work)
+{
+	struct bfad_im_s *im =
+		container_of(work, struct bfad_im_s, aen_im_notify_work);
+	struct bfa_aen_entry_s *aen_entry;
+	struct bfad_s *bfad = im->bfad;
+	struct Scsi_Host *shost = bfad->pport.im_port->shost;
+	void *event_data;
+	unsigned long flags;
+
+	while (!list_empty(&bfad->active_aen_q)) {
+		spin_lock_irqsave(&bfad->bfad_aen_spinlock, flags);
+		bfa_q_deq(&bfad->active_aen_q, &aen_entry);
+		spin_unlock_irqrestore(&bfad->bfad_aen_spinlock, flags);
+		event_data = (char *)aen_entry + sizeof(struct list_head);
+		fc_host_post_vendor_event(shost, fc_get_event_number(),
+				sizeof(struct bfa_aen_entry_s) -
+				sizeof(struct list_head),
+				(char *)event_data, BFAD_NL_VENDOR_ID);
+		spin_lock_irqsave(&bfad->bfad_aen_spinlock, flags);
+		list_add_tail(&aen_entry->qe, &bfad->free_aen_q);
+		spin_unlock_irqrestore(&bfad->bfad_aen_spinlock, flags);
+	}
+}
+
+bfa_status_t
+bfad_im_probe(struct bfad_s *bfad)
+{
+	struct bfad_im_s      *im;
+
+	im = kzalloc(sizeof(struct bfad_im_s), GFP_KERNEL);
+	if (im == NULL)
+		return BFA_STATUS_ENOMEM;
+
+	bfad->im = im;
+	im->bfad = bfad;
+
+	if (bfad_thread_workq(bfad) != BFA_STATUS_OK) {
+		kfree(im);
+		return BFA_STATUS_FAILED;
+	}
+
+	INIT_WORK(&im->aen_im_notify_work, bfad_aen_im_notify_handler);
+	return BFA_STATUS_OK;
+}
+
+void
+bfad_im_probe_undo(struct bfad_s *bfad)
+{
+	if (bfad->im) {
+		bfad_destroy_workq(bfad->im);
+		kfree(bfad->im);
+		bfad->im = NULL;
+	}
+}
+
+struct Scsi_Host *
+bfad_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
+{
+	struct scsi_host_template *sht;
+
+	if (im_port->port->pvb_type == BFAD_PORT_PHYS_BASE)
+		sht = &bfad_im_scsi_host_template;
+	else
+		sht = &bfad_im_vport_template;
+
+	if (max_xfer_size != BFAD_MAX_SECTORS >> 1)
+		sht->max_sectors = max_xfer_size << 1;
+
+	sht->sg_tablesize = bfad->cfg_data.io_max_sge;
+
+	return scsi_host_alloc(sht, sizeof(struct bfad_im_port_pointer));
+}
+
+void
+bfad_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
+{
+	if (!(im_port->flags & BFAD_PORT_DELETE))
+		flush_workqueue(bfad->im->drv_workq);
+	bfad_im_scsi_host_free(im_port->bfad, im_port);
+	bfad_im_port_clean(im_port);
+	kfree(im_port);
+}
+
+void
+bfad_destroy_workq(struct bfad_im_s *im)
+{
+	if (im && im->drv_workq) {
+		flush_workqueue(im->drv_workq);
+		destroy_workqueue(im->drv_workq);
+		im->drv_workq = NULL;
+	}
+}
+
+bfa_status_t
+bfad_thread_workq(struct bfad_s *bfad)
+{
+	struct bfad_im_s      *im = bfad->im;
+
+	bfa_trc(bfad, 0);
+	snprintf(im->drv_workq_name, KOBJ_NAME_LEN, "bfad_wq_%d",
+		 bfad->inst_no);
+	im->drv_workq = create_singlethread_workqueue(im->drv_workq_name);
+	if (!im->drv_workq)
+		return BFA_STATUS_FAILED;
+
+	return BFA_STATUS_OK;
+}
+
+/*
+ * Scsi_Host template entry.
+ *
+ * Description:
+ * OS entry point to adjust the queue_depths on a per-device basis.
+ * Called once per device during the bus scan.
+ * Returns non-zero on failure.
+ */
+static int
+bfad_im_slave_configure(struct scsi_device *sdev)
+{
+	scsi_change_queue_depth(sdev, bfa_lun_queue_depth);
+	return 0;
+}
+
+struct scsi_host_template bfad_im_scsi_host_template = {
+	.module = THIS_MODULE,
+	.name = BFAD_DRIVER_NAME,
+	.info = bfad_im_info,
+	.queuecommand = bfad_im_queuecommand,
+	.eh_timed_out = fc_eh_timed_out,
+	.eh_abort_handler = bfad_im_abort_handler,
+	.eh_device_reset_handler = bfad_im_reset_lun_handler,
+	.eh_target_reset_handler = bfad_im_reset_target_handler,
+
+	.slave_alloc = bfad_im_slave_alloc,
+	.slave_configure = bfad_im_slave_configure,
+	.slave_destroy = bfad_im_slave_destroy,
+
+	.this_id = -1,
+	.sg_tablesize = BFAD_IO_MAX_SGE,
+	.cmd_per_lun = 3,
+	.use_clustering = ENABLE_CLUSTERING,
+	.shost_attrs = bfad_im_host_attrs,
+	.max_sectors = BFAD_MAX_SECTORS,
+	.vendor_id = BFA_PCI_VENDOR_ID_BROCADE,
+};
+
+struct scsi_host_template bfad_im_vport_template = {
+	.module = THIS_MODULE,
+	.name = BFAD_DRIVER_NAME,
+	.info = bfad_im_info,
+	.queuecommand = bfad_im_queuecommand,
+	.eh_timed_out = fc_eh_timed_out,
+	.eh_abort_handler = bfad_im_abort_handler,
+	.eh_device_reset_handler = bfad_im_reset_lun_handler,
+	.eh_target_reset_handler = bfad_im_reset_target_handler,
+
+	.slave_alloc = bfad_im_slave_alloc,
+	.slave_configure = bfad_im_slave_configure,
+	.slave_destroy = bfad_im_slave_destroy,
+
+	.this_id = -1,
+	.sg_tablesize = BFAD_IO_MAX_SGE,
+	.cmd_per_lun = 3,
+	.use_clustering = ENABLE_CLUSTERING,
+	.shost_attrs = bfad_im_vport_attrs,
+	.max_sectors = BFAD_MAX_SECTORS,
+};
+
+bfa_status_t
+bfad_im_module_init(void)
+{
+	bfad_im_scsi_transport_template =
+		fc_attach_transport(&bfad_im_fc_function_template);
+	if (!bfad_im_scsi_transport_template)
+		return BFA_STATUS_ENOMEM;
+
+	bfad_im_scsi_vport_transport_template =
+		fc_attach_transport(&bfad_im_vport_fc_function_template);
+	if (!bfad_im_scsi_vport_transport_template) {
+		fc_release_transport(bfad_im_scsi_transport_template);
+		return BFA_STATUS_ENOMEM;
+	}
+
+	return BFA_STATUS_OK;
+}
+
+void
+bfad_im_module_exit(void)
+{
+	if (bfad_im_scsi_transport_template)
+		fc_release_transport(bfad_im_scsi_transport_template);
+
+	if (bfad_im_scsi_vport_transport_template)
+		fc_release_transport(bfad_im_scsi_vport_transport_template);
+
+	idr_destroy(&bfad_im_port_index);
+}
+
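+/*
+ * Ramp the queue depth of every device on this target up by one,
+ * provided neither a ramp-up nor a queue-full event occurred within
+ * the last BFA_QUEUE_FULL_RAMP_UP_TIME seconds.
+ */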
+void
+bfad_ramp_up_qdepth(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
+{
+	struct scsi_device *tmp_sdev;
+
+	if (((jiffies - itnim->last_ramp_up_time) >
+		BFA_QUEUE_FULL_RAMP_UP_TIME * HZ) &&
+		((jiffies - itnim->last_queue_full_time) >
+		BFA_QUEUE_FULL_RAMP_UP_TIME * HZ)) {
+		shost_for_each_device(tmp_sdev, sdev->host) {
+			if (bfa_lun_queue_depth > tmp_sdev->queue_depth) {
+				if (tmp_sdev->id != sdev->id)
+					continue;
+				scsi_change_queue_depth(tmp_sdev,
+					tmp_sdev->queue_depth + 1);
+
+				itnim->last_ramp_up_time = jiffies;
+			}
+		}
+	}
+}
+
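+/*
+ * Queue-full handling: record the event time and shrink the queue
+ * depth of every device on this target by one.
+ */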
+void
+bfad_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
+{
+	struct scsi_device *tmp_sdev;
+
+	itnim->last_queue_full_time = jiffies;
+
+	shost_for_each_device(tmp_sdev, sdev->host) {
+		if (tmp_sdev->id != sdev->id)
+			continue;
+		scsi_track_queue_full(tmp_sdev, tmp_sdev->queue_depth - 1);
+	}
+}
+
+struct bfad_itnim_s *
+bfad_get_itnim(struct bfad_im_port_s *im_port, int id)
+{
+	struct bfad_itnim_s   *itnim = NULL;
+
+	/* Search the mapped list for this target ID */
+	list_for_each_entry(itnim, &im_port->itnim_mapped_list, list_entry) {
+		if (id == itnim->scsi_tgt_id)
+			return itnim;
+	}
+
+	return NULL;
+}
+
+/*
+ * Invoked from the SCSI host template slave_alloc() entry point.
+ * Queries the LUN mask database to check whether this LUN needs to be
+ * made visible to the SCSI mid-layer or not.
+ *
+ * Returns BFA_STATUS_OK if this LUN needs to be added to the OS stack.
+ * Returns -ENXIO to notify the SCSI mid-layer not to add this LUN to the
+ * OS stack.
+ */
+static int
+bfad_im_check_if_make_lun_visible(struct scsi_device *sdev,
+				  struct fc_rport *rport)
+{
+	struct bfad_itnim_data_s *itnim_data =
+				(struct bfad_itnim_data_s *) rport->dd_data;
+	struct bfa_s *bfa = itnim_data->itnim->bfa_itnim->bfa;
+	struct bfa_rport_s *bfa_rport = itnim_data->itnim->bfa_itnim->rport;
+	struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(bfa);
+	int i = 0, ret = -ENXIO;
+
+	for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+		if (lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE &&
+		    scsilun_to_int(&lun_list[i].lun) == sdev->lun &&
+		    lun_list[i].rp_tag == bfa_rport->rport_tag &&
+		    lun_list[i].lp_tag == (u8)bfa_rport->rport_info.lp_tag) {
+			ret = BFA_STATUS_OK;
+			break;
+		}
+	}
+	return ret;
+}
+
+/*
+ * Scsi_Host template entry slave_alloc
+ */
+static int
+bfad_im_slave_alloc(struct scsi_device *sdev)
+{
+	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+	struct bfad_itnim_data_s *itnim_data;
+	struct bfa_s *bfa;
+
+	if (!rport || fc_remote_port_chkready(rport))
+		return -ENXIO;
+
+	itnim_data = (struct bfad_itnim_data_s *) rport->dd_data;
+	bfa = itnim_data->itnim->bfa_itnim->bfa;
+
+	if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED) {
+		/*
+		 * We should not mask LUN 0, since that would translate
+		 * to no LUN/target for the SCSI mid-layer, resulting in
+		 * no scan.
+		 */
+		if (sdev->lun == 0) {
+			sdev->sdev_bflags |= BLIST_NOREPORTLUN |
+					     BLIST_SPARSELUN;
+			goto done;
+		}
+
+		/*
+		 * Query LUN Mask configuration - to expose this LUN
+		 * to the SCSI mid-layer or to mask it.
+		 */
+		if (bfad_im_check_if_make_lun_visible(sdev, rport) !=
+							BFA_STATUS_OK)
+			return -ENXIO;
+	}
+done:
+	sdev->hostdata = rport->dd_data;
+
+	return 0;
+}
+
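+/*
+ * Map the adapter's maximum speed onto the FC transport's
+ * supported-speeds bit mask.
+ */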
+u32
+bfad_im_supported_speeds(struct bfa_s *bfa)
+{
+	struct bfa_ioc_attr_s *ioc_attr;
+	u32 supported_speed = 0;
+
+	ioc_attr = kzalloc(sizeof(struct bfa_ioc_attr_s), GFP_KERNEL);
+	if (!ioc_attr)
+		return 0;
+
+	bfa_ioc_get_attr(&bfa->ioc, ioc_attr);
+	if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_16GBPS)
+		supported_speed |=  FC_PORTSPEED_16GBIT | FC_PORTSPEED_8GBIT |
+				FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT;
+	else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) {
+		if (ioc_attr->adapter_attr.is_mezz) {
+			supported_speed |= FC_PORTSPEED_8GBIT |
+				FC_PORTSPEED_4GBIT |
+				FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
+		} else {
+			supported_speed |= FC_PORTSPEED_8GBIT |
+				FC_PORTSPEED_4GBIT |
+				FC_PORTSPEED_2GBIT;
+		}
+	} else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_4GBPS) {
+		supported_speed |=  FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
+				FC_PORTSPEED_1GBIT;
+	} else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_10GBPS) {
+		supported_speed |= FC_PORTSPEED_10GBIT;
+	}
+	kfree(ioc_attr);
+	return supported_speed;
+}
+
+void
+bfad_fc_host_init(struct bfad_im_port_s *im_port)
+{
+	struct Scsi_Host *host = im_port->shost;
+	struct bfad_s         *bfad = im_port->bfad;
+	struct bfad_port_s    *port = im_port->port;
+	char symname[BFA_SYMNAME_MAXLEN];
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+
+	fc_host_node_name(host) =
+		cpu_to_be64((bfa_fcs_lport_get_nwwn(port->fcs_port)));
+	fc_host_port_name(host) =
+		cpu_to_be64((bfa_fcs_lport_get_pwwn(port->fcs_port)));
+	fc_host_max_npiv_vports(host) = bfa_lps_get_max_vport(&bfad->bfa);
+
+	fc_host_supported_classes(host) = FC_COS_CLASS3;
+
+	memset(fc_host_supported_fc4s(host), 0,
+	       sizeof(fc_host_supported_fc4s(host)));
+	if (supported_fc4s & BFA_LPORT_ROLE_FCP_IM)
+		/* For FCP type 0x08 */
+		fc_host_supported_fc4s(host)[2] = 1;
+	/* For fibre channel services type 0x20 */
+	fc_host_supported_fc4s(host)[7] = 1;
+
+	strlcpy(symname, bfad->bfa_fcs.fabric.bport.port_cfg.sym_name.symname,
+		BFA_SYMNAME_MAXLEN);
+	sprintf(fc_host_symbolic_name(host), "%s", symname);
+
+	fc_host_supported_speeds(host) = bfad_im_supported_speeds(&bfad->bfa);
+	fc_host_maxframe_size(host) = fcport->cfg.maxfrsize;
+}
+
+static void
+bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, struct bfad_itnim_s *itnim)
+{
+	struct fc_rport_identifiers rport_ids;
+	struct fc_rport *fc_rport;
+	struct bfad_itnim_data_s *itnim_data;
+
+	rport_ids.node_name =
+		cpu_to_be64(bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim));
+	rport_ids.port_name =
+		cpu_to_be64(bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim));
+	rport_ids.port_id =
+		bfa_hton3b(bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim));
+	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
+
+	itnim->fc_rport = fc_rport =
+		fc_remote_port_add(im_port->shost, 0, &rport_ids);
+
+	if (!fc_rport)
+		return;
+
+	fc_rport->maxframe_size =
+		bfa_fcs_itnim_get_maxfrsize(&itnim->fcs_itnim);
+	fc_rport->supported_classes = bfa_fcs_itnim_get_cos(&itnim->fcs_itnim);
+
+	itnim_data = fc_rport->dd_data;
+	itnim_data->itnim = itnim;
+
+	rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
+
+	if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
+		fc_remote_port_rolechg(fc_rport, rport_ids.roles);
+
+	if ((fc_rport->scsi_target_id != -1)
+	    && (fc_rport->scsi_target_id < MAX_FCP_TARGET))
+		itnim->scsi_tgt_id = fc_rport->scsi_target_id;
+
+	itnim->channel = fc_rport->channel;
+
+	return;
+}
+
+/*
+ * Work queue handler using FC transport service
+ * Context: kernel
+ */
+static void
+bfad_im_itnim_work_handler(struct work_struct *work)
+{
+	struct bfad_itnim_s   *itnim = container_of(work, struct bfad_itnim_s,
+							itnim_work);
+	struct bfad_im_s      *im = itnim->im;
+	struct bfad_s         *bfad = im->bfad;
+	struct bfad_im_port_s *im_port;
+	unsigned long   flags;
+	struct fc_rport *fc_rport;
+	wwn_t wwpn;
+	u32 fcid;
+	char wwpn_str[32], fcid_str[16];
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	im_port = itnim->im_port;
+	bfa_trc(bfad, itnim->state);
+	switch (itnim->state) {
+	case ITNIM_STATE_ONLINE:
+		if (!itnim->fc_rport) {
+			spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+			bfad_im_fc_rport_add(im_port, itnim);
+			spin_lock_irqsave(&bfad->bfad_lock, flags);
+			wwpn = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim);
+			fcid = bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim);
+			wwn2str(wwpn_str, wwpn);
+			fcid2str(fcid_str, fcid);
+			list_add_tail(&itnim->list_entry,
+				&im_port->itnim_mapped_list);
+			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+				"ITNIM ONLINE Target: %d:0:%d "
+				"FCID: %s WWPN: %s\n",
+				im_port->shost->host_no,
+				itnim->scsi_tgt_id,
+				fcid_str, wwpn_str);
+		} else {
+			printk(KERN_WARNING
+				"%s: itnim %llx is already in online state\n",
+				__func__,
+				bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim));
+		}
+
+		break;
+	case ITNIM_STATE_OFFLINE_PENDING:
+		itnim->state = ITNIM_STATE_OFFLINE;
+		if (itnim->fc_rport) {
+			fc_rport = itnim->fc_rport;
+			((struct bfad_itnim_data_s *)
+				fc_rport->dd_data)->itnim = NULL;
+			itnim->fc_rport = NULL;
+			if (!(im_port->port->flags & BFAD_PORT_DELETE)) {
+				spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+				fc_rport->dev_loss_tmo =
+					bfa_fcpim_path_tov_get(&bfad->bfa) + 1;
+				fc_remote_port_delete(fc_rport);
+				spin_lock_irqsave(&bfad->bfad_lock, flags);
+			}
+			wwpn = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim);
+			fcid = bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim);
+			wwn2str(wwpn_str, wwpn);
+			fcid2str(fcid_str, fcid);
+			list_del(&itnim->list_entry);
+			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+				"ITNIM OFFLINE Target: %d:0:%d "
+				"FCID: %s WWPN: %s\n",
+				im_port->shost->host_no,
+				itnim->scsi_tgt_id,
+				fcid_str, wwpn_str);
+		}
+		break;
+	case ITNIM_STATE_FREE:
+		if (itnim->fc_rport) {
+			fc_rport = itnim->fc_rport;
+			((struct bfad_itnim_data_s *)
+				fc_rport->dd_data)->itnim = NULL;
+			itnim->fc_rport = NULL;
+			if (!(im_port->port->flags & BFAD_PORT_DELETE)) {
+				spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+				fc_rport->dev_loss_tmo =
+					bfa_fcpim_path_tov_get(&bfad->bfa) + 1;
+				fc_remote_port_delete(fc_rport);
+				spin_lock_irqsave(&bfad->bfad_lock, flags);
+			}
+			list_del(&itnim->list_entry);
+		}
+
+		kfree(itnim);
+		break;
+	default:
+		WARN_ON(1);
+		break;
+	}
+
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+}
+
+/*
+ * Scsi_Host template entry, queues a SCSI command to the BFAD.
+ */
+static int
+bfad_im_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
+{
+	struct bfad_im_port_s *im_port =
+		(struct bfad_im_port_s *) cmnd->device->host->hostdata[0];
+	struct bfad_s         *bfad = im_port->bfad;
+	struct bfad_itnim_data_s *itnim_data = cmnd->device->hostdata;
+	struct bfad_itnim_s   *itnim;
+	struct bfa_ioim_s *hal_io;
+	unsigned long   flags;
+	int             rc;
+	int       sg_cnt = 0;
+	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
+
+	rc = fc_remote_port_chkready(rport);
+	if (rc) {
+		cmnd->result = rc;
+		done(cmnd);
+		return 0;
+	}
+
+	if (bfad->bfad_flags & BFAD_EEH_BUSY) {
+		if (bfad->bfad_flags & BFAD_EEH_PCI_CHANNEL_IO_PERM_FAILURE)
+			cmnd->result = DID_NO_CONNECT << 16;
+		else
+			cmnd->result = DID_REQUEUE << 16;
+		done(cmnd);
+		return 0;
+	}
+
+	sg_cnt = scsi_dma_map(cmnd);
+	if (sg_cnt < 0)
+		return SCSI_MLQUEUE_HOST_BUSY;
+
+	cmnd->scsi_done = done;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	if (!(bfad->bfad_flags & BFAD_HAL_START_DONE)) {
+		printk(KERN_WARNING
+			"bfad%d, queuecommand %p %x failed, BFA stopped\n",
+		       bfad->inst_no, cmnd, cmnd->cmnd[0]);
+		cmnd->result = DID_NO_CONNECT << 16;
+		goto out_fail_cmd;
+	}
+
+	itnim = itnim_data->itnim;
+	if (!itnim) {
+		cmnd->result = DID_IMM_RETRY << 16;
+		goto out_fail_cmd;
+	}
+
+	hal_io = bfa_ioim_alloc(&bfad->bfa, (struct bfad_ioim_s *) cmnd,
+				    itnim->bfa_itnim, sg_cnt);
+	if (!hal_io) {
+		printk(KERN_WARNING "hal_io failure\n");
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		scsi_dma_unmap(cmnd);
+		return SCSI_MLQUEUE_HOST_BUSY;
+	}
+
+	cmnd->host_scribble = (char *)hal_io;
+	bfa_ioim_start(hal_io);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	return 0;
+
+out_fail_cmd:
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	scsi_dma_unmap(cmnd);
+	if (done)
+		done(cmnd);
+
+	return 0;
+}
+
+static DEF_SCSI_QCMD(bfad_im_queuecommand)
+
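+/*
+ * Poll (in one-second steps, bounded by bfa_linkup_delay) for the base
+ * port and then a remote port to come online, and allow a short settle
+ * delay before the SCSI scan proceeds.
+ */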
+void
+bfad_rport_online_wait(struct bfad_s *bfad)
+{
+	int i;
+	int rport_delay = 10;
+
+	for (i = 0; !(bfad->bfad_flags & BFAD_PORT_ONLINE)
+		&& i < bfa_linkup_delay; i++) {
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(HZ);
+	}
+
+	if (bfad->bfad_flags & BFAD_PORT_ONLINE) {
+		rport_delay = rport_delay < bfa_linkup_delay ?
+			rport_delay : bfa_linkup_delay;
+		for (i = 0; !(bfad->bfad_flags & BFAD_RPORT_ONLINE)
+			&& i < rport_delay; i++) {
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			schedule_timeout(HZ);
+		}
+
+		if (rport_delay > 0 && (bfad->bfad_flags & BFAD_RPORT_ONLINE)) {
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			schedule_timeout(rport_delay * HZ);
+		}
+	}
+}
+
+int
+bfad_get_linkup_delay(struct bfad_s *bfad)
+{
+	u8		nwwns = 0;
+	wwn_t		wwns[BFA_PREBOOT_BOOTLUN_MAX];
+	int		linkup_delay;
+
+	/*
+	 * Query the boot target port wwns -- read from the boot
+	 * information in flash.
+	 * If nwwns > 0 => boot over SAN; set linkup_delay = 30
+	 * else => local boot; set linkup_delay = 0
+	 */
+
+	bfa_iocfc_get_bootwwns(&bfad->bfa, &nwwns, wwns);
+
+	if (nwwns > 0)
+		/* If Boot over SAN set linkup_delay = 30sec */
+		linkup_delay = 30;
+	else
+		/* If local boot; no linkup_delay */
+		linkup_delay = 0;
+
+	return linkup_delay;
+}
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
new file mode 100644
index 0000000..e61ed8d
--- /dev/null
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -0,0 +1,219 @@
+/*
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014- QLogic Corporation.
+ * All rights reserved
+ * www.qlogic.com
+ *
+ * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __BFAD_IM_H__
+#define __BFAD_IM_H__
+
+#include "bfa_fcs.h"
+
+#define FCPI_NAME " fcpim"
+
+#ifndef KOBJ_NAME_LEN
+#define KOBJ_NAME_LEN           20
+#endif
+
+bfa_status_t bfad_im_module_init(void);
+void bfad_im_module_exit(void);
+bfa_status_t bfad_im_probe(struct bfad_s *bfad);
+void bfad_im_probe_undo(struct bfad_s *bfad);
+bfa_status_t bfad_im_port_new(struct bfad_s *bfad, struct bfad_port_s *port);
+void bfad_im_port_delete(struct bfad_s *bfad, struct bfad_port_s *port);
+void bfad_im_port_clean(struct bfad_im_port_s *im_port);
+int  bfad_im_scsi_host_alloc(struct bfad_s *bfad,
+		struct bfad_im_port_s *im_port, struct device *dev);
+void bfad_im_scsi_host_free(struct bfad_s *bfad,
+				struct bfad_im_port_s *im_port);
+u32 bfad_im_supported_speeds(struct bfa_s *bfa);
+
+#define MAX_FCP_TARGET 1024
+#define MAX_FCP_LUN 16384
+#define BFAD_TARGET_RESET_TMO 60
+#define BFAD_LUN_RESET_TMO 60
+#define BFA_QUEUE_FULL_RAMP_UP_TIME 120
+
+/*
+ * itnim flags
+ */
+#define IO_DONE_BIT			0
+
+struct bfad_itnim_data_s {
+	struct bfad_itnim_s *itnim;
+};
+
+struct bfad_im_port_s {
+	struct bfad_s         *bfad;
+	struct bfad_port_s    *port;
+	struct work_struct port_delete_work;
+	int             idr_id;
+	u16        cur_scsi_id;
+	u16	flags;
+	struct list_head binding_list;
+	struct Scsi_Host *shost;
+	struct list_head itnim_mapped_list;
+	struct fc_vport *fc_vport;
+};
+
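+/*
+ * The Scsi_Host private data holds only a pointer to the im_port; the
+ * im_port itself is allocated separately so its lifetime is not tied
+ * to the Scsi_Host allocation.
+ */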
+struct bfad_im_port_pointer {
+	struct bfad_im_port_s *p;
+};
+
+static inline struct bfad_im_port_s *bfad_get_im_port(struct Scsi_Host *host)
+{
+	struct bfad_im_port_pointer *im_portp = shost_priv(host);
+	return im_portp->p;
+}
+
+enum bfad_itnim_state {
+	ITNIM_STATE_NONE,
+	ITNIM_STATE_ONLINE,
+	ITNIM_STATE_OFFLINE_PENDING,
+	ITNIM_STATE_OFFLINE,
+	ITNIM_STATE_TIMEOUT,
+	ITNIM_STATE_FREE,
+};
+
+/*
+ * Per itnim data structure
+ */
+struct bfad_itnim_s {
+	struct list_head list_entry;
+	struct bfa_fcs_itnim_s fcs_itnim;
+	struct work_struct itnim_work;
+	u32        flags;
+	enum bfad_itnim_state state;
+	struct bfad_im_s *im;
+	struct bfad_im_port_s *im_port;
+	struct bfad_rport_s *drv_rport;
+	struct fc_rport *fc_rport;
+	struct bfa_itnim_s *bfa_itnim;
+	u16        scsi_tgt_id;
+	u16	   channel;
+	u16        queue_work;
+	unsigned long	last_ramp_up_time;
+	unsigned long	last_queue_full_time;
+};
+
+enum bfad_binding_type {
+	FCP_PWWN_BINDING = 0x1,
+	FCP_NWWN_BINDING = 0x2,
+	FCP_FCID_BINDING = 0x3,
+};
+
+struct bfad_fcp_binding {
+	struct list_head list_entry;
+	enum bfad_binding_type binding_type;
+	u16        scsi_target_id;
+	u32        fc_id;
+	wwn_t           nwwn;
+	wwn_t           pwwn;
+};
+
+struct bfad_im_s {
+	struct bfad_s         *bfad;
+	struct workqueue_struct *drv_workq;
+	char            drv_workq_name[KOBJ_NAME_LEN];
+	struct work_struct	aen_im_notify_work;
+};
+
+#define bfad_get_aen_entry(_drv, _entry) do {				\
+	unsigned long	_flags;						\
+	spin_lock_irqsave(&(_drv)->bfad_aen_spinlock, _flags);		\
+	bfa_q_deq(&(_drv)->free_aen_q, &(_entry));			\
+	if (_entry)							\
+		list_add_tail(&(_entry)->qe, &(_drv)->active_aen_q);	\
+	spin_unlock_irqrestore(&(_drv)->bfad_aen_spinlock, _flags);	\
+} while (0)
+
+/* post fc_host vendor event */
+static inline void bfad_im_post_vendor_event(struct bfa_aen_entry_s *entry,
+					     struct bfad_s *drv, int cnt,
+					     enum bfa_aen_category cat,
+					     enum bfa_ioc_aen_event evt)
+{
+	struct timespec64 ts;
+
+	ktime_get_real_ts64(&ts);
+	/*
+	 * 'unsigned long aen_tv_sec' overflows in y2106 on 32-bit
+	 * architectures, or in 2038 if user space interprets it
+	 * as 'signed'.
+	 */
+	entry->aen_tv_sec = ts.tv_sec;
+	entry->aen_tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+	entry->bfad_num = drv->inst_no;
+	entry->seq_num = cnt;
+	entry->aen_category = cat;
+	entry->aen_type = evt;
+	if (drv->bfad_flags & BFAD_FC4_PROBE_DONE)
+		queue_work(drv->im->drv_workq, &drv->im->aen_im_notify_work);
+}
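+
+/*
+ * Illustrative AEN posting sequence using the two helpers above. A
+ * sketch only; the sequence counter and event arguments mirror how
+ * the driver's .c files typically call them:
+ *
+ *	struct bfa_aen_entry_s *aen_entry;
+ *
+ *	bfad_get_aen_entry(bfad, aen_entry);
+ *	if (aen_entry)
+ *		bfad_im_post_vendor_event(aen_entry, bfad,
+ *				++bfad->bfad_aen_seq,
+ *				BFA_AEN_CAT_IOC, BFA_IOC_AEN_HBGOOD);
+ */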
+
+struct Scsi_Host *bfad_scsi_host_alloc(struct bfad_im_port_s *im_port,
+				struct bfad_s *);
+bfa_status_t bfad_thread_workq(struct bfad_s *bfad);
+void bfad_destroy_workq(struct bfad_im_s *im);
+void bfad_fc_host_init(struct bfad_im_port_s *im_port);
+void bfad_scsi_host_free(struct bfad_s *bfad,
+				 struct bfad_im_port_s *im_port);
+void bfad_ramp_up_qdepth(struct bfad_itnim_s *itnim,
+				 struct scsi_device *sdev);
+void bfad_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev);
+struct bfad_itnim_s *bfad_get_itnim(struct bfad_im_port_s *im_port, int id);
+
+extern struct scsi_host_template bfad_im_scsi_host_template;
+extern struct scsi_host_template bfad_im_vport_template;
+extern struct fc_function_template bfad_im_fc_function_template;
+extern struct fc_function_template bfad_im_vport_fc_function_template;
+extern struct scsi_transport_template *bfad_im_scsi_transport_template;
+extern struct scsi_transport_template *bfad_im_scsi_vport_transport_template;
+
+extern struct device_attribute *bfad_im_host_attrs[];
+extern struct device_attribute *bfad_im_vport_attrs[];
+
+irqreturn_t bfad_intx(int irq, void *dev_id);
+
+int bfad_im_bsg_request(struct bsg_job *job);
+int bfad_im_bsg_timeout(struct bsg_job *job);
+
+/*
+ * Macro to set the SCSI device sdev_bflags - sdev_bflags are used by the
+ * SCSI mid-layer to choose the LUN scanning mode, REPORT_LUNS vs.
+ * sequential scan.
+ *
+ * Internally iterates over all the ITNIMs that are part of the im_port and
+ * sets the sdev_bflags for the scsi_device associated with LUN #0.
+ */
+#define bfad_reset_sdev_bflags(__im_port, __lunmask_cfg) do {		\
+	struct scsi_device *__sdev = NULL;				\
+	struct bfad_itnim_s *__itnim = NULL;				\
+	u32 scan_flags = BLIST_NOREPORTLUN | BLIST_SPARSELUN;		\
+	list_for_each_entry(__itnim, &((__im_port)->itnim_mapped_list),	\
+			    list_entry) {				\
+		__sdev = scsi_device_lookup((__im_port)->shost,		\
+					    __itnim->channel,		\
+					    __itnim->scsi_tgt_id, 0);	\
+		if (__sdev) {						\
+			if ((__lunmask_cfg) == BFA_TRUE)		\
+				__sdev->sdev_bflags |= scan_flags;	\
+			else						\
+				__sdev->sdev_bflags &= ~scan_flags;	\
+			scsi_device_put(__sdev);			\
+		}							\
+	}								\
+} while (0)
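+
+/*
+ * Illustrative use of the macro above, e.g. after a LUN mask
+ * configuration change:
+ *
+ *	bfad_reset_sdev_bflags(im_port, BFA_TRUE);
+ */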
+
+#endif
diff --git a/drivers/scsi/bfa/bfi.h b/drivers/scsi/bfa/bfi.h
new file mode 100644
index 0000000..5f698d0
--- /dev/null
+++ b/drivers/scsi/bfa/bfi.h
@@ -0,0 +1,1325 @@
+/*
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014- QLogic Corporation.
+ * All rights reserved
+ * www.qlogic.com
+ *
+ * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __BFI_H__
+#define __BFI_H__
+
+#include "bfa_defs.h"
+#include "bfa_defs_svc.h"
+
+#pragma pack(1)
+
+/* Per dma segment max size */
+#define BFI_MEM_DMA_SEG_SZ	(131072)
+
+/* Get number of dma segments required */
+#define BFI_MEM_DMA_NSEGS(_num_reqs, _req_sz)				\
+	((u16)(((((_num_reqs) * (_req_sz)) + BFI_MEM_DMA_SEG_SZ - 1) &	\
+	 ~(BFI_MEM_DMA_SEG_SZ - 1)) / BFI_MEM_DMA_SEG_SZ))
+
+/* Get number of dma reqs that fit in a segment */
+#define BFI_MEM_NREQS_SEG(_rqsz) (BFI_MEM_DMA_SEG_SZ / (_rqsz))
+
+/* Get segment num from tag */
+#define BFI_MEM_SEG_FROM_TAG(_tag, _rqsz) ((_tag) / BFI_MEM_NREQS_SEG(_rqsz))
+
+/* Get dma req offset in a segment */
+#define BFI_MEM_SEG_REQ_OFFSET(_tag, _sz)	\
+	((_tag) - (BFI_MEM_SEG_FROM_TAG(_tag, _sz) * BFI_MEM_NREQS_SEG(_sz)))
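+
+/*
+ * Example (illustrative): for the FCP sense buffers defined in this
+ * file, BFI_MEM_DMA_NSEGS(2000, 256) rounds 512000 bytes up to the
+ * next 128KB boundary and yields 4 segments; BFI_MEM_NREQS_SEG(256)
+ * is 512 requests per segment, so tag 1000 lands in segment 1 at
+ * request offset 488 per BFI_MEM_SEG_REQ_OFFSET(1000, 256).
+ */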
+
+/*
+ * BFI FW image type
+ */
+#define	BFI_FLASH_CHUNK_SZ			256	/*  Flash chunk size */
+#define	BFI_FLASH_CHUNK_SZ_WORDS	(BFI_FLASH_CHUNK_SZ/sizeof(u32))
+#define BFI_FLASH_IMAGE_SZ		0x100000
+
+/*
+ * Msg header common to all msgs
+ */
+struct bfi_mhdr_s {
+	u8		msg_class;	/*  @ref bfi_mclass_t		    */
+	u8		msg_id;		/*  msg opcode within the class    */
+	union {
+		struct {
+			u8	qid;
+			u8	fn_lpu;	/*  msg destination		    */
+		} h2i;
+		u16	i2htok;	/*  token in msgs to host	    */
+	} mtag;
+};
+
+#define bfi_fn_lpu(__fn, __lpu)	((__fn) << 1 | (__lpu))
+#define bfi_mhdr_2_fn(_mh)	((_mh)->mtag.h2i.fn_lpu >> 1)
+
+#define bfi_h2i_set(_mh, _mc, _op, _fn_lpu) do {		\
+	(_mh).msg_class		= (_mc);      \
+	(_mh).msg_id		= (_op);      \
+	(_mh).mtag.h2i.fn_lpu	= (_fn_lpu);      \
+} while (0)
+
+#define bfi_i2h_set(_mh, _mc, _op, _i2htok) do {		\
+	(_mh).msg_class		= (_mc);      \
+	(_mh).msg_id		= (_op);      \
+	(_mh).mtag.i2htok	= (_i2htok);      \
+} while (0)
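+
+/*
+ * Usage sketch (illustrative; the operands are placeholders):
+ *
+ *	struct bfi_ioc_ctrl_req_s req;
+ *
+ *	bfi_h2i_set(req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
+ *		    bfi_fn_lpu(0, 0));
+ *
+ * bfi_fn_lpu() packs the PCI function and LPU numbers into one byte;
+ * bfi_mhdr_2_fn() recovers the function number on the way back.
+ */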
+
+/*
+ * Message opcodes: 0-127 to firmware, 128-255 to host
+ */
+#define BFI_I2H_OPCODE_BASE	128
+#define BFA_I2HM(_x)		((_x) + BFI_I2H_OPCODE_BASE)
+
+/*
+ ****************************************************************************
+ *
+ * Scatter Gather Element and Page definition
+ *
+ ****************************************************************************
+ */
+
+#define BFI_SGE_INLINE	1
+#define BFI_SGE_INLINE_MAX	(BFI_SGE_INLINE + 1)
+
+/*
+ * SG Flags
+ */
+enum {
+	BFI_SGE_DATA		= 0,	/*  data address, not last	     */
+	BFI_SGE_DATA_CPL	= 1,	/*  data addr, last in current page */
+	BFI_SGE_DATA_LAST	= 3,	/*  data address, last		     */
+	BFI_SGE_LINK		= 2,	/*  link address		     */
+	BFI_SGE_PGDLEN		= 2,	/*  cumulative data length for page */
+};
+
+/*
+ * DMA addresses
+ */
+union bfi_addr_u {
+	struct {
+		__be32	addr_lo;
+		__be32	addr_hi;
+	} a32;
+};
+
+/*
+ * Scatter Gather Element used for fast-path IO requests
+ */
+struct bfi_sge_s {
+#ifdef __BIG_ENDIAN
+	u32	flags:2,
+			rsvd:2,
+			sg_len:28;
+#else
+	u32	sg_len:28,
+			rsvd:2,
+			flags:2;
+#endif
+	union bfi_addr_u sga;
+};
+
+/**
+ * Generic DMA addr-len pair.
+ */
+struct bfi_alen_s {
+	union bfi_addr_u	al_addr;	/* DMA addr of buffer	*/
+	u32			al_len;		/* length of buffer	*/
+};
+
+/*
+ * Scatter Gather Page
+ */
+#define BFI_SGPG_DATA_SGES		7
+#define BFI_SGPG_SGES_MAX		(BFI_SGPG_DATA_SGES + 1)
+#define BFI_SGPG_RSVD_WD_LEN	8
+struct bfi_sgpg_s {
+	struct bfi_sge_s sges[BFI_SGPG_SGES_MAX];
+	u32	rsvd[BFI_SGPG_RSVD_WD_LEN];
+};
+
+/* FCP module definitions */
+#define BFI_IO_MAX	(2000)
+#define BFI_IOIM_SNSLEN	(256)
+#define BFI_IOIM_SNSBUF_SEGS	\
+	BFI_MEM_DMA_NSEGS(BFI_IO_MAX, BFI_IOIM_SNSLEN)
+
+/*
+ * Large message structure - 128 byte messages
+ */
+#define BFI_LMSG_SZ		128
+#define BFI_LMSG_PL_WSZ	\
+			((BFI_LMSG_SZ - sizeof(struct bfi_mhdr_s)) / 4)
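+
+/*
+ * With the 4-byte bfi_mhdr_s above, this works out to 31 32-bit
+ * payload words per large message.
+ */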
+
+struct bfi_msg_s {
+	struct bfi_mhdr_s mhdr;
+	u32	pl[BFI_LMSG_PL_WSZ];
+};
+
+/*
+ * Mailbox message structure
+ */
+#define BFI_MBMSG_SZ		7
+struct bfi_mbmsg_s {
+	struct bfi_mhdr_s	mh;
+	u32		pl[BFI_MBMSG_SZ];
+};
+
+/*
+ * Supported PCI function class codes (personality)
+ */
+enum bfi_pcifn_class {
+	BFI_PCIFN_CLASS_FC  = 0x0c04,
+	BFI_PCIFN_CLASS_ETH = 0x0200,
+};
+
+/*
+ * Message Classes
+ */
+enum bfi_mclass {
+	BFI_MC_IOC		= 1,	/*  IO Controller (IOC)	    */
+	BFI_MC_DIAG		= 2,    /*  Diagnostic Msgs            */
+	BFI_MC_FLASH		= 3,	/*  Flash message class	*/
+	BFI_MC_CEE		= 4,	/*  CEE	*/
+	BFI_MC_FCPORT		= 5,	/*  FC port			    */
+	BFI_MC_IOCFC		= 6,	/*  FC - IO Controller (IOC)	    */
+	BFI_MC_ABLK		= 7,	/*  ASIC block configuration	    */
+	BFI_MC_UF		= 8,	/*  Unsolicited frame receive	    */
+	BFI_MC_FCXP		= 9,	/*  FC Transport		    */
+	BFI_MC_LPS		= 10,	/*  lport fc login services	    */
+	BFI_MC_RPORT		= 11,	/*  Remote port		    */
+	BFI_MC_ITN		= 12,	/*  I-T nexus (Initiator mode)	    */
+	BFI_MC_IOIM_READ	= 13,	/*  read IO (Initiator mode)	    */
+	BFI_MC_IOIM_WRITE	= 14,	/*  write IO (Initiator mode)	    */
+	BFI_MC_IOIM_IO		= 15,	/*  IO (Initiator mode)	    */
+	BFI_MC_IOIM		= 16,	/*  IO (Initiator mode)	    */
+	BFI_MC_IOIM_IOCOM	= 17,	/*  good IO completion		    */
+	BFI_MC_TSKIM		= 18,	/*  Initiator Task management	    */
+	BFI_MC_PORT		= 21,	/*  Physical port		    */
+	BFI_MC_SFP		= 22,	/*  SFP module	*/
+	BFI_MC_PHY		= 25,   /*  External PHY message class	*/
+	BFI_MC_FRU		= 34,
+	BFI_MC_MAX		= 35
+};
+
+#define BFI_IOC_MAX_CQS		4
+#define BFI_IOC_MAX_CQS_ASIC	8
+#define BFI_IOC_MSGLEN_MAX	32	/* 32 bytes */
+
+/*
+ *----------------------------------------------------------------------
+ *				IOC
+ *----------------------------------------------------------------------
+ */
+
+/*
+ * Different asic generations
+ */
+enum bfi_asic_gen {
+	BFI_ASIC_GEN_CB		= 1,	/* crossbow 8G FC		*/
+	BFI_ASIC_GEN_CT		= 2,	/* catapult 8G FC or 10G CNA	*/
+	BFI_ASIC_GEN_CT2	= 3,	/* catapult-2 16G FC or 10G CNA	*/
+};
+
+enum bfi_asic_mode {
+	BFI_ASIC_MODE_FC	= 1,	/* FC up to 8G speed		*/
+	BFI_ASIC_MODE_FC16	= 2,	/* FC up to 16G speed		*/
+	BFI_ASIC_MODE_ETH	= 3,	/* Ethernet ports		*/
+	BFI_ASIC_MODE_COMBO	= 4,	/* FC 16G and Ethernet 10G port	*/
+};
+
+enum bfi_ioc_h2i_msgs {
+	BFI_IOC_H2I_ENABLE_REQ		= 1,
+	BFI_IOC_H2I_DISABLE_REQ		= 2,
+	BFI_IOC_H2I_GETATTR_REQ		= 3,
+	BFI_IOC_H2I_DBG_SYNC		= 4,
+	BFI_IOC_H2I_DBG_DUMP		= 5,
+};
+
+enum bfi_ioc_i2h_msgs {
+	BFI_IOC_I2H_ENABLE_REPLY	= BFA_I2HM(1),
+	BFI_IOC_I2H_DISABLE_REPLY	= BFA_I2HM(2),
+	BFI_IOC_I2H_GETATTR_REPLY	= BFA_I2HM(3),
+	BFI_IOC_I2H_HBEAT		= BFA_I2HM(4),
+	BFI_IOC_I2H_ACQ_ADDR_REPLY	= BFA_I2HM(5),
+};
+
+/*
+ * BFI_IOC_H2I_GETATTR_REQ message
+ */
+struct bfi_ioc_getattr_req_s {
+	struct bfi_mhdr_s	mh;
+	union bfi_addr_u	attr_addr;
+};
+
+#define BFI_IOC_ATTR_UUID_SZ	16
+struct bfi_ioc_attr_s {
+	wwn_t		mfg_pwwn;	/*  Mfg port wwn	   */
+	wwn_t		mfg_nwwn;	/*  Mfg node wwn	   */
+	mac_t		mfg_mac;	/*  Mfg mac		   */
+	u8		port_mode;	/* bfi_port_mode	   */
+	u8		rsvd_a;
+	wwn_t		pwwn;
+	wwn_t		nwwn;
+	mac_t		mac;		/*  PBC or Mfg mac	   */
+	u16	rsvd_b;
+	mac_t		fcoe_mac;
+	u16	rsvd_c;
+	char		brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
+	u8		pcie_gen;
+	u8		pcie_lanes_orig;
+	u8		pcie_lanes;
+	u8		rx_bbcredit;	/*  receive buffer credits */
+	u32	adapter_prop;	/*  adapter properties     */
+	u16	maxfrsize;	/*  max receive frame size */
+	char		asic_rev;
+	u8		rsvd_d;
+	char		fw_version[BFA_VERSION_LEN];
+	char		optrom_version[BFA_VERSION_LEN];
+	struct		bfa_mfg_vpd_s	vpd;
+	u32	card_type;	/*  card type			*/
+	u8	mfg_day;	/* manufacturing day */
+	u8	mfg_month;	/* manufacturing month */
+	u16	mfg_year;	/* manufacturing year */
+	u8	uuid[BFI_IOC_ATTR_UUID_SZ];	/*!< chinook uuid */
+};
+
+/*
+ * BFI_IOC_I2H_GETATTR_REPLY message
+ */
+struct bfi_ioc_getattr_reply_s {
+	struct	bfi_mhdr_s	mh;	/*  Common msg header		*/
+	u8			status;	/*  cfg reply status		*/
+	u8			rsvd[3];
+};
+
+/*
+ * Firmware memory page offsets
+ */
+#define BFI_IOC_SMEM_PG0_CB	(0x40)
+#define BFI_IOC_SMEM_PG0_CT	(0x180)
+
+/*
+ * Firmware statistic offset
+ */
+#define BFI_IOC_FWSTATS_OFF	(0x6B40)
+#define BFI_IOC_FWSTATS_SZ	(4096)
+
+/*
+ * Firmware trace offset
+ */
+#define BFI_IOC_TRC_OFF		(0x4b00)
+#define BFI_IOC_TRC_ENTS	256
+
+#define BFI_IOC_FW_SIGNATURE	(0xbfadbfad)
+#define BFA_IOC_FW_INV_SIGN	(0xdeaddead)
+#define BFI_IOC_MD5SUM_SZ	4
+
+struct bfi_ioc_fwver_s {
+#ifdef __BIG_ENDIAN
+	uint8_t patch;
+	uint8_t maint;
+	uint8_t minor;
+	uint8_t major;
+	uint8_t rsvd[2];
+	uint8_t build;
+	uint8_t phase;
+#else
+	uint8_t major;
+	uint8_t minor;
+	uint8_t maint;
+	uint8_t patch;
+	uint8_t phase;
+	uint8_t build;
+	uint8_t rsvd[2];
+#endif
+};
+
+struct bfi_ioc_image_hdr_s {
+	u32	signature;	/* constant signature		*/
+	u8	asic_gen;	/* asic generation		*/
+	u8	asic_mode;
+	u8	port0_mode;	/* device mode for port 0	*/
+	u8	port1_mode;	/* device mode for port 1	*/
+	u32	exec;		/* exec vector			*/
+	u32	bootenv;	/* firmware boot env		*/
+	u32	rsvd_b[2];
+	struct bfi_ioc_fwver_s	fwver;
+	u32	md5sum[BFI_IOC_MD5SUM_SZ];
+};
+
+enum bfi_ioc_img_ver_cmp_e {
+	BFI_IOC_IMG_VER_INCOMP,
+	BFI_IOC_IMG_VER_OLD,
+	BFI_IOC_IMG_VER_SAME,
+	BFI_IOC_IMG_VER_BETTER
+};
+
+#define BFI_FWBOOT_DEVMODE_OFF		4
+#define BFI_FWBOOT_TYPE_OFF		8
+#define BFI_FWBOOT_ENV_OFF		12
+#define BFI_FWBOOT_DEVMODE(__asic_gen, __asic_mode, __p0_mode, __p1_mode) \
+	(((u32)(__asic_gen)) << 24 |		\
+	 ((u32)(__asic_mode)) << 16 |		\
+	 ((u32)(__p0_mode)) << 8 |		\
+	 ((u32)(__p1_mode)))
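+
+/*
+ * Example (illustrative): a CT2 ASIC in 16G FC mode with port 0 in
+ * FC mode and port 1 in Ethernet mode packs to
+ *
+ *	BFI_FWBOOT_DEVMODE(BFI_ASIC_GEN_CT2, BFI_ASIC_MODE_FC16,
+ *		BFI_PORT_MODE_FC, BFI_PORT_MODE_ETH) == 0x03020102
+ */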
+
+enum bfi_fwboot_type {
+	BFI_FWBOOT_TYPE_NORMAL  = 0,
+	BFI_FWBOOT_TYPE_FLASH   = 1,
+	BFI_FWBOOT_TYPE_MEMTEST = 2,
+};
+
+#define BFI_FWBOOT_TYPE_NORMAL	0
+#define BFI_FWBOOT_TYPE_MEMTEST	2
+#define BFI_FWBOOT_ENV_OS       0
+
+enum bfi_port_mode {
+	BFI_PORT_MODE_FC	= 1,
+	BFI_PORT_MODE_ETH	= 2,
+};
+
+struct bfi_ioc_hbeat_s {
+	struct bfi_mhdr_s  mh;		/*  common msg header		*/
+	u32	   hb_count;	/*  current heart beat count	*/
+};
+
+/*
+ * IOC hardware/firmware state
+ */
+enum bfi_ioc_state {
+	BFI_IOC_UNINIT		= 0,	/*  not initialized		     */
+	BFI_IOC_INITING		= 1,	/*  h/w is being initialized	     */
+	BFI_IOC_HWINIT		= 2,	/*  h/w is initialized		     */
+	BFI_IOC_CFG		= 3,	/*  IOC configuration in progress   */
+	BFI_IOC_OP		= 4,	/*  IOC is operational		     */
+	BFI_IOC_DISABLING	= 5,	/*  IOC is being disabled	     */
+	BFI_IOC_DISABLED	= 6,	/*  IOC is disabled		     */
+	BFI_IOC_CFG_DISABLED	= 7,	/*  IOC is being disabled; transient */
+	BFI_IOC_FAIL		= 8,	/*  IOC heart-beat failure	     */
+	BFI_IOC_MEMTEST		= 9,	/*  IOC is doing memtest	     */
+};
+
+#define BFA_IOC_CB_JOIN_SH	16
+#define BFA_IOC_CB_FWSTATE_MASK	0x0000ffff
+#define BFA_IOC_CB_JOIN_MASK	0xffff0000
+
+#define BFI_IOC_ENDIAN_SIG  0x12345678
+
+enum {
+	BFI_ADAPTER_TYPE_FC	= 0x01,		/*  FC adapters	   */
+	BFI_ADAPTER_TYPE_MK	= 0x0f0000,	/*  adapter type mask     */
+	BFI_ADAPTER_TYPE_SH	= 16,	        /*  adapter type shift    */
+	BFI_ADAPTER_NPORTS_MK	= 0xff00,	/*  number of ports mask  */
+	BFI_ADAPTER_NPORTS_SH	= 8,	        /*  number of ports shift */
+	BFI_ADAPTER_SPEED_MK	= 0xff,		/*  adapter speed mask    */
+	BFI_ADAPTER_SPEED_SH	= 0,	        /*  adapter speed shift   */
+	BFI_ADAPTER_PROTO	= 0x100000,	/*  prototype adapters    */
+	BFI_ADAPTER_TTV		= 0x200000,	/*  TTV debug capable     */
+	BFI_ADAPTER_UNSUPP	= 0x400000,	/*  unknown adapter type  */
+};
+
+#define BFI_ADAPTER_GETP(__prop, __adap_prop)			\
+	(((__adap_prop) & BFI_ADAPTER_ ## __prop ## _MK) >>	\
+		BFI_ADAPTER_ ## __prop ## _SH)
+#define BFI_ADAPTER_SETP(__prop, __val)				\
+	((__val) << BFI_ADAPTER_ ## __prop ## _SH)
+#define BFI_ADAPTER_IS_PROTO(__adap_type)			\
+	((__adap_type) & BFI_ADAPTER_PROTO)
+#define BFI_ADAPTER_IS_TTV(__adap_type)				\
+	((__adap_type) & BFI_ADAPTER_TTV)
+#define BFI_ADAPTER_IS_UNSUPP(__adap_type)			\
+	((__adap_type) & BFI_ADAPTER_UNSUPP)
+#define BFI_ADAPTER_IS_SPECIAL(__adap_type)			\
+	((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO |	\
+			BFI_ADAPTER_UNSUPP))
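+
+/*
+ * Example (illustrative): an adapter property word for a 2-port 8G
+ * FC adapter could be built and queried as
+ *
+ *	adap_prop = BFI_ADAPTER_SETP(TYPE, BFI_ADAPTER_TYPE_FC) |
+ *		BFI_ADAPTER_SETP(NPORTS, 2) |
+ *		BFI_ADAPTER_SETP(SPEED, 8);
+ *	nports = BFI_ADAPTER_GETP(NPORTS, adap_prop);	(yields 2)
+ */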
+
+/*
+ * BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages
+ */
+struct bfi_ioc_ctrl_req_s {
+	struct bfi_mhdr_s	mh;
+	u16			clscode;
+	u16			rsvd;
+	u32		tv_sec;
+};
+#define bfi_ioc_enable_req_t struct bfi_ioc_ctrl_req_s
+#define bfi_ioc_disable_req_t struct bfi_ioc_ctrl_req_s
+
+/*
+ * BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages
+ */
+struct bfi_ioc_ctrl_reply_s {
+	struct bfi_mhdr_s	mh;		/*  Common msg header     */
+	u8			status;		/*  enable/disable status */
+	u8			port_mode;	/*  bfa_mode_s	*/
+	u8			cap_bm;		/*  capability bit mask */
+	u8			rsvd;
+};
+#define bfi_ioc_enable_reply_t struct bfi_ioc_ctrl_reply_s
+#define bfi_ioc_disable_reply_t struct bfi_ioc_ctrl_reply_s
+
+#define BFI_IOC_MSGSZ   8
+/*
+ * H2I Messages
+ */
+union bfi_ioc_h2i_msg_u {
+	struct bfi_mhdr_s		mh;
+	struct bfi_ioc_ctrl_req_s	enable_req;
+	struct bfi_ioc_ctrl_req_s	disable_req;
+	struct bfi_ioc_getattr_req_s	getattr_req;
+	u32			mboxmsg[BFI_IOC_MSGSZ];
+};
+
+/*
+ * I2H Messages
+ */
+union bfi_ioc_i2h_msg_u {
+	struct bfi_mhdr_s		mh;
+	struct bfi_ioc_ctrl_reply_s	fw_event;
+	u32			mboxmsg[BFI_IOC_MSGSZ];
+};
+
+
+/*
+ *----------------------------------------------------------------------
+ *				PBC
+ *----------------------------------------------------------------------
+ */
+
+#define BFI_PBC_MAX_BLUNS	8
+#define BFI_PBC_MAX_VPORTS	16
+#define BFI_PBC_PORT_DISABLED	2
+
+/*
+ * PBC boot lun configuration
+ */
+struct bfi_pbc_blun_s {
+	wwn_t		tgt_pwwn;
+	struct scsi_lun	tgt_lun;
+};
+
+/*
+ * PBC virtual port configuration
+ */
+struct bfi_pbc_vport_s {
+	wwn_t		vp_pwwn;
+	wwn_t		vp_nwwn;
+};
+
+/*
+ * BFI pre-boot configuration information
+ */
+struct bfi_pbc_s {
+	u8		port_enabled;
+	u8		boot_enabled;
+	u8		nbluns;
+	u8		nvports;
+	u8		port_speed;
+	u8		rsvd_a;
+	u16	hss;
+	wwn_t		pbc_pwwn;
+	wwn_t		pbc_nwwn;
+	struct bfi_pbc_blun_s blun[BFI_PBC_MAX_BLUNS];
+	struct bfi_pbc_vport_s vport[BFI_PBC_MAX_VPORTS];
+};
+
+/*
+ *----------------------------------------------------------------------
+ *				MSGQ
+ *----------------------------------------------------------------------
+ */
+#define BFI_MSGQ_FULL(_q)	(((_q->pi + 1) % _q->q_depth) == _q->ci)
+#define BFI_MSGQ_EMPTY(_q)	(_q->pi == _q->ci)
+#define BFI_MSGQ_UPDATE_CI(_q)	(_q->ci = (_q->ci + 1) % _q->q_depth)
+#define BFI_MSGQ_UPDATE_PI(_q)	(_q->pi = (_q->pi + 1) % _q->q_depth)
+
+/* q_depth must be power of 2 */
+#define BFI_MSGQ_FREE_CNT(_q)	((_q->ci - _q->pi - 1) & (_q->q_depth - 1))
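+
+/*
+ * Example (illustrative): with q_depth 64, pi 10 and ci 5 there are
+ * 5 entries in flight and BFI_MSGQ_FREE_CNT() returns 58; one slot
+ * is always left unused to distinguish a full queue from an empty one.
+ */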
+
+enum bfi_msgq_h2i_msgs_e {
+	BFI_MSGQ_H2I_INIT_REQ	= 1,
+	BFI_MSGQ_H2I_DOORBELL	= 2,
+	BFI_MSGQ_H2I_SHUTDOWN	= 3,
+};
+
+enum bfi_msgq_i2h_msgs_e {
+	BFI_MSGQ_I2H_INIT_RSP	= 1,
+	BFI_MSGQ_I2H_DOORBELL	= 2,
+};
+
+
+/* Messages (commands/responses/AENs) will have the following header */
+struct bfi_msgq_mhdr_s {
+	u8		msg_class;
+	u8		msg_id;
+	u16	msg_token;
+	u16	num_entries;
+	u8		enet_id;
+	u8		rsvd[1];
+};
+
+#define bfi_msgq_mhdr_set(_mh, _mc, _mid, _tok, _enet_id) do {        \
+	(_mh).msg_class		= (_mc);      \
+	(_mh).msg_id		= (_mid);      \
+	(_mh).msg_token		= (_tok);      \
+	(_mh).enet_id		= (_enet_id);      \
+} while (0)
+
+/*
+ * Mailbox for messaging interface
+ */
+#define BFI_MSGQ_CMD_ENTRY_SIZE		(64)    /* TBD */
+#define BFI_MSGQ_RSP_ENTRY_SIZE		(64)    /* TBD */
+#define BFI_MSGQ_MSG_SIZE_MAX		(2048)  /* TBD */
+
+struct bfi_msgq_s {
+	union bfi_addr_u addr;
+	u16 q_depth;     /* Total num of entries in the queue */
+	u8 rsvd[2];
+};
+
+/* BFI_ENET_MSGQ_CFG_REQ TBD init or cfg? */
+struct bfi_msgq_cfg_req_s {
+	struct bfi_mhdr_s mh;
+	struct bfi_msgq_s cmdq;
+	struct bfi_msgq_s rspq;
+};
+
+/* BFI_ENET_MSGQ_CFG_RSP */
+struct bfi_msgq_cfg_rsp_s {
+	struct bfi_mhdr_s mh;
+	u8 cmd_status;
+	u8 rsvd[3];
+};
+
+
+/* BFI_MSGQ_H2I_DOORBELL */
+struct bfi_msgq_h2i_db_s {
+	struct bfi_mhdr_s mh;
+	u16 cmdq_pi;
+	u16 rspq_ci;
+};
+
+/* BFI_MSGQ_I2H_DOORBELL */
+struct bfi_msgq_i2h_db_s {
+	struct bfi_mhdr_s mh;
+	u16 rspq_pi;
+	u16 cmdq_ci;
+};
+
+#pragma pack()
+
+/* BFI port specific */
+#pragma pack(1)
+
+enum bfi_port_h2i {
+	BFI_PORT_H2I_ENABLE_REQ         = (1),
+	BFI_PORT_H2I_DISABLE_REQ        = (2),
+	BFI_PORT_H2I_GET_STATS_REQ      = (3),
+	BFI_PORT_H2I_CLEAR_STATS_REQ    = (4),
+};
+
+enum bfi_port_i2h {
+	BFI_PORT_I2H_ENABLE_RSP         = BFA_I2HM(1),
+	BFI_PORT_I2H_DISABLE_RSP        = BFA_I2HM(2),
+	BFI_PORT_I2H_GET_STATS_RSP      = BFA_I2HM(3),
+	BFI_PORT_I2H_CLEAR_STATS_RSP    = BFA_I2HM(4),
+};
+
+/*
+ * Generic REQ type
+ */
+struct bfi_port_generic_req_s {
+	struct bfi_mhdr_s  mh;          /*  msg header		*/
+	u32     msgtag;         /*  msgtag for reply                */
+	u32     rsvd;
+};
+
+/*
+ * Generic RSP type
+ */
+struct bfi_port_generic_rsp_s {
+	struct bfi_mhdr_s  mh;          /*  common msg header               */
+	u8              status;         /*  port enable status              */
+	u8              rsvd[3];
+	u32     msgtag;         /*  msgtag for reply                */
+};
+
+/*
+ * BFI_PORT_H2I_GET_STATS_REQ
+ */
+struct bfi_port_get_stats_req_s {
+	struct bfi_mhdr_s  mh;          /*  common msg header               */
+	union bfi_addr_u   dma_addr;
+};
+
+union bfi_port_h2i_msg_u {
+	struct bfi_mhdr_s               mh;
+	struct bfi_port_generic_req_s   enable_req;
+	struct bfi_port_generic_req_s   disable_req;
+	struct bfi_port_get_stats_req_s getstats_req;
+	struct bfi_port_generic_req_s   clearstats_req;
+};
+
+union bfi_port_i2h_msg_u {
+	struct bfi_mhdr_s               mh;
+	struct bfi_port_generic_rsp_s   enable_rsp;
+	struct bfi_port_generic_rsp_s   disable_rsp;
+	struct bfi_port_generic_rsp_s   getstats_rsp;
+	struct bfi_port_generic_rsp_s   clearstats_rsp;
+};
+
+/*
+ *----------------------------------------------------------------------
+ *				ABLK
+ *----------------------------------------------------------------------
+ */
+enum bfi_ablk_h2i_msgs_e {
+	BFI_ABLK_H2I_QUERY		= 1,
+	BFI_ABLK_H2I_ADPT_CONFIG	= 2,
+	BFI_ABLK_H2I_PORT_CONFIG	= 3,
+	BFI_ABLK_H2I_PF_CREATE		= 4,
+	BFI_ABLK_H2I_PF_DELETE		= 5,
+	BFI_ABLK_H2I_PF_UPDATE		= 6,
+	BFI_ABLK_H2I_OPTROM_ENABLE	= 7,
+	BFI_ABLK_H2I_OPTROM_DISABLE	= 8,
+};
+
+enum bfi_ablk_i2h_msgs_e {
+	BFI_ABLK_I2H_QUERY		= BFA_I2HM(BFI_ABLK_H2I_QUERY),
+	BFI_ABLK_I2H_ADPT_CONFIG	= BFA_I2HM(BFI_ABLK_H2I_ADPT_CONFIG),
+	BFI_ABLK_I2H_PORT_CONFIG	= BFA_I2HM(BFI_ABLK_H2I_PORT_CONFIG),
+	BFI_ABLK_I2H_PF_CREATE		= BFA_I2HM(BFI_ABLK_H2I_PF_CREATE),
+	BFI_ABLK_I2H_PF_DELETE		= BFA_I2HM(BFI_ABLK_H2I_PF_DELETE),
+	BFI_ABLK_I2H_PF_UPDATE		= BFA_I2HM(BFI_ABLK_H2I_PF_UPDATE),
+	BFI_ABLK_I2H_OPTROM_ENABLE	= BFA_I2HM(BFI_ABLK_H2I_OPTROM_ENABLE),
+	BFI_ABLK_I2H_OPTROM_DISABLE	= BFA_I2HM(BFI_ABLK_H2I_OPTROM_DISABLE),
+};
+
+/* BFI_ABLK_H2I_QUERY */
+struct bfi_ablk_h2i_query_s {
+	struct bfi_mhdr_s	mh;
+	union bfi_addr_u	addr;
+};
+
+/* BFI_ABLK_H2I_ADPT_CONFIG, BFI_ABLK_H2I_PORT_CONFIG */
+struct bfi_ablk_h2i_cfg_req_s {
+	struct bfi_mhdr_s	mh;
+	u8			mode;
+	u8			port;
+	u8			max_pf;
+	u8			max_vf;
+};
+
+/*
+ * BFI_ABLK_H2I_PF_CREATE, BFI_ABLK_H2I_PF_DELETE
+ */
+struct bfi_ablk_h2i_pf_req_s {
+	struct bfi_mhdr_s	mh;
+	u8			pcifn;
+	u8			port;
+	u16			pers;
+	u16			bw_min; /* percent BW @ max speed */
+	u16			bw_max; /* percent BW @ max speed */
+};
+
+/* BFI_ABLK_H2I_OPTROM_ENABLE, BFI_ABLK_H2I_OPTROM_DISABLE */
+struct bfi_ablk_h2i_optrom_s {
+	struct bfi_mhdr_s	mh;
+};
+
+/*
+ * BFI_ABLK_I2H_QUERY
+ * BFI_ABLK_I2H_PORT_CONFIG
+ * BFI_ABLK_I2H_PF_CREATE
+ * BFI_ABLK_I2H_PF_DELETE
+ * BFI_ABLK_I2H_PF_UPDATE
+ * BFI_ABLK_I2H_OPTROM_ENABLE
+ * BFI_ABLK_I2H_OPTROM_DISABLE
+ */
+struct bfi_ablk_i2h_rsp_s {
+	struct bfi_mhdr_s	mh;
+	u8			status;
+	u8			pcifn;
+	u8			port_mode;
+};
+
+
+/*
+ *	CEE module specific messages
+ */
+
+/* Mailbox commands from host to firmware */
+enum bfi_cee_h2i_msgs_e {
+	BFI_CEE_H2I_GET_CFG_REQ = 1,
+	BFI_CEE_H2I_RESET_STATS = 2,
+	BFI_CEE_H2I_GET_STATS_REQ = 3,
+};
+
+enum bfi_cee_i2h_msgs_e {
+	BFI_CEE_I2H_GET_CFG_RSP = BFA_I2HM(1),
+	BFI_CEE_I2H_RESET_STATS_RSP = BFA_I2HM(2),
+	BFI_CEE_I2H_GET_STATS_RSP = BFA_I2HM(3),
+};
+
+/*
+ * H2I command structure for resetting the stats
+ */
+struct bfi_cee_reset_stats_s {
+	struct bfi_mhdr_s  mh;
+};
+
+/*
+ * Get configuration  command from host
+ */
+struct bfi_cee_get_req_s {
+	struct bfi_mhdr_s	mh;
+	union bfi_addr_u	dma_addr;
+};
+
+/*
+ * Reply message from firmware
+ */
+struct bfi_cee_get_rsp_s {
+	struct bfi_mhdr_s	mh;
+	u8			cmd_status;
+	u8			rsvd[3];
+};
+
+/*
+ * Reply message from firmware
+ */
+struct bfi_cee_stats_rsp_s {
+	struct bfi_mhdr_s	mh;
+	u8			cmd_status;
+	u8			rsvd[3];
+};
+
+/* Mailbox message structures from firmware to host	*/
+union bfi_cee_i2h_msg_u {
+	struct bfi_mhdr_s		mh;
+	struct bfi_cee_get_rsp_s	get_rsp;
+	struct bfi_cee_stats_rsp_s	stats_rsp;
+};
+
+/*
+ * SFP related
+ */
+
+enum bfi_sfp_h2i_e {
+	BFI_SFP_H2I_SHOW	= 1,
+	BFI_SFP_H2I_SCN		= 2,
+};
+
+enum bfi_sfp_i2h_e {
+	BFI_SFP_I2H_SHOW = BFA_I2HM(BFI_SFP_H2I_SHOW),
+	BFI_SFP_I2H_SCN	 = BFA_I2HM(BFI_SFP_H2I_SCN),
+};
+
+/*
+ *	SFP state change notification
+ */
+struct bfi_sfp_scn_s {
+	struct bfi_mhdr_s mhr;	/* host msg header        */
+	u8	event;
+	u8	sfpid;
+	u8	pomlvl;	/* pom level: normal/warning/alarm */
+	u8	is_elb;	/* e-loopback */
+};
+
+/*
+ *	SFP state
+ */
+enum bfa_sfp_stat_e {
+	BFA_SFP_STATE_INIT	= 0,	/* SFP state is uninit	*/
+	BFA_SFP_STATE_REMOVED	= 1,	/* SFP is removed	*/
+	BFA_SFP_STATE_INSERTED	= 2,	/* SFP is inserted	*/
+	BFA_SFP_STATE_VALID	= 3,	/* SFP is valid		*/
+	BFA_SFP_STATE_UNSUPPORT	= 4,	/* SFP is unsupported	*/
+	BFA_SFP_STATE_FAILED	= 5,	/* SFP i2c read fail	*/
+};
+
+/*
+ *  SFP memory access type
+ */
+enum bfi_sfp_mem_e {
+	BFI_SFP_MEM_ALL		= 0x1,  /* access all data field */
+	BFI_SFP_MEM_DIAGEXT	= 0x2,  /* access diag ext data field only */
+};
+
+struct bfi_sfp_req_s {
+	struct bfi_mhdr_s	mh;
+	u8			memtype;
+	u8			rsvd[3];
+	struct bfi_alen_s	alen;
+};
+
+struct bfi_sfp_rsp_s {
+	struct bfi_mhdr_s	mh;
+	u8			status;
+	u8			state;
+	u8			rsvd[2];
+};
+
+/*
+ *	FLASH module specific
+ */
+enum bfi_flash_h2i_msgs {
+	BFI_FLASH_H2I_QUERY_REQ = 1,
+	BFI_FLASH_H2I_ERASE_REQ = 2,
+	BFI_FLASH_H2I_WRITE_REQ = 3,
+	BFI_FLASH_H2I_READ_REQ = 4,
+	BFI_FLASH_H2I_BOOT_VER_REQ = 5,
+};
+
+enum bfi_flash_i2h_msgs {
+	BFI_FLASH_I2H_QUERY_RSP = BFA_I2HM(1),
+	BFI_FLASH_I2H_ERASE_RSP = BFA_I2HM(2),
+	BFI_FLASH_I2H_WRITE_RSP = BFA_I2HM(3),
+	BFI_FLASH_I2H_READ_RSP = BFA_I2HM(4),
+	BFI_FLASH_I2H_BOOT_VER_RSP = BFA_I2HM(5),
+	BFI_FLASH_I2H_EVENT = BFA_I2HM(127),
+};
+
+/*
+ * Flash query request
+ */
+struct bfi_flash_query_req_s {
+	struct bfi_mhdr_s mh;	/* Common msg header */
+	struct bfi_alen_s alen;
+};
+
+/*
+ * Flash erase request
+ */
+struct bfi_flash_erase_req_s {
+	struct bfi_mhdr_s	mh;	/* Common msg header */
+	u32	type;	/* partition type */
+	u8	instance; /* partition instance */
+	u8	rsv[3];
+};
+
+/*
+ * Flash write request
+ */
+struct bfi_flash_write_req_s {
+	struct bfi_mhdr_s mh;	/* Common msg header */
+	struct bfi_alen_s alen;
+	u32	type;	/* partition type */
+	u8	instance; /* partition instance */
+	u8	last;
+	u8	rsv[2];
+	u32	offset;
+	u32	length;
+};
+
+/*
+ * Flash read request
+ */
+struct bfi_flash_read_req_s {
+	struct bfi_mhdr_s mh;	/* Common msg header */
+	u32	type;		/* partition type */
+	u8	instance;	/* partition instance */
+	u8	rsv[3];
+	u32	offset;
+	u32	length;
+	struct bfi_alen_s alen;
+};
+
+/*
+ * Flash query response
+ */
+struct bfi_flash_query_rsp_s {
+	struct bfi_mhdr_s mh;	/* Common msg header */
+	u32	status;
+};
+
+/*
+ * Flash read response
+ */
+struct bfi_flash_read_rsp_s {
+	struct bfi_mhdr_s mh;	/* Common msg header */
+	u32	type;       /* partition type */
+	u8	instance;   /* partition instance */
+	u8	rsv[3];
+	u32	status;
+	u32	length;
+};
+
+/*
+ * Flash write response
+ */
+struct bfi_flash_write_rsp_s {
+	struct bfi_mhdr_s mh;	/* Common msg header */
+	u32	type;       /* partition type */
+	u8	instance;   /* partition instance */
+	u8	rsv[3];
+	u32	status;
+	u32	length;
+};
+
+/*
+ * Flash erase response
+ */
+struct bfi_flash_erase_rsp_s {
+	struct bfi_mhdr_s mh;	/* Common msg header */
+	u32	type;		/* partition type */
+	u8	instance;	/* partition instance */
+	u8	rsv[3];
+	u32	status;
+};
+
+/*
+ * Flash event notification
+ */
+struct bfi_flash_event_s {
+	struct bfi_mhdr_s	mh;	/* Common msg header */
+	bfa_status_t		status;
+	u32			param;
+};
+
+/*
+ *----------------------------------------------------------------------
+ *				DIAG
+ *----------------------------------------------------------------------
+ */
+enum bfi_diag_h2i {
+	BFI_DIAG_H2I_PORTBEACON = 1,
+	BFI_DIAG_H2I_LOOPBACK = 2,
+	BFI_DIAG_H2I_FWPING = 3,
+	BFI_DIAG_H2I_TEMPSENSOR = 4,
+	BFI_DIAG_H2I_LEDTEST = 5,
+	BFI_DIAG_H2I_QTEST      = 6,
+	BFI_DIAG_H2I_DPORT	= 7,
+};
+
+enum bfi_diag_i2h {
+	BFI_DIAG_I2H_PORTBEACON = BFA_I2HM(BFI_DIAG_H2I_PORTBEACON),
+	BFI_DIAG_I2H_LOOPBACK = BFA_I2HM(BFI_DIAG_H2I_LOOPBACK),
+	BFI_DIAG_I2H_FWPING = BFA_I2HM(BFI_DIAG_H2I_FWPING),
+	BFI_DIAG_I2H_TEMPSENSOR = BFA_I2HM(BFI_DIAG_H2I_TEMPSENSOR),
+	BFI_DIAG_I2H_LEDTEST = BFA_I2HM(BFI_DIAG_H2I_LEDTEST),
+	BFI_DIAG_I2H_QTEST      = BFA_I2HM(BFI_DIAG_H2I_QTEST),
+	BFI_DIAG_I2H_DPORT	= BFA_I2HM(BFI_DIAG_H2I_DPORT),
+	BFI_DIAG_I2H_DPORT_SCN	= BFA_I2HM(8),
+};
+
+#define BFI_DIAG_MAX_SGES	2
+#define BFI_DIAG_DMA_BUF_SZ	(2 * 1024)
+#define BFI_BOOT_MEMTEST_RES_ADDR 0x900
+#define BFI_BOOT_MEMTEST_RES_SIG  0xA0A1A2A3
+
+struct bfi_diag_lb_req_s {
+	struct bfi_mhdr_s mh;
+	u32	loopcnt;
+	u32	pattern;
+	u8	lb_mode;        /*!< bfa_port_opmode_t */
+	u8	speed;          /*!< bfa_port_speed_t */
+	u8	rsvd[2];
+};
+
+struct bfi_diag_lb_rsp_s {
+	struct bfi_mhdr_s  mh;          /* 4 bytes */
+	struct bfa_diag_loopback_result_s res; /* 16 bytes */
+};
+
+struct bfi_diag_fwping_req_s {
+	struct bfi_mhdr_s mh;	/* 4 bytes */
+	struct bfi_alen_s alen; /* 12 bytes */
+	u32	data;           /* user input data pattern */
+	u32	count;          /* user input dma count */
+	u8	qtag;           /* track CPE vc */
+	u8	rsv[3];
+};
+
+struct bfi_diag_fwping_rsp_s {
+	struct bfi_mhdr_s  mh;          /* 4 bytes */
+	u32	data;           /* user input data pattern    */
+	u8	qtag;           /* track CPE vc               */
+	u8	dma_status;     /* dma status                 */
+	u8	rsv[2];
+};
+
+/*
+ * Temperature Sensor
+ */
+struct bfi_diag_ts_req_s {
+	struct bfi_mhdr_s mh;	/* 4 bytes */
+	u16	temp;           /* 10-bit A/D value */
+	u16	brd_temp;       /* 9-bit board temp */
+	u8	status;
+	u8	ts_junc;        /* show junction tempsensor   */
+	u8	ts_brd;         /* show board tempsensor      */
+	u8	rsv;
+};
+#define bfi_diag_ts_rsp_t struct bfi_diag_ts_req_s
+
+struct bfi_diag_ledtest_req_s {
+	struct bfi_mhdr_s  mh;  /* 4 bytes */
+	u8	cmd;
+	u8	color;
+	u8	portid;
+	u8	led;    /* bitmap of LEDs to be tested */
+	u16	freq;   /* no. of blinks every 10 secs */
+	u8	rsv[2];
+};
+
+/* notify host led operation is done */
+struct bfi_diag_ledtest_rsp_s {
+	struct bfi_mhdr_s  mh;  /* 4 bytes */
+};
+
+struct bfi_diag_portbeacon_req_s {
+	struct bfi_mhdr_s  mh;  /* 4 bytes */
+	u32	period; /* beaconing period */
+	u8	beacon; /* 1: beacon on */
+	u8	rsvd[3];
+};
+
+/* notify host the beacon is off */
+struct bfi_diag_portbeacon_rsp_s {
+	struct bfi_mhdr_s  mh;  /* 4 bytes */
+};
+
+struct bfi_diag_qtest_req_s {
+	struct bfi_mhdr_s	mh;             /* 4 bytes */
+	u32	data[BFI_LMSG_PL_WSZ]; /* fill up tcm prefetch area */
+};
+#define bfi_diag_qtest_rsp_t struct bfi_diag_qtest_req_s
+
+/*
+ *	D-port test
+ */
+enum bfi_dport_req {
+	BFI_DPORT_DISABLE	= 0,	/* disable dport request	*/
+	BFI_DPORT_ENABLE	= 1,	/* enable dport request		*/
+	BFI_DPORT_START		= 2,	/* start dport request	*/
+	BFI_DPORT_SHOW		= 3,	/* show dport request	*/
+	BFI_DPORT_DYN_DISABLE	= 4,	/* disable dynamic dport request */
+};
+
+enum bfi_dport_scn {
+	BFI_DPORT_SCN_TESTSTART		= 1,
+	BFI_DPORT_SCN_TESTCOMP		= 2,
+	BFI_DPORT_SCN_SFP_REMOVED	= 3,
+	BFI_DPORT_SCN_DDPORT_ENABLE	= 4,
+	BFI_DPORT_SCN_DDPORT_DISABLE	= 5,
+	BFI_DPORT_SCN_FCPORT_DISABLE	= 6,
+	BFI_DPORT_SCN_SUBTESTSTART	= 7,
+	BFI_DPORT_SCN_TESTSKIP		= 8,
+	BFI_DPORT_SCN_DDPORT_DISABLED	= 9,
+};
+
+struct bfi_diag_dport_req_s {
+	struct bfi_mhdr_s	mh;	/* 4 bytes                      */
+	u8			req;	/* request 1: enable 0: disable	*/
+	u8			rsvd[3];
+	u32			lpcnt;
+	u32			payload;
+};
+
+struct bfi_diag_dport_rsp_s {
+	struct bfi_mhdr_s	mh;	/* header 4 bytes		*/
+	bfa_status_t		status;	/* reply status			*/
+	wwn_t			pwwn;	/* switch port wwn. 8 bytes	*/
+	wwn_t			nwwn;	/* switch node wwn. 8 bytes	*/
+};
+
+struct bfi_diag_dport_scn_teststart_s {
+	wwn_t	pwwn;	/* switch port wwn. 8 bytes */
+	wwn_t	nwwn;	/* switch node wwn. 8 bytes */
+	u8	type;	/* bfa_diag_dport_test_type_e */
+	u8	mode;	/* bfa_diag_dport_test_opmode */
+	u8	rsvd[2];
+	u32	numfrm; /* from switch, unit in 1M */
+};
+
+struct bfi_diag_dport_scn_testcomp_s {
+	u8	status; /* bfa_diag_dport_test_status_e */
+	u8	speed;  /* bfa_port_speed_t  */
+	u16	numbuffer; /* from switch  */
+	u8	subtest_status[DPORT_TEST_MAX];  /* 4 bytes */
+	u32	latency;   /* from switch  */
+	u32	distance;  /* from switch, unit in meters  */
+			/* Buffers required to saturate the link */
+	u16	frm_sz;	/* from switch for buf_reqd */
+	u8	rsvd[2];
+};
+
+struct bfi_diag_dport_scn_s {		/* max size == RDS_RMESZ	*/
+	struct bfi_mhdr_s	mh;	/* header 4 bytes		*/
+	u8			state;  /* new state			*/
+	u8			rsvd[3];
+	union {
+		struct bfi_diag_dport_scn_teststart_s teststart;
+		struct bfi_diag_dport_scn_testcomp_s testcomp;
+	} info;
+};
+
+union bfi_diag_dport_msg_u {
+	struct bfi_diag_dport_req_s	req;
+	struct bfi_diag_dport_rsp_s	rsp;
+	struct bfi_diag_dport_scn_s	scn;
+};
+
+/*
+ *	PHY module specific
+ */
+enum bfi_phy_h2i_msgs_e {
+	BFI_PHY_H2I_QUERY_REQ = 1,
+	BFI_PHY_H2I_STATS_REQ = 2,
+	BFI_PHY_H2I_WRITE_REQ = 3,
+	BFI_PHY_H2I_READ_REQ = 4,
+};
+
+enum bfi_phy_i2h_msgs_e {
+	BFI_PHY_I2H_QUERY_RSP = BFA_I2HM(1),
+	BFI_PHY_I2H_STATS_RSP = BFA_I2HM(2),
+	BFI_PHY_I2H_WRITE_RSP = BFA_I2HM(3),
+	BFI_PHY_I2H_READ_RSP = BFA_I2HM(4),
+};
+
+/*
+ * External PHY query request
+ */
+struct bfi_phy_query_req_s {
+	struct bfi_mhdr_s	mh;             /* Common msg header */
+	u8			instance;
+	u8			rsv[3];
+	struct bfi_alen_s	alen;
+};
+
+/*
+ * External PHY stats request
+ */
+struct bfi_phy_stats_req_s {
+	struct bfi_mhdr_s	mh;             /* Common msg header */
+	u8			instance;
+	u8			rsv[3];
+	struct bfi_alen_s	alen;
+};
+
+/*
+ * External PHY write request
+ */
+struct bfi_phy_write_req_s {
+	struct bfi_mhdr_s	mh;             /* Common msg header */
+	u8		instance;
+	u8		last;
+	u8		rsv[2];
+	u32		offset;
+	u32		length;
+	struct bfi_alen_s	alen;
+};
+
+/*
+ * External PHY read request
+ */
+struct bfi_phy_read_req_s {
+	struct bfi_mhdr_s	mh;	/* Common msg header */
+	u8		instance;
+	u8		rsv[3];
+	u32		offset;
+	u32		length;
+	struct bfi_alen_s	alen;
+};
+
+/*
+ * External PHY query response
+ */
+struct bfi_phy_query_rsp_s {
+	struct bfi_mhdr_s	mh;	/* Common msg header */
+	u32			status;
+};
+
+/*
+ * External PHY stats response
+ */
+struct bfi_phy_stats_rsp_s {
+	struct bfi_mhdr_s	mh;	/* Common msg header */
+	u32			status;
+};
+
+/*
+ * External PHY read response
+ */
+struct bfi_phy_read_rsp_s {
+	struct bfi_mhdr_s	mh;	/* Common msg header */
+	u32			status;
+	u32		length;
+};
+
+/*
+ * External PHY write response
+ */
+struct bfi_phy_write_rsp_s {
+	struct bfi_mhdr_s	mh;	/* Common msg header */
+	u32			status;
+	u32			length;
+};
+
+enum bfi_fru_h2i_msgs {
+	BFI_FRUVPD_H2I_WRITE_REQ = 1,
+	BFI_FRUVPD_H2I_READ_REQ = 2,
+	BFI_TFRU_H2I_WRITE_REQ = 3,
+	BFI_TFRU_H2I_READ_REQ = 4,
+};
+
+enum bfi_fru_i2h_msgs {
+	BFI_FRUVPD_I2H_WRITE_RSP = BFA_I2HM(1),
+	BFI_FRUVPD_I2H_READ_RSP = BFA_I2HM(2),
+	BFI_TFRU_I2H_WRITE_RSP = BFA_I2HM(3),
+	BFI_TFRU_I2H_READ_RSP = BFA_I2HM(4),
+};
+
+/*
+ * FRU write request
+ */
+struct bfi_fru_write_req_s {
+	struct bfi_mhdr_s	mh;	/* Common msg header */
+	u8			last;
+	u8			rsv_1[3];
+	u8			trfr_cmpl;
+	u8			rsv_2[3];
+	u32			offset;
+	u32			length;
+	struct bfi_alen_s	alen;
+};
+
+/*
+ * FRU read request
+ */
+struct bfi_fru_read_req_s {
+	struct bfi_mhdr_s	mh;	/* Common msg header */
+	u32			offset;
+	u32			length;
+	struct bfi_alen_s	alen;
+};
+
+/*
+ * FRU response
+ */
+struct bfi_fru_rsp_s {
+	struct bfi_mhdr_s	mh;	/* Common msg header */
+	u32			status;
+	u32			length;
+};
+#pragma pack()
+
+#endif /* __BFI_H__ */
diff --git a/drivers/scsi/bfa/bfi_ms.h b/drivers/scsi/bfa/bfi_ms.h
new file mode 100644
index 0000000..ccbd9e3
--- /dev/null
+++ b/drivers/scsi/bfa/bfi_ms.h
@@ -0,0 +1,879 @@
+/*
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014- QLogic Corporation.
+ * All rights reserved
+ * www.qlogic.com
+ *
+ * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __BFI_MS_H__
+#define __BFI_MS_H__
+
+#include "bfi.h"
+#include "bfa_fc.h"
+#include "bfa_defs_svc.h"
+
+#pragma pack(1)
+
+enum bfi_iocfc_h2i_msgs {
+	BFI_IOCFC_H2I_CFG_REQ		= 1,
+	BFI_IOCFC_H2I_SET_INTR_REQ	= 2,
+	BFI_IOCFC_H2I_UPDATEQ_REQ	= 3,
+	BFI_IOCFC_H2I_FAA_QUERY_REQ	= 4,
+	BFI_IOCFC_H2I_ADDR_REQ		= 5,
+};
+
+enum bfi_iocfc_i2h_msgs {
+	BFI_IOCFC_I2H_CFG_REPLY		= BFA_I2HM(1),
+	BFI_IOCFC_I2H_UPDATEQ_RSP	= BFA_I2HM(3),
+	BFI_IOCFC_I2H_FAA_QUERY_RSP	= BFA_I2HM(4),
+	BFI_IOCFC_I2H_ADDR_MSG		= BFA_I2HM(5),
+};
+
+struct bfi_iocfc_cfg_s {
+	u8	num_cqs;	/*  Number of CQs to be used	*/
+	u8	 sense_buf_len;	/*  SCSI sense length	    */
+	u16	rsvd_1;
+	u32	endian_sig;	/*  endian signature of host     */
+	u8	rsvd_2;
+	u8	single_msix_vec;
+	u8	rsvd[2];
+	__be16	num_ioim_reqs;
+	__be16	num_fwtio_reqs;
+
+
+	/*
+	 * Request and response circular queue base addresses, size and
+	 * shadow index pointers.
+	 */
+	union bfi_addr_u  req_cq_ba[BFI_IOC_MAX_CQS];
+	union bfi_addr_u  req_shadow_ci[BFI_IOC_MAX_CQS];
+	__be16    req_cq_elems[BFI_IOC_MAX_CQS];
+	union bfi_addr_u  rsp_cq_ba[BFI_IOC_MAX_CQS];
+	union bfi_addr_u  rsp_shadow_pi[BFI_IOC_MAX_CQS];
+	__be16    rsp_cq_elems[BFI_IOC_MAX_CQS];
+
+	union bfi_addr_u  stats_addr;	/*  DMA-able address for stats	  */
+	union bfi_addr_u  cfgrsp_addr;	/*  config response dma address  */
+	union bfi_addr_u  ioim_snsbase[BFI_IOIM_SNSBUF_SEGS];
+					/*  IO sense buf base addr segments */
+	struct bfa_iocfc_intr_attr_s intr_attr; /*  IOC interrupt attributes */
+};
+
+/*
+ * Boot target wwn information for this port. This contains either the stored
+ * or discovered boot target port wwns for the port.
+ */
+struct bfi_iocfc_bootwwns {
+	wwn_t		wwn[BFA_BOOT_BOOTLUN_MAX];
+	u8		nwwns;
+	u8		rsvd[7];
+};
+
+/**
+ * Queue configuration response from firmware
+ */
+struct bfi_iocfc_qreg_s {
+	u32	cpe_q_ci_off[BFI_IOC_MAX_CQS];
+	u32	cpe_q_pi_off[BFI_IOC_MAX_CQS];
+	u32	cpe_qctl_off[BFI_IOC_MAX_CQS];
+	u32	rme_q_ci_off[BFI_IOC_MAX_CQS];
+	u32	rme_q_pi_off[BFI_IOC_MAX_CQS];
+	u32	rme_qctl_off[BFI_IOC_MAX_CQS];
+	u8	hw_qid[BFI_IOC_MAX_CQS];
+};
+
+struct bfi_iocfc_cfgrsp_s {
+	struct bfa_iocfc_fwcfg_s	fwcfg;
+	struct bfa_iocfc_intr_attr_s	intr_attr;
+	struct bfi_iocfc_bootwwns	bootwwns;
+	struct bfi_pbc_s		pbc_cfg;
+	struct bfi_iocfc_qreg_s		qreg;
+};
+
+/*
+ * BFI_IOCFC_H2I_CFG_REQ message
+ */
+struct bfi_iocfc_cfg_req_s {
+	struct bfi_mhdr_s      mh;
+	union bfi_addr_u      ioc_cfg_dma_addr;
+};
+
+
+/*
+ * BFI_IOCFC_I2H_CFG_REPLY message
+ */
+struct bfi_iocfc_cfg_reply_s {
+	struct bfi_mhdr_s  mh;		/*  Common msg header	  */
+	u8	 cfg_success;	/*  cfg reply status	   */
+	u8	 lpu_bm;		/*  LPUs assigned for this IOC */
+	u8	 rsvd[2];
+};
+
+
+/*
+ * BFI_IOCFC_H2I_SET_INTR_REQ message
+ */
+struct bfi_iocfc_set_intr_req_s {
+	struct bfi_mhdr_s mh;		/*  common msg header		*/
+	u8		coalesce;	/*  enable intr coalescing	*/
+	u8		rsvd[3];
+	__be16	delay;		/*  delay timer 0..1125us	*/
+	__be16	latency;	/*  latency timer 0..225us	*/
+};
+
+
+/*
+ * BFI_IOCFC_H2I_UPDATEQ_REQ message
+ */
+struct bfi_iocfc_updateq_req_s {
+	struct bfi_mhdr_s mh;		/*  common msg header		*/
+	u32 reqq_ba;		/*  reqq base addr		*/
+	u32 rspq_ba;		/*  rspq base addr		*/
+	u32 reqq_sci;		/*  reqq shadow ci		*/
+	u32 rspq_spi;		/*  rspq shadow pi		*/
+};
+
+
+/*
+ * BFI_IOCFC_I2H_UPDATEQ_RSP message
+ */
+struct bfi_iocfc_updateq_rsp_s {
+	struct bfi_mhdr_s mh;		/*  common msg header	*/
+	u8	status;			/*  updateq  status	*/
+	u8	rsvd[3];
+};
+
+
+/*
+ * H2I Messages
+ */
+union bfi_iocfc_h2i_msg_u {
+	struct bfi_mhdr_s		mh;
+	struct bfi_iocfc_cfg_req_s	cfg_req;
+	struct bfi_iocfc_updateq_req_s updateq_req;
+	u32 mboxmsg[BFI_IOC_MSGSZ];
+};
+
+
+/*
+ * I2H Messages
+ */
+union bfi_iocfc_i2h_msg_u {
+	struct bfi_mhdr_s		mh;
+	struct bfi_iocfc_cfg_reply_s	cfg_reply;
+	struct bfi_iocfc_updateq_rsp_s updateq_rsp;
+	u32 mboxmsg[BFI_IOC_MSGSZ];
+};
+
+/*
+ * BFI_IOCFC_H2I_FAA_ENABLE_REQ & BFI_IOCFC_H2I_FAA_DISABLE_REQ message
+ */
+struct bfi_faa_en_dis_s {
+	struct bfi_mhdr_s mh;	/* common msg header    */
+};
+
+struct bfi_faa_addr_msg_s {
+	struct  bfi_mhdr_s mh;	/* common msg header	*/
+	u8	rsvd[4];
+	wwn_t	pwwn;		/* Fabric acquired PWWN	*/
+	wwn_t	nwwn;		/* Fabric acquired NWWN	*/
+};
+
+/*
+ * BFI_IOCFC_H2I_FAA_QUERY_REQ message
+ */
+struct bfi_faa_query_s {
+	struct bfi_mhdr_s mh;	/* common msg header    */
+	u8	faa_status;	/* FAA status           */
+	u8	addr_source;	/* PWWN source          */
+	u8	rsvd[2];
+	wwn_t	faa;		/* Fabric acquired PWWN	*/
+};
+
+/*
+ * BFI_IOCFC_I2H_FAA_ENABLE_RSP, BFI_IOCFC_I2H_FAA_DISABLE_RSP message
+ */
+struct bfi_faa_en_dis_rsp_s {
+	struct bfi_mhdr_s mh;	/* common msg header    */
+	u8	status;		/* enable/disable status */
+	u8	rsvd[3];
+};
+
+/*
+ * BFI_IOCFC_I2H_FAA_QUERY_RSP message
+ */
+#define bfi_faa_query_rsp_t struct bfi_faa_query_s
+
+enum bfi_fcport_h2i {
+	BFI_FCPORT_H2I_ENABLE_REQ		= (1),
+	BFI_FCPORT_H2I_DISABLE_REQ		= (2),
+	BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ	= (3),
+	BFI_FCPORT_H2I_STATS_GET_REQ		= (4),
+	BFI_FCPORT_H2I_STATS_CLEAR_REQ		= (5),
+};
+
+
+enum bfi_fcport_i2h {
+	BFI_FCPORT_I2H_ENABLE_RSP		= BFA_I2HM(1),
+	BFI_FCPORT_I2H_DISABLE_RSP		= BFA_I2HM(2),
+	BFI_FCPORT_I2H_SET_SVC_PARAMS_RSP	= BFA_I2HM(3),
+	BFI_FCPORT_I2H_STATS_GET_RSP		= BFA_I2HM(4),
+	BFI_FCPORT_I2H_STATS_CLEAR_RSP		= BFA_I2HM(5),
+	BFI_FCPORT_I2H_EVENT			= BFA_I2HM(6),
+	BFI_FCPORT_I2H_TRUNK_SCN		= BFA_I2HM(7),
+	BFI_FCPORT_I2H_ENABLE_AEN		= BFA_I2HM(8),
+	BFI_FCPORT_I2H_DISABLE_AEN		= BFA_I2HM(9),
+};
+
+
+/*
+ * Generic REQ type
+ */
+struct bfi_fcport_req_s {
+	struct bfi_mhdr_s  mh;		/*  msg header			    */
+	u32	   msgtag;	/*  msgtag for reply		    */
+};
+
+/*
+ * Generic RSP type
+ */
+struct bfi_fcport_rsp_s {
+	struct bfi_mhdr_s  mh;		/*  common msg header		    */
+	u8		   status;	/*  port enable status		    */
+	u8		   rsvd[3];
+	struct	bfa_port_cfg_s port_cfg;/* port configuration	*/
+	u32	msgtag;			/* msgtag for reply	*/
+};
+
+/*
+ * BFI_FCPORT_H2I_ENABLE_REQ
+ */
+struct bfi_fcport_enable_req_s {
+	struct bfi_mhdr_s  mh;		/*  msg header			    */
+	u32	   rsvd1;
+	wwn_t		   nwwn;	/*  node wwn of physical port	    */
+	wwn_t		   pwwn;	/*  port wwn of physical port	    */
+	struct bfa_port_cfg_s port_cfg; /*  port configuration	    */
+	union bfi_addr_u   stats_dma_addr; /*  DMA address for stats	    */
+	u32	   msgtag;	/*  msgtag for reply		    */
+	u8	use_flash_cfg;	/* get port cfg from flash */
+	u8	rsvd2[3];
+};
+
+/*
+ * BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ
+ */
+struct bfi_fcport_set_svc_params_req_s {
+	struct bfi_mhdr_s  mh;		/*  msg header */
+	__be16	   tx_bbcredit;	/*  Tx credits */
+	u8	rsvd[2];
+};
+
+/*
+ * BFI_FCPORT_I2H_EVENT
+ */
+struct bfi_fcport_event_s {
+	struct bfi_mhdr_s	mh;	/*  common msg header */
+	struct bfa_port_link_s	link_state;
+};
+
+/*
+ * BFI_FCPORT_I2H_TRUNK_SCN
+ */
+struct bfi_fcport_trunk_link_s {
+	wwn_t			trunk_wwn;
+	u8			fctl;		/* bfa_trunk_link_fctl_t */
+	u8			state;		/* bfa_trunk_link_state_t */
+	u8			speed;		/* bfa_port_speed_t */
+	u8			rsvd;
+	__be32		deskew;
+};
+
+#define BFI_FCPORT_MAX_LINKS	2
+struct bfi_fcport_trunk_scn_s {
+	struct bfi_mhdr_s	mh;
+	u8			trunk_state;	/* bfa_trunk_state_t */
+	u8			trunk_speed;	/* bfa_port_speed_t */
+	u8			rsvd_a[2];
+	struct bfi_fcport_trunk_link_s tlink[BFI_FCPORT_MAX_LINKS];
+};
+
+/*
+ * fcport H2I message
+ */
+union bfi_fcport_h2i_msg_u {
+	struct bfi_mhdr_s			*mhdr;
+	struct bfi_fcport_enable_req_s		*penable;
+	struct bfi_fcport_req_s			*pdisable;
+	struct bfi_fcport_set_svc_params_req_s	*psetsvcparams;
+	struct bfi_fcport_req_s			*pstatsget;
+	struct bfi_fcport_req_s			*pstatsclear;
+};
+
+/*
+ * fcport I2H message
+ */
+union bfi_fcport_i2h_msg_u {
+	struct bfi_msg_s			*msg;
+	struct bfi_fcport_rsp_s			*penable_rsp;
+	struct bfi_fcport_rsp_s			*pdisable_rsp;
+	struct bfi_fcport_rsp_s			*psetsvcparams_rsp;
+	struct bfi_fcport_rsp_s			*pstatsget_rsp;
+	struct bfi_fcport_rsp_s			*pstatsclear_rsp;
+	struct bfi_fcport_event_s		*event;
+	struct bfi_fcport_trunk_scn_s		*trunk_scn;
+};
+
+enum bfi_fcxp_h2i {
+	BFI_FCXP_H2I_SEND_REQ = 1,
+};
+
+enum bfi_fcxp_i2h {
+	BFI_FCXP_I2H_SEND_RSP = BFA_I2HM(1),
+};
+
+#define BFA_FCXP_MAX_SGES	2
+
+/*
+ * FCXP send request structure
+ */
+struct bfi_fcxp_send_req_s {
+	struct bfi_mhdr_s  mh;		/*  Common msg header		    */
+	__be16	fcxp_tag;	/*  driver request tag		    */
+	__be16	max_frmsz;	/*  max send frame size	    */
+	__be16	vf_id;		/*  vsan tag if applicable	    */
+	u16	rport_fw_hndl;	/*  FW Handle for the remote port  */
+	u8	 class;		/*  FC class used for req/rsp	    */
+	u8	 rsp_timeout;	/*  timeout in secs, 0-no response */
+	u8	 cts;		/*  continue sequence		    */
+	u8	 lp_fwtag;	/*  lport tag			    */
+	struct fchs_s	fchs;	/*  request FC header structure    */
+	__be32	req_len;	/*  request payload length	    */
+	__be32	rsp_maxlen;	/*  max response length expected   */
+	struct bfi_alen_s req_alen;	/* request buffer	*/
+	struct bfi_alen_s rsp_alen;	/* response buffer	*/
+};
+
+/*
+ * FCXP send response structure
+ */
+struct bfi_fcxp_send_rsp_s {
+	struct bfi_mhdr_s  mh;		/*  Common msg header		    */
+	__be16	fcxp_tag;	/*  send request tag		    */
+	u8	 req_status;	/*  request status		    */
+	u8	 rsvd;
+	__be32	rsp_len;	/*  actual response length	    */
+	__be32	residue_len;	/*  residual response length	    */
+	struct fchs_s	fchs;	/*  response FC header structure   */
+};
+
+enum bfi_uf_h2i {
+	BFI_UF_H2I_BUF_POST = 1,
+};
+
+enum bfi_uf_i2h {
+	BFI_UF_I2H_FRM_RCVD = BFA_I2HM(1),
+};
+
+#define BFA_UF_MAX_SGES	2
+
+struct bfi_uf_buf_post_s {
+	struct bfi_mhdr_s  mh;		/*  Common msg header		*/
+	u16	buf_tag;	/*  buffer tag			*/
+	__be16	buf_len;	/*  total buffer length	*/
+	struct bfi_alen_s alen;	/* buffer address/len pair	*/
+};
+
+struct bfi_uf_frm_rcvd_s {
+	struct bfi_mhdr_s  mh;		/*  Common msg header		*/
+	u16	buf_tag;	/*  buffer tag			*/
+	u16	rsvd;
+	u16	frm_len;	/*  received frame length	*/
+	u16	xfr_len;	/*  transferred length		*/
+};
+
+enum bfi_lps_h2i_msgs {
+	BFI_LPS_H2I_LOGIN_REQ	= 1,
+	BFI_LPS_H2I_LOGOUT_REQ	= 2,
+	BFI_LPS_H2I_N2N_PID_REQ = 3,
+};
+
+enum bfi_lps_i2h_msgs {
+	BFI_LPS_I2H_LOGIN_RSP	= BFA_I2HM(1),
+	BFI_LPS_I2H_LOGOUT_RSP	= BFA_I2HM(2),
+	BFI_LPS_I2H_CVL_EVENT	= BFA_I2HM(3),
+};
+
+struct bfi_lps_login_req_s {
+	struct bfi_mhdr_s  mh;		/*  common msg header		*/
+	u8		bfa_tag;
+	u8		alpa;
+	__be16		pdu_size;
+	wwn_t		pwwn;
+	wwn_t		nwwn;
+	u8		fdisc;
+	u8		auth_en;
+	u8		lps_role;
+	u8		bb_scn;
+	u32		vvl_flag;
+};
+
+struct bfi_lps_login_rsp_s {
+	struct bfi_mhdr_s  mh;		/*  common msg header		*/
+	u8		fw_tag;
+	u8		status;
+	u8		lsrjt_rsn;
+	u8		lsrjt_expl;
+	wwn_t		port_name;
+	wwn_t		node_name;
+	__be16		bb_credit;
+	u8		f_port;
+	u8		npiv_en;
+	u32	lp_pid:24;
+	u32	auth_req:8;
+	mac_t		lp_mac;
+	mac_t		fcf_mac;
+	u8		ext_status;
+	u8		brcd_switch;	/*  attached peer is brcd switch */
+	u8		bfa_tag;
+	u8		rsvd;
+};
+
+struct bfi_lps_logout_req_s {
+	struct bfi_mhdr_s  mh;		/*  common msg header		*/
+	u8		fw_tag;
+	u8		rsvd[3];
+	wwn_t		port_name;
+};
+
+struct bfi_lps_logout_rsp_s {
+	struct bfi_mhdr_s  mh;		/*  common msg header		*/
+	u8		bfa_tag;
+	u8		status;
+	u8		rsvd[2];
+};
+
+struct bfi_lps_cvl_event_s {
+	struct bfi_mhdr_s  mh;		/*  common msg header		*/
+	u8		bfa_tag;
+	u8		rsvd[3];
+};
+
+struct bfi_lps_n2n_pid_req_s {
+	struct bfi_mhdr_s	mh;	/*  common msg header		*/
+	u8	fw_tag;
+	u32	lp_pid:24;
+};
+
+union bfi_lps_h2i_msg_u {
+	struct bfi_mhdr_s		*msg;
+	struct bfi_lps_login_req_s	*login_req;
+	struct bfi_lps_logout_req_s	*logout_req;
+	struct bfi_lps_n2n_pid_req_s	*n2n_pid_req;
+};
+
+union bfi_lps_i2h_msg_u {
+	struct bfi_msg_s		*msg;
+	struct bfi_lps_login_rsp_s	*login_rsp;
+	struct bfi_lps_logout_rsp_s	*logout_rsp;
+	struct bfi_lps_cvl_event_s	*cvl_event;
+};
+
+enum bfi_rport_h2i_msgs {
+	BFI_RPORT_H2I_CREATE_REQ = 1,
+	BFI_RPORT_H2I_DELETE_REQ = 2,
+	BFI_RPORT_H2I_SET_SPEED_REQ  = 3,
+};
+
+enum bfi_rport_i2h_msgs {
+	BFI_RPORT_I2H_CREATE_RSP = BFA_I2HM(1),
+	BFI_RPORT_I2H_DELETE_RSP = BFA_I2HM(2),
+	BFI_RPORT_I2H_QOS_SCN    = BFA_I2HM(3),
+	BFI_RPORT_I2H_LIP_SCN_ONLINE =	BFA_I2HM(4),
+	BFI_RPORT_I2H_LIP_SCN_OFFLINE = BFA_I2HM(5),
+	BFI_RPORT_I2H_NO_DEV	= BFA_I2HM(6),
+};
+
+struct bfi_rport_create_req_s {
+	struct bfi_mhdr_s  mh;		/*  common msg header		*/
+	u16	bfa_handle;	/*  host rport handle		*/
+	__be16	max_frmsz;	/*  max rcv pdu size		*/
+	u32	pid:24,	/*  remote port ID		*/
+		lp_fwtag:8;	/*  local port tag		*/
+	u32	local_pid:24,	/*  local port ID		*/
+		cisc:8;
+	u8	fc_class;	/*  supported FC classes	*/
+	u8	vf_en;		/*  virtual fabric enable	*/
+	u16	vf_id;		/*  virtual fabric ID		*/
+};
+
+struct bfi_rport_create_rsp_s {
+	struct bfi_mhdr_s  mh;		/*  common msg header		*/
+	u8		status;		/*  rport creation status	*/
+	u8		rsvd[3];
+	u16	bfa_handle;	/*  host rport handle		*/
+	u16	fw_handle;	/*  firmware rport handle	*/
+	struct bfa_rport_qos_attr_s qos_attr;  /* QoS Attributes */
+};
+
+struct bfa_rport_speed_req_s {
+	struct bfi_mhdr_s  mh;		/*  common msg header		*/
+	u16	fw_handle;	/*  firmware rport handle	*/
+	u8		speed;		/*  rport's speed via RPSC	*/
+	u8		rsvd;
+};
+
+struct bfi_rport_delete_req_s {
+	struct bfi_mhdr_s  mh;		/*  common msg header		*/
+	u16	fw_handle;	/*  firmware rport handle	*/
+	u16	rsvd;
+};
+
+struct bfi_rport_delete_rsp_s {
+	struct bfi_mhdr_s  mh;		/*  common msg header		*/
+	u16	bfa_handle;	/*  host rport handle		*/
+	u8		status;		/*  rport deletion status	*/
+	u8		rsvd;
+};
+
+struct bfi_rport_qos_scn_s {
+	struct bfi_mhdr_s  mh;		/*  common msg header		*/
+	u16	bfa_handle;	/*  host rport handle		*/
+	u16	rsvd;
+	struct bfa_rport_qos_attr_s old_qos_attr;  /* Old QoS Attributes */
+	struct bfa_rport_qos_attr_s new_qos_attr;  /* New QoS Attributes */
+};
+
+struct bfi_rport_lip_scn_s {
+	struct bfi_mhdr_s  mh;		/*!< common msg header	*/
+	u16	bfa_handle;	/*!< host rport handle	*/
+	u8		status;		/*!< scn online status	*/
+	u8		rsvd;
+	struct bfa_fcport_loop_info_s	loop_info;
+};
+
+union bfi_rport_h2i_msg_u {
+	struct bfi_msg_s		*msg;
+	struct bfi_rport_create_req_s	*create_req;
+	struct bfi_rport_delete_req_s	*delete_req;
+	struct bfi_rport_speed_req_s	*speed_req;
+};
+
+union bfi_rport_i2h_msg_u {
+	struct bfi_msg_s		*msg;
+	struct bfi_rport_create_rsp_s	*create_rsp;
+	struct bfi_rport_delete_rsp_s	*delete_rsp;
+	struct bfi_rport_qos_scn_s	*qos_scn_evt;
+	struct bfi_rport_lip_scn_s	*lip_scn;
+};
+
+/*
+ * Initiator mode I-T nexus interface defines.
+ */
+
+enum bfi_itn_h2i {
+	BFI_ITN_H2I_CREATE_REQ = 1,	/*  i-t nexus creation */
+	BFI_ITN_H2I_DELETE_REQ = 2,	/*  i-t nexus deletion */
+};
+
+enum bfi_itn_i2h {
+	BFI_ITN_I2H_CREATE_RSP = BFA_I2HM(1),
+	BFI_ITN_I2H_DELETE_RSP = BFA_I2HM(2),
+	BFI_ITN_I2H_SLER_EVENT = BFA_I2HM(3),
+};
+
+struct bfi_itn_create_req_s {
+	struct bfi_mhdr_s  mh;		/*  common msg header		 */
+	u16	fw_handle;	/*  f/w handle for itnim	 */
+	u8	class;		/*  FC class for IO		 */
+	u8	seq_rec;	/*  sequence recovery support	 */
+	u8	msg_no;		/*  seq id of the msg		 */
+	u8	role;
+};
+
+struct bfi_itn_create_rsp_s {
+	struct bfi_mhdr_s  mh;		/*  common msg header		 */
+	u16	bfa_handle;	/*  bfa handle for itnim	 */
+	u8	status;		/*  fcp request status		 */
+	u8	seq_id;		/*  seq id of the msg		 */
+};
+
+struct bfi_itn_delete_req_s {
+	struct bfi_mhdr_s  mh;		/*  common msg header		 */
+	u16	fw_handle;	/*  f/w itnim handle		 */
+	u8	seq_id;		/*  seq id of the msg		 */
+	u8	rsvd;
+};
+
+struct bfi_itn_delete_rsp_s {
+	struct bfi_mhdr_s  mh;		/*  common msg header		 */
+	u16	bfa_handle;	/*  bfa handle for itnim	 */
+	u8	status;		/*  fcp request status		 */
+	u8	seq_id;		/*  seq id of the msg		 */
+};
+
+struct bfi_itn_sler_event_s {
+	struct bfi_mhdr_s  mh;		/*  common msg header		 */
+	u16	bfa_handle;	/*  bfa handle for itnim	 */
+	u16	rsvd;
+};
+
+union bfi_itn_h2i_msg_u {
+	struct bfi_itn_create_req_s *create_req;
+	struct bfi_itn_delete_req_s *delete_req;
+	struct bfi_msg_s	*msg;
+};
+
+union bfi_itn_i2h_msg_u {
+	struct bfi_itn_create_rsp_s *create_rsp;
+	struct bfi_itn_delete_rsp_s *delete_rsp;
+	struct bfi_itn_sler_event_s *sler_event;
+	struct bfi_msg_s	*msg;
+};
+
+/*
+ * Initiator mode IO interface defines.
+ */
+
+enum bfi_ioim_h2i {
+	BFI_IOIM_H2I_IOABORT_REQ = 1,	/*  IO abort request	 */
+	BFI_IOIM_H2I_IOCLEANUP_REQ = 2,	/*  IO cleanup request	 */
+};
+
+enum bfi_ioim_i2h {
+	BFI_IOIM_I2H_IO_RSP = BFA_I2HM(1),	/*  non-fp IO response	 */
+	BFI_IOIM_I2H_IOABORT_RSP = BFA_I2HM(2),	/*  ABORT rsp	 */
+};
+
+/*
+ * IO command DIF info
+ */
+struct bfi_ioim_dif_s {
+	u32	dif_info[4];
+};
+
+/*
+ * FCP IO messages overview
+ *
+ * @note
+ * - Max CDB length supported is 64 bytes.
+ * - SCSI linked commands and SCSI bi-directional commands are not
+ *	supported.
+ *
+ */
+struct bfi_ioim_req_s {
+	struct bfi_mhdr_s  mh;		/*  Common msg header		 */
+	__be16	io_tag;		/*  I/O tag			 */
+	u16	rport_hdl;	/*  itnim/rport firmware handle */
+	struct fcp_cmnd_s	cmnd;	/*  IO request info	*/
+
+	/*
+	 * SG elements array within the IO request must be double word
+	 * aligned. This alignment is required to optimize SGM setup for the IO.
+	 */
+	struct bfi_sge_s	sges[BFI_SGE_INLINE_MAX];
+	u8	io_timeout;
+	u8	dif_en;
+	u8	rsvd_a[2];
+	struct bfi_ioim_dif_s  dif;
+};
+
+/*
+ *	This table shows various IO status codes from firmware and their
+ *	meaning. The host driver can use these status codes to further
+ *	process IO completions.
+ *
+ *	BFI_IOIM_STS_OK		: IO completed with error free SCSI and
+ *				  transport status.
+ *				- io-tag can be reused.
+ *
+ *	BFA_IOIM_STS_SCSI_ERR	: IO completed with scsi error.
+ *				- io-tag can be reused.
+ *
+ *	BFI_IOIM_STS_HOST_ABORTED : IO was aborted successfully due to
+ *				  host request.
+ *				- io-tag cannot be reused yet.
+ *
+ *	BFI_IOIM_STS_ABORTED	: IO was aborted successfully internally
+ *				  by f/w.
+ *				- io-tag cannot be reused yet.
+ *
+ *	BFI_IOIM_STS_TIMEDOUT	: IO timed out and ABTS/RRQ is happening
+ *				  in the firmware.
+ *				- io-tag cannot be reused yet.
+ *
+ *	BFI_IOIM_STS_SQER_NEEDED : Firmware could not recover the IO with
+ *				  sequence level error logic and hence the
+ *				  host needs to retry this IO with a
+ *				  different IO tag.
+ *				- io-tag cannot be used yet.
+ *
+ *	BFI_IOIM_STS_NEXUS_ABORT : Second Level Error Recovery from the
+ *				  host is required because 2 consecutive
+ *				  ABTS timed out and the host needs to
+ *				  logout and re-login with the target.
+ *				- io-tag cannot be used yet.
+ *
+ *	BFI_IOIM_STS_UNDERRUN	: IO completed with SCSI status good,
+ *				  but the data transferred is less than
+ *				  the fcp data length in the command,
+ *				  e.g. SCSI INQUIRY, where the transferred
+ *				  data length and the residue count in the
+ *				  FCP response account for the total fcp-dl.
+ *				- io-tag can be reused.
+ *
+ *	BFI_IOIM_STS_OVERRUN	: IO completed with SCSI status good,
+ *				  but the data transferred is more than
+ *				  the fcp data length in the command,
+ *				  e.g. TAPE IOs, where blocks can be of
+ *				  unequal lengths.
+ *				- io-tag can be reused.
+ *
+ *	BFI_IOIM_STS_RES_FREE	: Firmware has completed using the io-tag
+ *				  during the abort process.
+ *				- io-tag can be reused.
+ *
+ *	BFI_IOIM_STS_PROTO_ERR	: Firmware detected a protocol error,
+ *				  e.g. the target sent more data than
+ *				  requested, or there was data frame
+ *				  loss, among other reasons.
+ *				- io-tag cannot be used yet.
+ *
+ *	BFI_IOIM_STS_DIF_ERR	: Firmware detected a DIF error, e.g. a
+ *				  DIF CRC, Ref Tag or App Tag error.
+ *				- io-tag can be reused.
+ *
+ *	BFA_IOIM_STS_TSK_MGT_ABORT : IO was aborted because of a Task
+ *				  Management command from the host.
+ *				- io-tag can be reused.
+ *
+ *	BFI_IOIM_STS_UTAG	: Firmware does not know about this
+ *				  io_tag.
+ *				- io-tag can be reused.
+ */
+enum bfi_ioim_status {
+	BFI_IOIM_STS_OK = 0,
+	BFI_IOIM_STS_HOST_ABORTED = 1,
+	BFI_IOIM_STS_ABORTED = 2,
+	BFI_IOIM_STS_TIMEDOUT = 3,
+	BFI_IOIM_STS_RES_FREE = 4,
+	BFI_IOIM_STS_SQER_NEEDED = 5,
+	BFI_IOIM_STS_PROTO_ERR = 6,
+	BFI_IOIM_STS_UTAG = 7,
+	BFI_IOIM_STS_PATHTOV = 8,
+};
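+
+/*
+ * Illustrative sketch (an assumption, not part of the original
+ * firmware interface): how a host driver might map the completion
+ * status to io-tag reuse, restating the table above. In practice the
+ * reuse_io_tag field of the response message (below) is authoritative.
+ */
+static inline int
+bfi_ioim_tag_reusable(enum bfi_ioim_status sts)
+{
+	switch (sts) {
+	case BFI_IOIM_STS_OK:
+	case BFI_IOIM_STS_RES_FREE:
+	case BFI_IOIM_STS_UTAG:
+		return 1;	/* io-tag may be reused immediately */
+	default:
+		return 0;	/* firmware still owns the io-tag */
+	}
+}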
+
+/*
+ * I/O response message
+ */
+struct bfi_ioim_rsp_s {
+	struct bfi_mhdr_s	mh;	/*  common msg header		*/
+	__be16	io_tag;		/*  completed IO tag		 */
+	u16	bfa_rport_hndl;	/*  related rport handle	 */
+	u8	io_status;	/*  IO completion status	 */
+	u8	reuse_io_tag;	/*  IO tag can be reused	*/
+	u16	abort_tag;	/*  host abort request tag	*/
+	u8		scsi_status;	/*  scsi status from target	 */
+	u8		sns_len;	/*  scsi sense length		 */
+	u8		resid_flags;	/*  IO residue flags		 */
+	u8		rsvd_a;
+	__be32	residue;	/*  IO residual length in bytes */
+	u32	rsvd_b[3];
+};
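+
+/*
+ * Usage note (illustrative, not from the original header): fields
+ * declared __be16/__be32 arrive big-endian and must be byte-swapped
+ * before use, e.g.:
+ *
+ *	u16 tag   = be16_to_cpu(rsp->io_tag);
+ *	u32 resid = be32_to_cpu(rsp->residue);
+ */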
+
+struct bfi_ioim_abort_req_s {
+	struct bfi_mhdr_s  mh;	/*  Common msg header  */
+	__be16	io_tag;	/*  I/O tag	*/
+	u16	abort_tag;	/*  unique request tag */
+};
+
+/*
+ * Initiator mode task management command interface defines.
+ */
+
+enum bfi_tskim_h2i {
+	BFI_TSKIM_H2I_TM_REQ	= 1, /*  task-mgmt command	*/
+	BFI_TSKIM_H2I_ABORT_REQ = 2, /*  task-mgmt abort request	*/
+};
+
+enum bfi_tskim_i2h {
+	BFI_TSKIM_I2H_TM_RSP = BFA_I2HM(1),
+};
+
+struct bfi_tskim_req_s {
+	struct bfi_mhdr_s  mh;	/*  Common msg header	*/
+	__be16	tsk_tag;	/*  task management tag	*/
+	u16	itn_fhdl;	/*  itn firmware handle	*/
+	struct scsi_lun lun;	/*  LU number	*/
+	u8	tm_flags;	/*  see enum fcp_tm_cmnd	*/
+	u8	t_secs;	/*  Timeout value in seconds	*/
+	u8	rsvd[2];
+};
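+
+/*
+ * Illustrative sketch (an assumption, not part of the original
+ * interface): populating a task-management request. Assumes the
+ * bfi_h2i_set() helper from bfi.h, the BFI_MC_TSKIM message class,
+ * and the SCSI midlayer's int_to_scsilun() helper.
+ */
+static inline void
+bfi_tskim_req_fill(struct bfi_tskim_req_s *m, u16 tsk_tag, u16 itn_fhdl,
+		   u64 lun, u8 tm_flags, u8 t_secs)
+{
+	bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ, 0);
+	m->tsk_tag  = cpu_to_be16(tsk_tag);	/* wire format is big-endian */
+	m->itn_fhdl = itn_fhdl;
+	int_to_scsilun(lun, &m->lun);
+	m->tm_flags = tm_flags;			/* see enum fcp_tm_cmnd */
+	m->t_secs   = t_secs;			/* timeout in seconds */
+}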
+
+struct bfi_tskim_abortreq_s {
+	struct bfi_mhdr_s  mh;	/*  Common msg header	*/
+	__be16	tsk_tag;	/*  task management tag	*/
+	u16	rsvd;
+};
+
+enum bfi_tskim_status {
+	/*
+	 * The following are FCP-4 spec defined status codes;
+	 * ** DO NOT CHANGE THEM **
+	 */
+	BFI_TSKIM_STS_OK	= 0,
+	BFI_TSKIM_STS_NOT_SUPP = 4,
+	BFI_TSKIM_STS_FAILED	= 5,
+
+	/*
+	 * Defined by BFA
+	 */
+	BFI_TSKIM_STS_TIMEOUT  = 10,	/*  TM request timed out	*/
+	BFI_TSKIM_STS_ABORTED  = 11,	/*  Aborted on host request */
+	BFI_TSKIM_STS_UTAG     = 12,	/*  unknown tag for request */
+};
+
+struct bfi_tskim_rsp_s {
+	struct bfi_mhdr_s  mh;		/*  Common msg header		 */
+	__be16	tsk_tag;	/*  task mgmt cmnd tag		 */
+	u8	tsk_status;	/*  @ref bfi_tskim_status */
+	u8	rsvd;
+};
+
+#pragma pack()
+
+/*
+ * Crossbow PCI MSI-X vector defines
+ */
+enum {
+	BFI_MSIX_CPE_QMIN_CB = 0,
+	BFI_MSIX_CPE_QMAX_CB = 7,
+	BFI_MSIX_RME_QMIN_CB = 8,
+	BFI_MSIX_RME_QMAX_CB = 15,
+	BFI_MSIX_CB_MAX = 22,
+};
+
+/*
+ * Catapult FC PCI MSI-X vector defines
+ */
+enum {
+	BFI_MSIX_LPU_ERR_CT = 0,
+	BFI_MSIX_CPE_QMIN_CT = 1,
+	BFI_MSIX_CPE_QMAX_CT = 4,
+	BFI_MSIX_RME_QMIN_CT = 5,
+	BFI_MSIX_RME_QMAX_CT = 8,
+	BFI_MSIX_CT_MAX = 9,
+};
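+
+/*
+ * Illustrative sketch (an assumption, not from the original header):
+ * on Catapult the CPE/RME queues map linearly onto the MSI-X vector
+ * ranges above, with vector 0 reserved for LPU error events.
+ */
+static inline int
+bfi_msix_cpe_vec_ct(int queue)
+{
+	return BFI_MSIX_CPE_QMIN_CT + queue;	/* queue in [0, 3] */
+}
+
+static inline int
+bfi_msix_rme_vec_ct(int queue)
+{
+	return BFI_MSIX_RME_QMIN_CT + queue;	/* queue in [0, 3] */
+}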
+
+#endif /* __BFI_MS_H__ */
diff --git a/drivers/scsi/bfa/bfi_reg.h b/drivers/scsi/bfa/bfi_reg.h
new file mode 100644
index 0000000..fd5b876
--- /dev/null
+++ b/drivers/scsi/bfa/bfi_reg.h
@@ -0,0 +1,460 @@
+/*
+ * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
+ * Copyright (c) 2014- QLogic Corporation.
+ * All rights reserved
+ * www.qlogic.com
+ *
+ * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+/*
+ * bfi_reg.h ASIC register defines for all QLogic BR-series adapter ASICs
+ */
+
+#ifndef __BFI_REG_H__
+#define __BFI_REG_H__
+
+#define HOSTFN0_INT_STATUS		0x00014000	/* cb/ct	*/
+#define HOSTFN1_INT_STATUS		0x00014100	/* cb/ct	*/
+#define HOSTFN2_INT_STATUS		0x00014300	/* ct		*/
+#define HOSTFN3_INT_STATUS		0x00014400	/* ct		*/
+#define HOSTFN0_INT_MSK			0x00014004	/* cb/ct	*/
+#define HOSTFN1_INT_MSK			0x00014104	/* cb/ct	*/
+#define HOSTFN2_INT_MSK			0x00014304	/* ct		*/
+#define HOSTFN3_INT_MSK			0x00014404	/* ct		*/
+
+#define HOST_PAGE_NUM_FN0		0x00014008	/* cb/ct	*/
+#define HOST_PAGE_NUM_FN1		0x00014108	/* cb/ct	*/
+#define HOST_PAGE_NUM_FN2		0x00014308	/* ct		*/
+#define HOST_PAGE_NUM_FN3		0x00014408	/* ct		*/
+
+#define APP_PLL_LCLK_CTL_REG		0x00014204	/* cb/ct	*/
+#define __P_LCLK_PLL_LOCK		0x80000000
+#define __APP_PLL_LCLK_SRAM_USE_100MHZ	0x00100000
+#define __APP_PLL_LCLK_RESET_TIMER_MK	0x000e0000
+#define __APP_PLL_LCLK_RESET_TIMER_SH	17
+#define __APP_PLL_LCLK_RESET_TIMER(_v)	((_v) << __APP_PLL_LCLK_RESET_TIMER_SH)
+#define __APP_PLL_LCLK_LOGIC_SOFT_RESET	0x00010000
+#define __APP_PLL_LCLK_CNTLMT0_1_MK	0x0000c000
+#define __APP_PLL_LCLK_CNTLMT0_1_SH	14
+#define __APP_PLL_LCLK_CNTLMT0_1(_v)	((_v) << __APP_PLL_LCLK_CNTLMT0_1_SH)
+#define __APP_PLL_LCLK_JITLMT0_1_MK	0x00003000
+#define __APP_PLL_LCLK_JITLMT0_1_SH	12
+#define __APP_PLL_LCLK_JITLMT0_1(_v)	((_v) << __APP_PLL_LCLK_JITLMT0_1_SH)
+#define __APP_PLL_LCLK_HREF		0x00000800
+#define __APP_PLL_LCLK_HDIV		0x00000400
+#define __APP_PLL_LCLK_P0_1_MK		0x00000300
+#define __APP_PLL_LCLK_P0_1_SH		8
+#define __APP_PLL_LCLK_P0_1(_v)		((_v) << __APP_PLL_LCLK_P0_1_SH)
+#define __APP_PLL_LCLK_Z0_2_MK		0x000000e0
+#define __APP_PLL_LCLK_Z0_2_SH		5
+#define __APP_PLL_LCLK_Z0_2(_v)		((_v) << __APP_PLL_LCLK_Z0_2_SH)
+#define __APP_PLL_LCLK_RSEL200500	0x00000010
+#define __APP_PLL_LCLK_ENARST		0x00000008
+#define __APP_PLL_LCLK_BYPASS		0x00000004
+#define __APP_PLL_LCLK_LRESETN		0x00000002
+#define __APP_PLL_LCLK_ENABLE		0x00000001
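+/*
+ * Usage note (illustrative, not from the original header): the
+ * _MK/_SH/(_v) triplets above follow the usual read-modify-write
+ * pattern; with rb as the mapped register base, programming the
+ * reset-timer field would look like:
+ *
+ *	u32 r = readl(rb + APP_PLL_LCLK_CTL_REG);
+ *	r = (r & ~__APP_PLL_LCLK_RESET_TIMER_MK) |
+ *	    __APP_PLL_LCLK_RESET_TIMER(3);
+ *	writel(r, rb + APP_PLL_LCLK_CTL_REG);
+ */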
+#define APP_PLL_SCLK_CTL_REG		0x00014208	/* cb/ct	*/
+#define __P_SCLK_PLL_LOCK		0x80000000
+#define __APP_PLL_SCLK_RESET_TIMER_MK	0x000e0000
+#define __APP_PLL_SCLK_RESET_TIMER_SH	17
+#define __APP_PLL_SCLK_RESET_TIMER(_v)	((_v) << __APP_PLL_SCLK_RESET_TIMER_SH)
+#define __APP_PLL_SCLK_LOGIC_SOFT_RESET	0x00010000
+#define __APP_PLL_SCLK_CNTLMT0_1_MK	0x0000c000
+#define __APP_PLL_SCLK_CNTLMT0_1_SH	14
+#define __APP_PLL_SCLK_CNTLMT0_1(_v)	((_v) << __APP_PLL_SCLK_CNTLMT0_1_SH)
+#define __APP_PLL_SCLK_JITLMT0_1_MK	0x00003000
+#define __APP_PLL_SCLK_JITLMT0_1_SH	12
+#define __APP_PLL_SCLK_JITLMT0_1(_v)	((_v) << __APP_PLL_SCLK_JITLMT0_1_SH)
+#define __APP_PLL_SCLK_HREF		0x00000800
+#define __APP_PLL_SCLK_HDIV		0x00000400
+#define __APP_PLL_SCLK_P0_1_MK		0x00000300
+#define __APP_PLL_SCLK_P0_1_SH		8
+#define __APP_PLL_SCLK_P0_1(_v)		((_v) << __APP_PLL_SCLK_P0_1_SH)
+#define __APP_PLL_SCLK_Z0_2_MK		0x000000e0
+#define __APP_PLL_SCLK_Z0_2_SH		5
+#define __APP_PLL_SCLK_Z0_2(_v)		((_v) << __APP_PLL_SCLK_Z0_2_SH)
+#define __APP_PLL_SCLK_RSEL200500	0x00000010
+#define __APP_PLL_SCLK_ENARST		0x00000008
+#define __APP_PLL_SCLK_BYPASS		0x00000004
+#define __APP_PLL_SCLK_LRESETN		0x00000002
+#define __APP_PLL_SCLK_ENABLE		0x00000001
+#define __ENABLE_MAC_AHB_1		0x00800000	/* ct		*/
+#define __ENABLE_MAC_AHB_0		0x00400000	/* ct		*/
+#define __ENABLE_MAC_1			0x00200000	/* ct		*/
+#define __ENABLE_MAC_0			0x00100000	/* ct		*/
+
+#define HOST_SEM0_REG			0x00014230	/* cb/ct	*/
+#define HOST_SEM1_REG			0x00014234	/* cb/ct	*/
+#define HOST_SEM2_REG			0x00014238	/* cb/ct	*/
+#define HOST_SEM3_REG			0x0001423c	/* cb/ct	*/
+#define HOST_SEM4_REG			0x00014610	/* cb/ct	*/
+#define HOST_SEM5_REG			0x00014614	/* cb/ct	*/
+#define HOST_SEM6_REG			0x00014618	/* cb/ct	*/
+#define HOST_SEM7_REG			0x0001461c	/* cb/ct	*/
+#define HOST_SEM0_INFO_REG		0x00014240	/* cb/ct	*/
+#define HOST_SEM1_INFO_REG		0x00014244	/* cb/ct	*/
+#define HOST_SEM2_INFO_REG		0x00014248	/* cb/ct	*/
+#define HOST_SEM3_INFO_REG		0x0001424c	/* cb/ct	*/
+#define HOST_SEM4_INFO_REG		0x00014620	/* cb/ct	*/
+#define HOST_SEM5_INFO_REG		0x00014624	/* cb/ct	*/
+#define HOST_SEM6_INFO_REG		0x00014628	/* cb/ct	*/
+#define HOST_SEM7_INFO_REG		0x0001462c	/* cb/ct	*/
+
+#define HOSTFN0_LPU0_CMD_STAT		0x00019000	/* cb/ct	*/
+#define HOSTFN0_LPU1_CMD_STAT		0x00019004	/* cb/ct	*/
+#define HOSTFN1_LPU0_CMD_STAT		0x00019010	/* cb/ct	*/
+#define HOSTFN1_LPU1_CMD_STAT		0x00019014	/* cb/ct	*/
+#define HOSTFN2_LPU0_CMD_STAT		0x00019150	/* ct		*/
+#define HOSTFN2_LPU1_CMD_STAT		0x00019154	/* ct		*/
+#define HOSTFN3_LPU0_CMD_STAT		0x00019160	/* ct		*/
+#define HOSTFN3_LPU1_CMD_STAT		0x00019164	/* ct		*/
+#define LPU0_HOSTFN0_CMD_STAT		0x00019008	/* cb/ct	*/
+#define LPU1_HOSTFN0_CMD_STAT		0x0001900c	/* cb/ct	*/
+#define LPU0_HOSTFN1_CMD_STAT		0x00019018	/* cb/ct	*/
+#define LPU1_HOSTFN1_CMD_STAT		0x0001901c	/* cb/ct	*/
+#define LPU0_HOSTFN2_CMD_STAT		0x00019158	/* ct		*/
+#define LPU1_HOSTFN2_CMD_STAT		0x0001915c	/* ct		*/
+#define LPU0_HOSTFN3_CMD_STAT		0x00019168	/* ct		*/
+#define LPU1_HOSTFN3_CMD_STAT		0x0001916c	/* ct		*/
+
+#define PSS_CTL_REG			0x00018800	/* cb/ct	*/
+#define __PSS_I2C_CLK_DIV_MK		0x007f0000
+#define __PSS_I2C_CLK_DIV_SH		16
+#define __PSS_I2C_CLK_DIV(_v)		((_v) << __PSS_I2C_CLK_DIV_SH)
+#define __PSS_LMEM_INIT_DONE		0x00001000
+#define __PSS_LMEM_RESET		0x00000200
+#define __PSS_LMEM_INIT_EN		0x00000100
+#define __PSS_LPU1_RESET		0x00000002
+#define __PSS_LPU0_RESET		0x00000001
+#define PSS_ERR_STATUS_REG		0x00018810	/* cb/ct	*/
+#define ERR_SET_REG			0x00018818	/* cb/ct	*/
+#define PSS_GPIO_OUT_REG		0x000188c0	/* cb/ct	*/
+#define __PSS_GPIO_OUT_REG		0x00000fff
+#define PSS_GPIO_OE_REG			0x000188c8	/* cb/ct	*/
+#define __PSS_GPIO_OE_REG		0x000000ff
+
+#define HOSTFN0_LPU_MBOX0_0		0x00019200	/* cb/ct	*/
+#define HOSTFN1_LPU_MBOX0_8		0x00019260	/* cb/ct	*/
+#define LPU_HOSTFN0_MBOX0_0		0x00019280	/* cb/ct	*/
+#define LPU_HOSTFN1_MBOX0_8		0x000192e0	/* cb/ct	*/
+#define HOSTFN2_LPU_MBOX0_0		0x00019400	/* ct		*/
+#define HOSTFN3_LPU_MBOX0_8		0x00019460	/* ct		*/
+#define LPU_HOSTFN2_MBOX0_0		0x00019480	/* ct		*/
+#define LPU_HOSTFN3_MBOX0_8		0x000194e0	/* ct		*/
+
+#define HOST_MSIX_ERR_INDEX_FN0		0x0001400c	/* ct		*/
+#define HOST_MSIX_ERR_INDEX_FN1		0x0001410c	/* ct		*/
+#define HOST_MSIX_ERR_INDEX_FN2		0x0001430c	/* ct		*/
+#define HOST_MSIX_ERR_INDEX_FN3		0x0001440c	/* ct		*/
+
+#define MBIST_CTL_REG			0x00014220	/* ct		*/
+#define __EDRAM_BISTR_START		0x00000004
+#define MBIST_STAT_REG			0x00014224	/* ct		*/
+#define ETH_MAC_SER_REG			0x00014288	/* ct		*/
+#define __APP_EMS_CKBUFAMPIN		0x00000020
+#define __APP_EMS_REFCLKSEL		0x00000010
+#define __APP_EMS_CMLCKSEL		0x00000008
+#define __APP_EMS_REFCKBUFEN2		0x00000004
+#define __APP_EMS_REFCKBUFEN1		0x00000002
+#define __APP_EMS_CHANNEL_SEL		0x00000001
+#define FNC_PERS_REG			0x00014604	/* ct		*/
+#define __F3_FUNCTION_ACTIVE		0x80000000
+#define __F3_FUNCTION_MODE		0x40000000
+#define __F3_PORT_MAP_MK		0x30000000
+#define __F3_PORT_MAP_SH		28
+#define __F3_PORT_MAP(_v)		((_v) << __F3_PORT_MAP_SH)
+#define __F3_VM_MODE			0x08000000
+#define __F3_INTX_STATUS_MK		0x07000000
+#define __F3_INTX_STATUS_SH		24
+#define __F3_INTX_STATUS(_v)		((_v) << __F3_INTX_STATUS_SH)
+#define __F2_FUNCTION_ACTIVE		0x00800000
+#define __F2_FUNCTION_MODE		0x00400000
+#define __F2_PORT_MAP_MK		0x00300000
+#define __F2_PORT_MAP_SH		20
+#define __F2_PORT_MAP(_v)		((_v) << __F2_PORT_MAP_SH)
+#define __F2_VM_MODE			0x00080000
+#define __F2_INTX_STATUS_MK		0x00070000
+#define __F2_INTX_STATUS_SH		16
+#define __F2_INTX_STATUS(_v)		((_v) << __F2_INTX_STATUS_SH)
+#define __F1_FUNCTION_ACTIVE		0x00008000
+#define __F1_FUNCTION_MODE		0x00004000
+#define __F1_PORT_MAP_MK		0x00003000
+#define __F1_PORT_MAP_SH		12
+#define __F1_PORT_MAP(_v)		((_v) << __F1_PORT_MAP_SH)
+#define __F1_VM_MODE			0x00000800
+#define __F1_INTX_STATUS_MK		0x00000700
+#define __F1_INTX_STATUS_SH		8
+#define __F1_INTX_STATUS(_v)		((_v) << __F1_INTX_STATUS_SH)
+#define __F0_FUNCTION_ACTIVE		0x00000080
+#define __F0_FUNCTION_MODE		0x00000040
+#define __F0_PORT_MAP_MK		0x00000030
+#define __F0_PORT_MAP_SH		4
+#define __F0_PORT_MAP(_v)		((_v) << __F0_PORT_MAP_SH)
+#define __F0_VM_MODE			0x00000008
+#define __F0_INTX_STATUS		0x00000007
+enum {
+	__F0_INTX_STATUS_MSIX = 0x0,
+	__F0_INTX_STATUS_INTA = 0x1,
+	__F0_INTX_STATUS_INTB = 0x2,
+	__F0_INTX_STATUS_INTC = 0x3,
+	__F0_INTX_STATUS_INTD = 0x4,
+};
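+
+/*
+ * Usage note (illustrative, not from the original header): each
+ * function's INTX status is extracted from FNC_PERS_REG with the
+ * mask/shift pairs above, e.g. for function 1:
+ *
+ *	sts = (readl(rb + FNC_PERS_REG) & __F1_INTX_STATUS_MK) >>
+ *			__F1_INTX_STATUS_SH;
+ */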
+
+#define OP_MODE				0x0001460c	/* ct		*/
+#define __APP_ETH_CLK_LOWSPEED		0x00000004
+#define __GLOBAL_CORECLK_HALFSPEED	0x00000002
+#define __GLOBAL_FCOE_MODE		0x00000001
+#define FW_INIT_HALT_P0			0x000191ac	/* ct		*/
+#define __FW_INIT_HALT_P		0x00000001
+#define FW_INIT_HALT_P1			0x000191bc	/* ct		*/
+#define PMM_1T_RESET_REG_P0		0x0002381c	/* ct		*/
+#define __PMM_1T_RESET_P		0x00000001
+#define PMM_1T_RESET_REG_P1		0x00023c1c	/* ct		*/
+
+/*
+ * Catapult-2 specific defines
+ */
+#define CT2_PCI_CPQ_BASE		0x00030000
+#define CT2_PCI_APP_BASE		0x00030100
+#define CT2_PCI_ETH_BASE		0x00030400
+
+/*
+ * APP block registers
+ */
+#define CT2_HOSTFN_INT_STATUS		(CT2_PCI_APP_BASE + 0x00)
+#define CT2_HOSTFN_INTR_MASK		(CT2_PCI_APP_BASE + 0x04)
+#define CT2_HOSTFN_PERSONALITY0		(CT2_PCI_APP_BASE + 0x08)
+#define __PME_STATUS_			0x00200000
+#define __PF_VF_BAR_SIZE_MODE__MK	0x00180000
+#define __PF_VF_BAR_SIZE_MODE__SH	19
+#define __PF_VF_BAR_SIZE_MODE_(_v)	((_v) << __PF_VF_BAR_SIZE_MODE__SH)
+#define __FC_LL_PORT_MAP__MK		0x00060000
+#define __FC_LL_PORT_MAP__SH		17
+#define __FC_LL_PORT_MAP_(_v)		((_v) << __FC_LL_PORT_MAP__SH)
+#define __PF_VF_ACTIVE_			0x00010000
+#define __PF_VF_CFG_RDY_		0x00008000
+#define __PF_VF_ENABLE_			0x00004000
+#define __PF_DRIVER_ACTIVE_		0x00002000
+#define __PF_PME_SEND_ENABLE_		0x00001000
+#define __PF_EXROM_OFFSET__MK		0x00000ff0
+#define __PF_EXROM_OFFSET__SH		4
+#define __PF_EXROM_OFFSET_(_v)		((_v) << __PF_EXROM_OFFSET__SH)
+#define __FC_LL_MODE_			0x00000008
+#define __PF_INTX_PIN_			0x00000007
+#define CT2_HOSTFN_PERSONALITY1		(CT2_PCI_APP_BASE + 0x0C)
+#define __PF_NUM_QUEUES1__MK		0xff000000
+#define __PF_NUM_QUEUES1__SH		24
+#define __PF_NUM_QUEUES1_(_v)		((_v) << __PF_NUM_QUEUES1__SH)
+#define __PF_VF_QUE_OFFSET1__MK		0x00ff0000
+#define __PF_VF_QUE_OFFSET1__SH		16
+#define __PF_VF_QUE_OFFSET1_(_v)	((_v) << __PF_VF_QUE_OFFSET1__SH)
+#define __PF_VF_NUM_QUEUES__MK		0x0000ff00
+#define __PF_VF_NUM_QUEUES__SH		8
+#define __PF_VF_NUM_QUEUES_(_v)		((_v) << __PF_VF_NUM_QUEUES__SH)
+#define __PF_VF_QUE_OFFSET_		0x000000ff
+#define CT2_HOSTFN_PAGE_NUM		(CT2_PCI_APP_BASE + 0x18)
+#define CT2_HOSTFN_MSIX_VT_INDEX_MBOX_ERR	(CT2_PCI_APP_BASE + 0x38)
+
+/*
+ * Catapult-2 CPQ block registers
+ */
+#define CT2_HOSTFN_LPU0_MBOX0		(CT2_PCI_CPQ_BASE + 0x00)
+#define CT2_HOSTFN_LPU1_MBOX0		(CT2_PCI_CPQ_BASE + 0x20)
+#define CT2_LPU0_HOSTFN_MBOX0		(CT2_PCI_CPQ_BASE + 0x40)
+#define CT2_LPU1_HOSTFN_MBOX0		(CT2_PCI_CPQ_BASE + 0x60)
+#define CT2_HOSTFN_LPU0_CMD_STAT	(CT2_PCI_CPQ_BASE + 0x80)
+#define CT2_HOSTFN_LPU1_CMD_STAT	(CT2_PCI_CPQ_BASE + 0x84)
+#define CT2_LPU0_HOSTFN_CMD_STAT	(CT2_PCI_CPQ_BASE + 0x88)
+#define CT2_LPU1_HOSTFN_CMD_STAT	(CT2_PCI_CPQ_BASE + 0x8c)
+#define CT2_HOSTFN_LPU0_READ_STAT	(CT2_PCI_CPQ_BASE + 0x90)
+#define CT2_HOSTFN_LPU1_READ_STAT	(CT2_PCI_CPQ_BASE + 0x94)
+#define CT2_LPU0_HOSTFN_MBOX0_MSK	(CT2_PCI_CPQ_BASE + 0x98)
+#define CT2_LPU1_HOSTFN_MBOX0_MSK	(CT2_PCI_CPQ_BASE + 0x9C)
+#define CT2_HOST_SEM0_REG		0x000148f0
+#define CT2_HOST_SEM1_REG		0x000148f4
+#define CT2_HOST_SEM2_REG		0x000148f8
+#define CT2_HOST_SEM3_REG		0x000148fc
+#define CT2_HOST_SEM4_REG		0x00014900
+#define CT2_HOST_SEM5_REG		0x00014904
+#define CT2_HOST_SEM6_REG		0x00014908
+#define CT2_HOST_SEM7_REG		0x0001490c
+#define CT2_HOST_SEM0_INFO_REG		0x000148b0
+#define CT2_HOST_SEM1_INFO_REG		0x000148b4
+#define CT2_HOST_SEM2_INFO_REG		0x000148b8
+#define CT2_HOST_SEM3_INFO_REG		0x000148bc
+#define CT2_HOST_SEM4_INFO_REG		0x000148c0
+#define CT2_HOST_SEM5_INFO_REG		0x000148c4
+#define CT2_HOST_SEM6_INFO_REG		0x000148c8
+#define CT2_HOST_SEM7_INFO_REG		0x000148cc
+
+#define CT2_APP_PLL_LCLK_CTL_REG	0x00014808
+#define __APP_LPUCLK_HALFSPEED		0x40000000
+#define __APP_PLL_LCLK_LOAD		0x20000000
+#define __APP_PLL_LCLK_FBCNT_MK		0x1fe00000
+#define __APP_PLL_LCLK_FBCNT_SH		21
+#define __APP_PLL_LCLK_FBCNT(_v)	((_v) << __APP_PLL_LCLK_FBCNT_SH)
+enum {
+	__APP_PLL_LCLK_FBCNT_425_MHZ = 6,
+	__APP_PLL_LCLK_FBCNT_468_MHZ = 4,
+};
+#define __APP_PLL_LCLK_EXTFB		0x00000800
+#define __APP_PLL_LCLK_ENOUTS		0x00000400
+#define __APP_PLL_LCLK_RATE		0x00000010
+#define CT2_APP_PLL_SCLK_CTL_REG	0x0001480c
+#define __P_SCLK_PLL_LOCK		0x80000000
+#define __APP_PLL_SCLK_REFCLK_SEL	0x40000000
+#define __APP_PLL_SCLK_CLK_DIV2		0x20000000
+#define __APP_PLL_SCLK_LOAD		0x10000000
+#define __APP_PLL_SCLK_FBCNT_MK		0x0ff00000
+#define __APP_PLL_SCLK_FBCNT_SH		20
+#define __APP_PLL_SCLK_FBCNT(_v)	((_v) << __APP_PLL_SCLK_FBCNT_SH)
+enum {
+	__APP_PLL_SCLK_FBCNT_NORM = 6,
+	__APP_PLL_SCLK_FBCNT_10G_FC = 10,
+};
+#define __APP_PLL_SCLK_EXTFB		0x00000800
+#define __APP_PLL_SCLK_ENOUTS		0x00000400
+#define __APP_PLL_SCLK_RATE		0x00000010
+#define CT2_PCIE_MISC_REG		0x00014804
+#define __ETH_CLK_ENABLE_PORT1		0x00000010
+#define CT2_CHIP_MISC_PRG		0x000148a4
+#define __ETH_CLK_ENABLE_PORT0		0x00004000
+#define __APP_LPU_SPEED			0x00000002
+#define CT2_MBIST_STAT_REG		0x00014818
+#define CT2_MBIST_CTL_REG		0x0001481c
+#define CT2_PMM_1T_CONTROL_REG_P0	0x0002381c
+#define __PMM_1T_PNDB_P			0x00000002
+#define CT2_PMM_1T_CONTROL_REG_P1	0x00023c1c
+#define CT2_WGN_STATUS			0x00014990
+#define __A2T_AHB_LOAD			0x00000800
+#define __WGN_READY			0x00000400
+#define __GLBL_PF_VF_CFG_RDY		0x00000200
+#define CT2_NFC_STS_REG			0x00027410
+#define CT2_NFC_CSR_CLR_REG		0x00027420
+#define CT2_NFC_CSR_SET_REG		0x00027424
+#define __HALT_NFC_CONTROLLER		0x00000002
+#define __NFC_CONTROLLER_HALTED		0x00001000
+#define CT2_RSC_GPR15_REG		0x0002765c
+#define CT2_CSI_FW_CTL_REG		0x00027080
+#define CT2_CSI_FW_CTL_SET_REG		0x00027088
+#define __RESET_AND_START_SCLK_LCLK_PLLS 0x00010000
+
+#define CT2_CSI_MAC0_CONTROL_REG	0x000270d0
+#define __CSI_MAC_RESET			0x00000010
+#define __CSI_MAC_AHB_RESET		0x00000008
+#define CT2_CSI_MAC1_CONTROL_REG	0x000270d4
+#define CT2_CSI_MAC_CONTROL_REG(__n)	\
+	(CT2_CSI_MAC0_CONTROL_REG +	\
+	(__n) * (CT2_CSI_MAC1_CONTROL_REG - CT2_CSI_MAC0_CONTROL_REG))
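+/*
+ * Usage note (illustrative): the macro above derives the per-MAC
+ * address by stride, so CT2_CSI_MAC_CONTROL_REG(0) expands to
+ * CT2_CSI_MAC0_CONTROL_REG and CT2_CSI_MAC_CONTROL_REG(1) to
+ * CT2_CSI_MAC1_CONTROL_REG.
+ */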
+
+#define CT2_NFC_FLASH_STS_REG		0x00014834
+#define __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS	0x00000020
+/*
+ * Name semaphore registers based on usage
+ */
+#define BFA_IOC0_HBEAT_REG		HOST_SEM0_INFO_REG
+#define BFA_IOC0_STATE_REG		HOST_SEM1_INFO_REG
+#define BFA_IOC1_HBEAT_REG		HOST_SEM2_INFO_REG
+#define BFA_IOC1_STATE_REG		HOST_SEM3_INFO_REG
+#define BFA_FW_USE_COUNT		HOST_SEM4_INFO_REG
+#define BFA_IOC_FAIL_SYNC		HOST_SEM5_INFO_REG
+
+/*
+ * CT2 semaphore register locations changed
+ */
+#define CT2_BFA_IOC0_HBEAT_REG		CT2_HOST_SEM0_INFO_REG
+#define CT2_BFA_IOC0_STATE_REG		CT2_HOST_SEM1_INFO_REG
+#define CT2_BFA_IOC1_HBEAT_REG		CT2_HOST_SEM2_INFO_REG
+#define CT2_BFA_IOC1_STATE_REG		CT2_HOST_SEM3_INFO_REG
+#define CT2_BFA_FW_USE_COUNT		CT2_HOST_SEM4_INFO_REG
+#define CT2_BFA_IOC_FAIL_SYNC		CT2_HOST_SEM5_INFO_REG
+
+#define CPE_Q_NUM(__fn, __q)	(((__fn) << 2) + (__q))
+#define RME_Q_NUM(__fn, __q)	(((__fn) << 2) + (__q))
+
+/*
+ * Host interrupt status bit-field defines corresponding to the queue
+ * numbers above
+ */
+#define __HFN_INT_CPE_Q0	0x00000001U
+#define __HFN_INT_CPE_Q1	0x00000002U
+#define __HFN_INT_CPE_Q2	0x00000004U
+#define __HFN_INT_CPE_Q3	0x00000008U
+#define __HFN_INT_CPE_Q4	0x00000010U
+#define __HFN_INT_CPE_Q5	0x00000020U
+#define __HFN_INT_CPE_Q6	0x00000040U
+#define __HFN_INT_CPE_Q7	0x00000080U
+#define __HFN_INT_RME_Q0	0x00000100U
+#define __HFN_INT_RME_Q1	0x00000200U
+#define __HFN_INT_RME_Q2	0x00000400U
+#define __HFN_INT_RME_Q3	0x00000800U
+#define __HFN_INT_RME_Q4	0x00001000U
+#define __HFN_INT_RME_Q5	0x00002000U
+#define __HFN_INT_RME_Q6	0x00004000U
+#define __HFN_INT_RME_Q7	0x00008000U
+#define __HFN_INT_ERR_EMC	0x00010000U
+#define __HFN_INT_ERR_LPU0	0x00020000U
+#define __HFN_INT_ERR_LPU1	0x00040000U
+#define __HFN_INT_ERR_PSS	0x00080000U
+#define __HFN_INT_MBOX_LPU0	0x00100000U
+#define __HFN_INT_MBOX_LPU1	0x00200000U
+#define __HFN_INT_MBOX1_LPU0	0x00400000U
+#define __HFN_INT_MBOX1_LPU1	0x00800000U
+#define __HFN_INT_LL_HALT	0x01000000U
+#define __HFN_INT_CPE_MASK	0x000000ffU
+#define __HFN_INT_RME_MASK	0x0000ff00U
+#define __HFN_INT_ERR_MASK	\
+	(__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 | __HFN_INT_ERR_LPU1 | \
+	 __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT)
+#define __HFN_INT_FN0_MASK	\
+	(__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 | __HFN_INT_CPE_Q2 | \
+	 __HFN_INT_CPE_Q3 | __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 | \
+	 __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 | __HFN_INT_MBOX_LPU0)
+#define __HFN_INT_FN1_MASK	\
+	(__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 | __HFN_INT_CPE_Q6 | \
+	 __HFN_INT_CPE_Q7 | __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 | \
+	 __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 | __HFN_INT_MBOX_LPU1)
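+
+/*
+ * Illustrative sketch (an assumption, not from the original header):
+ * the per-queue interrupt bits line up with CPE_Q_NUM()/RME_Q_NUM(),
+ * so the status bit for a given function's queue can be computed.
+ * Assumes the kernel's u32 type is in scope for users of this header.
+ */
+static inline u32
+__hfn_int_cpe_bit(int fn, int q)
+{
+	/* e.g. fn 1, q 0 -> CPE_Q_NUM = 4 -> __HFN_INT_CPE_Q4 */
+	return __HFN_INT_CPE_Q0 << CPE_Q_NUM(fn, q);
+}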
+
+/*
+ * Host interrupt status defines for catapult-2
+ */
+#define __HFN_INT_MBOX_LPU0_CT2	0x00010000U
+#define __HFN_INT_MBOX_LPU1_CT2	0x00020000U
+#define __HFN_INT_ERR_PSS_CT2	0x00040000U
+#define __HFN_INT_ERR_LPU0_CT2	0x00080000U
+#define __HFN_INT_ERR_LPU1_CT2	0x00100000U
+#define __HFN_INT_CPQ_HALT_CT2	0x00200000U
+#define __HFN_INT_ERR_WGN_CT2	0x00400000U
+#define __HFN_INT_ERR_LEHRX_CT2	0x00800000U
+#define __HFN_INT_ERR_LEHTX_CT2	0x01000000U
+#define __HFN_INT_ERR_MASK_CT2	\
+	(__HFN_INT_ERR_PSS_CT2 | __HFN_INT_ERR_LPU0_CT2 | \
+	 __HFN_INT_ERR_LPU1_CT2 | __HFN_INT_CPQ_HALT_CT2 | \
+	 __HFN_INT_ERR_WGN_CT2 | __HFN_INT_ERR_LEHRX_CT2 | \
+	 __HFN_INT_ERR_LEHTX_CT2)
+#define __HFN_INT_FN0_MASK_CT2	\
+	(__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 | __HFN_INT_CPE_Q2 | \
+	 __HFN_INT_CPE_Q3 | __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 | \
+	 __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 | __HFN_INT_MBOX_LPU0_CT2)
+#define __HFN_INT_FN1_MASK_CT2	\
+	(__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 | __HFN_INT_CPE_Q6 | \
+	 __HFN_INT_CPE_Q7 | __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 | \
+	 __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 | __HFN_INT_MBOX_LPU1_CT2)
+
+/*
+ * ASIC memory map.
+ */
+#define PSS_SMEM_PAGE_START		0x8000
+#define PSS_SMEM_PGNUM(_pg0, _ma)	((_pg0) + ((_ma) >> 15))
+#define PSS_SMEM_PGOFF(_ma)		((_ma) & 0x7fff)
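+
+/*
+ * Usage note (illustrative, not from the original header): a shared
+ * memory address _ma is split into a 32 KB page and an offset within
+ * it, relative to a function's first page _pg0:
+ *
+ *	page = PSS_SMEM_PGNUM(_pg0, _ma);	(_ma >> 15 pages in)
+ *	off  = PSS_SMEM_PGOFF(_ma);		(low 15 bits)
+ */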
+
+#endif /* __BFI_REG_H__ */