Update Linux to v5.4.2

Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 94f4d8f..e390f8c 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -4,7 +4,7 @@
  *
  * Module interface and handling of zfcp data structures.
  *
- * Copyright IBM Corp. 2002, 2013
+ * Copyright IBM Corp. 2002, 2017
  */
 
 /*
@@ -124,6 +124,9 @@
 {
 	int retval = -ENOMEM;
 
+	if (zfcp_experimental_dix)
+		pr_warn("DIX is enabled. It is experimental and might cause problems\n");
+
 	zfcp_fsf_qtcb_cache = zfcp_cache_hw_align("zfcp_fsf_qtcb",
 						  sizeof(struct fsf_qtcb));
 	if (!zfcp_fsf_qtcb_cache)
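Note: zfcp_experimental_dix is only declared extern in the zfcp_ext.h hunk further down; its definition lies outside this section. As a rough sketch of how such a flag is usually wired up, where the parameter name, location and permissions are assumptions rather than anything this patch shows:

	/* Hypothetical definition, e.g. in zfcp_scsi.c (not part of this diff). */
	bool zfcp_experimental_dix;
	module_param_named(dix, zfcp_experimental_dix, bool, 0400); /* name/perms assumed */
	MODULE_PARM_DESC(dix, "Enable experimental DIX support");

With a definition along those lines, the warning added above fires once at module load whenever the flag is set.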
@@ -248,43 +251,36 @@
 
 static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
 {
-	if (adapter->pool.erp_req)
-		mempool_destroy(adapter->pool.erp_req);
-	if (adapter->pool.scsi_req)
-		mempool_destroy(adapter->pool.scsi_req);
-	if (adapter->pool.scsi_abort)
-		mempool_destroy(adapter->pool.scsi_abort);
-	if (adapter->pool.qtcb_pool)
-		mempool_destroy(adapter->pool.qtcb_pool);
-	if (adapter->pool.status_read_req)
-		mempool_destroy(adapter->pool.status_read_req);
-	if (adapter->pool.sr_data)
-		mempool_destroy(adapter->pool.sr_data);
-	if (adapter->pool.gid_pn)
-		mempool_destroy(adapter->pool.gid_pn);
+	mempool_destroy(adapter->pool.erp_req);
+	mempool_destroy(adapter->pool.scsi_req);
+	mempool_destroy(adapter->pool.scsi_abort);
+	mempool_destroy(adapter->pool.qtcb_pool);
+	mempool_destroy(adapter->pool.status_read_req);
+	mempool_destroy(adapter->pool.sr_data);
+	mempool_destroy(adapter->pool.gid_pn);
 }
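The NULL checks can go because mempool_destroy(), like kfree(), is a no-op when passed a NULL pointer, so a partially set up pool structure can be torn down unconditionally. A minimal illustration of the idiom (sketch only, not code from this driver):

	/* mempool_destroy(NULL) does nothing, so no per-pool guard is needed: */
	static void example_free_pools(struct zfcp_adapter_mempool *pool)
	{
		mempool_destroy(pool->erp_req);	/* may still be NULL after a failed setup */
		mempool_destroy(pool->scsi_req);
	}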
 
 /**
  * zfcp_status_read_refill - refill the long running status_read_requests
  * @adapter: ptr to struct zfcp_adapter for which the buffers should be refilled
  *
- * Returns: 0 on success, 1 otherwise
- *
- * if there are 16 or more status_read requests missing an adapter_reopen
- * is triggered
+ * Return:
+ * * 0 on success, meaning at least one status read is pending
+ * * 1 if posting failed and not a single status read buffer is pending,
+ *     also triggers adapter reopen recovery
  */
 int zfcp_status_read_refill(struct zfcp_adapter *adapter)
 {
-	while (atomic_read(&adapter->stat_miss) > 0)
+	while (atomic_add_unless(&adapter->stat_miss, -1, 0))
 		if (zfcp_fsf_status_read(adapter->qdio)) {
+			atomic_inc(&adapter->stat_miss); /* undo add -1 */
 			if (atomic_read(&adapter->stat_miss) >=
 			    adapter->stat_read_buf_num) {
 				zfcp_erp_adapter_reopen(adapter, 0, "axsref1");
 				return 1;
 			}
 			break;
-		} else
-			atomic_dec(&adapter->stat_miss);
+		}
 	return 0;
 }
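atomic_add_unless(&adapter->stat_miss, -1, 0) decrements the miss counter only while it is still positive and reports whether it did, so the test and the decrement happen as one atomic step; when posting the status read fails, the provisional decrement is undone with atomic_inc() before the threshold check. Roughly what the helper does, written out as a compare-and-swap loop (simplified sketch, not the kernel's implementation):

	/* Decrement *v unless it is already zero; return whether we decremented. */
	static bool example_dec_unless_zero(atomic_t *v)
	{
		int old = atomic_read(v);

		while (old != 0) {
			/* on failure, atomic_try_cmpxchg() refreshes 'old' and we retry */
			if (atomic_try_cmpxchg(v, &old, old - 1))
				return true;
		}
		return false;
	}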
 
@@ -407,7 +403,6 @@
 		goto failed;
 
 	/* report size limit per scatter-gather segment */
-	adapter->dma_parms.max_segment_size = ZFCP_QDIO_SBALE_LEN;
 	adapter->ccw_device->dev.dma_parms = &adapter->dma_parms;
 
 	adapter->stat_read_buf_num = FSF_STATUS_READS_RECOM;
@@ -542,45 +537,3 @@
 	zfcp_ccw_adapter_put(adapter);
 	return ERR_PTR(retval);
 }
-
-/**
- * zfcp_sg_free_table - free memory used by scatterlists
- * @sg: pointer to scatterlist
- * @count: number of scatterlist which are to be free'ed
- * the scatterlist are expected to reference pages always
- */
-void zfcp_sg_free_table(struct scatterlist *sg, int count)
-{
-	int i;
-
-	for (i = 0; i < count; i++, sg++)
-		if (sg)
-			free_page((unsigned long) sg_virt(sg));
-		else
-			break;
-}
-
-/**
- * zfcp_sg_setup_table - init scatterlist and allocate, assign buffers
- * @sg: pointer to struct scatterlist
- * @count: number of scatterlists which should be assigned with buffers
- * of size page
- *
- * Returns: 0 on success, -ENOMEM otherwise
- */
-int zfcp_sg_setup_table(struct scatterlist *sg, int count)
-{
-	void *addr;
-	int i;
-
-	sg_init_table(sg, count);
-	for (i = 0; i < count; i++, sg++) {
-		addr = (void *) get_zeroed_page(GFP_KERNEL);
-		if (!addr) {
-			zfcp_sg_free_table(sg, i);
-			return -ENOMEM;
-		}
-		sg_set_buf(sg, addr, PAGE_SIZE);
-	}
-	return 0;
-}
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 3b368fc..dccdb41 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -63,7 +63,8 @@
 
 /**
  * zfcp_dbf_hba_fsf_res - trace event for fsf responses
- * @tag: tag indicating which kind of unsolicited status has been received
+ * @tag: tag indicating which kind of FSF response has been received
+ * @level: trace level to be used for event
  * @req: request for which a response was received
  */
 void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
@@ -81,8 +82,8 @@
 	rec->id = ZFCP_DBF_HBA_RES;
 	rec->fsf_req_id = req->req_id;
 	rec->fsf_req_status = req->status;
-	rec->fsf_cmd = req->fsf_command;
-	rec->fsf_seq_no = req->seq_no;
+	rec->fsf_cmd = q_head->fsf_command;
+	rec->fsf_seq_no = q_pref->req_seq_no;
 	rec->u.res.req_issued = req->issued;
 	rec->u.res.prot_status = q_pref->prot_status;
 	rec->u.res.fsf_status = q_head->fsf_status;
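rec->fsf_cmd and rec->fsf_seq_no are now taken from the QTCB (header and prefix) because the corresponding struct zfcp_fsf_req members are removed in the zfcp_def.h hunks of this patch; unsolicited status buffers, which carry no QTCB, are tagged with the FSF_QTCB_UNSOLICITED_STATUS constant in the hunks below. A hypothetical accessor that captures the qtcb == NULL convention (not something this patch adds, the trace code open-codes the accesses instead):

	static inline u32 example_fsf_cmd(const struct zfcp_fsf_req *req)
	{
		/* status read buffers are the only requests without a QTCB */
		return req->qtcb ? req->qtcb->header.fsf_command
				 : FSF_QTCB_UNSOLICITED_STATUS;
	}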
@@ -94,7 +95,7 @@
 	memcpy(rec->u.res.fsf_status_qual, &q_head->fsf_status_qual,
 	       FSF_STATUS_QUALIFIER_SIZE);
 
-	if (req->fsf_command != FSF_QTCB_FCP_CMND) {
+	if (q_head->fsf_command != FSF_QTCB_FCP_CMND) {
 		rec->pl_len = q_head->log_length;
 		zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start,
 				  rec->pl_len, "fsf_res", req->req_id);
@@ -127,7 +128,7 @@
 	rec->id = ZFCP_DBF_HBA_USS;
 	rec->fsf_req_id = req->req_id;
 	rec->fsf_req_status = req->status;
-	rec->fsf_cmd = req->fsf_command;
+	rec->fsf_cmd = FSF_QTCB_UNSOLICITED_STATUS;
 
 	if (!srb)
 		goto log;
@@ -153,7 +154,7 @@
 
 /**
  * zfcp_dbf_hba_bit_err - trace event for bit error conditions
- * @tag: tag indicating which kind of unsolicited status has been received
+ * @tag: tag indicating which kind of bit error unsolicited status was received
  * @req: request which caused the bit_error condition
  */
 void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req)
@@ -174,7 +175,7 @@
 	rec->id = ZFCP_DBF_HBA_BIT;
 	rec->fsf_req_id = req->req_id;
 	rec->fsf_req_status = req->status;
-	rec->fsf_cmd = req->fsf_command;
+	rec->fsf_cmd = FSF_QTCB_UNSOLICITED_STATUS;
 	memcpy(&rec->u.be, &sr_buf->payload.bit_error,
 	       sizeof(struct fsf_bit_error_payload));
 
@@ -224,6 +225,7 @@
 
 /**
  * zfcp_dbf_hba_basic - trace event for basic adapter events
+ * @tag: identifier for event
  * @adapter: pointer to struct zfcp_adapter
  */
 void zfcp_dbf_hba_basic(char *tag, struct zfcp_adapter *adapter)
@@ -357,7 +359,7 @@
 	rec->u.run.fsf_req_id = erp->fsf_req_id;
 	rec->u.run.rec_status = erp->status;
 	rec->u.run.rec_step = erp->step;
-	rec->u.run.rec_action = erp->action;
+	rec->u.run.rec_action = erp->type;
 
 	if (erp->sdev)
 		rec->u.run.rec_count =
@@ -478,7 +480,8 @@
 /**
  * zfcp_dbf_san_req - trace event for issued SAN request
  * @tag: identifier for event
- * @fsf_req: request containing issued CT data
+ * @fsf: request containing issued CT or ELS data
+ * @d_id: N_Port_ID where SAN request is sent to
  * d_id: destination ID
  */
 void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id)
@@ -560,7 +563,7 @@
 /**
  * zfcp_dbf_san_res - trace event for received SAN request
  * @tag: identifier for event
- * @fsf_req: request containing issued CT data
+ * @fsf: request containing received CT or ELS data
  */
 void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
 {
@@ -580,7 +583,7 @@
 /**
  * zfcp_dbf_san_in_els - trace event for incoming ELS
  * @tag: identifier for event
- * @fsf_req: request containing issued CT data
+ * @fsf: request containing received ELS data
  */
 void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
 {
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index d116c07..900c779 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -42,7 +42,8 @@
  * @fsf_req_id: request id for fsf requests
  * @rec_status: status of the fsf request
  * @rec_step: current step of the recovery action
- * rec_count: recovery counter
+ * @rec_action: ERP action type
+ * @rec_count: recoveries including retries for a particular @rec_action
  */
 struct zfcp_dbf_rec_running {
 	u64 fsf_req_id;
@@ -72,6 +73,7 @@
  * @adapter_status: current status of the adapter
  * @port_status: current status of the port
  * @lun_status: current status of the lun
+ * @u: record type specific data
  * @u.trig: structure zfcp_dbf_rec_trigger
  * @u.run: structure zfcp_dbf_rec_running
  */
@@ -126,6 +128,8 @@
  * @prot_status_qual: protocol status qualifier
  * @fsf_status: fsf status
  * @fsf_status_qual: fsf status qualifier
+ * @port_handle: handle for port
+ * @lun_handle: handle for LUN
  */
 struct zfcp_dbf_hba_res {
 	u64 req_issued;
@@ -158,6 +162,7 @@
  * @ZFCP_DBF_HBA_RES: response trace record
  * @ZFCP_DBF_HBA_USS: unsolicited status trace record
  * @ZFCP_DBF_HBA_BIT: bit error trace record
+ * @ZFCP_DBF_HBA_BASIC: basic adapter event, only trace tag, no other data
  */
 enum zfcp_dbf_hba_id {
 	ZFCP_DBF_HBA_RES	= 1,
@@ -176,6 +181,9 @@
  * @fsf_seq_no: fsf sequence number
  * @pl_len: length of payload stored as zfcp_dbf_pay
  * @u: record type specific data
+ * @u.res: data for fsf responses
+ * @u.uss: data for unsolicited status buffer
+ * @u.be:  data for bit error unsolicited status buffer
  */
 struct zfcp_dbf_hba {
 	u8 id;
@@ -339,8 +347,8 @@
 				      zfcp_dbf_hba_fsf_resp_suppress(req)
 				      ? 5 : 1, req);
 
-	} else if ((req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) ||
-		   (req->fsf_command == FSF_QTCB_OPEN_LUN)) {
+	} else if ((qtcb->header.fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) ||
+		   (qtcb->header.fsf_command == FSF_QTCB_OPEN_LUN)) {
 		zfcp_dbf_hba_fsf_resp("fs_open", 4, req);
 
 	} else if (qtcb->header.log_length) {
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 3396a47..87d2f47 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -4,7 +4,7 @@
  *
  * Global definitions for the zfcp device driver.
  *
- * Copyright IBM Corp. 2002, 2010
+ * Copyright IBM Corp. 2002, 2017
  */
 
 #ifndef ZFCP_DEF_H
@@ -41,24 +41,16 @@
 #include "zfcp_fc.h"
 #include "zfcp_qdio.h"
 
-struct zfcp_reqlist;
-
-/********************* SCSI SPECIFIC DEFINES *********************************/
-#define ZFCP_SCSI_ER_TIMEOUT                    (10*HZ)
-
 /********************* FSF SPECIFIC DEFINES *********************************/
 
 /* ATTENTION: value must not be used by hardware */
 #define FSF_QTCB_UNSOLICITED_STATUS		0x6305
 
-/* timeout value for "default timer" for fsf requests */
-#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ)
-
 /*************** ADAPTER/PORT/UNIT AND FSF_REQ STATUS FLAGS ******************/
 
 /*
- * Note, the leftmost status byte is common among adapter, port
- * and unit
+ * Note, the leftmost 12 status bits (3 nibbles) are common among adapter, port
+ * and unit. This is a mask for bitwise 'and' with status values.
  */
 #define ZFCP_COMMON_FLAGS			0xfff00000
 
@@ -97,7 +89,49 @@
 
 /************************* STRUCTURE DEFINITIONS *****************************/
 
-struct zfcp_fsf_req;
+/**
+ * enum zfcp_erp_act_type - Type of ERP action object.
+ * @ZFCP_ERP_ACTION_REOPEN_LUN: LUN recovery.
+ * @ZFCP_ERP_ACTION_REOPEN_PORT: Port recovery.
+ * @ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: Forced port recovery.
+ * @ZFCP_ERP_ACTION_REOPEN_ADAPTER: Adapter recovery.
+ *
+ * Values must fit into u8 because of code dependencies:
+ * zfcp_dbf_rec_trig(), &zfcp_dbf_rec_trigger.want, &zfcp_dbf_rec_trigger.need;
+ * zfcp_dbf_rec_run_lvl(), zfcp_dbf_rec_run(), &zfcp_dbf_rec_running.rec_action.
+ */
+enum zfcp_erp_act_type {
+	ZFCP_ERP_ACTION_REOPEN_LUN	   = 1,
+	ZFCP_ERP_ACTION_REOPEN_PORT	   = 2,
+	ZFCP_ERP_ACTION_REOPEN_PORT_FORCED = 3,
+	ZFCP_ERP_ACTION_REOPEN_ADAPTER	   = 4,
+};
+
+/*
+ * Values must fit into u16 because of code dependencies:
+ * zfcp_dbf_rec_run_lvl(), zfcp_dbf_rec_run(), zfcp_dbf_rec_run_wka(),
+ * &zfcp_dbf_rec_running.rec_step.
+ */
+enum zfcp_erp_steps {
+	ZFCP_ERP_STEP_UNINITIALIZED	= 0x0000,
+	ZFCP_ERP_STEP_PHYS_PORT_CLOSING	= 0x0010,
+	ZFCP_ERP_STEP_PORT_CLOSING	= 0x0100,
+	ZFCP_ERP_STEP_PORT_OPENING	= 0x0800,
+	ZFCP_ERP_STEP_LUN_CLOSING	= 0x1000,
+	ZFCP_ERP_STEP_LUN_OPENING	= 0x2000,
+};
+
+struct zfcp_erp_action {
+	struct list_head list;
+	enum zfcp_erp_act_type type;  /* requested action code */
+	struct zfcp_adapter *adapter; /* device which should be recovered */
+	struct zfcp_port *port;
+	struct scsi_device *sdev;
+	u32		status;	      /* recovery status */
+	enum zfcp_erp_steps	step;	/* active step of this erp action */
+	unsigned long		fsf_req_id;
+	struct timer_list timer;
+};
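The width requirements spelled out in the comments (u8 for the action type, u16 for the step) come from the narrow fields of the zfcp_dbf trace records they reference. If one wanted those limits enforced at build time, a compile-time assertion would do it; this is only a sketch of the idea, not something the patch adds:

	#include <linux/build_bug.h>
	#include <linux/limits.h>

	/* Hypothetical guards for the documented width constraints: */
	static_assert(ZFCP_ERP_ACTION_REOPEN_ADAPTER <= U8_MAX);
	static_assert(ZFCP_ERP_STEP_LUN_OPENING <= U16_MAX);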
 
 /* holds various memory pools of an adapter */
 struct zfcp_adapter_mempool {
@@ -111,37 +145,6 @@
 	mempool_t *qtcb_pool;
 };
 
-struct zfcp_erp_action {
-	struct list_head list;
-	int action;	              /* requested action code */
-	struct zfcp_adapter *adapter; /* device which should be recovered */
-	struct zfcp_port *port;
-	struct scsi_device *sdev;
-	u32		status;	      /* recovery status */
-	u32 step;	              /* active step of this erp action */
-	unsigned long		fsf_req_id;
-	struct timer_list timer;
-};
-
-struct fsf_latency_record {
-	u32 min;
-	u32 max;
-	u64 sum;
-};
-
-struct latency_cont {
-	struct fsf_latency_record channel;
-	struct fsf_latency_record fabric;
-	u64 counter;
-};
-
-struct zfcp_latencies {
-	struct latency_cont read;
-	struct latency_cont write;
-	struct latency_cont cmd;
-	spinlock_t lock;
-};
-
 struct zfcp_adapter {
 	struct kref		ref;
 	u64			peer_wwnn;	   /* P2P peer WWNN */
@@ -220,6 +223,25 @@
 	unsigned int		starget_id;
 };
 
+struct zfcp_latency_record {
+	u32 min;
+	u32 max;
+	u64 sum;
+};
+
+struct zfcp_latency_cont {
+	struct zfcp_latency_record channel;
+	struct zfcp_latency_record fabric;
+	u64 counter;
+};
+
+struct zfcp_latencies {
+	struct zfcp_latency_cont read;
+	struct zfcp_latency_cont write;
+	struct zfcp_latency_cont cmd;
+	spinlock_t lock;
+};
+
 /**
  * struct zfcp_unit - LUN configured via zfcp sysfs
  * @dev: struct device for sysfs representation and reference counting
@@ -287,9 +309,7 @@
  * @qdio_req: qdio queue related values
  * @completion: used to signal the completion of the request
  * @status: status of the request
- * @fsf_command: FSF command issued
  * @qtcb: associated QTCB
- * @seq_no: sequence number of this request
  * @data: private data
  * @timer: timer data of this request
  * @erp_action: reference to erp action if request issued on behalf of ERP
@@ -304,9 +324,7 @@
 	struct zfcp_qdio_req	qdio_req;
 	struct completion	completion;
 	u32			status;
-	u32			fsf_command;
 	struct fsf_qtcb		*qtcb;
-	u32			seq_no;
 	void			*data;
 	struct timer_list	timer;
 	struct zfcp_erp_action	*erp_action;
@@ -321,4 +339,9 @@
 	return atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_MB_ACT;
 }
 
+static inline bool zfcp_fsf_req_is_status_read_buffer(struct zfcp_fsf_req *req)
+{
+	return req->qtcb == NULL;
+}
+
 #endif /* ZFCP_DEF_H */
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index e7e6b63..96f0d34 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -4,13 +4,14 @@
  *
  * Error Recovery Procedures (ERP).
  *
- * Copyright IBM Corp. 2002, 2016
+ * Copyright IBM Corp. 2002, 2017
  */
 
 #define KMSG_COMPONENT "zfcp"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
 #include <linux/kthread.h>
+#include <linux/bug.h>
 #include "zfcp_ext.h"
 #include "zfcp_reqlist.h"
 
@@ -24,38 +25,18 @@
 	ZFCP_STATUS_ERP_NO_REF		= 0x00800000,
 };
 
-enum zfcp_erp_steps {
-	ZFCP_ERP_STEP_UNINITIALIZED	= 0x0000,
-	ZFCP_ERP_STEP_PHYS_PORT_CLOSING	= 0x0010,
-	ZFCP_ERP_STEP_PORT_CLOSING	= 0x0100,
-	ZFCP_ERP_STEP_PORT_OPENING	= 0x0800,
-	ZFCP_ERP_STEP_LUN_CLOSING	= 0x1000,
-	ZFCP_ERP_STEP_LUN_OPENING	= 0x2000,
-};
-
-/**
- * enum zfcp_erp_act_type - Type of ERP action object.
- * @ZFCP_ERP_ACTION_REOPEN_LUN: LUN recovery.
- * @ZFCP_ERP_ACTION_REOPEN_PORT: Port recovery.
- * @ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: Forced port recovery.
- * @ZFCP_ERP_ACTION_REOPEN_ADAPTER: Adapter recovery.
- * @ZFCP_ERP_ACTION_NONE: Eyecatcher pseudo flag to bitwise or-combine with
- *			  either of the first four enum values.
- *			  Used to indicate that an ERP action could not be
- *			  set up despite a detected need for some recovery.
- * @ZFCP_ERP_ACTION_FAILED: Eyecatcher pseudo flag to bitwise or-combine with
- *			    either of the first four enum values.
- *			    Used to indicate that ERP not needed because
- *			    the object has ZFCP_STATUS_COMMON_ERP_FAILED.
+/*
+ * Eyecatcher pseudo flag to bitwise or-combine with enum zfcp_erp_act_type.
+ * Used to indicate that an ERP action could not be set up despite a detected
+ * need for some recovery.
  */
-enum zfcp_erp_act_type {
-	ZFCP_ERP_ACTION_REOPEN_LUN         = 1,
-	ZFCP_ERP_ACTION_REOPEN_PORT	   = 2,
-	ZFCP_ERP_ACTION_REOPEN_PORT_FORCED = 3,
-	ZFCP_ERP_ACTION_REOPEN_ADAPTER     = 4,
-	ZFCP_ERP_ACTION_NONE		   = 0xc0,
-	ZFCP_ERP_ACTION_FAILED		   = 0xe0,
-};
+#define ZFCP_ERP_ACTION_NONE		0xc0
+/*
+ * Eyecatcher pseudo flag to bitwise or-combine with enum zfcp_erp_act_type.
+ * Used to indicate that ERP is not needed because the object has
+ * ZFCP_STATUS_COMMON_ERP_FAILED.
+ */
+#define ZFCP_ERP_ACTION_FAILED		0xe0
 
 enum zfcp_erp_act_result {
 	ZFCP_ERP_SUCCEEDED = 0,
@@ -136,11 +117,11 @@
 	}
 }
 
-static int zfcp_erp_handle_failed(int want, struct zfcp_adapter *adapter,
-				  struct zfcp_port *port,
-				  struct scsi_device *sdev)
+static enum zfcp_erp_act_type zfcp_erp_handle_failed(
+	enum zfcp_erp_act_type want, struct zfcp_adapter *adapter,
+	struct zfcp_port *port,	struct scsi_device *sdev)
 {
-	int need = want;
+	enum zfcp_erp_act_type need = want;
 	struct zfcp_scsi_dev *zsdev;
 
 	switch (want) {
@@ -171,19 +152,17 @@
 				adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
 		}
 		break;
-	default:
-		need = 0;
-		break;
 	}
 
 	return need;
 }
 
-static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
+static enum zfcp_erp_act_type zfcp_erp_required_act(enum zfcp_erp_act_type want,
+				 struct zfcp_adapter *adapter,
 				 struct zfcp_port *port,
 				 struct scsi_device *sdev)
 {
-	int need = want;
+	enum zfcp_erp_act_type need = want;
 	int l_status, p_status, a_status;
 	struct zfcp_scsi_dev *zfcp_sdev;
 
@@ -230,7 +209,8 @@
 	return need;
 }
 
-static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
+static struct zfcp_erp_action *zfcp_erp_setup_act(enum zfcp_erp_act_type need,
+						  u32 act_status,
 						  struct zfcp_adapter *adapter,
 						  struct zfcp_port *port,
 						  struct scsi_device *sdev)
@@ -238,6 +218,12 @@
 	struct zfcp_erp_action *erp_action;
 	struct zfcp_scsi_dev *zfcp_sdev;
 
+	if (WARN_ON_ONCE(need != ZFCP_ERP_ACTION_REOPEN_LUN &&
+			 need != ZFCP_ERP_ACTION_REOPEN_PORT &&
+			 need != ZFCP_ERP_ACTION_REOPEN_PORT_FORCED &&
+			 need != ZFCP_ERP_ACTION_REOPEN_ADAPTER))
+		return NULL;
+
 	switch (need) {
 	case ZFCP_ERP_ACTION_REOPEN_LUN:
 		zfcp_sdev = sdev_to_zfcp(sdev);
@@ -278,9 +264,6 @@
 		      ZFCP_STATUS_COMMON_RUNNING))
 			act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
 		break;
-
-	default:
-		return NULL;
 	}
 
 	WARN_ON_ONCE(erp_action->adapter != adapter);
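Replacing the old default: return NULL with an up-front WARN_ON_ONCE() turns a silent failure into a one-time warning for an impossible action code, and because the switch over enum zfcp_erp_act_type now lists every enumerator without a default, the compiler's -Wswitch check will flag any future enum value that is not handled here. The pattern in isolation (generic sketch, not this driver's code):

	#include <linux/bug.h>	/* WARN_ON_ONCE(); this patch adds the include in zfcp_erp.c */

	enum example_kind { EXAMPLE_A = 1, EXAMPLE_B = 2 };

	static const char *example_name(enum example_kind kind)
	{
		if (WARN_ON_ONCE(kind != EXAMPLE_A && kind != EXAMPLE_B))
			return NULL;		/* impossible value: warn once, bail out */

		switch (kind) {			/* no default, so -Wswitch checks coverage */
		case EXAMPLE_A:
			return "A";
		case EXAMPLE_B:
			return "B";
		}
		return NULL;			/* not reached; keeps the compiler quiet */
	}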
@@ -288,18 +271,19 @@
 	memset(&erp_action->timer, 0, sizeof(erp_action->timer));
 	erp_action->step = ZFCP_ERP_STEP_UNINITIALIZED;
 	erp_action->fsf_req_id = 0;
-	erp_action->action = need;
+	erp_action->type = need;
 	erp_action->status = act_status;
 
 	return erp_action;
 }
 
-static void zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
+static void zfcp_erp_action_enqueue(enum zfcp_erp_act_type want,
+				    struct zfcp_adapter *adapter,
 				    struct zfcp_port *port,
 				    struct scsi_device *sdev,
-				    char *id, u32 act_status)
+				    char *dbftag, u32 act_status)
 {
-	int need;
+	enum zfcp_erp_act_type need;
 	struct zfcp_erp_action *act;
 
 	need = zfcp_erp_handle_failed(want, adapter, port, sdev);
@@ -327,10 +311,11 @@
 	list_add_tail(&act->list, &adapter->erp_ready_head);
 	wake_up(&adapter->erp_ready_wq);
  out:
-	zfcp_dbf_rec_trig(id, adapter, port, sdev, want, need);
+	zfcp_dbf_rec_trig(dbftag, adapter, port, sdev, want, need);
 }
 
-void zfcp_erp_port_forced_no_port_dbf(char *id, struct zfcp_adapter *adapter,
+void zfcp_erp_port_forced_no_port_dbf(char *dbftag,
+				      struct zfcp_adapter *adapter,
 				      u64 port_name, u32 port_id)
 {
 	unsigned long flags;
@@ -344,29 +329,30 @@
 	atomic_set(&tmpport.status, -1); /* unknown */
 	tmpport.wwpn = port_name;
 	tmpport.d_id = port_id;
-	zfcp_dbf_rec_trig(id, adapter, &tmpport, NULL,
+	zfcp_dbf_rec_trig(dbftag, adapter, &tmpport, NULL,
 			  ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
 			  ZFCP_ERP_ACTION_NONE);
 	write_unlock_irqrestore(&adapter->erp_lock, flags);
 }
 
 static void _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
-				    int clear_mask, char *id)
+				    int clear_mask, char *dbftag)
 {
 	zfcp_erp_adapter_block(adapter, clear_mask);
 	zfcp_scsi_schedule_rports_block(adapter);
 
 	zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER,
-				adapter, NULL, NULL, id, 0);
+				adapter, NULL, NULL, dbftag, 0);
 }
 
 /**
  * zfcp_erp_adapter_reopen - Reopen adapter.
  * @adapter: Adapter to reopen.
  * @clear: Status flags to clear.
- * @id: Id for debug trace event.
+ * @dbftag: Tag for debug trace event.
  */
-void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, char *id)
+void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear,
+			     char *dbftag)
 {
 	unsigned long flags;
 
@@ -375,7 +361,7 @@
 
 	write_lock_irqsave(&adapter->erp_lock, flags);
 	zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter,
-				NULL, NULL, id, 0);
+				NULL, NULL, dbftag, 0);
 	write_unlock_irqrestore(&adapter->erp_lock, flags);
 }
 
@@ -383,25 +369,25 @@
  * zfcp_erp_adapter_shutdown - Shutdown adapter.
  * @adapter: Adapter to shut down.
  * @clear: Status flags to clear.
- * @id: Id for debug trace event.
+ * @dbftag: Tag for debug trace event.
  */
 void zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear,
-			       char *id)
+			       char *dbftag)
 {
 	int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
-	zfcp_erp_adapter_reopen(adapter, clear | flags, id);
+	zfcp_erp_adapter_reopen(adapter, clear | flags, dbftag);
 }
 
 /**
  * zfcp_erp_port_shutdown - Shutdown port
  * @port: Port to shut down.
  * @clear: Status flags to clear.
- * @id: Id for debug trace event.
+ * @dbftag: Tag for debug trace event.
  */
-void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, char *id)
+void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, char *dbftag)
 {
 	int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
-	zfcp_erp_port_reopen(port, clear | flags, id);
+	zfcp_erp_port_reopen(port, clear | flags, dbftag);
 }
 
 static void zfcp_erp_port_block(struct zfcp_port *port, int clear)
@@ -411,53 +397,55 @@
 }
 
 static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear,
-					 char *id)
+					 char *dbftag)
 {
 	zfcp_erp_port_block(port, clear);
 	zfcp_scsi_schedule_rport_block(port);
 
 	zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
-				port->adapter, port, NULL, id, 0);
+				port->adapter, port, NULL, dbftag, 0);
 }
 
 /**
  * zfcp_erp_port_forced_reopen - Forced close of port and open again
  * @port: Port to force close and to reopen.
  * @clear: Status flags to clear.
- * @id: Id for debug trace event.
+ * @dbftag: Tag for debug trace event.
  */
-void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, char *id)
+void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear,
+				 char *dbftag)
 {
 	unsigned long flags;
 	struct zfcp_adapter *adapter = port->adapter;
 
 	write_lock_irqsave(&adapter->erp_lock, flags);
-	_zfcp_erp_port_forced_reopen(port, clear, id);
+	_zfcp_erp_port_forced_reopen(port, clear, dbftag);
 	write_unlock_irqrestore(&adapter->erp_lock, flags);
 }
 
-static void _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
+static void _zfcp_erp_port_reopen(struct zfcp_port *port, int clear,
+				  char *dbftag)
 {
 	zfcp_erp_port_block(port, clear);
 	zfcp_scsi_schedule_rport_block(port);
 
 	zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT,
-				port->adapter, port, NULL, id, 0);
+				port->adapter, port, NULL, dbftag, 0);
 }
 
 /**
  * zfcp_erp_port_reopen - trigger remote port recovery
  * @port: port to recover
- * @clear_mask: flags in port status to be cleared
- * @id: Id for debug trace event.
+ * @clear: flags in port status to be cleared
+ * @dbftag: Tag for debug trace event.
  */
-void zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
+void zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *dbftag)
 {
 	unsigned long flags;
 	struct zfcp_adapter *adapter = port->adapter;
 
 	write_lock_irqsave(&adapter->erp_lock, flags);
-	_zfcp_erp_port_reopen(port, clear, id);
+	_zfcp_erp_port_reopen(port, clear, dbftag);
 	write_unlock_irqrestore(&adapter->erp_lock, flags);
 }
 
@@ -467,8 +455,8 @@
 				  ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask);
 }
 
-static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
-				 u32 act_status)
+static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear,
+				 char *dbftag, u32 act_status)
 {
 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
 	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
@@ -476,18 +464,18 @@
 	zfcp_erp_lun_block(sdev, clear);
 
 	zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_LUN, adapter,
-				zfcp_sdev->port, sdev, id, act_status);
+				zfcp_sdev->port, sdev, dbftag, act_status);
 }
 
 /**
  * zfcp_erp_lun_reopen - initiate reopen of a LUN
  * @sdev: SCSI device / LUN to be reopened
- * @clear_mask: specifies flags in LUN status to be cleared
- * @id: Id for debug trace event.
+ * @clear: specifies flags in LUN status to be cleared
+ * @dbftag: Tag for debug trace event.
  *
  * Return: 0 on success, < 0 on error
  */
-void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id)
+void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *dbftag)
 {
 	unsigned long flags;
 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
@@ -495,7 +483,7 @@
 	struct zfcp_adapter *adapter = port->adapter;
 
 	write_lock_irqsave(&adapter->erp_lock, flags);
-	_zfcp_erp_lun_reopen(sdev, clear, id, 0);
+	_zfcp_erp_lun_reopen(sdev, clear, dbftag, 0);
 	write_unlock_irqrestore(&adapter->erp_lock, flags);
 }
 
@@ -503,25 +491,25 @@
  * zfcp_erp_lun_shutdown - Shutdown LUN
  * @sdev: SCSI device / LUN to shut down.
  * @clear: Status flags to clear.
- * @id: Id for debug trace event.
+ * @dbftag: Tag for debug trace event.
  */
-void zfcp_erp_lun_shutdown(struct scsi_device *sdev, int clear, char *id)
+void zfcp_erp_lun_shutdown(struct scsi_device *sdev, int clear, char *dbftag)
 {
 	int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
-	zfcp_erp_lun_reopen(sdev, clear | flags, id);
+	zfcp_erp_lun_reopen(sdev, clear | flags, dbftag);
 }
 
 /**
  * zfcp_erp_lun_shutdown_wait - Shutdown LUN and wait for erp completion
  * @sdev: SCSI device / LUN to shut down.
- * @id: Id for debug trace event.
+ * @dbftag: Tag for debug trace event.
  *
  * Do not acquire a reference for the LUN when creating the ERP
  * action. It is safe, because this function waits for the ERP to
  * complete first. This allows to shutdown the LUN, even when the SCSI
  * device is in the state SDEV_DEL when scsi_device_get will fail.
  */
-void zfcp_erp_lun_shutdown_wait(struct scsi_device *sdev, char *id)
+void zfcp_erp_lun_shutdown_wait(struct scsi_device *sdev, char *dbftag)
 {
 	unsigned long flags;
 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
@@ -530,7 +518,7 @@
 	int clear = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
 
 	write_lock_irqsave(&adapter->erp_lock, flags);
-	_zfcp_erp_lun_reopen(sdev, clear, id, ZFCP_STATUS_ERP_NO_REF);
+	_zfcp_erp_lun_reopen(sdev, clear, dbftag, ZFCP_STATUS_ERP_NO_REF);
 	write_unlock_irqrestore(&adapter->erp_lock, flags);
 
 	zfcp_erp_wait(adapter);
@@ -619,7 +607,7 @@
 
 /**
  * zfcp_erp_timeout_handler - Trigger ERP action from timed out ERP request
- * @data: ERP action (from timer data)
+ * @t: timer list entry embedded in zfcp FSF request
  */
 void zfcp_erp_timeout_handler(struct timer_list *t)
 {
@@ -643,32 +631,46 @@
 	add_timer(&erp_action->timer);
 }
 
+void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter,
+				     int clear, char *dbftag)
+{
+	unsigned long flags;
+	struct zfcp_port *port;
+
+	write_lock_irqsave(&adapter->erp_lock, flags);
+	read_lock(&adapter->port_list_lock);
+	list_for_each_entry(port, &adapter->port_list, list)
+		_zfcp_erp_port_forced_reopen(port, clear, dbftag);
+	read_unlock(&adapter->port_list_lock);
+	write_unlock_irqrestore(&adapter->erp_lock, flags);
+}
+
 static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
-				      int clear, char *id)
+				      int clear, char *dbftag)
 {
 	struct zfcp_port *port;
 
 	read_lock(&adapter->port_list_lock);
 	list_for_each_entry(port, &adapter->port_list, list)
-		_zfcp_erp_port_reopen(port, clear, id);
+		_zfcp_erp_port_reopen(port, clear, dbftag);
 	read_unlock(&adapter->port_list_lock);
 }
 
 static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear,
-				     char *id)
+				     char *dbftag)
 {
 	struct scsi_device *sdev;
 
 	spin_lock(port->adapter->scsi_host->host_lock);
 	__shost_for_each_device(sdev, port->adapter->scsi_host)
 		if (sdev_to_zfcp(sdev)->port == port)
-			_zfcp_erp_lun_reopen(sdev, clear, id, 0);
+			_zfcp_erp_lun_reopen(sdev, clear, dbftag, 0);
 	spin_unlock(port->adapter->scsi_host->host_lock);
 }
 
 static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
 {
-	switch (act->action) {
+	switch (act->type) {
 	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
 		_zfcp_erp_adapter_reopen(act->adapter, 0, "ersff_1");
 		break;
@@ -686,7 +688,7 @@
 
 static void zfcp_erp_strategy_followup_success(struct zfcp_erp_action *act)
 {
-	switch (act->action) {
+	switch (act->type) {
 	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
 		_zfcp_erp_port_reopen_all(act->adapter, 0, "ersfs_1");
 		break;
@@ -696,6 +698,9 @@
 	case ZFCP_ERP_ACTION_REOPEN_PORT:
 		_zfcp_erp_lun_reopen_all(act->port, 0, "ersfs_3");
 		break;
+	case ZFCP_ERP_ACTION_REOPEN_LUN:
+		/* NOP */
+		break;
 	}
 }
 
@@ -723,7 +728,8 @@
 	_zfcp_erp_port_reopen(port, 0, "ereptp1");
 }
 
-static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action)
+static enum zfcp_erp_act_result zfcp_erp_adapter_strat_fsf_xconf(
+	struct zfcp_erp_action *erp_action)
 {
 	int retries;
 	int sleep = 1;
@@ -768,7 +774,8 @@
 	return ZFCP_ERP_SUCCEEDED;
 }
 
-static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *act)
+static enum zfcp_erp_act_result zfcp_erp_adapter_strategy_open_fsf_xport(
+	struct zfcp_erp_action *act)
 {
 	int ret;
 	struct zfcp_adapter *adapter = act->adapter;
@@ -793,7 +800,8 @@
 	return ZFCP_ERP_SUCCEEDED;
 }
 
-static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *act)
+static enum zfcp_erp_act_result zfcp_erp_adapter_strategy_open_fsf(
+	struct zfcp_erp_action *act)
 {
 	if (zfcp_erp_adapter_strat_fsf_xconf(act) == ZFCP_ERP_FAILED)
 		return ZFCP_ERP_FAILED;
@@ -832,7 +840,8 @@
 			  ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
 }
 
-static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *act)
+static enum zfcp_erp_act_result zfcp_erp_adapter_strategy_open(
+	struct zfcp_erp_action *act)
 {
 	struct zfcp_adapter *adapter = act->adapter;
 
@@ -853,7 +862,8 @@
 	return ZFCP_ERP_SUCCEEDED;
 }
 
-static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *act)
+static enum zfcp_erp_act_result zfcp_erp_adapter_strategy(
+	struct zfcp_erp_action *act)
 {
 	struct zfcp_adapter *adapter = act->adapter;
 
@@ -871,7 +881,8 @@
 	return ZFCP_ERP_SUCCEEDED;
 }
 
-static int zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *act)
+static enum zfcp_erp_act_result zfcp_erp_port_forced_strategy_close(
+	struct zfcp_erp_action *act)
 {
 	int retval;
 
@@ -885,7 +896,8 @@
 	return ZFCP_ERP_CONTINUES;
 }
 
-static int zfcp_erp_port_forced_strategy(struct zfcp_erp_action *erp_action)
+static enum zfcp_erp_act_result zfcp_erp_port_forced_strategy(
+	struct zfcp_erp_action *erp_action)
 {
 	struct zfcp_port *port = erp_action->port;
 	int status = atomic_read(&port->status);
@@ -901,11 +913,19 @@
 	case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
 		if (!(status & ZFCP_STATUS_PORT_PHYS_OPEN))
 			return ZFCP_ERP_SUCCEEDED;
+		break;
+	case ZFCP_ERP_STEP_PORT_CLOSING:
+	case ZFCP_ERP_STEP_PORT_OPENING:
+	case ZFCP_ERP_STEP_LUN_CLOSING:
+	case ZFCP_ERP_STEP_LUN_OPENING:
+		/* NOP */
+		break;
 	}
 	return ZFCP_ERP_FAILED;
 }
 
-static int zfcp_erp_port_strategy_close(struct zfcp_erp_action *erp_action)
+static enum zfcp_erp_act_result zfcp_erp_port_strategy_close(
+	struct zfcp_erp_action *erp_action)
 {
 	int retval;
 
@@ -918,7 +938,8 @@
 	return ZFCP_ERP_CONTINUES;
 }
 
-static int zfcp_erp_port_strategy_open_port(struct zfcp_erp_action *erp_action)
+static enum zfcp_erp_act_result zfcp_erp_port_strategy_open_port(
+	struct zfcp_erp_action *erp_action)
 {
 	int retval;
 
@@ -944,7 +965,8 @@
 	return zfcp_erp_port_strategy_open_port(act);
 }
 
-static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
+static enum zfcp_erp_act_result zfcp_erp_port_strategy_open_common(
+	struct zfcp_erp_action *act)
 {
 	struct zfcp_adapter *adapter = act->adapter;
 	struct zfcp_port *port = act->port;
@@ -975,12 +997,18 @@
 			port->d_id = 0;
 			return ZFCP_ERP_FAILED;
 		}
-		/* fall through otherwise */
+		/* no early return otherwise, continue after switch case */
+		break;
+	case ZFCP_ERP_STEP_LUN_CLOSING:
+	case ZFCP_ERP_STEP_LUN_OPENING:
+		/* NOP */
+		break;
 	}
 	return ZFCP_ERP_FAILED;
 }
 
-static int zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action)
+static enum zfcp_erp_act_result zfcp_erp_port_strategy(
+	struct zfcp_erp_action *erp_action)
 {
 	struct zfcp_port *port = erp_action->port;
 	int p_status = atomic_read(&port->status);
@@ -999,6 +1027,12 @@
 		if (p_status & ZFCP_STATUS_COMMON_OPEN)
 			return ZFCP_ERP_FAILED;
 		break;
+	case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
+	case ZFCP_ERP_STEP_PORT_OPENING:
+	case ZFCP_ERP_STEP_LUN_CLOSING:
+	case ZFCP_ERP_STEP_LUN_OPENING:
+		/* NOP */
+		break;
 	}
 
 close_init_done:
@@ -1016,7 +1050,8 @@
 			  &zfcp_sdev->status);
 }
 
-static int zfcp_erp_lun_strategy_close(struct zfcp_erp_action *erp_action)
+static enum zfcp_erp_act_result zfcp_erp_lun_strategy_close(
+	struct zfcp_erp_action *erp_action)
 {
 	int retval = zfcp_fsf_close_lun(erp_action);
 	if (retval == -ENOMEM)
@@ -1027,7 +1062,8 @@
 	return ZFCP_ERP_CONTINUES;
 }
 
-static int zfcp_erp_lun_strategy_open(struct zfcp_erp_action *erp_action)
+static enum zfcp_erp_act_result zfcp_erp_lun_strategy_open(
+	struct zfcp_erp_action *erp_action)
 {
 	int retval = zfcp_fsf_open_lun(erp_action);
 	if (retval == -ENOMEM)
@@ -1038,7 +1074,8 @@
 	return ZFCP_ERP_CONTINUES;
 }
 
-static int zfcp_erp_lun_strategy(struct zfcp_erp_action *erp_action)
+static enum zfcp_erp_act_result zfcp_erp_lun_strategy(
+	struct zfcp_erp_action *erp_action)
 {
 	struct scsi_device *sdev = erp_action->sdev;
 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
@@ -1048,7 +1085,8 @@
 		zfcp_erp_lun_strategy_clearstati(sdev);
 		if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
 			return zfcp_erp_lun_strategy_close(erp_action);
-		/* already closed, fall through */
+		/* already closed */
+		/* fall through */
 	case ZFCP_ERP_STEP_LUN_CLOSING:
 		if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
 			return ZFCP_ERP_FAILED;
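Splitting the comment leaves a bare /* fall through */ directly above the next case label, the form that GCC's -Wimplicit-fallthrough comment matching reliably recognizes; with extra words in the same comment the annotation may not be honored. Newer kernels spell this with the fallthrough pseudo-keyword instead. Both spellings, reduced to a generic sketch with placeholder helpers:

	switch (step) {
	case STEP_ONE:
		prepare();		/* placeholder helper */
		/* fall through */
	case STEP_TWO:
		finish();		/* placeholder helper */
		fallthrough;		/* keyword form preferred in current kernels */
	default:
		break;
	}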
@@ -1059,11 +1097,18 @@
 	case ZFCP_ERP_STEP_LUN_OPENING:
 		if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
 			return ZFCP_ERP_SUCCEEDED;
+		break;
+	case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
+	case ZFCP_ERP_STEP_PORT_CLOSING:
+	case ZFCP_ERP_STEP_PORT_OPENING:
+		/* NOP */
+		break;
 	}
 	return ZFCP_ERP_FAILED;
 }
 
-static int zfcp_erp_strategy_check_lun(struct scsi_device *sdev, int result)
+static enum zfcp_erp_act_result zfcp_erp_strategy_check_lun(
+	struct scsi_device *sdev, enum zfcp_erp_act_result result)
 {
 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
 
@@ -1084,6 +1129,12 @@
 						ZFCP_STATUS_COMMON_ERP_FAILED);
 		}
 		break;
+	case ZFCP_ERP_CONTINUES:
+	case ZFCP_ERP_EXIT:
+	case ZFCP_ERP_DISMISSED:
+	case ZFCP_ERP_NOMEM:
+		/* NOP */
+		break;
 	}
 
 	if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
@@ -1093,7 +1144,8 @@
 	return result;
 }
 
-static int zfcp_erp_strategy_check_port(struct zfcp_port *port, int result)
+static enum zfcp_erp_act_result zfcp_erp_strategy_check_port(
+	struct zfcp_port *port, enum zfcp_erp_act_result result)
 {
 	switch (result) {
 	case ZFCP_ERP_SUCCEEDED :
@@ -1115,6 +1167,12 @@
 					 ZFCP_STATUS_COMMON_ERP_FAILED);
 		}
 		break;
+	case ZFCP_ERP_CONTINUES:
+	case ZFCP_ERP_EXIT:
+	case ZFCP_ERP_DISMISSED:
+	case ZFCP_ERP_NOMEM:
+		/* NOP */
+		break;
 	}
 
 	if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
@@ -1124,8 +1182,8 @@
 	return result;
 }
 
-static int zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter,
-					   int result)
+static enum zfcp_erp_act_result zfcp_erp_strategy_check_adapter(
+	struct zfcp_adapter *adapter, enum zfcp_erp_act_result result)
 {
 	switch (result) {
 	case ZFCP_ERP_SUCCEEDED :
@@ -1143,6 +1201,12 @@
 					    ZFCP_STATUS_COMMON_ERP_FAILED);
 		}
 		break;
+	case ZFCP_ERP_CONTINUES:
+	case ZFCP_ERP_EXIT:
+	case ZFCP_ERP_DISMISSED:
+	case ZFCP_ERP_NOMEM:
+		/* NOP */
+		break;
 	}
 
 	if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
@@ -1152,14 +1216,14 @@
 	return result;
 }
 
-static int zfcp_erp_strategy_check_target(struct zfcp_erp_action *erp_action,
-					  int result)
+static enum zfcp_erp_act_result zfcp_erp_strategy_check_target(
+	struct zfcp_erp_action *erp_action, enum zfcp_erp_act_result result)
 {
 	struct zfcp_adapter *adapter = erp_action->adapter;
 	struct zfcp_port *port = erp_action->port;
 	struct scsi_device *sdev = erp_action->sdev;
 
-	switch (erp_action->action) {
+	switch (erp_action->type) {
 
 	case ZFCP_ERP_ACTION_REOPEN_LUN:
 		result = zfcp_erp_strategy_check_lun(sdev, result);
@@ -1192,16 +1256,17 @@
 	return 0;
 }
 
-static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
+static enum zfcp_erp_act_result zfcp_erp_strategy_statechange(
+	struct zfcp_erp_action *act, enum zfcp_erp_act_result result)
 {
-	int action = act->action;
+	enum zfcp_erp_act_type type = act->type;
 	struct zfcp_adapter *adapter = act->adapter;
 	struct zfcp_port *port = act->port;
 	struct scsi_device *sdev = act->sdev;
 	struct zfcp_scsi_dev *zfcp_sdev;
 	u32 erp_status = act->status;
 
-	switch (action) {
+	switch (type) {
 	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
 		if (zfcp_erp_strat_change_det(&adapter->status, erp_status)) {
 			_zfcp_erp_adapter_reopen(adapter,
@@ -1231,7 +1296,7 @@
 		}
 		break;
 	}
-	return ret;
+	return result;
 }
 
 static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
@@ -1248,7 +1313,7 @@
 	list_del(&erp_action->list);
 	zfcp_dbf_rec_run("eractd1", erp_action);
 
-	switch (erp_action->action) {
+	switch (erp_action->type) {
 	case ZFCP_ERP_ACTION_REOPEN_LUN:
 		zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
 		atomic_andnot(ZFCP_STATUS_COMMON_ERP_INUSE,
@@ -1297,6 +1362,9 @@
 		struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev);
 		int lun_status;
 
+		if (sdev->sdev_state == SDEV_DEL ||
+		    sdev->sdev_state == SDEV_CANCEL)
+			continue;
 		if (zsdev->port != port)
 			continue;
 		/* LUN under port of interest */
@@ -1324,13 +1392,14 @@
 	write_unlock_irqrestore(&adapter->erp_lock, flags);
 }
 
-static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
+static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act,
+				    enum zfcp_erp_act_result result)
 {
 	struct zfcp_adapter *adapter = act->adapter;
 	struct zfcp_port *port = act->port;
 	struct scsi_device *sdev = act->sdev;
 
-	switch (act->action) {
+	switch (act->type) {
 	case ZFCP_ERP_ACTION_REOPEN_LUN:
 		if (!(act->status & ZFCP_STATUS_ERP_NO_REF))
 			scsi_device_put(sdev);
@@ -1364,9 +1433,10 @@
 	}
 }
 
-static int zfcp_erp_strategy_do_action(struct zfcp_erp_action *erp_action)
+static enum zfcp_erp_act_result zfcp_erp_strategy_do_action(
+	struct zfcp_erp_action *erp_action)
 {
-	switch (erp_action->action) {
+	switch (erp_action->type) {
 	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
 		return zfcp_erp_adapter_strategy(erp_action);
 	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
@@ -1379,9 +1449,10 @@
 	return ZFCP_ERP_FAILED;
 }
 
-static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
+static enum zfcp_erp_act_result zfcp_erp_strategy(
+	struct zfcp_erp_action *erp_action)
 {
-	int retval;
+	enum zfcp_erp_act_result result;
 	unsigned long flags;
 	struct zfcp_adapter *adapter = erp_action->adapter;
 
@@ -1392,12 +1463,12 @@
 
 	if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED) {
 		zfcp_erp_action_dequeue(erp_action);
-		retval = ZFCP_ERP_DISMISSED;
+		result = ZFCP_ERP_DISMISSED;
 		goto unlock;
 	}
 
 	if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
-		retval = ZFCP_ERP_FAILED;
+		result = ZFCP_ERP_FAILED;
 		goto check_target;
 	}
 
@@ -1405,13 +1476,13 @@
 
 	/* no lock to allow for blocking operations */
 	write_unlock_irqrestore(&adapter->erp_lock, flags);
-	retval = zfcp_erp_strategy_do_action(erp_action);
+	result = zfcp_erp_strategy_do_action(erp_action);
 	write_lock_irqsave(&adapter->erp_lock, flags);
 
 	if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED)
-		retval = ZFCP_ERP_CONTINUES;
+		result = ZFCP_ERP_CONTINUES;
 
-	switch (retval) {
+	switch (result) {
 	case ZFCP_ERP_NOMEM:
 		if (!(erp_action->status & ZFCP_STATUS_ERP_LOWMEM)) {
 			++adapter->erp_low_mem_count;
@@ -1421,7 +1492,7 @@
 			_zfcp_erp_adapter_reopen(adapter, 0, "erstgy1");
 		else {
 			zfcp_erp_strategy_memwait(erp_action);
-			retval = ZFCP_ERP_CONTINUES;
+			result = ZFCP_ERP_CONTINUES;
 		}
 		goto unlock;
 
@@ -1431,27 +1502,33 @@
 			erp_action->status &= ~ZFCP_STATUS_ERP_LOWMEM;
 		}
 		goto unlock;
+	case ZFCP_ERP_SUCCEEDED:
+	case ZFCP_ERP_FAILED:
+	case ZFCP_ERP_EXIT:
+	case ZFCP_ERP_DISMISSED:
+		/* NOP */
+		break;
 	}
 
 check_target:
-	retval = zfcp_erp_strategy_check_target(erp_action, retval);
+	result = zfcp_erp_strategy_check_target(erp_action, result);
 	zfcp_erp_action_dequeue(erp_action);
-	retval = zfcp_erp_strategy_statechange(erp_action, retval);
-	if (retval == ZFCP_ERP_EXIT)
+	result = zfcp_erp_strategy_statechange(erp_action, result);
+	if (result == ZFCP_ERP_EXIT)
 		goto unlock;
-	if (retval == ZFCP_ERP_SUCCEEDED)
+	if (result == ZFCP_ERP_SUCCEEDED)
 		zfcp_erp_strategy_followup_success(erp_action);
-	if (retval == ZFCP_ERP_FAILED)
+	if (result == ZFCP_ERP_FAILED)
 		zfcp_erp_strategy_followup_failed(erp_action);
 
  unlock:
 	write_unlock_irqrestore(&adapter->erp_lock, flags);
 
-	if (retval != ZFCP_ERP_CONTINUES)
-		zfcp_erp_action_cleanup(erp_action, retval);
+	if (result != ZFCP_ERP_CONTINUES)
+		zfcp_erp_action_cleanup(erp_action, result);
 
 	kref_put(&adapter->ref, zfcp_adapter_release);
-	return retval;
+	return result;
 }
 
 static int zfcp_erp_thread(void *data)
@@ -1489,7 +1566,7 @@
  * zfcp_erp_thread_setup - Start ERP thread for adapter
  * @adapter: Adapter to start the ERP thread for
  *
- * Returns 0 on success or error code from kernel_thread()
+ * Return: 0 on success, or error code from kthread_run().
  */
 int zfcp_erp_thread_setup(struct zfcp_adapter *adapter)
 {
@@ -1694,11 +1771,11 @@
 /**
  * zfcp_erp_adapter_reset_sync() - Really reopen adapter and wait.
  * @adapter: Pointer to zfcp_adapter to reopen.
- * @id: Trace tag string of length %ZFCP_DBF_TAG_LEN.
+ * @dbftag: Trace tag string of length %ZFCP_DBF_TAG_LEN.
  */
-void zfcp_erp_adapter_reset_sync(struct zfcp_adapter *adapter, char *id)
+void zfcp_erp_adapter_reset_sync(struct zfcp_adapter *adapter, char *dbftag)
 {
 	zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
-	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, id);
+	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, dbftag);
 	zfcp_erp_wait(adapter);
 }
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index bd0c5a9..31e8a72 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -59,16 +59,19 @@
 /* zfcp_erp.c */
 extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32);
 extern void zfcp_erp_clear_adapter_status(struct zfcp_adapter *, u32);
-extern void zfcp_erp_port_forced_no_port_dbf(char *id,
+extern void zfcp_erp_port_forced_no_port_dbf(char *dbftag,
 					     struct zfcp_adapter *adapter,
 					     u64 port_name, u32 port_id);
 extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *);
 extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *);
 extern void zfcp_erp_set_port_status(struct zfcp_port *, u32);
 extern void zfcp_erp_clear_port_status(struct zfcp_port *, u32);
-extern void zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id);
+extern void zfcp_erp_port_reopen(struct zfcp_port *port, int clear,
+				 char *dbftag);
 extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *);
 extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *);
+extern void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter,
+					    int clear, char *dbftag);
 extern void zfcp_erp_set_lun_status(struct scsi_device *, u32);
 extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32);
 extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *);
@@ -79,7 +82,8 @@
 extern void zfcp_erp_wait(struct zfcp_adapter *);
 extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long);
 extern void zfcp_erp_timeout_handler(struct timer_list *t);
-extern void zfcp_erp_adapter_reset_sync(struct zfcp_adapter *adapter, char *id);
+extern void zfcp_erp_adapter_reset_sync(struct zfcp_adapter *adapter,
+					char *dbftag);
 
 /* zfcp_fc.c */
 extern struct kmem_cache *zfcp_fc_req_cache;
@@ -144,6 +148,7 @@
 extern void zfcp_qdio_siosl(struct zfcp_adapter *);
 
 /* zfcp_scsi.c */
+extern bool zfcp_experimental_dix;
 extern struct scsi_transport_template *zfcp_scsi_transport_template;
 extern int zfcp_scsi_adapter_register(struct zfcp_adapter *);
 extern void zfcp_scsi_adapter_unregister(struct zfcp_adapter *);
@@ -162,6 +167,7 @@
 extern struct mutex zfcp_sysfs_port_units_mutex;
 extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
 extern struct device_attribute *zfcp_sysfs_shost_attrs[];
+bool zfcp_sysfs_port_is_removing(const struct zfcp_port *const port);
 
 /* zfcp_unit.c */
 extern int zfcp_unit_add(struct zfcp_port *, u64);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index f6c415d..b018b61 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -239,10 +239,6 @@
 	list_for_each_entry(port, &adapter->port_list, list) {
 		if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range))
 			zfcp_fc_test_link(port);
-		if (!port->d_id)
-			zfcp_erp_port_reopen(port,
-					     ZFCP_STATUS_COMMON_ERP_FAILED,
-					     "fcrscn1");
 	}
 	read_unlock_irqrestore(&adapter->port_list_lock, flags);
 }
@@ -250,6 +246,7 @@
 static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
 {
 	struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data;
+	struct zfcp_adapter *adapter = fsf_req->adapter;
 	struct fc_els_rscn *head;
 	struct fc_els_rscn_page *page;
 	u16 i;
@@ -263,6 +260,22 @@
 	no_entries = be16_to_cpu(head->rscn_plen) /
 		sizeof(struct fc_els_rscn_page);
 
+	if (no_entries > 1) {
+		/* handle failed ports */
+		unsigned long flags;
+		struct zfcp_port *port;
+
+		read_lock_irqsave(&adapter->port_list_lock, flags);
+		list_for_each_entry(port, &adapter->port_list, list) {
+			if (port->d_id)
+				continue;
+			zfcp_erp_port_reopen(port,
+					     ZFCP_STATUS_COMMON_ERP_FAILED,
+					     "fcrscn1");
+		}
+		read_unlock_irqrestore(&adapter->port_list_lock, flags);
+	}
+
 	for (i = 1; i < no_entries; i++) {
 		/* skip head and start with 1st element */
 		page++;
@@ -312,7 +325,7 @@
 
 /**
  * zfcp_fc_incoming_els - handle incoming ELS
- * @fsf_req - request which contains incoming ELS
+ * @fsf_req: request which contains incoming ELS
  */
 void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req)
 {
@@ -597,6 +610,48 @@
 		put_device(&port->dev);
 }
 
+/**
+ * zfcp_fc_sg_free_table - free memory used by scatterlists
+ * @sg: pointer to scatterlist
+ * @count: number of scatterlist entries which are to be freed
+ * the scatterlist entries are expected to always reference pages
+ */
+static void zfcp_fc_sg_free_table(struct scatterlist *sg, int count)
+{
+	int i;
+
+	for (i = 0; i < count; i++, sg = sg_next(sg))
+		if (sg)
+			free_page((unsigned long) sg_virt(sg));
+		else
+			break;
+}
+
+/**
+ * zfcp_fc_sg_setup_table - init scatterlist and allocate, assign buffers
+ * @sg: pointer to struct scatterlist
+ * @count: number of scatterlists which should be assigned buffers
+ * of page size
+ *
+ * Returns: 0 on success, -ENOMEM otherwise
+ */
+static int zfcp_fc_sg_setup_table(struct scatterlist *sg, int count)
+{
+	void *addr;
+	int i;
+
+	sg_init_table(sg, count);
+	for (i = 0; i < count; i++, sg = sg_next(sg)) {
+		addr = (void *) get_zeroed_page(GFP_KERNEL);
+		if (!addr) {
+			zfcp_fc_sg_free_table(sg, i);
+			return -ENOMEM;
+		}
+		sg_set_buf(sg, addr, PAGE_SIZE);
+	}
+	return 0;
+}
+
 static struct zfcp_fc_req *zfcp_fc_alloc_sg_env(int buf_num)
 {
 	struct zfcp_fc_req *fc_req;
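Compared with the zfcp_aux.c versions removed earlier in this patch, the relocated helpers advance with sg = sg_next(sg) instead of sg++. sg_next() follows chained scatterlists (it hops over chain entries when a table was built from several chunks), whereas plain pointer arithmetic only works for one flat array; for the flat tables allocated here the two behave the same, but sg_next() is the idiomatic, chain-safe form. Side by side, as a sketch with a placeholder use():

	/* flat-array walk: only valid when the table is one contiguous array */
	for (i = 0; i < count; i++, sg++)
		use(sg);

	/* chain-aware walk: also correct for chained tables */
	for (i = 0; i < count; i++, sg = sg_next(sg))
		use(sg);

	/* or, equivalently, the helper macro: for_each_sg(table, sg, count, i) */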
@@ -605,7 +660,7 @@
 	if (!fc_req)
 		return NULL;
 
-	if (zfcp_sg_setup_table(&fc_req->sg_rsp, buf_num)) {
+	if (zfcp_fc_sg_setup_table(&fc_req->sg_rsp, buf_num)) {
 		kmem_cache_free(zfcp_fc_req_cache, fc_req);
 		return NULL;
 	}
@@ -763,7 +818,7 @@
 				break;
 		}
 	}
-	zfcp_sg_free_table(&fc_req->sg_rsp, buf_num);
+	zfcp_fc_sg_free_table(&fc_req->sg_rsp, buf_num);
 	kmem_cache_free(zfcp_fc_req_cache, fc_req);
 out:
 	zfcp_fc_wka_port_put(&adapter->gs->ds);
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
index 3cd7472..6902ae1 100644
--- a/drivers/s390/scsi/zfcp_fc.h
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -121,9 +121,24 @@
 /**
  * struct zfcp_fc_req - Container for FC ELS and CT requests sent from zfcp
  * @ct_els: data required for issuing fsf command
- * @sg_req: scatterlist entry for request data
- * @sg_rsp: scatterlist entry for response data
- * @u: request specific data
+ * @sg_req: scatterlist entry for request data, refers to embedded @u submember
+ * @sg_rsp: scatterlist entry for response data, refers to embedded @u submember
+ * @u: request and response specific data
+ * @u.adisc: ADISC specific data
+ * @u.adisc.req: ADISC request
+ * @u.adisc.rsp: ADISC response
+ * @u.gid_pn: GID_PN specific data
+ * @u.gid_pn.req: GID_PN request
+ * @u.gid_pn.rsp: GID_PN response
+ * @u.gpn_ft: GPN_FT specific data
+ * @u.gpn_ft.sg_rsp2: GPN_FT response, not embedded here, allocated elsewhere
+ * @u.gpn_ft.req: GPN_FT request
+ * @u.gspn: GSPN specific data
+ * @u.gspn.req: GSPN request
+ * @u.gspn.rsp: GSPN response
+ * @u.rspn: RSPN specific data
+ * @u.rspn.req: RSPN request
+ * @u.rspn.rsp: RSPN response
  */
 struct zfcp_fc_req {
 	struct zfcp_fsf_ct_els				ct_els;
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 3c86e27..cf63916 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -11,6 +11,7 @@
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
 #include <linux/blktrace_api.h>
+#include <linux/types.h>
 #include <linux/slab.h>
 #include <scsi/fc/fc_els.h>
 #include "zfcp_ext.h"
@@ -19,8 +20,18 @@
 #include "zfcp_qdio.h"
 #include "zfcp_reqlist.h"
 
+/* timeout for FSF requests sent during scsi_eh: abort or FCP TMF */
+#define ZFCP_FSF_SCSI_ER_TIMEOUT (10*HZ)
+/* timeout for: exchange config/port data outside ERP, or open/close WKA port */
+#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ)
+
 struct kmem_cache *zfcp_fsf_qtcb_cache;
 
+static bool ber_stop = true;
+module_param(ber_stop, bool, 0600);
+MODULE_PARM_DESC(ber_stop,
+		 "Shuts down FCP devices for FCP channels that report a bit-error count in excess of its threshold (default on)");
+
 static void zfcp_fsf_request_timeout_handler(struct timer_list *t)
 {
 	struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer);
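The context line above shows the timer_list callback convention that the @t kernel-doc fix in zfcp_erp.c also describes: the handler receives only the struct timer_list pointer and recovers the structure it is embedded in with from_timer(), which is container_of() for timer members. A generic sketch of the pattern, with made-up names:

	#include <linux/timer.h>

	struct example_req {
		struct timer_list timer;	/* timer embedded in the request */
		int id;
	};

	static void example_timeout(struct timer_list *t)
	{
		struct example_req *req = from_timer(req, t, timer);

		pr_warn("request %d timed out\n", req->id);
	}

	/* setup elsewhere: timer_setup(&req->timer, example_timeout, 0); */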
@@ -74,18 +85,18 @@
 
 /**
  * zfcp_fsf_req_free - free memory used by fsf request
- * @fsf_req: pointer to struct zfcp_fsf_req
+ * @req: pointer to struct zfcp_fsf_req
  */
 void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
 {
 	if (likely(req->pool)) {
-		if (likely(req->qtcb))
+		if (likely(!zfcp_fsf_req_is_status_read_buffer(req)))
 			mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);
 		mempool_free(req, req->pool);
 		return;
 	}
 
-	if (likely(req->qtcb))
+	if (likely(!zfcp_fsf_req_is_status_read_buffer(req)))
 		kmem_cache_free(zfcp_fsf_qtcb_cache, req->qtcb);
 	kfree(req);
 }
@@ -230,10 +241,15 @@
 	case FSF_STATUS_READ_SENSE_DATA_AVAIL:
 		break;
 	case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
-		dev_warn(&adapter->ccw_device->dev,
-			 "The error threshold for checksum statistics "
-			 "has been exceeded\n");
 		zfcp_dbf_hba_bit_err("fssrh_3", req);
+		if (ber_stop) {
+			dev_warn(&adapter->ccw_device->dev,
+				 "All paths over this FCP device are disused because of excessive bit errors\n");
+			zfcp_erp_adapter_shutdown(adapter, 0, "fssrh_b");
+		} else {
+			dev_warn(&adapter->ccw_device->dev,
+				 "The error threshold for checksum statistics has been exceeded\n");
+		}
 		break;
 	case FSF_STATUS_READ_LINK_DOWN:
 		zfcp_fsf_status_read_link_down(req);
@@ -379,7 +395,7 @@
 
 /**
  * zfcp_fsf_req_complete - process completion of a FSF request
- * @fsf_req: The FSF request that has been completed.
+ * @req: The FSF request that has been completed.
  *
  * When a request has been completed either from the FCP adapter,
  * or it has been dismissed due to a queue shutdown, this function
@@ -388,7 +404,7 @@
  */
 static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
 {
-	if (unlikely(req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) {
+	if (unlikely(zfcp_fsf_req_is_status_read_buffer(req))) {
 		zfcp_fsf_status_read_handler(req);
 		return;
 	}
@@ -705,7 +721,6 @@
 	init_completion(&req->completion);
 
 	req->adapter = adapter;
-	req->fsf_command = fsf_cmd;
 	req->req_id = adapter->req_no;
 
 	if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) {
@@ -720,14 +735,13 @@
 			return ERR_PTR(-ENOMEM);
 		}
 
-		req->seq_no = adapter->fsf_req_seq_no;
 		req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
 		req->qtcb->prefix.req_id = req->req_id;
 		req->qtcb->prefix.ulp_info = 26;
-		req->qtcb->prefix.qtcb_type = fsf_qtcb_type[req->fsf_command];
+		req->qtcb->prefix.qtcb_type = fsf_qtcb_type[fsf_cmd];
 		req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
 		req->qtcb->header.req_handle = req->req_id;
-		req->qtcb->header.fsf_command = req->fsf_command;
+		req->qtcb->header.fsf_command = fsf_cmd;
 	}
 
 	zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype,
@@ -738,9 +752,9 @@
 
 static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
 {
+	const bool is_srb = zfcp_fsf_req_is_status_read_buffer(req);
 	struct zfcp_adapter *adapter = req->adapter;
 	struct zfcp_qdio *qdio = adapter->qdio;
-	int with_qtcb = (req->qtcb != NULL);
 	int req_id = req->req_id;
 
 	zfcp_reqlist_add(adapter->req_list, req);
@@ -755,8 +769,20 @@
 		return -EIO;
 	}
 
+	/*
+	 * NOTE: DO NOT TOUCH ASYNC req PAST THIS POINT.
+	 *	 ONLY TOUCH SYNC req AGAIN ON req->completion.
+	 *
+	 * The request might complete and be freed concurrently at any point
+	 * now. This is not protected by the QDIO-lock (req_q_lock). So any
+	 * uncontrolled access after this might result in an use-after-free bug.
+	 * Only if the request doesn't have ZFCP_STATUS_FSFREQ_CLEANUP set, and
+	 * when it is completed via req->completion, is it safe to use req
+	 * again.
+	 */
+
 	/* Don't increase for unsolicited status */
-	if (with_qtcb)
+	if (!is_srb)
 		adapter->fsf_req_seq_no++;
 	adapter->req_no++;
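This NOTE states the ownership rule the rest of the patch enforces: once zfcp_fsf_req_send() has handed the request to QDIO, the response path may complete and free it on another CPU at any time, so the submitter must not dereference req afterwards. That is why later hunks copy req->req_id into a local before sending, and why only synchronous requests, which lack ZFCP_STATUS_FSFREQ_CLEANUP and are waited for via req->completion, may be touched again. The two safe calling shapes, compressed into a sketch (simplified, not literal driver code):

	/* asynchronous request: copy what you need, then let go of the pointer */
	req_id = req->req_id;
	if (zfcp_fsf_req_send(req))
		zfcp_fsf_req_free(req);		/* still ours only if the send failed */
	/* on success: never touch req again; look it up by req_id via the reqlist */

	/* synchronous request: wait on the embedded completion before reuse */
	if (!zfcp_fsf_req_send(req)) {
		wait_for_completion(&req->completion);
		/* req may be inspected and freed again from here on */
	}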
 
@@ -765,8 +791,7 @@
 
 /**
  * zfcp_fsf_status_read - send status read request
- * @adapter: pointer to struct zfcp_adapter
- * @req_flags: request flags
+ * @qdio: pointer to struct zfcp_qdio
  * Returns: 0 on success, ERROR otherwise
  */
 int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
@@ -804,6 +829,7 @@
 	retval = zfcp_fsf_req_send(req);
 	if (retval)
 		goto failed_req_send;
+	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
 
 	goto out;
 
@@ -912,9 +938,11 @@
 	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
 	req->qtcb->bottom.support.req_handle = (u64) old_req_id;
 
-	zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
-	if (!zfcp_fsf_req_send(req))
+	zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT);
+	if (!zfcp_fsf_req_send(req)) {
+		/* NOTE: DO NOT TOUCH req, UNTIL IT COMPLETES! */
 		goto out;
+	}
 
 out_error_free:
 	zfcp_fsf_req_free(req);
@@ -1057,8 +1085,10 @@
 
 /**
  * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS)
+ * @wka_port: pointer to zfcp WKA port to send CT/GS to
  * @ct: pointer to struct zfcp_send_ct with data for request
  * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
+ * @timeout: timeout that hardware should use, and a later software timeout
  */
 int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
 		     struct zfcp_fsf_ct_els *ct, mempool_t *pool,
@@ -1095,6 +1125,7 @@
 	ret = zfcp_fsf_req_send(req);
 	if (ret)
 		goto failed_send;
+	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
 
 	goto out;
 
@@ -1151,7 +1182,10 @@
 
 /**
  * zfcp_fsf_send_els - initiate an ELS command (FC-FS)
+ * @adapter: pointer to zfcp adapter
+ * @d_id: N_Port_ID to send ELS to
  * @els: pointer to struct zfcp_send_els with data for the command
+ * @timeout: timeout that hardware should use, and a later software timeout
  */
 int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
 		      struct zfcp_fsf_ct_els *els, unsigned int timeout)
@@ -1192,6 +1226,7 @@
 	ret = zfcp_fsf_req_send(req);
 	if (ret)
 		goto failed_send;
+	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
 
 	goto out;
 
@@ -1237,6 +1272,7 @@
 		zfcp_fsf_req_free(req);
 		erp_action->fsf_req_id = 0;
 	}
+	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
 out:
 	spin_unlock_irq(&qdio->req_q_lock);
 	return retval;
@@ -1273,8 +1309,10 @@
 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
 	retval = zfcp_fsf_req_send(req);
 	spin_unlock_irq(&qdio->req_q_lock);
-	if (!retval)
+	if (!retval) {
+		/* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */
 		wait_for_completion(&req->completion);
+	}
 
 	zfcp_fsf_req_free(req);
 	return retval;
@@ -1324,6 +1362,7 @@
 		zfcp_fsf_req_free(req);
 		erp_action->fsf_req_id = 0;
 	}
+	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
 out:
 	spin_unlock_irq(&qdio->req_q_lock);
 	return retval;
@@ -1366,8 +1405,10 @@
 	retval = zfcp_fsf_req_send(req);
 	spin_unlock_irq(&qdio->req_q_lock);
 
-	if (!retval)
+	if (!retval) {
+		/* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */
 		wait_for_completion(&req->completion);
+	}
 
 	zfcp_fsf_req_free(req);
 
@@ -1487,6 +1528,7 @@
 		erp_action->fsf_req_id = 0;
 		put_device(&port->dev);
 	}
+	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
 out:
 	spin_unlock_irq(&qdio->req_q_lock);
 	return retval;
@@ -1551,6 +1593,7 @@
 		zfcp_fsf_req_free(req);
 		erp_action->fsf_req_id = 0;
 	}
+	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
 out:
 	spin_unlock_irq(&qdio->req_q_lock);
 	return retval;
@@ -1594,6 +1637,7 @@
 {
 	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
 	struct zfcp_fsf_req *req;
+	unsigned long req_id = 0;
 	int retval = -EIO;
 
 	spin_lock_irq(&qdio->req_q_lock);
@@ -1616,14 +1660,17 @@
 	hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
 	req->data = wka_port;
 
+	req_id = req->req_id;
+
 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
 	retval = zfcp_fsf_req_send(req);
 	if (retval)
 		zfcp_fsf_req_free(req);
+	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
 out:
 	spin_unlock_irq(&qdio->req_q_lock);
 	if (!retval)
-		zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id);
+		zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req_id);
 	return retval;
 }
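
Note that the debug-trace call after dropping req_q_lock must no longer dereference req, which is why req->req_id is copied into the local req_id while the request is still owned by this function; the WKA port close path below applies the same pattern.
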
 
@@ -1649,6 +1696,7 @@
 {
 	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
 	struct zfcp_fsf_req *req;
+	unsigned long req_id = 0;
 	int retval = -EIO;
 
 	spin_lock_irq(&qdio->req_q_lock);
@@ -1671,14 +1719,17 @@
 	req->data = wka_port;
 	req->qtcb->header.port_handle = wka_port->handle;
 
+	req_id = req->req_id;
+
 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
 	retval = zfcp_fsf_req_send(req);
 	if (retval)
 		zfcp_fsf_req_free(req);
+	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
 out:
 	spin_unlock_irq(&qdio->req_q_lock);
 	if (!retval)
-		zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id);
+		zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req_id);
 	return retval;
 }
 
@@ -1770,6 +1821,7 @@
 		zfcp_fsf_req_free(req);
 		erp_action->fsf_req_id = 0;
 	}
+	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
 out:
 	spin_unlock_irq(&qdio->req_q_lock);
 	return retval;
@@ -1809,7 +1861,7 @@
 	case FSF_LUN_SHARING_VIOLATION:
 		if (qual->word[0])
 			dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev,
-				 "LUN 0x%Lx on port 0x%Lx is already in "
+				 "LUN 0x%016Lx on port 0x%016Lx is already in "
 				 "use by CSS%d, MIF Image ID %x\n",
 				 zfcp_scsi_dev_lun(sdev),
 				 (unsigned long long)zfcp_sdev->port->wwpn,
@@ -1893,6 +1945,7 @@
 		zfcp_fsf_req_free(req);
 		erp_action->fsf_req_id = 0;
 	}
+	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
 out:
 	spin_unlock_irq(&qdio->req_q_lock);
 	return retval;
@@ -1981,12 +2034,13 @@
 		zfcp_fsf_req_free(req);
 		erp_action->fsf_req_id = 0;
 	}
+	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
 out:
 	spin_unlock_irq(&qdio->req_q_lock);
 	return retval;
 }
 
-static void zfcp_fsf_update_lat(struct fsf_latency_record *lat_rec, u32 lat)
+static void zfcp_fsf_update_lat(struct zfcp_latency_record *lat_rec, u32 lat)
 {
 	lat_rec->sum += lat;
 	lat_rec->min = min(lat_rec->min, lat);
@@ -1996,7 +2050,7 @@
 static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
 {
 	struct fsf_qual_latency_info *lat_in;
-	struct latency_cont *lat = NULL;
+	struct zfcp_latency_cont *lat = NULL;
 	struct zfcp_scsi_dev *zfcp_sdev;
 	struct zfcp_blk_drv_data blktrc;
 	int ticks = req->adapter->timer_ticks;
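
The latency bookkeeping types are only renamed into the zfcp_ namespace here; their assumed shape (a sketch, the real definitions live in a zfcp header) is one min/max/sum record per latency category plus a shared sample counter:

/* Assumed layout of the renamed latency types (illustrative only). */
struct zfcp_latency_record {
	u32 min;
	u32 max;
	u64 sum;
};

struct zfcp_latency_cont {
	struct zfcp_latency_record channel;	/* channel latency */
	struct zfcp_latency_record fabric;	/* fabric latency */
	u64 counter;				/* number of samples */
};
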
@@ -2088,11 +2142,8 @@
 		break;
 	case FSF_CMND_LENGTH_NOT_VALID:
 		dev_err(&req->adapter->ccw_device->dev,
-			"Incorrect CDB length %d, LUN 0x%016Lx on "
-			"port 0x%016Lx closed\n",
-			req->qtcb->bottom.io.fcp_cmnd_length,
-			(unsigned long long)zfcp_scsi_dev_lun(sdev),
-			(unsigned long long)zfcp_sdev->port->wwpn);
+			"Incorrect FCP_CMND length %d, FCP device closed\n",
+			req->qtcb->bottom.io.fcp_cmnd_length);
 		zfcp_erp_adapter_shutdown(req->adapter, 0, "fssfch4");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
@@ -2296,6 +2347,7 @@
 	retval = zfcp_fsf_req_send(req);
 	if (unlikely(retval))
 		goto failed_scsi_cmnd;
+	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
 
 	goto out;
 
@@ -2369,9 +2421,11 @@
 	fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu;
 	zfcp_fc_fcp_tm(fcp_cmnd, sdev, tm_flags);
 
-	zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
-	if (!zfcp_fsf_req_send(req))
+	zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT);
+	if (!zfcp_fsf_req_send(req)) {
+		/* NOTE: DO NOT TOUCH req, UNTIL IT COMPLETES! */
 		goto out;
+	}
 
 	zfcp_fsf_req_free(req);
 	req = NULL;
@@ -2382,7 +2436,7 @@
 
 /**
  * zfcp_fsf_reqid_check - validate req_id contained in SBAL returned by QDIO
- * @adapter: pointer to struct zfcp_adapter
+ * @qdio: pointer to struct zfcp_qdio
  * @sbal_idx: response queue index of SBAL to be processed
  */
 void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index 535628b..2c658b6 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -438,8 +438,8 @@
 
 /**
  * struct zfcp_fsf_ct_els - zfcp data for ct or els request
- * @req: scatter-gather list for request
- * @resp: scatter-gather list for response
+ * @req: scatter-gather list for request, points to &zfcp_fc_req.sg_req or BSG
+ * @resp: scatter-gather list for response, points to &zfcp_fc_req.sg_rsp or BSG
  * @handler: handler function (called for response to the request)
  * @handler_data: data passed to handler function
  * @port: Optional pointer to port for zfcp internal ELS (only test link ADISC)
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 4ab02e8..661436a 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -4,7 +4,7 @@
  *
  * Setup and helper functions to access QDIO.
  *
- * Copyright IBM Corp. 2002, 2010
+ * Copyright IBM Corp. 2002, 2017
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -19,7 +19,7 @@
 module_param_named(datarouter, enable_multibuffer, bool, 0400);
 MODULE_PARM_DESC(datarouter, "Enable hardware data router support (default on)");
 
-static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id,
+static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *dbftag,
 				    unsigned int qdio_err)
 {
 	struct zfcp_adapter *adapter = qdio->adapter;
@@ -28,12 +28,12 @@
 
 	if (qdio_err & QDIO_ERROR_SLSB_STATE) {
 		zfcp_qdio_siosl(adapter);
-		zfcp_erp_adapter_shutdown(adapter, 0, id);
+		zfcp_erp_adapter_shutdown(adapter, 0, dbftag);
 		return;
 	}
 	zfcp_erp_adapter_reopen(adapter,
 				ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
-				ZFCP_STATUS_COMMON_ERP_FAILED, id);
+				ZFCP_STATUS_COMMON_ERP_FAILED, dbftag);
 }
 
 static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
@@ -180,7 +180,6 @@
  * @qdio: pointer to struct zfcp_qdio
  * @q_req: pointer to struct zfcp_qdio_req
  * @sg: scatter-gather list
- * @max_sbals: upper bound for number of SBALs to be used
  * Returns: zero or -EINVAL on error
  */
 int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
@@ -295,15 +294,15 @@
 	id->input_handler = zfcp_qdio_int_resp;
 	id->output_handler = zfcp_qdio_int_req;
 	id->int_parm = (unsigned long) qdio;
-	id->input_sbal_addr_array = (void **) (qdio->res_q);
-	id->output_sbal_addr_array = (void **) (qdio->req_q);
+	id->input_sbal_addr_array = qdio->res_q;
+	id->output_sbal_addr_array = qdio->req_q;
 	id->scan_threshold =
 		QDIO_MAX_BUFFERS_PER_Q - ZFCP_QDIO_MAX_SBALS_PER_REQ * 2;
 }
 
 /**
  * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
- * @adapter: pointer to struct zfcp_adapter
+ * @qdio: pointer to struct zfcp_qdio
  * Returns: -ENOMEM on memory allocation error or return value from
  *          qdio_allocate
  */
diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h
index 886c662..2a816a3 100644
--- a/drivers/s390/scsi/zfcp_qdio.h
+++ b/drivers/s390/scsi/zfcp_qdio.h
@@ -30,6 +30,8 @@
  * @req_q_full: queue full incidents
  * @req_q_wq: used to wait for SBAL availability
  * @adapter: adapter used in conjunction with this qdio structure
+ * @max_sbale_per_sbal: qdio limit per sbal
+ * @max_sbale_per_req: qdio limit per request
  */
 struct zfcp_qdio {
 	struct qdio_buffer	*res_q[QDIO_MAX_BUFFERS_PER_Q];
@@ -70,7 +72,7 @@
 /**
  * zfcp_qdio_sbale_req - return pointer to sbale on req_q for a request
  * @qdio: pointer to struct zfcp_qdio
- * @q_rec: pointer to struct zfcp_qdio_req
+ * @q_req: pointer to struct zfcp_qdio_req
  * Returns: pointer to qdio_buffer_element (sbale) structure
  */
 static inline struct qdio_buffer_element *
@@ -82,7 +84,7 @@
 /**
  * zfcp_qdio_sbale_curr - return current sbale on req_q for a request
  * @qdio: pointer to struct zfcp_qdio
- * @fsf_req: pointer to struct zfcp_fsf_req
+ * @q_req: pointer to struct zfcp_qdio_req
  * Returns: pointer to qdio_buffer_element (sbale) structure
  */
 static inline struct qdio_buffer_element *
@@ -135,6 +137,8 @@
  * zfcp_qdio_fill_next - Fill next sbale, only for single sbal requests
  * @qdio: pointer to struct zfcp_qdio
  * @q_req: pointer to struct zfcp_queue_req
+ * @data: pointer to data
+ * @len: length of data
  *
  * This is only required for single sbal requests, calling it when
  * wrapping around to the next sbal is a bug.
@@ -182,6 +186,7 @@
 
 /**
  * zfcp_qdio_skip_to_last_sbale - skip to last sbale in sbal
+ * @qdio: pointer to struct zfcp_qdio
  * @q_req: The current zfcp_qdio_req
  */
 static inline
diff --git a/drivers/s390/scsi/zfcp_reqlist.h b/drivers/s390/scsi/zfcp_reqlist.h
index 59a943c..9b8ff24 100644
--- a/drivers/s390/scsi/zfcp_reqlist.h
+++ b/drivers/s390/scsi/zfcp_reqlist.h
@@ -17,7 +17,7 @@
 /**
  * struct zfcp_reqlist - Container for request list (reqlist)
  * @lock: Spinlock for protecting the hash list
- * @list: Array of hashbuckets, each is a list of requests in this bucket
+ * @buckets: Array of hashbuckets, each is a list of requests in this bucket
  */
 struct zfcp_reqlist {
 	spinlock_t lock;
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index a8efcb3..e9ded2b 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -27,7 +27,11 @@
 
 static bool enable_dif;
 module_param_named(dif, enable_dif, bool, 0400);
-MODULE_PARM_DESC(dif, "Enable DIF/DIX data integrity support");
+MODULE_PARM_DESC(dif, "Enable DIF data integrity support (default off)");
+
+bool zfcp_experimental_dix;
+module_param_named(dix, zfcp_experimental_dix, bool, 0400);
+MODULE_PARM_DESC(dix, "Enable experimental DIX (data integrity extension) support which implies DIF support (default off)");
 
 static bool allow_lun_scan = true;
 module_param(allow_lun_scan, bool, 0600);
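
As a usage note: both dif and the new dix parameter are registered with permission 0400, so they can only be set at load time, for example via "zfcp.dix=1" on the kernel command line or "options zfcp dix=1" in a modprobe configuration file; allow_lun_scan (0600) remains changeable at runtime through sysfs.
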
@@ -125,6 +129,15 @@
 
 	zfcp_sdev->erp_action.port = port;
 
+	mutex_lock(&zfcp_sysfs_port_units_mutex);
+	if (zfcp_sysfs_port_is_removing(port)) {
+		/* port is already gone */
+		mutex_unlock(&zfcp_sysfs_port_units_mutex);
+		put_device(&port->dev); /* undo zfcp_get_port_by_wwpn() */
+		return -ENXIO;
+	}
+	mutex_unlock(&zfcp_sysfs_port_units_mutex);
+
 	unit = zfcp_unit_find(port, zfcp_scsi_dev_lun(sdev));
 	if (unit)
 		put_device(&unit->dev);
@@ -226,7 +239,9 @@
 		(struct zfcp_scsi_req_filter *)data;
 
 	/* already aborted - prevent side-effects - or not a SCSI command */
-	if (old_req->data == NULL || old_req->fsf_command != FSF_QTCB_FCP_CMND)
+	if (old_req->data == NULL ||
+	    zfcp_fsf_req_is_status_read_buffer(old_req) ||
+	    old_req->qtcb->header.fsf_command != FSF_QTCB_FCP_CMND)
 		return;
 
 	/* (tmf_scope == FCP_TMF_TGT_RESET || tmf_scope == FCP_TMF_LUN_RESET) */
@@ -362,6 +377,10 @@
 	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
 	int ret = SUCCESS, fc_ret;
 
+	if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) {
+		zfcp_erp_port_forced_reopen_all(adapter, 0, "schrh_p");
+		zfcp_erp_wait(adapter);
+	}
 	zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
 	zfcp_erp_wait(adapter);
 	fc_ret = fc_block_scsi_eh(scpnt);
@@ -422,8 +441,9 @@
 	.max_sectors		 = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
 				     * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2) * 8,
 				   /* GCD, adjusted later */
+	/* report size limit per scatter-gather segment */
+	.max_segment_size	 = ZFCP_QDIO_SBALE_LEN,
 	.dma_boundary		 = ZFCP_QDIO_SBALE_LEN - 1,
-	.use_clustering		 = 1,
 	.shost_attrs		 = zfcp_sysfs_shost_attrs,
 	.sdev_attrs		 = zfcp_sysfs_sdev_attrs,
 	.track_queue_depth	 = 1,
@@ -788,11 +808,11 @@
 	data_div = atomic_read(&adapter->status) &
 		   ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED;
 
-	if (enable_dif &&
+	if ((enable_dif || zfcp_experimental_dix) &&
 	    adapter->adapter_features & FSF_FEATURE_DIF_PROT_TYPE1)
 		mask |= SHOST_DIF_TYPE1_PROTECTION;
 
-	if (enable_dif && data_div &&
+	if (zfcp_experimental_dix && data_div &&
 	    adapter->adapter_features & FSF_FEATURE_DIX_PROT_TCPIP) {
 		mask |= SHOST_DIX_TYPE1_PROTECTION;
 		scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP);
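
Once assembled, the protection mask is presumably handed to the SCSI midlayer right after this hunk; a sketch of that step, assuming the standard helper (not shown in this diff):

	scsi_host_set_prot(shost, mask);	/* advertise DIF/DIX capabilities */

scsi_host_set_guard() above is the matching helper for selecting the IP-checksum guard type when DIX is enabled.
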
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index b277be6..af197e2 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -235,6 +235,53 @@
 
 DEFINE_MUTEX(zfcp_sysfs_port_units_mutex);
 
+static void zfcp_sysfs_port_set_removing(struct zfcp_port *const port)
+{
+	lockdep_assert_held(&zfcp_sysfs_port_units_mutex);
+	atomic_set(&port->units, -1);
+}
+
+bool zfcp_sysfs_port_is_removing(const struct zfcp_port *const port)
+{
+	lockdep_assert_held(&zfcp_sysfs_port_units_mutex);
+	return atomic_read(&port->units) == -1;
+}
+
+static bool zfcp_sysfs_port_in_use(struct zfcp_port *const port)
+{
+	struct zfcp_adapter *const adapter = port->adapter;
+	unsigned long flags;
+	struct scsi_device *sdev;
+	bool in_use = true;
+
+	mutex_lock(&zfcp_sysfs_port_units_mutex);
+	if (atomic_read(&port->units) > 0)
+		goto unlock_port_units_mutex; /* zfcp_unit(s) under port */
+
+	spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
+	__shost_for_each_device(sdev, adapter->scsi_host) {
+		const struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev);
+
+		if (sdev->sdev_state == SDEV_DEL ||
+		    sdev->sdev_state == SDEV_CANCEL)
+			continue;
+		if (zsdev->port != port)
+			continue;
+		/* alive scsi_device under port of interest */
+		goto unlock_host_lock;
+	}
+
+	/* port is about to be removed, so no more unit_add or slave_alloc */
+	zfcp_sysfs_port_set_removing(port);
+	in_use = false;
+
+unlock_host_lock:
+	spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
+unlock_port_units_mutex:
+	mutex_unlock(&zfcp_sysfs_port_units_mutex);
+	return in_use;
+}
+
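
zfcp_scsi_slave_alloc() and zfcp_unit_add() use zfcp_sysfs_port_is_removing() and zfcp_sysfs_port_units_mutex from other compilation units, so matching declarations presumably live in a shared zfcp header (zfcp_ext.h is an assumption); a sketch:

/* Assumed shared declarations, e.g. in zfcp_ext.h. */
extern struct mutex zfcp_sysfs_port_units_mutex;
extern bool zfcp_sysfs_port_is_removing(const struct zfcp_port *const port);
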
 static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
 					    struct device_attribute *attr,
 					    const char *buf, size_t count)
@@ -257,15 +304,11 @@
 	else
 		retval = 0;
 
-	mutex_lock(&zfcp_sysfs_port_units_mutex);
-	if (atomic_read(&port->units) > 0) {
+	if (zfcp_sysfs_port_in_use(port)) {
 		retval = -EBUSY;
-		mutex_unlock(&zfcp_sysfs_port_units_mutex);
+		put_device(&port->dev); /* undo zfcp_get_port_by_wwpn() */
 		goto out;
 	}
-	/* port is about to be removed, so no more unit_add */
-	atomic_set(&port->units, -1);
-	mutex_unlock(&zfcp_sysfs_port_units_mutex);
 
 	write_lock_irq(&adapter->port_list_lock);
 	list_del(&port->list);
diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c
index 1bf0a09..e67bf73 100644
--- a/drivers/s390/scsi/zfcp_unit.c
+++ b/drivers/s390/scsi/zfcp_unit.c
@@ -124,7 +124,7 @@
 	int retval = 0;
 
 	mutex_lock(&zfcp_sysfs_port_units_mutex);
-	if (atomic_read(&port->units) == -1) {
+	if (zfcp_sysfs_port_is_removing(port)) {
 		/* port is already gone */
 		retval = -ENODEV;
 		goto out;
@@ -168,8 +168,14 @@
 	write_lock_irq(&port->unit_list_lock);
 	list_add_tail(&unit->list, &port->unit_list);
 	write_unlock_irq(&port->unit_list_lock);
+	/*
+	 * lock order: shost->scan_mutex before zfcp_sysfs_port_units_mutex
+	 * due to      zfcp_unit_scsi_scan() => zfcp_scsi_slave_alloc()
+	 */
+	mutex_unlock(&zfcp_sysfs_port_units_mutex);
 
 	zfcp_unit_scsi_scan(unit);
+	return retval;
 
 out:
 	mutex_unlock(&zfcp_sysfs_port_units_mutex);
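
The lock-order comment above corresponds to the following (assumed) nesting, which is why the mutex is released before the scan and why the function returns through a separate path afterwards:

/*
 * Assumed call chain behind the lock order:
 *
 *   zfcp_unit_scsi_scan()
 *     -> scsi_scan_target()             takes shost->scan_mutex
 *          -> zfcp_scsi_slave_alloc()   takes zfcp_sysfs_port_units_mutex
 *
 * Scanning while still holding zfcp_sysfs_port_units_mutex would both invert
 * that order and self-deadlock on the non-recursive mutex, hence the
 * mutex_unlock() before zfcp_unit_scsi_scan() and the early "return retval".
 */
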